From 04f093b34d6bf61cab570f6f6c395dabec72efeb Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Wed, 7 Jun 2023 19:47:48 +0300 Subject: [PATCH 01/38] integrated latest mx-chain-communication-go and updated networkComponents to create and manage both messengers added new fullArchiveP2P.toml file(for the moment just a copy of p2p.toml) + new flags removed MetricP2PFullHistoryObservers which won't be needed anymore removed FullHistoryList from diffPeerListCreator as it won't be needed anymore added the needed disabled components added todos for further implementation --- cmd/node/config/fullArchiveP2P.toml | 74 ++++++ cmd/node/config/p2p.toml | 4 - cmd/node/flags.go | 24 +- cmd/node/main.go | 17 +- cmd/seednode/config/p2p.toml | 4 - cmd/seednode/main.go | 1 + common/constants.go | 3 - config/config.go | 4 +- config/overridableConfig/configOverriding.go | 13 +- .../configOverriding_test.go | 14 +- config/tomlConfig_test.go | 4 +- dataRetriever/interface.go | 2 - dataRetriever/mock/messageHandlerStub.go | 12 +- dataRetriever/mock/peerListCreatorStub.go | 6 - .../topicSender/diffPeerListCreator.go | 5 - .../topicSender/diffPeerListCreator_test.go | 18 -- .../topicSender/topicRequestSender.go | 5 +- .../topicSender/topicRequestSender_test.go | 81 +++---- errors/errors.go | 3 + factory/consensus/consensusComponents.go | 1 + factory/disabled/networkMessenger.go | 186 +++++++++++++++ factory/disabled/peersRatingHandler.go | 29 +++ factory/disabled/peersRatingMonitor.go | 19 ++ factory/network/networkComponents.go | 219 ++++++++++++------ factory/network/networkComponentsHandler.go | 74 ++++-- .../network/networkComponentsHandler_test.go | 8 +- factory/network/networkComponents_test.go | 10 + factory/status/statusComponentsHandler.go | 4 +- .../status/statusComponentsHandler_test.go | 10 +- go.mod | 2 +- go.sum | 3 +- integrationTests/factory/componentsHelper.go | 5 +- integrationTests/factory/constants.go | 1 + .../networkSharding_test.go | 3 - 
.../realcomponents/processorRunner.go | 3 +- integrationTests/testHeartbeatNode.go | 3 +- integrationTests/testInitializer.go | 15 +- integrationTests/testProcessorNode.go | 1 + node/metrics/metrics.go | 1 - node/metrics/metrics_test.go | 1 - node/nodeRunner.go | 4 +- p2p/config/config.go | 4 - p2p/interface.go | 3 + testscommon/components/components.go | 17 +- testscommon/p2pmocks/messengerStub.go | 10 - testscommon/realConfigsHandling.go | 28 ++- 46 files changed, 689 insertions(+), 269 deletions(-) create mode 100644 cmd/node/config/fullArchiveP2P.toml create mode 100644 factory/disabled/networkMessenger.go create mode 100644 factory/disabled/peersRatingHandler.go create mode 100644 factory/disabled/peersRatingMonitor.go diff --git a/cmd/node/config/fullArchiveP2P.toml b/cmd/node/config/fullArchiveP2P.toml new file mode 100644 index 00000000000..9e751da6005 --- /dev/null +++ b/cmd/node/config/fullArchiveP2P.toml @@ -0,0 +1,74 @@ +#FullArchiveP2P config file + +#TODO[Sorin]: proper values on this config, it is just a copy of p2p.toml for the moment + +#NodeConfig holds the P2P settings +[Node] + #Port is the port that will be opened by the node on all interfaces so other peers can connect to it + #If the port = 0, the node will search for a free port on the machine and use it + Port = "37373-38383" + + #ThresholdMinConnectedPeers represents the minimum number of connections a node should have before it can start + #the sync and consensus mechanisms + ThresholdMinConnectedPeers = 3 + + # MinNumPeersToWaitForOnBootstrap is the minimum number of peers to wait on bootstrap or the node will wait the default + # time which is now set to ~20 seconds (the const defined in the common package named TimeToWaitForP2PBootstrap) + MinNumPeersToWaitForOnBootstrap = 10 + +# P2P peer discovery section + +#The following sections correspond to the way new peers will be discovered +#If all config types are disabled then the peer will run in single mode (will not try to find other 
peers) +#If more than one peer discovery mechanism is enabled, the application will output an error and will not start + +[KadDhtPeerDiscovery] + #Enabled: true/false to enable/disable this discovery mechanism + Enabled = true + + #Type represents the kad-dht glue code implementation. + #"legacy" will define the first implementation. + #"optimized" represents the new variant able to connect to multiple seeders at once. This implementation also has + #a built-in timer that will try to automatically reconnect to the seeders (in case the seeders recover after a + #premature shutdown) + Type = "optimized" + + #RefreshIntervalInSec represents the time in seconds between querying for new peers + RefreshIntervalInSec = 10 + + #ProtocolID represents the protocol that this node will advertize to other peers + #To connect to other nodes, those nodes should have the same ProtocolID string + ProtocolID = "/erd/kad/1.0.0" + + #InitialPeerList represents the list of strings of some known nodes that will bootstrap this node + #The address will be in a self-describing addressing format. 
+ #More can be found here: https://github.com/libp2p/specs/blob/master/3-requirements.md#34-transport-agnostic + #Example: + # /ip6/fe80::8823:6dff:fee7:f172/tcp/4001/p2p/QmYJyUMAcXEw1b5bFfbBbzYu5wyyjLMRHXGUkCXpag74Fu + # /ip4/162.246.145.218/udp/4001/utp/ipfs/QmYJyUMAcXEw1b5bFfbBbzYu5wyyjLMRHXGUkCXpag74Fu + # + #If the initial peers list is left empty, the node will not try to connect to other peers during initial bootstrap + #phase but will accept connections and will do the network discovery if another peer connects to it + InitialPeerList = ["/ip4/127.0.0.1/tcp/9999/p2p/16Uiu2HAkw5SNNtSvH1zJiQ6Gc3WoGNSxiyNueRKe6fuAuh57G3Bk"] + + #kademlia's routing table bucket size + BucketSize = 100 + + #RoutingTableRefreshIntervalInSec defines how many seconds should pass between 2 kad routing table auto refresh calls + RoutingTableRefreshIntervalInSec = 300 + +[Sharding] + # The targeted number of peer connections + TargetPeerCount = 36 + MaxIntraShardValidators = 7 + MaxCrossShardValidators = 15 + MaxIntraShardObservers = 2 + MaxCrossShardObservers = 3 + MaxSeeders = 2 + + #available options: + # `ListsSharder` will split the peers based on the shard membership (intra, cross or unknown) + # `OneListSharder` will do just the connection triming (upto TargetPeerCount value) not taking into account + # the shard membership of the connected peers + # `NilListSharder` will disable conection trimming (sharder is off) + Type = "ListsSharder" diff --git a/cmd/node/config/p2p.toml b/cmd/node/config/p2p.toml index 2ce99da3ba0..d95fca754cc 100644 --- a/cmd/node/config/p2p.toml +++ b/cmd/node/config/p2p.toml @@ -70,7 +70,3 @@ # the shard membership of the connected peers # `NilListSharder` will disable conection trimming (sharder is off) Type = "ListsSharder" - - [AdditionalConnections] - #this value will be added to the target peer count automatically when the node will be in full archive mode - MaxFullHistoryObservers = 10 diff --git a/cmd/node/flags.go b/cmd/node/flags.go index 
004f8d0c024..9c2bbc93846 100644 --- a/cmd/node/flags.go +++ b/cmd/node/flags.go @@ -91,6 +91,13 @@ var ( "configurations such as port, target peer count or KadDHT settings", Value: "./config/p2p.toml", } + // fullArchiveP2PConfigurationFile defines a flag for the path to the toml file containing P2P configuration for the full archive network + fullArchiveP2PConfigurationFile = cli.StringFlag{ + Name: "full-archive-p2p-config", + Usage: "The `" + filePathPlaceholder + "` for the p2p configuration file for the full archive network. This TOML file contains peer-to-peer " + + "configurations such as port, target peer count or KadDHT settings", + Value: "./config/fullArchiveP2P.toml", + } // epochConfigurationFile defines a flag for the path to the toml file containing the epoch configuration epochConfigurationFile = cli.StringFlag{ Name: "epoch-config", @@ -111,13 +118,20 @@ var ( Usage: "The `" + filePathPlaceholder + "` for the gas costs configuration directory.", Value: "./config/gasSchedules", } - // port defines a flag for setting the port on which the node will listen for connections + // port defines a flag for setting the port on which the node will listen for connections on the main network port = cli.StringFlag{ Name: "port", Usage: "The `[p2p port]` number on which the application will start. Can use single values such as " + "`0, 10230, 15670` or range of ports such as `5000-10000`", Value: "0", } + // fullArchivePort defines a flag for setting the port on which the node will listen for connections on the full archive network + fullArchivePort = cli.StringFlag{ + Name: "full-archive-port", + Usage: "The `[p2p port]` number on which the application will start the second network when running in full archive mode. " + + "Can use single values such as `0, 10230, 15670` or range of ports such as `5000-10000`", + Value: "0", + } // profileMode defines a flag for profiling the binary // If enabled, it will open the pprof routes over the default gin rest webserver. 
// There are several routes that will be available for profiling (profiling can be analyzed with: go tool pprof): @@ -405,6 +419,7 @@ func getFlags() []cli.Flag { configurationPreferencesFile, externalConfigFile, p2pConfigurationFile, + fullArchiveP2PConfigurationFile, epochConfigurationFile, roundConfigurationFile, gasScheduleConfigurationDirectory, @@ -412,6 +427,7 @@ func getFlags() []cli.Flag { validatorKeyPemFile, allValidatorKeysPemFile, port, + fullArchivePort, profileMode, useHealthService, storageCleanup, @@ -670,7 +686,8 @@ func processLiteObserverMode(log logger.Logger, configs *config.Configs) { func processConfigImportDBMode(log logger.Logger, configs *config.Configs) error { importDbFlags := configs.ImportDbConfig generalConfigs := configs.GeneralConfig - p2pConfigs := configs.P2pConfig + p2pConfigs := configs.MainP2pConfig + fullArchiveP2PConfigs := configs.FullArchiveP2pConfig prefsConfig := configs.PreferencesConfig var err error @@ -690,6 +707,8 @@ func processConfigImportDBMode(log logger.Logger, configs *config.Configs) error generalConfigs.StateTriesConfig.CheckpointRoundsModulus = 100000000 p2pConfigs.Node.ThresholdMinConnectedPeers = 0 p2pConfigs.KadDhtPeerDiscovery.Enabled = false + fullArchiveP2PConfigs.Node.ThresholdMinConnectedPeers = 0 + fullArchiveP2PConfigs.KadDhtPeerDiscovery.Enabled = false alterStorageConfigsForDBImport(generalConfigs) @@ -700,6 +719,7 @@ func processConfigImportDBMode(log logger.Logger, configs *config.Configs) error "StoragePruning.NumEpochsToKeep", generalConfigs.StoragePruning.NumEpochsToKeep, "StoragePruning.NumActivePersisters", generalConfigs.StoragePruning.NumActivePersisters, "p2p.ThresholdMinConnectedPeers", p2pConfigs.Node.ThresholdMinConnectedPeers, + "fullArchiveP2P.ThresholdMinConnectedPeers", fullArchiveP2PConfigs.Node.ThresholdMinConnectedPeers, "no sig check", importDbFlags.ImportDbNoSigCheckFlag, "import save trie epoch root hash", importDbFlags.ImportDbSaveTrieEpochRootHash, "import DB start in 
epoch", importDbFlags.ImportDBStartInEpoch, diff --git a/cmd/node/main.go b/cmd/node/main.go index f89702cb3c3..6bfba8d64e4 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -202,12 +202,19 @@ func readConfigs(ctx *cli.Context, log logger.Logger) (*config.Configs, error) { log.Debug("config", "file", configurationPaths.External) configurationPaths.P2p = ctx.GlobalString(p2pConfigurationFile.Name) - p2pConfig, err := common.LoadP2PConfig(configurationPaths.P2p) + mainP2PConfig, err := common.LoadP2PConfig(configurationPaths.P2p) if err != nil { return nil, err } log.Debug("config", "file", configurationPaths.P2p) + configurationPaths.FullArchiveP2p = ctx.GlobalString(fullArchiveP2PConfigurationFile.Name) + fullArchiveP2PConfig, err := common.LoadP2PConfig(configurationPaths.FullArchiveP2p) + if err != nil { + return nil, err + } + log.Debug("config", "file", configurationPaths.FullArchiveP2p) + configurationPaths.Epoch = ctx.GlobalString(epochConfigurationFile.Name) epochConfig, err := common.LoadEpochConfig(configurationPaths.Epoch) if err != nil { @@ -223,7 +230,10 @@ func readConfigs(ctx *cli.Context, log logger.Logger) (*config.Configs, error) { log.Debug("config", "file", configurationPaths.RoundActivation) if ctx.IsSet(port.Name) { - p2pConfig.Node.Port = ctx.GlobalString(port.Name) + mainP2PConfig.Node.Port = ctx.GlobalString(port.Name) + } + if ctx.IsSet(fullArchivePort.Name) { + fullArchiveP2PConfig.Node.Port = ctx.GlobalString(fullArchivePort.Name) } if ctx.IsSet(destinationShardAsObserver.Name) { preferencesConfig.Preferences.DestinationShardAsObserver = ctx.GlobalString(destinationShardAsObserver.Name) @@ -243,7 +253,8 @@ func readConfigs(ctx *cli.Context, log logger.Logger) (*config.Configs, error) { RatingsConfig: ratingsConfig, PreferencesConfig: preferencesConfig, ExternalConfig: externalConfig, - P2pConfig: p2pConfig, + MainP2pConfig: mainP2PConfig, + FullArchiveP2pConfig: fullArchiveP2PConfig, ConfigurationPathsHolder: configurationPaths, 
EpochConfig: epochConfig, RoundConfig: roundConfig, diff --git a/cmd/seednode/config/p2p.toml b/cmd/seednode/config/p2p.toml index 5e13f92574f..43dbd7989bc 100644 --- a/cmd/seednode/config/p2p.toml +++ b/cmd/seednode/config/p2p.toml @@ -69,7 +69,3 @@ # the shard membership of the connected peers # `NilListSharder` will disable conection trimming (sharder is off) Type = "NilListSharder" - - [AdditionalConnections] - #this value will be added to the target peer count automatically when the node will be in full archive mode - MaxFullHistoryObservers = 0 diff --git a/cmd/seednode/main.go b/cmd/seednode/main.go index 5be736d3abd..9005d2d83ab 100644 --- a/cmd/seednode/main.go +++ b/cmd/seednode/main.go @@ -269,6 +269,7 @@ func createNode( P2pPrivateKey: p2pKey, P2pSingleSigner: p2pSingleSigner, P2pKeyGenerator: p2pKeyGen, + Logger: logger.GetOrCreate("seed/p2p"), } return p2pFactory.NewNetworkMessenger(arg) diff --git a/common/constants.go b/common/constants.go index 521ef905d8e..5cc7e7ccd4c 100644 --- a/common/constants.go +++ b/common/constants.go @@ -649,9 +649,6 @@ const MetricP2PIntraShardObservers = "erd_p2p_intra_shard_observers" // MetricP2PCrossShardObservers is the metric that outputs the cross-shard connected observers const MetricP2PCrossShardObservers = "erd_p2p_cross_shard_observers" -// MetricP2PFullHistoryObservers is the metric that outputs the full-history connected observers -const MetricP2PFullHistoryObservers = "erd_p2p_full_history_observers" - // MetricP2PUnknownPeers is the metric that outputs the unknown-shard connected peers const MetricP2PUnknownPeers = "erd_p2p_unknown_shard_peers" diff --git a/config/config.go b/config/config.go index 1f429ad2de3..96367fa3c1b 100644 --- a/config/config.go +++ b/config/config.go @@ -569,7 +569,8 @@ type Configs struct { RatingsConfig *RatingsConfig PreferencesConfig *Preferences ExternalConfig *ExternalConfig - P2pConfig *p2pConfig.P2PConfig + MainP2pConfig *p2pConfig.P2PConfig + FullArchiveP2pConfig 
*p2pConfig.P2PConfig FlagsConfig *ContextFlagsConfig ImportDbConfig *ImportDbConfig ConfigurationPathsHolder *ConfigurationPathsHolder @@ -587,6 +588,7 @@ type ConfigurationPathsHolder struct { Preferences string External string P2p string + FullArchiveP2p string GasScheduleDirectoryName string Nodes string Genesis string diff --git a/config/overridableConfig/configOverriding.go b/config/overridableConfig/configOverriding.go index c1d82b52dbb..7e9f3a153de 100644 --- a/config/overridableConfig/configOverriding.go +++ b/config/overridableConfig/configOverriding.go @@ -10,10 +10,11 @@ import ( ) const ( - configTomlFile = "config.toml" - enableEpochsTomlFile = "enableEpochs.toml" - p2pTomlFile = "p2p.toml" - externalTomlFile = "external.toml" + configTomlFile = "config.toml" + enableEpochsTomlFile = "enableEpochs.toml" + p2pTomlFile = "p2p.toml" + fullArchiveP2PTomlFile = "fullArchiveP2P.toml" + externalTomlFile = "external.toml" ) var ( @@ -31,7 +32,9 @@ func OverrideConfigValues(newConfigs []config.OverridableConfig, configs *config case enableEpochsTomlFile: err = reflectcommon.AdaptStructureValueBasedOnPath(configs.EpochConfig, newConfig.Path, newConfig.Value) case p2pTomlFile: - err = reflectcommon.AdaptStructureValueBasedOnPath(configs.P2pConfig, newConfig.Path, newConfig.Value) + err = reflectcommon.AdaptStructureValueBasedOnPath(configs.MainP2pConfig, newConfig.Path, newConfig.Value) + case fullArchiveP2PTomlFile: + err = reflectcommon.AdaptStructureValueBasedOnPath(configs.FullArchiveP2pConfig, newConfig.Path, newConfig.Value) case externalTomlFile: err = reflectcommon.AdaptStructureValueBasedOnPath(configs.ExternalConfig, newConfig.Path, newConfig.Value) default: diff --git a/config/overridableConfig/configOverriding_test.go b/config/overridableConfig/configOverriding_test.go index 77a48590cd2..b15cf8e5c5c 100644 --- a/config/overridableConfig/configOverriding_test.go +++ b/config/overridableConfig/configOverriding_test.go @@ -45,11 +45,21 @@ func 
TestOverrideConfigValues(t *testing.T) { t.Run("should work for p2p.toml", func(t *testing.T) { t.Parallel() - configs := &config.Configs{P2pConfig: &p2pConfig.P2PConfig{Sharding: p2pConfig.ShardingConfig{TargetPeerCount: 5}}} + configs := &config.Configs{MainP2pConfig: &p2pConfig.P2PConfig{Sharding: p2pConfig.ShardingConfig{TargetPeerCount: 5}}} err := OverrideConfigValues([]config.OverridableConfig{{Path: "Sharding.TargetPeerCount", Value: "37", File: "p2p.toml"}}, configs) require.NoError(t, err) - require.Equal(t, uint32(37), configs.P2pConfig.Sharding.TargetPeerCount) + require.Equal(t, uint32(37), configs.MainP2pConfig.Sharding.TargetPeerCount) + }) + + t.Run("should work for fullArchiveP2P.toml", func(t *testing.T) { + t.Parallel() + + configs := &config.Configs{FullArchiveP2pConfig: &p2pConfig.P2PConfig{Sharding: p2pConfig.ShardingConfig{TargetPeerCount: 5}}} + + err := OverrideConfigValues([]config.OverridableConfig{{Path: "Sharding.TargetPeerCount", Value: "37", File: "fullArchiveP2P.toml"}}, configs) + require.NoError(t, err) + require.Equal(t, uint32(37), configs.FullArchiveP2pConfig.Sharding.TargetPeerCount) }) t.Run("should work for external.toml", func(t *testing.T) { diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go index 765919b32ff..a33f910a832 100644 --- a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -481,9 +481,7 @@ func TestP2pConfig(t *testing.T) { MaxIntraShardObservers = 0 MaxCrossShardObservers = 0 MaxSeeders = 0 - Type = "` + shardingType + `" - [AdditionalConnections] - MaxFullHistoryObservers = 0` + Type = "` + shardingType + `"` expectedCfg := p2pConfig.P2PConfig{ Node: p2pConfig.NodeConfig{ diff --git a/dataRetriever/interface.go b/dataRetriever/interface.go index 77f59710677..4da2c3669db 100644 --- a/dataRetriever/interface.go +++ b/dataRetriever/interface.go @@ -135,7 +135,6 @@ type ManualEpochStartNotifier interface { // MessageHandler defines the functionality needed by structs to send data to other 
peers type MessageHandler interface { ConnectedPeersOnTopic(topic string) []core.PeerID - ConnectedFullHistoryPeersOnTopic(topic string) []core.PeerID SendToConnectedPeer(topic string, buff []byte, peerID core.PeerID) error ID() core.PeerID IsInterfaceNil() bool @@ -168,7 +167,6 @@ type StorageType uint8 type PeerListCreator interface { CrossShardPeerList() []core.PeerID IntraShardPeerList() []core.PeerID - FullHistoryList() []core.PeerID IsInterfaceNil() bool } diff --git a/dataRetriever/mock/messageHandlerStub.go b/dataRetriever/mock/messageHandlerStub.go index 3b6943bb71d..541bee0270a 100644 --- a/dataRetriever/mock/messageHandlerStub.go +++ b/dataRetriever/mock/messageHandlerStub.go @@ -6,10 +6,9 @@ import ( // MessageHandlerStub - type MessageHandlerStub struct { - ConnectedPeersOnTopicCalled func(topic string) []core.PeerID - ConnectedFullHistoryPeersOnTopicCalled func(topic string) []core.PeerID - SendToConnectedPeerCalled func(topic string, buff []byte, peerID core.PeerID) error - IDCalled func() core.PeerID + ConnectedPeersOnTopicCalled func(topic string) []core.PeerID + SendToConnectedPeerCalled func(topic string, buff []byte, peerID core.PeerID) error + IDCalled func() core.PeerID } // ConnectedPeersOnTopic - @@ -17,11 +16,6 @@ func (mhs *MessageHandlerStub) ConnectedPeersOnTopic(topic string) []core.PeerID return mhs.ConnectedPeersOnTopicCalled(topic) } -// ConnectedFullHistoryPeersOnTopic - -func (mhs *MessageHandlerStub) ConnectedFullHistoryPeersOnTopic(topic string) []core.PeerID { - return mhs.ConnectedFullHistoryPeersOnTopicCalled(topic) -} - // SendToConnectedPeer - func (mhs *MessageHandlerStub) SendToConnectedPeer(topic string, buff []byte, peerID core.PeerID) error { return mhs.SendToConnectedPeerCalled(topic, buff, peerID) diff --git a/dataRetriever/mock/peerListCreatorStub.go b/dataRetriever/mock/peerListCreatorStub.go index 7acea5d64e0..c933aa81056 100644 --- a/dataRetriever/mock/peerListCreatorStub.go +++ 
b/dataRetriever/mock/peerListCreatorStub.go @@ -8,7 +8,6 @@ import ( type PeerListCreatorStub struct { CrossShardPeerListCalled func() []core.PeerID IntraShardPeerListCalled func() []core.PeerID - FullHistoryListCalled func() []core.PeerID } // CrossShardPeerList - @@ -21,11 +20,6 @@ func (p *PeerListCreatorStub) IntraShardPeerList() []core.PeerID { return p.IntraShardPeerListCalled() } -// FullHistoryList - -func (p *PeerListCreatorStub) FullHistoryList() []core.PeerID { - return p.FullHistoryListCalled() -} - // IsInterfaceNil returns true if there is no value under the interface func (p *PeerListCreatorStub) IsInterfaceNil() bool { return p == nil diff --git a/dataRetriever/topicSender/diffPeerListCreator.go b/dataRetriever/topicSender/diffPeerListCreator.go index 8261fa590ab..ff7e83ae6e5 100644 --- a/dataRetriever/topicSender/diffPeerListCreator.go +++ b/dataRetriever/topicSender/diffPeerListCreator.go @@ -73,11 +73,6 @@ func (dplc *diffPeerListCreator) IntraShardPeerList() []core.PeerID { return dplc.messenger.ConnectedPeersOnTopic(dplc.intraShardTopic) } -// FullHistoryList returns the full history peers list -func (dplc *diffPeerListCreator) FullHistoryList() []core.PeerID { - return dplc.messenger.ConnectedFullHistoryPeersOnTopic(dplc.intraShardTopic) -} - // IsInterfaceNil returns true if there is no value under the interface func (dplc *diffPeerListCreator) IsInterfaceNil() bool { return dplc == nil diff --git a/dataRetriever/topicSender/diffPeerListCreator_test.go b/dataRetriever/topicSender/diffPeerListCreator_test.go index 4b63b757608..4a9e043d281 100644 --- a/dataRetriever/topicSender/diffPeerListCreator_test.go +++ b/dataRetriever/topicSender/diffPeerListCreator_test.go @@ -240,21 +240,3 @@ func TestDiffPeerListCreator_IntraShardPeersList(t *testing.T) { assert.Equal(t, peerList, dplc.IntraShardPeerList()) } - -func TestDiffPeerListCreator_FullHistoryList(t *testing.T) { - t.Parallel() - - peerList := []core.PeerID{"pid1", "pid2"} - dplc, _ := 
topicsender.NewDiffPeerListCreator( - &mock.MessageHandlerStub{ - ConnectedFullHistoryPeersOnTopicCalled: func(topic string) []core.PeerID { - return peerList - }, - }, - mainTopic, - intraTopic, - excludedTopic, - ) - - assert.Equal(t, peerList, dplc.FullHistoryList()) -} diff --git a/dataRetriever/topicSender/topicRequestSender.go b/dataRetriever/topicSender/topicRequestSender.go index 22831cdf038..f09fb194f95 100644 --- a/dataRetriever/topicSender/topicRequestSender.go +++ b/dataRetriever/topicSender/topicRequestSender.go @@ -126,8 +126,9 @@ func (trs *topicRequestSender) SendOnRequestTopic(rd *dataRetriever.RequestData, preferredPeer = trs.getPreferredPeer(trs.selfShardId) numSentIntra = trs.sendOnTopic(intraPeers, preferredPeer, topicToSendRequest, buff, trs.numIntraShardPeers, core.IntraShardPeer.String()) } else { - // TODO: select preferred peers of type full history as well. - fullHistoryPeers = trs.peerListCreator.FullHistoryList() + // TODO[Sorin]: select preferred peers of type full history as well. 
+ // TODO[Sorin]: replace the following line with the proper functionality from the full archive messenger + // fullHistoryPeers = trs.peerListCreator.FullHistoryList() numSentIntra = trs.sendOnTopic(fullHistoryPeers, "", topicToSendRequest, buff, trs.numFullHistoryPeers, core.FullHistoryPeer.String()) } diff --git a/dataRetriever/topicSender/topicRequestSender_test.go b/dataRetriever/topicSender/topicRequestSender_test.go index 83fef0bba9c..62833ad45b1 100644 --- a/dataRetriever/topicSender/topicRequestSender_test.go +++ b/dataRetriever/topicSender/topicRequestSender_test.go @@ -268,46 +268,47 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { assert.True(t, sentToPid2) assert.Equal(t, 2, decreaseCalledCounter) }) - t.Run("should work and send to full history", func(t *testing.T) { - t.Parallel() - - pIDfullHistory := core.PeerID("full history peer") - sentToFullHistoryPeer := false - - arg := createMockArgTopicRequestSender() - arg.Messenger = &mock.MessageHandlerStub{ - SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { - if bytes.Equal(peerID.Bytes(), pIDfullHistory.Bytes()) { - sentToFullHistoryPeer = true - } - - return nil - }, - } - arg.PeerListCreator = &mock.PeerListCreatorStub{ - FullHistoryListCalled: func() []core.PeerID { - return []core.PeerID{pIDfullHistory} - }, - } - arg.CurrentNetworkEpochProvider = &mock.CurrentNetworkEpochProviderStub{ - EpochIsActiveInNetworkCalled: func(epoch uint32) bool { - return false - }, - } - decreaseCalledCounter := 0 - arg.PeersRatingHandler = &p2pmocks.PeersRatingHandlerStub{ - DecreaseRatingCalled: func(pid core.PeerID) { - decreaseCalledCounter++ - assert.Equal(t, pIDfullHistory, pid) - }, - } - trs, _ := topicsender.NewTopicRequestSender(arg) - - err := trs.SendOnRequestTopic(&dataRetriever.RequestData{}, defaultHashes) - assert.Nil(t, err) - assert.True(t, sentToFullHistoryPeer) - assert.Equal(t, 1, decreaseCalledCounter) - }) + // TODO[Sorin]: fix this test + 
//t.Run("should work and send to full history", func(t *testing.T) { + // t.Parallel() + // + // pIDfullHistory := core.PeerID("full history peer") + // sentToFullHistoryPeer := false + // + // arg := createMockArgTopicRequestSender() + // arg.Messenger = &mock.MessageHandlerStub{ + // SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + // if bytes.Equal(peerID.Bytes(), pIDfullHistory.Bytes()) { + // sentToFullHistoryPeer = true + // } + // + // return nil + // }, + // } + // arg.PeerListCreator = &mock.PeerListCreatorStub{ + // FullHistoryListCalled: func() []core.PeerID { + // return []core.PeerID{pIDfullHistory} + // }, + // } + // arg.CurrentNetworkEpochProvider = &mock.CurrentNetworkEpochProviderStub{ + // EpochIsActiveInNetworkCalled: func(epoch uint32) bool { + // return false + // }, + // } + // decreaseCalledCounter := 0 + // arg.PeersRatingHandler = &p2pmocks.PeersRatingHandlerStub{ + // DecreaseRatingCalled: func(pid core.PeerID) { + // decreaseCalledCounter++ + // assert.Equal(t, pIDfullHistory, pid) + // }, + // } + // trs, _ := topicsender.NewTopicRequestSender(arg) + // + // err := trs.SendOnRequestTopic(&dataRetriever.RequestData{}, defaultHashes) + // assert.Nil(t, err) + // assert.True(t, sentToFullHistoryPeer) + // assert.Equal(t, 1, decreaseCalledCounter) + //}) t.Run("should work and send to preferred peers", func(t *testing.T) { t.Parallel() diff --git a/errors/errors.go b/errors/errors.go index aa88cd55d99..9bdbed5dd60 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -558,3 +558,6 @@ var ErrInvalidTrieNodeVersion = errors.New("invalid trie node version") // ErrNilTrieMigrator signals that a nil trie migrator has been provided var ErrNilTrieMigrator = errors.New("nil trie migrator") + +// ErrInvalidNodeOperationMode signals that an invalid node operation mode has been provided +var ErrInvalidNodeOperationMode = errors.New("invalid node operation mode") diff --git 
a/factory/consensus/consensusComponents.go b/factory/consensus/consensusComponents.go index 9681bdde7d0..b34f011724b 100644 --- a/factory/consensus/consensusComponents.go +++ b/factory/consensus/consensusComponents.go @@ -668,6 +668,7 @@ func (ccf *consensusComponentsFactory) createP2pSigningHandler() (consensus.P2PS p2pSignerArgs := p2pFactory.ArgsMessageVerifier{ Marshaller: ccf.coreComponents.InternalMarshalizer(), P2PSigner: ccf.networkComponents.NetworkMessenger(), + Logger: logger.GetOrCreate("main/p2p/messagecheck"), } return p2pFactory.NewMessageVerifier(p2pSignerArgs) diff --git a/factory/disabled/networkMessenger.go b/factory/disabled/networkMessenger.go new file mode 100644 index 00000000000..3db01a8c9bf --- /dev/null +++ b/factory/disabled/networkMessenger.go @@ -0,0 +1,186 @@ +package disabled + +import ( + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/p2p" +) + +type networkMessenger struct { +} + +// NewNetworkMessenger creates a new disabled Messenger implementation +func NewNetworkMessenger() *networkMessenger { + return &networkMessenger{} +} + +// Close returns nil as it is disabled +func (netMes *networkMessenger) Close() error { + return nil +} + +// CreateTopic returns nil as it is disabled +func (netMes *networkMessenger) CreateTopic(_ string, _ bool) error { + return nil +} + +// HasTopic returns true as it is disabled +func (netMes *networkMessenger) HasTopic(_ string) bool { + return true +} + +// RegisterMessageProcessor returns nil as it is disabled +func (netMes *networkMessenger) RegisterMessageProcessor(_ string, _ string, _ p2p.MessageProcessor) error { + return nil +} + +// UnregisterAllMessageProcessors returns nil as it is disabled +func (netMes *networkMessenger) UnregisterAllMessageProcessors() error { + return nil +} + +// UnregisterMessageProcessor returns nil as it is disabled +func (netMes *networkMessenger) UnregisterMessageProcessor(_ string, _ string) error { + return nil +} 
+ +// Broadcast does nothing as it is disabled +func (netMes *networkMessenger) Broadcast(_ string, _ []byte) { +} + +// BroadcastOnChannel does nothing as it is disabled +func (netMes *networkMessenger) BroadcastOnChannel(_ string, _ string, _ []byte) { +} + +// BroadcastUsingPrivateKey does nothing as it is disabled +func (netMes *networkMessenger) BroadcastUsingPrivateKey(_ string, _ []byte, _ core.PeerID, _ []byte) { +} + +// BroadcastOnChannelUsingPrivateKey does nothing as it is disabled +func (netMes *networkMessenger) BroadcastOnChannelUsingPrivateKey(_ string, _ string, _ []byte, _ core.PeerID, _ []byte) { +} + +// SendToConnectedPeer returns nil as it is disabled +func (netMes *networkMessenger) SendToConnectedPeer(_ string, _ []byte, _ core.PeerID) error { + return nil +} + +// UnJoinAllTopics returns nil as it is disabled +func (netMes *networkMessenger) UnJoinAllTopics() error { + return nil +} + +// Bootstrap returns nil as it is disabled +func (netMes *networkMessenger) Bootstrap() error { + return nil +} + +// Peers returns an empty slice as it is disabled +func (netMes *networkMessenger) Peers() []core.PeerID { + return make([]core.PeerID, 0) +} + +// Addresses returns an empty slice as it is disabled +func (netMes *networkMessenger) Addresses() []string { + return make([]string, 0) +} + +// ConnectToPeer returns nil as it is disabled +func (netMes *networkMessenger) ConnectToPeer(_ string) error { + return nil +} + +// IsConnected returns true as it is disabled +func (netMes *networkMessenger) IsConnected(_ core.PeerID) bool { + return true +} + +// ConnectedPeers returns an empty slice as it is disabled +func (netMes *networkMessenger) ConnectedPeers() []core.PeerID { + return make([]core.PeerID, 0) +} + +// ConnectedAddresses returns an empty slice as it is disabled +func (netMes *networkMessenger) ConnectedAddresses() []string { + return make([]string, 0) +} + +// PeerAddresses returns an empty slice as it is disabled +func (netMes 
*networkMessenger) PeerAddresses(_ core.PeerID) []string { + return make([]string, 0) +} + +// ConnectedPeersOnTopic returns an empty slice as it is disabled +func (netMes *networkMessenger) ConnectedPeersOnTopic(_ string) []core.PeerID { + return make([]core.PeerID, 0) +} + +// SetPeerShardResolver returns nil as it is disabled +func (netMes *networkMessenger) SetPeerShardResolver(_ p2p.PeerShardResolver) error { + return nil +} + +// GetConnectedPeersInfo returns an empty structure as it is disabled +func (netMes *networkMessenger) GetConnectedPeersInfo() *p2p.ConnectedPeersInfo { + return &p2p.ConnectedPeersInfo{} +} + +// WaitForConnections does nothing as it is disabled +func (netMes *networkMessenger) WaitForConnections(_ time.Duration, _ uint32) { +} + +// IsConnectedToTheNetwork returns true as it is disabled +func (netMes *networkMessenger) IsConnectedToTheNetwork() bool { + return true +} + +// ThresholdMinConnectedPeers returns 0 as it is disabled +func (netMes *networkMessenger) ThresholdMinConnectedPeers() int { + return 0 +} + +// SetThresholdMinConnectedPeers returns nil as it is disabled +func (netMes *networkMessenger) SetThresholdMinConnectedPeers(_ int) error { + return nil +} + +// SetPeerDenialEvaluator returns nil as it is disabled +func (netMes *networkMessenger) SetPeerDenialEvaluator(_ p2p.PeerDenialEvaluator) error { + return nil +} + +// ID returns an empty peerID as it is disabled +func (netMes *networkMessenger) ID() core.PeerID { + return "" +} + +// Port returns 0 as it is disabled +func (netMes *networkMessenger) Port() int { + return 0 +} + +// Sign returns an empty slice and nil as it is disabled +func (netMes *networkMessenger) Sign(_ []byte) ([]byte, error) { + return make([]byte, 0), nil +} + +// Verify returns nil as it is disabled +func (netMes *networkMessenger) Verify(_ []byte, _ core.PeerID, _ []byte) error { + return nil +} + +// SignUsingPrivateKey returns an empty slice and nil as it is disabled +func (netMes 
*networkMessenger) SignUsingPrivateKey(_ []byte, _ []byte) ([]byte, error) { + return make([]byte, 0), nil +} + +// AddPeerTopicNotifier returns nil as it is disabled +func (netMes *networkMessenger) AddPeerTopicNotifier(_ p2p.PeerTopicNotifier) error { + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (netMes *networkMessenger) IsInterfaceNil() bool { + return netMes == nil +} diff --git a/factory/disabled/peersRatingHandler.go b/factory/disabled/peersRatingHandler.go new file mode 100644 index 00000000000..4bccd2bd7da --- /dev/null +++ b/factory/disabled/peersRatingHandler.go @@ -0,0 +1,29 @@ +package disabled + +import "github.com/multiversx/mx-chain-core-go/core" + +type peersRatingHandler struct { +} + +// NewPeersRatingHandler returns a new disabled PeersRatingHandler implementation +func NewPeersRatingHandler() *peersRatingHandler { + return &peersRatingHandler{} +} + +// IncreaseRating does nothing as it is disabled +func (handler *peersRatingHandler) IncreaseRating(_ core.PeerID) { +} + +// DecreaseRating does nothing as it is disabled +func (handler *peersRatingHandler) DecreaseRating(_ core.PeerID) { +} + +// GetTopRatedPeersFromList returns the provided peers list as it is disabled +func (handler *peersRatingHandler) GetTopRatedPeersFromList(peers []core.PeerID, _ int) []core.PeerID { + return peers +} + +// IsInterfaceNil returns true if there is no value under the interface +func (handler *peersRatingHandler) IsInterfaceNil() bool { + return handler == nil +} diff --git a/factory/disabled/peersRatingMonitor.go b/factory/disabled/peersRatingMonitor.go new file mode 100644 index 00000000000..c846d4dde4d --- /dev/null +++ b/factory/disabled/peersRatingMonitor.go @@ -0,0 +1,19 @@ +package disabled + +type peersRatingMonitor struct { +} + +// NewPeersRatingMonitor returns a new disabled PeersRatingMonitor implementation +func NewPeersRatingMonitor() *peersRatingMonitor { + return &peersRatingMonitor{} +} + +// 
GetConnectedPeersRatings returns an empty string as it is disabled +func (monitor *peersRatingMonitor) GetConnectedPeersRatings() string { + return "" +} + +// IsInterfaceNil returns true if there is no value under the interface +func (monitor *peersRatingMonitor) IsInterfaceNil() bool { + return monitor == nil +} diff --git a/factory/network/networkComponents.go b/factory/network/networkComponents.go index 1ff92da63af..d75af6875f8 100644 --- a/factory/network/networkComponents.go +++ b/factory/network/networkComponents.go @@ -13,6 +13,7 @@ import ( "github.com/multiversx/mx-chain-go/debug/antiflood" "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/factory/disabled" "github.com/multiversx/mx-chain-go/p2p" p2pConfig "github.com/multiversx/mx-chain-go/p2p/config" p2pFactory "github.com/multiversx/mx-chain-go/p2p/factory" @@ -27,7 +28,8 @@ import ( // NetworkComponentsFactoryArgs holds the arguments to create a network component handler instance type NetworkComponentsFactoryArgs struct { - P2pConfig p2pConfig.P2PConfig + MainP2pConfig p2pConfig.P2PConfig + FullArchiveP2pConfig p2pConfig.P2PConfig MainConfig config.Config RatingsConfig config.RatingsConfig StatusHandler core.AppStatusHandler @@ -41,7 +43,8 @@ type NetworkComponentsFactoryArgs struct { } type networkComponentsFactory struct { - p2pConfig p2pConfig.P2PConfig + mainP2PConfig p2pConfig.P2PConfig + fullArchiveP2PConfig p2pConfig.P2PConfig mainConfig config.Config ratingsConfig config.RatingsConfig statusHandler core.AppStatusHandler @@ -55,21 +58,26 @@ type networkComponentsFactory struct { cryptoComponents factory.CryptoComponentsHolder } +type networkComponentsHolder struct { + netMessenger p2p.Messenger + peersRatingHandler p2p.PeersRatingHandler + peersRatingMonitor p2p.PeersRatingMonitor +} + // networkComponents struct holds the network components type networkComponents struct { - netMessenger p2p.Messenger - 
inputAntifloodHandler factory.P2PAntifloodHandler - outputAntifloodHandler factory.P2PAntifloodHandler - pubKeyTimeCacher process.TimeCacher - topicFloodPreventer process.TopicFloodPreventer - floodPreventers []process.FloodPreventer - peerBlackListHandler process.PeerBlackListCacher - antifloodConfig config.AntifloodConfig - peerHonestyHandler consensus.PeerHonestyHandler - peersHolder factory.PreferredPeersHolderHandler - peersRatingHandler p2p.PeersRatingHandler - peersRatingMonitor p2p.PeersRatingMonitor - closeFunc context.CancelFunc + mainNetworkHolder networkComponentsHolder + fullArchiveNetworkHolder networkComponentsHolder + inputAntifloodHandler factory.P2PAntifloodHandler + outputAntifloodHandler factory.P2PAntifloodHandler + pubKeyTimeCacher process.TimeCacher + topicFloodPreventer process.TopicFloodPreventer + floodPreventers []process.FloodPreventer + peerBlackListHandler process.PeerBlackListCacher + antifloodConfig config.AntifloodConfig + peerHonestyHandler consensus.PeerHonestyHandler + peersHolder factory.PreferredPeersHolderHandler + closeFunc context.CancelFunc } var log = logger.GetOrCreate("factory") @@ -90,9 +98,13 @@ func NewNetworkComponentsFactory( if check.IfNil(args.CryptoComponents) { return nil, errors.ErrNilCryptoComponentsHolder } + if args.NodeOperationMode != p2p.NormalOperation && args.NodeOperationMode != p2p.FullArchiveMode { + return nil, errors.ErrInvalidNodeOperationMode + } return &networkComponentsFactory{ - p2pConfig: args.P2pConfig, + mainP2PConfig: args.MainP2pConfig, + fullArchiveP2PConfig: args.FullArchiveP2pConfig, ratingsConfig: args.RatingsConfig, marshalizer: args.Marshalizer, mainConfig: args.MainConfig, @@ -109,43 +121,17 @@ func NewNetworkComponentsFactory( // Create creates and returns the network components func (ncf *networkComponentsFactory) Create() (*networkComponents, error) { - ph, err := p2pFactory.NewPeersHolder(ncf.preferredPeersSlices) + peersHolder, err := 
p2pFactory.NewPeersHolder(ncf.preferredPeersSlices) if err != nil { return nil, err } - peersRatingCfg := ncf.mainConfig.PeersRatingConfig - topRatedCache, err := cache.NewLRUCache(peersRatingCfg.TopRatedCacheCapacity) - if err != nil { - return nil, err - } - badRatedCache, err := cache.NewLRUCache(peersRatingCfg.BadRatedCacheCapacity) - if err != nil { - return nil, err - } - argsPeersRatingHandler := p2pFactory.ArgPeersRatingHandler{ - TopRatedCache: topRatedCache, - BadRatedCache: badRatedCache, - } - peersRatingHandler, err := p2pFactory.NewPeersRatingHandler(argsPeersRatingHandler) + mainNetworkComp, err := ncf.createMainNetworkHolder(peersHolder) if err != nil { return nil, err } - arg := p2pFactory.ArgsNetworkMessenger{ - Marshaller: ncf.marshalizer, - ListenAddress: ncf.listenAddress, - P2pConfig: ncf.p2pConfig, - SyncTimer: ncf.syncer, - PreferredPeersHolder: ph, - NodeOperationMode: ncf.nodeOperationMode, - PeersRatingHandler: peersRatingHandler, - ConnectionWatcherType: ncf.connectionWatcherType, - P2pPrivateKey: ncf.cryptoComponents.P2pPrivateKey(), - P2pSingleSigner: ncf.cryptoComponents.P2pSingleSigner(), - P2pKeyGenerator: ncf.cryptoComponents.P2pKeyGen(), - } - netMessenger, err := p2pFactory.NewNetworkMessenger(arg) + fullArchiveNetworkComp, err := ncf.createFullArchiveNetworkHolder(peersHolder) if err != nil { return nil, err } @@ -157,18 +143,8 @@ func (ncf *networkComponentsFactory) Create() (*networkComponents, error) { } }() - argsPeersRatingMonitor := p2pFactory.ArgPeersRatingMonitor{ - TopRatedCache: topRatedCache, - BadRatedCache: badRatedCache, - ConnectionsProvider: netMessenger, - } - peersRatingMonitor, err := p2pFactory.NewPeersRatingMonitor(argsPeersRatingMonitor) - if err != nil { - return nil, err - } - var antiFloodComponents *antifloodFactory.AntiFloodComponents - antiFloodComponents, err = antifloodFactory.NewP2PAntiFloodComponents(ctx, ncf.mainConfig, ncf.statusHandler, netMessenger.ID()) + antiFloodComponents, err = 
antifloodFactory.NewP2PAntiFloodComponents(ctx, ncf.mainConfig, ncf.statusHandler, mainNetworkComp.netMessenger.ID()) if err != nil { return nil, err } @@ -215,27 +191,33 @@ func (ncf *networkComponentsFactory) Create() (*networkComponents, error) { return nil, err } - err = netMessenger.Bootstrap() + err = mainNetworkComp.netMessenger.Bootstrap() + if err != nil { + return nil, err + } + + mainNetworkComp.netMessenger.WaitForConnections(ncf.bootstrapWaitTime, ncf.mainP2PConfig.Node.MinNumPeersToWaitForOnBootstrap) + + err = fullArchiveNetworkComp.netMessenger.Bootstrap() if err != nil { return nil, err } - netMessenger.WaitForConnections(ncf.bootstrapWaitTime, ncf.p2pConfig.Node.MinNumPeersToWaitForOnBootstrap) + fullArchiveNetworkComp.netMessenger.WaitForConnections(ncf.bootstrapWaitTime, ncf.fullArchiveP2PConfig.Node.MinNumPeersToWaitForOnBootstrap) return &networkComponents{ - netMessenger: netMessenger, - inputAntifloodHandler: inputAntifloodHandler, - outputAntifloodHandler: outputAntifloodHandler, - topicFloodPreventer: antiFloodComponents.TopicPreventer, - floodPreventers: antiFloodComponents.FloodPreventers, - peerBlackListHandler: antiFloodComponents.BlacklistHandler, - pubKeyTimeCacher: antiFloodComponents.PubKeysCacher, - antifloodConfig: ncf.mainConfig.Antiflood, - peerHonestyHandler: peerHonestyHandler, - peersHolder: ph, - peersRatingHandler: peersRatingHandler, - peersRatingMonitor: peersRatingMonitor, - closeFunc: cancelFunc, + mainNetworkHolder: mainNetworkComp, + fullArchiveNetworkHolder: fullArchiveNetworkComp, + inputAntifloodHandler: inputAntifloodHandler, + outputAntifloodHandler: outputAntifloodHandler, + pubKeyTimeCacher: antiFloodComponents.PubKeysCacher, + topicFloodPreventer: antiFloodComponents.TopicPreventer, + floodPreventers: antiFloodComponents.FloodPreventers, + peerBlackListHandler: antiFloodComponents.BlacklistHandler, + antifloodConfig: ncf.mainConfig.Antiflood, + peerHonestyHandler: peerHonestyHandler, + peersHolder: 
peersHolder, + closeFunc: cancelFunc, }, nil } @@ -253,6 +235,87 @@ func (ncf *networkComponentsFactory) createPeerHonestyHandler( return peerHonesty.NewP2pPeerHonesty(ratingConfig.PeerHonesty, pkTimeCache, suCache) } +func (ncf *networkComponentsFactory) createNetworkHolder( + peersHolder p2p.PreferredPeersHolderHandler, + p2pConfig p2pConfig.P2PConfig, + logger p2p.Logger, +) (networkComponentsHolder, error) { + + peersRatingCfg := ncf.mainConfig.PeersRatingConfig + topRatedCache, err := cache.NewLRUCache(peersRatingCfg.TopRatedCacheCapacity) + if err != nil { + return networkComponentsHolder{}, err + } + badRatedCache, err := cache.NewLRUCache(peersRatingCfg.BadRatedCacheCapacity) + if err != nil { + return networkComponentsHolder{}, err + } + + argsPeersRatingHandler := p2pFactory.ArgPeersRatingHandler{ + TopRatedCache: topRatedCache, + BadRatedCache: badRatedCache, + Logger: logger, + } + peersRatingHandler, err := p2pFactory.NewPeersRatingHandler(argsPeersRatingHandler) + if err != nil { + return networkComponentsHolder{}, err + } + + argsMessenger := p2pFactory.ArgsNetworkMessenger{ + ListenAddress: ncf.listenAddress, + Marshaller: ncf.marshalizer, + P2pConfig: p2pConfig, + SyncTimer: ncf.syncer, + PreferredPeersHolder: peersHolder, + NodeOperationMode: ncf.nodeOperationMode, + PeersRatingHandler: peersRatingHandler, + ConnectionWatcherType: ncf.connectionWatcherType, + P2pPrivateKey: ncf.cryptoComponents.P2pPrivateKey(), + P2pSingleSigner: ncf.cryptoComponents.P2pSingleSigner(), + P2pKeyGenerator: ncf.cryptoComponents.P2pKeyGen(), + Logger: logger, + } + networkMessenger, err := p2pFactory.NewNetworkMessenger(argsMessenger) + if err != nil { + return networkComponentsHolder{}, err + } + + argsPeersRatingMonitor := p2pFactory.ArgPeersRatingMonitor{ + TopRatedCache: topRatedCache, + BadRatedCache: badRatedCache, + ConnectionsProvider: networkMessenger, + } + peersRatingMonitor, err := p2pFactory.NewPeersRatingMonitor(argsPeersRatingMonitor) + if err != nil { 
+ return networkComponentsHolder{}, err + } + + return networkComponentsHolder{ + netMessenger: networkMessenger, + peersRatingHandler: peersRatingHandler, + peersRatingMonitor: peersRatingMonitor, + }, nil +} + +func (ncf *networkComponentsFactory) createMainNetworkHolder(peersHolder p2p.PreferredPeersHolderHandler) (networkComponentsHolder, error) { + loggerInstance := logger.GetOrCreate("main/p2p") + return ncf.createNetworkHolder(peersHolder, ncf.mainP2PConfig, loggerInstance) +} + +func (ncf *networkComponentsFactory) createFullArchiveNetworkHolder(peersHolder p2p.PreferredPeersHolderHandler) (networkComponentsHolder, error) { + if ncf.nodeOperationMode != p2p.FullArchiveMode { + return networkComponentsHolder{ + netMessenger: disabled.NewNetworkMessenger(), + peersRatingHandler: disabled.NewPeersRatingHandler(), + peersRatingMonitor: disabled.NewPeersRatingMonitor(), + }, nil + } + + loggerInstance := logger.GetOrCreate("full-archive/p2p") + + return ncf.createNetworkHolder(peersHolder, ncf.fullArchiveP2PConfig, loggerInstance) +} + // Close closes all underlying components that need closing func (nc *networkComponents) Close() error { nc.closeFunc() @@ -270,10 +333,16 @@ func (nc *networkComponents) Close() error { log.LogIfError(nc.peerHonestyHandler.Close()) } - if nc.netMessenger != nil { - log.Debug("calling close on the network messenger instance...") - err := nc.netMessenger.Close() - log.LogIfError(err) + mainNetMessenger := nc.mainNetworkHolder.netMessenger + if !check.IfNil(mainNetMessenger) { + log.Debug("calling close on the main network messenger instance...") + log.LogIfError(mainNetMessenger.Close()) + } + + fullArchiveNetMessenger := nc.fullArchiveNetworkHolder.netMessenger + if !check.IfNil(fullArchiveNetMessenger) { + log.Debug("calling close on the full archive network messenger instance...") + log.LogIfError(fullArchiveNetMessenger.Close()) } return nil diff --git a/factory/network/networkComponentsHandler.go 
b/factory/network/networkComponentsHandler.go index 578794bce98..40a3be8dd6f 100644 --- a/factory/network/networkComponentsHandler.go +++ b/factory/network/networkComponentsHandler.go @@ -74,8 +74,23 @@ func (mnc *managedNetworkComponents) CheckSubcomponents() error { if mnc.networkComponents == nil { return errors.ErrNilNetworkComponents } - if check.IfNil(mnc.netMessenger) { - return errors.ErrNilMessenger + if check.IfNil(mnc.mainNetworkHolder.netMessenger) { + return fmt.Errorf("%w for main", errors.ErrNilMessenger) + } + if check.IfNil(mnc.mainNetworkHolder.peersRatingHandler) { + return fmt.Errorf("%w for main", errors.ErrNilPeersRatingHandler) + } + if check.IfNil(mnc.mainNetworkHolder.peersRatingMonitor) { + return fmt.Errorf("%w for main", errors.ErrNilPeersRatingMonitor) + } + if check.IfNil(mnc.fullArchiveNetworkHolder.netMessenger) { + return fmt.Errorf("%w for full archive", errors.ErrNilMessenger) + } + if check.IfNil(mnc.fullArchiveNetworkHolder.peersRatingHandler) { + return fmt.Errorf("%w for full archive", errors.ErrNilPeersRatingHandler) + } + if check.IfNil(mnc.fullArchiveNetworkHolder.peersRatingMonitor) { + return fmt.Errorf("%w for full archive", errors.ErrNilPeersRatingMonitor) } if check.IfNil(mnc.inputAntifloodHandler) { return errors.ErrNilInputAntiFloodHandler @@ -89,17 +104,11 @@ func (mnc *managedNetworkComponents) CheckSubcomponents() error { if check.IfNil(mnc.peerHonestyHandler) { return errors.ErrNilPeerHonestyHandler } - if check.IfNil(mnc.peersRatingHandler) { - return errors.ErrNilPeersRatingHandler - } - if check.IfNil(mnc.peersRatingMonitor) { - return errors.ErrNilPeersRatingMonitor - } return nil } -// NetworkMessenger returns the p2p messenger +// NetworkMessenger returns the p2p messenger of the main network func (mnc *managedNetworkComponents) NetworkMessenger() p2p.Messenger { mnc.mutNetworkComponents.RLock() defer mnc.mutNetworkComponents.RUnlock() @@ -108,7 +117,7 @@ func (mnc *managedNetworkComponents) 
NetworkMessenger() p2p.Messenger { return nil } - return mnc.netMessenger + return mnc.mainNetworkHolder.netMessenger } // InputAntiFloodHandler returns the input p2p anti-flood handler @@ -183,7 +192,7 @@ func (mnc *managedNetworkComponents) PreferredPeersHolderHandler() factory.Prefe return mnc.networkComponents.peersHolder } -// PeersRatingHandler returns the peers rating handler +// PeersRatingHandler returns the peers rating handler of the main network func (mnc *managedNetworkComponents) PeersRatingHandler() p2p.PeersRatingHandler { mnc.mutNetworkComponents.RLock() defer mnc.mutNetworkComponents.RUnlock() @@ -192,10 +201,10 @@ func (mnc *managedNetworkComponents) PeersRatingHandler() p2p.PeersRatingHandler return nil } - return mnc.networkComponents.peersRatingHandler + return mnc.mainNetworkHolder.peersRatingHandler } -// PeersRatingMonitor returns the peers rating monitor +// PeersRatingMonitor returns the peers rating monitor of the main network func (mnc *managedNetworkComponents) PeersRatingMonitor() p2p.PeersRatingMonitor { mnc.mutNetworkComponents.RLock() defer mnc.mutNetworkComponents.RUnlock() @@ -204,7 +213,44 @@ func (mnc *managedNetworkComponents) PeersRatingMonitor() p2p.PeersRatingMonitor return nil } - return mnc.networkComponents.peersRatingMonitor + return mnc.mainNetworkHolder.peersRatingMonitor +} + +// FullArchiveNetworkMessenger returns the p2p messenger of the full archive network +// TODO[Sorin]: add these new methods into the interface +func (mnc *managedNetworkComponents) FullArchiveNetworkMessenger() p2p.Messenger { + mnc.mutNetworkComponents.RLock() + defer mnc.mutNetworkComponents.RUnlock() + + if mnc.networkComponents == nil { + return nil + } + + return mnc.fullArchiveNetworkHolder.netMessenger +} + +// FullArchivePeersRatingHandler returns the peers rating handler of the full archive network +func (mnc *managedNetworkComponents) FullArchivePeersRatingHandler() p2p.PeersRatingHandler { + mnc.mutNetworkComponents.RLock() + defer 
mnc.mutNetworkComponents.RUnlock() + + if mnc.networkComponents == nil { + return nil + } + + return mnc.fullArchiveNetworkHolder.peersRatingHandler +} + +// FullArchivePeersRatingMonitor returns the peers rating monitor of the full archive network +func (mnc *managedNetworkComponents) FullArchivePeersRatingMonitor() p2p.PeersRatingMonitor { + mnc.mutNetworkComponents.RLock() + defer mnc.mutNetworkComponents.RUnlock() + + if mnc.networkComponents == nil { + return nil + } + + return mnc.fullArchiveNetworkHolder.peersRatingMonitor } // IsInterfaceNil returns true if the value under the interface is nil diff --git a/factory/network/networkComponentsHandler_test.go b/factory/network/networkComponentsHandler_test.go index 51bfe86372c..c4383174113 100644 --- a/factory/network/networkComponentsHandler_test.go +++ b/factory/network/networkComponentsHandler_test.go @@ -37,7 +37,7 @@ func TestManagedNetworkComponents_Create(t *testing.T) { t.Parallel() networkArgs := componentsMock.GetNetworkFactoryArgs() - networkArgs.P2pConfig.Node.Port = "invalid" + networkArgs.MainP2pConfig.Node.Port = "invalid" networkComponentsFactory, _ := networkComp.NewNetworkComponentsFactory(networkArgs) managedNetworkComponents, err := networkComp.NewManagedNetworkComponents(networkComponentsFactory) require.NoError(t, err) @@ -61,6 +61,9 @@ func TestManagedNetworkComponents_Create(t *testing.T) { require.Nil(t, managedNetworkComponents.PreferredPeersHolderHandler()) require.Nil(t, managedNetworkComponents.PeerHonestyHandler()) require.Nil(t, managedNetworkComponents.PeersRatingHandler()) + require.Nil(t, managedNetworkComponents.FullArchiveNetworkMessenger()) + require.Nil(t, managedNetworkComponents.FullArchivePeersRatingHandler()) + require.Nil(t, managedNetworkComponents.FullArchivePeersRatingMonitor()) err = managedNetworkComponents.Create() require.NoError(t, err) @@ -72,6 +75,9 @@ func TestManagedNetworkComponents_Create(t *testing.T) { require.NotNil(t, 
managedNetworkComponents.PreferredPeersHolderHandler()) require.NotNil(t, managedNetworkComponents.PeerHonestyHandler()) require.NotNil(t, managedNetworkComponents.PeersRatingHandler()) + require.NotNil(t, managedNetworkComponents.FullArchiveNetworkMessenger()) + require.NotNil(t, managedNetworkComponents.FullArchivePeersRatingHandler()) + require.NotNil(t, managedNetworkComponents.FullArchivePeersRatingMonitor()) require.Equal(t, factory.NetworkComponentsName, managedNetworkComponents.String()) }) diff --git a/factory/network/networkComponents_test.go b/factory/network/networkComponents_test.go index 1fe95107b6f..dca1e2f2d80 100644 --- a/factory/network/networkComponents_test.go +++ b/factory/network/networkComponents_test.go @@ -50,6 +50,16 @@ func TestNewNetworkComponentsFactory(t *testing.T) { require.Nil(t, ncf) require.Equal(t, errorsMx.ErrNilCryptoComponentsHolder, err) }) + t.Run("invalid node operation mode should error", func(t *testing.T) { + t.Parallel() + + args := componentsMock.GetNetworkFactoryArgs() + args.NodeOperationMode = "invalid" + + ncf, err := networkComp.NewNetworkComponentsFactory(args) + require.Equal(t, errorsMx.ErrInvalidNodeOperationMode, err) + require.Nil(t, ncf) + }) t.Run("should work", func(t *testing.T) { t.Parallel() diff --git a/factory/status/statusComponentsHandler.go b/factory/status/statusComponentsHandler.go index 71f69b2a325..79cf938cf8c 100644 --- a/factory/status/statusComponentsHandler.go +++ b/factory/status/statusComponentsHandler.go @@ -262,12 +262,11 @@ func computeConnectedPeers( ) { peersInfo := netMessenger.GetConnectedPeersInfo() - peerClassification := fmt.Sprintf("intraVal:%d,crossVal:%d,intraObs:%d,crossObs:%d,fullObs:%d,unknown:%d,", + peerClassification := fmt.Sprintf("intraVal:%d,crossVal:%d,intraObs:%d,crossObs:%d,unknown:%d,", len(peersInfo.IntraShardValidators), len(peersInfo.CrossShardValidators), len(peersInfo.IntraShardObservers), len(peersInfo.CrossShardObservers), - 
len(peersInfo.FullHistoryObservers), len(peersInfo.UnknownPeers), ) appStatusHandler.SetStringValue(common.MetricNumConnectedPeersClassification, peerClassification) @@ -283,7 +282,6 @@ func setP2pConnectedPeersMetrics(appStatusHandler core.AppStatusHandler, info *p appStatusHandler.SetStringValue(common.MetricP2PIntraShardObservers, mapToString(info.IntraShardObservers)) appStatusHandler.SetStringValue(common.MetricP2PCrossShardValidators, mapToString(info.CrossShardValidators)) appStatusHandler.SetStringValue(common.MetricP2PCrossShardObservers, mapToString(info.CrossShardObservers)) - appStatusHandler.SetStringValue(common.MetricP2PFullHistoryObservers, mapToString(info.FullHistoryObservers)) } func sliceToString(input []string) string { diff --git a/factory/status/statusComponentsHandler_test.go b/factory/status/statusComponentsHandler_test.go index f84cc07ce20..b6e22ffac94 100644 --- a/factory/status/statusComponentsHandler_test.go +++ b/factory/status/statusComponentsHandler_test.go @@ -208,10 +208,6 @@ func TestComputeConnectedPeers(t *testing.T) { 0: {"cross-o-0"}, 1: {"cross-o-1"}, }, - FullHistoryObservers: map[uint32][]string{ - 0: {"fh-0"}, - 1: {"fh-1"}, - }, NumValidatorsOnShard: map[uint32]int{ 0: 1, 1: 1, @@ -228,14 +224,13 @@ func TestComputeConnectedPeers(t *testing.T) { NumIntraShardObservers: 2, NumCrossShardValidators: 2, NumCrossShardObservers: 2, - NumFullHistoryObservers: 2, } }, AddressesCalled: func() []string { return []string{"intra-v-0", "intra-v-1", "intra-o-0", "intra-o-1", "cross-v-0", "cross-v-1"} }, } - expectedPeerClassification := "intraVal:2,crossVal:2,intraObs:2,crossObs:2,fullObs:2,unknown:1," + expectedPeerClassification := "intraVal:2,crossVal:2,intraObs:2,crossObs:2,unknown:1," cnt := 0 appStatusHandler := &statusHandler.AppStatusHandlerStub{ SetStringValueHandler: func(key string, value string) { @@ -263,9 +258,6 @@ func TestComputeConnectedPeers(t *testing.T) { require.Equal(t, common.MetricP2PCrossShardObservers, key) 
require.Equal(t, "cross-o-0,cross-o-1", value) case 8: - require.Equal(t, common.MetricP2PFullHistoryObservers, key) - require.Equal(t, "fh-0,fh-1", value) - case 9: require.Equal(t, common.MetricP2PPeerInfo, key) require.Equal(t, "intra-v-0,intra-v-1,intra-o-0,intra-o-1,cross-v-0,cross-v-1", value) default: diff --git a/go.mod b/go.mod index d389610c895..36121573f49 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-communication-go v1.0.2 + github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230607144431-6c71c8fcb631 github.com/multiversx/mx-chain-core-go v1.2.6 github.com/multiversx/mx-chain-crypto-go v1.2.6 github.com/multiversx/mx-chain-es-indexer-go v1.4.4 diff --git a/go.sum b/go.sum index a6eac87d03e..75d8504376c 100644 --- a/go.sum +++ b/go.sum @@ -616,8 +616,9 @@ github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2 github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= -github.com/multiversx/mx-chain-communication-go v1.0.2 h1:1AKdqFZNmigt1kcwYMl+L8fzolsb+WpeTX6yzpmvbV4= github.com/multiversx/mx-chain-communication-go v1.0.2/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= +github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230607144431-6c71c8fcb631 h1:FVE0WDfN36glhzFuZOFiyOGIESBUGdzIGcHJ4ez2bik= +github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230607144431-6c71c8fcb631/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1-0.20230510143029-ab37792342df/go.mod 
h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= diff --git a/integrationTests/factory/componentsHelper.go b/integrationTests/factory/componentsHelper.go index 64f70e6bb8c..05a223ded64 100644 --- a/integrationTests/factory/componentsHelper.go +++ b/integrationTests/factory/componentsHelper.go @@ -35,6 +35,7 @@ func CreateDefaultConfig(tb testing.TB) *config.Configs { economicsConfig, _ := common.LoadEconomicsConfig(configPathsHolder.Economics) prefsConfig, _ := common.LoadPreferencesConfig(configPathsHolder.Preferences) p2pConfig, _ := common.LoadP2PConfig(configPathsHolder.P2p) + fullArchiveP2PConfig, _ := common.LoadP2PConfig(configPathsHolder.FullArchiveP2p) externalConfig, _ := common.LoadExternalConfig(configPathsHolder.External) systemSCConfig, _ := common.LoadSystemSmartContractsConfig(configPathsHolder.SystemSC) epochConfig, _ := common.LoadEpochConfig(configPathsHolder.Epoch) @@ -50,7 +51,8 @@ func CreateDefaultConfig(tb testing.TB) *config.Configs { configs.EconomicsConfig = economicsConfig configs.SystemSCConfig = systemSCConfig configs.PreferencesConfig = prefsConfig - configs.P2pConfig = p2pConfig + configs.MainP2pConfig = p2pConfig + configs.FullArchiveP2pConfig = fullArchiveP2PConfig configs.ExternalConfig = externalConfig configs.EpochConfig = epochConfig configs.RoundConfig = roundConfig @@ -80,6 +82,7 @@ func createConfigurationsPathsHolder() *config.ConfigurationPathsHolder { Preferences: concatPath(PrefsPath), External: concatPath(ExternalPath), P2p: concatPath(P2pPath), + FullArchiveP2p: concatPath(FullArchiveP2pPath), Epoch: concatPath(EpochPath), SystemSC: concatPath(SystemSCConfigPath), GasScheduleDirectoryName: concatPath(GasSchedule), diff --git a/integrationTests/factory/constants.go b/integrationTests/factory/constants.go index 1db46e07547..1d5a634598a 100644 --- a/integrationTests/factory/constants.go +++ 
b/integrationTests/factory/constants.go @@ -9,6 +9,7 @@ const ( PrefsPath = "prefs.toml" ExternalPath = "external.toml" P2pPath = "p2p.toml" + FullArchiveP2pPath = "fullArchiveP2P.toml" EpochPath = "enableEpochs.toml" SystemSCConfigPath = "systemSmartContractsConfig.toml" GasSchedule = "gasSchedules" diff --git a/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go b/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go index f8916c7e016..c78b4ef7320 100644 --- a/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go +++ b/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go @@ -40,9 +40,6 @@ func TestConnectionsInNetworkShardingWithShardingWithLists(t *testing.T) { MaxCrossShardObservers: 1, MaxSeeders: 1, Type: p2p.ListsSharder, - AdditionalConnections: p2pConfig.AdditionalConnectionsConfig{ - MaxFullHistoryObservers: 1, - }, } testConnectionsInNetworkSharding(t, p2pCfg) diff --git a/integrationTests/realcomponents/processorRunner.go b/integrationTests/realcomponents/processorRunner.go index 6176a54858e..eb7e62c4bd9 100644 --- a/integrationTests/realcomponents/processorRunner.go +++ b/integrationTests/realcomponents/processorRunner.go @@ -167,7 +167,8 @@ func (pr *ProcessorRunner) createStatusCoreComponents(tb testing.TB) { func (pr *ProcessorRunner) createNetworkComponents(tb testing.TB) { argsNetwork := factoryNetwork.NetworkComponentsFactoryArgs{ - P2pConfig: *pr.Config.P2pConfig, + MainP2pConfig: *pr.Config.MainP2pConfig, + FullArchiveP2pConfig: *pr.Config.FullArchiveP2pConfig, MainConfig: *pr.Config.GeneralConfig, RatingsConfig: *pr.Config.RatingsConfig, StatusHandler: pr.StatusCoreComponents.AppStatusHandler(), diff --git a/integrationTests/testHeartbeatNode.go b/integrationTests/testHeartbeatNode.go index d8457d853ab..54a2b206587 100644 --- a/integrationTests/testHeartbeatNode.go +++ b/integrationTests/testHeartbeatNode.go @@ -742,13 +742,12 @@ func MakeDisplayTableForHeartbeatNodes(nodes 
map[uint32][]*TestHeartbeatNode) st fmt.Sprintf("%d", n.CountGlobalMessages()), fmt.Sprintf("%d", n.CountIntraShardMessages()), fmt.Sprintf("%d", n.CountCrossShardMessages()), - fmt.Sprintf("%d/%d/%d/%d/%d/%d/%d/%d", + fmt.Sprintf("%d/%d/%d/%d/%d/%d/%d", len(n.Messenger.ConnectedPeers()), peerInfo.NumIntraShardValidators, peerInfo.NumCrossShardValidators, peerInfo.NumIntraShardObservers, peerInfo.NumCrossShardObservers, - peerInfo.NumFullHistoryObservers, len(peerInfo.UnknownPeers), len(peerInfo.Seeders), ), diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index b99b683edcc..acd2ecd4caa 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -167,6 +167,7 @@ func CreateMessengerWithKadDht(initialAddr string) p2p.Messenger { P2pPrivateKey: mock.NewPrivateKeyMock(), P2pSingleSigner: &mock.SignerMock{}, P2pKeyGenerator: &mock.KeyGenMock{}, + Logger: logger.GetOrCreate("tests/p2p"), } libP2PMes, err := p2pFactory.NewNetworkMessenger(arg) @@ -189,11 +190,7 @@ func CreateMessengerFromConfig(p2pConfig p2pConfig.P2PConfig) p2p.Messenger { P2pPrivateKey: mock.NewPrivateKeyMock(), P2pSingleSigner: &mock.SignerMock{}, P2pKeyGenerator: &mock.KeyGenMock{}, - } - - if p2pConfig.Sharding.AdditionalConnections.MaxFullHistoryObservers > 0 { - // we deliberately set this, automatically choose full archive node mode - arg.NodeOperationMode = p2p.FullArchiveMode + Logger: logger.GetOrCreate("tests/p2p"), } libP2PMes, err := p2pFactory.NewNetworkMessenger(arg) @@ -210,17 +207,13 @@ func CreateMessengerFromConfigWithPeersRatingHandler(p2pConfig p2pConfig.P2PConf P2pConfig: p2pConfig, SyncTimer: &p2pFactory.LocalSyncTimer{}, PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - NodeOperationMode: p2p.NormalOperation, PeersRatingHandler: peersRatingHandler, ConnectionWatcherType: p2p.ConnectionWatcherTypePrint, P2pPrivateKey: mock.NewPrivateKeyMock(), P2pSingleSigner: &mock.SignerMock{}, P2pKeyGenerator: 
&mock.KeyGenMock{}, - } - - if p2pConfig.Sharding.AdditionalConnections.MaxFullHistoryObservers > 0 { - // we deliberately set this, automatically choose full archive node mode - arg.NodeOperationMode = p2p.FullArchiveMode + Logger: logger.GetOrCreate("tests/p2p"), + NodeOperationMode: p2p.NormalOperation, } libP2PMes, err := p2pFactory.NewNetworkMessenger(arg) diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 0fa62816e8e..82f0a4d20cf 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -442,6 +442,7 @@ func newBaseTestProcessorNode(args ArgTestProcessorNode) *TestProcessorNode { p2pFactory.ArgPeersRatingHandler{ TopRatedCache: topRatedCache, BadRatedCache: badRatedCache, + Logger: &testscommon.LoggerStub{}, }) } diff --git a/node/metrics/metrics.go b/node/metrics/metrics.go index d584be00004..e862de23c9c 100644 --- a/node/metrics/metrics.go +++ b/node/metrics/metrics.go @@ -69,7 +69,6 @@ func InitBaseMetrics(appStatusHandler core.AppStatusHandler) error { appStatusHandler.SetStringValue(common.MetricP2PIntraShardObservers, initString) appStatusHandler.SetStringValue(common.MetricP2PCrossShardValidators, initString) appStatusHandler.SetStringValue(common.MetricP2PCrossShardObservers, initString) - appStatusHandler.SetStringValue(common.MetricP2PFullHistoryObservers, initString) appStatusHandler.SetStringValue(common.MetricP2PUnknownPeers, initString) appStatusHandler.SetStringValue(common.MetricInflation, initZeroString) diff --git a/node/metrics/metrics_test.go b/node/metrics/metrics_test.go index 381a16100d6..a3a6cd8ea30 100644 --- a/node/metrics/metrics_test.go +++ b/node/metrics/metrics_test.go @@ -52,7 +52,6 @@ func TestInitBaseMetrics(t *testing.T) { common.MetricP2PIntraShardObservers, common.MetricP2PCrossShardValidators, common.MetricP2PCrossShardObservers, - common.MetricP2PFullHistoryObservers, common.MetricP2PUnknownPeers, common.MetricInflation, 
common.MetricDevRewardsInEpoch, diff --git a/node/nodeRunner.go b/node/nodeRunner.go index 024a2e19020..00d3bf8e038 100644 --- a/node/nodeRunner.go +++ b/node/nodeRunner.go @@ -1104,6 +1104,7 @@ func (nr *nodeRunner) logSessionInformation( configurationPaths.SmartContracts, configurationPaths.Nodes, configurationPaths.P2p, + configurationPaths.FullArchiveP2p, configurationPaths.Preferences, configurationPaths.Ratings, configurationPaths.SystemSC, @@ -1384,7 +1385,8 @@ func (nr *nodeRunner) CreateManagedNetworkComponents( cryptoComponents mainFactory.CryptoComponentsHolder, ) (mainFactory.NetworkComponentsHandler, error) { networkComponentsFactoryArgs := networkComp.NetworkComponentsFactoryArgs{ - P2pConfig: *nr.configs.P2pConfig, + MainP2pConfig: *nr.configs.MainP2pConfig, + FullArchiveP2pConfig: *nr.configs.FullArchiveP2pConfig, MainConfig: *nr.configs.GeneralConfig, RatingsConfig: *nr.configs.RatingsConfig, StatusHandler: statusCoreComponents.AppStatusHandler(), diff --git a/p2p/config/config.go b/p2p/config/config.go index eb2bf95d07c..311a6e64484 100644 --- a/p2p/config/config.go +++ b/p2p/config/config.go @@ -13,7 +13,3 @@ type KadDhtPeerDiscoveryConfig = config.KadDhtPeerDiscoveryConfig // ShardingConfig will hold the network sharding config settings type ShardingConfig = config.ShardingConfig - -// AdditionalConnectionsConfig will hold the additional connections that will be open when certain conditions are met -// All these values should be added to the maximum target peer count value -type AdditionalConnectionsConfig = config.AdditionalConnectionsConfig diff --git a/p2p/interface.go b/p2p/interface.go index f643852dc32..808f22b77c3 100644 --- a/p2p/interface.go +++ b/p2p/interface.go @@ -125,3 +125,6 @@ type P2PKeyConverter interface { ConvertPublicKeyToPeerID(pk crypto.PublicKey) (core.PeerID, error) IsInterfaceNil() bool } + +// Logger defines the behavior of a data logger component +type Logger = p2p.Logger diff --git 
a/testscommon/components/components.go b/testscommon/components/components.go index c979f7c2775..c2873aa728c 100644 --- a/testscommon/components/components.go +++ b/testscommon/components/components.go @@ -261,9 +261,6 @@ func GetNetworkFactoryArgs() networkComp.NetworkComponentsFactoryArgs { MaxCrossShardObservers: 10, MaxSeeders: 2, Type: "NilListSharder", - AdditionalConnections: p2pConfig.AdditionalConnectionsConfig{ - MaxFullHistoryObservers: 10, - }, }, } @@ -295,10 +292,11 @@ func GetNetworkFactoryArgs() networkComp.NetworkComponentsFactoryArgs { cryptoCompMock := GetDefaultCryptoComponents() return networkComp.NetworkComponentsFactoryArgs{ - P2pConfig: p2pCfg, - MainConfig: mainConfig, - StatusHandler: appStatusHandler, - Marshalizer: &mock.MarshalizerMock{}, + MainP2pConfig: p2pCfg, + NodeOperationMode: p2p.NormalOperation, + MainConfig: mainConfig, + StatusHandler: appStatusHandler, + Marshalizer: &mock.MarshalizerMock{}, RatingsConfig: config.RatingsConfig{ General: config.General{}, ShardChain: config.ShardChain{}, @@ -312,9 +310,8 @@ func GetNetworkFactoryArgs() networkComp.NetworkComponentsFactoryArgs { UnitValue: 1.0, }, }, - Syncer: &p2pFactory.LocalSyncTimer{}, - NodeOperationMode: p2p.NormalOperation, - CryptoComponents: cryptoCompMock, + Syncer: &p2pFactory.LocalSyncTimer{}, + CryptoComponents: cryptoCompMock, } } diff --git a/testscommon/p2pmocks/messengerStub.go b/testscommon/p2pmocks/messengerStub.go index a1c2cefd481..5dd1722402d 100644 --- a/testscommon/p2pmocks/messengerStub.go +++ b/testscommon/p2pmocks/messengerStub.go @@ -9,7 +9,6 @@ import ( // MessengerStub - type MessengerStub struct { - ConnectedFullHistoryPeersOnTopicCalled func(topic string) []core.PeerID IDCalled func() core.PeerID CloseCalled func() error CreateTopicCalled func(name string, createChannelForTopic bool) error @@ -47,15 +46,6 @@ type MessengerStub struct { SignUsingPrivateKeyCalled func(skBytes []byte, payload []byte) ([]byte, error) } -// 
ConnectedFullHistoryPeersOnTopic - -func (ms *MessengerStub) ConnectedFullHistoryPeersOnTopic(topic string) []core.PeerID { - if ms.ConnectedFullHistoryPeersOnTopicCalled != nil { - return ms.ConnectedFullHistoryPeersOnTopicCalled(topic) - } - - return make([]core.PeerID, 0) -} - // ID - func (ms *MessengerStub) ID() core.PeerID { if ms.IDCalled != nil { diff --git a/testscommon/realConfigsHandling.go b/testscommon/realConfigsHandling.go index 2041d9f7375..c69f5471b30 100644 --- a/testscommon/realConfigsHandling.go +++ b/testscommon/realConfigsHandling.go @@ -42,7 +42,10 @@ func CreateTestConfigs(tb testing.TB, originalConfigsPath string) *config.Config prefsConfig, err := common.LoadPreferencesConfig(path.Join(newConfigsPath, "prefs.toml")) require.Nil(tb, err) - p2pConfig, err := common.LoadP2PConfig(path.Join(newConfigsPath, "p2p.toml")) + mainP2PConfig, err := common.LoadP2PConfig(path.Join(newConfigsPath, "p2p.toml")) + require.Nil(tb, err) + + fullArchiveP2PConfig, err := common.LoadP2PConfig(path.Join(newConfigsPath, "fullArchiveP2P.toml")) require.Nil(tb, err) externalConfig, err := common.LoadExternalConfig(path.Join(newConfigsPath, "external.toml")) @@ -58,18 +61,21 @@ func CreateTestConfigs(tb testing.TB, originalConfigsPath string) *config.Config require.Nil(tb, err) // make the node pass the network wait constraints - p2pConfig.Node.MinNumPeersToWaitForOnBootstrap = 0 - p2pConfig.Node.ThresholdMinConnectedPeers = 0 + mainP2PConfig.Node.MinNumPeersToWaitForOnBootstrap = 0 + mainP2PConfig.Node.ThresholdMinConnectedPeers = 0 + fullArchiveP2PConfig.Node.MinNumPeersToWaitForOnBootstrap = 0 + fullArchiveP2PConfig.Node.ThresholdMinConnectedPeers = 0 return &config.Configs{ - GeneralConfig: generalConfig, - ApiRoutesConfig: apiConfig, - EconomicsConfig: economicsConfig, - SystemSCConfig: systemSCConfig, - RatingsConfig: ratingsConfig, - PreferencesConfig: prefsConfig, - ExternalConfig: externalConfig, - P2pConfig: p2pConfig, + GeneralConfig: generalConfig, + 
ApiRoutesConfig: apiConfig, + EconomicsConfig: economicsConfig, + SystemSCConfig: systemSCConfig, + RatingsConfig: ratingsConfig, + PreferencesConfig: prefsConfig, + ExternalConfig: externalConfig, + MainP2pConfig: mainP2PConfig, + FullArchiveP2pConfig: fullArchiveP2PConfig, FlagsConfig: &config.ContextFlagsConfig{ WorkingDir: tempDir, NoKeyProvided: true, From d0f274e2f1d40fee504c3833af7a85541c767ac0 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Thu, 8 Jun 2023 14:13:22 +0300 Subject: [PATCH 02/38] fixes after review + updated mx-chain-communication-go as part of the fixes were made there --- cmd/node/config/fullArchiveP2P.toml | 2 -- cmd/node/main.go | 6 +++--- cmd/seednode/main.go | 1 - config/config.go | 2 +- factory/network/networkComponents.go | 6 ++---- go.mod | 2 +- go.sum | 4 ++-- integrationTests/factory/componentsHelper.go | 8 ++++---- integrationTests/factory/constants.go | 2 +- integrationTests/testInitializer.go | 3 --- node/nodeRunner.go | 2 +- {factory => p2p}/disabled/networkMessenger.go | 0 12 files changed, 15 insertions(+), 23 deletions(-) rename {factory => p2p}/disabled/networkMessenger.go (100%) diff --git a/cmd/node/config/fullArchiveP2P.toml b/cmd/node/config/fullArchiveP2P.toml index 9e751da6005..67ed99f52ea 100644 --- a/cmd/node/config/fullArchiveP2P.toml +++ b/cmd/node/config/fullArchiveP2P.toml @@ -1,7 +1,5 @@ #FullArchiveP2P config file -#TODO[Sorin]: proper values on this config, it is just a copy of p2p.toml for the moment - #NodeConfig holds the P2P settings [Node] #Port is the port that will be opened by the node on all interfaces so other peers can connect to it diff --git a/cmd/node/main.go b/cmd/node/main.go index 6bfba8d64e4..9da31568a1d 100644 --- a/cmd/node/main.go +++ b/cmd/node/main.go @@ -201,12 +201,12 @@ func readConfigs(ctx *cli.Context, log logger.Logger) (*config.Configs, error) { } log.Debug("config", "file", configurationPaths.External) - configurationPaths.P2p = ctx.GlobalString(p2pConfigurationFile.Name) 
- mainP2PConfig, err := common.LoadP2PConfig(configurationPaths.P2p) + configurationPaths.MainP2p = ctx.GlobalString(p2pConfigurationFile.Name) + mainP2PConfig, err := common.LoadP2PConfig(configurationPaths.MainP2p) if err != nil { return nil, err } - log.Debug("config", "file", configurationPaths.P2p) + log.Debug("config", "file", configurationPaths.MainP2p) configurationPaths.FullArchiveP2p = ctx.GlobalString(fullArchiveP2PConfigurationFile.Name) fullArchiveP2PConfig, err := common.LoadP2PConfig(configurationPaths.FullArchiveP2p) diff --git a/cmd/seednode/main.go b/cmd/seednode/main.go index 9005d2d83ab..c76756357d5 100644 --- a/cmd/seednode/main.go +++ b/cmd/seednode/main.go @@ -263,7 +263,6 @@ func createNode( P2pConfig: p2pConfig, SyncTimer: &p2pFactory.LocalSyncTimer{}, PreferredPeersHolder: disabled.NewPreferredPeersHolder(), - NodeOperationMode: p2p.NormalOperation, PeersRatingHandler: disabled.NewDisabledPeersRatingHandler(), ConnectionWatcherType: "disabled", P2pPrivateKey: p2pKey, diff --git a/config/config.go b/config/config.go index 96367fa3c1b..57dc7139e74 100644 --- a/config/config.go +++ b/config/config.go @@ -587,7 +587,7 @@ type ConfigurationPathsHolder struct { Ratings string Preferences string External string - P2p string + MainP2p string FullArchiveP2p string GasScheduleDirectoryName string Nodes string diff --git a/factory/network/networkComponents.go b/factory/network/networkComponents.go index d75af6875f8..e34993b5ac7 100644 --- a/factory/network/networkComponents.go +++ b/factory/network/networkComponents.go @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-go/factory/disabled" "github.com/multiversx/mx-chain-go/p2p" p2pConfig "github.com/multiversx/mx-chain-go/p2p/config" + p2pDisabled "github.com/multiversx/mx-chain-go/p2p/disabled" p2pFactory "github.com/multiversx/mx-chain-go/p2p/factory" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/rating/peerHonesty" @@ -203,8 +204,6 @@ func (ncf 
*networkComponentsFactory) Create() (*networkComponents, error) { return nil, err } - fullArchiveNetworkComp.netMessenger.WaitForConnections(ncf.bootstrapWaitTime, ncf.fullArchiveP2PConfig.Node.MinNumPeersToWaitForOnBootstrap) - return &networkComponents{ mainNetworkHolder: mainNetworkComp, fullArchiveNetworkHolder: fullArchiveNetworkComp, @@ -267,7 +266,6 @@ func (ncf *networkComponentsFactory) createNetworkHolder( P2pConfig: p2pConfig, SyncTimer: ncf.syncer, PreferredPeersHolder: peersHolder, - NodeOperationMode: ncf.nodeOperationMode, PeersRatingHandler: peersRatingHandler, ConnectionWatcherType: ncf.connectionWatcherType, P2pPrivateKey: ncf.cryptoComponents.P2pPrivateKey(), @@ -305,7 +303,7 @@ func (ncf *networkComponentsFactory) createMainNetworkHolder(peersHolder p2p.Pre func (ncf *networkComponentsFactory) createFullArchiveNetworkHolder(peersHolder p2p.PreferredPeersHolderHandler) (networkComponentsHolder, error) { if ncf.nodeOperationMode != p2p.FullArchiveMode { return networkComponentsHolder{ - netMessenger: disabled.NewNetworkMessenger(), + netMessenger: p2pDisabled.NewNetworkMessenger(), peersRatingHandler: disabled.NewPeersRatingHandler(), peersRatingMonitor: disabled.NewPeersRatingMonitor(), }, nil diff --git a/go.mod b/go.mod index 36121573f49..17f33caa31a 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230607144431-6c71c8fcb631 + github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230608110322-586e89326c74 github.com/multiversx/mx-chain-core-go v1.2.6 github.com/multiversx/mx-chain-crypto-go v1.2.6 github.com/multiversx/mx-chain-es-indexer-go v1.4.4 diff --git a/go.sum b/go.sum index 75d8504376c..6c8a9c3057f 100644 --- a/go.sum +++ b/go.sum @@ -617,8 +617,8 @@ github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXS 
github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= github.com/multiversx/mx-chain-communication-go v1.0.2/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= -github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230607144431-6c71c8fcb631 h1:FVE0WDfN36glhzFuZOFiyOGIESBUGdzIGcHJ4ez2bik= -github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230607144431-6c71c8fcb631/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= +github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230608110322-586e89326c74 h1:jf3bWYdUku19843q7KwBKBjIOQNi/OTLyjbsE1Yfra8= +github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230608110322-586e89326c74/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1-0.20230510143029-ab37792342df/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= diff --git a/integrationTests/factory/componentsHelper.go b/integrationTests/factory/componentsHelper.go index 05a223ded64..6238243659e 100644 --- a/integrationTests/factory/componentsHelper.go +++ b/integrationTests/factory/componentsHelper.go @@ -34,14 +34,14 @@ func CreateDefaultConfig(tb testing.TB) *config.Configs { ratingsConfig, _ := common.LoadRatingsConfig(configPathsHolder.Ratings) economicsConfig, _ := common.LoadEconomicsConfig(configPathsHolder.Economics) prefsConfig, _ := common.LoadPreferencesConfig(configPathsHolder.Preferences) - p2pConfig, _ := common.LoadP2PConfig(configPathsHolder.P2p) + mainP2PConfig, _ := common.LoadP2PConfig(configPathsHolder.MainP2p) fullArchiveP2PConfig, _ := common.LoadP2PConfig(configPathsHolder.FullArchiveP2p) externalConfig, _ := 
common.LoadExternalConfig(configPathsHolder.External) systemSCConfig, _ := common.LoadSystemSmartContractsConfig(configPathsHolder.SystemSC) epochConfig, _ := common.LoadEpochConfig(configPathsHolder.Epoch) roundConfig, _ := common.LoadRoundConfig(configPathsHolder.RoundActivation) - p2pConfig.KadDhtPeerDiscovery.Enabled = false + mainP2PConfig.KadDhtPeerDiscovery.Enabled = false prefsConfig.Preferences.DestinationShardAsObserver = "0" prefsConfig.Preferences.ConnectionWatcherType = p2p.ConnectionWatcherTypePrint @@ -51,7 +51,7 @@ func CreateDefaultConfig(tb testing.TB) *config.Configs { configs.EconomicsConfig = economicsConfig configs.SystemSCConfig = systemSCConfig configs.PreferencesConfig = prefsConfig - configs.MainP2pConfig = p2pConfig + configs.MainP2pConfig = mainP2PConfig configs.FullArchiveP2pConfig = fullArchiveP2PConfig configs.ExternalConfig = externalConfig configs.EpochConfig = epochConfig @@ -81,7 +81,7 @@ func createConfigurationsPathsHolder() *config.ConfigurationPathsHolder { Economics: concatPath(EconomicsPath), Preferences: concatPath(PrefsPath), External: concatPath(ExternalPath), - P2p: concatPath(P2pPath), + MainP2p: concatPath(MainP2pPath), FullArchiveP2p: concatPath(FullArchiveP2pPath), Epoch: concatPath(EpochPath), SystemSC: concatPath(SystemSCConfigPath), diff --git a/integrationTests/factory/constants.go b/integrationTests/factory/constants.go index 1d5a634598a..9fa9133b135 100644 --- a/integrationTests/factory/constants.go +++ b/integrationTests/factory/constants.go @@ -8,7 +8,7 @@ const ( EconomicsPath = "economics.toml" PrefsPath = "prefs.toml" ExternalPath = "external.toml" - P2pPath = "p2p.toml" + MainP2pPath = "p2p.toml" FullArchiveP2pPath = "fullArchiveP2P.toml" EpochPath = "enableEpochs.toml" SystemSCConfigPath = "systemSmartContractsConfig.toml" diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index acd2ecd4caa..7f6461dba49 100644 --- a/integrationTests/testInitializer.go +++ 
b/integrationTests/testInitializer.go @@ -161,7 +161,6 @@ func CreateMessengerWithKadDht(initialAddr string) p2p.Messenger { P2pConfig: createP2PConfig(initialAddresses), SyncTimer: &p2pFactory.LocalSyncTimer{}, PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - NodeOperationMode: p2p.NormalOperation, PeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, ConnectionWatcherType: p2p.ConnectionWatcherTypePrint, P2pPrivateKey: mock.NewPrivateKeyMock(), @@ -184,7 +183,6 @@ func CreateMessengerFromConfig(p2pConfig p2pConfig.P2PConfig) p2p.Messenger { P2pConfig: p2pConfig, SyncTimer: &p2pFactory.LocalSyncTimer{}, PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - NodeOperationMode: p2p.NormalOperation, PeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, ConnectionWatcherType: p2p.ConnectionWatcherTypePrint, P2pPrivateKey: mock.NewPrivateKeyMock(), @@ -213,7 +211,6 @@ func CreateMessengerFromConfigWithPeersRatingHandler(p2pConfig p2pConfig.P2PConf P2pSingleSigner: &mock.SignerMock{}, P2pKeyGenerator: &mock.KeyGenMock{}, Logger: logger.GetOrCreate("tests/p2p"), - NodeOperationMode: p2p.NormalOperation, } libP2PMes, err := p2pFactory.NewNetworkMessenger(arg) diff --git a/node/nodeRunner.go b/node/nodeRunner.go index 00d3bf8e038..410aaf16661 100644 --- a/node/nodeRunner.go +++ b/node/nodeRunner.go @@ -1103,7 +1103,7 @@ func (nr *nodeRunner) logSessionInformation( configurationPaths.Genesis, configurationPaths.SmartContracts, configurationPaths.Nodes, - configurationPaths.P2p, + configurationPaths.MainP2p, configurationPaths.FullArchiveP2p, configurationPaths.Preferences, configurationPaths.Ratings, diff --git a/factory/disabled/networkMessenger.go b/p2p/disabled/networkMessenger.go similarity index 100% rename from factory/disabled/networkMessenger.go rename to p2p/disabled/networkMessenger.go From 1f8baffc1627d154f28854984c556c2586eeab75 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Thu, 8 Jun 2023 17:58:26 +0300 Subject: [PATCH 03/38] fixes after review 
--- cmd/node/config/fullArchiveP2P.toml | 66 ++++++++++++++--------------- cmd/node/config/p2p.toml | 66 ++++++++++++++--------------- 2 files changed, 66 insertions(+), 66 deletions(-) diff --git a/cmd/node/config/fullArchiveP2P.toml b/cmd/node/config/fullArchiveP2P.toml index 67ed99f52ea..a7bfa21d7dc 100644 --- a/cmd/node/config/fullArchiveP2P.toml +++ b/cmd/node/config/fullArchiveP2P.toml @@ -1,13 +1,13 @@ -#FullArchiveP2P config file +# FullArchiveP2P config file -#NodeConfig holds the P2P settings +# NodeConfig holds the P2P settings [Node] - #Port is the port that will be opened by the node on all interfaces so other peers can connect to it - #If the port = 0, the node will search for a free port on the machine and use it + # Port is the port that will be opened by the node on all interfaces so other peers can connect to it + # If the port = 0, the node will search for a free port on the machine and use it Port = "37373-38383" - #ThresholdMinConnectedPeers represents the minimum number of connections a node should have before it can start - #the sync and consensus mechanisms + # ThresholdMinConnectedPeers represents the minimum number of connections a node should have before it can start + # the sync and consensus mechanisms ThresholdMinConnectedPeers = 3 # MinNumPeersToWaitForOnBootstrap is the minimum number of peers to wait on bootstrap or the node will wait the default @@ -16,43 +16,43 @@ # P2P peer discovery section -#The following sections correspond to the way new peers will be discovered -#If all config types are disabled then the peer will run in single mode (will not try to find other peers) -#If more than one peer discovery mechanism is enabled, the application will output an error and will not start +# The following sections correspond to the way new peers will be discovered +# If all config types are disabled then the peer will run in single mode (will not try to find other peers) +# If more than one peer discovery mechanism is enabled, the 
application will output an error and will not start [KadDhtPeerDiscovery] - #Enabled: true/false to enable/disable this discovery mechanism + # Enabled: true/false to enable/disable this discovery mechanism Enabled = true - #Type represents the kad-dht glue code implementation. - #"legacy" will define the first implementation. - #"optimized" represents the new variant able to connect to multiple seeders at once. This implementation also has - #a built-in timer that will try to automatically reconnect to the seeders (in case the seeders recover after a - #premature shutdown) + # Type represents the kad-dht glue code implementation. + # "legacy" will define the first implementation. + # "optimized" represents the new variant able to connect to multiple seeders at once. This implementation also has + # a built-in timer that will try to automatically reconnect to the seeders (in case the seeders recover after a + # premature shutdown) Type = "optimized" - #RefreshIntervalInSec represents the time in seconds between querying for new peers + # RefreshIntervalInSec represents the time in seconds between querying for new peers RefreshIntervalInSec = 10 - #ProtocolID represents the protocol that this node will advertize to other peers - #To connect to other nodes, those nodes should have the same ProtocolID string + # ProtocolID represents the protocol that this node will advertize to other peers + # To connect to other nodes, those nodes should have the same ProtocolID string ProtocolID = "/erd/kad/1.0.0" - #InitialPeerList represents the list of strings of some known nodes that will bootstrap this node - #The address will be in a self-describing addressing format. 
- #More can be found here: https://github.com/libp2p/specs/blob/master/3-requirements.md#34-transport-agnostic - #Example: - # /ip6/fe80::8823:6dff:fee7:f172/tcp/4001/p2p/QmYJyUMAcXEw1b5bFfbBbzYu5wyyjLMRHXGUkCXpag74Fu - # /ip4/162.246.145.218/udp/4001/utp/ipfs/QmYJyUMAcXEw1b5bFfbBbzYu5wyyjLMRHXGUkCXpag74Fu + # InitialPeerList represents the list of strings of some known nodes that will bootstrap this node + # The address will be in a self-describing addressing format. + # More can be found here: https://github.com/libp2p/specs/blob/master/3-requirements.md#34-transport-agnostic + # Example: + # /ip6/fe80::8823:6dff:fee7:f172/tcp/4001/p2p/QmYJyUMAcXEw1b5bFfbBbzYu5wyyjLMRHXGUkCXpag74Fu + # /ip4/162.246.145.218/udp/4001/utp/ipfs/QmYJyUMAcXEw1b5bFfbBbzYu5wyyjLMRHXGUkCXpag74Fu # - #If the initial peers list is left empty, the node will not try to connect to other peers during initial bootstrap - #phase but will accept connections and will do the network discovery if another peer connects to it + # If the initial peers list is left empty, the node will not try to connect to other peers during initial bootstrap + # phase but will accept connections and will do the network discovery if another peer connects to it InitialPeerList = ["/ip4/127.0.0.1/tcp/9999/p2p/16Uiu2HAkw5SNNtSvH1zJiQ6Gc3WoGNSxiyNueRKe6fuAuh57G3Bk"] - #kademlia's routing table bucket size + # kademlia's routing table bucket size BucketSize = 100 - #RoutingTableRefreshIntervalInSec defines how many seconds should pass between 2 kad routing table auto refresh calls + # RoutingTableRefreshIntervalInSec defines how many seconds should pass between 2 kad routing table auto refresh calls RoutingTableRefreshIntervalInSec = 300 [Sharding] @@ -64,9 +64,9 @@ MaxCrossShardObservers = 3 MaxSeeders = 2 - #available options: - # `ListsSharder` will split the peers based on the shard membership (intra, cross or unknown) - # `OneListSharder` will do just the connection triming (upto TargetPeerCount value) not taking into 
account - # the shard membership of the connected peers - # `NilListSharder` will disable conection trimming (sharder is off) + # available options: + # `ListsSharder` will split the peers based on the shard membership (intra, cross or unknown) + # `OneListSharder` will do just the connection triming (upto TargetPeerCount value) not taking into account + # the shard membership of the connected peers + # `NilListSharder` will disable conection trimming (sharder is off) Type = "ListsSharder" diff --git a/cmd/node/config/p2p.toml b/cmd/node/config/p2p.toml index d95fca754cc..41c7c129c64 100644 --- a/cmd/node/config/p2p.toml +++ b/cmd/node/config/p2p.toml @@ -1,13 +1,13 @@ -#P2P config file +# P2P config file -#NodeConfig holds the P2P settings +# NodeConfig holds the P2P settings [Node] - #Port is the port that will be opened by the node on all interfaces so other peers can connect to it - #If the port = 0, the node will search for a free port on the machine and use it + # Port is the port that will be opened by the node on all interfaces so other peers can connect to it + # If the port = 0, the node will search for a free port on the machine and use it Port = "37373-38383" - #ThresholdMinConnectedPeers represents the minimum number of connections a node should have before it can start - #the sync and consensus mechanisms + # ThresholdMinConnectedPeers represents the minimum number of connections a node should have before it can start + # the sync and consensus mechanisms ThresholdMinConnectedPeers = 3 # MinNumPeersToWaitForOnBootstrap is the minimum number of peers to wait on bootstrap or the node will wait the default @@ -16,43 +16,43 @@ # P2P peer discovery section -#The following sections correspond to the way new peers will be discovered -#If all config types are disabled then the peer will run in single mode (will not try to find other peers) -#If more than one peer discovery mechanism is enabled, the application will output an error and will not start +# The 
following sections correspond to the way new peers will be discovered +# If all config types are disabled then the peer will run in single mode (will not try to find other peers) +# If more than one peer discovery mechanism is enabled, the application will output an error and will not start [KadDhtPeerDiscovery] - #Enabled: true/false to enable/disable this discovery mechanism + # Enabled: true/false to enable/disable this discovery mechanism Enabled = true - #Type represents the kad-dht glue code implementation. - #"legacy" will define the first implementation. - #"optimized" represents the new variant able to connect to multiple seeders at once. This implementation also has - #a built-in timer that will try to automatically reconnect to the seeders (in case the seeders recover after a - #premature shutdown) + # Type represents the kad-dht glue code implementation. + # "legacy" will define the first implementation. + # "optimized" represents the new variant able to connect to multiple seeders at once. This implementation also has + # a built-in timer that will try to automatically reconnect to the seeders (in case the seeders recover after a + # premature shutdown) Type = "optimized" - #RefreshIntervalInSec represents the time in seconds between querying for new peers + # RefreshIntervalInSec represents the time in seconds between querying for new peers RefreshIntervalInSec = 10 - #ProtocolID represents the protocol that this node will advertize to other peers - #To connect to other nodes, those nodes should have the same ProtocolID string + # ProtocolID represents the protocol that this node will advertize to other peers + # To connect to other nodes, those nodes should have the same ProtocolID string ProtocolID = "/erd/kad/1.0.0" - #InitialPeerList represents the list of strings of some known nodes that will bootstrap this node - #The address will be in a self-describing addressing format. 
- #More can be found here: https://github.com/libp2p/specs/blob/master/3-requirements.md#34-transport-agnostic - #Example: - # /ip6/fe80::8823:6dff:fee7:f172/tcp/4001/p2p/QmYJyUMAcXEw1b5bFfbBbzYu5wyyjLMRHXGUkCXpag74Fu - # /ip4/162.246.145.218/udp/4001/utp/ipfs/QmYJyUMAcXEw1b5bFfbBbzYu5wyyjLMRHXGUkCXpag74Fu + # InitialPeerList represents the list of strings of some known nodes that will bootstrap this node + # The address will be in a self-describing addressing format. + # More can be found here: https://github.com/libp2p/specs/blob/master/3-requirements.md#34-transport-agnostic + # Example: + # /ip6/fe80::8823:6dff:fee7:f172/tcp/4001/p2p/QmYJyUMAcXEw1b5bFfbBbzYu5wyyjLMRHXGUkCXpag74Fu + # /ip4/162.246.145.218/udp/4001/utp/ipfs/QmYJyUMAcXEw1b5bFfbBbzYu5wyyjLMRHXGUkCXpag74Fu # - #If the initial peers list is left empty, the node will not try to connect to other peers during initial bootstrap - #phase but will accept connections and will do the network discovery if another peer connects to it + # If the initial peers list is left empty, the node will not try to connect to other peers during initial bootstrap + # phase but will accept connections and will do the network discovery if another peer connects to it InitialPeerList = ["/ip4/127.0.0.1/tcp/9999/p2p/16Uiu2HAkw5SNNtSvH1zJiQ6Gc3WoGNSxiyNueRKe6fuAuh57G3Bk"] - #kademlia's routing table bucket size + # kademlia's routing table bucket size BucketSize = 100 - #RoutingTableRefreshIntervalInSec defines how many seconds should pass between 2 kad routing table auto refresh calls + # RoutingTableRefreshIntervalInSec defines how many seconds should pass between 2 kad routing table auto refresh calls RoutingTableRefreshIntervalInSec = 300 [Sharding] @@ -64,9 +64,9 @@ MaxCrossShardObservers = 3 MaxSeeders = 2 - #available options: - # `ListsSharder` will split the peers based on the shard membership (intra, cross or unknown) - # `OneListSharder` will do just the connection triming (upto TargetPeerCount value) not taking into 
account - # the shard membership of the connected peers - # `NilListSharder` will disable conection trimming (sharder is off) + # available options: + # `ListsSharder` will split the peers based on the shard membership (intra, cross or unknown) + # `OneListSharder` will do just the connection triming (upto TargetPeerCount value) not taking into account + # the shard membership of the connected peers + # `NilListSharder` will disable conection trimming (sharder is off) Type = "ListsSharder" From 7931499f3f79ea4402c53f7aae27c0a2c7411c02 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Fri, 9 Jun 2023 12:26:05 +0300 Subject: [PATCH 04/38] updated heartbeatV2Components to send messages on the new network as well added todos for further implementation --- dataRetriever/factory/dataPoolFactory.go | 1 + epochStart/bootstrap/process.go | 4 +- factory/heartbeat/heartbeatV2Components.go | 125 +++++++++++----- .../heartbeat/heartbeatV2Components_test.go | 3 +- factory/interface.go | 3 + factory/mock/networkComponentsMock.go | 32 +++- factory/network/networkComponentsHandler.go | 1 - heartbeat/sender/baseSender.go | 16 +- heartbeat/sender/baseSender_test.go | 3 +- heartbeat/sender/bootstrapSender.go | 6 +- heartbeat/sender/bootstrapSender_test.go | 32 +++- .../sender/commonPeerAuthenticationSender.go | 4 +- heartbeat/sender/heartbeatSender.go | 3 +- heartbeat/sender/heartbeatSenderFactory.go | 6 +- heartbeat/sender/heartbeatSender_test.go | 56 +++++-- heartbeat/sender/multikeyHeartbeatSender.go | 6 +- .../sender/multikeyHeartbeatSender_test.go | 140 ++++++++++++++---- .../multikeyPeerAuthenticationSender.go | 3 +- .../multikeyPeerAuthenticationSender_test.go | 60 +++++--- heartbeat/sender/peerAuthenticationSender.go | 7 +- .../sender/peerAuthenticationSender_test.go | 113 +++++++++----- heartbeat/sender/peerShardSender.go | 19 ++- heartbeat/sender/peerShardSender_test.go | 62 ++++++-- heartbeat/sender/sender.go | 18 ++- heartbeat/sender/sender_test.go | 40 +++-- 
.../mock/networkComponentsMock.go | 36 +++-- .../node/heartbeatV2/heartbeatV2_test.go | 3 +- .../networkSharding_test.go | 15 +- integrationTests/testHeartbeatNode.go | 128 ++++++++++------ node/mock/factory/networkComponentsMock.go | 32 +++- .../baseInterceptorsContainerFactory.go | 1 + 31 files changed, 701 insertions(+), 277 deletions(-) diff --git a/dataRetriever/factory/dataPoolFactory.go b/dataRetriever/factory/dataPoolFactory.go index 3f4cf038c4a..052ade3afd7 100644 --- a/dataRetriever/factory/dataPoolFactory.go +++ b/dataRetriever/factory/dataPoolFactory.go @@ -128,6 +128,7 @@ func NewDataPoolFromConfig(args ArgsDataPool) (dataRetriever.PoolsHolder, error) return nil, fmt.Errorf("%w while creating the cache for the smartcontract results", err) } + // TODO[Sorin]: create new peer authentication and heartbeat cachers for the messages from full archive network peerAuthPool, err := cache.NewTimeCacher(cache.ArgTimeCacher{ DefaultSpan: time.Duration(mainConfig.HeartbeatV2.PeerAuthenticationTimeBetweenSendsInSec) * time.Second * peerAuthExpiryMultiplier, CacheExpiry: peerAuthenticationCacheRefresh, diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index aae8cc137ff..199f2a41935 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -30,6 +30,7 @@ import ( "github.com/multiversx/mx-chain-go/epochStart/bootstrap/types" factoryDisabled "github.com/multiversx/mx-chain-go/factory/disabled" "github.com/multiversx/mx-chain-go/heartbeat/sender" + disabledP2P "github.com/multiversx/mx-chain-go/p2p/disabled" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/preprocess" "github.com/multiversx/mx-chain-go/process/heartbeat/validator" @@ -1305,7 +1306,8 @@ func (e *epochStartBootstrap) createHeartbeatSender() error { } heartbeatCfg := e.generalConfig.HeartbeatV2 argsHeartbeatSender := sender.ArgBootstrapSender{ - Messenger: e.messenger, + MainMessenger: e.messenger, + 
FullArchiveMessenger: disabledP2P.NewNetworkMessenger(), // TODO[Sorin]: pass full archive messenger Marshaller: e.coreComponentsHolder.InternalMarshalizer(), HeartbeatTopic: heartbeatTopic, HeartbeatTimeBetweenSends: time.Second * time.Duration(heartbeatCfg.HeartbeatTimeBetweenSendsDuringBootstrapInSec), diff --git a/factory/heartbeat/heartbeatV2Components.go b/factory/heartbeat/heartbeatV2Components.go index 08b0e65bd58..6cb42b46c0e 100644 --- a/factory/heartbeat/heartbeatV2Components.go +++ b/factory/heartbeat/heartbeatV2Components.go @@ -15,6 +15,7 @@ import ( "github.com/multiversx/mx-chain-go/heartbeat/processor" "github.com/multiversx/mx-chain-go/heartbeat/sender" "github.com/multiversx/mx-chain-go/heartbeat/status" + "github.com/multiversx/mx-chain-go/p2p" processFactory "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/process/peer" "github.com/multiversx/mx-chain-go/update" @@ -53,12 +54,13 @@ type heartbeatV2ComponentsFactory struct { } type heartbeatV2Components struct { - sender update.Closer - peerAuthRequestsProcessor update.Closer - shardSender update.Closer - monitor factory.HeartbeatV2Monitor - statusHandler update.Closer - directConnectionProcessor update.Closer + sender update.Closer + peerAuthRequestsProcessor update.Closer + shardSender update.Closer + monitor factory.HeartbeatV2Monitor + statusHandler update.Closer + mainDirectConnectionProcessor update.Closer + fullArchiveDirectConnectionProcessor update.Closer } // NewHeartbeatV2ComponentsFactory creates a new instance of heartbeatV2ComponentsFactory @@ -120,17 +122,9 @@ func checkHeartbeatV2FactoryArgs(args ArgHeartbeatV2ComponentsFactory) error { // Create creates the heartbeatV2 components func (hcf *heartbeatV2ComponentsFactory) Create() (*heartbeatV2Components, error) { - if !hcf.networkComponents.NetworkMessenger().HasTopic(common.PeerAuthenticationTopic) { - err := 
hcf.networkComponents.NetworkMessenger().CreateTopic(common.PeerAuthenticationTopic, true) - if err != nil { - return nil, err - } - } - if !hcf.networkComponents.NetworkMessenger().HasTopic(common.HeartbeatV2Topic) { - err := hcf.networkComponents.NetworkMessenger().CreateTopic(common.HeartbeatV2Topic, true) - if err != nil { - return nil, err - } + err := hcf.createTopicsIfNeeded() + if err != nil { + return nil, err } cfg := hcf.config.HeartbeatV2 @@ -157,11 +151,12 @@ func (hcf *heartbeatV2ComponentsFactory) Create() (*heartbeatV2Components, error } argsSender := sender.ArgSender{ - Messenger: hcf.networkComponents.NetworkMessenger(), - Marshaller: hcf.coreComponents.InternalMarshalizer(), - PeerAuthenticationTopic: common.PeerAuthenticationTopic, - HeartbeatTopic: heartbeatTopic, - PeerAuthenticationTimeBetweenSends: time.Second * time.Duration(cfg.PeerAuthenticationTimeBetweenSendsInSec), + MainMessenger: hcf.networkComponents.NetworkMessenger(), + FullArchiveMessenger: hcf.networkComponents.FullArchiveNetworkMessenger(), + Marshaller: hcf.coreComponents.InternalMarshalizer(), + PeerAuthenticationTopic: common.PeerAuthenticationTopic, + HeartbeatTopic: heartbeatTopic, + PeerAuthenticationTimeBetweenSends: time.Second * time.Duration(cfg.PeerAuthenticationTimeBetweenSendsInSec), PeerAuthenticationTimeBetweenSendsWhenError: time.Second * time.Duration(cfg.PeerAuthenticationTimeBetweenSendsWhenErrorInSec), PeerAuthenticationTimeThresholdBetweenSends: cfg.PeerAuthenticationTimeThresholdBetweenSends, HeartbeatTimeBetweenSends: time.Second * time.Duration(cfg.HeartbeatTimeBetweenSendsInSec), @@ -209,7 +204,8 @@ func (hcf *heartbeatV2ComponentsFactory) Create() (*heartbeatV2Components, error } argsPeerShardSender := sender.ArgPeerShardSender{ - Messenger: hcf.networkComponents.NetworkMessenger(), + MainMessenger: hcf.networkComponents.NetworkMessenger(), + FullArchiveMessenger: hcf.networkComponents.FullArchiveNetworkMessenger(), Marshaller: 
hcf.coreComponents.InternalMarshalizer(), ShardCoordinator: hcf.bootstrapComponents.ShardCoordinator(), TimeBetweenSends: time.Second * time.Duration(cfg.PeerShardTimeBetweenSendsInSec), @@ -247,7 +243,7 @@ func (hcf *heartbeatV2ComponentsFactory) Create() (*heartbeatV2Components, error return nil, err } - argsDirectConnectionProcessor := processor.ArgsDirectConnectionProcessor{ + argsMainDirectConnectionProcessor := processor.ArgsDirectConnectionProcessor{ TimeToReadDirectConnections: time.Second * time.Duration(cfg.TimeToReadDirectConnectionsInSec), Messenger: hcf.networkComponents.NetworkMessenger(), PeerShardMapper: hcf.processComponents.PeerShardMapper(), @@ -255,34 +251,87 @@ func (hcf *heartbeatV2ComponentsFactory) Create() (*heartbeatV2Components, error BaseIntraShardTopic: common.ConsensusTopic, BaseCrossShardTopic: processFactory.MiniBlocksTopic, } - directConnectionProcessor, err := processor.NewDirectConnectionProcessor(argsDirectConnectionProcessor) + mainDirectConnectionProcessor, err := processor.NewDirectConnectionProcessor(argsMainDirectConnectionProcessor) if err != nil { return nil, err } - argsCrossShardPeerTopicNotifier := monitor.ArgsCrossShardPeerTopicNotifier{ + argsFullArchiveDirectConnectionProcessor := processor.ArgsDirectConnectionProcessor{ + TimeToReadDirectConnections: time.Second * time.Duration(cfg.TimeToReadDirectConnectionsInSec), + Messenger: hcf.networkComponents.FullArchiveNetworkMessenger(), + PeerShardMapper: hcf.processComponents.PeerShardMapper(), // TODO[Sorin]: replace this with the full archive psm + ShardCoordinator: hcf.processComponents.ShardCoordinator(), + BaseIntraShardTopic: common.ConsensusTopic, + BaseCrossShardTopic: processFactory.MiniBlocksTopic, + } + fullArchiveDirectConnectionProcessor, err := processor.NewDirectConnectionProcessor(argsFullArchiveDirectConnectionProcessor) + if err != nil { + return nil, err + } + + argsMainCrossShardPeerTopicNotifier := monitor.ArgsCrossShardPeerTopicNotifier{ 
ShardCoordinator: hcf.processComponents.ShardCoordinator(), PeerShardMapper: hcf.processComponents.PeerShardMapper(), } - crossShardPeerTopicNotifier, err := monitor.NewCrossShardPeerTopicNotifier(argsCrossShardPeerTopicNotifier) + mainCrossShardPeerTopicNotifier, err := monitor.NewCrossShardPeerTopicNotifier(argsMainCrossShardPeerTopicNotifier) + if err != nil { + return nil, err + } + err = hcf.networkComponents.NetworkMessenger().AddPeerTopicNotifier(mainCrossShardPeerTopicNotifier) + if err != nil { + return nil, err + } + + argsFullArchiveCrossShardPeerTopicNotifier := monitor.ArgsCrossShardPeerTopicNotifier{ + ShardCoordinator: hcf.processComponents.ShardCoordinator(), + PeerShardMapper: hcf.processComponents.PeerShardMapper(), // TODO[Sorin]: replace this with the full archive psm + } + fullArchiveCrossShardPeerTopicNotifier, err := monitor.NewCrossShardPeerTopicNotifier(argsFullArchiveCrossShardPeerTopicNotifier) if err != nil { return nil, err } - err = hcf.networkComponents.NetworkMessenger().AddPeerTopicNotifier(crossShardPeerTopicNotifier) + err = hcf.networkComponents.FullArchiveNetworkMessenger().AddPeerTopicNotifier(fullArchiveCrossShardPeerTopicNotifier) if err != nil { return nil, err } return &heartbeatV2Components{ - sender: heartbeatV2Sender, - peerAuthRequestsProcessor: paRequestsProcessor, - shardSender: shardSender, - monitor: heartbeatsMonitor, - statusHandler: statusHandler, - directConnectionProcessor: directConnectionProcessor, + sender: heartbeatV2Sender, + peerAuthRequestsProcessor: paRequestsProcessor, + shardSender: shardSender, + monitor: heartbeatsMonitor, + statusHandler: statusHandler, + mainDirectConnectionProcessor: mainDirectConnectionProcessor, + fullArchiveDirectConnectionProcessor: fullArchiveDirectConnectionProcessor, }, nil } +func (hcf *heartbeatV2ComponentsFactory) createTopicsIfNeeded() error { + err := createTopicIfNeededOnMessenger(hcf.networkComponents.NetworkMessenger()) + if err != nil { + return err + } + + return 
createTopicIfNeededOnMessenger(hcf.networkComponents.FullArchiveNetworkMessenger()) +} + +func createTopicIfNeededOnMessenger(messenger p2p.Messenger) error { + if !messenger.HasTopic(common.PeerAuthenticationTopic) { + err := messenger.CreateTopic(common.PeerAuthenticationTopic, true) + if err != nil { + return err + } + } + if !messenger.HasTopic(common.HeartbeatV2Topic) { + err := messenger.CreateTopic(common.HeartbeatV2Topic, true) + if err != nil { + return err + } + } + + return nil +} + // Close closes the heartbeat components func (hc *heartbeatV2Components) Close() error { log.Debug("calling close on heartbeatV2 components") @@ -303,8 +352,12 @@ func (hc *heartbeatV2Components) Close() error { log.LogIfError(hc.statusHandler.Close()) } - if !check.IfNil(hc.directConnectionProcessor) { - log.LogIfError(hc.directConnectionProcessor.Close()) + if !check.IfNil(hc.mainDirectConnectionProcessor) { + log.LogIfError(hc.mainDirectConnectionProcessor.Close()) + } + + if !check.IfNil(hc.fullArchiveDirectConnectionProcessor) { + log.LogIfError(hc.fullArchiveDirectConnectionProcessor.Close()) } return nil diff --git a/factory/heartbeat/heartbeatV2Components_test.go b/factory/heartbeat/heartbeatV2Components_test.go index 46587997ecf..5862add074c 100644 --- a/factory/heartbeat/heartbeatV2Components_test.go +++ b/factory/heartbeat/heartbeatV2Components_test.go @@ -65,7 +65,8 @@ func createMockHeartbeatV2ComponentsFactoryArgs() heartbeatComp.ArgHeartbeatV2Co BlockChain: &testscommon.ChainHandlerStub{}, }, NetworkComponents: &testsMocks.NetworkComponentsStub{ - Messenger: &p2pmocks.MessengerStub{}, + Messenger: &p2pmocks.MessengerStub{}, + FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{}, }, CryptoComponents: &testsMocks.CryptoComponentsStub{ PrivKey: &cryptoMocks.PrivateKeyStub{}, diff --git a/factory/interface.go b/factory/interface.go index 2d82d5ab86a..3038d28806c 100644 --- a/factory/interface.go +++ b/factory/interface.go @@ -247,6 +247,9 @@ type 
NetworkComponentsHolder interface { PreferredPeersHolderHandler() PreferredPeersHolderHandler PeersRatingHandler() p2p.PeersRatingHandler PeersRatingMonitor() p2p.PeersRatingMonitor + FullArchiveNetworkMessenger() p2p.Messenger + FullArchivePeersRatingHandler() p2p.PeersRatingHandler + FullArchivePeersRatingMonitor() p2p.PeersRatingMonitor IsInterfaceNil() bool } diff --git a/factory/mock/networkComponentsMock.go b/factory/mock/networkComponentsMock.go index fb194918ff7..04e89f19067 100644 --- a/factory/mock/networkComponentsMock.go +++ b/factory/mock/networkComponentsMock.go @@ -8,13 +8,16 @@ import ( // NetworkComponentsMock - type NetworkComponentsMock struct { - Messenger p2p.Messenger - InputAntiFlood factory.P2PAntifloodHandler - OutputAntiFlood factory.P2PAntifloodHandler - PeerBlackList process.PeerBlackListCacher - PreferredPeersHolder factory.PreferredPeersHolderHandler - PeersRatingHandlerField p2p.PeersRatingHandler - PeersRatingMonitorField p2p.PeersRatingMonitor + Messenger p2p.Messenger + InputAntiFlood factory.P2PAntifloodHandler + OutputAntiFlood factory.P2PAntifloodHandler + PeerBlackList process.PeerBlackListCacher + PreferredPeersHolder factory.PreferredPeersHolderHandler + PeersRatingHandlerField p2p.PeersRatingHandler + PeersRatingMonitorField p2p.PeersRatingMonitor + FullArchiveNetworkMessengerField p2p.Messenger + FullArchivePeersRatingHandlerField p2p.PeersRatingHandler + FullArchivePeersRatingMonitorField p2p.PeersRatingMonitor } // PubKeyCacher - @@ -77,6 +80,21 @@ func (ncm *NetworkComponentsMock) PeersRatingMonitor() p2p.PeersRatingMonitor { return ncm.PeersRatingMonitorField } +// FullArchiveNetworkMessenger - +func (ncm *NetworkComponentsMock) FullArchiveNetworkMessenger() p2p.Messenger { + return ncm.FullArchiveNetworkMessengerField +} + +// FullArchivePeersRatingHandler - +func (ncm *NetworkComponentsMock) FullArchivePeersRatingHandler() p2p.PeersRatingHandler { + return ncm.FullArchivePeersRatingHandlerField +} + +// 
FullArchivePeersRatingMonitor - +func (ncm *NetworkComponentsMock) FullArchivePeersRatingMonitor() p2p.PeersRatingMonitor { + return ncm.FullArchivePeersRatingMonitorField +} + // IsInterfaceNil - func (ncm *NetworkComponentsMock) IsInterfaceNil() bool { return ncm == nil diff --git a/factory/network/networkComponentsHandler.go b/factory/network/networkComponentsHandler.go index 40a3be8dd6f..2afd33f6ede 100644 --- a/factory/network/networkComponentsHandler.go +++ b/factory/network/networkComponentsHandler.go @@ -217,7 +217,6 @@ func (mnc *managedNetworkComponents) PeersRatingMonitor() p2p.PeersRatingMonitor } // FullArchiveNetworkMessenger returns the p2p messenger of the full archive network -// TODO[Sorin]: add these new methods into the interface func (mnc *managedNetworkComponents) FullArchiveNetworkMessenger() p2p.Messenger { mnc.mutNetworkComponents.RLock() defer mnc.mutNetworkComponents.RUnlock() diff --git a/heartbeat/sender/baseSender.go b/heartbeat/sender/baseSender.go index cf7a7787c1f..a1c6a1664a2 100644 --- a/heartbeat/sender/baseSender.go +++ b/heartbeat/sender/baseSender.go @@ -19,7 +19,8 @@ const maxThresholdBetweenSends = 1.00 // 100% // argBaseSender represents the arguments for base sender type argBaseSender struct { - messenger heartbeat.P2PMessenger + mainMessenger heartbeat.P2PMessenger + fullArchiveMessenger heartbeat.P2PMessenger marshaller marshal.Marshalizer topic string timeBetweenSends time.Duration @@ -31,7 +32,8 @@ type argBaseSender struct { type baseSender struct { timerHandler - messenger heartbeat.P2PMessenger + mainMessenger heartbeat.P2PMessenger + fullArchiveMessenger heartbeat.P2PMessenger marshaller marshal.Marshalizer topic string timeBetweenSends time.Duration @@ -45,7 +47,8 @@ type baseSender struct { func createBaseSender(args argBaseSender) baseSender { bs := baseSender{ - messenger: args.messenger, + mainMessenger: args.mainMessenger, + fullArchiveMessenger: args.fullArchiveMessenger, marshaller: args.marshaller, topic: 
args.topic, timeBetweenSends: args.timeBetweenSends, @@ -64,8 +67,11 @@ func createBaseSender(args argBaseSender) baseSender { } func checkBaseSenderArgs(args argBaseSender) error { - if check.IfNil(args.messenger) { - return heartbeat.ErrNilMessenger + if check.IfNil(args.mainMessenger) { + return fmt.Errorf("%w for main", heartbeat.ErrNilMessenger) + } + if check.IfNil(args.fullArchiveMessenger) { + return fmt.Errorf("%w for full archive", heartbeat.ErrNilMessenger) } if check.IfNil(args.marshaller) { return heartbeat.ErrNilMarshaller diff --git a/heartbeat/sender/baseSender_test.go b/heartbeat/sender/baseSender_test.go index dc19139fe29..0061f1aeea4 100644 --- a/heartbeat/sender/baseSender_test.go +++ b/heartbeat/sender/baseSender_test.go @@ -13,7 +13,8 @@ import ( func createMockBaseArgs() argBaseSender { return argBaseSender{ - messenger: &p2pmocks.MessengerStub{}, + mainMessenger: &p2pmocks.MessengerStub{}, + fullArchiveMessenger: &p2pmocks.MessengerStub{}, marshaller: &marshallerMock.MarshalizerMock{}, topic: "topic", timeBetweenSends: time.Second, diff --git a/heartbeat/sender/bootstrapSender.go b/heartbeat/sender/bootstrapSender.go index 107eaedd93b..0872412ddda 100644 --- a/heartbeat/sender/bootstrapSender.go +++ b/heartbeat/sender/bootstrapSender.go @@ -12,7 +12,8 @@ import ( // ArgBootstrapSender represents the arguments for the bootstrap bootstrapSender type ArgBootstrapSender struct { - Messenger heartbeat.P2PMessenger + MainMessenger heartbeat.P2PMessenger + FullArchiveMessenger heartbeat.P2PMessenger Marshaller marshal.Marshalizer HeartbeatTopic string HeartbeatTimeBetweenSends time.Duration @@ -39,7 +40,8 @@ type bootstrapSender struct { func NewBootstrapSender(args ArgBootstrapSender) (*bootstrapSender, error) { hbs, err := newHeartbeatSender(argHeartbeatSender{ argBaseSender: argBaseSender{ - messenger: args.Messenger, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, marshaller: args.Marshaller, topic: 
args.HeartbeatTopic, timeBetweenSends: args.HeartbeatTimeBetweenSends, diff --git a/heartbeat/sender/bootstrapSender_test.go b/heartbeat/sender/bootstrapSender_test.go index 1f9dd524940..8d78e04caf4 100644 --- a/heartbeat/sender/bootstrapSender_test.go +++ b/heartbeat/sender/bootstrapSender_test.go @@ -7,7 +7,6 @@ import ( "time" "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/heartbeat" "github.com/multiversx/mx-chain-go/heartbeat/mock" "github.com/multiversx/mx-chain-go/testscommon" @@ -19,7 +18,8 @@ import ( func createMockBootstrapSenderArgs() ArgBootstrapSender { return ArgBootstrapSender{ - Messenger: &p2pmocks.MessengerStub{}, + MainMessenger: &p2pmocks.MessengerStub{}, + FullArchiveMessenger: &p2pmocks.MessengerStub{}, Marshaller: &marshallerMock.MarshalizerMock{}, HeartbeatTopic: "hb-topic", HeartbeatTimeBetweenSends: time.Second, @@ -40,15 +40,25 @@ func createMockBootstrapSenderArgs() ArgBootstrapSender { func TestNewBootstrapSender(t *testing.T) { t.Parallel() - t.Run("nil peer messenger should error", func(t *testing.T) { + t.Run("nil main messenger should error", func(t *testing.T) { t.Parallel() args := createMockBootstrapSenderArgs() - args.Messenger = nil + args.MainMessenger = nil senderInstance, err := NewBootstrapSender(args) assert.Nil(t, senderInstance) - assert.Equal(t, heartbeat.ErrNilMessenger, err) + assert.True(t, errors.Is(err, heartbeat.ErrNilMessenger)) + }) + t.Run("nil full archive messenger should error", func(t *testing.T) { + t.Parallel() + + args := createMockBootstrapSenderArgs() + args.FullArchiveMessenger = nil + senderInstance, err := NewBootstrapSender(args) + + assert.Nil(t, senderInstance) + assert.True(t, errors.Is(err, heartbeat.ErrNilMessenger)) }) t.Run("nil marshaller should error", func(t *testing.T) { t.Parallel() @@ -182,7 +192,7 @@ func TestNewBootstrapSender(t *testing.T) { args := createMockBootstrapSenderArgs() 
senderInstance, err := NewBootstrapSender(args) - assert.False(t, check.IfNil(senderInstance)) + assert.NotNil(t, senderInstance) assert.Nil(t, err) }) } @@ -202,3 +212,13 @@ func TestBootstrapSender_Close(t *testing.T) { err := senderInstance.Close() assert.Nil(t, err) } + +func TestBootstrapSender_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var senderInstance *bootstrapSender + assert.True(t, senderInstance.IsInterfaceNil()) + + senderInstance, _ = NewBootstrapSender(createMockBootstrapSenderArgs()) + assert.False(t, senderInstance.IsInterfaceNil()) +} diff --git a/heartbeat/sender/commonPeerAuthenticationSender.go b/heartbeat/sender/commonPeerAuthenticationSender.go index f1cf2e41eed..c89f0899261 100644 --- a/heartbeat/sender/commonPeerAuthenticationSender.go +++ b/heartbeat/sender/commonPeerAuthenticationSender.go @@ -40,12 +40,12 @@ func (cpas *commonPeerAuthenticationSender) generateMessageBytes( msg.Payload = payloadBytes if p2pSkBytes != nil { - msg.PayloadSignature, err = cpas.messenger.SignUsingPrivateKey(p2pSkBytes, payloadBytes) + msg.PayloadSignature, err = cpas.mainMessenger.SignUsingPrivateKey(p2pSkBytes, payloadBytes) if err != nil { return nil, isTriggered, 0, err } } else { - msg.PayloadSignature, err = cpas.messenger.Sign(payloadBytes) + msg.PayloadSignature, err = cpas.mainMessenger.Sign(payloadBytes) if err != nil { return nil, isTriggered, 0, err } diff --git a/heartbeat/sender/heartbeatSender.go b/heartbeat/sender/heartbeatSender.go index 77c52cd96ee..bdf6c5c12d1 100644 --- a/heartbeat/sender/heartbeatSender.go +++ b/heartbeat/sender/heartbeatSender.go @@ -112,7 +112,8 @@ func (sender *heartbeatSender) execute() error { return err } - sender.messenger.Broadcast(sender.topic, msgBytes) + sender.mainMessenger.Broadcast(sender.topic, msgBytes) + sender.fullArchiveMessenger.Broadcast(sender.topic, msgBytes) return nil } diff --git a/heartbeat/sender/heartbeatSenderFactory.go b/heartbeat/sender/heartbeatSenderFactory.go index 
487bd623924..d254eeb5c02 100644 --- a/heartbeat/sender/heartbeatSenderFactory.go +++ b/heartbeat/sender/heartbeatSenderFactory.go @@ -39,7 +39,8 @@ func createHeartbeatSender(args argHeartbeatSenderFactory) (heartbeatSenderHandl func createRegularHeartbeatSender(args argHeartbeatSenderFactory) (*heartbeatSender, error) { argsSender := argHeartbeatSender{ argBaseSender: argBaseSender{ - messenger: args.messenger, + mainMessenger: args.mainMessenger, + fullArchiveMessenger: args.fullArchiveMessenger, marshaller: args.marshaller, topic: args.topic, timeBetweenSends: args.timeBetweenSends, @@ -63,7 +64,8 @@ func createRegularHeartbeatSender(args argHeartbeatSenderFactory) (*heartbeatSen func createMultikeyHeartbeatSender(args argHeartbeatSenderFactory) (*multikeyHeartbeatSender, error) { argsSender := argMultikeyHeartbeatSender{ argBaseSender: argBaseSender{ - messenger: args.messenger, + mainMessenger: args.mainMessenger, + fullArchiveMessenger: args.fullArchiveMessenger, marshaller: args.marshaller, topic: args.topic, timeBetweenSends: args.timeBetweenSends, diff --git a/heartbeat/sender/heartbeatSender_test.go b/heartbeat/sender/heartbeatSender_test.go index e4fd2c4bc3f..ef7d59f3544 100644 --- a/heartbeat/sender/heartbeatSender_test.go +++ b/heartbeat/sender/heartbeatSender_test.go @@ -7,7 +7,6 @@ import ( "time" "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/heartbeat" @@ -36,16 +35,27 @@ func createMockHeartbeatSenderArgs(argBase argBaseSender) argHeartbeatSender { func TestNewHeartbeatSender(t *testing.T) { t.Parallel() - t.Run("nil peer messenger should error", func(t *testing.T) { + t.Run("nil main messenger should error", func(t *testing.T) { t.Parallel() argBase := createMockBaseArgs() - argBase.messenger = nil + argBase.mainMessenger = nil args := 
createMockHeartbeatSenderArgs(argBase) senderInstance, err := newHeartbeatSender(args) assert.Nil(t, senderInstance) - assert.Equal(t, heartbeat.ErrNilMessenger, err) + assert.True(t, errors.Is(err, heartbeat.ErrNilMessenger)) + }) + t.Run("nil full archive messenger should error", func(t *testing.T) { + t.Parallel() + + argBase := createMockBaseArgs() + argBase.fullArchiveMessenger = nil + args := createMockHeartbeatSenderArgs(argBase) + senderInstance, err := newHeartbeatSender(args) + + assert.Nil(t, senderInstance) + assert.True(t, errors.Is(err, heartbeat.ErrNilMessenger)) }) t.Run("nil marshaller should error", func(t *testing.T) { t.Parallel() @@ -101,7 +111,7 @@ func TestNewHeartbeatSender(t *testing.T) { args.privKey = nil senderInstance, err := newHeartbeatSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilPrivateKey, err) }) t.Run("nil redundancy handler should error", func(t *testing.T) { @@ -111,7 +121,7 @@ func TestNewHeartbeatSender(t *testing.T) { args.redundancyHandler = nil senderInstance, err := newHeartbeatSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilRedundancyHandler, err) }) t.Run("version number too long should error", func(t *testing.T) { @@ -205,7 +215,7 @@ func TestNewHeartbeatSender(t *testing.T) { args := createMockHeartbeatSenderArgs(createMockBaseArgs()) senderInstance, err := newHeartbeatSender(args) - assert.False(t, check.IfNil(senderInstance)) + assert.NotNil(t, senderInstance) assert.Nil(t, err) }) } @@ -278,7 +288,7 @@ func TestHeartbeatSender_execute(t *testing.T) { args := createMockHeartbeatSenderArgs(argsBase) senderInstance, _ := newHeartbeatSender(args) - assert.False(t, check.IfNil(senderInstance)) + assert.NotNil(t, senderInstance) err := senderInstance.execute() assert.Equal(t, expectedErr, err) @@ -301,7 +311,7 @@ func TestHeartbeatSender_execute(t *testing.T) { args := 
createMockHeartbeatSenderArgs(argsBase) senderInstance, _ := newHeartbeatSender(args) - assert.False(t, check.IfNil(senderInstance)) + assert.NotNil(t, senderInstance) err := senderInstance.execute() assert.Equal(t, expectedErr, err) @@ -312,7 +322,20 @@ func TestHeartbeatSender_execute(t *testing.T) { providedNumTrieNodesSynced := 100 argsBase := createMockBaseArgs() broadcastCalled := false - argsBase.messenger = &p2pmocks.MessengerStub{ + argsBase.mainMessenger = &p2pmocks.MessengerStub{ + BroadcastCalled: func(topic string, buff []byte) { + assert.Equal(t, argsBase.topic, topic) + recoveredMessage := &heartbeat.HeartbeatV2{} + err := argsBase.marshaller.Unmarshal(recoveredMessage, buff) + assert.Nil(t, err) + pk := argsBase.privKey.GeneratePublic() + pkBytes, _ := pk.ToByteArray() + assert.Equal(t, pkBytes, recoveredMessage.Pubkey) + assert.Equal(t, uint64(providedNumTrieNodesSynced), recoveredMessage.NumTrieNodesSynced) + broadcastCalled = true + }, + } + argsBase.fullArchiveMessenger = &p2pmocks.MessengerStub{ BroadcastCalled: func(topic string, buff []byte) { assert.Equal(t, argsBase.topic, topic) recoveredMessage := &heartbeat.HeartbeatV2{} @@ -343,7 +366,7 @@ func TestHeartbeatSender_execute(t *testing.T) { } senderInstance, _ := newHeartbeatSender(args) - assert.False(t, check.IfNil(senderInstance)) + assert.NotNil(t, senderInstance) err := senderInstance.execute() assert.Nil(t, err) @@ -369,3 +392,14 @@ func TestHeartbeatSender_GetCurrentNodeType(t *testing.T) { assert.Equal(t, string(common.EligibleList), peerType) assert.Equal(t, core.FullHistoryObserver, subType) } + +func TestHeartbeatSender_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var senderInstance *heartbeatSender + assert.True(t, senderInstance.IsInterfaceNil()) + + args := createMockHeartbeatSenderArgs(createMockBaseArgs()) + senderInstance, _ = newHeartbeatSender(args) + assert.False(t, senderInstance.IsInterfaceNil()) +} diff --git a/heartbeat/sender/multikeyHeartbeatSender.go 
b/heartbeat/sender/multikeyHeartbeatSender.go index 6e147dd8e47..e97c64e4ea4 100644 --- a/heartbeat/sender/multikeyHeartbeatSender.go +++ b/heartbeat/sender/multikeyHeartbeatSender.go @@ -130,7 +130,8 @@ func (sender *multikeyHeartbeatSender) execute() error { return err } - sender.messenger.Broadcast(sender.topic, buff) + sender.mainMessenger.Broadcast(sender.topic, buff) + sender.fullArchiveMessenger.Broadcast(sender.topic, buff) return sender.sendMultiKeysInfo() } @@ -184,7 +185,8 @@ func (sender *multikeyHeartbeatSender) sendMessageForKey(pkBytes []byte) error { return err } - sender.messenger.BroadcastUsingPrivateKey(sender.topic, buff, pid, p2pSk) + sender.mainMessenger.BroadcastUsingPrivateKey(sender.topic, buff, pid, p2pSk) + sender.fullArchiveMessenger.BroadcastUsingPrivateKey(sender.topic, buff, pid, p2pSk) return nil } diff --git a/heartbeat/sender/multikeyHeartbeatSender_test.go b/heartbeat/sender/multikeyHeartbeatSender_test.go index fec7a216720..74c1f0014d9 100644 --- a/heartbeat/sender/multikeyHeartbeatSender_test.go +++ b/heartbeat/sender/multikeyHeartbeatSender_test.go @@ -7,7 +7,6 @@ import ( "time" "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" crypto "github.com/multiversx/mx-chain-crypto-go" "github.com/multiversx/mx-chain-go/common" @@ -50,15 +49,25 @@ func createMockMultikeyHeartbeatSenderArgs(argBase argBaseSender) argMultikeyHea func TestNewMultikeyHeartbeatSender(t *testing.T) { t.Parallel() - t.Run("nil messenger should error", func(t *testing.T) { + t.Run("nil main messenger should error", func(t *testing.T) { t.Parallel() args := createMockMultikeyHeartbeatSenderArgs(createMockBaseArgs()) - args.messenger = nil + args.mainMessenger = nil senderInstance, err := newMultikeyHeartbeatSender(args) - assert.True(t, check.IfNil(senderInstance)) - assert.Equal(t, heartbeat.ErrNilMessenger, err) + assert.Nil(t, senderInstance) + assert.True(t, 
errors.Is(err, heartbeat.ErrNilMessenger)) + }) + t.Run("nil full archive messenger should error", func(t *testing.T) { + t.Parallel() + + args := createMockMultikeyHeartbeatSenderArgs(createMockBaseArgs()) + args.fullArchiveMessenger = nil + + senderInstance, err := newMultikeyHeartbeatSender(args) + assert.Nil(t, senderInstance) + assert.True(t, errors.Is(err, heartbeat.ErrNilMessenger)) }) t.Run("nil marshaller should error", func(t *testing.T) { t.Parallel() @@ -67,7 +76,7 @@ func TestNewMultikeyHeartbeatSender(t *testing.T) { args.marshaller = nil senderInstance, err := newMultikeyHeartbeatSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilMarshaller, err) }) t.Run("empty topic should error", func(t *testing.T) { @@ -79,7 +88,7 @@ func TestNewMultikeyHeartbeatSender(t *testing.T) { args := createMockMultikeyHeartbeatSenderArgs(argsBase) senderInstance, err := newMultikeyHeartbeatSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrEmptySendTopic, err) }) t.Run("invalid time between sends should error", func(t *testing.T) { @@ -91,7 +100,7 @@ func TestNewMultikeyHeartbeatSender(t *testing.T) { args := createMockMultikeyHeartbeatSenderArgs(argsBase) senderInstance, err := newMultikeyHeartbeatSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) assert.True(t, strings.Contains(err.Error(), "timeBetweenSends")) assert.False(t, strings.Contains(err.Error(), "timeBetweenSendsWhenError")) @@ -105,7 +114,7 @@ func TestNewMultikeyHeartbeatSender(t *testing.T) { args := createMockMultikeyHeartbeatSenderArgs(argsBase) senderInstance, err := newMultikeyHeartbeatSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) assert.True(t, 
strings.Contains(err.Error(), "timeBetweenSendsWhenError")) }) @@ -138,7 +147,7 @@ func TestNewMultikeyHeartbeatSender(t *testing.T) { args.peerTypeProvider = nil senderInstance, err := newMultikeyHeartbeatSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilPeerTypeProvider, err) }) t.Run("version number too long should error", func(t *testing.T) { @@ -192,7 +201,7 @@ func TestNewMultikeyHeartbeatSender(t *testing.T) { args.currentBlockProvider = nil senderInstance, err := newMultikeyHeartbeatSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilCurrentBlockProvider, err) }) t.Run("nil managed peers holder should error", func(t *testing.T) { @@ -202,7 +211,7 @@ func TestNewMultikeyHeartbeatSender(t *testing.T) { args.managedPeersHolder = nil senderInstance, err := newMultikeyHeartbeatSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilManagedPeersHolder, err) }) t.Run("nil shard coordinator should error", func(t *testing.T) { @@ -212,7 +221,7 @@ func TestNewMultikeyHeartbeatSender(t *testing.T) { args.shardCoordinator = nil senderInstance, err := newMultikeyHeartbeatSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilShardCoordinator, err) }) @@ -222,7 +231,7 @@ func TestNewMultikeyHeartbeatSender(t *testing.T) { args := createMockMultikeyHeartbeatSenderArgs(createMockBaseArgs()) senderInstance, err := newMultikeyHeartbeatSender(args) - assert.False(t, check.IfNil(senderInstance)) + assert.NotNil(t, senderInstance) assert.Nil(t, err) }) } @@ -287,13 +296,22 @@ func TestMultikeyHeartbeatSender_execute(t *testing.T) { t.Parallel() args := createMockMultikeyHeartbeatSenderArgs(createMockBaseArgs()) - broadcastCalled := false - recordedMessages := make(map[core.PeerID][][]byte) - 
args.messenger = &p2pmocks.MessengerStub{ + mainBroadcastCalled := false + fullArchiveBroadcastCalled := false + recordedMessagesFromMain := make(map[core.PeerID][][]byte) + recordedMessagesFromFullArchive := make(map[core.PeerID][][]byte) + args.mainMessenger = &p2pmocks.MessengerStub{ + BroadcastCalled: func(topic string, buff []byte) { + assert.Equal(t, args.topic, topic) + recordedMessagesFromMain[args.mainMessenger.ID()] = append(recordedMessagesFromMain[args.mainMessenger.ID()], buff) + mainBroadcastCalled = true + }, + } + args.fullArchiveMessenger = &p2pmocks.MessengerStub{ BroadcastCalled: func(topic string, buff []byte) { assert.Equal(t, args.topic, topic) - recordedMessages[args.messenger.ID()] = append(recordedMessages[args.messenger.ID()], buff) - broadcastCalled = true + recordedMessagesFromFullArchive[args.mainMessenger.ID()] = append(recordedMessagesFromFullArchive[args.mainMessenger.ID()], buff) + fullArchiveBroadcastCalled = true }, } @@ -301,24 +319,38 @@ func TestMultikeyHeartbeatSender_execute(t *testing.T) { err := senderInstance.execute() assert.Nil(t, err) - assert.True(t, broadcastCalled) - assert.Equal(t, 1, len(recordedMessages)) - checkRecordedMessages(t, recordedMessages, args, args.versionNumber, args.nodeDisplayName, args.messenger.ID(), core.FullHistoryObserver) + assert.True(t, mainBroadcastCalled) + assert.True(t, fullArchiveBroadcastCalled) + assert.Equal(t, 1, len(recordedMessagesFromMain)) + checkRecordedMessages(t, recordedMessagesFromMain, args, args.versionNumber, args.nodeDisplayName, args.mainMessenger.ID(), core.FullHistoryObserver) + assert.Equal(t, 1, len(recordedMessagesFromFullArchive)) + checkRecordedMessages(t, recordedMessagesFromFullArchive, args, args.versionNumber, args.nodeDisplayName, args.mainMessenger.ID(), core.FullHistoryObserver) assert.Equal(t, uint64(1), args.currentBlockProvider.GetCurrentBlockHeader().GetNonce()) }) t.Run("should send the current node heartbeat and some multikey heartbeats", func(t 
*testing.T) { t.Parallel() args := createMockMultikeyHeartbeatSenderArgs(createMockBaseArgs()) - recordedMessages := make(map[core.PeerID][][]byte) - args.messenger = &p2pmocks.MessengerStub{ + recordedMainMessages := make(map[core.PeerID][][]byte) + recordedFullArchiveMessages := make(map[core.PeerID][][]byte) + args.mainMessenger = &p2pmocks.MessengerStub{ BroadcastCalled: func(topic string, buff []byte) { assert.Equal(t, args.topic, topic) - recordedMessages[args.messenger.ID()] = append(recordedMessages[args.messenger.ID()], buff) + recordedMainMessages[args.mainMessenger.ID()] = append(recordedMainMessages[args.mainMessenger.ID()], buff) }, BroadcastUsingPrivateKeyCalled: func(topic string, buff []byte, pid core.PeerID, skBytes []byte) { assert.Equal(t, args.topic, topic) - recordedMessages[pid] = append(recordedMessages[pid], buff) + recordedMainMessages[pid] = append(recordedMainMessages[pid], buff) + }, + } + args.fullArchiveMessenger = &p2pmocks.MessengerStub{ + BroadcastCalled: func(topic string, buff []byte) { + assert.Equal(t, args.topic, topic) + recordedFullArchiveMessages[args.mainMessenger.ID()] = append(recordedFullArchiveMessages[args.mainMessenger.ID()], buff) + }, + BroadcastUsingPrivateKeyCalled: func(topic string, buff []byte, pid core.PeerID, skBytes []byte) { + assert.Equal(t, args.topic, topic) + recordedFullArchiveMessages[pid] = append(recordedFullArchiveMessages[pid], buff) }, } args.managedPeersHolder = &testscommon.ManagedPeersHolderStub{ @@ -358,18 +390,19 @@ func TestMultikeyHeartbeatSender_execute(t *testing.T) { err := senderInstance.execute() assert.Nil(t, err) - assert.Equal(t, 4, len(recordedMessages)) // current pid, aa, bb, cc + assert.Equal(t, 4, len(recordedMainMessages)) // current pid, aa, bb, cc + assert.Equal(t, 4, len(recordedFullArchiveMessages)) // current pid, aa, bb, cc checkRecordedMessages(t, - recordedMessages, + recordedMainMessages, args, args.versionNumber, args.nodeDisplayName, - args.messenger.ID(), + 
args.mainMessenger.ID(), core.FullHistoryObserver) checkRecordedMessages(t, - recordedMessages, + recordedMainMessages, args, args.baseVersionNumber+"/aa_machineID", "aa_name", @@ -377,7 +410,7 @@ func TestMultikeyHeartbeatSender_execute(t *testing.T) { core.RegularPeer) checkRecordedMessages(t, - recordedMessages, + recordedMainMessages, args, args.baseVersionNumber+"/bb_machineID", "bb_name", @@ -385,7 +418,39 @@ func TestMultikeyHeartbeatSender_execute(t *testing.T) { core.RegularPeer) checkRecordedMessages(t, - recordedMessages, + recordedMainMessages, + args, + args.baseVersionNumber+"/cc_machineID", + "cc_name", + "cc_pid", + core.RegularPeer) + + checkRecordedMessages(t, + recordedFullArchiveMessages, + args, + args.versionNumber, + args.nodeDisplayName, + args.mainMessenger.ID(), + core.FullHistoryObserver) + + checkRecordedMessages(t, + recordedFullArchiveMessages, + args, + args.baseVersionNumber+"/aa_machineID", + "aa_name", + "aa_pid", + core.RegularPeer) + + checkRecordedMessages(t, + recordedFullArchiveMessages, + args, + args.baseVersionNumber+"/bb_machineID", + "bb_name", + "bb_pid", + core.RegularPeer) + + checkRecordedMessages(t, + recordedFullArchiveMessages, args, args.baseVersionNumber+"/cc_machineID", "cc_name", @@ -446,6 +511,17 @@ func TestMultikeyHeartbeatSender_generateMessageBytes(t *testing.T) { }) } +func TestMultikeyHeartbeatSender_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var senderInstance *multikeyHeartbeatSender + assert.True(t, senderInstance.IsInterfaceNil()) + + args := createMockMultikeyHeartbeatSenderArgs(createMockBaseArgs()) + senderInstance, _ = newMultikeyHeartbeatSender(args) + assert.False(t, senderInstance.IsInterfaceNil()) +} + func checkRecordedMessages( tb testing.TB, recordedMessages map[core.PeerID][][]byte, diff --git a/heartbeat/sender/multikeyPeerAuthenticationSender.go b/heartbeat/sender/multikeyPeerAuthenticationSender.go index ac6d03b849b..95401478c05 100644 --- 
a/heartbeat/sender/multikeyPeerAuthenticationSender.go +++ b/heartbeat/sender/multikeyPeerAuthenticationSender.go @@ -194,7 +194,8 @@ func (sender *multikeyPeerAuthenticationSender) sendData(pkBytes []byte, data [] log.Error("could not get identity for pk", "pk", hex.EncodeToString(pkBytes), "error", err) return } - sender.messenger.BroadcastUsingPrivateKey(sender.topic, data, pid, p2pSk) + sender.mainMessenger.BroadcastUsingPrivateKey(sender.topic, data, pid, p2pSk) + sender.fullArchiveMessenger.BroadcastUsingPrivateKey(sender.topic, data, pid, p2pSk) nextTimeToCheck, err := sender.managedPeersHolder.GetNextPeerAuthenticationTime(pkBytes) if err != nil { diff --git a/heartbeat/sender/multikeyPeerAuthenticationSender_test.go b/heartbeat/sender/multikeyPeerAuthenticationSender_test.go index 37107f3b6e3..d28c31a8d62 100644 --- a/heartbeat/sender/multikeyPeerAuthenticationSender_test.go +++ b/heartbeat/sender/multikeyPeerAuthenticationSender_test.go @@ -8,7 +8,6 @@ import ( "time" "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data/batch" crypto "github.com/multiversx/mx-chain-crypto-go" "github.com/multiversx/mx-chain-crypto-go/signing" @@ -144,7 +143,7 @@ func createMockMultikeyPeerAuthenticationSenderArgsSemiIntegrationTests( }, } - args.messenger = messenger + args.mainMessenger = messenger return args, messenger } @@ -152,17 +151,29 @@ func createMockMultikeyPeerAuthenticationSenderArgsSemiIntegrationTests( func TestNewMultikeyPeerAuthenticationSender(t *testing.T) { t.Parallel() - t.Run("nil peer messenger should error", func(t *testing.T) { + t.Run("nil main messenger should error", func(t *testing.T) { t.Parallel() argsBase := createMockBaseArgs() - argsBase.messenger = nil + argsBase.mainMessenger = nil args := createMockMultikeyPeerAuthenticationSenderArgs(argsBase) senderInstance, err := newMultikeyPeerAuthenticationSender(args) - assert.True(t, 
check.IfNil(senderInstance)) - assert.Equal(t, heartbeat.ErrNilMessenger, err) + assert.Nil(t, senderInstance) + assert.True(t, errors.Is(err, heartbeat.ErrNilMessenger)) + }) + t.Run("nil full archive messenger should error", func(t *testing.T) { + t.Parallel() + + argsBase := createMockBaseArgs() + argsBase.fullArchiveMessenger = nil + + args := createMockMultikeyPeerAuthenticationSenderArgs(argsBase) + senderInstance, err := newMultikeyPeerAuthenticationSender(args) + + assert.Nil(t, senderInstance) + assert.True(t, errors.Is(err, heartbeat.ErrNilMessenger)) }) t.Run("nil marshaller should error", func(t *testing.T) { t.Parallel() @@ -173,7 +184,7 @@ func TestNewMultikeyPeerAuthenticationSender(t *testing.T) { args := createMockMultikeyPeerAuthenticationSenderArgs(argsBase) senderInstance, err := newMultikeyPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilMarshaller, err) }) t.Run("empty topic should error", func(t *testing.T) { @@ -185,7 +196,7 @@ func TestNewMultikeyPeerAuthenticationSender(t *testing.T) { args := createMockMultikeyPeerAuthenticationSenderArgs(argsBase) senderInstance, err := newMultikeyPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrEmptySendTopic, err) }) t.Run("invalid time between sends should error", func(t *testing.T) { @@ -197,7 +208,7 @@ func TestNewMultikeyPeerAuthenticationSender(t *testing.T) { args := createMockMultikeyPeerAuthenticationSenderArgs(argsBase) senderInstance, err := newMultikeyPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) assert.True(t, strings.Contains(err.Error(), "timeBetweenSends")) assert.False(t, strings.Contains(err.Error(), "timeBetweenSendsWhenError")) @@ -211,7 +222,7 @@ func 
TestNewMultikeyPeerAuthenticationSender(t *testing.T) { args := createMockMultikeyPeerAuthenticationSenderArgs(argsBase) senderInstance, err := newMultikeyPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) assert.True(t, strings.Contains(err.Error(), "timeBetweenSendsWhenError")) }) @@ -244,7 +255,7 @@ func TestNewMultikeyPeerAuthenticationSender(t *testing.T) { args.nodesCoordinator = nil senderInstance, err := newMultikeyPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilNodesCoordinator, err) }) t.Run("nil peer signature handler should error", func(t *testing.T) { @@ -254,7 +265,7 @@ func TestNewMultikeyPeerAuthenticationSender(t *testing.T) { args.peerSignatureHandler = nil senderInstance, err := newMultikeyPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilPeerSignatureHandler, err) }) t.Run("nil hardfork trigger should error", func(t *testing.T) { @@ -264,7 +275,7 @@ func TestNewMultikeyPeerAuthenticationSender(t *testing.T) { args.hardforkTrigger = nil senderInstance, err := newMultikeyPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilHardforkTrigger, err) }) t.Run("invalid time between hardforks should error", func(t *testing.T) { @@ -274,7 +285,7 @@ func TestNewMultikeyPeerAuthenticationSender(t *testing.T) { args.hardforkTimeBetweenSends = time.Second - time.Nanosecond senderInstance, err := newMultikeyPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) assert.True(t, strings.Contains(err.Error(), "hardforkTimeBetweenSends")) }) @@ -285,7 +296,7 @@ 
func TestNewMultikeyPeerAuthenticationSender(t *testing.T) { args.managedPeersHolder = nil senderInstance, err := newMultikeyPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilManagedPeersHolder, err) }) t.Run("invalid time between checks should error", func(t *testing.T) { @@ -295,7 +306,7 @@ func TestNewMultikeyPeerAuthenticationSender(t *testing.T) { args.timeBetweenChecks = time.Second - time.Nanosecond senderInstance, err := newMultikeyPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) assert.True(t, strings.Contains(err.Error(), "timeBetweenChecks")) }) @@ -306,7 +317,7 @@ func TestNewMultikeyPeerAuthenticationSender(t *testing.T) { args.shardCoordinator = nil senderInstance, err := newMultikeyPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilShardCoordinator, err) }) t.Run("should work", func(t *testing.T) { @@ -315,7 +326,7 @@ func TestNewMultikeyPeerAuthenticationSender(t *testing.T) { args := createMockMultikeyPeerAuthenticationSenderArgs(createMockBaseArgs()) senderInstance, err := newMultikeyPeerAuthenticationSender(args) - assert.False(t, check.IfNil(senderInstance)) + assert.NotNil(t, senderInstance) assert.Nil(t, err) }) } @@ -648,6 +659,17 @@ func TestNewMultikeyPeerAuthenticationSender_Execute(t *testing.T) { }) } +func TestMultikeyPeerAuthenticationSender_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var senderInstance *multikeyPeerAuthenticationSender + assert.True(t, senderInstance.IsInterfaceNil()) + + args := createMockMultikeyPeerAuthenticationSenderArgs(createMockBaseArgs()) + senderInstance, _ = newMultikeyPeerAuthenticationSender(args) + assert.False(t, senderInstance.IsInterfaceNil()) +} + func testRecoveredMessages( tb testing.TB, args 
argMultikeyPeerAuthenticationSender, @@ -693,7 +715,7 @@ func testSingleMessage( errVerify := args.peerSignatureHandler.VerifyPeerSignature(recoveredMessage.Pubkey, core.PeerID(recoveredMessage.Pid), recoveredMessage.Signature) assert.Nil(tb, errVerify) - messenger := args.messenger.(*p2pmocks.MessengerStub) + messenger := args.mainMessenger.(*p2pmocks.MessengerStub) errVerify = messenger.Verify(recoveredMessage.Payload, core.PeerID(recoveredMessage.Pid), recoveredMessage.PayloadSignature) assert.Nil(tb, errVerify) diff --git a/heartbeat/sender/peerAuthenticationSender.go b/heartbeat/sender/peerAuthenticationSender.go index 6151177c8af..bdb223cf99e 100644 --- a/heartbeat/sender/peerAuthenticationSender.go +++ b/heartbeat/sender/peerAuthenticationSender.go @@ -120,15 +120,16 @@ func (sender *peerAuthenticationSender) execute() (error, bool) { return err, false } - data, isTriggered, msgTimestamp, err := sender.generateMessageBytes(pkBytes, sk, nil, sender.messenger.ID().Bytes()) + data, isTriggered, msgTimestamp, err := sender.generateMessageBytes(pkBytes, sk, nil, sender.mainMessenger.ID().Bytes()) if err != nil { return err, isTriggered } log.Debug("sending peer authentication message", - "public key", pkBytes, "pid", sender.messenger.ID().Pretty(), + "public key", pkBytes, "pid", sender.mainMessenger.ID().Pretty(), "timestamp", msgTimestamp) - sender.messenger.Broadcast(sender.topic, data) + sender.mainMessenger.Broadcast(sender.topic, data) + sender.fullArchiveMessenger.Broadcast(sender.topic, data) return nil, isTriggered } diff --git a/heartbeat/sender/peerAuthenticationSender_test.go b/heartbeat/sender/peerAuthenticationSender_test.go index 901ebf31d3e..69fc1625fc4 100644 --- a/heartbeat/sender/peerAuthenticationSender_test.go +++ b/heartbeat/sender/peerAuthenticationSender_test.go @@ -9,7 +9,6 @@ import ( "time" "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/check" 
"github.com/multiversx/mx-chain-core-go/data/batch" "github.com/multiversx/mx-chain-crypto-go" "github.com/multiversx/mx-chain-crypto-go/signing" @@ -71,17 +70,17 @@ func createMockPeerAuthenticationSenderArgsSemiIntegrationTests(baseArg argBaseS func TestNewPeerAuthenticationSender(t *testing.T) { t.Parallel() - t.Run("nil peer messenger should error", func(t *testing.T) { + t.Run("nil main messenger should error", func(t *testing.T) { t.Parallel() argsBase := createMockBaseArgs() - argsBase.messenger = nil + argsBase.mainMessenger = nil args := createMockPeerAuthenticationSenderArgs(argsBase) senderInstance, err := newPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) - assert.Equal(t, heartbeat.ErrNilMessenger, err) + assert.Nil(t, senderInstance) + assert.True(t, errors.Is(err, heartbeat.ErrNilMessenger)) }) t.Run("nil nodes coordinator should error", func(t *testing.T) { t.Parallel() @@ -90,7 +89,7 @@ func TestNewPeerAuthenticationSender(t *testing.T) { args.nodesCoordinator = nil senderInstance, err := newPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilNodesCoordinator, err) }) t.Run("nil peer signature handler should error", func(t *testing.T) { @@ -100,7 +99,7 @@ func TestNewPeerAuthenticationSender(t *testing.T) { args.peerSignatureHandler = nil senderInstance, err := newPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilPeerSignatureHandler, err) }) t.Run("nil private key should error", func(t *testing.T) { @@ -110,7 +109,7 @@ func TestNewPeerAuthenticationSender(t *testing.T) { args.privKey = nil senderInstance, err := newPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilPrivateKey, err) }) t.Run("nil marshaller should error", func(t *testing.T) { @@ -122,7 
+121,7 @@ func TestNewPeerAuthenticationSender(t *testing.T) { args := createMockPeerAuthenticationSenderArgs(argsBase) senderInstance, err := newPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilMarshaller, err) }) t.Run("empty topic should error", func(t *testing.T) { @@ -134,7 +133,7 @@ func TestNewPeerAuthenticationSender(t *testing.T) { args := createMockPeerAuthenticationSenderArgs(argsBase) senderInstance, err := newPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrEmptySendTopic, err) }) t.Run("nil redundancy handler should error", func(t *testing.T) { @@ -144,7 +143,7 @@ func TestNewPeerAuthenticationSender(t *testing.T) { args.redundancyHandler = nil senderInstance, err := newPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilRedundancyHandler, err) }) t.Run("invalid time between sends should error", func(t *testing.T) { @@ -156,7 +155,7 @@ func TestNewPeerAuthenticationSender(t *testing.T) { args := createMockPeerAuthenticationSenderArgs(argsBase) senderInstance, err := newPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) assert.True(t, strings.Contains(err.Error(), "timeBetweenSends")) assert.False(t, strings.Contains(err.Error(), "timeBetweenSendsWhenError")) @@ -170,7 +169,7 @@ func TestNewPeerAuthenticationSender(t *testing.T) { args := createMockPeerAuthenticationSenderArgs(argsBase) senderInstance, err := newPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) assert.True(t, strings.Contains(err.Error(), "timeBetweenSendsWhenError")) }) @@ 
-203,7 +202,7 @@ func TestNewPeerAuthenticationSender(t *testing.T) { args.hardforkTrigger = nil senderInstance, err := newPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.Equal(t, heartbeat.ErrNilHardforkTrigger, err) }) t.Run("invalid time between hardforks should error", func(t *testing.T) { @@ -213,7 +212,7 @@ func TestNewPeerAuthenticationSender(t *testing.T) { args.hardforkTimeBetweenSends = time.Second - time.Nanosecond senderInstance, err := newPeerAuthenticationSender(args) - assert.True(t, check.IfNil(senderInstance)) + assert.Nil(t, senderInstance) assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) assert.True(t, strings.Contains(err.Error(), "hardforkTimeBetweenSends")) }) @@ -223,7 +222,7 @@ func TestNewPeerAuthenticationSender(t *testing.T) { args := createMockPeerAuthenticationSenderArgs(createMockBaseArgs()) senderInstance, err := newPeerAuthenticationSender(args) - assert.False(t, check.IfNil(senderInstance)) + assert.NotNil(t, senderInstance) assert.Nil(t, err) }) } @@ -235,7 +234,7 @@ func TestPeerAuthenticationSender_execute(t *testing.T) { t.Parallel() argsBase := createMockBaseArgs() - argsBase.messenger = &p2pmocks.MessengerStub{ + argsBase.mainMessenger = &p2pmocks.MessengerStub{ SignCalled: func(payload []byte) ([]byte, error) { return nil, expectedErr }, @@ -255,7 +254,7 @@ func TestPeerAuthenticationSender_execute(t *testing.T) { t.Parallel() argsBase := createMockBaseArgs() - argsBase.messenger = &p2pmocks.MessengerStub{ + argsBase.mainMessenger = &p2pmocks.MessengerStub{ BroadcastCalled: func(topic string, buff []byte) { assert.Fail(t, "should have not called Messenger.BroadcastCalled") }, @@ -277,7 +276,7 @@ func TestPeerAuthenticationSender_execute(t *testing.T) { t.Parallel() baseArgs := createMockBaseArgs() - baseArgs.messenger = &p2pmocks.MessengerStub{ + baseArgs.mainMessenger = &p2pmocks.MessengerStub{ BroadcastCalled: func(topic string, buff []byte) 
{ assert.Fail(t, "should have not called Messenger.BroadcastCalled") }, @@ -299,7 +298,7 @@ func TestPeerAuthenticationSender_execute(t *testing.T) { numCalls := 0 argsBase := createMockBaseArgs() - argsBase.messenger = &p2pmocks.MessengerStub{ + argsBase.mainMessenger = &p2pmocks.MessengerStub{ BroadcastCalled: func(topic string, buff []byte) { assert.Fail(t, "should have not called Messenger.BroadcastCalled") }, @@ -325,11 +324,18 @@ func TestPeerAuthenticationSender_execute(t *testing.T) { t.Parallel() argsBase := createMockBaseArgs() - broadcastCalled := false - argsBase.messenger = &p2pmocks.MessengerStub{ + mainBroadcastCalled := false + fullArchiveBroadcastCalled := false + argsBase.mainMessenger = &p2pmocks.MessengerStub{ BroadcastCalled: func(topic string, buff []byte) { assert.Equal(t, argsBase.topic, topic) - broadcastCalled = true + mainBroadcastCalled = true + }, + } + argsBase.fullArchiveMessenger = &p2pmocks.MessengerStub{ + BroadcastCalled: func(topic string, buff []byte) { + assert.Equal(t, argsBase.topic, topic) + fullArchiveBroadcastCalled = true }, } @@ -338,7 +344,8 @@ func TestPeerAuthenticationSender_execute(t *testing.T) { err, isHardforkTriggered := senderInstance.execute() assert.Nil(t, err) - assert.True(t, broadcastCalled) + assert.True(t, mainBroadcastCalled) + assert.True(t, fullArchiveBroadcastCalled) assert.False(t, isHardforkTriggered) }) t.Run("should work with some real components", func(t *testing.T) { @@ -354,7 +361,7 @@ func TestPeerAuthenticationSender_execute(t *testing.T) { argsBase := createMockBaseArgs() argsBase.privKey = skMessenger var buffResulted []byte - messenger := &p2pmocks.MessengerStub{ + mainMessenger := &p2pmocks.MessengerStub{ BroadcastCalled: func(topic string, buff []byte) { assert.Equal(t, argsBase.topic, topic) buffResulted = buff @@ -372,7 +379,27 @@ func TestPeerAuthenticationSender_execute(t *testing.T) { return core.PeerID(pkBytes) }, } - argsBase.messenger = messenger + argsBase.mainMessenger = 
mainMessenger + + argsBase.fullArchiveMessenger = &p2pmocks.MessengerStub{ + BroadcastCalled: func(topic string, buff []byte) { + assert.Equal(t, argsBase.topic, topic) + assert.Equal(t, buffResulted, buff) + }, + SignCalled: func(payload []byte) ([]byte, error) { + assert.Fail(t, "should have not been called") + return nil, nil + }, + VerifyCalled: func(payload []byte, pid core.PeerID, signature []byte) error { + assert.Fail(t, "should have not been called") + return nil + }, + IDCalled: func() core.PeerID { + assert.Fail(t, "should have not been called") + return "" + }, + } + args := createMockPeerAuthenticationSenderArgsSemiIntegrationTests(argsBase) senderInstance, _ := newPeerAuthenticationSender(args) @@ -382,7 +409,7 @@ func TestPeerAuthenticationSender_execute(t *testing.T) { skBytes, _ := senderInstance.privKey.ToByteArray() pkBytes, _ := senderInstance.publicKey.ToByteArray() - log.Info("args", "pid", argsBase.messenger.ID().Pretty(), "bls sk", skBytes, "bls pk", pkBytes) + log.Info("args", "pid", argsBase.mainMessenger.ID().Pretty(), "bls sk", skBytes, "bls pk", pkBytes) // verify the received bytes if they can be converted in a valid peer authentication message recoveredBatch := batch.Batch{} @@ -392,13 +419,13 @@ func TestPeerAuthenticationSender_execute(t *testing.T) { err = argsBase.marshaller.Unmarshal(recoveredMessage, recoveredBatch.Data[0]) assert.Nil(t, err) assert.Equal(t, pkBytes, recoveredMessage.Pubkey) - assert.Equal(t, argsBase.messenger.ID().Pretty(), core.PeerID(recoveredMessage.Pid).Pretty()) + assert.Equal(t, argsBase.mainMessenger.ID().Pretty(), core.PeerID(recoveredMessage.Pid).Pretty()) t.Run("verify BLS sig on having the payload == message's pid", func(t *testing.T) { errVerify := args.peerSignatureHandler.VerifyPeerSignature(recoveredMessage.Pubkey, core.PeerID(recoveredMessage.Pid), recoveredMessage.Signature) assert.Nil(t, errVerify) }) t.Run("verify ed25519 sig having the payload == message's payload", func(t *testing.T) { - 
errVerify := messenger.Verify(recoveredMessage.Payload, core.PeerID(recoveredMessage.Pid), recoveredMessage.PayloadSignature) + errVerify := mainMessenger.Verify(recoveredMessage.Payload, core.PeerID(recoveredMessage.Pid), recoveredMessage.PayloadSignature) assert.Nil(t, errVerify) }) t.Run("verify payload", func(t *testing.T) { @@ -423,7 +450,7 @@ func TestPeerAuthenticationSender_Execute(t *testing.T) { argsBase := createMockBaseArgs() wasBroadcastCalled := false - argsBase.messenger = &p2pmocks.MessengerStub{ + argsBase.mainMessenger = &p2pmocks.MessengerStub{ BroadcastCalled: func(topic string, buff []byte) { wasBroadcastCalled = true }, @@ -492,10 +519,16 @@ func TestPeerAuthenticationSender_Execute(t *testing.T) { t.Parallel() argsBase := createMockBaseArgs() - counterBroadcast := 0 - argsBase.messenger = &p2pmocks.MessengerStub{ + counterMainBroadcast := 0 + counterFullArchiveroadcast := 0 + argsBase.mainMessenger = &p2pmocks.MessengerStub{ BroadcastCalled: func(topic string, buff []byte) { - counterBroadcast++ + counterMainBroadcast++ + }, + } + argsBase.fullArchiveMessenger = &p2pmocks.MessengerStub{ + BroadcastCalled: func(topic string, buff []byte) { + counterFullArchiveroadcast++ }, } args := createMockPeerAuthenticationSenderArgs(argsBase) @@ -516,7 +549,8 @@ func TestPeerAuthenticationSender_Execute(t *testing.T) { senderInstance.Execute() // observer senderInstance.Execute() // validator senderInstance.Execute() // observer - assert.Equal(t, 1, counterBroadcast) + assert.Equal(t, 1, counterMainBroadcast) + assert.Equal(t, 1, counterFullArchiveroadcast) }) t.Run("execute worked, should set the hardfork time duration value", func(t *testing.T) { t.Parallel() @@ -711,3 +745,14 @@ func TestPeerAuthenticationSender_ShouldTriggerHardfork(t *testing.T) { assert.Fail(t, "should not reach timeout") } } + +func TestPeerAuthenticationSender_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var senderInstance *peerAuthenticationSender + assert.True(t, 
senderInstance.IsInterfaceNil()) + + args := createMockPeerAuthenticationSenderArgs(createMockBaseArgs()) + senderInstance, _ = newPeerAuthenticationSender(args) + assert.False(t, senderInstance.IsInterfaceNil()) +} diff --git a/heartbeat/sender/peerShardSender.go b/heartbeat/sender/peerShardSender.go index 8ce5a7bb02a..45d641918cd 100644 --- a/heartbeat/sender/peerShardSender.go +++ b/heartbeat/sender/peerShardSender.go @@ -20,7 +20,8 @@ const minDelayBetweenSends = time.Second // ArgPeerShardSender represents the arguments for the peer shard sender type ArgPeerShardSender struct { - Messenger p2p.Messenger + MainMessenger p2p.Messenger + FullArchiveMessenger p2p.Messenger Marshaller marshal.Marshalizer ShardCoordinator sharding.Coordinator TimeBetweenSends time.Duration @@ -29,7 +30,8 @@ type ArgPeerShardSender struct { } type peerShardSender struct { - messenger p2p.Messenger + mainMessenger p2p.Messenger + fullArchiveMessenger p2p.Messenger marshaller marshal.Marshalizer shardCoordinator sharding.Coordinator timeBetweenSends time.Duration @@ -46,7 +48,8 @@ func NewPeerShardSender(args ArgPeerShardSender) (*peerShardSender, error) { } pss := &peerShardSender{ - messenger: args.Messenger, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, marshaller: args.Marshaller, shardCoordinator: args.ShardCoordinator, timeBetweenSends: args.TimeBetweenSends, @@ -63,8 +66,11 @@ func NewPeerShardSender(args ArgPeerShardSender) (*peerShardSender, error) { } func checkArgPeerShardSender(args ArgPeerShardSender) error { - if check.IfNil(args.Messenger) { - return process.ErrNilMessenger + if check.IfNil(args.MainMessenger) { + return fmt.Errorf("%w for main", process.ErrNilMessenger) + } + if check.IfNil(args.FullArchiveMessenger) { + return fmt.Errorf("%w for full archive", process.ErrNilMessenger) } if check.IfNil(args.Marshaller) { return process.ErrNilMarshalizer @@ -132,7 +138,8 @@ func (pss *peerShardSender) broadcastShard() { } 
log.Debug("broadcast peer shard", "shard", peerShard.ShardId) - pss.messenger.Broadcast(common.ConnectionTopic, peerShardBuff) + pss.mainMessenger.Broadcast(common.ConnectionTopic, peerShardBuff) + pss.fullArchiveMessenger.Broadcast(common.ConnectionTopic, peerShardBuff) } func (pss *peerShardSender) isCurrentNodeValidator() bool { diff --git a/heartbeat/sender/peerShardSender_test.go b/heartbeat/sender/peerShardSender_test.go index b3b396747af..cbc1b71714b 100644 --- a/heartbeat/sender/peerShardSender_test.go +++ b/heartbeat/sender/peerShardSender_test.go @@ -8,7 +8,6 @@ import ( "testing" "time" - "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/heartbeat" @@ -23,7 +22,8 @@ import ( func createMockArgPeerShardSender() ArgPeerShardSender { return ArgPeerShardSender{ - Messenger: &p2pmocks.MessengerStub{}, + MainMessenger: &p2pmocks.MessengerStub{}, + FullArchiveMessenger: &p2pmocks.MessengerStub{}, Marshaller: &marshal.GogoProtoMarshalizer{}, ShardCoordinator: &testscommon.ShardsCoordinatorMock{}, TimeBetweenSends: time.Second, @@ -39,15 +39,25 @@ func createMockArgPeerShardSender() ArgPeerShardSender { func TestNewPeerShardSender(t *testing.T) { t.Parallel() - t.Run("nil messenger should error", func(t *testing.T) { + t.Run("nil main messenger should error", func(t *testing.T) { t.Parallel() args := createMockArgPeerShardSender() - args.Messenger = nil + args.MainMessenger = nil pss, err := NewPeerShardSender(args) - assert.Equal(t, process.ErrNilMessenger, err) - assert.True(t, check.IfNil(pss)) + assert.True(t, errors.Is(err, process.ErrNilMessenger)) + assert.Nil(t, pss) + }) + t.Run("nil full archive messenger should error", func(t *testing.T) { + t.Parallel() + + args := createMockArgPeerShardSender() + args.FullArchiveMessenger = nil + + pss, err := NewPeerShardSender(args) + assert.True(t, errors.Is(err, 
process.ErrNilMessenger)) + assert.Nil(t, pss) }) t.Run("nil marshaller should error", func(t *testing.T) { t.Parallel() @@ -57,7 +67,7 @@ func TestNewPeerShardSender(t *testing.T) { pss, err := NewPeerShardSender(args) assert.Equal(t, process.ErrNilMarshalizer, err) - assert.True(t, check.IfNil(pss)) + assert.Nil(t, pss) }) t.Run("nil shard coordinator should error", func(t *testing.T) { t.Parallel() @@ -67,7 +77,7 @@ func TestNewPeerShardSender(t *testing.T) { pss, err := NewPeerShardSender(args) assert.Equal(t, process.ErrNilShardCoordinator, err) - assert.True(t, check.IfNil(pss)) + assert.Nil(t, pss) }) t.Run("invalid time between sends should error", func(t *testing.T) { t.Parallel() @@ -78,7 +88,7 @@ func TestNewPeerShardSender(t *testing.T) { pss, err := NewPeerShardSender(args) assert.True(t, errors.Is(err, heartbeat.ErrInvalidTimeDuration)) assert.True(t, strings.Contains(err.Error(), "TimeBetweenSends")) - assert.True(t, check.IfNil(pss)) + assert.Nil(t, pss) }) t.Run("invalid threshold between sends should error", func(t *testing.T) { t.Parallel() @@ -89,7 +99,7 @@ func TestNewPeerShardSender(t *testing.T) { pss, err := NewPeerShardSender(args) assert.True(t, errors.Is(err, heartbeat.ErrInvalidThreshold)) assert.True(t, strings.Contains(err.Error(), "TimeThresholdBetweenSends")) - assert.True(t, check.IfNil(pss)) + assert.Nil(t, pss) }) t.Run("nil nodes coordinator should error", func(t *testing.T) { t.Parallel() @@ -99,14 +109,14 @@ func TestNewPeerShardSender(t *testing.T) { pss, err := NewPeerShardSender(args) assert.True(t, errors.Is(err, heartbeat.ErrNilNodesCoordinator)) - assert.True(t, check.IfNil(pss)) + assert.Nil(t, pss) }) t.Run("should work and validator should not broadcast", func(t *testing.T) { t.Parallel() args := createMockArgPeerShardSender() wasCalled := false - args.Messenger = &p2pmocks.MessengerStub{ + args.MainMessenger = &p2pmocks.MessengerStub{ BroadcastCalled: func(topic string, buff []byte) { wasCalled = true }, @@ -119,7 
+129,7 @@ func TestNewPeerShardSender(t *testing.T) { args.TimeBetweenSends = 2 * time.Second pss, _ := NewPeerShardSender(args) - assert.False(t, check.IfNil(pss)) + assert.NotNil(t, pss) time.Sleep(3 * time.Second) _ = pss.Close() @@ -131,7 +141,17 @@ func TestNewPeerShardSender(t *testing.T) { args := createMockArgPeerShardSender() expectedShard := fmt.Sprintf("%d", args.ShardCoordinator.SelfId()) numOfCalls := uint32(0) - args.Messenger = &p2pmocks.MessengerStub{ + args.MainMessenger = &p2pmocks.MessengerStub{ + BroadcastCalled: func(topic string, buff []byte) { + shardInfo := &factory.PeerShard{} + err := args.Marshaller.Unmarshal(shardInfo, buff) + assert.Nil(t, err) + assert.Equal(t, expectedShard, shardInfo.ShardId) + assert.Equal(t, common.ConnectionTopic, topic) + atomic.AddUint32(&numOfCalls, 1) + }, + } + args.FullArchiveMessenger = &p2pmocks.MessengerStub{ BroadcastCalled: func(topic string, buff []byte) { shardInfo := &factory.PeerShard{} err := args.Marshaller.Unmarshal(shardInfo, buff) @@ -144,10 +164,20 @@ func TestNewPeerShardSender(t *testing.T) { args.TimeBetweenSends = 2 * time.Second pss, _ := NewPeerShardSender(args) - assert.False(t, check.IfNil(pss)) + assert.NotNil(t, pss) time.Sleep(3 * time.Second) _ = pss.Close() - assert.Equal(t, uint32(1), atomic.LoadUint32(&numOfCalls)) + assert.Equal(t, uint32(2), atomic.LoadUint32(&numOfCalls)) // one call for each messenger }) } + +func TestPeerShardSender_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var pss *peerShardSender + assert.True(t, pss.IsInterfaceNil()) + + pss, _ = NewPeerShardSender(createMockArgPeerShardSender()) + assert.False(t, pss.IsInterfaceNil()) +} diff --git a/heartbeat/sender/sender.go b/heartbeat/sender/sender.go index fbc5525be26..5589621f31f 100644 --- a/heartbeat/sender/sender.go +++ b/heartbeat/sender/sender.go @@ -12,7 +12,8 @@ import ( // ArgSender represents the arguments for the sender type ArgSender struct { - Messenger heartbeat.P2PMessenger + MainMessenger 
heartbeat.P2PMessenger + FullArchiveMessenger heartbeat.P2PMessenger Marshaller marshal.Marshalizer PeerAuthenticationTopic string HeartbeatTopic string @@ -56,7 +57,8 @@ func NewSender(args ArgSender) (*sender, error) { pas, err := createPeerAuthenticationSender(argPeerAuthenticationSenderFactory{ argBaseSender: argBaseSender{ - messenger: args.Messenger, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, marshaller: args.Marshaller, topic: args.PeerAuthenticationTopic, timeBetweenSends: args.PeerAuthenticationTimeBetweenSends, @@ -80,7 +82,8 @@ func NewSender(args ArgSender) (*sender, error) { hbs, err := createHeartbeatSender(argHeartbeatSenderFactory{ argBaseSender: argBaseSender{ - messenger: args.Messenger, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, marshaller: args.Marshaller, topic: args.HeartbeatTopic, timeBetweenSends: args.HeartbeatTimeBetweenSends, @@ -113,7 +116,8 @@ func NewSender(args ArgSender) (*sender, error) { func checkSenderArgs(args ArgSender) error { basePeerAuthSenderArgs := argBaseSender{ - messenger: args.Messenger, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, marshaller: args.Marshaller, topic: args.PeerAuthenticationTopic, timeBetweenSends: args.PeerAuthenticationTimeBetweenSends, @@ -153,7 +157,8 @@ func checkSenderArgs(args ArgSender) error { hbsArgs := argHeartbeatSender{ argBaseSender: argBaseSender{ - messenger: args.Messenger, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, marshaller: args.Marshaller, topic: args.HeartbeatTopic, timeBetweenSends: args.HeartbeatTimeBetweenSends, @@ -177,7 +182,8 @@ func checkSenderArgs(args ArgSender) error { mhbsArgs := argMultikeyHeartbeatSender{ argBaseSender: argBaseSender{ - messenger: args.Messenger, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, marshaller: args.Marshaller, topic: 
args.HeartbeatTopic, timeBetweenSends: args.HeartbeatTimeBetweenSends, diff --git a/heartbeat/sender/sender_test.go b/heartbeat/sender/sender_test.go index bc9db68bad1..5509e23f16a 100644 --- a/heartbeat/sender/sender_test.go +++ b/heartbeat/sender/sender_test.go @@ -8,7 +8,6 @@ import ( "time" "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/heartbeat" "github.com/multiversx/mx-chain-go/heartbeat/mock" "github.com/multiversx/mx-chain-go/testscommon" @@ -22,11 +21,12 @@ import ( func createMockSenderArgs() ArgSender { return ArgSender{ - Messenger: &p2pmocks.MessengerStub{}, - Marshaller: &marshallerMock.MarshalizerMock{}, - PeerAuthenticationTopic: "pa-topic", - HeartbeatTopic: "hb-topic", - PeerAuthenticationTimeBetweenSends: time.Second, + MainMessenger: &p2pmocks.MessengerStub{}, + FullArchiveMessenger: &p2pmocks.MessengerStub{}, + Marshaller: &marshallerMock.MarshalizerMock{}, + PeerAuthenticationTopic: "pa-topic", + HeartbeatTopic: "hb-topic", + PeerAuthenticationTimeBetweenSends: time.Second, PeerAuthenticationTimeBetweenSendsWhenError: time.Second, PeerAuthenticationTimeThresholdBetweenSends: 0.1, HeartbeatTimeBetweenSends: time.Second, @@ -55,15 +55,25 @@ func createMockSenderArgs() ArgSender { func TestNewSender(t *testing.T) { t.Parallel() - t.Run("nil peer messenger should error", func(t *testing.T) { + t.Run("nil main messenger should error", func(t *testing.T) { t.Parallel() args := createMockSenderArgs() - args.Messenger = nil + args.MainMessenger = nil senderInstance, err := NewSender(args) assert.Nil(t, senderInstance) - assert.Equal(t, heartbeat.ErrNilMessenger, err) + assert.True(t, errors.Is(err, heartbeat.ErrNilMessenger)) + }) + t.Run("nil full archive messenger should error", func(t *testing.T) { + t.Parallel() + + args := createMockSenderArgs() + args.FullArchiveMessenger = nil + senderInstance, err := NewSender(args) + + assert.Nil(t, senderInstance) + 
assert.True(t, errors.Is(err, heartbeat.ErrNilMessenger)) }) t.Run("nil marshaller should error", func(t *testing.T) { t.Parallel() @@ -303,7 +313,7 @@ func TestNewSender(t *testing.T) { args := createMockSenderArgs() senderInstance, err := NewSender(args) - assert.False(t, check.IfNil(senderInstance)) + assert.NotNil(t, senderInstance) assert.Nil(t, err) }) } @@ -343,3 +353,13 @@ func TestSender_GetCurrentNodeTypeShouldNotPanic(t *testing.T) { _ = senderInstance.Close() } + +func TestSender_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var senderInstance *sender + assert.True(t, senderInstance.IsInterfaceNil()) + + senderInstance, _ = NewSender(createMockSenderArgs()) + assert.False(t, senderInstance.IsInterfaceNil()) +} diff --git a/integrationTests/mock/networkComponentsMock.go b/integrationTests/mock/networkComponentsMock.go index 573a4ae7f66..5cedae5b3fd 100644 --- a/integrationTests/mock/networkComponentsMock.go +++ b/integrationTests/mock/networkComponentsMock.go @@ -8,15 +8,18 @@ import ( // NetworkComponentsStub - type NetworkComponentsStub struct { - Messenger p2p.Messenger - MessengerCalled func() p2p.Messenger - InputAntiFlood factory.P2PAntifloodHandler - OutputAntiFlood factory.P2PAntifloodHandler - PeerBlackList process.PeerBlackListCacher - PeerHonesty factory.PeerHonestyHandler - PreferredPeersHolder factory.PreferredPeersHolderHandler - PeersRatingHandlerField p2p.PeersRatingHandler - PeersRatingMonitorField p2p.PeersRatingMonitor + Messenger p2p.Messenger + MessengerCalled func() p2p.Messenger + InputAntiFlood factory.P2PAntifloodHandler + OutputAntiFlood factory.P2PAntifloodHandler + PeerBlackList process.PeerBlackListCacher + PeerHonesty factory.PeerHonestyHandler + PreferredPeersHolder factory.PreferredPeersHolderHandler + PeersRatingHandlerField p2p.PeersRatingHandler + PeersRatingMonitorField p2p.PeersRatingMonitor + FullArchiveNetworkMessengerField p2p.Messenger + FullArchivePeersRatingHandlerField p2p.PeersRatingHandler + 
FullArchivePeersRatingMonitorField p2p.PeersRatingMonitor } // PubKeyCacher - @@ -82,6 +85,21 @@ func (ncs *NetworkComponentsStub) PeersRatingMonitor() p2p.PeersRatingMonitor { return ncs.PeersRatingMonitorField } +// FullArchiveNetworkMessenger - +func (ncs *NetworkComponentsStub) FullArchiveNetworkMessenger() p2p.Messenger { + return ncs.FullArchiveNetworkMessengerField +} + +// FullArchivePeersRatingHandler - +func (ncs *NetworkComponentsStub) FullArchivePeersRatingHandler() p2p.PeersRatingHandler { + return ncs.FullArchivePeersRatingHandlerField +} + +// FullArchivePeersRatingMonitor - +func (ncs *NetworkComponentsStub) FullArchivePeersRatingMonitor() p2p.PeersRatingMonitor { + return ncs.FullArchivePeersRatingMonitorField +} + // String - func (ncs *NetworkComponentsStub) String() string { return "NetworkComponentsStub" diff --git a/integrationTests/node/heartbeatV2/heartbeatV2_test.go b/integrationTests/node/heartbeatV2/heartbeatV2_test.go index 2b9051534df..d88c89e3e8f 100644 --- a/integrationTests/node/heartbeatV2/heartbeatV2_test.go +++ b/integrationTests/node/heartbeatV2/heartbeatV2_test.go @@ -143,6 +143,7 @@ func connectNodes(nodes []*integrationTests.TestHeartbeatNode, interactingNodes func checkMessages(t *testing.T, nodes []*integrationTests.TestHeartbeatNode, maxMessageAgeAllowed time.Duration) { numOfNodes := len(nodes) for i := 0; i < numOfNodes; i++ { + // TODO[Sorin]: check also the full archive cachers paCache := nodes[i].DataPool.PeerAuthentications() hbCache := nodes[i].DataPool.Heartbeats() @@ -155,7 +156,7 @@ func checkMessages(t *testing.T, nodes []*integrationTests.TestHeartbeatNode, ma assert.Nil(t, err) assert.True(t, paCache.Has(pkBytes)) - assert.True(t, hbCache.Has(node.Messenger.ID().Bytes())) + assert.True(t, hbCache.Has(node.MainMessenger.ID().Bytes())) // Also check message age value, found := paCache.Get(pkBytes) diff --git a/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go 
b/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go index c78b4ef7320..644223672cb 100644 --- a/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go +++ b/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go @@ -119,7 +119,8 @@ func stopNodes(advertiser p2p.Messenger, nodesMap map[uint32][]*integrationTests _ = advertiser.Close() for _, nodes := range nodesMap { for _, n := range nodes { - _ = n.Messenger.Close() + _ = n.MainMessenger.Close() + _ = n.FullArchiveMessenger.Close() } } } @@ -127,7 +128,7 @@ func stopNodes(advertiser p2p.Messenger, nodesMap map[uint32][]*integrationTests func startNodes(nodesMap map[uint32][]*integrationTests.TestHeartbeatNode) { for _, nodes := range nodesMap { for _, n := range nodes { - _ = n.Messenger.Bootstrap() + _ = n.MainMessenger.Bootstrap() } } } @@ -150,7 +151,7 @@ func createTestInterceptorForEachNode(nodesMap map[uint32][]*integrationTests.Te func sendMessageOnGlobalTopic(nodesMap map[uint32][]*integrationTests.TestHeartbeatNode) { fmt.Println("sending a message on global topic") - nodesMap[0][0].Messenger.Broadcast(integrationTests.GlobalTopic, []byte("global message")) + nodesMap[0][0].MainMessenger.Broadcast(integrationTests.GlobalTopic, []byte("global message")) time.Sleep(time.Second) } @@ -161,7 +162,7 @@ func sendMessagesOnIntraShardTopic(nodesMap map[uint32][]*integrationTests.TestH identifier := integrationTests.ShardTopic + n.ShardCoordinator.CommunicationIdentifier(n.ShardCoordinator.SelfId()) - nodes[0].Messenger.Broadcast(identifier, []byte("intra shard message")) + nodes[0].MainMessenger.Broadcast(identifier, []byte("intra shard message")) } time.Sleep(time.Second) } @@ -179,7 +180,7 @@ func sendMessagesOnCrossShardTopic(nodesMap map[uint32][]*integrationTests.TestH identifier := integrationTests.ShardTopic + n.ShardCoordinator.CommunicationIdentifier(shardIdDest) - nodes[0].Messenger.Broadcast(identifier, []byte("cross shard message")) + 
nodes[0].MainMessenger.Broadcast(identifier, []byte("cross shard message")) } } time.Sleep(time.Second) @@ -209,8 +210,8 @@ func testUnknownSeederPeers( for _, nodes := range nodesMap { for _, n := range nodes { - assert.Equal(t, 0, len(n.Messenger.GetConnectedPeersInfo().UnknownPeers)) - assert.Equal(t, 1, len(n.Messenger.GetConnectedPeersInfo().Seeders)) + assert.Equal(t, 0, len(n.MainMessenger.GetConnectedPeersInfo().UnknownPeers)) + assert.Equal(t, 1, len(n.MainMessenger.GetConnectedPeersInfo().Seeders)) } } } diff --git a/integrationTests/testHeartbeatNode.go b/integrationTests/testHeartbeatNode.go index 54a2b206587..38a04ec67db 100644 --- a/integrationTests/testHeartbeatNode.go +++ b/integrationTests/testHeartbeatNode.go @@ -84,29 +84,31 @@ var TestThrottler = &processMock.InterceptorThrottlerStub{ // TestHeartbeatNode represents a container type of class used in integration tests // with all its fields exported type TestHeartbeatNode struct { - ShardCoordinator sharding.Coordinator - NodesCoordinator nodesCoordinator.NodesCoordinator - PeerShardMapper process.NetworkShardingCollector - Messenger p2p.Messenger - NodeKeys *TestNodeKeys - DataPool dataRetriever.PoolsHolder - Sender update.Closer - PeerAuthInterceptor *interceptors.MultiDataInterceptor - HeartbeatInterceptor *interceptors.SingleDataInterceptor - PeerShardInterceptor *interceptors.SingleDataInterceptor - PeerSigHandler crypto.PeerSignatureHandler - WhiteListHandler process.WhiteListHandler - Storage dataRetriever.StorageService - ResolversContainer dataRetriever.ResolversContainer - RequestersContainer dataRetriever.RequestersContainer - RequestersFinder dataRetriever.RequestersFinder - RequestHandler process.RequestHandler - RequestedItemsHandler dataRetriever.RequestedItemsHandler - RequestsProcessor update.Closer - ShardSender update.Closer - DirectConnectionProcessor update.Closer - Interceptor *CountInterceptor - heartbeatExpiryTimespanInSec int64 + ShardCoordinator sharding.Coordinator + 
NodesCoordinator nodesCoordinator.NodesCoordinator + PeerShardMapper process.NetworkShardingCollector + MainMessenger p2p.Messenger + FullArchiveMessenger p2p.Messenger + NodeKeys *TestNodeKeys + DataPool dataRetriever.PoolsHolder + Sender update.Closer + PeerAuthInterceptor *interceptors.MultiDataInterceptor + HeartbeatInterceptor *interceptors.SingleDataInterceptor + PeerShardInterceptor *interceptors.SingleDataInterceptor + PeerSigHandler crypto.PeerSignatureHandler + WhiteListHandler process.WhiteListHandler + Storage dataRetriever.StorageService + ResolversContainer dataRetriever.ResolversContainer + RequestersContainer dataRetriever.RequestersContainer + RequestersFinder dataRetriever.RequestersFinder + RequestHandler process.RequestHandler + RequestedItemsHandler dataRetriever.RequestedItemsHandler + RequestsProcessor update.Closer + ShardSender update.Closer + MainDirectConnectionProcessor update.Closer + FullArchiveDirectConnectionProcessor update.Closer + Interceptor *CountInterceptor + heartbeatExpiryTimespanInSec int64 } // NewTestHeartbeatNode returns a new TestHeartbeatNode instance with a libp2p messenger @@ -181,13 +183,14 @@ func NewTestHeartbeatNode( thn := &TestHeartbeatNode{ ShardCoordinator: shardCoordinator, NodesCoordinator: nodesCoordinatorInstance, - Messenger: messenger, + MainMessenger: messenger, + FullArchiveMessenger: &p2pmocks.MessengerStub{}, // TODO[Sorin]: inject a proper messenger when all pieces are done to test this network as well PeerSigHandler: peerSigHandler, PeerShardMapper: peerShardMapper, heartbeatExpiryTimespanInSec: heartbeatExpiryTimespanInSec, } - localId := thn.Messenger.ID() + localId := thn.MainMessenger.ID() pkBytes, _ := pk.ToByteArray() thn.PeerShardMapper.UpdatePeerIDInfo(localId, pkBytes, shardCoordinator.SelfId()) @@ -254,14 +257,15 @@ func NewTestHeartbeatNodeWithCoordinator( thn := &TestHeartbeatNode{ ShardCoordinator: shardCoordinator, NodesCoordinator: coordinator, - Messenger: messenger, + 
MainMessenger: messenger, + FullArchiveMessenger: &p2pmocks.MessengerStub{}, PeerSigHandler: peerSigHandler, PeerShardMapper: peerShardMapper, Interceptor: NewCountInterceptor(), heartbeatExpiryTimespanInSec: 30, } - localId := thn.Messenger.ID() + localId := thn.MainMessenger.ID() thn.PeerShardMapper.UpdatePeerIDInfo(localId, []byte(""), shardCoordinator.SelfId()) thn.NodeKeys = keys @@ -389,7 +393,7 @@ func (thn *TestHeartbeatNode) InitTestHeartbeatNode(tb testing.TB, minPeersWaiti thn.initCrossShardPeerTopicNotifier(tb) thn.initDirectConnectionProcessor(tb) - for len(thn.Messenger.Peers()) < minPeersWaiting { + for len(thn.MainMessenger.Peers()) < minPeersWaiting { time.Sleep(time.Second) } @@ -412,7 +416,8 @@ func (thn *TestHeartbeatNode) initStorage() { func (thn *TestHeartbeatNode) initSender() { identifierHeartbeat := common.HeartbeatV2Topic + thn.ShardCoordinator.CommunicationIdentifier(thn.ShardCoordinator.SelfId()) argsSender := sender.ArgSender{ - Messenger: thn.Messenger, + MainMessenger: thn.MainMessenger, + FullArchiveMessenger: thn.FullArchiveMessenger, Marshaller: TestMarshaller, PeerAuthenticationTopic: common.PeerAuthenticationTopic, HeartbeatTopic: identifierHeartbeat, @@ -448,12 +453,13 @@ func (thn *TestHeartbeatNode) initSender() { func (thn *TestHeartbeatNode) initResolversAndRequesters() { dataPacker, _ := partitioning.NewSimpleDataPacker(TestMarshaller) - _ = thn.Messenger.CreateTopic(common.ConsensusTopic+thn.ShardCoordinator.CommunicationIdentifier(thn.ShardCoordinator.SelfId()), true) + _ = thn.MainMessenger.CreateTopic(common.ConsensusTopic+thn.ShardCoordinator.CommunicationIdentifier(thn.ShardCoordinator.SelfId()), true) + _ = thn.FullArchiveMessenger.CreateTopic(common.ConsensusTopic+thn.ShardCoordinator.CommunicationIdentifier(thn.ShardCoordinator.SelfId()), true) payloadValidator, _ := validator.NewPeerAuthenticationPayloadValidator(thn.heartbeatExpiryTimespanInSec) resolverContainerFactoryArgs := resolverscontainer.FactoryArgs{ 
ShardCoordinator: thn.ShardCoordinator, - Messenger: thn.Messenger, + Messenger: thn.MainMessenger, Store: thn.Storage, Marshalizer: TestMarshaller, DataPools: thn.DataPool, @@ -478,7 +484,7 @@ func (thn *TestHeartbeatNode) initResolversAndRequesters() { NumTotalPeers: 3, NumFullHistoryPeers: 3}, ShardCoordinator: thn.ShardCoordinator, - Messenger: thn.Messenger, + Messenger: thn.MainMessenger, Marshaller: TestMarshaller, Uint64ByteSliceConverter: TestUint64Converter, OutputAntifloodHandler: &mock.NilAntifloodHandler{}, @@ -557,7 +563,7 @@ func (thn *TestHeartbeatNode) initInterceptors() { PeerSignatureHandler: thn.PeerSigHandler, SignaturesHandler: &processMock.SignaturesHandlerStub{}, HeartbeatExpiryTimespanInSec: thn.heartbeatExpiryTimespanInSec, - PeerID: thn.Messenger.ID(), + PeerID: thn.MainMessenger.ID(), } thn.createPeerAuthInterceptor(argsFactory) @@ -613,7 +619,7 @@ func (thn *TestHeartbeatNode) initMultiDataInterceptor(topic string, dataFactory }, }, PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - CurrentPeerId: thn.Messenger.ID(), + CurrentPeerId: thn.MainMessenger.ID(), }, ) @@ -636,7 +642,7 @@ func (thn *TestHeartbeatNode) initSingleDataInterceptor(topic string, dataFactor }, }, PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - CurrentPeerId: thn.Messenger.ID(), + CurrentPeerId: thn.MainMessenger.ID(), }, ) @@ -663,7 +669,8 @@ func (thn *TestHeartbeatNode) initRequestsProcessor() { func (thn *TestHeartbeatNode) initShardSender(tb testing.TB) { args := sender.ArgPeerShardSender{ - Messenger: thn.Messenger, + MainMessenger: thn.MainMessenger, + FullArchiveMessenger: thn.FullArchiveMessenger, Marshaller: TestMarshaller, ShardCoordinator: thn.ShardCoordinator, TimeBetweenSends: 5 * time.Second, @@ -679,7 +686,7 @@ func (thn *TestHeartbeatNode) initShardSender(tb testing.TB) { func (thn *TestHeartbeatNode) initDirectConnectionProcessor(tb testing.TB) { argsDirectConnectionProcessor := processor.ArgsDirectConnectionProcessor{ 
TimeToReadDirectConnections: 5 * time.Second, - Messenger: thn.Messenger, + Messenger: thn.MainMessenger, PeerShardMapper: thn.PeerShardMapper, ShardCoordinator: thn.ShardCoordinator, BaseIntraShardTopic: ShardTopic, @@ -687,7 +694,19 @@ func (thn *TestHeartbeatNode) initDirectConnectionProcessor(tb testing.TB) { } var err error - thn.DirectConnectionProcessor, err = processor.NewDirectConnectionProcessor(argsDirectConnectionProcessor) + thn.MainDirectConnectionProcessor, err = processor.NewDirectConnectionProcessor(argsDirectConnectionProcessor) + require.Nil(tb, err) + + argsDirectConnectionProcessor = processor.ArgsDirectConnectionProcessor{ + TimeToReadDirectConnections: 5 * time.Second, + Messenger: thn.FullArchiveMessenger, + PeerShardMapper: thn.PeerShardMapper, // TODO[Sorin]: replace this with the full archive psm + ShardCoordinator: thn.ShardCoordinator, + BaseIntraShardTopic: ShardTopic, + BaseCrossShardTopic: ShardTopic, + } + + thn.FullArchiveDirectConnectionProcessor, err = processor.NewDirectConnectionProcessor(argsDirectConnectionProcessor) require.Nil(tb, err) } @@ -699,8 +718,19 @@ func (thn *TestHeartbeatNode) initCrossShardPeerTopicNotifier(tb testing.TB) { crossShardPeerTopicNotifier, err := monitor.NewCrossShardPeerTopicNotifier(argsCrossShardPeerTopicNotifier) require.Nil(tb, err) - err = thn.Messenger.AddPeerTopicNotifier(crossShardPeerTopicNotifier) + err = thn.MainMessenger.AddPeerTopicNotifier(crossShardPeerTopicNotifier) require.Nil(tb, err) + + argsCrossShardPeerTopicNotifier = monitor.ArgsCrossShardPeerTopicNotifier{ + ShardCoordinator: thn.ShardCoordinator, + PeerShardMapper: thn.PeerShardMapper, // TODO[Sorin]: replace this with the full archive psm + } + crossShardPeerTopicNotifier, err = monitor.NewCrossShardPeerTopicNotifier(argsCrossShardPeerTopicNotifier) + require.Nil(tb, err) + + err = thn.FullArchiveMessenger.AddPeerTopicNotifier(crossShardPeerTopicNotifier) + require.Nil(tb, err) + } // ConnectTo will try to initiate a 
connection to the provided parameter @@ -709,7 +739,7 @@ func (thn *TestHeartbeatNode) ConnectTo(connectable Connectable) error { return fmt.Errorf("trying to connect to a nil Connectable parameter") } - return thn.Messenger.ConnectToPeer(connectable.GetConnectableAddress()) + return thn.MainMessenger.ConnectToPeer(connectable.GetConnectableAddress()) } // GetConnectableAddress returns a non circuit, non windows default connectable p2p address @@ -718,7 +748,7 @@ func (thn *TestHeartbeatNode) GetConnectableAddress() string { return "nil" } - return GetConnectableAddress(thn.Messenger) + return GetConnectableAddress(thn.MainMessenger) } // MakeDisplayTableForHeartbeatNodes returns a string containing counters for received messages for all provided test nodes @@ -730,9 +760,9 @@ func MakeDisplayTableForHeartbeatNodes(nodes map[uint32][]*TestHeartbeatNode) st for _, n := range nodesList { buffPk, _ := n.NodeKeys.MainKey.Pk.ToByteArray() - peerInfo := n.Messenger.GetConnectedPeersInfo() + peerInfo := n.MainMessenger.GetConnectedPeersInfo() - pid := n.Messenger.ID().Pretty() + pid := n.MainMessenger.ID().Pretty() lineData := display.NewLineData( false, []string{ @@ -743,7 +773,7 @@ func MakeDisplayTableForHeartbeatNodes(nodes map[uint32][]*TestHeartbeatNode) st fmt.Sprintf("%d", n.CountIntraShardMessages()), fmt.Sprintf("%d", n.CountCrossShardMessages()), fmt.Sprintf("%d/%d/%d/%d/%d/%d/%d", - len(n.Messenger.ConnectedPeers()), + len(n.MainMessenger.ConnectedPeers()), peerInfo.NumIntraShardValidators, peerInfo.NumCrossShardValidators, peerInfo.NumIntraShardObservers, @@ -764,13 +794,13 @@ func MakeDisplayTableForHeartbeatNodes(nodes map[uint32][]*TestHeartbeatNode) st // registerTopicValidator registers a message processor instance on the provided topic func (thn *TestHeartbeatNode) registerTopicValidator(topic string, processor p2p.MessageProcessor) { - err := thn.Messenger.CreateTopic(topic, true) + err := thn.MainMessenger.CreateTopic(topic, true) if err != nil { 
fmt.Printf("error while creating topic %s: %s\n", topic, err.Error()) return } - err = thn.Messenger.RegisterMessageProcessor(topic, "test", processor) + err = thn.MainMessenger.RegisterMessageProcessor(topic, "test", processor) if err != nil { fmt.Printf("error while registering topic validator %s: %s\n", topic, err.Error()) return @@ -830,8 +860,10 @@ func (thn *TestHeartbeatNode) Close() { _ = thn.RequestersContainer.Close() _ = thn.ResolversContainer.Close() _ = thn.ShardSender.Close() - _ = thn.Messenger.Close() - _ = thn.DirectConnectionProcessor.Close() + _ = thn.MainMessenger.Close() + _ = thn.FullArchiveMessenger.Close() + _ = thn.MainDirectConnectionProcessor.Close() + _ = thn.FullArchiveDirectConnectionProcessor.Close() } // IsInterfaceNil returns true if there is no value under the interface diff --git a/node/mock/factory/networkComponentsMock.go b/node/mock/factory/networkComponentsMock.go index c9dd18d26b6..ae42eeda2e7 100644 --- a/node/mock/factory/networkComponentsMock.go +++ b/node/mock/factory/networkComponentsMock.go @@ -8,13 +8,16 @@ import ( // NetworkComponentsMock - type NetworkComponentsMock struct { - Messenger p2p.Messenger - InputAntiFlood factory.P2PAntifloodHandler - OutputAntiFlood factory.P2PAntifloodHandler - PeerBlackList process.PeerBlackListCacher - PreferredPeersHolder factory.PreferredPeersHolderHandler - PeersRatingHandlerField p2p.PeersRatingHandler - PeersRatingMonitorField p2p.PeersRatingMonitor + Messenger p2p.Messenger + InputAntiFlood factory.P2PAntifloodHandler + OutputAntiFlood factory.P2PAntifloodHandler + PeerBlackList process.PeerBlackListCacher + PreferredPeersHolder factory.PreferredPeersHolderHandler + PeersRatingHandlerField p2p.PeersRatingHandler + PeersRatingMonitorField p2p.PeersRatingMonitor + FullArchiveNetworkMessengerField p2p.Messenger + FullArchivePeersRatingHandlerField p2p.PeersRatingHandler + FullArchivePeersRatingMonitorField p2p.PeersRatingMonitor } // PubKeyCacher - @@ -77,6 +80,21 @@ func (ncm 
*NetworkComponentsMock) PeersRatingMonitor() p2p.PeersRatingMonitor { return ncm.PeersRatingMonitorField } +// FullArchiveNetworkMessenger - +func (ncm *NetworkComponentsMock) FullArchiveNetworkMessenger() p2p.Messenger { + return ncm.FullArchiveNetworkMessengerField +} + +// FullArchivePeersRatingHandler - +func (ncm *NetworkComponentsMock) FullArchivePeersRatingHandler() p2p.PeersRatingHandler { + return ncm.FullArchivePeersRatingHandlerField +} + +// FullArchivePeersRatingMonitor - +func (ncm *NetworkComponentsMock) FullArchivePeersRatingMonitor() p2p.PeersRatingMonitor { + return ncm.FullArchivePeersRatingMonitorField +} + // String - func (ncm *NetworkComponentsMock) String() string { return "NetworkComponentsMock" diff --git a/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go b/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go index c66ac5bea6f..3a8686e0713 100644 --- a/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go +++ b/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go @@ -657,6 +657,7 @@ func (bicf *baseInterceptorsContainerFactory) generatePeerAuthenticationIntercep //------- Heartbeat interceptor func (bicf *baseInterceptorsContainerFactory) generateHeartbeatInterceptor() error { + // TODO[Sorin]: we'll need separate interceptors for full archive heartbeats and peer authentications shardC := bicf.shardCoordinator identifierHeartbeat := common.HeartbeatV2Topic + shardC.CommunicationIdentifier(shardC.SelfId()) From 710b341713dee4c4222a46a5fd570b0927833519 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Fri, 9 Jun 2023 12:53:25 +0300 Subject: [PATCH 05/38] added new data pools for full archive heartbeats --- dataRetriever/dataPool/dataPool.go | 135 +++++++++++------- dataRetriever/factory/dataPoolFactory.go | 53 ++++--- dataRetriever/factory/dataPoolFactory_test.go | 2 +- dataRetriever/interface.go | 2 + factory/heartbeat/heartbeatV2Components.go | 5 +- 
.../heartbeat/heartbeatV2Components_test.go | 17 ++- testscommon/dataRetriever/poolFactory.go | 88 +++++++----- testscommon/dataRetriever/poolsHolderMock.go | 68 ++++++--- testscommon/dataRetriever/poolsHolderStub.go | 52 ++++--- 9 files changed, 281 insertions(+), 141 deletions(-) diff --git a/dataRetriever/dataPool/dataPool.go b/dataRetriever/dataPool/dataPool.go index 67b55cbfaee..cf51ad37cea 100644 --- a/dataRetriever/dataPool/dataPool.go +++ b/dataRetriever/dataPool/dataPool.go @@ -1,6 +1,8 @@ package dataPool import ( + "fmt" + "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/storage" @@ -12,38 +14,42 @@ var _ dataRetriever.PoolsHolder = (*dataPool)(nil) var log = logger.GetOrCreate("dataRetriever/dataPool") type dataPool struct { - transactions dataRetriever.ShardedDataCacherNotifier - unsignedTransactions dataRetriever.ShardedDataCacherNotifier - rewardTransactions dataRetriever.ShardedDataCacherNotifier - headers dataRetriever.HeadersPool - miniBlocks storage.Cacher - peerChangesBlocks storage.Cacher - trieNodes storage.Cacher - trieNodesChunks storage.Cacher - currBlockTxs dataRetriever.TransactionCacher - currEpochValidatorInfo dataRetriever.ValidatorInfoCacher - smartContracts storage.Cacher - peerAuthentications storage.Cacher - heartbeats storage.Cacher - validatorsInfo dataRetriever.ShardedDataCacherNotifier + transactions dataRetriever.ShardedDataCacherNotifier + unsignedTransactions dataRetriever.ShardedDataCacherNotifier + rewardTransactions dataRetriever.ShardedDataCacherNotifier + headers dataRetriever.HeadersPool + miniBlocks storage.Cacher + peerChangesBlocks storage.Cacher + trieNodes storage.Cacher + trieNodesChunks storage.Cacher + currBlockTxs dataRetriever.TransactionCacher + currEpochValidatorInfo dataRetriever.ValidatorInfoCacher + smartContracts storage.Cacher + mainPeerAuthentications storage.Cacher + mainHeartbeats storage.Cacher + 
fullArchivePeerAuthentications storage.Cacher + fullArchiveHeartbeats storage.Cacher + validatorsInfo dataRetriever.ShardedDataCacherNotifier } // DataPoolArgs represents the data pool's constructor structure type DataPoolArgs struct { - Transactions dataRetriever.ShardedDataCacherNotifier - UnsignedTransactions dataRetriever.ShardedDataCacherNotifier - RewardTransactions dataRetriever.ShardedDataCacherNotifier - Headers dataRetriever.HeadersPool - MiniBlocks storage.Cacher - PeerChangesBlocks storage.Cacher - TrieNodes storage.Cacher - TrieNodesChunks storage.Cacher - CurrentBlockTransactions dataRetriever.TransactionCacher - CurrentEpochValidatorInfo dataRetriever.ValidatorInfoCacher - SmartContracts storage.Cacher - PeerAuthentications storage.Cacher - Heartbeats storage.Cacher - ValidatorsInfo dataRetriever.ShardedDataCacherNotifier + Transactions dataRetriever.ShardedDataCacherNotifier + UnsignedTransactions dataRetriever.ShardedDataCacherNotifier + RewardTransactions dataRetriever.ShardedDataCacherNotifier + Headers dataRetriever.HeadersPool + MiniBlocks storage.Cacher + PeerChangesBlocks storage.Cacher + TrieNodes storage.Cacher + TrieNodesChunks storage.Cacher + CurrentBlockTransactions dataRetriever.TransactionCacher + CurrentEpochValidatorInfo dataRetriever.ValidatorInfoCacher + SmartContracts storage.Cacher + MainPeerAuthentications storage.Cacher + MainHeartbeats storage.Cacher + FullArchivePeerAuthentications storage.Cacher + FullArchiveHeartbeats storage.Cacher + ValidatorsInfo dataRetriever.ShardedDataCacherNotifier } // NewDataPool creates a data pools holder object @@ -81,31 +87,39 @@ func NewDataPool(args DataPoolArgs) (*dataPool, error) { if check.IfNil(args.SmartContracts) { return nil, dataRetriever.ErrNilSmartContractsPool } - if check.IfNil(args.PeerAuthentications) { - return nil, dataRetriever.ErrNilPeerAuthenticationPool + if check.IfNil(args.MainPeerAuthentications) { + return nil, fmt.Errorf("%w for main", 
dataRetriever.ErrNilPeerAuthenticationPool) + } + if check.IfNil(args.MainHeartbeats) { + return nil, fmt.Errorf("%w for main", dataRetriever.ErrNilHeartbeatPool) } - if check.IfNil(args.Heartbeats) { - return nil, dataRetriever.ErrNilHeartbeatPool + if check.IfNil(args.FullArchivePeerAuthentications) { + return nil, fmt.Errorf("%w for full archive", dataRetriever.ErrNilPeerAuthenticationPool) + } + if check.IfNil(args.FullArchiveHeartbeats) { + return nil, fmt.Errorf("%w for full archive", dataRetriever.ErrNilHeartbeatPool) } if check.IfNil(args.ValidatorsInfo) { return nil, dataRetriever.ErrNilValidatorInfoPool } return &dataPool{ - transactions: args.Transactions, - unsignedTransactions: args.UnsignedTransactions, - rewardTransactions: args.RewardTransactions, - headers: args.Headers, - miniBlocks: args.MiniBlocks, - peerChangesBlocks: args.PeerChangesBlocks, - trieNodes: args.TrieNodes, - trieNodesChunks: args.TrieNodesChunks, - currBlockTxs: args.CurrentBlockTransactions, - currEpochValidatorInfo: args.CurrentEpochValidatorInfo, - smartContracts: args.SmartContracts, - peerAuthentications: args.PeerAuthentications, - heartbeats: args.Heartbeats, - validatorsInfo: args.ValidatorsInfo, + transactions: args.Transactions, + unsignedTransactions: args.UnsignedTransactions, + rewardTransactions: args.RewardTransactions, + headers: args.Headers, + miniBlocks: args.MiniBlocks, + peerChangesBlocks: args.PeerChangesBlocks, + trieNodes: args.TrieNodes, + trieNodesChunks: args.TrieNodesChunks, + currBlockTxs: args.CurrentBlockTransactions, + currEpochValidatorInfo: args.CurrentEpochValidatorInfo, + smartContracts: args.SmartContracts, + mainPeerAuthentications: args.MainPeerAuthentications, + mainHeartbeats: args.MainHeartbeats, + fullArchivePeerAuthentications: args.FullArchivePeerAuthentications, + fullArchiveHeartbeats: args.FullArchiveHeartbeats, + validatorsInfo: args.ValidatorsInfo, }, nil } @@ -166,12 +180,22 @@ func (dp *dataPool) SmartContracts() storage.Cacher { 
// PeerAuthentications returns the holder for peer authentications func (dp *dataPool) PeerAuthentications() storage.Cacher { - return dp.peerAuthentications + return dp.mainPeerAuthentications } // Heartbeats returns the holder for heartbeats func (dp *dataPool) Heartbeats() storage.Cacher { - return dp.heartbeats + return dp.mainHeartbeats +} + +// FullArchivePeerAuthentications returns the holder for full archive peer authentications +func (dp *dataPool) FullArchivePeerAuthentications() storage.Cacher { + return dp.fullArchivePeerAuthentications +} + +// FullArchiveHeartbeats returns the holder for full archive heartbeats +func (dp *dataPool) FullArchiveHeartbeats() storage.Cacher { + return dp.fullArchiveHeartbeats } // ValidatorsInfo returns the holder for validators info @@ -191,9 +215,18 @@ func (dp *dataPool) Close() error { } } - if !check.IfNil(dp.peerAuthentications) { - log.Debug("closing peer authentications data pool....") - err := dp.peerAuthentications.Close() + if !check.IfNil(dp.mainPeerAuthentications) { + log.Debug("closing main peer authentications data pool....") + err := dp.mainPeerAuthentications.Close() + if err != nil { + log.Error("failed to close peer authentications data pool", "error", err.Error()) + lastError = err + } + } + + if !check.IfNil(dp.fullArchivePeerAuthentications) { + log.Debug("closing full archive peer authentications data pool....") + err := dp.fullArchivePeerAuthentications.Close() if err != nil { log.Error("failed to close peer authentications data pool", "error", err.Error()) lastError = err diff --git a/dataRetriever/factory/dataPoolFactory.go b/dataRetriever/factory/dataPoolFactory.go index 052ade3afd7..1132c1fdf94 100644 --- a/dataRetriever/factory/dataPoolFactory.go +++ b/dataRetriever/factory/dataPoolFactory.go @@ -128,19 +128,32 @@ func NewDataPoolFromConfig(args ArgsDataPool) (dataRetriever.PoolsHolder, error) return nil, fmt.Errorf("%w while creating the cache for the smartcontract results", err) } - // 
TODO[Sorin]: create new peer authentication and heartbeat cachers for the messages from full archive network - peerAuthPool, err := cache.NewTimeCacher(cache.ArgTimeCacher{ + mainPeerAuthPool, err := cache.NewTimeCacher(cache.ArgTimeCacher{ DefaultSpan: time.Duration(mainConfig.HeartbeatV2.PeerAuthenticationTimeBetweenSendsInSec) * time.Second * peerAuthExpiryMultiplier, CacheExpiry: peerAuthenticationCacheRefresh, }) if err != nil { - return nil, fmt.Errorf("%w while creating the cache for the peer authentication messages", err) + return nil, fmt.Errorf("%w while creating the cache for the main peer authentication messages", err) } cacherCfg = factory.GetCacherFromConfig(mainConfig.HeartbeatV2.HeartbeatPool) - heartbeatPool, err := storageunit.NewCache(cacherCfg) + mainHeartbeatPool, err := storageunit.NewCache(cacherCfg) if err != nil { - return nil, fmt.Errorf("%w while creating the cache for the heartbeat messages", err) + return nil, fmt.Errorf("%w while creating the cache for the main heartbeat messages", err) + } + + fullArchivePeerAuthPool, err := cache.NewTimeCacher(cache.ArgTimeCacher{ + DefaultSpan: time.Duration(mainConfig.HeartbeatV2.PeerAuthenticationTimeBetweenSendsInSec) * time.Second * peerAuthExpiryMultiplier, + CacheExpiry: peerAuthenticationCacheRefresh, + }) + if err != nil { + return nil, fmt.Errorf("%w while creating the cache for the full archive peer authentication messages", err) + } + + cacherCfg = factory.GetCacherFromConfig(mainConfig.HeartbeatV2.HeartbeatPool) + fullArchiveHeartbeatPool, err := storageunit.NewCache(cacherCfg) + if err != nil { + return nil, fmt.Errorf("%w while creating the cache for the full archive heartbeat messages", err) } validatorsInfo, err := shardedData.NewShardedData(dataRetriever.ValidatorsInfoPoolName, factory.GetCacherFromConfig(mainConfig.ValidatorInfoPool)) @@ -151,20 +164,22 @@ func NewDataPoolFromConfig(args ArgsDataPool) (dataRetriever.PoolsHolder, error) currBlockTransactions := 
dataPool.NewCurrentBlockTransactionsPool() currEpochValidatorInfo := dataPool.NewCurrentEpochValidatorInfoPool() dataPoolArgs := dataPool.DataPoolArgs{ - Transactions: txPool, - UnsignedTransactions: uTxPool, - RewardTransactions: rewardTxPool, - Headers: hdrPool, - MiniBlocks: txBlockBody, - PeerChangesBlocks: peerChangeBlockBody, - TrieNodes: adaptedTrieNodesStorage, - TrieNodesChunks: trieNodesChunks, - CurrentBlockTransactions: currBlockTransactions, - CurrentEpochValidatorInfo: currEpochValidatorInfo, - SmartContracts: smartContracts, - PeerAuthentications: peerAuthPool, - Heartbeats: heartbeatPool, - ValidatorsInfo: validatorsInfo, + Transactions: txPool, + UnsignedTransactions: uTxPool, + RewardTransactions: rewardTxPool, + Headers: hdrPool, + MiniBlocks: txBlockBody, + PeerChangesBlocks: peerChangeBlockBody, + TrieNodes: adaptedTrieNodesStorage, + TrieNodesChunks: trieNodesChunks, + CurrentBlockTransactions: currBlockTransactions, + CurrentEpochValidatorInfo: currEpochValidatorInfo, + SmartContracts: smartContracts, + MainPeerAuthentications: mainPeerAuthPool, + MainHeartbeats: mainHeartbeatPool, + FullArchivePeerAuthentications: fullArchivePeerAuthPool, + FullArchiveHeartbeats: fullArchiveHeartbeatPool, + ValidatorsInfo: validatorsInfo, } return dataPool.NewDataPool(dataPoolArgs) } diff --git a/dataRetriever/factory/dataPoolFactory_test.go b/dataRetriever/factory/dataPoolFactory_test.go index c9ae8b60c43..820c4b226a6 100644 --- a/dataRetriever/factory/dataPoolFactory_test.go +++ b/dataRetriever/factory/dataPoolFactory_test.go @@ -135,7 +135,7 @@ func TestNewDataPoolFromConfig_BadConfigShouldErr(t *testing.T) { require.Nil(t, holder) fmt.Println(err) require.True(t, errors.Is(err, storage.ErrNotSupportedCacheType)) - require.True(t, strings.Contains(err.Error(), "the cache for the heartbeat messages")) + require.True(t, strings.Contains(err.Error(), "the cache for the main heartbeat messages")) args = getGoodArgs() args.Config.ValidatorInfoPool.Capacity = 0 
diff --git a/dataRetriever/interface.go b/dataRetriever/interface.go index 4da2c3669db..06455c0adb1 100644 --- a/dataRetriever/interface.go +++ b/dataRetriever/interface.go @@ -244,6 +244,8 @@ type PoolsHolder interface { CurrentEpochValidatorInfo() ValidatorInfoCacher PeerAuthentications() storage.Cacher Heartbeats() storage.Cacher + FullArchivePeerAuthentications() storage.Cacher + FullArchiveHeartbeats() storage.Cacher ValidatorsInfo() ShardedDataCacherNotifier Close() error IsInterfaceNil() bool diff --git a/factory/heartbeat/heartbeatV2Components.go b/factory/heartbeat/heartbeatV2Components.go index 6cb42b46c0e..fc66bc9675a 100644 --- a/factory/heartbeat/heartbeatV2Components.go +++ b/factory/heartbeat/heartbeatV2Components.go @@ -102,7 +102,10 @@ func checkHeartbeatV2FactoryArgs(args ArgHeartbeatV2ComponentsFactory) error { return errors.ErrNilNetworkComponentsHolder } if check.IfNil(args.NetworkComponents.NetworkMessenger()) { - return errors.ErrNilMessenger + return fmt.Errorf("%w for main", errors.ErrNilMessenger) + } + if check.IfNil(args.NetworkComponents.FullArchiveNetworkMessenger()) { + return fmt.Errorf("%w for full archive", errors.ErrNilMessenger) } if check.IfNil(args.CryptoComponents) { return errors.ErrNilCryptoComponentsHolder diff --git a/factory/heartbeat/heartbeatV2Components_test.go b/factory/heartbeat/heartbeatV2Components_test.go index 5862add074c..093a17d719b 100644 --- a/factory/heartbeat/heartbeatV2Components_test.go +++ b/factory/heartbeat/heartbeatV2Components_test.go @@ -190,7 +190,19 @@ func TestNewHeartbeatV2ComponentsFactory(t *testing.T) { } hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) assert.Nil(t, hcf) - assert.Equal(t, errorsMx.ErrNilMessenger, err) + assert.True(t, errors.Is(err, errorsMx.ErrNilMessenger)) + }) + t.Run("nil FullArchiveNetworkMessenger should error", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2ComponentsFactoryArgs() + args.NetworkComponents = 
&testsMocks.NetworkComponentsStub{ + Messenger: &p2pmocks.MessengerStub{}, + FullArchiveNetworkMessengerField: nil, + } + hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + assert.Nil(t, hcf) + assert.True(t, errors.Is(err, errorsMx.ErrNilMessenger)) }) t.Run("nil CryptoComponents should error", func(t *testing.T) { t.Parallel() @@ -257,6 +269,7 @@ func TestHeartbeatV2Components_Create(t *testing.T) { return nil }, }, + FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{}, } hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) assert.NotNil(t, hcf) @@ -283,6 +296,7 @@ func TestHeartbeatV2Components_Create(t *testing.T) { return nil }, }, + FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{}, } hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) assert.NotNil(t, hcf) @@ -440,6 +454,7 @@ func TestHeartbeatV2Components_Create(t *testing.T) { return expectedErr }, }, + FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{}, } hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) assert.NotNil(t, hcf) diff --git a/testscommon/dataRetriever/poolFactory.go b/testscommon/dataRetriever/poolFactory.go index d4d0ed59fc8..cb5597d9be2 100644 --- a/testscommon/dataRetriever/poolFactory.go +++ b/testscommon/dataRetriever/poolFactory.go @@ -114,14 +114,24 @@ func CreatePoolsHolder(numShards uint32, selfShard uint32) dataRetriever.PoolsHo smartContracts, err := storageunit.NewCache(cacherConfig) panicIfError("CreatePoolsHolder", err) - peerAuthPool, err := cache.NewTimeCacher(cache.ArgTimeCacher{ + mainPeerAuthPool, err := cache.NewTimeCacher(cache.ArgTimeCacher{ DefaultSpan: 60 * time.Second, CacheExpiry: 60 * time.Second, }) panicIfError("CreatePoolsHolder", err) cacherConfig = storageunit.CacheConfig{Capacity: 50000, Type: storageunit.LRUCache} - heartbeatPool, err := storageunit.NewCache(cacherConfig) + mainHeartbeatPool, err := storageunit.NewCache(cacherConfig) + panicIfError("CreatePoolsHolder", err) + + 
fullArchivePeerAuthPool, err := cache.NewTimeCacher(cache.ArgTimeCacher{ + DefaultSpan: 60 * time.Second, + CacheExpiry: 60 * time.Second, + }) + panicIfError("CreatePoolsHolder", err) + + cacherConfig = storageunit.CacheConfig{Capacity: 50000, Type: storageunit.LRUCache} + fullArchiveHeartbeatPool, err := storageunit.NewCache(cacherConfig) panicIfError("CreatePoolsHolder", err) validatorsInfo, err := shardedData.NewShardedData("validatorsInfoPool", storageunit.CacheConfig{ @@ -134,20 +144,22 @@ func CreatePoolsHolder(numShards uint32, selfShard uint32) dataRetriever.PoolsHo currentBlockTransactions := dataPool.NewCurrentBlockTransactionsPool() currentEpochValidatorInfo := dataPool.NewCurrentEpochValidatorInfoPool() dataPoolArgs := dataPool.DataPoolArgs{ - Transactions: txPool, - UnsignedTransactions: unsignedTxPool, - RewardTransactions: rewardsTxPool, - Headers: headersPool, - MiniBlocks: txBlockBody, - PeerChangesBlocks: peerChangeBlockBody, - TrieNodes: adaptedTrieNodesStorage, - TrieNodesChunks: trieNodesChunks, - CurrentBlockTransactions: currentBlockTransactions, - CurrentEpochValidatorInfo: currentEpochValidatorInfo, - SmartContracts: smartContracts, - PeerAuthentications: peerAuthPool, - Heartbeats: heartbeatPool, - ValidatorsInfo: validatorsInfo, + Transactions: txPool, + UnsignedTransactions: unsignedTxPool, + RewardTransactions: rewardsTxPool, + Headers: headersPool, + MiniBlocks: txBlockBody, + PeerChangesBlocks: peerChangeBlockBody, + TrieNodes: adaptedTrieNodesStorage, + TrieNodesChunks: trieNodesChunks, + CurrentBlockTransactions: currentBlockTransactions, + CurrentEpochValidatorInfo: currentEpochValidatorInfo, + SmartContracts: smartContracts, + MainPeerAuthentications: mainPeerAuthPool, + MainHeartbeats: mainHeartbeatPool, + FullArchivePeerAuthentications: fullArchivePeerAuthPool, + FullArchiveHeartbeats: fullArchiveHeartbeatPool, + ValidatorsInfo: validatorsInfo, } holder, err := dataPool.NewDataPool(dataPoolArgs) 
panicIfError("CreatePoolsHolder", err) @@ -205,33 +217,45 @@ func CreatePoolsHolderWithTxPool(txPool dataRetriever.ShardedDataCacherNotifier) }) panicIfError("CreatePoolsHolderWithTxPool", err) - peerAuthPool, err := cache.NewTimeCacher(cache.ArgTimeCacher{ + mainPeerAuthPool, err := cache.NewTimeCacher(cache.ArgTimeCacher{ + DefaultSpan: peerAuthDuration, + CacheExpiry: peerAuthDuration, + }) + panicIfError("CreatePoolsHolderWithTxPool", err) + + cacherConfig = storageunit.CacheConfig{Capacity: 50000, Type: storageunit.LRUCache} + mainHeartbeatPool, err := storageunit.NewCache(cacherConfig) + panicIfError("CreatePoolsHolderWithTxPool", err) + + fullArchivePeerAuthPool, err := cache.NewTimeCacher(cache.ArgTimeCacher{ DefaultSpan: peerAuthDuration, CacheExpiry: peerAuthDuration, }) panicIfError("CreatePoolsHolderWithTxPool", err) cacherConfig = storageunit.CacheConfig{Capacity: 50000, Type: storageunit.LRUCache} - heartbeatPool, err := storageunit.NewCache(cacherConfig) + fullArchiveHeartbeatPool, err := storageunit.NewCache(cacherConfig) panicIfError("CreatePoolsHolderWithTxPool", err) currentBlockTransactions := dataPool.NewCurrentBlockTransactionsPool() currentEpochValidatorInfo := dataPool.NewCurrentEpochValidatorInfoPool() dataPoolArgs := dataPool.DataPoolArgs{ - Transactions: txPool, - UnsignedTransactions: unsignedTxPool, - RewardTransactions: rewardsTxPool, - Headers: headersPool, - MiniBlocks: txBlockBody, - PeerChangesBlocks: peerChangeBlockBody, - TrieNodes: trieNodes, - TrieNodesChunks: trieNodesChunks, - CurrentBlockTransactions: currentBlockTransactions, - CurrentEpochValidatorInfo: currentEpochValidatorInfo, - SmartContracts: smartContracts, - PeerAuthentications: peerAuthPool, - Heartbeats: heartbeatPool, - ValidatorsInfo: validatorsInfo, + Transactions: txPool, + UnsignedTransactions: unsignedTxPool, + RewardTransactions: rewardsTxPool, + Headers: headersPool, + MiniBlocks: txBlockBody, + PeerChangesBlocks: peerChangeBlockBody, + TrieNodes: 
trieNodes, + TrieNodesChunks: trieNodesChunks, + CurrentBlockTransactions: currentBlockTransactions, + CurrentEpochValidatorInfo: currentEpochValidatorInfo, + SmartContracts: smartContracts, + MainPeerAuthentications: mainPeerAuthPool, + MainHeartbeats: mainHeartbeatPool, + FullArchivePeerAuthentications: fullArchivePeerAuthPool, + FullArchiveHeartbeats: fullArchiveHeartbeatPool, + ValidatorsInfo: validatorsInfo, } holder, err := dataPool.NewDataPool(dataPoolArgs) panicIfError("CreatePoolsHolderWithTxPool", err) diff --git a/testscommon/dataRetriever/poolsHolderMock.go b/testscommon/dataRetriever/poolsHolderMock.go index 5c711addbb0..52aab613d25 100644 --- a/testscommon/dataRetriever/poolsHolderMock.go +++ b/testscommon/dataRetriever/poolsHolderMock.go @@ -18,20 +18,22 @@ import ( // PoolsHolderMock - type PoolsHolderMock struct { - transactions dataRetriever.ShardedDataCacherNotifier - unsignedTransactions dataRetriever.ShardedDataCacherNotifier - rewardTransactions dataRetriever.ShardedDataCacherNotifier - headers dataRetriever.HeadersPool - miniBlocks storage.Cacher - peerChangesBlocks storage.Cacher - trieNodes storage.Cacher - trieNodesChunks storage.Cacher - smartContracts storage.Cacher - currBlockTxs dataRetriever.TransactionCacher - currEpochValidatorInfo dataRetriever.ValidatorInfoCacher - peerAuthentications storage.Cacher - heartbeats storage.Cacher - validatorsInfo dataRetriever.ShardedDataCacherNotifier + transactions dataRetriever.ShardedDataCacherNotifier + unsignedTransactions dataRetriever.ShardedDataCacherNotifier + rewardTransactions dataRetriever.ShardedDataCacherNotifier + headers dataRetriever.HeadersPool + miniBlocks storage.Cacher + peerChangesBlocks storage.Cacher + trieNodes storage.Cacher + trieNodesChunks storage.Cacher + smartContracts storage.Cacher + currBlockTxs dataRetriever.TransactionCacher + currEpochValidatorInfo dataRetriever.ValidatorInfoCacher + mainPeerAuthentications storage.Cacher + mainHeartbeats storage.Cacher + 
fullArchivePeerAuthentications storage.Cacher + fullArchiveHeartbeats storage.Cacher + validatorsInfo dataRetriever.ShardedDataCacherNotifier } // NewPoolsHolderMock - @@ -93,13 +95,22 @@ func NewPoolsHolderMock() *PoolsHolderMock { holder.smartContracts, err = storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 10000, Shards: 1, SizeInBytes: 0}) panicIfError("NewPoolsHolderMock", err) - holder.peerAuthentications, err = cache.NewTimeCacher(cache.ArgTimeCacher{ + holder.mainPeerAuthentications, err = cache.NewTimeCacher(cache.ArgTimeCacher{ DefaultSpan: 10 * time.Second, CacheExpiry: 10 * time.Second, }) panicIfError("NewPoolsHolderMock", err) - holder.heartbeats, err = storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 10000, Shards: 1, SizeInBytes: 0}) + holder.mainHeartbeats, err = storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 10000, Shards: 1, SizeInBytes: 0}) + panicIfError("NewPoolsHolderMock", err) + + holder.fullArchivePeerAuthentications, err = cache.NewTimeCacher(cache.ArgTimeCacher{ + DefaultSpan: 10 * time.Second, + CacheExpiry: 10 * time.Second, + }) + panicIfError("NewPoolsHolderMock", err) + + holder.fullArchiveHeartbeats, err = storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 10000, Shards: 1, SizeInBytes: 0}) panicIfError("NewPoolsHolderMock", err) holder.validatorsInfo, err = shardedData.NewShardedData("validatorsInfoPool", storageunit.CacheConfig{ @@ -179,12 +190,22 @@ func (holder *PoolsHolderMock) SmartContracts() storage.Cacher { // PeerAuthentications - func (holder *PoolsHolderMock) PeerAuthentications() storage.Cacher { - return holder.peerAuthentications + return holder.mainPeerAuthentications } // Heartbeats - func (holder *PoolsHolderMock) Heartbeats() storage.Cacher { - return holder.heartbeats + return holder.mainHeartbeats +} + +// FullArchivePeerAuthentications - +func (holder *PoolsHolderMock) 
FullArchivePeerAuthentications() storage.Cacher { + return holder.fullArchivePeerAuthentications +} + +// FullArchiveHeartbeats - +func (holder *PoolsHolderMock) FullArchiveHeartbeats() storage.Cacher { + return holder.fullArchiveHeartbeats } // ValidatorsInfo - @@ -202,8 +223,15 @@ func (holder *PoolsHolderMock) Close() error { } } - if !check.IfNil(holder.peerAuthentications) { - err := holder.peerAuthentications.Close() + if !check.IfNil(holder.mainPeerAuthentications) { + err := holder.mainPeerAuthentications.Close() + if err != nil { + lastError = err + } + } + + if !check.IfNil(holder.fullArchivePeerAuthentications) { + err := holder.fullArchivePeerAuthentications.Close() if err != nil { lastError = err } diff --git a/testscommon/dataRetriever/poolsHolderStub.go b/testscommon/dataRetriever/poolsHolderStub.go index 106c8b96bb5..8f87f878b79 100644 --- a/testscommon/dataRetriever/poolsHolderStub.go +++ b/testscommon/dataRetriever/poolsHolderStub.go @@ -8,22 +8,24 @@ import ( // PoolsHolderStub - type PoolsHolderStub struct { - HeadersCalled func() dataRetriever.HeadersPool - TransactionsCalled func() dataRetriever.ShardedDataCacherNotifier - UnsignedTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier - RewardTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier - MiniBlocksCalled func() storage.Cacher - MetaBlocksCalled func() storage.Cacher - CurrBlockTxsCalled func() dataRetriever.TransactionCacher - CurrEpochValidatorInfoCalled func() dataRetriever.ValidatorInfoCacher - TrieNodesCalled func() storage.Cacher - TrieNodesChunksCalled func() storage.Cacher - PeerChangesBlocksCalled func() storage.Cacher - SmartContractsCalled func() storage.Cacher - PeerAuthenticationsCalled func() storage.Cacher - HeartbeatsCalled func() storage.Cacher - ValidatorsInfoCalled func() dataRetriever.ShardedDataCacherNotifier - CloseCalled func() error + HeadersCalled func() dataRetriever.HeadersPool + TransactionsCalled func() 
dataRetriever.ShardedDataCacherNotifier + UnsignedTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier + RewardTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier + MiniBlocksCalled func() storage.Cacher + MetaBlocksCalled func() storage.Cacher + CurrBlockTxsCalled func() dataRetriever.TransactionCacher + CurrEpochValidatorInfoCalled func() dataRetriever.ValidatorInfoCacher + TrieNodesCalled func() storage.Cacher + TrieNodesChunksCalled func() storage.Cacher + PeerChangesBlocksCalled func() storage.Cacher + SmartContractsCalled func() storage.Cacher + PeerAuthenticationsCalled func() storage.Cacher + HeartbeatsCalled func() storage.Cacher + FullArchivePeerAuthenticationsCalled func() storage.Cacher + FullArchiveHeartbeatsCalled func() storage.Cacher + ValidatorsInfoCalled func() dataRetriever.ShardedDataCacherNotifier + CloseCalled func() error } // NewPoolsHolderStub - @@ -157,6 +159,24 @@ func (holder *PoolsHolderStub) Heartbeats() storage.Cacher { return testscommon.NewCacherStub() } +// FullArchivePeerAuthentications - +func (holder *PoolsHolderStub) FullArchivePeerAuthentications() storage.Cacher { + if holder.FullArchivePeerAuthenticationsCalled != nil { + return holder.FullArchivePeerAuthenticationsCalled() + } + + return testscommon.NewCacherStub() +} + +// FullArchiveHeartbeats - +func (holder *PoolsHolderStub) FullArchiveHeartbeats() storage.Cacher { + if holder.FullArchiveHeartbeatsCalled != nil { + return holder.FullArchiveHeartbeatsCalled() + } + + return testscommon.NewCacherStub() +} + // ValidatorsInfo - func (holder *PoolsHolderStub) ValidatorsInfo() dataRetriever.ShardedDataCacherNotifier { if holder.ValidatorsInfoCalled != nil { From 419be65bfc3ca0964002a65e742c853a6853fea3 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Fri, 9 Jun 2023 13:13:10 +0300 Subject: [PATCH 06/38] fixed datapool tests --- dataRetriever/dataPool/dataPool_test.go | 112 ++++++++++++++++++------ 1 file changed, 83 insertions(+), 29 
deletions(-) diff --git a/dataRetriever/dataPool/dataPool_test.go b/dataRetriever/dataPool/dataPool_test.go index 11a94c5e488..c5a33786789 100644 --- a/dataRetriever/dataPool/dataPool_test.go +++ b/dataRetriever/dataPool/dataPool_test.go @@ -16,20 +16,22 @@ import ( func createMockDataPoolArgs() dataPool.DataPoolArgs { return dataPool.DataPoolArgs{ - Transactions: testscommon.NewShardedDataStub(), - UnsignedTransactions: testscommon.NewShardedDataStub(), - RewardTransactions: testscommon.NewShardedDataStub(), - Headers: &mock.HeadersCacherStub{}, - MiniBlocks: testscommon.NewCacherStub(), - PeerChangesBlocks: testscommon.NewCacherStub(), - TrieNodes: testscommon.NewCacherStub(), - TrieNodesChunks: testscommon.NewCacherStub(), - CurrentBlockTransactions: &mock.TxForCurrentBlockStub{}, - CurrentEpochValidatorInfo: &mock.ValidatorInfoForCurrentEpochStub{}, - SmartContracts: testscommon.NewCacherStub(), - PeerAuthentications: testscommon.NewCacherStub(), - Heartbeats: testscommon.NewCacherStub(), - ValidatorsInfo: testscommon.NewShardedDataStub(), + Transactions: testscommon.NewShardedDataStub(), + UnsignedTransactions: testscommon.NewShardedDataStub(), + RewardTransactions: testscommon.NewShardedDataStub(), + Headers: &mock.HeadersCacherStub{}, + MiniBlocks: testscommon.NewCacherStub(), + PeerChangesBlocks: testscommon.NewCacherStub(), + TrieNodes: testscommon.NewCacherStub(), + TrieNodesChunks: testscommon.NewCacherStub(), + CurrentBlockTransactions: &mock.TxForCurrentBlockStub{}, + CurrentEpochValidatorInfo: &mock.ValidatorInfoForCurrentEpochStub{}, + SmartContracts: testscommon.NewCacherStub(), + MainPeerAuthentications: testscommon.NewCacherStub(), + MainHeartbeats: testscommon.NewCacherStub(), + FullArchivePeerAuthentications: testscommon.NewCacherStub(), + FullArchiveHeartbeats: testscommon.NewCacherStub(), + ValidatorsInfo: testscommon.NewShardedDataStub(), } } @@ -121,25 +123,47 @@ func TestNewDataPool_NilSmartContractsShouldErr(t *testing.T) { assert.Nil(t, 
tdp) } -func TestNewDataPool_NilPeerAuthenticationsShouldErr(t *testing.T) { +func TestNewDataPool_NilMainPeerAuthenticationsShouldErr(t *testing.T) { t.Parallel() args := createMockDataPoolArgs() - args.PeerAuthentications = nil + args.MainPeerAuthentications = nil tdp, err := dataPool.NewDataPool(args) - assert.Equal(t, dataRetriever.ErrNilPeerAuthenticationPool, err) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPeerAuthenticationPool)) assert.Nil(t, tdp) } -func TestNewDataPool_NilHeartbeatsShouldErr(t *testing.T) { +func TestNewDataPool_NilMainHeartbeatsShouldErr(t *testing.T) { t.Parallel() args := createMockDataPoolArgs() - args.Heartbeats = nil + args.MainHeartbeats = nil tdp, err := dataPool.NewDataPool(args) - assert.Equal(t, dataRetriever.ErrNilHeartbeatPool, err) + assert.True(t, errors.Is(err, dataRetriever.ErrNilHeartbeatPool)) + assert.Nil(t, tdp) +} + +func TestNewDataPool_NilFullArchivePeerAuthenticationsShouldErr(t *testing.T) { + t.Parallel() + + args := createMockDataPoolArgs() + args.FullArchivePeerAuthentications = nil + tdp, err := dataPool.NewDataPool(args) + + assert.True(t, errors.Is(err, dataRetriever.ErrNilPeerAuthenticationPool)) + assert.Nil(t, tdp) +} + +func TestNewDataPool_NilFullArchiveHeartbeatsShouldErr(t *testing.T) { + t.Parallel() + + args := createMockDataPoolArgs() + args.FullArchiveHeartbeats = nil + tdp, err := dataPool.NewDataPool(args) + + assert.True(t, errors.Is(err, dataRetriever.ErrNilHeartbeatPool)) assert.Nil(t, tdp) } @@ -207,8 +231,10 @@ func TestNewDataPool_OkValsShouldWork(t *testing.T) { assert.True(t, args.TrieNodes == tdp.TrieNodes()) assert.True(t, args.TrieNodesChunks == tdp.TrieNodesChunks()) assert.True(t, args.SmartContracts == tdp.SmartContracts()) - assert.True(t, args.PeerAuthentications == tdp.PeerAuthentications()) - assert.True(t, args.Heartbeats == tdp.Heartbeats()) + assert.True(t, args.MainPeerAuthentications == tdp.PeerAuthentications()) + assert.True(t, args.MainHeartbeats == 
tdp.Heartbeats()) + assert.True(t, args.FullArchivePeerAuthentications == tdp.FullArchivePeerAuthentications()) + assert.True(t, args.FullArchiveHeartbeats == tdp.FullArchiveHeartbeats()) assert.True(t, args.ValidatorsInfo == tdp.ValidatorsInfo()) } @@ -230,11 +256,11 @@ func TestNewDataPool_Close(t *testing.T) { err := tdp.Close() assert.Equal(t, expectedErr, err) }) - t.Run("peer authentications close returns error", func(t *testing.T) { + t.Run("main peer authentications close returns error", func(t *testing.T) { t.Parallel() args := createMockDataPoolArgs() - args.PeerAuthentications = &testscommon.CacherStub{ + args.MainPeerAuthentications = &testscommon.CacherStub{ CloseCalled: func() error { return expectedErr }, @@ -244,11 +270,26 @@ func TestNewDataPool_Close(t *testing.T) { err := tdp.Close() assert.Equal(t, expectedErr, err) }) - t.Run("both fail", func(t *testing.T) { + t.Run("full archive peer authentications close returns error", func(t *testing.T) { + t.Parallel() + + args := createMockDataPoolArgs() + args.FullArchivePeerAuthentications = &testscommon.CacherStub{ + CloseCalled: func() error { + return expectedErr + }, + } + tdp, _ := dataPool.NewDataPool(args) + assert.NotNil(t, tdp) + err := tdp.Close() + assert.Equal(t, expectedErr, err) + }) + t.Run("all fail", func(t *testing.T) { t.Parallel() tnExpectedErr := errors.New("tn expected error") paExpectedErr := errors.New("pa expected error") + faExpectedErr := errors.New("fa expected error") args := createMockDataPoolArgs() tnCalled, paCalled := false, false args.TrieNodes = &testscommon.CacherStub{ @@ -257,16 +298,22 @@ func TestNewDataPool_Close(t *testing.T) { return tnExpectedErr }, } - args.PeerAuthentications = &testscommon.CacherStub{ + args.MainPeerAuthentications = &testscommon.CacherStub{ CloseCalled: func() error { paCalled = true return paExpectedErr }, } + args.FullArchivePeerAuthentications = &testscommon.CacherStub{ + CloseCalled: func() error { + paCalled = true + return 
faExpectedErr + }, + } tdp, _ := dataPool.NewDataPool(args) assert.NotNil(t, tdp) err := tdp.Close() - assert.Equal(t, paExpectedErr, err) + assert.Equal(t, faExpectedErr, err) assert.True(t, tnCalled) assert.True(t, paCalled) }) @@ -274,24 +321,31 @@ func TestNewDataPool_Close(t *testing.T) { t.Parallel() args := createMockDataPoolArgs() - tnCalled, paCalled := false, false + tnCalled, paCalled, faCalled := false, false, false args.TrieNodes = &testscommon.CacherStub{ CloseCalled: func() error { tnCalled = true return nil }, } - args.PeerAuthentications = &testscommon.CacherStub{ + args.MainPeerAuthentications = &testscommon.CacherStub{ CloseCalled: func() error { paCalled = true return nil }, } + args.FullArchivePeerAuthentications = &testscommon.CacherStub{ + CloseCalled: func() error { + faCalled = true + return nil + }, + } tdp, _ := dataPool.NewDataPool(args) assert.NotNil(t, tdp) err := tdp.Close() assert.Nil(t, err) assert.True(t, tnCalled) assert.True(t, paCalled) + assert.True(t, faCalled) }) } From 06d691e6fd7b9f8da942887d3434860142cef735 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Fri, 9 Jun 2023 15:51:13 +0300 Subject: [PATCH 07/38] removed full archive pools, not needed --- dataRetriever/dataPool/dataPool.go | 135 +++++++----------- dataRetriever/dataPool/dataPool_test.go | 105 ++++---------- dataRetriever/factory/dataPoolFactory.go | 52 +++---- dataRetriever/factory/dataPoolFactory_test.go | 2 +- dataRetriever/interface.go | 2 - testscommon/dataRetriever/poolFactory.go | 88 +++++------- testscommon/dataRetriever/poolsHolderMock.go | 68 +++------ testscommon/dataRetriever/poolsHolderStub.go | 52 +++---- 8 files changed, 164 insertions(+), 340 deletions(-) diff --git a/dataRetriever/dataPool/dataPool.go b/dataRetriever/dataPool/dataPool.go index cf51ad37cea..67b55cbfaee 100644 --- a/dataRetriever/dataPool/dataPool.go +++ b/dataRetriever/dataPool/dataPool.go @@ -1,8 +1,6 @@ package dataPool import ( - "fmt" - 
"github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/storage" @@ -14,42 +12,38 @@ var _ dataRetriever.PoolsHolder = (*dataPool)(nil) var log = logger.GetOrCreate("dataRetriever/dataPool") type dataPool struct { - transactions dataRetriever.ShardedDataCacherNotifier - unsignedTransactions dataRetriever.ShardedDataCacherNotifier - rewardTransactions dataRetriever.ShardedDataCacherNotifier - headers dataRetriever.HeadersPool - miniBlocks storage.Cacher - peerChangesBlocks storage.Cacher - trieNodes storage.Cacher - trieNodesChunks storage.Cacher - currBlockTxs dataRetriever.TransactionCacher - currEpochValidatorInfo dataRetriever.ValidatorInfoCacher - smartContracts storage.Cacher - mainPeerAuthentications storage.Cacher - mainHeartbeats storage.Cacher - fullArchivePeerAuthentications storage.Cacher - fullArchiveHeartbeats storage.Cacher - validatorsInfo dataRetriever.ShardedDataCacherNotifier + transactions dataRetriever.ShardedDataCacherNotifier + unsignedTransactions dataRetriever.ShardedDataCacherNotifier + rewardTransactions dataRetriever.ShardedDataCacherNotifier + headers dataRetriever.HeadersPool + miniBlocks storage.Cacher + peerChangesBlocks storage.Cacher + trieNodes storage.Cacher + trieNodesChunks storage.Cacher + currBlockTxs dataRetriever.TransactionCacher + currEpochValidatorInfo dataRetriever.ValidatorInfoCacher + smartContracts storage.Cacher + peerAuthentications storage.Cacher + heartbeats storage.Cacher + validatorsInfo dataRetriever.ShardedDataCacherNotifier } // DataPoolArgs represents the data pool's constructor structure type DataPoolArgs struct { - Transactions dataRetriever.ShardedDataCacherNotifier - UnsignedTransactions dataRetriever.ShardedDataCacherNotifier - RewardTransactions dataRetriever.ShardedDataCacherNotifier - Headers dataRetriever.HeadersPool - MiniBlocks storage.Cacher - PeerChangesBlocks storage.Cacher - TrieNodes storage.Cacher - 
TrieNodesChunks storage.Cacher - CurrentBlockTransactions dataRetriever.TransactionCacher - CurrentEpochValidatorInfo dataRetriever.ValidatorInfoCacher - SmartContracts storage.Cacher - MainPeerAuthentications storage.Cacher - MainHeartbeats storage.Cacher - FullArchivePeerAuthentications storage.Cacher - FullArchiveHeartbeats storage.Cacher - ValidatorsInfo dataRetriever.ShardedDataCacherNotifier + Transactions dataRetriever.ShardedDataCacherNotifier + UnsignedTransactions dataRetriever.ShardedDataCacherNotifier + RewardTransactions dataRetriever.ShardedDataCacherNotifier + Headers dataRetriever.HeadersPool + MiniBlocks storage.Cacher + PeerChangesBlocks storage.Cacher + TrieNodes storage.Cacher + TrieNodesChunks storage.Cacher + CurrentBlockTransactions dataRetriever.TransactionCacher + CurrentEpochValidatorInfo dataRetriever.ValidatorInfoCacher + SmartContracts storage.Cacher + PeerAuthentications storage.Cacher + Heartbeats storage.Cacher + ValidatorsInfo dataRetriever.ShardedDataCacherNotifier } // NewDataPool creates a data pools holder object @@ -87,39 +81,31 @@ func NewDataPool(args DataPoolArgs) (*dataPool, error) { if check.IfNil(args.SmartContracts) { return nil, dataRetriever.ErrNilSmartContractsPool } - if check.IfNil(args.MainPeerAuthentications) { - return nil, fmt.Errorf("%w for main", dataRetriever.ErrNilPeerAuthenticationPool) - } - if check.IfNil(args.MainHeartbeats) { - return nil, fmt.Errorf("%w for main", dataRetriever.ErrNilHeartbeatPool) + if check.IfNil(args.PeerAuthentications) { + return nil, dataRetriever.ErrNilPeerAuthenticationPool } - if check.IfNil(args.FullArchivePeerAuthentications) { - return nil, fmt.Errorf("%w for full archive", dataRetriever.ErrNilPeerAuthenticationPool) - } - if check.IfNil(args.FullArchiveHeartbeats) { - return nil, fmt.Errorf("%w for full archive", dataRetriever.ErrNilHeartbeatPool) + if check.IfNil(args.Heartbeats) { + return nil, dataRetriever.ErrNilHeartbeatPool } if check.IfNil(args.ValidatorsInfo) { 
return nil, dataRetriever.ErrNilValidatorInfoPool } return &dataPool{ - transactions: args.Transactions, - unsignedTransactions: args.UnsignedTransactions, - rewardTransactions: args.RewardTransactions, - headers: args.Headers, - miniBlocks: args.MiniBlocks, - peerChangesBlocks: args.PeerChangesBlocks, - trieNodes: args.TrieNodes, - trieNodesChunks: args.TrieNodesChunks, - currBlockTxs: args.CurrentBlockTransactions, - currEpochValidatorInfo: args.CurrentEpochValidatorInfo, - smartContracts: args.SmartContracts, - mainPeerAuthentications: args.MainPeerAuthentications, - mainHeartbeats: args.MainHeartbeats, - fullArchivePeerAuthentications: args.FullArchivePeerAuthentications, - fullArchiveHeartbeats: args.FullArchiveHeartbeats, - validatorsInfo: args.ValidatorsInfo, + transactions: args.Transactions, + unsignedTransactions: args.UnsignedTransactions, + rewardTransactions: args.RewardTransactions, + headers: args.Headers, + miniBlocks: args.MiniBlocks, + peerChangesBlocks: args.PeerChangesBlocks, + trieNodes: args.TrieNodes, + trieNodesChunks: args.TrieNodesChunks, + currBlockTxs: args.CurrentBlockTransactions, + currEpochValidatorInfo: args.CurrentEpochValidatorInfo, + smartContracts: args.SmartContracts, + peerAuthentications: args.PeerAuthentications, + heartbeats: args.Heartbeats, + validatorsInfo: args.ValidatorsInfo, }, nil } @@ -180,22 +166,12 @@ func (dp *dataPool) SmartContracts() storage.Cacher { // PeerAuthentications returns the holder for peer authentications func (dp *dataPool) PeerAuthentications() storage.Cacher { - return dp.mainPeerAuthentications + return dp.peerAuthentications } // Heartbeats returns the holder for heartbeats func (dp *dataPool) Heartbeats() storage.Cacher { - return dp.mainHeartbeats -} - -// FullArchivePeerAuthentications returns the holder for full archive peer authentications -func (dp *dataPool) FullArchivePeerAuthentications() storage.Cacher { - return dp.fullArchivePeerAuthentications -} - -// FullArchiveHeartbeats returns 
the holder for full archive heartbeats -func (dp *dataPool) FullArchiveHeartbeats() storage.Cacher { - return dp.fullArchiveHeartbeats + return dp.heartbeats } // ValidatorsInfo returns the holder for validators info @@ -215,18 +191,9 @@ func (dp *dataPool) Close() error { } } - if !check.IfNil(dp.mainPeerAuthentications) { - log.Debug("closing main peer authentications data pool....") - err := dp.mainPeerAuthentications.Close() - if err != nil { - log.Error("failed to close peer authentications data pool", "error", err.Error()) - lastError = err - } - } - - if !check.IfNil(dp.fullArchivePeerAuthentications) { - log.Debug("closing full archive peer authentications data pool....") - err := dp.fullArchivePeerAuthentications.Close() + if !check.IfNil(dp.peerAuthentications) { + log.Debug("closing peer authentications data pool....") + err := dp.peerAuthentications.Close() if err != nil { log.Error("failed to close peer authentications data pool", "error", err.Error()) lastError = err diff --git a/dataRetriever/dataPool/dataPool_test.go b/dataRetriever/dataPool/dataPool_test.go index c5a33786789..31ee7156f64 100644 --- a/dataRetriever/dataPool/dataPool_test.go +++ b/dataRetriever/dataPool/dataPool_test.go @@ -16,22 +16,20 @@ import ( func createMockDataPoolArgs() dataPool.DataPoolArgs { return dataPool.DataPoolArgs{ - Transactions: testscommon.NewShardedDataStub(), - UnsignedTransactions: testscommon.NewShardedDataStub(), - RewardTransactions: testscommon.NewShardedDataStub(), - Headers: &mock.HeadersCacherStub{}, - MiniBlocks: testscommon.NewCacherStub(), - PeerChangesBlocks: testscommon.NewCacherStub(), - TrieNodes: testscommon.NewCacherStub(), - TrieNodesChunks: testscommon.NewCacherStub(), - CurrentBlockTransactions: &mock.TxForCurrentBlockStub{}, - CurrentEpochValidatorInfo: &mock.ValidatorInfoForCurrentEpochStub{}, - SmartContracts: testscommon.NewCacherStub(), - MainPeerAuthentications: testscommon.NewCacherStub(), - MainHeartbeats: testscommon.NewCacherStub(), 
- FullArchivePeerAuthentications: testscommon.NewCacherStub(), - FullArchiveHeartbeats: testscommon.NewCacherStub(), - ValidatorsInfo: testscommon.NewShardedDataStub(), + Transactions: testscommon.NewShardedDataStub(), + UnsignedTransactions: testscommon.NewShardedDataStub(), + RewardTransactions: testscommon.NewShardedDataStub(), + Headers: &mock.HeadersCacherStub{}, + MiniBlocks: testscommon.NewCacherStub(), + PeerChangesBlocks: testscommon.NewCacherStub(), + TrieNodes: testscommon.NewCacherStub(), + TrieNodesChunks: testscommon.NewCacherStub(), + CurrentBlockTransactions: &mock.TxForCurrentBlockStub{}, + CurrentEpochValidatorInfo: &mock.ValidatorInfoForCurrentEpochStub{}, + SmartContracts: testscommon.NewCacherStub(), + PeerAuthentications: testscommon.NewCacherStub(), + Heartbeats: testscommon.NewCacherStub(), + ValidatorsInfo: testscommon.NewShardedDataStub(), } } @@ -123,44 +121,22 @@ func TestNewDataPool_NilSmartContractsShouldErr(t *testing.T) { assert.Nil(t, tdp) } -func TestNewDataPool_NilMainPeerAuthenticationsShouldErr(t *testing.T) { +func TestNewDataPool_NilPeerAuthenticationsShouldErr(t *testing.T) { t.Parallel() args := createMockDataPoolArgs() - args.MainPeerAuthentications = nil + args.PeerAuthentications = nil tdp, err := dataPool.NewDataPool(args) assert.True(t, errors.Is(err, dataRetriever.ErrNilPeerAuthenticationPool)) assert.Nil(t, tdp) } -func TestNewDataPool_NilMainHeartbeatsShouldErr(t *testing.T) { +func TestNewDataPool_NilHeartbeatsShouldErr(t *testing.T) { t.Parallel() args := createMockDataPoolArgs() - args.MainHeartbeats = nil - tdp, err := dataPool.NewDataPool(args) - - assert.True(t, errors.Is(err, dataRetriever.ErrNilHeartbeatPool)) - assert.Nil(t, tdp) -} - -func TestNewDataPool_NilFullArchivePeerAuthenticationsShouldErr(t *testing.T) { - t.Parallel() - - args := createMockDataPoolArgs() - args.FullArchivePeerAuthentications = nil - tdp, err := dataPool.NewDataPool(args) - - assert.True(t, errors.Is(err, 
dataRetriever.ErrNilPeerAuthenticationPool)) - assert.Nil(t, tdp) -} - -func TestNewDataPool_NilFullArchiveHeartbeatsShouldErr(t *testing.T) { - t.Parallel() - - args := createMockDataPoolArgs() - args.FullArchiveHeartbeats = nil + args.Heartbeats = nil tdp, err := dataPool.NewDataPool(args) assert.True(t, errors.Is(err, dataRetriever.ErrNilHeartbeatPool)) @@ -231,10 +207,8 @@ func TestNewDataPool_OkValsShouldWork(t *testing.T) { assert.True(t, args.TrieNodes == tdp.TrieNodes()) assert.True(t, args.TrieNodesChunks == tdp.TrieNodesChunks()) assert.True(t, args.SmartContracts == tdp.SmartContracts()) - assert.True(t, args.MainPeerAuthentications == tdp.PeerAuthentications()) - assert.True(t, args.MainHeartbeats == tdp.Heartbeats()) - assert.True(t, args.FullArchivePeerAuthentications == tdp.FullArchivePeerAuthentications()) - assert.True(t, args.FullArchiveHeartbeats == tdp.FullArchiveHeartbeats()) + assert.True(t, args.PeerAuthentications == tdp.PeerAuthentications()) + assert.True(t, args.Heartbeats == tdp.Heartbeats()) assert.True(t, args.ValidatorsInfo == tdp.ValidatorsInfo()) } @@ -256,11 +230,11 @@ func TestNewDataPool_Close(t *testing.T) { err := tdp.Close() assert.Equal(t, expectedErr, err) }) - t.Run("main peer authentications close returns error", func(t *testing.T) { + t.Run("peer authentications close returns error", func(t *testing.T) { t.Parallel() args := createMockDataPoolArgs() - args.MainPeerAuthentications = &testscommon.CacherStub{ + args.PeerAuthentications = &testscommon.CacherStub{ CloseCalled: func() error { return expectedErr }, @@ -270,21 +244,7 @@ func TestNewDataPool_Close(t *testing.T) { err := tdp.Close() assert.Equal(t, expectedErr, err) }) - t.Run("full archive peer authentications close returns error", func(t *testing.T) { - t.Parallel() - - args := createMockDataPoolArgs() - args.FullArchivePeerAuthentications = &testscommon.CacherStub{ - CloseCalled: func() error { - return expectedErr - }, - } - tdp, _ := dataPool.NewDataPool(args) 
- assert.NotNil(t, tdp) - err := tdp.Close() - assert.Equal(t, expectedErr, err) - }) - t.Run("all fail", func(t *testing.T) { + t.Run("both fail", func(t *testing.T) { t.Parallel() tnExpectedErr := errors.New("tn expected error") @@ -298,18 +258,12 @@ func TestNewDataPool_Close(t *testing.T) { return tnExpectedErr }, } - args.MainPeerAuthentications = &testscommon.CacherStub{ + args.PeerAuthentications = &testscommon.CacherStub{ CloseCalled: func() error { paCalled = true return paExpectedErr }, } - args.FullArchivePeerAuthentications = &testscommon.CacherStub{ - CloseCalled: func() error { - paCalled = true - return faExpectedErr - }, - } tdp, _ := dataPool.NewDataPool(args) assert.NotNil(t, tdp) err := tdp.Close() @@ -321,31 +275,24 @@ func TestNewDataPool_Close(t *testing.T) { t.Parallel() args := createMockDataPoolArgs() - tnCalled, paCalled, faCalled := false, false, false + tnCalled, paCalled := false, false args.TrieNodes = &testscommon.CacherStub{ CloseCalled: func() error { tnCalled = true return nil }, } - args.MainPeerAuthentications = &testscommon.CacherStub{ + args.PeerAuthentications = &testscommon.CacherStub{ CloseCalled: func() error { paCalled = true return nil }, } - args.FullArchivePeerAuthentications = &testscommon.CacherStub{ - CloseCalled: func() error { - faCalled = true - return nil - }, - } tdp, _ := dataPool.NewDataPool(args) assert.NotNil(t, tdp) err := tdp.Close() assert.Nil(t, err) assert.True(t, tnCalled) assert.True(t, paCalled) - assert.True(t, faCalled) }) } diff --git a/dataRetriever/factory/dataPoolFactory.go b/dataRetriever/factory/dataPoolFactory.go index 1132c1fdf94..3f4cf038c4a 100644 --- a/dataRetriever/factory/dataPoolFactory.go +++ b/dataRetriever/factory/dataPoolFactory.go @@ -128,32 +128,18 @@ func NewDataPoolFromConfig(args ArgsDataPool) (dataRetriever.PoolsHolder, error) return nil, fmt.Errorf("%w while creating the cache for the smartcontract results", err) } - mainPeerAuthPool, err := 
cache.NewTimeCacher(cache.ArgTimeCacher{ + peerAuthPool, err := cache.NewTimeCacher(cache.ArgTimeCacher{ DefaultSpan: time.Duration(mainConfig.HeartbeatV2.PeerAuthenticationTimeBetweenSendsInSec) * time.Second * peerAuthExpiryMultiplier, CacheExpiry: peerAuthenticationCacheRefresh, }) if err != nil { - return nil, fmt.Errorf("%w while creating the cache for the main peer authentication messages", err) + return nil, fmt.Errorf("%w while creating the cache for the peer authentication messages", err) } cacherCfg = factory.GetCacherFromConfig(mainConfig.HeartbeatV2.HeartbeatPool) - mainHeartbeatPool, err := storageunit.NewCache(cacherCfg) + heartbeatPool, err := storageunit.NewCache(cacherCfg) if err != nil { - return nil, fmt.Errorf("%w while creating the cache for the main heartbeat messages", err) - } - - fullArchivePeerAuthPool, err := cache.NewTimeCacher(cache.ArgTimeCacher{ - DefaultSpan: time.Duration(mainConfig.HeartbeatV2.PeerAuthenticationTimeBetweenSendsInSec) * time.Second * peerAuthExpiryMultiplier, - CacheExpiry: peerAuthenticationCacheRefresh, - }) - if err != nil { - return nil, fmt.Errorf("%w while creating the cache for the full archive peer authentication messages", err) - } - - cacherCfg = factory.GetCacherFromConfig(mainConfig.HeartbeatV2.HeartbeatPool) - fullArchiveHeartbeatPool, err := storageunit.NewCache(cacherCfg) - if err != nil { - return nil, fmt.Errorf("%w while creating the cache for the full archive heartbeat messages", err) + return nil, fmt.Errorf("%w while creating the cache for the heartbeat messages", err) } validatorsInfo, err := shardedData.NewShardedData(dataRetriever.ValidatorsInfoPoolName, factory.GetCacherFromConfig(mainConfig.ValidatorInfoPool)) @@ -164,22 +150,20 @@ func NewDataPoolFromConfig(args ArgsDataPool) (dataRetriever.PoolsHolder, error) currBlockTransactions := dataPool.NewCurrentBlockTransactionsPool() currEpochValidatorInfo := dataPool.NewCurrentEpochValidatorInfoPool() dataPoolArgs := dataPool.DataPoolArgs{ - 
Transactions: txPool, - UnsignedTransactions: uTxPool, - RewardTransactions: rewardTxPool, - Headers: hdrPool, - MiniBlocks: txBlockBody, - PeerChangesBlocks: peerChangeBlockBody, - TrieNodes: adaptedTrieNodesStorage, - TrieNodesChunks: trieNodesChunks, - CurrentBlockTransactions: currBlockTransactions, - CurrentEpochValidatorInfo: currEpochValidatorInfo, - SmartContracts: smartContracts, - MainPeerAuthentications: mainPeerAuthPool, - MainHeartbeats: mainHeartbeatPool, - FullArchivePeerAuthentications: fullArchivePeerAuthPool, - FullArchiveHeartbeats: fullArchiveHeartbeatPool, - ValidatorsInfo: validatorsInfo, + Transactions: txPool, + UnsignedTransactions: uTxPool, + RewardTransactions: rewardTxPool, + Headers: hdrPool, + MiniBlocks: txBlockBody, + PeerChangesBlocks: peerChangeBlockBody, + TrieNodes: adaptedTrieNodesStorage, + TrieNodesChunks: trieNodesChunks, + CurrentBlockTransactions: currBlockTransactions, + CurrentEpochValidatorInfo: currEpochValidatorInfo, + SmartContracts: smartContracts, + PeerAuthentications: peerAuthPool, + Heartbeats: heartbeatPool, + ValidatorsInfo: validatorsInfo, } return dataPool.NewDataPool(dataPoolArgs) } diff --git a/dataRetriever/factory/dataPoolFactory_test.go b/dataRetriever/factory/dataPoolFactory_test.go index 820c4b226a6..c9ae8b60c43 100644 --- a/dataRetriever/factory/dataPoolFactory_test.go +++ b/dataRetriever/factory/dataPoolFactory_test.go @@ -135,7 +135,7 @@ func TestNewDataPoolFromConfig_BadConfigShouldErr(t *testing.T) { require.Nil(t, holder) fmt.Println(err) require.True(t, errors.Is(err, storage.ErrNotSupportedCacheType)) - require.True(t, strings.Contains(err.Error(), "the cache for the main heartbeat messages")) + require.True(t, strings.Contains(err.Error(), "the cache for the heartbeat messages")) args = getGoodArgs() args.Config.ValidatorInfoPool.Capacity = 0 diff --git a/dataRetriever/interface.go b/dataRetriever/interface.go index 06455c0adb1..4da2c3669db 100644 --- a/dataRetriever/interface.go +++ 
b/dataRetriever/interface.go @@ -244,8 +244,6 @@ type PoolsHolder interface { CurrentEpochValidatorInfo() ValidatorInfoCacher PeerAuthentications() storage.Cacher Heartbeats() storage.Cacher - FullArchivePeerAuthentications() storage.Cacher - FullArchiveHeartbeats() storage.Cacher ValidatorsInfo() ShardedDataCacherNotifier Close() error IsInterfaceNil() bool diff --git a/testscommon/dataRetriever/poolFactory.go b/testscommon/dataRetriever/poolFactory.go index cb5597d9be2..d4d0ed59fc8 100644 --- a/testscommon/dataRetriever/poolFactory.go +++ b/testscommon/dataRetriever/poolFactory.go @@ -114,24 +114,14 @@ func CreatePoolsHolder(numShards uint32, selfShard uint32) dataRetriever.PoolsHo smartContracts, err := storageunit.NewCache(cacherConfig) panicIfError("CreatePoolsHolder", err) - mainPeerAuthPool, err := cache.NewTimeCacher(cache.ArgTimeCacher{ + peerAuthPool, err := cache.NewTimeCacher(cache.ArgTimeCacher{ DefaultSpan: 60 * time.Second, CacheExpiry: 60 * time.Second, }) panicIfError("CreatePoolsHolder", err) cacherConfig = storageunit.CacheConfig{Capacity: 50000, Type: storageunit.LRUCache} - mainHeartbeatPool, err := storageunit.NewCache(cacherConfig) - panicIfError("CreatePoolsHolder", err) - - fullArchivePeerAuthPool, err := cache.NewTimeCacher(cache.ArgTimeCacher{ - DefaultSpan: 60 * time.Second, - CacheExpiry: 60 * time.Second, - }) - panicIfError("CreatePoolsHolder", err) - - cacherConfig = storageunit.CacheConfig{Capacity: 50000, Type: storageunit.LRUCache} - fullArchiveHeartbeatPool, err := storageunit.NewCache(cacherConfig) + heartbeatPool, err := storageunit.NewCache(cacherConfig) panicIfError("CreatePoolsHolder", err) validatorsInfo, err := shardedData.NewShardedData("validatorsInfoPool", storageunit.CacheConfig{ @@ -144,22 +134,20 @@ func CreatePoolsHolder(numShards uint32, selfShard uint32) dataRetriever.PoolsHo currentBlockTransactions := dataPool.NewCurrentBlockTransactionsPool() currentEpochValidatorInfo := 
dataPool.NewCurrentEpochValidatorInfoPool() dataPoolArgs := dataPool.DataPoolArgs{ - Transactions: txPool, - UnsignedTransactions: unsignedTxPool, - RewardTransactions: rewardsTxPool, - Headers: headersPool, - MiniBlocks: txBlockBody, - PeerChangesBlocks: peerChangeBlockBody, - TrieNodes: adaptedTrieNodesStorage, - TrieNodesChunks: trieNodesChunks, - CurrentBlockTransactions: currentBlockTransactions, - CurrentEpochValidatorInfo: currentEpochValidatorInfo, - SmartContracts: smartContracts, - MainPeerAuthentications: mainPeerAuthPool, - MainHeartbeats: mainHeartbeatPool, - FullArchivePeerAuthentications: fullArchivePeerAuthPool, - FullArchiveHeartbeats: fullArchiveHeartbeatPool, - ValidatorsInfo: validatorsInfo, + Transactions: txPool, + UnsignedTransactions: unsignedTxPool, + RewardTransactions: rewardsTxPool, + Headers: headersPool, + MiniBlocks: txBlockBody, + PeerChangesBlocks: peerChangeBlockBody, + TrieNodes: adaptedTrieNodesStorage, + TrieNodesChunks: trieNodesChunks, + CurrentBlockTransactions: currentBlockTransactions, + CurrentEpochValidatorInfo: currentEpochValidatorInfo, + SmartContracts: smartContracts, + PeerAuthentications: peerAuthPool, + Heartbeats: heartbeatPool, + ValidatorsInfo: validatorsInfo, } holder, err := dataPool.NewDataPool(dataPoolArgs) panicIfError("CreatePoolsHolder", err) @@ -217,45 +205,33 @@ func CreatePoolsHolderWithTxPool(txPool dataRetriever.ShardedDataCacherNotifier) }) panicIfError("CreatePoolsHolderWithTxPool", err) - mainPeerAuthPool, err := cache.NewTimeCacher(cache.ArgTimeCacher{ - DefaultSpan: peerAuthDuration, - CacheExpiry: peerAuthDuration, - }) - panicIfError("CreatePoolsHolderWithTxPool", err) - - cacherConfig = storageunit.CacheConfig{Capacity: 50000, Type: storageunit.LRUCache} - mainHeartbeatPool, err := storageunit.NewCache(cacherConfig) - panicIfError("CreatePoolsHolderWithTxPool", err) - - fullArchivePeerAuthPool, err := cache.NewTimeCacher(cache.ArgTimeCacher{ + peerAuthPool, err := 
cache.NewTimeCacher(cache.ArgTimeCacher{ DefaultSpan: peerAuthDuration, CacheExpiry: peerAuthDuration, }) panicIfError("CreatePoolsHolderWithTxPool", err) cacherConfig = storageunit.CacheConfig{Capacity: 50000, Type: storageunit.LRUCache} - fullArchiveHeartbeatPool, err := storageunit.NewCache(cacherConfig) + heartbeatPool, err := storageunit.NewCache(cacherConfig) panicIfError("CreatePoolsHolderWithTxPool", err) currentBlockTransactions := dataPool.NewCurrentBlockTransactionsPool() currentEpochValidatorInfo := dataPool.NewCurrentEpochValidatorInfoPool() dataPoolArgs := dataPool.DataPoolArgs{ - Transactions: txPool, - UnsignedTransactions: unsignedTxPool, - RewardTransactions: rewardsTxPool, - Headers: headersPool, - MiniBlocks: txBlockBody, - PeerChangesBlocks: peerChangeBlockBody, - TrieNodes: trieNodes, - TrieNodesChunks: trieNodesChunks, - CurrentBlockTransactions: currentBlockTransactions, - CurrentEpochValidatorInfo: currentEpochValidatorInfo, - SmartContracts: smartContracts, - MainPeerAuthentications: mainPeerAuthPool, - MainHeartbeats: mainHeartbeatPool, - FullArchivePeerAuthentications: fullArchivePeerAuthPool, - FullArchiveHeartbeats: fullArchiveHeartbeatPool, - ValidatorsInfo: validatorsInfo, + Transactions: txPool, + UnsignedTransactions: unsignedTxPool, + RewardTransactions: rewardsTxPool, + Headers: headersPool, + MiniBlocks: txBlockBody, + PeerChangesBlocks: peerChangeBlockBody, + TrieNodes: trieNodes, + TrieNodesChunks: trieNodesChunks, + CurrentBlockTransactions: currentBlockTransactions, + CurrentEpochValidatorInfo: currentEpochValidatorInfo, + SmartContracts: smartContracts, + PeerAuthentications: peerAuthPool, + Heartbeats: heartbeatPool, + ValidatorsInfo: validatorsInfo, } holder, err := dataPool.NewDataPool(dataPoolArgs) panicIfError("CreatePoolsHolderWithTxPool", err) diff --git a/testscommon/dataRetriever/poolsHolderMock.go b/testscommon/dataRetriever/poolsHolderMock.go index 52aab613d25..5c711addbb0 100644 --- 
a/testscommon/dataRetriever/poolsHolderMock.go +++ b/testscommon/dataRetriever/poolsHolderMock.go @@ -18,22 +18,20 @@ import ( // PoolsHolderMock - type PoolsHolderMock struct { - transactions dataRetriever.ShardedDataCacherNotifier - unsignedTransactions dataRetriever.ShardedDataCacherNotifier - rewardTransactions dataRetriever.ShardedDataCacherNotifier - headers dataRetriever.HeadersPool - miniBlocks storage.Cacher - peerChangesBlocks storage.Cacher - trieNodes storage.Cacher - trieNodesChunks storage.Cacher - smartContracts storage.Cacher - currBlockTxs dataRetriever.TransactionCacher - currEpochValidatorInfo dataRetriever.ValidatorInfoCacher - mainPeerAuthentications storage.Cacher - mainHeartbeats storage.Cacher - fullArchivePeerAuthentications storage.Cacher - fullArchiveHeartbeats storage.Cacher - validatorsInfo dataRetriever.ShardedDataCacherNotifier + transactions dataRetriever.ShardedDataCacherNotifier + unsignedTransactions dataRetriever.ShardedDataCacherNotifier + rewardTransactions dataRetriever.ShardedDataCacherNotifier + headers dataRetriever.HeadersPool + miniBlocks storage.Cacher + peerChangesBlocks storage.Cacher + trieNodes storage.Cacher + trieNodesChunks storage.Cacher + smartContracts storage.Cacher + currBlockTxs dataRetriever.TransactionCacher + currEpochValidatorInfo dataRetriever.ValidatorInfoCacher + peerAuthentications storage.Cacher + heartbeats storage.Cacher + validatorsInfo dataRetriever.ShardedDataCacherNotifier } // NewPoolsHolderMock - @@ -95,22 +93,13 @@ func NewPoolsHolderMock() *PoolsHolderMock { holder.smartContracts, err = storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 10000, Shards: 1, SizeInBytes: 0}) panicIfError("NewPoolsHolderMock", err) - holder.mainPeerAuthentications, err = cache.NewTimeCacher(cache.ArgTimeCacher{ + holder.peerAuthentications, err = cache.NewTimeCacher(cache.ArgTimeCacher{ DefaultSpan: 10 * time.Second, CacheExpiry: 10 * time.Second, }) 
panicIfError("NewPoolsHolderMock", err) - holder.mainHeartbeats, err = storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 10000, Shards: 1, SizeInBytes: 0}) - panicIfError("NewPoolsHolderMock", err) - - holder.fullArchivePeerAuthentications, err = cache.NewTimeCacher(cache.ArgTimeCacher{ - DefaultSpan: 10 * time.Second, - CacheExpiry: 10 * time.Second, - }) - panicIfError("NewPoolsHolderMock", err) - - holder.fullArchiveHeartbeats, err = storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 10000, Shards: 1, SizeInBytes: 0}) + holder.heartbeats, err = storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 10000, Shards: 1, SizeInBytes: 0}) panicIfError("NewPoolsHolderMock", err) holder.validatorsInfo, err = shardedData.NewShardedData("validatorsInfoPool", storageunit.CacheConfig{ @@ -190,22 +179,12 @@ func (holder *PoolsHolderMock) SmartContracts() storage.Cacher { // PeerAuthentications - func (holder *PoolsHolderMock) PeerAuthentications() storage.Cacher { - return holder.mainPeerAuthentications + return holder.peerAuthentications } // Heartbeats - func (holder *PoolsHolderMock) Heartbeats() storage.Cacher { - return holder.mainHeartbeats -} - -// FullArchivePeerAuthentications - -func (holder *PoolsHolderMock) FullArchivePeerAuthentications() storage.Cacher { - return holder.fullArchivePeerAuthentications -} - -// FullArchiveHeartbeats - -func (holder *PoolsHolderMock) FullArchiveHeartbeats() storage.Cacher { - return holder.fullArchiveHeartbeats + return holder.heartbeats } // ValidatorsInfo - @@ -223,15 +202,8 @@ func (holder *PoolsHolderMock) Close() error { } } - if !check.IfNil(holder.mainPeerAuthentications) { - err := holder.mainPeerAuthentications.Close() - if err != nil { - lastError = err - } - } - - if !check.IfNil(holder.fullArchivePeerAuthentications) { - err := holder.fullArchivePeerAuthentications.Close() + if !check.IfNil(holder.peerAuthentications) { + err 
:= holder.peerAuthentications.Close() if err != nil { lastError = err } diff --git a/testscommon/dataRetriever/poolsHolderStub.go b/testscommon/dataRetriever/poolsHolderStub.go index 8f87f878b79..106c8b96bb5 100644 --- a/testscommon/dataRetriever/poolsHolderStub.go +++ b/testscommon/dataRetriever/poolsHolderStub.go @@ -8,24 +8,22 @@ import ( // PoolsHolderStub - type PoolsHolderStub struct { - HeadersCalled func() dataRetriever.HeadersPool - TransactionsCalled func() dataRetriever.ShardedDataCacherNotifier - UnsignedTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier - RewardTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier - MiniBlocksCalled func() storage.Cacher - MetaBlocksCalled func() storage.Cacher - CurrBlockTxsCalled func() dataRetriever.TransactionCacher - CurrEpochValidatorInfoCalled func() dataRetriever.ValidatorInfoCacher - TrieNodesCalled func() storage.Cacher - TrieNodesChunksCalled func() storage.Cacher - PeerChangesBlocksCalled func() storage.Cacher - SmartContractsCalled func() storage.Cacher - PeerAuthenticationsCalled func() storage.Cacher - HeartbeatsCalled func() storage.Cacher - FullArchivePeerAuthenticationsCalled func() storage.Cacher - FullArchiveHeartbeatsCalled func() storage.Cacher - ValidatorsInfoCalled func() dataRetriever.ShardedDataCacherNotifier - CloseCalled func() error + HeadersCalled func() dataRetriever.HeadersPool + TransactionsCalled func() dataRetriever.ShardedDataCacherNotifier + UnsignedTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier + RewardTransactionsCalled func() dataRetriever.ShardedDataCacherNotifier + MiniBlocksCalled func() storage.Cacher + MetaBlocksCalled func() storage.Cacher + CurrBlockTxsCalled func() dataRetriever.TransactionCacher + CurrEpochValidatorInfoCalled func() dataRetriever.ValidatorInfoCacher + TrieNodesCalled func() storage.Cacher + TrieNodesChunksCalled func() storage.Cacher + PeerChangesBlocksCalled func() storage.Cacher + SmartContractsCalled 
func() storage.Cacher + PeerAuthenticationsCalled func() storage.Cacher + HeartbeatsCalled func() storage.Cacher + ValidatorsInfoCalled func() dataRetriever.ShardedDataCacherNotifier + CloseCalled func() error } // NewPoolsHolderStub - @@ -159,24 +157,6 @@ func (holder *PoolsHolderStub) Heartbeats() storage.Cacher { return testscommon.NewCacherStub() } -// FullArchivePeerAuthentications - -func (holder *PoolsHolderStub) FullArchivePeerAuthentications() storage.Cacher { - if holder.FullArchivePeerAuthenticationsCalled != nil { - return holder.FullArchivePeerAuthenticationsCalled() - } - - return testscommon.NewCacherStub() -} - -// FullArchiveHeartbeats - -func (holder *PoolsHolderStub) FullArchiveHeartbeats() storage.Cacher { - if holder.FullArchiveHeartbeatsCalled != nil { - return holder.FullArchiveHeartbeatsCalled() - } - - return testscommon.NewCacherStub() -} - // ValidatorsInfo - func (holder *PoolsHolderStub) ValidatorsInfo() dataRetriever.ShardedDataCacherNotifier { if holder.ValidatorsInfoCalled != nil { From ff292375d0f6c671193302ccadd901058f1f636a Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Fri, 9 Jun 2023 16:05:59 +0300 Subject: [PATCH 08/38] fix test --- dataRetriever/dataPool/dataPool_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/dataRetriever/dataPool/dataPool_test.go b/dataRetriever/dataPool/dataPool_test.go index 31ee7156f64..b948b7f2d44 100644 --- a/dataRetriever/dataPool/dataPool_test.go +++ b/dataRetriever/dataPool/dataPool_test.go @@ -249,7 +249,6 @@ func TestNewDataPool_Close(t *testing.T) { tnExpectedErr := errors.New("tn expected error") paExpectedErr := errors.New("pa expected error") - faExpectedErr := errors.New("fa expected error") args := createMockDataPoolArgs() tnCalled, paCalled := false, false args.TrieNodes = &testscommon.CacherStub{ @@ -267,7 +266,7 @@ func TestNewDataPool_Close(t *testing.T) { tdp, _ := dataPool.NewDataPool(args) assert.NotNil(t, tdp) err := tdp.Close() - assert.Equal(t, 
faExpectedErr, err) + assert.Equal(t, paExpectedErr, err) assert.True(t, tnCalled) assert.True(t, paCalled) }) From de007c1d1f9a4dc1926c0428e499ea363975eb90 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Fri, 9 Jun 2023 16:36:48 +0300 Subject: [PATCH 09/38] fixes after review --- factory/heartbeat/heartbeatV2Components.go | 6 +- .../heartbeat/heartbeatV2Components_test.go | 85 ++++++++++++++++++- heartbeat/sender/multikeyHeartbeatSender.go | 2 +- .../multikeyPeerAuthenticationSender.go | 1 - heartbeat/sender/peerAuthenticationSender.go | 1 - 5 files changed, 87 insertions(+), 8 deletions(-) diff --git a/factory/heartbeat/heartbeatV2Components.go b/factory/heartbeat/heartbeatV2Components.go index fc66bc9675a..4356b03a70f 100644 --- a/factory/heartbeat/heartbeatV2Components.go +++ b/factory/heartbeat/heartbeatV2Components.go @@ -310,15 +310,15 @@ func (hcf *heartbeatV2ComponentsFactory) Create() (*heartbeatV2Components, error } func (hcf *heartbeatV2ComponentsFactory) createTopicsIfNeeded() error { - err := createTopicIfNeededOnMessenger(hcf.networkComponents.NetworkMessenger()) + err := createTopicsIfNeededOnMessenger(hcf.networkComponents.NetworkMessenger()) if err != nil { return err } - return createTopicIfNeededOnMessenger(hcf.networkComponents.FullArchiveNetworkMessenger()) + return createTopicsIfNeededOnMessenger(hcf.networkComponents.FullArchiveNetworkMessenger()) } -func createTopicIfNeededOnMessenger(messenger p2p.Messenger) error { +func createTopicsIfNeededOnMessenger(messenger p2p.Messenger) error { if !messenger.HasTopic(common.PeerAuthenticationTopic) { err := messenger.CreateTopic(common.PeerAuthenticationTopic, true) if err != nil { diff --git a/factory/heartbeat/heartbeatV2Components_test.go b/factory/heartbeat/heartbeatV2Components_test.go index 093a17d719b..d350e29cb91 100644 --- a/factory/heartbeat/heartbeatV2Components_test.go +++ b/factory/heartbeat/heartbeatV2Components_test.go @@ -248,7 +248,7 @@ func 
TestHeartbeatV2Components_Create(t *testing.T) { t.Parallel() expectedErr := errors.New("expected error") - t.Run("messenger does not have PeerAuthenticationTopic and fails to create it", func(t *testing.T) { + t.Run("main messenger does not have PeerAuthenticationTopic and fails to create it", func(t *testing.T) { t.Parallel() args := createMockHeartbeatV2ComponentsFactoryArgs() @@ -279,7 +279,7 @@ func TestHeartbeatV2Components_Create(t *testing.T) { assert.Nil(t, hc) assert.Equal(t, expectedErr, err) }) - t.Run("messenger does not have HeartbeatV2Topic and fails to create it", func(t *testing.T) { + t.Run("main messenger does not have HeartbeatV2Topic and fails to create it", func(t *testing.T) { t.Parallel() args := createMockHeartbeatV2ComponentsFactoryArgs() @@ -306,6 +306,64 @@ func TestHeartbeatV2Components_Create(t *testing.T) { assert.Nil(t, hc) assert.Equal(t, expectedErr, err) }) + t.Run("full archive messenger does not have PeerAuthenticationTopic and fails to create it", func(t *testing.T) { + t.Parallel() + + args := createMockHeartbeatV2ComponentsFactoryArgs() + args.NetworkComponents = &testsMocks.NetworkComponentsStub{ + FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{ + HasTopicCalled: func(name string) bool { + if name == common.PeerAuthenticationTopic { + return false + } + assert.Fail(t, "should not have been called") + return true + }, + CreateTopicCalled: func(name string, createChannelForTopic bool) error { + if name == common.PeerAuthenticationTopic { + return expectedErr + } + assert.Fail(t, "should not have been called") + return nil + }, + }, + Messenger: &p2pmocks.MessengerStub{}, + } + hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + assert.NotNil(t, hcf) + assert.NoError(t, err) + + hc, err := hcf.Create() + assert.Nil(t, hc) + assert.Equal(t, expectedErr, err) + }) + t.Run("full archive messenger does not have HeartbeatV2Topic and fails to create it", func(t *testing.T) { + t.Parallel() + + args := 
createMockHeartbeatV2ComponentsFactoryArgs() + args.NetworkComponents = &testsMocks.NetworkComponentsStub{ + FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{ + HasTopicCalled: func(name string) bool { + return name != common.HeartbeatV2Topic + }, + CreateTopicCalled: func(name string, createChannelForTopic bool) error { + if name == common.HeartbeatV2Topic { + return expectedErr + } + assert.Fail(t, "should not have been called") + return nil + }, + }, + Messenger: &p2pmocks.MessengerStub{}, + } + hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) + assert.NotNil(t, hcf) + assert.NoError(t, err) + + hc, err := hcf.Create() + assert.Nil(t, hc) + assert.Equal(t, expectedErr, err) + }) t.Run("invalid config should error", func(t *testing.T) { t.Parallel() @@ -474,7 +532,22 @@ func TestHeartbeatV2Components_Create(t *testing.T) { } }() + topicsCreated := make(map[string][]string) args := createMockHeartbeatV2ComponentsFactoryArgs() + args.NetworkComponents = &testsMocks.NetworkComponentsStub{ + Messenger: &p2pmocks.MessengerStub{ + CreateTopicCalled: func(name string, createChannelForTopic bool) error { + topicsCreated["main"] = append(topicsCreated["main"], name) + return nil + }, + }, + FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{ + CreateTopicCalled: func(name string, createChannelForTopic bool) error { + topicsCreated["full_archive"] = append(topicsCreated["full_archive"], name) + return nil + }, + }, + } args.Prefs.Preferences.FullArchive = true // coverage only hcf, err := heartbeatComp.NewHeartbeatV2ComponentsFactory(args) assert.NotNil(t, hcf) @@ -484,6 +557,14 @@ func TestHeartbeatV2Components_Create(t *testing.T) { assert.NotNil(t, hc) assert.NoError(t, err) assert.NoError(t, hc.Close()) + + assert.Equal(t, 2, len(topicsCreated)) + assert.Equal(t, 2, len(topicsCreated["main"])) + assert.Equal(t, 2, len(topicsCreated["full_archive"])) + for _, messengerTopics := range topicsCreated { + assert.Contains(t, messengerTopics, 
common.HeartbeatV2Topic) + assert.Contains(t, messengerTopics, common.PeerAuthenticationTopic) + } }) } diff --git a/heartbeat/sender/multikeyHeartbeatSender.go b/heartbeat/sender/multikeyHeartbeatSender.go index e97c64e4ea4..4ac99e9d7e1 100644 --- a/heartbeat/sender/multikeyHeartbeatSender.go +++ b/heartbeat/sender/multikeyHeartbeatSender.go @@ -186,7 +186,7 @@ func (sender *multikeyHeartbeatSender) sendMessageForKey(pkBytes []byte) error { } sender.mainMessenger.BroadcastUsingPrivateKey(sender.topic, buff, pid, p2pSk) - sender.fullArchiveMessenger.BroadcastUsingPrivateKey(sender.topic, buff, pid, p2pSk) + sender.fullArchiveMessenger.BroadcastUsingPrivateKey(sender.topic, buff, pid, p2pSk) // TODO[Sorin]: rethink if we need to send this return nil } diff --git a/heartbeat/sender/multikeyPeerAuthenticationSender.go b/heartbeat/sender/multikeyPeerAuthenticationSender.go index 95401478c05..b1e9a62b71c 100644 --- a/heartbeat/sender/multikeyPeerAuthenticationSender.go +++ b/heartbeat/sender/multikeyPeerAuthenticationSender.go @@ -195,7 +195,6 @@ func (sender *multikeyPeerAuthenticationSender) sendData(pkBytes []byte, data [] return } sender.mainMessenger.BroadcastUsingPrivateKey(sender.topic, data, pid, p2pSk) - sender.fullArchiveMessenger.BroadcastUsingPrivateKey(sender.topic, data, pid, p2pSk) nextTimeToCheck, err := sender.managedPeersHolder.GetNextPeerAuthenticationTime(pkBytes) if err != nil { diff --git a/heartbeat/sender/peerAuthenticationSender.go b/heartbeat/sender/peerAuthenticationSender.go index bdb223cf99e..09387a13da9 100644 --- a/heartbeat/sender/peerAuthenticationSender.go +++ b/heartbeat/sender/peerAuthenticationSender.go @@ -129,7 +129,6 @@ func (sender *peerAuthenticationSender) execute() (error, bool) { "public key", pkBytes, "pid", sender.mainMessenger.ID().Pretty(), "timestamp", msgTimestamp) sender.mainMessenger.Broadcast(sender.topic, data) - sender.fullArchiveMessenger.Broadcast(sender.topic, data) return nil, isTriggered } From 
c657818ab5b539d6a442e294cf36a5758ddf2d6d Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Fri, 9 Jun 2023 16:48:19 +0300 Subject: [PATCH 10/38] fix tests --- heartbeat/sender/peerAuthenticationSender_test.go | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/heartbeat/sender/peerAuthenticationSender_test.go b/heartbeat/sender/peerAuthenticationSender_test.go index 69fc1625fc4..b20ccd9eee3 100644 --- a/heartbeat/sender/peerAuthenticationSender_test.go +++ b/heartbeat/sender/peerAuthenticationSender_test.go @@ -325,7 +325,6 @@ func TestPeerAuthenticationSender_execute(t *testing.T) { argsBase := createMockBaseArgs() mainBroadcastCalled := false - fullArchiveBroadcastCalled := false argsBase.mainMessenger = &p2pmocks.MessengerStub{ BroadcastCalled: func(topic string, buff []byte) { assert.Equal(t, argsBase.topic, topic) @@ -334,8 +333,7 @@ func TestPeerAuthenticationSender_execute(t *testing.T) { } argsBase.fullArchiveMessenger = &p2pmocks.MessengerStub{ BroadcastCalled: func(topic string, buff []byte) { - assert.Equal(t, argsBase.topic, topic) - fullArchiveBroadcastCalled = true + assert.Fail(t, "should have not been called") }, } @@ -345,7 +343,6 @@ func TestPeerAuthenticationSender_execute(t *testing.T) { err, isHardforkTriggered := senderInstance.execute() assert.Nil(t, err) assert.True(t, mainBroadcastCalled) - assert.True(t, fullArchiveBroadcastCalled) assert.False(t, isHardforkTriggered) }) t.Run("should work with some real components", func(t *testing.T) { @@ -520,7 +517,6 @@ func TestPeerAuthenticationSender_Execute(t *testing.T) { argsBase := createMockBaseArgs() counterMainBroadcast := 0 - counterFullArchiveroadcast := 0 argsBase.mainMessenger = &p2pmocks.MessengerStub{ BroadcastCalled: func(topic string, buff []byte) { counterMainBroadcast++ @@ -528,7 +524,7 @@ func TestPeerAuthenticationSender_Execute(t *testing.T) { } argsBase.fullArchiveMessenger = &p2pmocks.MessengerStub{ BroadcastCalled: func(topic string, buff []byte) { 
- counterFullArchiveroadcast++ + assert.Fail(t, "should have not been called") }, } args := createMockPeerAuthenticationSenderArgs(argsBase) @@ -550,7 +546,6 @@ func TestPeerAuthenticationSender_Execute(t *testing.T) { senderInstance.Execute() // validator senderInstance.Execute() // observer assert.Equal(t, 1, counterMainBroadcast) - assert.Equal(t, 1, counterFullArchiveroadcast) }) t.Run("execute worked, should set the hardfork time duration value", func(t *testing.T) { t.Parallel() From b06188a72ed118c6612f4ec6802d351ca0f474cc Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Mon, 12 Jun 2023 18:57:16 +0300 Subject: [PATCH 11/38] further integration for the full archive messenger small refactor on antiflood components creation created a new full history peer shard mapper create new interceptors for heartbeat to feed the new peer shard mapper --- common/constants.go | 3 + common/disabled/cache.go | 80 +++++ epochStart/bootstrap/common.go | 7 +- .../epochStartInterceptorsContainerFactory.go | 12 +- epochStart/bootstrap/fromLocalStorage.go | 8 +- epochStart/bootstrap/process.go | 65 +++- epochStart/bootstrap/process_test.go | 21 +- epochStart/bootstrap/storageProcess.go | 14 +- factory/bootstrap/bootstrapComponents.go | 3 +- factory/consensus/consensusComponents_test.go | 3 +- factory/heartbeat/heartbeatV2Components.go | 4 +- .../heartbeat/heartbeatV2Components_test.go | 6 +- factory/interface.go | 1 + factory/mock/processComponentsStub.go | 10 +- factory/network/networkComponents.go | 88 +++-- factory/network/networkComponentsHandler.go | 19 +- factory/processing/processComponents.go | 70 ++-- .../processing/processComponentsHandler.go | 24 +- factory/processing/processComponents_test.go | 11 +- go.sum | 154 -------- .../mock/processComponentsStub.go | 10 +- .../startInEpoch/startInEpoch_test.go | 8 +- .../multiShard/hardFork/hardFork_test.go | 6 +- integrationTests/oneNodeNetwork.go | 2 +- .../p2p/peersRating/peersRating_test.go | 22 +- 
.../resolvers/headers/headers_test.go | 24 +- .../resolvers/metablock/metablock_test.go | 24 +- .../resolvers/miniblocks/miniblocks_test.go | 32 +- .../resolvers/rewards/rewards_test.go | 4 +- .../smartContractsResults/scrs_test.go | 4 +- integrationTests/resolvers/testInitializer.go | 2 +- .../interceptedRequestHdr_test.go | 8 +- .../interceptedRequestTxBlockBody_test.go | 4 +- .../interceptedBulkTx_test.go | 2 +- .../interceptedBulkUnsignedTx_test.go | 2 +- .../interceptedResolvedTx_test.go | 8 +- .../interceptedResolvedUnsignedTx_test.go | 4 +- .../state/stateTrieSync/stateTrieSync_test.go | 17 +- .../sync/edgeCases/edgeCases_test.go | 2 +- integrationTests/testConsensusNode.go | 3 +- integrationTests/testHeartbeatNode.go | 27 +- integrationTests/testInitializer.go | 2 +- integrationTests/testNetwork.go | 2 +- integrationTests/testProcessorNode.go | 110 +++--- integrationTests/testSyncNode.go | 4 +- .../vm/esdt/nft/esdtNFT/esdtNft_test.go | 4 +- node/nodeHelper.go | 63 +++- node/node_test.go | 5 +- process/factory/interceptorscontainer/args.go | 8 +- .../baseInterceptorsContainerFactory.go | 246 +++++++++---- .../metaInterceptorsContainerFactory.go | 63 ++-- .../metaInterceptorsContainerFactory_test.go | 286 ++++++++------- .../shardInterceptorsContainerFactory.go | 61 ++-- .../shardInterceptorsContainerFactory_test.go | 331 +++++++++--------- process/mock/topicHandlerStub.go | 31 +- .../p2pAntifloodAndBlacklistFactory.go | 13 + testscommon/components/default.go | 3 +- 57 files changed, 1183 insertions(+), 867 deletions(-) create mode 100644 common/disabled/cache.go diff --git a/common/constants.go b/common/constants.go index 5cc7e7ccd4c..828540fc28d 100644 --- a/common/constants.go +++ b/common/constants.go @@ -75,6 +75,9 @@ const ConnectionTopic = "connection" // ValidatorInfoTopic is the topic used for validatorInfo signaling const ValidatorInfoTopic = "validatorInfo" +// FullArchiveTopicPrefix is the topic prefix used for specific topics that have different 
interceptors on full archive network +const FullArchiveTopicPrefix = "full_archive_" + // MetricCurrentRound is the metric for monitoring the current round of a node const MetricCurrentRound = "erd_current_round" diff --git a/common/disabled/cache.go b/common/disabled/cache.go new file mode 100644 index 00000000000..685e8046c79 --- /dev/null +++ b/common/disabled/cache.go @@ -0,0 +1,80 @@ +package disabled + +type cache struct { +} + +// NewCache returns a new disabled Cacher implementation +func NewCache() *cache { + return &cache{} +} + +// Clear does nothing as it is disabled +func (c *cache) Clear() { +} + +// Put returns false as it is disabled +func (c *cache) Put(_ []byte, _ interface{}, _ int) (evicted bool) { + return false +} + +// Get returns nil and false as it is disabled +func (c *cache) Get(_ []byte) (value interface{}, ok bool) { + return nil, false +} + +// Has returns false as it is disabled +func (c *cache) Has(_ []byte) bool { + return false +} + +// Peek returns nil and false as it is disabled +func (c *cache) Peek(_ []byte) (value interface{}, ok bool) { + return nil, false +} + +// HasOrAdd returns false and false as it is disabled +func (c *cache) HasOrAdd(_ []byte, _ interface{}, _ int) (has, added bool) { + return false, false +} + +// Remove does nothing as it is disabled +func (c *cache) Remove(_ []byte) { +} + +// Keys returns an empty slice as it is disabled +func (c *cache) Keys() [][]byte { + return make([][]byte, 0) +} + +// Len returns 0 as it is disabled +func (c *cache) Len() int { + return 0 +} + +// SizeInBytesContained returns 0 as it is disabled +func (c *cache) SizeInBytesContained() uint64 { + return 0 +} + +// MaxSize returns 0 as it is disabled +func (c *cache) MaxSize() int { + return 0 +} + +// RegisterHandler does nothing as it is disabled +func (c *cache) RegisterHandler(_ func(key []byte, value interface{}), _ string) { +} + +// UnRegisterHandler does nothing as it is disabled +func (c *cache) UnRegisterHandler(_ 
string) { +} + +// Close returns nil as it is disabled +func (c *cache) Close() error { + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (c *cache) IsInterfaceNil() bool { + return c == nil +} diff --git a/epochStart/bootstrap/common.go b/epochStart/bootstrap/common.go index 7b1bcec3c88..4db54c14382 100644 --- a/epochStart/bootstrap/common.go +++ b/epochStart/bootstrap/common.go @@ -13,8 +13,11 @@ func checkArguments(args ArgsEpochStartBootstrap) error { if check.IfNil(args.GenesisShardCoordinator) { return fmt.Errorf("%s: %w", baseErrorMessage, epochStart.ErrNilShardCoordinator) } - if check.IfNil(args.Messenger) { - return fmt.Errorf("%s: %w", baseErrorMessage, epochStart.ErrNilMessenger) + if check.IfNil(args.MainMessenger) { + return fmt.Errorf("%s on main network: %w", baseErrorMessage, epochStart.ErrNilMessenger) + } + if check.IfNil(args.FullArchiveMessenger) { + return fmt.Errorf("%s on full archive network: %w", baseErrorMessage, epochStart.ErrNilMessenger) } if check.IfNil(args.EconomicsData) { return fmt.Errorf("%s: %w", baseErrorMessage, epochStart.ErrNilEconomicsData) diff --git a/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go b/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go index 5ab4c67d1bb..d17c2b67f8b 100644 --- a/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go +++ b/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go @@ -13,6 +13,7 @@ import ( "github.com/multiversx/mx-chain-go/epochStart/bootstrap/disabled" disabledFactory "github.com/multiversx/mx-chain-go/factory/disabled" disabledGenesis "github.com/multiversx/mx-chain-go/genesis/process/disabled" + "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/factory/interceptorscontainer" "github.com/multiversx/mx-chain-go/sharding" @@ -29,7 +30,8 @@ type ArgsEpochStartInterceptorContainer 
struct { CryptoComponents process.CryptoComponentsHolder Config config.Config ShardCoordinator sharding.Coordinator - Messenger process.TopicHandler + MainMessenger process.TopicHandler + FullArchiveMessenger process.TopicHandler DataPool dataRetriever.PoolsHolder WhiteListHandler update.WhiteListHandler WhiteListerVerifiedTxs update.WhiteListHandler @@ -40,6 +42,7 @@ type ArgsEpochStartInterceptorContainer struct { HeaderIntegrityVerifier process.HeaderIntegrityVerifier RequestHandler process.RequestHandler SignaturesHandler process.SignaturesHandler + NodeOperationMode p2p.NodeOperation } // NewEpochStartInterceptorsContainer will return a real interceptors container factory, but with many disabled components @@ -72,6 +75,7 @@ func NewEpochStartInterceptorsContainer(args ArgsEpochStartInterceptorContainer) epochStartTrigger := disabled.NewEpochStartTrigger() // TODO: move the peerShardMapper creation before boostrapComponents peerShardMapper := disabled.NewPeerShardMapper() + fullArchivePeerShardMapper := disabled.NewPeerShardMapper() hardforkTrigger := disabledFactory.HardforkTrigger() containerFactoryArgs := interceptorscontainer.CommonInterceptorsContainerFactoryArgs{ @@ -80,7 +84,8 @@ func NewEpochStartInterceptorsContainer(args ArgsEpochStartInterceptorContainer) Accounts: accountsAdapter, ShardCoordinator: args.ShardCoordinator, NodesCoordinator: nodesCoordinator, - Messenger: args.Messenger, + MainMessenger: args.MainMessenger, + FullArchiveMessenger: args.FullArchiveMessenger, Store: storer, DataPool: args.DataPool, MaxTxNonceDeltaAllowed: common.MaxTxNonceDeltaAllowed, @@ -100,7 +105,8 @@ func NewEpochStartInterceptorsContainer(args ArgsEpochStartInterceptorContainer) PeerSignatureHandler: cryptoComponents.PeerSignatureHandler(), SignaturesHandler: args.SignaturesHandler, HeartbeatExpiryTimespanInSec: args.Config.HeartbeatV2.HeartbeatExpiryTimespanInSec, - PeerShardMapper: peerShardMapper, + MainPeerShardMapper: peerShardMapper, + 
FullArchivePeerShardMapper: fullArchivePeerShardMapper, HardforkTrigger: hardforkTrigger, } diff --git a/epochStart/bootstrap/fromLocalStorage.go b/epochStart/bootstrap/fromLocalStorage.go index 6905812d935..4f19f07a90a 100644 --- a/epochStart/bootstrap/fromLocalStorage.go +++ b/epochStart/bootstrap/fromLocalStorage.go @@ -161,7 +161,13 @@ func (e *epochStartBootstrap) prepareEpochFromStorage() (Parameters, error) { return Parameters{}, err } - err = e.messenger.CreateTopic(common.ConsensusTopic+e.shardCoordinator.CommunicationIdentifier(e.shardCoordinator.SelfId()), true) + consensusTopic := common.ConsensusTopic + e.shardCoordinator.CommunicationIdentifier(e.shardCoordinator.SelfId()) + err = e.mainMessenger.CreateTopic(consensusTopic, true) + if err != nil { + return Parameters{}, err + } + + err = e.fullArchiveMessenger.CreateTopic(consensusTopic, true) if err != nil { return Parameters{}, err } diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index 199f2a41935..75e94061133 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -30,7 +30,7 @@ import ( "github.com/multiversx/mx-chain-go/epochStart/bootstrap/types" factoryDisabled "github.com/multiversx/mx-chain-go/factory/disabled" "github.com/multiversx/mx-chain-go/heartbeat/sender" - disabledP2P "github.com/multiversx/mx-chain-go/p2p/disabled" + "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/preprocess" "github.com/multiversx/mx-chain-go/process/heartbeat/validator" @@ -93,7 +93,8 @@ type epochStartBootstrap struct { destinationShardAsObserver uint32 coreComponentsHolder process.CoreComponentsHolder cryptoComponentsHolder process.CryptoComponentsHolder - messenger Messenger + mainMessenger Messenger + fullArchiveMessenger Messenger generalConfig config.Config prefsConfig config.PreferencesConfig flagsConfig config.ContextFlagsConfig @@ -117,7 +118,7 @@ type 
epochStartBootstrap struct { bootstrapHeartbeatSender update.Closer trieSyncStatisticsProvider common.SizeSyncStatisticsHandler nodeProcessingMode common.NodeProcessingMode - + nodeOperationMode p2p.NodeOperation // created components requestHandler process.RequestHandler interceptorContainer process.InterceptorsContainer @@ -162,7 +163,8 @@ type ArgsEpochStartBootstrap struct { CoreComponentsHolder process.CoreComponentsHolder CryptoComponentsHolder process.CryptoComponentsHolder DestinationShardAsObserver uint32 - Messenger Messenger + MainMessenger Messenger + FullArchiveMessenger Messenger GeneralConfig config.Config PrefsConfig config.PreferencesConfig FlagsConfig config.ContextFlagsConfig @@ -201,7 +203,8 @@ func NewEpochStartBootstrap(args ArgsEpochStartBootstrap) (*epochStartBootstrap, epochStartProvider := &epochStartBootstrap{ coreComponentsHolder: args.CoreComponentsHolder, cryptoComponentsHolder: args.CryptoComponentsHolder, - messenger: args.Messenger, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, generalConfig: args.GeneralConfig, prefsConfig: args.PrefsConfig, flagsConfig: args.FlagsConfig, @@ -228,6 +231,11 @@ func NewEpochStartBootstrap(args ArgsEpochStartBootstrap) (*epochStartBootstrap, shardCoordinator: args.GenesisShardCoordinator, trieSyncStatisticsProvider: args.TrieSyncStatisticsProvider, nodeProcessingMode: args.NodeProcessingMode, + nodeOperationMode: p2p.NormalOperation, + } + + if epochStartProvider.prefsConfig.FullArchive { + epochStartProvider.nodeOperationMode = p2p.FullArchiveMode } whiteListCache, err := storageunit.NewCache(storageFactory.GetCacherFromConfig(epochStartProvider.generalConfig.WhiteListPool)) @@ -418,10 +426,16 @@ func (e *epochStartBootstrap) bootstrapFromLocalStorage() (Parameters, error) { func (e *epochStartBootstrap) cleanupOnBootstrapFinish() { log.Debug("unregistering all message processor and un-joining all topics") - errMessenger := 
e.messenger.UnregisterAllMessageProcessors() + errMessenger := e.mainMessenger.UnregisterAllMessageProcessors() + log.LogIfError(errMessenger) + + errMessenger = e.mainMessenger.UnJoinAllTopics() + log.LogIfError(errMessenger) + + errMessenger = e.fullArchiveMessenger.UnregisterAllMessageProcessors() log.LogIfError(errMessenger) - errMessenger = e.messenger.UnJoinAllTopics() + errMessenger = e.fullArchiveMessenger.UnJoinAllTopics() log.LogIfError(errMessenger) e.closeTrieNodes() @@ -511,7 +525,7 @@ func (e *epochStartBootstrap) prepareComponentsToSyncFromNetwork() error { epochStartConfig := e.generalConfig.EpochStartConfig metaBlockProcessor, err := NewEpochStartMetaBlockProcessor( - e.messenger, + e.mainMessenger, e.requestHandler, e.coreComponentsHolder.InternalMarshalizer(), e.coreComponentsHolder.Hasher(), @@ -527,7 +541,7 @@ func (e *epochStartBootstrap) prepareComponentsToSyncFromNetwork() error { CoreComponentsHolder: e.coreComponentsHolder, CryptoComponentsHolder: e.cryptoComponentsHolder, RequestHandler: e.requestHandler, - Messenger: e.messenger, + Messenger: e.mainMessenger, ShardCoordinator: e.shardCoordinator, EconomicsData: e.economicsData, WhitelistHandler: e.whiteListHandler, @@ -550,14 +564,16 @@ func (e *epochStartBootstrap) createSyncers() error { CryptoComponents: e.cryptoComponentsHolder, Config: e.generalConfig, ShardCoordinator: e.shardCoordinator, - Messenger: e.messenger, + MainMessenger: e.mainMessenger, + FullArchiveMessenger: e.fullArchiveMessenger, DataPool: e.dataPool, WhiteListHandler: e.whiteListHandler, WhiteListerVerifiedTxs: e.whiteListerVerifiedTxs, ArgumentsParser: e.argumentsParser, HeaderIntegrityVerifier: e.headerIntegrityVerifier, RequestHandler: e.requestHandler, - SignaturesHandler: e.messenger, + SignaturesHandler: e.mainMessenger, + NodeOperationMode: e.nodeOperationMode, } e.interceptorContainer, err = factoryInterceptors.NewEpochStartInterceptorsContainer(args) @@ -672,7 +688,13 @@ func (e *epochStartBootstrap) 
requestAndProcessing() (Parameters, error) { } log.Debug("start in epoch bootstrap: shardCoordinator", "numOfShards", e.baseData.numberOfShards, "shardId", e.baseData.shardId) - err = e.messenger.CreateTopic(common.ConsensusTopic+e.shardCoordinator.CommunicationIdentifier(e.shardCoordinator.SelfId()), true) + consensusTopic := common.ConsensusTopic + e.shardCoordinator.CommunicationIdentifier(e.shardCoordinator.SelfId()) + err = e.mainMessenger.CreateTopic(consensusTopic, true) + if err != nil { + return Parameters{}, err + } + + err = e.fullArchiveMessenger.CreateTopic(consensusTopic, true) if err != nil { return Parameters{}, err } @@ -1191,7 +1213,7 @@ func (e *epochStartBootstrap) createResolversContainer() error { log.Debug("epochStartBootstrap.createRequestHandler", "shard", e.shardCoordinator.SelfId()) resolversContainerArgs := resolverscontainer.FactoryArgs{ ShardCoordinator: e.shardCoordinator, - Messenger: e.messenger, + Messenger: e.mainMessenger, Store: storageService, Marshalizer: e.coreComponentsHolder.InternalMarshalizer(), DataPools: e.dataPool, @@ -1222,7 +1244,7 @@ func (e *epochStartBootstrap) createRequestHandler() error { requestersContainerArgs := requesterscontainer.FactoryArgs{ RequesterConfig: e.generalConfig.Requesters, ShardCoordinator: e.shardCoordinator, - Messenger: e.messenger, + Messenger: e.mainMessenger, Marshaller: e.coreComponentsHolder.InternalMarshalizer(), Uint64ByteSliceConverter: uint64ByteSlice.NewBigEndianConverter(), OutputAntifloodHandler: disabled.NewAntiFloodHandler(), @@ -1293,8 +1315,15 @@ func (e *epochStartBootstrap) createHeartbeatSender() error { } heartbeatTopic := common.HeartbeatV2Topic + e.shardCoordinator.CommunicationIdentifier(e.shardCoordinator.SelfId()) - if !e.messenger.HasTopic(heartbeatTopic) { - err = e.messenger.CreateTopic(heartbeatTopic, true) + if !e.mainMessenger.HasTopic(heartbeatTopic) { + err = e.mainMessenger.CreateTopic(heartbeatTopic, true) + if err != nil { + return err + } + } + + if 
!e.fullArchiveMessenger.HasTopic(heartbeatTopic) { + err = e.fullArchiveMessenger.CreateTopic(heartbeatTopic, true) if err != nil { return err } @@ -1306,8 +1335,8 @@ func (e *epochStartBootstrap) createHeartbeatSender() error { } heartbeatCfg := e.generalConfig.HeartbeatV2 argsHeartbeatSender := sender.ArgBootstrapSender{ - MainMessenger: e.messenger, - FullArchiveMessenger: disabledP2P.NewNetworkMessenger(), // TODO[Sorin]: pass full archive messenger + MainMessenger: e.mainMessenger, + FullArchiveMessenger: e.fullArchiveMessenger, Marshaller: e.coreComponentsHolder.InternalMarshalizer(), HeartbeatTopic: heartbeatTopic, HeartbeatTimeBetweenSends: time.Second * time.Duration(heartbeatCfg.HeartbeatTimeBetweenSendsDuringBootstrapInSec), diff --git a/epochStart/bootstrap/process_test.go b/epochStart/bootstrap/process_test.go index 159f069874a..952ee59da51 100644 --- a/epochStart/bootstrap/process_test.go +++ b/epochStart/bootstrap/process_test.go @@ -102,11 +102,12 @@ func createMockEpochStartBootstrapArgs( ScheduledSCRsStorer: genericMocks.NewStorerMock(), CoreComponentsHolder: coreMock, CryptoComponentsHolder: cryptoMock, - Messenger: &p2pmocks.MessengerStub{ + MainMessenger: &p2pmocks.MessengerStub{ ConnectedPeersCalled: func() []core.PeerID { return []core.PeerID{"peer0", "peer1", "peer2", "peer3", "peer4", "peer5"} }, }, + FullArchiveMessenger: &p2pmocks.MessengerStub{}, GeneralConfig: config.Config{ MiniBlocksStorage: generalCfg.MiniBlocksStorage, PeerBlockBodyStorage: generalCfg.PeerBlockBodyStorage, @@ -243,11 +244,21 @@ func TestNewEpochStartBootstrap_NilArgsChecks(t *testing.T) { require.Nil(t, epochStartProvider) require.True(t, errors.Is(err, epochStart.ErrNilShardCoordinator)) }) - t.Run("nil messenger", func(t *testing.T) { + t.Run("nil main messenger", func(t *testing.T) { t.Parallel() args := createMockEpochStartBootstrapArgs(createComponentsForEpochStart()) - args.Messenger = nil + args.MainMessenger = nil + + epochStartProvider, err := 
NewEpochStartBootstrap(args) + require.Nil(t, epochStartProvider) + require.True(t, errors.Is(err, epochStart.ErrNilMessenger)) + }) + t.Run("nil full archive messenger", func(t *testing.T) { + t.Parallel() + + args := createMockEpochStartBootstrapArgs(createComponentsForEpochStart()) + args.FullArchiveMessenger = nil epochStartProvider, err := NewEpochStartBootstrap(args) require.Nil(t, epochStartProvider) @@ -1672,14 +1683,14 @@ func TestRequestAndProcessing(t *testing.T) { assert.Error(t, err) assert.True(t, strings.Contains(err.Error(), nodesCoordinator.ErrInvalidNumberOfShards.Error())) }) - t.Run("failed to create messenger topic", func(t *testing.T) { + t.Run("failed to create main messenger topic", func(t *testing.T) { t.Parallel() args := createMockEpochStartBootstrapArgs(createComponentsForEpochStart()) args.GenesisNodesConfig = getNodesConfigMock(1) expectedErr := errors.New("expected error") - args.Messenger = &p2pmocks.MessengerStub{ + args.MainMessenger = &p2pmocks.MessengerStub{ CreateTopicCalled: func(topic string, identifier bool) error { return expectedErr }, diff --git a/epochStart/bootstrap/storageProcess.go b/epochStart/bootstrap/storageProcess.go index ac902d0c1f3..898af0ffb17 100644 --- a/epochStart/bootstrap/storageProcess.go +++ b/epochStart/bootstrap/storageProcess.go @@ -167,7 +167,7 @@ func (sesb *storageEpochStartBootstrap) prepareComponentsToSync() error { } metablockProcessor, err := NewStorageEpochStartMetaBlockProcessor( - sesb.messenger, + sesb.mainMessenger, sesb.requestHandler, sesb.coreComponentsHolder.InternalMarshalizer(), sesb.coreComponentsHolder.Hasher(), @@ -180,7 +180,7 @@ func (sesb *storageEpochStartBootstrap) prepareComponentsToSync() error { CoreComponentsHolder: sesb.coreComponentsHolder, CryptoComponentsHolder: sesb.cryptoComponentsHolder, RequestHandler: sesb.requestHandler, - Messenger: sesb.messenger, + Messenger: sesb.mainMessenger, ShardCoordinator: sesb.shardCoordinator, EconomicsData: sesb.economicsData, 
WhitelistHandler: sesb.whiteListHandler, @@ -245,7 +245,7 @@ func (sesb *storageEpochStartBootstrap) createStorageRequesters() error { WorkingDirectory: sesb.importDbConfig.ImportDBWorkingDir, Hasher: sesb.coreComponentsHolder.Hasher(), ShardCoordinator: shardCoordinator, - Messenger: sesb.messenger, + Messenger: sesb.mainMessenger, Store: sesb.store, Marshalizer: sesb.coreComponentsHolder.InternalMarshalizer(), Uint64ByteSliceConverter: sesb.coreComponentsHolder.Uint64ByteSliceConverter(), @@ -329,7 +329,13 @@ func (sesb *storageEpochStartBootstrap) requestAndProcessFromStorage() (Paramete } log.Debug("start in epoch bootstrap: shardCoordinator", "numOfShards", sesb.baseData.numberOfShards, "shardId", sesb.baseData.shardId) - err = sesb.messenger.CreateTopic(common.ConsensusTopic+sesb.shardCoordinator.CommunicationIdentifier(sesb.shardCoordinator.SelfId()), true) + consensusTopic := common.ConsensusTopic + sesb.shardCoordinator.CommunicationIdentifier(sesb.shardCoordinator.SelfId()) + err = sesb.mainMessenger.CreateTopic(consensusTopic, true) + if err != nil { + return Parameters{}, err + } + + err = sesb.fullArchiveMessenger.CreateTopic(consensusTopic, true) if err != nil { return Parameters{}, err } diff --git a/factory/bootstrap/bootstrapComponents.go b/factory/bootstrap/bootstrapComponents.go index 9f580416853..1c3e834a16f 100644 --- a/factory/bootstrap/bootstrapComponents.go +++ b/factory/bootstrap/bootstrapComponents.go @@ -188,7 +188,8 @@ func (bcf *bootstrapComponentsFactory) Create() (*bootstrapComponents, error) { epochStartBootstrapArgs := bootstrap.ArgsEpochStartBootstrap{ CoreComponentsHolder: bcf.coreComponents, CryptoComponentsHolder: bcf.cryptoComponents, - Messenger: bcf.networkComponents.NetworkMessenger(), + MainMessenger: bcf.networkComponents.NetworkMessenger(), + FullArchiveMessenger: bcf.networkComponents.FullArchiveNetworkMessenger(), GeneralConfig: bcf.config, PrefsConfig: bcf.prefConfig.Preferences, FlagsConfig: bcf.flagsConfig, diff 
--git a/factory/consensus/consensusComponents_test.go b/factory/consensus/consensusComponents_test.go index 54f6a4cf4de..67f551acf1d 100644 --- a/factory/consensus/consensusComponents_test.go +++ b/factory/consensus/consensusComponents_test.go @@ -117,7 +117,8 @@ func createMockConsensusComponentsFactoryArgs() consensusComp.ConsensusComponent NodeRedundancyHandlerInternal: &testsMocks.RedundancyHandlerStub{}, HardforkTriggerField: &testscommon.HardforkTriggerStub{}, ReqHandler: &testscommon.RequestHandlerStub{}, - PeerMapper: &testsMocks.PeerShardMapperStub{}, + MainPeerMapper: &testsMocks.PeerShardMapperStub{}, + FullArchivePeerMapper: &testsMocks.PeerShardMapperStub{}, ShardCoord: testscommon.NewMultiShardsCoordinatorMock(2), RoundHandlerField: &testscommon.RoundHandlerMock{ TimeDurationCalled: func() time.Duration { diff --git a/factory/heartbeat/heartbeatV2Components.go b/factory/heartbeat/heartbeatV2Components.go index 4356b03a70f..b1ac0180b30 100644 --- a/factory/heartbeat/heartbeatV2Components.go +++ b/factory/heartbeat/heartbeatV2Components.go @@ -262,7 +262,7 @@ func (hcf *heartbeatV2ComponentsFactory) Create() (*heartbeatV2Components, error argsFullArchiveDirectConnectionProcessor := processor.ArgsDirectConnectionProcessor{ TimeToReadDirectConnections: time.Second * time.Duration(cfg.TimeToReadDirectConnectionsInSec), Messenger: hcf.networkComponents.FullArchiveNetworkMessenger(), - PeerShardMapper: hcf.processComponents.PeerShardMapper(), // TODO[Sorin]: replace this with the full archive psm + PeerShardMapper: hcf.processComponents.FullArchivePeerShardMapper(), ShardCoordinator: hcf.processComponents.ShardCoordinator(), BaseIntraShardTopic: common.ConsensusTopic, BaseCrossShardTopic: processFactory.MiniBlocksTopic, @@ -287,7 +287,7 @@ func (hcf *heartbeatV2ComponentsFactory) Create() (*heartbeatV2Components, error argsFullArchiveCrossShardPeerTopicNotifier := monitor.ArgsCrossShardPeerTopicNotifier{ ShardCoordinator: 
hcf.processComponents.ShardCoordinator(), - PeerShardMapper: hcf.processComponents.PeerShardMapper(), // TODO[Sorin]: replace this with the full archive psm + PeerShardMapper: hcf.processComponents.FullArchivePeerShardMapper(), } fullArchiveCrossShardPeerTopicNotifier, err := monitor.NewCrossShardPeerTopicNotifier(argsFullArchiveCrossShardPeerTopicNotifier) if err != nil { diff --git a/factory/heartbeat/heartbeatV2Components_test.go b/factory/heartbeat/heartbeatV2Components_test.go index d350e29cb91..f013294a7d1 100644 --- a/factory/heartbeat/heartbeatV2Components_test.go +++ b/factory/heartbeat/heartbeatV2Components_test.go @@ -80,7 +80,8 @@ func createMockHeartbeatV2ComponentsFactoryArgs() heartbeatComp.ArgHeartbeatV2Co NodeRedundancyHandlerInternal: &testsMocks.RedundancyHandlerStub{}, HardforkTriggerField: &testscommon.HardforkTriggerStub{}, ReqHandler: &testscommon.RequestHandlerStub{}, - PeerMapper: &testsMocks.PeerShardMapperStub{}, + MainPeerMapper: &testsMocks.PeerShardMapperStub{}, + FullArchivePeerMapper: &testsMocks.PeerShardMapperStub{}, ShardCoord: &testscommon.ShardsCoordinatorMock{}, }, StatusCoreComponents: &factory.StatusCoreComponentsStub{ @@ -485,7 +486,8 @@ func TestHeartbeatV2Components_Create(t *testing.T) { EpochNotifier: processComp.EpochStartNotifier(), NodeRedundancyHandlerInternal: processComp.NodeRedundancyHandler(), HardforkTriggerField: processComp.HardforkTrigger(), - PeerMapper: processComp.PeerShardMapper(), + MainPeerMapper: processComp.PeerShardMapper(), + FullArchivePeerMapper: processComp.FullArchivePeerShardMapper(), ShardCoordinatorCalled: func() sharding.Coordinator { cnt++ if cnt > 3 { diff --git a/factory/interface.go b/factory/interface.go index 3038d28806c..9dd05c13f69 100644 --- a/factory/interface.go +++ b/factory/interface.go @@ -289,6 +289,7 @@ type ProcessComponentsHolder interface { TxLogsProcessor() process.TransactionLogProcessorDatabase HeaderConstructionValidator() process.HeaderConstructionValidator 
PeerShardMapper() process.NetworkShardingCollector + FullArchivePeerShardMapper() process.NetworkShardingCollector FallbackHeaderValidator() process.FallbackHeaderValidator TransactionSimulatorProcessor() TransactionSimulatorProcessor WhiteListHandler() process.WhiteListHandler diff --git a/factory/mock/processComponentsStub.go b/factory/mock/processComponentsStub.go index a756b44f8c6..5f90716cea4 100644 --- a/factory/mock/processComponentsStub.go +++ b/factory/mock/processComponentsStub.go @@ -37,7 +37,8 @@ type ProcessComponentsMock struct { ReqHandler process.RequestHandler TxLogsProcess process.TransactionLogProcessorDatabase HeaderConstructValidator process.HeaderConstructionValidator - PeerMapper process.NetworkShardingCollector + MainPeerMapper process.NetworkShardingCollector + FullArchivePeerMapper process.NetworkShardingCollector TxSimulatorProcessor factory.TransactionSimulatorProcessor FallbackHdrValidator process.FallbackHeaderValidator WhiteListHandlerInternal process.WhiteListHandler @@ -178,7 +179,12 @@ func (pcm *ProcessComponentsMock) HeaderConstructionValidator() process.HeaderCo // PeerShardMapper - func (pcm *ProcessComponentsMock) PeerShardMapper() process.NetworkShardingCollector { - return pcm.PeerMapper + return pcm.MainPeerMapper +} + +// FullArchivePeerShardMapper - +func (pcm *ProcessComponentsMock) FullArchivePeerShardMapper() process.NetworkShardingCollector { + return pcm.FullArchivePeerMapper } // FallbackHeaderValidator - diff --git a/factory/network/networkComponents.go b/factory/network/networkComponents.go index e34993b5ac7..a3cfc467c88 100644 --- a/factory/network/networkComponents.go +++ b/factory/network/networkComponents.go @@ -10,7 +10,6 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/consensus" - "github.com/multiversx/mx-chain-go/debug/antiflood" "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory" 
"github.com/multiversx/mx-chain-go/factory/disabled" @@ -144,42 +143,65 @@ func (ncf *networkComponentsFactory) Create() (*networkComponents, error) { } }() - var antiFloodComponents *antifloodFactory.AntiFloodComponents - antiFloodComponents, err = antifloodFactory.NewP2PAntiFloodComponents(ctx, ncf.mainConfig, ncf.statusHandler, mainNetworkComp.netMessenger.ID()) + antiFloodComponents, inputAntifloodHandler, outputAntifloodHandler, peerHonestyHandler, err := ncf.createAntifloodComponents(ctx, mainNetworkComp.netMessenger.ID()) if err != nil { return nil, err } - // TODO: move to NewP2PAntiFloodComponents.initP2PAntiFloodComponents - if ncf.mainConfig.Debug.Antiflood.Enabled { - var debugger process.AntifloodDebugger - debugger, err = antiflood.NewAntifloodDebugger(ncf.mainConfig.Debug.Antiflood) - if err != nil { - return nil, err - } + err = mainNetworkComp.netMessenger.Bootstrap() + if err != nil { + return nil, err + } - err = antiFloodComponents.AntiFloodHandler.SetDebugger(debugger) - if err != nil { - return nil, err - } + mainNetworkComp.netMessenger.WaitForConnections(ncf.bootstrapWaitTime, ncf.mainP2PConfig.Node.MinNumPeersToWaitForOnBootstrap) + + err = fullArchiveNetworkComp.netMessenger.Bootstrap() + if err != nil { + return nil, err + } + + return &networkComponents{ + mainNetworkHolder: mainNetworkComp, + fullArchiveNetworkHolder: fullArchiveNetworkComp, + inputAntifloodHandler: inputAntifloodHandler, + outputAntifloodHandler: outputAntifloodHandler, + pubKeyTimeCacher: antiFloodComponents.PubKeysCacher, + topicFloodPreventer: antiFloodComponents.TopicPreventer, + floodPreventers: antiFloodComponents.FloodPreventers, + peerBlackListHandler: antiFloodComponents.BlacklistHandler, + antifloodConfig: ncf.mainConfig.Antiflood, + peerHonestyHandler: peerHonestyHandler, + peersHolder: peersHolder, + closeFunc: cancelFunc, + }, nil +} + +func (ncf *networkComponentsFactory) createAntifloodComponents( + ctx context.Context, + currentPid core.PeerID, +) 
(*antifloodFactory.AntiFloodComponents, factory.P2PAntifloodHandler, factory.P2PAntifloodHandler, consensus.PeerHonestyHandler, error) { + var antiFloodComponents *antifloodFactory.AntiFloodComponents + antiFloodComponents, err := antifloodFactory.NewP2PAntiFloodComponents(ctx, ncf.mainConfig, ncf.statusHandler, currentPid) + if err != nil { + return nil, nil, nil, nil, err } inputAntifloodHandler, ok := antiFloodComponents.AntiFloodHandler.(factory.P2PAntifloodHandler) if !ok { err = errors.ErrWrongTypeAssertion - return nil, fmt.Errorf("%w when casting input antiflood handler to P2PAntifloodHandler", err) + return nil, nil, nil, nil, fmt.Errorf("%w when casting input antiflood handler to P2PAntifloodHandler", err) } var outAntifloodHandler process.P2PAntifloodHandler outAntifloodHandler, err = antifloodFactory.NewP2POutputAntiFlood(ctx, ncf.mainConfig) if err != nil { - return nil, err + return nil, nil, nil, nil, err } outputAntifloodHandler, ok := outAntifloodHandler.(factory.P2PAntifloodHandler) if !ok { err = errors.ErrWrongTypeAssertion - return nil, fmt.Errorf("%w when casting output antiflood handler to P2PAntifloodHandler", err) + return nil, nil, nil, nil, fmt.Errorf("%w when casting output antiflood handler to P2PAntifloodHandler", err) } var peerHonestyHandler consensus.PeerHonestyHandler @@ -189,35 +211,10 @@ func (ncf *networkComponentsFactory) Create() (*networkComponents, error) { antiFloodComponents.PubKeysCacher, ) if err != nil { - return nil, err - } - - err = mainNetworkComp.netMessenger.Bootstrap() - if err != nil { - return nil, err - } - - mainNetworkComp.netMessenger.WaitForConnections(ncf.bootstrapWaitTime, ncf.mainP2PConfig.Node.MinNumPeersToWaitForOnBootstrap) - - err = fullArchiveNetworkComp.netMessenger.Bootstrap() - if err != nil { - return nil, err + return nil, nil, nil, nil, err } - return &networkComponents{ - mainNetworkHolder: mainNetworkComp, - fullArchiveNetworkHolder: fullArchiveNetworkComp, - inputAntifloodHandler: 
inputAntifloodHandler, - outputAntifloodHandler: outputAntifloodHandler, - pubKeyTimeCacher: antiFloodComponents.PubKeysCacher, - topicFloodPreventer: antiFloodComponents.TopicPreventer, - floodPreventers: antiFloodComponents.FloodPreventers, - peerBlackListHandler: antiFloodComponents.BlacklistHandler, - antifloodConfig: ncf.mainConfig.Antiflood, - peerHonestyHandler: peerHonestyHandler, - peersHolder: peersHolder, - closeFunc: cancelFunc, - }, nil + return antiFloodComponents, inputAntifloodHandler, outputAntifloodHandler, peerHonestyHandler, nil } func (ncf *networkComponentsFactory) createPeerHonestyHandler( @@ -324,9 +321,6 @@ func (nc *networkComponents) Close() error { if !check.IfNil(nc.outputAntifloodHandler) { log.LogIfError(nc.outputAntifloodHandler.Close()) } - if !check.IfNil(nc.topicFloodPreventer) { - log.LogIfError(nc.outputAntifloodHandler.Close()) - } if !check.IfNil(nc.peerHonestyHandler) { log.LogIfError(nc.peerHonestyHandler.Close()) } diff --git a/factory/network/networkComponentsHandler.go b/factory/network/networkComponentsHandler.go index 2afd33f6ede..ce018c5d057 100644 --- a/factory/network/networkComponentsHandler.go +++ b/factory/network/networkComponentsHandler.go @@ -15,6 +15,11 @@ var _ factory.ComponentHandler = (*managedNetworkComponents)(nil) var _ factory.NetworkComponentsHolder = (*managedNetworkComponents)(nil) var _ factory.NetworkComponentsHandler = (*managedNetworkComponents)(nil) +const ( + errorOnMainNetworkString = "on main network" + errorOnFullArchiveNetworkString = "on full archive network" +) + // managedNetworkComponents creates the data components handler that can create, close and access the data components type managedNetworkComponents struct { *networkComponents @@ -75,23 +80,25 @@ func (mnc *managedNetworkComponents) CheckSubcomponents() error { return errors.ErrNilNetworkComponents } if check.IfNil(mnc.mainNetworkHolder.netMessenger) { - return fmt.Errorf("%w for main", errors.ErrNilMessenger) + return 
fmt.Errorf("%w %s", errors.ErrNilMessenger, errorOnMainNetworkString) } if check.IfNil(mnc.mainNetworkHolder.peersRatingHandler) { - return fmt.Errorf("%w for main", errors.ErrNilPeersRatingHandler) + return fmt.Errorf("%w %s", errors.ErrNilPeersRatingHandler, errorOnMainNetworkString) } if check.IfNil(mnc.mainNetworkHolder.peersRatingMonitor) { - return fmt.Errorf("%w for main", errors.ErrNilPeersRatingMonitor) + return fmt.Errorf("%w %s", errors.ErrNilPeersRatingMonitor, errorOnMainNetworkString) } + if check.IfNil(mnc.fullArchiveNetworkHolder.netMessenger) { - return fmt.Errorf("%w for full archive", errors.ErrNilMessenger) + return fmt.Errorf("%w %s", errors.ErrNilMessenger, errorOnFullArchiveNetworkString) } if check.IfNil(mnc.fullArchiveNetworkHolder.peersRatingHandler) { - return fmt.Errorf("%w for full archive", errors.ErrNilPeersRatingHandler) + return fmt.Errorf("%w %s", errors.ErrNilPeersRatingHandler, errorOnFullArchiveNetworkString) } if check.IfNil(mnc.fullArchiveNetworkHolder.peersRatingMonitor) { - return fmt.Errorf("%w for full archive", errors.ErrNilPeersRatingMonitor) + return fmt.Errorf("%w %s", errors.ErrNilPeersRatingMonitor, errorOnFullArchiveNetworkString) } + if check.IfNil(mnc.inputAntifloodHandler) { return errors.ErrNilInputAntiFloodHandler } diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index a93d3802045..39f1cb3412e 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -42,6 +42,7 @@ import ( "github.com/multiversx/mx-chain-go/genesis" "github.com/multiversx/mx-chain-go/genesis/checking" processGenesis "github.com/multiversx/mx-chain-go/genesis/process" + "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" @@ -104,7 +105,8 @@ type processComponents struct { requestHandler process.RequestHandler 
txLogsProcessor process.TransactionLogProcessorDatabase headerConstructionValidator process.HeaderConstructionValidator - peerShardMapper process.NetworkShardingCollector + mainPeerShardMapper process.NetworkShardingCollector + fullArchivePeerShardMapper process.NetworkShardingCollector txSimulatorProcessor factory.TransactionSimulatorProcessor miniBlocksPoolCleaner process.PoolsCleaner txsPoolCleaner process.PoolsCleaner @@ -265,7 +267,16 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { } // TODO: maybe move PeerShardMapper to network components - peerShardMapper, err := pcf.prepareNetworkShardingCollector() + mainPeerShardMapper, err := pcf.prepareNetworkShardingCollectorForMessenger(pcf.network.NetworkMessenger()) + if err != nil { + return nil, err + } + fullArchivePeerShardMapper, err := pcf.prepareNetworkShardingCollectorForMessenger(pcf.network.FullArchiveNetworkMessenger()) + if err != nil { + return nil, err + } + + err = pcf.network.InputAntiFloodHandler().SetPeerValidatorMapper(mainPeerShardMapper) if err != nil { return nil, err } @@ -493,7 +504,8 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { blockTracker, epochStartTrigger, requestHandler, - peerShardMapper, + mainPeerShardMapper, + fullArchivePeerShardMapper, hardforkTrigger, ) if err != nil { @@ -689,7 +701,8 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { txLogsProcessor: txLogsProcessor, headerConstructionValidator: headerValidator, headerIntegrityVerifier: pcf.bootstrapComponents.HeaderIntegrityVerifier(), - peerShardMapper: peerShardMapper, + mainPeerShardMapper: mainPeerShardMapper, + fullArchivePeerShardMapper: fullArchivePeerShardMapper, txSimulatorProcessor: txSimulatorProcessor, miniBlocksPoolCleaner: mbsPoolsCleaner, txsPoolCleaner: txsPoolsCleaner, @@ -1431,9 +1444,15 @@ func (pcf *processComponentsFactory) newInterceptorContainerFactory( validityAttester process.ValidityAttester, 
epochStartTrigger process.EpochStartTriggerHandler, requestHandler process.RequestHandler, - peerShardMapper *networksharding.PeerShardMapper, + mainPeerShardMapper *networksharding.PeerShardMapper, + fullArchivePeerShardMapper *networksharding.PeerShardMapper, hardforkTrigger factory.HardforkTrigger, ) (process.InterceptorsContainerFactory, process.TimeCacher, error) { + nodeOperationMode := p2p.NormalOperation + if pcf.prefConfigs.Preferences.FullArchive { + nodeOperationMode = p2p.FullArchiveMode + } + shardCoordinator := pcf.bootstrapComponents.ShardCoordinator() if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { return pcf.newShardInterceptorContainerFactory( @@ -1442,8 +1461,10 @@ func (pcf *processComponentsFactory) newInterceptorContainerFactory( validityAttester, epochStartTrigger, requestHandler, - peerShardMapper, + mainPeerShardMapper, + fullArchivePeerShardMapper, hardforkTrigger, + nodeOperationMode, ) } if shardCoordinator.SelfId() == core.MetachainShardId { @@ -1453,8 +1474,10 @@ func (pcf *processComponentsFactory) newInterceptorContainerFactory( validityAttester, epochStartTrigger, requestHandler, - peerShardMapper, + mainPeerShardMapper, + fullArchivePeerShardMapper, hardforkTrigger, + nodeOperationMode, ) } @@ -1590,8 +1613,10 @@ func (pcf *processComponentsFactory) newShardInterceptorContainerFactory( validityAttester process.ValidityAttester, epochStartTrigger process.EpochStartTriggerHandler, requestHandler process.RequestHandler, - peerShardMapper *networksharding.PeerShardMapper, + mainPeerShardMapper *networksharding.PeerShardMapper, + fullArchivePeerShardMapper *networksharding.PeerShardMapper, hardforkTrigger factory.HardforkTrigger, + nodeOperationMode p2p.NodeOperation, ) (process.InterceptorsContainerFactory, process.TimeCacher, error) { headerBlackList := cache.NewTimeCache(timeSpanForBadHeaders) shardInterceptorsContainerFactoryArgs := interceptorscontainer.CommonInterceptorsContainerFactoryArgs{ @@ -1600,7 +1625,8 
@@ func (pcf *processComponentsFactory) newShardInterceptorContainerFactory( Accounts: pcf.state.AccountsAdapter(), ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), NodesCoordinator: pcf.nodesCoordinator, - Messenger: pcf.network.NetworkMessenger(), + MainMessenger: pcf.network.NetworkMessenger(), + FullArchiveMessenger: pcf.network.FullArchiveNetworkMessenger(), Store: pcf.data.StorageService(), DataPool: pcf.data.Datapool(), MaxTxNonceDeltaAllowed: common.MaxTxNonceDeltaAllowed, @@ -1620,8 +1646,10 @@ func (pcf *processComponentsFactory) newShardInterceptorContainerFactory( PeerSignatureHandler: pcf.crypto.PeerSignatureHandler(), SignaturesHandler: pcf.network.NetworkMessenger(), HeartbeatExpiryTimespanInSec: pcf.config.HeartbeatV2.HeartbeatExpiryTimespanInSec, - PeerShardMapper: peerShardMapper, + MainPeerShardMapper: mainPeerShardMapper, + FullArchivePeerShardMapper: fullArchivePeerShardMapper, HardforkTrigger: hardforkTrigger, + NodeOperationMode: nodeOperationMode, } interceptorContainerFactory, err := interceptorscontainer.NewShardInterceptorsContainerFactory(shardInterceptorsContainerFactoryArgs) @@ -1638,8 +1666,10 @@ func (pcf *processComponentsFactory) newMetaInterceptorContainerFactory( validityAttester process.ValidityAttester, epochStartTrigger process.EpochStartTriggerHandler, requestHandler process.RequestHandler, - peerShardMapper *networksharding.PeerShardMapper, + mainPeerShardMapper *networksharding.PeerShardMapper, + fullArchivePeerShardMapper *networksharding.PeerShardMapper, hardforkTrigger factory.HardforkTrigger, + nodeOperationMode p2p.NodeOperation, ) (process.InterceptorsContainerFactory, process.TimeCacher, error) { headerBlackList := cache.NewTimeCache(timeSpanForBadHeaders) metaInterceptorsContainerFactoryArgs := interceptorscontainer.CommonInterceptorsContainerFactoryArgs{ @@ -1647,7 +1677,8 @@ func (pcf *processComponentsFactory) newMetaInterceptorContainerFactory( CryptoComponents: pcf.crypto, ShardCoordinator: 
pcf.bootstrapComponents.ShardCoordinator(), NodesCoordinator: pcf.nodesCoordinator, - Messenger: pcf.network.NetworkMessenger(), + MainMessenger: pcf.network.NetworkMessenger(), + FullArchiveMessenger: pcf.network.FullArchiveNetworkMessenger(), Store: pcf.data.StorageService(), DataPool: pcf.data.Datapool(), Accounts: pcf.state.AccountsAdapter(), @@ -1668,8 +1699,10 @@ func (pcf *processComponentsFactory) newMetaInterceptorContainerFactory( PeerSignatureHandler: pcf.crypto.PeerSignatureHandler(), SignaturesHandler: pcf.network.NetworkMessenger(), HeartbeatExpiryTimespanInSec: pcf.config.HeartbeatV2.HeartbeatExpiryTimespanInSec, - PeerShardMapper: peerShardMapper, + MainPeerShardMapper: mainPeerShardMapper, + FullArchivePeerShardMapper: fullArchivePeerShardMapper, HardforkTrigger: hardforkTrigger, + NodeOperationMode: nodeOperationMode, } interceptorContainerFactory, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(metaInterceptorsContainerFactoryArgs) @@ -1695,8 +1728,8 @@ func (pcf *processComponentsFactory) newForkDetector( return nil, errors.New("could not create fork detector") } -// PrepareNetworkShardingCollector will create the network sharding collector and apply it to the network messenger -func (pcf *processComponentsFactory) prepareNetworkShardingCollector() (*networksharding.PeerShardMapper, error) { +// prepareNetworkShardingCollectorForMessenger will create the network sharding collector and apply it to the provided network messenger +func (pcf *processComponentsFactory) prepareNetworkShardingCollectorForMessenger(messenger p2p.Messenger) (*networksharding.PeerShardMapper, error) { networkShardingCollector, err := createNetworkShardingCollector( &pcf.config, pcf.nodesCoordinator, @@ -1709,12 +1742,7 @@ func (pcf *processComponentsFactory) prepareNetworkShardingCollector() (*network localID := pcf.network.NetworkMessenger().ID() networkShardingCollector.UpdatePeerIDInfo(localID, pcf.crypto.PublicKeyBytes(), 
pcf.bootstrapComponents.ShardCoordinator().SelfId()) - err = pcf.network.NetworkMessenger().SetPeerShardResolver(networkShardingCollector) - if err != nil { - return nil, err - } - - err = pcf.network.InputAntiFloodHandler().SetPeerValidatorMapper(networkShardingCollector) + err = messenger.SetPeerShardResolver(networkShardingCollector) if err != nil { return nil, err } diff --git a/factory/processing/processComponentsHandler.go b/factory/processing/processComponentsHandler.go index 098ea22e71e..49c7ac40444 100644 --- a/factory/processing/processComponentsHandler.go +++ b/factory/processing/processComponentsHandler.go @@ -1,6 +1,7 @@ package processing import ( + "fmt" "sync" "github.com/multiversx/mx-chain-core-go/core/check" @@ -143,8 +144,11 @@ func (m *managedProcessComponents) CheckSubcomponents() error { if check.IfNil(m.processComponents.headerConstructionValidator) { return errors.ErrNilHeaderConstructionValidator } - if check.IfNil(m.processComponents.peerShardMapper) { - return errors.ErrNilPeerShardMapper + if check.IfNil(m.processComponents.mainPeerShardMapper) { + return fmt.Errorf("%w for main", errors.ErrNilPeerShardMapper) + } + if check.IfNil(m.processComponents.fullArchivePeerShardMapper) { + return fmt.Errorf("%w for full archive", errors.ErrNilPeerShardMapper) } if check.IfNil(m.processComponents.fallbackHeaderValidator) { return errors.ErrNilFallbackHeaderValidator @@ -423,7 +427,7 @@ func (m *managedProcessComponents) HeaderConstructionValidator() process.HeaderC return m.processComponents.headerConstructionValidator } -// PeerShardMapper returns the peer to shard mapper +// PeerShardMapper returns the peer to shard mapper of the main network func (m *managedProcessComponents) PeerShardMapper() process.NetworkShardingCollector { m.mutProcessComponents.RLock() defer m.mutProcessComponents.RUnlock() @@ -432,7 +436,19 @@ func (m *managedProcessComponents) PeerShardMapper() process.NetworkShardingColl return nil } - return 
m.processComponents.peerShardMapper + return m.processComponents.mainPeerShardMapper +} + +// FullArchivePeerShardMapper returns the peer to shard mapper of the full archive network +func (m *managedProcessComponents) FullArchivePeerShardMapper() process.NetworkShardingCollector { + m.mutProcessComponents.RLock() + defer m.mutProcessComponents.RUnlock() + + if m.processComponents == nil { + return nil + } + + return m.processComponents.fullArchivePeerShardMapper } // FallbackHeaderValidator returns the fallback header validator diff --git a/factory/processing/processComponents_test.go b/factory/processing/processComponents_test.go index 4957aa60b50..af3cea67084 100644 --- a/factory/processing/processComponents_test.go +++ b/factory/processing/processComponents_test.go @@ -215,11 +215,12 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto ManagedPeersHolderField: &testscommon.ManagedPeersHolderStub{}, }, Network: &testsMocks.NetworkComponentsStub{ - Messenger: &p2pmocks.MessengerStub{}, - InputAntiFlood: &testsMocks.P2PAntifloodHandlerStub{}, - OutputAntiFlood: &testsMocks.P2PAntifloodHandlerStub{}, - PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - PeersRatingHandlerField: &p2pmocks.PeersRatingHandlerStub{}, + Messenger: &p2pmocks.MessengerStub{}, + FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{}, + InputAntiFlood: &testsMocks.P2PAntifloodHandlerStub{}, + OutputAntiFlood: &testsMocks.P2PAntifloodHandlerStub{}, + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PeersRatingHandlerField: &p2pmocks.PeersRatingHandlerStub{}, }, BootstrapComponents: &mainFactoryMocks.BootstrapComponentsStub{ ShCoordinator: mock.NewMultiShardsCoordinatorMock(2), diff --git a/go.sum b/go.sum index 6c8a9c3057f..187cf8b78ce 100644 --- a/go.sum +++ b/go.sum @@ -37,10 +37,8 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod 
h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= -filippo.io/edwards25519 v1.0.0/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= -github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= @@ -63,15 +61,11 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= -github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M= -github.com/btcsuite/btcd v0.22.1/go.mod h1:wqgTSL29+50LRkmOVknEdmt8ZojIzhuWvgu/iptuN7Y= -github.com/btcsuite/btcd v0.23.0 h1:V2/ZgjfDFIygAX3ZapeigkVBoVUtOJKSwrhZdlpSvaA= github.com/btcsuite/btcd v0.23.0/go.mod 
h1:0QJIIN1wwIXF/3G/m87gIwGniDMDQqjVn4SZgnFpsYY= github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA= github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= -github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU= github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A= @@ -83,7 +77,6 @@ github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOF github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= -github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= @@ -96,7 +89,6 @@ github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1 github.com/bytedance/sonic v1.8.0 h1:ea0Xadu+sHlu7x5O3gKhRpQ1IKiMrSiHttPF0ybECuA= github.com/bytedance/sonic v1.8.0/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash 
v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= @@ -110,7 +102,6 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= -github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= @@ -120,12 +111,10 @@ github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d h1:t5Wuyh53qYyg9eqn4BbnlIT+vmhyww0TatL+zT3uWgI= github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man v1.0.10 
h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= @@ -145,9 +134,7 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2U github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= github.com/denisbrodbeck/machineid v1.0.1 h1:geKr9qtkB876mXguW2X6TU4ZynleN6ezuMSRhl4D7AQ= github.com/denisbrodbeck/machineid v1.0.1/go.mod h1:dJUwb7PTidGDeYyUBmXZ2GphQBbjJCrnectwCyxcUSI= -github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU= -github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= @@ -167,9 +154,7 @@ github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= -github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= -github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.4.7/go.mod 
h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= @@ -187,7 +172,6 @@ github.com/gin-gonic/gin v1.9.0/go.mod h1:W1Me9+hsUSyj3CePGrd1/QrKJMSJ1Tu/0hFEH8 github.com/gizak/termui/v3 v3.1.0 h1:ZZmVDgwHl7gR7elfKf1xc4IudXZ5qqfDh4wExk4Iajc= github.com/gizak/termui/v3 v3.1.0/go.mod h1:bXQEBkJpzxUAKf0+xq9MSWAvWZlE7c+aidmyFlkYTrY= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -200,15 +184,11 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-playground/assert/v2 v2.0.1/go.mod 
h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= -github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= @@ -276,10 +256,7 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -310,16 +287,12 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGa github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= 
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= -github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -337,30 +310,19 @@ github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixH github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= -github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= -github.com/ipfs/go-cid v0.2.0/go.mod 
h1:P+HXFDF4CVhaVayiEb4wkAy7zBHxBwsJyt0Y5U6MLro= github.com/ipfs/go-cid v0.3.2 h1:OGgOd+JCFM+y1DjWPmVH+2/4POtpDzwcr7VgnB7mZXc= github.com/ipfs/go-cid v0.3.2/go.mod h1:gQ8pKqT/sUxGY+tIwy1RPpAojYu7jAyCp5Tz1svoupw= -github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= -github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= -github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= github.com/ipfs/go-datastore v0.5.1 h1:WkRhLuISI+XPD0uk3OskB0fYFSyqK8Ob5ZYew9Qa1nQ= github.com/ipfs/go-datastore v0.5.1/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= -github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk= github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= -github.com/ipfs/go-ds-badger v0.3.0/go.mod h1:1ke6mXNqeV8K3y5Ak2bAA0osoTfmxUdupVCGm4QUIek= -github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8= github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= -github.com/ipfs/go-ds-leveldb v0.5.0/go.mod h1:d3XG9RUDzQ6V4SHi8+Xgj9j1XuEk1z82lquxrVbml/Q= github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= -github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= github.com/ipfs/go-ipns v0.2.0 
h1:BgmNtQhqOw5XEZ8RAfWEpK4DhqaYiuP6h71MhIp7xXU= @@ -373,10 +335,8 @@ github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JP github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= -github.com/ipfs/go-log/v2 v2.5.0/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= -github.com/ipld/go-ipld-prime v0.9.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= github.com/ipld/go-ipld-prime v0.19.0 h1:5axC7rJmPc17Emw6TelxGwnzALk0PdupZ2oj2roDj04= github.com/ipld/go-ipld-prime v0.19.0/go.mod h1:Q9j3BaVXwaA3o5JUDNvptDDr/x8+F7FG6XJ8WI3ILg4= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= @@ -385,7 +345,6 @@ github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5D github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= -github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= @@ -404,7 +363,6 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7 github.com/jtolds/gls v4.20.0+incompatible/go.mod 
h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= github.com/keybase/go-ps v0.0.0-20190827175125-91aafc93ba19/go.mod h1:hY+WOq6m2FpbvyrI93sMaypsttvaIL5nhVR92dTMUcQ= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= @@ -439,8 +397,6 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= -github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8= -github.com/libp2p/go-flow-metrics v0.0.2/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= @@ -449,36 +405,22 @@ github.com/libp2p/go-libp2p v0.22.0/go.mod h1:UDolmweypBSjQb2f7xutPnwZ/fxioLbMBx github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052/go.mod h1:nRMRTab+kZuk0LnKZpxhOVH/ndsdr2Nr//Zltc/vwgo= github.com/libp2p/go-libp2p-asn-util v0.2.0 h1:rg3+Os8jbnO5DxkC7K/Utdi+DkY3q/d1/1q+8WeNAsw= github.com/libp2p/go-libp2p-asn-util v0.2.0/go.mod h1:WoaWxbHKBymSN41hWSq/lGKJEca7TNm58+gGJi2WsLI= -github.com/libp2p/go-libp2p-core v0.2.4/go.mod 
h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g= -github.com/libp2p/go-libp2p-core v0.2.5/go.mod h1:6+5zJmKhsf7yHn1RbmYDu08qDUpIUxGdqHuEZckmZOA= -github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw= -github.com/libp2p/go-libp2p-core v0.5.3/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= github.com/libp2p/go-libp2p-core v0.5.4/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= github.com/libp2p/go-libp2p-core v0.6.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.19.0/go.mod h1:AkA+FUKQfYt1FLNef5fOPlo/naAWjKy/RCjkcPjqzYg= github.com/libp2p/go-libp2p-core v0.20.0 h1:PGKM74+T+O/FaZNARNW32i90RMBHCcgd/hkum2UQ5eY= github.com/libp2p/go-libp2p-core v0.20.0/go.mod h1:6zR8H7CvQWgYLsbG4on6oLNSGcyKaYFSEYyDt51+bIY= github.com/libp2p/go-libp2p-kad-dht v0.18.0 h1:akqO3gPMwixR7qFSFq70ezRun97g5hrA/lBW9jrjUYM= github.com/libp2p/go-libp2p-kad-dht v0.18.0/go.mod h1:Gb92MYIPm3K2pJLGn8wl0m8wiKDvHrYpg+rOd0GzzPA= -github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio= github.com/libp2p/go-libp2p-kbucket v0.4.7 h1:spZAcgxifvFZHBD8tErvppbnNiKA5uokDu3CV7axu70= github.com/libp2p/go-libp2p-kbucket v0.4.7/go.mod h1:XyVo99AfQH0foSf176k4jY1xUJ2+jUJIZCSDm7r2YKk= -github.com/libp2p/go-libp2p-peerstore v0.1.4/go.mod h1:+4BDbDiiKf4PzpANZDAT+knVdLxvqh7hXOujessqdzs= github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-peerstore v0.8.0 h1:bzTG693TA1Ju/zKmUCQzDLSqiJnyRFVwPpuloZ/OZtI= -github.com/libp2p/go-libp2p-peerstore v0.8.0/go.mod h1:9geHWmNA3YDlQBjL/uPEJD6vpDK12aDNlUNHJ6kio/s= github.com/libp2p/go-libp2p-pubsub v0.8.1 h1:hSw09NauFUaA0FLgQPBJp6QOy0a2n+HSkb8IeOx8OnY= github.com/libp2p/go-libp2p-pubsub v0.8.1/go.mod h1:e4kT+DYjzPUYGZeWk4I+oxCSYTXizzXii5LDRRhjKSw= -github.com/libp2p/go-libp2p-record v0.1.2/go.mod h1:pal0eNcT5nqZaTV7UGhqeGqxFgGdsU/9W//C8dqjQDk= 
github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk= -github.com/libp2p/go-libp2p-routing-helpers v0.2.3/go.mod h1:795bh+9YeoFl99rMASoiVgHdi5bjack0N1+AFAdbvBw= -github.com/libp2p/go-libp2p-testing v0.11.0/go.mod h1:qG4sF27dfKFoK9KlVzK2y52LQKhp0VEmLjV5aDqr1Hg= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= -github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= -github.com/libp2p/go-libp2p-xor v0.1.0/go.mod h1:LSTM5yRnjGZbWNTA/hRwq2gGFrvRIbQJscoIL/u6InY= github.com/libp2p/go-maddr-filter v0.1.0/go.mod h1:VzZhTXkMucEGGEOSKddrwGiOv0tUhgnKqNEmIAz/bPU= -github.com/libp2p/go-mplex v0.7.0/go.mod h1:rW8ThnRcYWft/Jb2jeORBmPd6xuG3dGxWN/W168L9EU= github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.6/go.mod h1:4ecVB6d9f4BDSL5fqvPiC4A3KivjWn+Venn/1ALLMWA= github.com/libp2p/go-msgio v0.2.0 h1:W6shmB+FeynDrUVl2dgFQvzfBZcXiyqY4VmpQLu9FqU= @@ -488,7 +430,6 @@ github.com/libp2p/go-nat v0.1.0/go.mod h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-netroute v0.2.0 h1:0FpsbsvuSnAhXFnCY0VLFbJOzaK0VnP0r1QT/o4nWRE= github.com/libp2p/go-netroute v0.2.0/go.mod h1:Vio7LTzZ+6hoT4CMZi5/6CpY3Snzh2vgZhWgxMNwlQI= -github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-openssl v0.1.0 h1:LBkKEcUv6vtZIQLVTegAil8jbNpJErQ9AnT+bWV+Ooo= @@ -498,12 +439,10 @@ github.com/libp2p/go-reuseport v0.2.0/go.mod h1:bvVho6eLMm6Bz5hmU0LYN3ixd3nPPvtI github.com/libp2p/go-sockaddr 
v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= github.com/libp2p/go-yamux/v3 v3.1.2 h1:lNEy28MBk1HavUAlzKgShp+F6mn/ea1nDYWftZhFW9Q= github.com/libp2p/go-yamux/v3 v3.1.2/go.mod h1:jeLEQgLXqE2YqX1ilAClIfCMDY+0uXQUKmmb/qp0gT4= -github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs= github.com/lucas-clemente/quic-go v0.28.1 h1:Uo0lvVxWg5la9gflIF9lwa39ONq85Xq2D91YNEIslzU= github.com/lucas-clemente/quic-go v0.28.1/go.mod h1:oGz5DKK41cJt5+773+BSO9BXDsREY4HLf7+0odGAPO0= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= github.com/marten-seemann/qtls-go1-16 v0.1.5 h1:o9JrYPPco/Nukd/HpOHMHZoBDXQqoNtUCmny98/1uqQ= @@ -517,12 +456,9 @@ github.com/marten-seemann/qtls-go1-19 v0.1.0 h1:rLFKD/9mp/uq1SYGYuVZhm83wkmU95pK github.com/marten-seemann/qtls-go1-19 v0.1.0/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.14/go.mod 
h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-pointer v0.0.1 h1:n+XhsuGeVO6MEAp7xyEukFINEa+Quek5psIR/ylA6o0= @@ -531,10 +467,8 @@ github.com/mattn/go-runewidth v0.0.2 h1:UnlwIPBGaTZfPQ6T1IGzPI0EkYAQmT9fAEJ/poFC github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= @@ -544,7 +478,6 @@ github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKo github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= -github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod 
h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= @@ -553,7 +486,6 @@ github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrk github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzCv8LZP15IdmG+YdwD2luVPHITV96TkirNBM= github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -573,38 +505,29 @@ github.com/multiformats/go-base32 v0.0.4 h1:+qMh4a2f37b4xTNs6mqitDinryCI+tfO2dRV github.com/multiformats/go-base32 v0.0.4/go.mod h1:jNLFzjPZtp3aIARHbJRZIaPuspdH0J6q39uUM5pnABM= github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= -github.com/multiformats/go-multiaddr v0.1.0/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= github.com/multiformats/go-multiaddr v0.2.1/go.mod h1:s/Apk6IyxfvMjDafnhJgJ3/46z7tZ04iMk5wP4QMGGE= 
github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u0xW5UouOmQQrn6a3Y= github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI= github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc= -github.com/multiformats/go-multiaddr v0.4.1/go.mod h1:3afI9HfVW8csiF8UZqtpYRiDyew8pRX7qLIGHu9FLuM= github.com/multiformats/go-multiaddr v0.6.0 h1:qMnoOPj2s8xxPU5kZ57Cqdr0hHhARz7mFsPMIiYNqzg= github.com/multiformats/go-multiaddr v0.6.0/go.mod h1:F4IpaKZuPP360tOMn2Tpyu0At8w23aRyVqeK0DbFeGM= github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= -github.com/multiformats/go-multiaddr-net v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= github.com/multiformats/go-multiaddr-net v0.1.4/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= github.com/multiformats/go-multiaddr-net v0.2.0/go.mod h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA= github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI= github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8= -github.com/multiformats/go-multicodec v0.4.1/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= -github.com/multiformats/go-multicodec v0.5.0/go.mod h1:DiY2HFaEp5EhEXb/iYzVAunmyX/aSFMxq2KMKfWEues= github.com/multiformats/go-multicodec v0.6.0 h1:KhH2kSuCARyuJraYMFxrNO3DqIaYhOdS039kbhgVwpE= 
github.com/multiformats/go-multicodec v0.6.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw= -github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= -github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= -github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= -github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg= github.com/multiformats/go-multihash v0.2.1 h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108= github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc= github.com/multiformats/go-multistream v0.3.3 h1:d5PZpjwRgVlbwfdTDjife7XszfZd8KYWfROYFlGcR8o= @@ -616,14 +539,8 @@ github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2 github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= -github.com/multiversx/mx-chain-communication-go v1.0.2/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230608110322-586e89326c74 h1:jf3bWYdUku19843q7KwBKBjIOQNi/OTLyjbsE1Yfra8= github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230608110322-586e89326c74/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= -github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= 
-github.com/multiversx/mx-chain-core-go v1.2.1-0.20230510143029-ab37792342df/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= -github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.2.4/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= -github.com/multiversx/mx-chain-core-go v1.2.5/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= github.com/multiversx/mx-chain-core-go v1.2.6 h1:fD5cMsByM1kgvNI+uGCQGlhvr+TrV7FPvJlXT4ubYdg= github.com/multiversx/mx-chain-core-go v1.2.6/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= github.com/multiversx/mx-chain-crypto-go v1.2.6 h1:yxsjAQGh62los+iYmORMfh3w9qen0xbYlmwU0juNSeg= @@ -632,10 +549,8 @@ github.com/multiversx/mx-chain-es-indexer-go v1.4.4 h1:3k8pB1AEILlNXL2ggSnP43uqV github.com/multiversx/mx-chain-es-indexer-go v1.4.4/go.mod h1:IAFuU3LhjVfs3+Sf4T3BlNjY1TmZHWovHRhV7tfR8cw= github.com/multiversx/mx-chain-logger-go v1.0.11 h1:DFsHa+sc5fKwhDR50I8uBM99RTDTEW68ESyr5ALRDwE= github.com/multiversx/mx-chain-logger-go v1.0.11/go.mod h1:1srDkP0DQucWQ+rYfaq0BX2qLnULsUdRPADpYUTM6dA= -github.com/multiversx/mx-chain-storage-go v1.0.8/go.mod h1:lEkFYFe6taKYxqO1einNgT1esY3K9Pj6vPnoCwV9C3U= github.com/multiversx/mx-chain-storage-go v1.0.11 h1:u4ZsfIXEU3nJWRUxyAswhBn2pT6tJkKRwf9pra4CpzA= github.com/multiversx/mx-chain-storage-go v1.0.11/go.mod h1:VP9fwyFBmbmDzahUuu0IeGX/dKG3iBWjN6FSQ6YtVaI= -github.com/multiversx/mx-chain-vm-common-go v1.4.1/go.mod h1:K6yCdro8VohzYI6GwjGzTO+fJiPgO5coo2sgQb+zA24= github.com/multiversx/mx-chain-vm-common-go v1.4.7 h1:7I1FQ2W1c9CMT2kOiroPD0je5RpiiaUO/G2HkajXMnU= github.com/multiversx/mx-chain-vm-common-go v1.4.7/go.mod h1:cnMvZN8+4oDkjloTZVExlf8ShkMGWbbDb5/D//wLT/k= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.57 h1:7jzLRq/IcKpb/qWT3YglXY4RIM4oG6aSNnAUBdItjvk= @@ -693,7 +608,6 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e h1:ZOcivgkkFRnjfoTcGsDq3UQYiBmekwLA+qg0OjyB/ls= github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -729,7 +643,6 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= -github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -765,13 +678,9 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= 
github.com/smartystreets/assertions v1.13.1 h1:Ef7KhSmjZcK6AVf9YbJdvPYG9avaF0ZxudX+ThRdWfU= github.com/smartystreets/assertions v1.13.1/go.mod h1:cXr/IwVfSo/RbCSPhoAPv73p3hlSdrBH/b3SdnW/LMY= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= @@ -785,7 +694,6 @@ github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tL github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -817,28 +725,22 @@ github.com/tklauser/numcpus v0.2.1 h1:ct88eFm+Q7m2ZfXJdan1xYoXKlmwsfP+k88q05KvlZ github.com/tklauser/numcpus v0.2.1/go.mod h1:9aU+wOc6WjUIZEwWMP62PL/41d65P+iks1gBkr4QyP8= github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= -github.com/ugorji/go v1.2.7 
h1:qYhyWUUd6WbiM+C6JZAUkIJt/1WrjzNHY9+KCIjVqTo= github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= github.com/ugorji/go/codec v1.2.9 h1:rmenucSohSTiyL09Y+l2OCk+FrMxGMzho2+tjr5ticU= github.com/ugorji/go/codec v1.2.9/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.10 h1:p8Fspmz3iTctJstry1PYS3HVdllxnEzTEsgIgtxTrCk= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= -github.com/wangjia184/sortedset v0.0.0-20160527075905-f5d03557ba30/go.mod h1:YkocrP2K2tcw938x9gCOmT5G5eCD6jsTz0SZuyAqwIE= -github.com/warpfork/go-testmark v0.10.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a h1:G++j5e0OC488te356JvdhaM8YS6nMsjLAYF7JxCv07w= -github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee h1:lYbXeSvJi5zk5GLKVuid9TVjS9a0OmLIDKTfoZBL6Ow= github.com/whyrusleeping/timecache 
v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:m2aV4LZI4Aez7dP5PMyVKEHhUyEJ/RjmPEDOpDvudHg= -github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -846,21 +748,16 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/otel v1.7.0/go.mod 
h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= -go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -868,9 +765,7 @@ go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= -go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= @@ -891,26 +786,17 @@ golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= golang.org/x/crypto v0.7.0/go.mod 
h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -923,7 +809,6 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -946,9 +831,6 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -997,18 +879,9 @@ golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod 
h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220812174116-3211cb980234/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1032,8 +905,6 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1044,7 +915,6 @@ golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1092,7 +962,6 @@ golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210217105451-b926d437f341/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1101,28 +970,16 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
-golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1130,10 +987,6 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= 
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1144,13 +997,11 @@ golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -1192,8 +1043,6 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools 
v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1304,8 +1153,6 @@ gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8 gopkg.in/go-playground/validator.v8 v8.18.2 h1:lFB4DoMU6B626w8ny76MV7VX6W2VHct2GVOI3xgiMrQ= gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8= -gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1329,7 +1176,6 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= lukechampine.com/blake3 v1.1.7/go.mod 
h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/integrationTests/mock/processComponentsStub.go b/integrationTests/mock/processComponentsStub.go index 07da0ca9d87..f12a3a1d59b 100644 --- a/integrationTests/mock/processComponentsStub.go +++ b/integrationTests/mock/processComponentsStub.go @@ -40,7 +40,8 @@ type ProcessComponentsStub struct { ReqHandler process.RequestHandler TxLogsProcess process.TransactionLogProcessorDatabase HeaderConstructValidator process.HeaderConstructionValidator - PeerMapper process.NetworkShardingCollector + MainPeerMapper process.NetworkShardingCollector + FullArchivePeerMapper process.NetworkShardingCollector TxSimulatorProcessor factory.TransactionSimulatorProcessor FallbackHdrValidator process.FallbackHeaderValidator WhiteListHandlerInternal process.WhiteListHandler @@ -190,7 +191,12 @@ func (pcs *ProcessComponentsStub) HeaderConstructionValidator() process.HeaderCo // PeerShardMapper - func (pcs *ProcessComponentsStub) PeerShardMapper() process.NetworkShardingCollector { - return pcs.PeerMapper + return pcs.MainPeerMapper +} + +// FullArchivePeerShardMapper - +func (pcs *ProcessComponentsStub) FullArchivePeerShardMapper() process.NetworkShardingCollector { + return pcs.FullArchivePeerMapper } // FallbackHeaderValidator - diff --git a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go index 95622859072..538a52018a9 100644 --- a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go +++ b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go @@ -33,6 +33,7 @@ import ( epochNotifierMock "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" "github.com/multiversx/mx-chain-go/testscommon/genericMocks" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" + 
"github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/scheduledDataSyncer" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" @@ -199,7 +200,9 @@ func testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui }) messenger := integrationTests.CreateMessengerWithNoDiscovery() time.Sleep(integrationTests.P2pBootstrapDelay) - nodeToJoinLate.Messenger = messenger + nodeToJoinLate.MainMessenger = messenger + + nodeToJoinLate.FullArchiveMessenger = &p2pmocks.MessengerStub{} for _, n := range nodes { _ = n.ConnectTo(nodeToJoinLate) @@ -230,7 +233,8 @@ func testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui argsBootstrapHandler := bootstrap.ArgsEpochStartBootstrap{ CryptoComponentsHolder: cryptoComponents, CoreComponentsHolder: coreComponents, - Messenger: nodeToJoinLate.Messenger, + MainMessenger: nodeToJoinLate.MainMessenger, + FullArchiveMessenger: nodeToJoinLate.FullArchiveMessenger, GeneralConfig: generalConfig, PrefsConfig: config.PreferencesConfig{ FullArchive: false, diff --git a/integrationTests/multiShard/hardFork/hardFork_test.go b/integrationTests/multiShard/hardFork/hardFork_test.go index 24d927caa40..c71aa6441f0 100644 --- a/integrationTests/multiShard/hardFork/hardFork_test.go +++ b/integrationTests/multiShard/hardFork/hardFork_test.go @@ -73,7 +73,7 @@ func TestHardForkWithoutTransactionInMultiShardedEnvironment(t *testing.T) { n.Close() } - _ = hardforkTriggerNode.Messenger.Close() + _ = hardforkTriggerNode.MainMessenger.Close() }() round := uint64(0) @@ -144,7 +144,7 @@ func TestHardForkWithContinuousTransactionsInMultiShardedEnvironment(t *testing. 
n.Close() } - _ = hardforkTriggerNode.Messenger.Close() + _ = hardforkTriggerNode.MainMessenger.Close() }() initialVal := big.NewInt(1000000000) @@ -585,7 +585,7 @@ func createHardForkExporter( StorageService: node.Storage, RequestHandler: node.RequestHandler, ShardCoordinator: node.ShardCoordinator, - Messenger: node.Messenger, + Messenger: node.MainMessenger, ActiveAccountsDBs: accountsDBs, ExportFolder: node.ExportFolder, ExportTriesStorageConfig: config.StorageConfig{ diff --git a/integrationTests/oneNodeNetwork.go b/integrationTests/oneNodeNetwork.go index 554b83bb084..6a69dd0f0d5 100644 --- a/integrationTests/oneNodeNetwork.go +++ b/integrationTests/oneNodeNetwork.go @@ -32,7 +32,7 @@ func NewOneNodeNetwork() *oneNodeNetwork { // Stop stops the test network func (n *oneNodeNetwork) Stop() { - _ = n.Node.Messenger.Close() + _ = n.Node.MainMessenger.Close() _ = n.Node.VMContainer.Close() } diff --git a/integrationTests/p2p/peersRating/peersRating_test.go b/integrationTests/p2p/peersRating/peersRating_test.go index a4a4bc0551e..212476a99c8 100644 --- a/integrationTests/p2p/peersRating/peersRating_test.go +++ b/integrationTests/p2p/peersRating/peersRating_test.go @@ -32,9 +32,9 @@ func TestPeersRatingAndResponsiveness(t *testing.T) { requesterNode := createNodeWithPeersRatingHandler(core.MetachainShardId, numOfShards) defer func() { - _ = resolverNode.Messenger.Close() - _ = maliciousNode.Messenger.Close() - _ = requesterNode.Messenger.Close() + _ = resolverNode.MainMessenger.Close() + _ = maliciousNode.MainMessenger.Close() + _ = requesterNode.MainMessenger.Close() }() time.Sleep(time.Second) @@ -47,9 +47,9 @@ func TestPeersRatingAndResponsiveness(t *testing.T) { // Broadcasts should not be considered for peers rating topic := factory.ShardBlocksTopic + resolverNode.ShardCoordinator.CommunicationIdentifier(requesterNode.ShardCoordinator.SelfId()) - resolverNode.Messenger.Broadcast(topic, hdrBuff) + resolverNode.MainMessenger.Broadcast(topic, hdrBuff) 
time.Sleep(time.Second) - maliciousNode.Messenger.Broadcast(topic, hdrBuff) + maliciousNode.MainMessenger.Broadcast(topic, hdrBuff) time.Sleep(time.Second) // check that broadcasts were successful _, err := requesterNode.DataPool.Headers().GetHeaderByHash(hdrHash) @@ -67,12 +67,12 @@ func TestPeersRatingAndResponsiveness(t *testing.T) { peerRatingsMap := getRatingsMap(t, requesterNode) // resolver node should have received and responded to numOfRequests - initialResolverRating, exists := peerRatingsMap[resolverNode.Messenger.ID().Pretty()] + initialResolverRating, exists := peerRatingsMap[resolverNode.MainMessenger.ID().Pretty()] require.True(t, exists) initialResolverExpectedRating := fmt.Sprintf("%d", numOfRequests*(decreaseFactor+increaseFactor)) assert.Equal(t, initialResolverExpectedRating, initialResolverRating) // malicious node should have only received numOfRequests - initialMaliciousRating, exists := peerRatingsMap[maliciousNode.Messenger.ID().Pretty()] + initialMaliciousRating, exists := peerRatingsMap[maliciousNode.MainMessenger.ID().Pretty()] require.True(t, exists) initialMaliciousExpectedRating := fmt.Sprintf("%d", numOfRequests*decreaseFactor) assert.Equal(t, initialMaliciousExpectedRating, initialMaliciousRating) @@ -83,12 +83,12 @@ func TestPeersRatingAndResponsiveness(t *testing.T) { peerRatingsMap = getRatingsMap(t, requesterNode) // Resolver should have reached max limit and timestamps still update - initialResolverRating, exists = peerRatingsMap[resolverNode.Messenger.ID().Pretty()] + initialResolverRating, exists = peerRatingsMap[resolverNode.MainMessenger.ID().Pretty()] require.True(t, exists) assert.Equal(t, "100", initialResolverRating) // Malicious should have reached min limit and timestamps still update - initialMaliciousRating, exists = peerRatingsMap[maliciousNode.Messenger.ID().Pretty()] + initialMaliciousRating, exists = peerRatingsMap[maliciousNode.MainMessenger.ID().Pretty()] require.True(t, exists) assert.Equal(t, "-100", 
initialMaliciousRating) @@ -100,12 +100,12 @@ func TestPeersRatingAndResponsiveness(t *testing.T) { peerRatingsMap = getRatingsMap(t, requesterNode) // resolver node should have the max rating + numOfRequests that didn't answer to - resolverRating, exists := peerRatingsMap[resolverNode.Messenger.ID().Pretty()] + resolverRating, exists := peerRatingsMap[resolverNode.MainMessenger.ID().Pretty()] require.True(t, exists) finalResolverExpectedRating := fmt.Sprintf("%d", 100+decreaseFactor*numOfRequests) assert.Equal(t, finalResolverExpectedRating, resolverRating) // malicious node should have the min rating + numOfRequests that received and responded to - maliciousRating, exists := peerRatingsMap[maliciousNode.Messenger.ID().Pretty()] + maliciousRating, exists := peerRatingsMap[maliciousNode.MainMessenger.ID().Pretty()] require.True(t, exists) finalMaliciousExpectedRating := fmt.Sprintf("%d", -100+numOfRequests*increaseFactor+(numOfRequests-1)*decreaseFactor) assert.Equal(t, finalMaliciousExpectedRating, maliciousRating) diff --git a/integrationTests/resolvers/headers/headers_test.go b/integrationTests/resolvers/headers/headers_test.go index 29823a0090d..74fb94cf5c0 100644 --- a/integrationTests/resolvers/headers/headers_test.go +++ b/integrationTests/resolvers/headers/headers_test.go @@ -24,8 +24,8 @@ func TestRequestResolveShardHeadersByHashRequestingShardResolvingShard(t *testin shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, shardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + _ = nRequester.MainMessenger.Close() + _ = nResolver.MainMessenger.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateShardHeader(headerNonce, integrationTests.ChainID) @@ -61,8 +61,8 @@ func TestRequestResolveShardHeadersByHashRequestingMetaResolvingShard(t *testing shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, core.MetachainShardId) defer func() { - 
_ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + _ = nRequester.MainMessenger.Close() + _ = nResolver.MainMessenger.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateShardHeader(headerNonce, integrationTests.ChainID) @@ -98,8 +98,8 @@ func TestRequestResolveShardHeadersByHashRequestingShardResolvingMeta(t *testing shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(core.MetachainShardId, shardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + _ = nRequester.MainMessenger.Close() + _ = nResolver.MainMessenger.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateShardHeader(headerNonce, integrationTests.ChainID) @@ -137,8 +137,8 @@ func TestRequestResolveShardHeadersByNonceRequestingShardResolvingShard(t *testi shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, shardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + _ = nRequester.MainMessenger.Close() + _ = nResolver.MainMessenger.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateShardHeader(headerNonce, integrationTests.ChainID) @@ -176,8 +176,8 @@ func TestRequestResolveShardHeadersByNonceRequestingMetaResolvingShard(t *testin shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, core.MetachainShardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + _ = nRequester.MainMessenger.Close() + _ = nResolver.MainMessenger.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateShardHeader(headerNonce, integrationTests.ChainID) @@ -215,8 +215,8 @@ func TestRequestResolveShardHeadersByNonceRequestingShardResolvingMeta(t *testin shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(core.MetachainShardId, shardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + _ 
= nRequester.MainMessenger.Close() + _ = nResolver.MainMessenger.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateShardHeader(headerNonce, integrationTests.ChainID) diff --git a/integrationTests/resolvers/metablock/metablock_test.go b/integrationTests/resolvers/metablock/metablock_test.go index 37da9de9c3a..00deff89238 100644 --- a/integrationTests/resolvers/metablock/metablock_test.go +++ b/integrationTests/resolvers/metablock/metablock_test.go @@ -24,8 +24,8 @@ func TestRequestResolveMetaHeadersByHashRequestingShardResolvingShard(t *testing shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, shardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + _ = nRequester.MainMessenger.Close() + _ = nResolver.MainMessenger.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateMetaHeader(headerNonce, integrationTests.ChainID) @@ -61,8 +61,8 @@ func TestRequestResolveMetaHeadersByHashRequestingMetaResolvingShard(t *testing. shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, core.MetachainShardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + _ = nRequester.MainMessenger.Close() + _ = nResolver.MainMessenger.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateMetaHeader(headerNonce, integrationTests.ChainID) @@ -98,8 +98,8 @@ func TestRequestResolveMetaHeadersByHashRequestingShardResolvingMeta(t *testing. 
shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(core.MetachainShardId, shardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + _ = nRequester.MainMessenger.Close() + _ = nResolver.MainMessenger.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateMetaHeader(headerNonce, integrationTests.ChainID) @@ -137,8 +137,8 @@ func TestRequestResolveMetaHeadersByNonceRequestingShardResolvingShard(t *testin shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, shardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + _ = nRequester.MainMessenger.Close() + _ = nResolver.MainMessenger.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateMetaHeader(headerNonce, integrationTests.ChainID) @@ -176,8 +176,8 @@ func TestRequestResolveMetaHeadersByNonceRequestingMetaResolvingShard(t *testing shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, core.MetachainShardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + _ = nRequester.MainMessenger.Close() + _ = nResolver.MainMessenger.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateMetaHeader(headerNonce, integrationTests.ChainID) @@ -215,8 +215,8 @@ func TestRequestResolveMetaHeadersByNonceRequestingShardResolvingMeta(t *testing shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(core.MetachainShardId, shardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + _ = nRequester.MainMessenger.Close() + _ = nResolver.MainMessenger.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateMetaHeader(headerNonce, integrationTests.ChainID) diff --git a/integrationTests/resolvers/miniblocks/miniblocks_test.go b/integrationTests/resolvers/miniblocks/miniblocks_test.go index d115d76ce54..12046e157d4 100644 --- 
a/integrationTests/resolvers/miniblocks/miniblocks_test.go +++ b/integrationTests/resolvers/miniblocks/miniblocks_test.go @@ -18,8 +18,8 @@ func TestRequestResolveMiniblockByHashRequestingShardResolvingSameShard(t *testi shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, shardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + _ = nRequester.MainMessenger.Close() + _ = nResolver.MainMessenger.Close() }() miniblock, hash := resolvers.CreateMiniblock(shardId, shardId) @@ -54,8 +54,8 @@ func TestRequestResolveMiniblockByHashRequestingShardResolvingOtherShard(t *test shardIdRequester := uint32(1) nResolver, nRequester := resolvers.CreateResolverRequester(shardIdResolver, shardIdRequester) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + _ = nRequester.MainMessenger.Close() + _ = nResolver.MainMessenger.Close() }() miniblock, hash := resolvers.CreateMiniblock(shardIdResolver, shardIdRequester) @@ -89,8 +89,8 @@ func TestRequestResolveMiniblockByHashRequestingShardResolvingMeta(t *testing.T) shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(core.MetachainShardId, shardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + _ = nRequester.MainMessenger.Close() + _ = nResolver.MainMessenger.Close() }() miniblock, hash := resolvers.CreateMiniblock(shardId, shardId) @@ -124,8 +124,8 @@ func TestRequestResolveMiniblockByHashRequestingMetaResolvingShard(t *testing.T) shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, core.MetachainShardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + _ = nRequester.MainMessenger.Close() + _ = nResolver.MainMessenger.Close() }() miniblock, hash := resolvers.CreateMiniblock(shardId, core.MetachainShardId) @@ -159,8 +159,8 @@ func 
TestRequestResolvePeerMiniblockByHashRequestingShardResolvingSameShard(t *t shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, shardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + _ = nRequester.MainMessenger.Close() + _ = nResolver.MainMessenger.Close() }() miniblock, hash := resolvers.CreateMiniblock(core.MetachainShardId, core.AllShardId) @@ -195,8 +195,8 @@ func TestRequestResolvePeerMiniblockByHashRequestingShardResolvingOtherShard(t * shardIdRequester := uint32(1) nResolver, nRequester := resolvers.CreateResolverRequester(shardIdResolver, shardIdRequester) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + _ = nRequester.MainMessenger.Close() + _ = nResolver.MainMessenger.Close() }() miniblock, hash := resolvers.CreateMiniblock(shardIdResolver, core.AllShardId) @@ -230,8 +230,8 @@ func TestRequestResolvePeerMiniblockByHashRequestingShardResolvingMeta(t *testin shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(core.MetachainShardId, shardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + _ = nRequester.MainMessenger.Close() + _ = nResolver.MainMessenger.Close() }() miniblock, hash := resolvers.CreateMiniblock(shardId, core.AllShardId) @@ -265,8 +265,8 @@ func TestRequestResolvePeerMiniblockByHashRequestingMetaResolvingShard(t *testin shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, core.MetachainShardId) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + _ = nRequester.MainMessenger.Close() + _ = nResolver.MainMessenger.Close() }() miniblock, hash := resolvers.CreateMiniblock(shardId, core.AllShardId) diff --git a/integrationTests/resolvers/rewards/rewards_test.go b/integrationTests/resolvers/rewards/rewards_test.go index 5e12fd9c1cf..bc7f63f9524 100644 --- 
a/integrationTests/resolvers/rewards/rewards_test.go +++ b/integrationTests/resolvers/rewards/rewards_test.go @@ -20,8 +20,8 @@ func TestRequestResolveRewardsByHashRequestingShardResolvingOtherShard(t *testin shardIdRequester := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardIdResolver, shardIdRequester) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + _ = nRequester.MainMessenger.Close() + _ = nResolver.MainMessenger.Close() }() headerNonce := uint64(0) reward, hash := resolvers.CreateReward(headerNonce) diff --git a/integrationTests/resolvers/smartContractsResults/scrs_test.go b/integrationTests/resolvers/smartContractsResults/scrs_test.go index 7b243379259..9db1cf8c5a0 100644 --- a/integrationTests/resolvers/smartContractsResults/scrs_test.go +++ b/integrationTests/resolvers/smartContractsResults/scrs_test.go @@ -20,8 +20,8 @@ func TestRequestResolveLargeSCRByHashRequestingShardResolvingOtherShard(t *testi shardIdRequester := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardIdResolver, shardIdRequester) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + _ = nRequester.MainMessenger.Close() + _ = nResolver.MainMessenger.Close() }() scr, hash := resolvers.CreateLargeSmartContractResults() diff --git a/integrationTests/resolvers/testInitializer.go b/integrationTests/resolvers/testInitializer.go index 95f0d388df9..2910c7590f7 100644 --- a/integrationTests/resolvers/testInitializer.go +++ b/integrationTests/resolvers/testInitializer.go @@ -38,7 +38,7 @@ func CreateResolverRequester( }) time.Sleep(time.Second) - err := nRequester.Messenger.ConnectToPeer(integrationTests.GetConnectableAddress(nResolver.Messenger)) + err := nRequester.MainMessenger.ConnectToPeer(integrationTests.GetConnectableAddress(nResolver.MainMessenger)) Log.LogIfError(err) time.Sleep(time.Second) diff --git 
a/integrationTests/singleShard/block/interceptedRequestHdr/interceptedRequestHdr_test.go b/integrationTests/singleShard/block/interceptedRequestHdr/interceptedRequestHdr_test.go index a36c16007b8..89c75c645ff 100644 --- a/integrationTests/singleShard/block/interceptedRequestHdr/interceptedRequestHdr_test.go +++ b/integrationTests/singleShard/block/interceptedRequestHdr/interceptedRequestHdr_test.go @@ -49,8 +49,8 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithNetMessenger(t *testing.T) { }) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + _ = nRequester.MainMessenger.Close() + _ = nResolver.MainMessenger.Close() }() //connect messengers together @@ -117,8 +117,8 @@ func TestNode_InterceptedHeaderWithWrongChainIDShouldBeDiscarded(t *testing.T) { }) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + _ = nRequester.MainMessenger.Close() + _ = nResolver.MainMessenger.Close() }() //connect messengers together diff --git a/integrationTests/singleShard/block/interceptedRequestTxBlockBody/interceptedRequestTxBlockBody_test.go b/integrationTests/singleShard/block/interceptedRequestTxBlockBody/interceptedRequestTxBlockBody_test.go index 1ef06e7aacb..ea2da120a5c 100644 --- a/integrationTests/singleShard/block/interceptedRequestTxBlockBody/interceptedRequestTxBlockBody_test.go +++ b/integrationTests/singleShard/block/interceptedRequestTxBlockBody/interceptedRequestTxBlockBody_test.go @@ -42,8 +42,8 @@ func TestNode_GenerateSendInterceptTxBlockBodyWithNetMessenger(t *testing.T) { }) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + _ = nRequester.MainMessenger.Close() + _ = nResolver.MainMessenger.Close() }() //connect messengers together diff --git a/integrationTests/singleShard/transaction/interceptedBulkTx/interceptedBulkTx_test.go b/integrationTests/singleShard/transaction/interceptedBulkTx/interceptedBulkTx_test.go index 3e8a63e1b33..3db8bb8faf9 100644 --- 
a/integrationTests/singleShard/transaction/interceptedBulkTx/interceptedBulkTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedBulkTx/interceptedBulkTx_test.go @@ -136,7 +136,7 @@ func TestNode_SendTransactionFromAnUnmintedAccountShouldReturnErrorAtApiLevel(t }) defer func() { - _ = node.Messenger.Close() + _ = node.MainMessenger.Close() }() tx := &transaction.Transaction{ diff --git a/integrationTests/singleShard/transaction/interceptedBulkUnsignedTx/interceptedBulkUnsignedTx_test.go b/integrationTests/singleShard/transaction/interceptedBulkUnsignedTx/interceptedBulkUnsignedTx_test.go index ea919f0139a..9d624eb4038 100644 --- a/integrationTests/singleShard/transaction/interceptedBulkUnsignedTx/interceptedBulkUnsignedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedBulkUnsignedTx/interceptedBulkUnsignedTx_test.go @@ -91,7 +91,7 @@ func TestNode_GenerateSendInterceptBulkUnsignedTransactionsWithMessenger(t *test noOfUnsignedTx, integrationTests.TestMarshalizer, n.ShardCoordinator, - n.Messenger, + n.MainMessenger, ) assert.Nil(t, err) diff --git a/integrationTests/singleShard/transaction/interceptedResolvedTx/interceptedResolvedTx_test.go b/integrationTests/singleShard/transaction/interceptedResolvedTx/interceptedResolvedTx_test.go index 0de6313c58d..45a6dc18e00 100644 --- a/integrationTests/singleShard/transaction/interceptedResolvedTx/interceptedResolvedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedResolvedTx/interceptedResolvedTx_test.go @@ -40,8 +40,8 @@ func TestNode_RequestInterceptTransactionWithMessengerAndWhitelist(t *testing.T) TxSignPrivKeyShardId: txSignPrivKeyShardId, }) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + _ = nRequester.MainMessenger.Close() + _ = nResolver.MainMessenger.Close() }() //connect messengers together @@ -136,8 +136,8 @@ func TestNode_RequestInterceptRewardTransactionWithMessenger(t *testing.T) { TxSignPrivKeyShardId: 
txSignPrivKeyShardId, }) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + _ = nRequester.MainMessenger.Close() + _ = nResolver.MainMessenger.Close() }() //connect messengers together diff --git a/integrationTests/singleShard/transaction/interceptedResolvedUnsignedTx/interceptedResolvedUnsignedTx_test.go b/integrationTests/singleShard/transaction/interceptedResolvedUnsignedTx/interceptedResolvedUnsignedTx_test.go index b685444fa85..ef2abacd76e 100644 --- a/integrationTests/singleShard/transaction/interceptedResolvedUnsignedTx/interceptedResolvedUnsignedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedResolvedUnsignedTx/interceptedResolvedUnsignedTx_test.go @@ -38,8 +38,8 @@ func TestNode_RequestInterceptUnsignedTransactionWithMessenger(t *testing.T) { TxSignPrivKeyShardId: txSignPrivKeyShardId, }) defer func() { - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + _ = nRequester.MainMessenger.Close() + _ = nResolver.MainMessenger.Close() }() //connect messengers together diff --git a/integrationTests/state/stateTrieSync/stateTrieSync_test.go b/integrationTests/state/stateTrieSync/stateTrieSync_test.go index 98e88edc668..39b144366d8 100644 --- a/integrationTests/state/stateTrieSync/stateTrieSync_test.go +++ b/integrationTests/state/stateTrieSync/stateTrieSync_test.go @@ -53,7 +53,8 @@ func createTestProcessorNodeAndTrieStorage( TrieStore: mainStorer, GasScheduleMap: createTestGasMap(), }) - _ = node.Messenger.CreateTopic(common.ConsensusTopic+node.ShardCoordinator.CommunicationIdentifier(node.ShardCoordinator.SelfId()), true) + _ = node.MainMessenger.CreateTopic(common.ConsensusTopic+node.ShardCoordinator.CommunicationIdentifier(node.ShardCoordinator.SelfId()), true) + _ = node.FullArchiveMessenger.CreateTopic(common.ConsensusTopic+node.ShardCoordinator.CommunicationIdentifier(node.ShardCoordinator.SelfId()), true) return node, mainStorer } @@ -86,8 +87,8 @@ func 
testNodeRequestInterceptTrieNodesWithMessenger(t *testing.T, version int) { _ = trieStorageRequester.DestroyUnit() _ = trieStorageResolver.DestroyUnit() - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + _ = nRequester.MainMessenger.Close() + _ = nResolver.MainMessenger.Close() }() time.Sleep(time.Second) @@ -207,8 +208,8 @@ func testNodeRequestInterceptTrieNodesWithMessengerNotSyncingShouldErr(t *testin _ = trieStorageRequester.DestroyUnit() _ = trieStorageResolver.DestroyUnit() - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + _ = nRequester.MainMessenger.Close() + _ = nResolver.MainMessenger.Close() }() time.Sleep(time.Second) @@ -254,7 +255,7 @@ func testNodeRequestInterceptTrieNodesWithMessengerNotSyncingShouldErr(t *testin go func() { // sudden close of the resolver node after just 2 seconds time.Sleep(time.Second * 2) - _ = nResolver.Messenger.Close() + _ = nResolver.MainMessenger.Close() log.Info("resolver node closed, the requester should soon fail in error") }() @@ -315,8 +316,8 @@ func testMultipleDataTriesSync(t *testing.T, numAccounts int, numDataTrieLeaves _ = trieStorageRequester.DestroyUnit() _ = trieStorageResolver.DestroyUnit() - _ = nRequester.Messenger.Close() - _ = nResolver.Messenger.Close() + _ = nRequester.MainMessenger.Close() + _ = nResolver.MainMessenger.Close() }() time.Sleep(time.Second) diff --git a/integrationTests/sync/edgeCases/edgeCases_test.go b/integrationTests/sync/edgeCases/edgeCases_test.go index 285f0876cd1..b02c2e39140 100644 --- a/integrationTests/sync/edgeCases/edgeCases_test.go +++ b/integrationTests/sync/edgeCases/edgeCases_test.go @@ -94,7 +94,7 @@ func TestSyncMetaNodeIsSyncingReceivedHigherRoundBlockFromShard(t *testing.T) { } integrationTests.BootstrapDelay() - require.True(t, len(syncMetaNode.Messenger.ConnectedPeers()) > 1, "not enough peers connected to this node."+ + require.True(t, len(syncMetaNode.MainMessenger.ConnectedPeers()) > 1, "not enough peers connected to this 
node."+ " Check that the peer discovery mechanism works properly.") integrationTests.StartSyncingBlocks(syncNodesSlice) diff --git a/integrationTests/testConsensusNode.go b/integrationTests/testConsensusNode.go index 59620306e34..fb4348620cf 100644 --- a/integrationTests/testConsensusNode.go +++ b/integrationTests/testConsensusNode.go @@ -313,7 +313,8 @@ func (tcn *TestConsensusNode) initNode(args ArgsTestConsensusNode) { processComponents.HeaderSigVerif = &mock.HeaderSigVerifierStub{} processComponents.HeaderIntegrVerif = &mock.HeaderIntegrityVerifierStub{} processComponents.ReqHandler = &testscommon.RequestHandlerStub{} - processComponents.PeerMapper = mock.NewNetworkShardingCollectorMock() + processComponents.MainPeerMapper = mock.NewNetworkShardingCollectorMock() + processComponents.FullArchivePeerMapper = mock.NewNetworkShardingCollectorMock() processComponents.RoundHandlerField = roundHandler processComponents.ScheduledTxsExecutionHandlerInternal = &testscommon.ScheduledTxsExecutionStub{} processComponents.ProcessedMiniBlocksTrackerInternal = &testscommon.ProcessedMiniBlocksTrackerStub{} diff --git a/integrationTests/testHeartbeatNode.go b/integrationTests/testHeartbeatNode.go index 38a04ec67db..da8775fb329 100644 --- a/integrationTests/testHeartbeatNode.go +++ b/integrationTests/testHeartbeatNode.go @@ -86,7 +86,8 @@ var TestThrottler = &processMock.InterceptorThrottlerStub{ type TestHeartbeatNode struct { ShardCoordinator sharding.Coordinator NodesCoordinator nodesCoordinator.NodesCoordinator - PeerShardMapper process.NetworkShardingCollector + MainPeerShardMapper process.NetworkShardingCollector + FullArchivePeerShardMapper process.NetworkShardingCollector MainMessenger p2p.Messenger FullArchiveMessenger p2p.Messenger NodeKeys *TestNodeKeys @@ -186,13 +187,14 @@ func NewTestHeartbeatNode( MainMessenger: messenger, FullArchiveMessenger: &p2pmocks.MessengerStub{}, // TODO[Sorin]: inject a proper messenger when all pieces are done to test this network as well 
PeerSigHandler: peerSigHandler, - PeerShardMapper: peerShardMapper, + MainPeerShardMapper: peerShardMapper, + FullArchivePeerShardMapper: &mock.PeerShardMapperStub{}, heartbeatExpiryTimespanInSec: heartbeatExpiryTimespanInSec, } localId := thn.MainMessenger.ID() pkBytes, _ := pk.ToByteArray() - thn.PeerShardMapper.UpdatePeerIDInfo(localId, pkBytes, shardCoordinator.SelfId()) + thn.MainPeerShardMapper.UpdatePeerIDInfo(localId, pkBytes, shardCoordinator.SelfId()) thn.NodeKeys = &TestNodeKeys{ MainKey: &TestKeyPair{ @@ -260,13 +262,14 @@ func NewTestHeartbeatNodeWithCoordinator( MainMessenger: messenger, FullArchiveMessenger: &p2pmocks.MessengerStub{}, PeerSigHandler: peerSigHandler, - PeerShardMapper: peerShardMapper, + MainPeerShardMapper: peerShardMapper, + FullArchivePeerShardMapper: &mock.PeerShardMapperStub{}, Interceptor: NewCountInterceptor(), heartbeatExpiryTimespanInSec: 30, } localId := thn.MainMessenger.ID() - thn.PeerShardMapper.UpdatePeerIDInfo(localId, []byte(""), shardCoordinator.SelfId()) + thn.MainPeerShardMapper.UpdatePeerIDInfo(localId, []byte(""), shardCoordinator.SelfId()) thn.NodeKeys = keys @@ -574,7 +577,7 @@ func (thn *TestHeartbeatNode) initInterceptors() { func (thn *TestHeartbeatNode) createPeerAuthInterceptor(argsFactory interceptorFactory.ArgInterceptedDataFactory) { args := interceptorsProcessor.ArgPeerAuthenticationInterceptorProcessor{ PeerAuthenticationCacher: thn.DataPool.PeerAuthentications(), - PeerShardMapper: thn.PeerShardMapper, + PeerShardMapper: thn.MainPeerShardMapper, Marshaller: TestMarshaller, HardforkTrigger: &testscommon.HardforkTriggerStub{}, } @@ -587,7 +590,7 @@ func (thn *TestHeartbeatNode) createHeartbeatInterceptor(argsFactory interceptor args := interceptorsProcessor.ArgHeartbeatInterceptorProcessor{ HeartbeatCacher: thn.DataPool.Heartbeats(), ShardCoordinator: thn.ShardCoordinator, - PeerShardMapper: thn.PeerShardMapper, + PeerShardMapper: thn.MainPeerShardMapper, } hbProcessor, _ := 
interceptorsProcessor.NewHeartbeatInterceptorProcessor(args) hbFactory, _ := interceptorFactory.NewInterceptedHeartbeatDataFactory(argsFactory) @@ -597,7 +600,7 @@ func (thn *TestHeartbeatNode) createHeartbeatInterceptor(argsFactory interceptor func (thn *TestHeartbeatNode) createPeerShardInterceptor(argsFactory interceptorFactory.ArgInterceptedDataFactory) { args := interceptorsProcessor.ArgPeerShardInterceptorProcessor{ - PeerShardMapper: thn.PeerShardMapper, + PeerShardMapper: thn.MainPeerShardMapper, } dciProcessor, _ := interceptorsProcessor.NewPeerShardInterceptorProcessor(args) dciFactory, _ := interceptorFactory.NewInterceptedPeerShardFactory(argsFactory) @@ -687,7 +690,7 @@ func (thn *TestHeartbeatNode) initDirectConnectionProcessor(tb testing.TB) { argsDirectConnectionProcessor := processor.ArgsDirectConnectionProcessor{ TimeToReadDirectConnections: 5 * time.Second, Messenger: thn.MainMessenger, - PeerShardMapper: thn.PeerShardMapper, + PeerShardMapper: thn.MainPeerShardMapper, ShardCoordinator: thn.ShardCoordinator, BaseIntraShardTopic: ShardTopic, BaseCrossShardTopic: ShardTopic, @@ -700,7 +703,7 @@ func (thn *TestHeartbeatNode) initDirectConnectionProcessor(tb testing.TB) { argsDirectConnectionProcessor = processor.ArgsDirectConnectionProcessor{ TimeToReadDirectConnections: 5 * time.Second, Messenger: thn.FullArchiveMessenger, - PeerShardMapper: thn.PeerShardMapper, // TODO[Sorin]: replace this with the full archive psm + PeerShardMapper: thn.FullArchivePeerShardMapper, ShardCoordinator: thn.ShardCoordinator, BaseIntraShardTopic: ShardTopic, BaseCrossShardTopic: ShardTopic, @@ -713,7 +716,7 @@ func (thn *TestHeartbeatNode) initDirectConnectionProcessor(tb testing.TB) { func (thn *TestHeartbeatNode) initCrossShardPeerTopicNotifier(tb testing.TB) { argsCrossShardPeerTopicNotifier := monitor.ArgsCrossShardPeerTopicNotifier{ ShardCoordinator: thn.ShardCoordinator, - PeerShardMapper: thn.PeerShardMapper, + PeerShardMapper: thn.MainPeerShardMapper, } 
crossShardPeerTopicNotifier, err := monitor.NewCrossShardPeerTopicNotifier(argsCrossShardPeerTopicNotifier) require.Nil(tb, err) @@ -723,7 +726,7 @@ func (thn *TestHeartbeatNode) initCrossShardPeerTopicNotifier(tb testing.TB) { argsCrossShardPeerTopicNotifier = monitor.ArgsCrossShardPeerTopicNotifier{ ShardCoordinator: thn.ShardCoordinator, - PeerShardMapper: thn.PeerShardMapper, // TODO[Sorin]: replace this with the full archive psm + PeerShardMapper: thn.FullArchivePeerShardMapper, } crossShardPeerTopicNotifier, err = monitor.NewCrossShardPeerTopicNotifier(argsCrossShardPeerTopicNotifier) require.Nil(tb, err) diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 7f6461dba49..6b91a5fe769 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -879,7 +879,7 @@ func MakeDisplayTable(nodes []*TestProcessorNode) string { fmt.Sprintf("%d", atomic.LoadInt32(&n.CounterMbRecv)), fmt.Sprintf("%d", atomic.LoadInt32(&n.CounterHdrRecv)), fmt.Sprintf("%d", atomic.LoadInt32(&n.CounterMetaRcv)), - fmt.Sprintf("%d", len(n.Messenger.ConnectedPeers())), + fmt.Sprintf("%d", len(n.MainMessenger.ConnectedPeers())), }, ) } diff --git a/integrationTests/testNetwork.go b/integrationTests/testNetwork.go index 63a253b3bc0..81ea18c423c 100644 --- a/integrationTests/testNetwork.go +++ b/integrationTests/testNetwork.go @@ -441,7 +441,7 @@ func (net *TestNetwork) initDefaults() { func (net *TestNetwork) closeNodes() { for _, node := range net.Nodes { - err := node.Messenger.Close() + err := node.MainMessenger.Close() net.handleOrBypassError(err) _ = node.VMContainer.Close() } diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 82f0a4d20cf..303bb2f7a40 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -290,11 +290,14 @@ type ArgTestProcessorNode struct { // TestProcessorNode represents a container type of class used in 
integration tests // with all its fields exported type TestProcessorNode struct { - ShardCoordinator sharding.Coordinator - NodesCoordinator nodesCoordinator.NodesCoordinator - PeerShardMapper process.PeerShardMapper - NodesSetup sharding.GenesisNodesSetupHandler - Messenger p2p.Messenger + ShardCoordinator sharding.Coordinator + NodesCoordinator nodesCoordinator.NodesCoordinator + MainPeerShardMapper process.PeerShardMapper + FullArchivePeerShardMapper process.PeerShardMapper + NodesSetup sharding.GenesisNodesSetupHandler + MainMessenger p2p.Messenger + FullArchiveMessenger p2p.Messenger + NodeOperationMode p2p.NodeOperation OwnAccount *TestWalletAccount NodeKeys *TestNodeKeys @@ -468,29 +471,32 @@ func newBaseTestProcessorNode(args ArgTestProcessorNode) *TestProcessorNode { logsProcessor, _ := transactionLog.NewTxLogProcessor(transactionLog.ArgTxLogProcessor{Marshalizer: TestMarshalizer}) tpn := &TestProcessorNode{ - ShardCoordinator: shardCoordinator, - Messenger: messenger, - NodesCoordinator: nodesCoordinatorInstance, - ChainID: ChainID, - MinTransactionVersion: MinTransactionVersion, - NodesSetup: nodesSetup, - HistoryRepository: &dblookupextMock.HistoryRepositoryStub{}, - EpochNotifier: genericEpochNotifier, - EnableEpochsHandler: enableEpochsHandler, - WasmVMChangeLocker: &sync.RWMutex{}, - TransactionLogProcessor: logsProcessor, - Bootstrapper: mock.NewTestBootstrapperMock(), - PeersRatingHandler: peersRatingHandler, - PeerShardMapper: mock.NewNetworkShardingCollectorMock(), - EnableEpochs: *epochsConfig, - UseValidVmBlsSigVerifier: args.WithBLSSigVerifier, - StorageBootstrapper: &mock.StorageBootstrapperMock{}, - BootstrapStorer: &mock.BoostrapStorerMock{}, - RatingsData: args.RatingsData, - EpochStartNotifier: args.EpochStartSubscriber, - GuardedAccountHandler: &guardianMocks.GuardedAccountHandlerStub{}, - AppStatusHandler: appStatusHandler, - PeersRatingMonitor: peersRatingMonitor, + ShardCoordinator: shardCoordinator, + MainMessenger: messenger, + 
FullArchiveMessenger: &p2pmocks.MessengerStub{}, // TODO[Sorin]: inject a proper messenger when all pieces are done to test this network as well, + NodeOperationMode: p2p.NormalOperation, + NodesCoordinator: nodesCoordinatorInstance, + ChainID: ChainID, + MinTransactionVersion: MinTransactionVersion, + NodesSetup: nodesSetup, + HistoryRepository: &dblookupextMock.HistoryRepositoryStub{}, + EpochNotifier: genericEpochNotifier, + EnableEpochsHandler: enableEpochsHandler, + WasmVMChangeLocker: &sync.RWMutex{}, + TransactionLogProcessor: logsProcessor, + Bootstrapper: mock.NewTestBootstrapperMock(), + PeersRatingHandler: peersRatingHandler, + MainPeerShardMapper: mock.NewNetworkShardingCollectorMock(), + FullArchivePeerShardMapper: mock.NewNetworkShardingCollectorMock(), + EnableEpochs: *epochsConfig, + UseValidVmBlsSigVerifier: args.WithBLSSigVerifier, + StorageBootstrapper: &mock.StorageBootstrapperMock{}, + BootstrapStorer: &mock.BoostrapStorerMock{}, + RatingsData: args.RatingsData, + EpochStartNotifier: args.EpochStartSubscriber, + GuardedAccountHandler: &guardianMocks.GuardedAccountHandlerStub{}, + AppStatusHandler: appStatusHandler, + PeersRatingMonitor: peersRatingMonitor, } tpn.NodeKeys = args.NodeKeys @@ -547,7 +553,7 @@ func (tpn *TestProcessorNode) ConnectTo(connectable Connectable) error { return fmt.Errorf("trying to connect to a nil Connectable parameter") } - return tpn.Messenger.ConnectToPeer(connectable.GetConnectableAddress()) + return tpn.MainMessenger.ConnectToPeer(connectable.GetConnectableAddress()) } // GetConnectableAddress returns a non circuit, non windows default connectable p2p address @@ -556,12 +562,13 @@ func (tpn *TestProcessorNode) GetConnectableAddress() string { return "nil" } - return GetConnectableAddress(tpn.Messenger) + return GetConnectableAddress(tpn.MainMessenger) } // Close - func (tpn *TestProcessorNode) Close() { - _ = tpn.Messenger.Close() + _ = tpn.MainMessenger.Close() + _ = tpn.FullArchiveMessenger.Close() _ = 
tpn.VMContainer.Close() } @@ -765,7 +772,7 @@ func (tpn *TestProcessorNode) initTestNodeWithArgs(args ArgTestProcessorNode) { tpn.BroadcastMessenger, _ = sposFactory.GetBroadcastMessenger( TestMarshalizer, TestHasher, - tpn.Messenger, + tpn.MainMessenger, tpn.ShardCoordinator, tpn.OwnAccount.PeerSigHandler, tpn.DataPool.Headers(), @@ -773,7 +780,7 @@ func (tpn *TestProcessorNode) initTestNodeWithArgs(args ArgTestProcessorNode) { &testscommon.AlarmSchedulerStub{}, testscommon.NewKeysHandlerSingleSignerMock( tpn.NodeKeys.MainKey.Sk, - tpn.Messenger.ID(), + tpn.MainMessenger.ID(), ), ) @@ -954,7 +961,7 @@ func (tpn *TestProcessorNode) InitializeProcessors(gasMap map[string]map[string] tpn.BroadcastMessenger, _ = sposFactory.GetBroadcastMessenger( TestMarshalizer, TestHasher, - tpn.Messenger, + tpn.MainMessenger, tpn.ShardCoordinator, tpn.OwnAccount.PeerSigHandler, tpn.DataPool.Headers(), @@ -962,7 +969,7 @@ func (tpn *TestProcessorNode) InitializeProcessors(gasMap map[string]map[string] &testscommon.AlarmSchedulerStub{}, testscommon.NewKeysHandlerSingleSignerMock( tpn.NodeKeys.MainKey.Sk, - tpn.Messenger.ID(), + tpn.MainMessenger.ID(), ), ) tpn.setGenesisBlock() @@ -1207,7 +1214,8 @@ func (tpn *TestProcessorNode) initInterceptors(heartbeatPk string) { Accounts: tpn.AccntState, ShardCoordinator: tpn.ShardCoordinator, NodesCoordinator: tpn.NodesCoordinator, - Messenger: tpn.Messenger, + MainMessenger: tpn.MainMessenger, + FullArchiveMessenger: tpn.FullArchiveMessenger, Store: tpn.Storage, DataPool: tpn.DataPool, MaxTxNonceDeltaAllowed: maxTxNonceDeltaAllowed, @@ -1227,8 +1235,10 @@ func (tpn *TestProcessorNode) initInterceptors(heartbeatPk string) { PeerSignatureHandler: &processMock.PeerSignatureHandlerStub{}, SignaturesHandler: &processMock.SignaturesHandlerStub{}, HeartbeatExpiryTimespanInSec: 30, - PeerShardMapper: tpn.PeerShardMapper, + MainPeerShardMapper: tpn.MainPeerShardMapper, + FullArchivePeerShardMapper: tpn.FullArchivePeerShardMapper, HardforkTrigger: 
tpn.HardforkTrigger, + NodeOperationMode: tpn.NodeOperationMode, } interceptorContainerFactory, _ := interceptorscontainer.NewMetaInterceptorsContainerFactory(metaInterceptorContainerFactoryArgs) @@ -1272,7 +1282,8 @@ func (tpn *TestProcessorNode) initInterceptors(heartbeatPk string) { Accounts: tpn.AccntState, ShardCoordinator: tpn.ShardCoordinator, NodesCoordinator: tpn.NodesCoordinator, - Messenger: tpn.Messenger, + MainMessenger: tpn.MainMessenger, + FullArchiveMessenger: tpn.FullArchiveMessenger, Store: tpn.Storage, DataPool: tpn.DataPool, MaxTxNonceDeltaAllowed: maxTxNonceDeltaAllowed, @@ -1292,8 +1303,10 @@ func (tpn *TestProcessorNode) initInterceptors(heartbeatPk string) { PeerSignatureHandler: &processMock.PeerSignatureHandlerStub{}, SignaturesHandler: &processMock.SignaturesHandlerStub{}, HeartbeatExpiryTimespanInSec: 30, - PeerShardMapper: tpn.PeerShardMapper, + MainPeerShardMapper: tpn.MainPeerShardMapper, + FullArchivePeerShardMapper: tpn.FullArchivePeerShardMapper, HardforkTrigger: tpn.HardforkTrigger, + NodeOperationMode: tpn.NodeOperationMode, } interceptorContainerFactory, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(shardIntereptorContainerFactoryArgs) @@ -1335,13 +1348,13 @@ func (tpn *TestProcessorNode) createHardforkTrigger(heartbeatPk string) []byte { func (tpn *TestProcessorNode) initResolvers() { dataPacker, _ := partitioning.NewSimpleDataPacker(TestMarshalizer) - _ = tpn.Messenger.CreateTopic(common.ConsensusTopic+tpn.ShardCoordinator.CommunicationIdentifier(tpn.ShardCoordinator.SelfId()), true) + _ = tpn.MainMessenger.CreateTopic(common.ConsensusTopic+tpn.ShardCoordinator.CommunicationIdentifier(tpn.ShardCoordinator.SelfId()), true) payloadValidator, _ := validator.NewPeerAuthenticationPayloadValidator(60) preferredPeersHolder, _ := p2pFactory.NewPeersHolder([]string{}) resolverContainerFactory := resolverscontainer.FactoryArgs{ ShardCoordinator: tpn.ShardCoordinator, - Messenger: tpn.Messenger, + Messenger: 
tpn.MainMessenger, Store: tpn.Storage, Marshalizer: TestMarshalizer, DataPools: tpn.DataPool, @@ -1378,7 +1391,7 @@ func (tpn *TestProcessorNode) initRequesters() { NumFullHistoryPeers: 3, }, ShardCoordinator: tpn.ShardCoordinator, - Messenger: tpn.Messenger, + Messenger: tpn.MainMessenger, Marshaller: TestMarshaller, Uint64ByteSliceConverter: TestUint64Converter, OutputAntifloodHandler: &mock.NilAntifloodHandler{}, @@ -2356,7 +2369,7 @@ func (tpn *TestProcessorNode) initNode() { processComponents.HistoryRepositoryInternal = tpn.HistoryRepository processComponents.WhiteListHandlerInternal = tpn.WhiteListHandler processComponents.WhiteListerVerifiedTxsInternal = tpn.WhiteListerVerifiedTxs - processComponents.TxsSenderHandlerField = createTxsSender(tpn.ShardCoordinator, tpn.Messenger) + processComponents.TxsSenderHandlerField = createTxsSender(tpn.ShardCoordinator, tpn.MainMessenger) processComponents.HardforkTriggerField = tpn.HardforkTrigger cryptoComponents := GetDefaultCryptoComponents() @@ -2388,7 +2401,8 @@ func (tpn *TestProcessorNode) initNode() { stateComponents.AccountsRepo, _ = state.NewAccountsRepository(argsAccountsRepo) networkComponents := GetDefaultNetworkComponents() - networkComponents.Messenger = tpn.Messenger + networkComponents.Messenger = tpn.MainMessenger + networkComponents.FullArchiveNetworkMessengerField = tpn.FullArchiveMessenger networkComponents.PeersRatingHandlerField = tpn.PeersRatingHandler networkComponents.PeersRatingMonitorField = tpn.PeersRatingMonitor @@ -2934,7 +2948,8 @@ func (tpn *TestProcessorNode) createHeartbeatWithHardforkTrigger() { cryptoComponents.PeerSignHandler = psh networkComponents := GetDefaultNetworkComponents() - networkComponents.Messenger = tpn.Messenger + networkComponents.Messenger = tpn.MainMessenger + networkComponents.FullArchiveNetworkMessengerField = tpn.FullArchiveMessenger networkComponents.InputAntiFlood = &mock.NilAntifloodHandler{} processComponents := GetDefaultProcessComponents() @@ -2959,7 
+2974,7 @@ func (tpn *TestProcessorNode) createHeartbeatWithHardforkTrigger() { processComponents.WhiteListerVerifiedTxsInternal = tpn.WhiteListerVerifiedTxs processComponents.WhiteListHandlerInternal = tpn.WhiteListHandler processComponents.HistoryRepositoryInternal = tpn.HistoryRepository - processComponents.TxsSenderHandlerField = createTxsSender(tpn.ShardCoordinator, tpn.Messenger) + processComponents.TxsSenderHandlerField = createTxsSender(tpn.ShardCoordinator, tpn.MainMessenger) processComponents.HardforkTriggerField = tpn.HardforkTrigger @@ -3170,7 +3185,8 @@ func GetDefaultProcessComponents() *mock.ProcessComponentsStub { ReqHandler: &testscommon.RequestHandlerStub{}, TxLogsProcess: &mock.TxLogProcessorMock{}, HeaderConstructValidator: &mock.HeaderValidatorStub{}, - PeerMapper: &p2pmocks.NetworkShardingCollectorStub{}, + MainPeerMapper: &p2pmocks.NetworkShardingCollectorStub{}, + FullArchivePeerMapper: &p2pmocks.NetworkShardingCollectorStub{}, FallbackHdrValidator: &testscommon.FallBackHeaderValidatorStub{}, NodeRedundancyHandlerInternal: &mock.RedundancyHandlerStub{ IsRedundancyNodeCalled: func() bool { diff --git a/integrationTests/testSyncNode.go b/integrationTests/testSyncNode.go index 0bec036c39a..c9bae24a1da 100644 --- a/integrationTests/testSyncNode.go +++ b/integrationTests/testSyncNode.go @@ -159,7 +159,7 @@ func (tpn *TestProcessorNode) createShardBootstrapper() (TestBootstrapper, error ShardCoordinator: tpn.ShardCoordinator, Accounts: tpn.AccntState, BlackListHandler: tpn.BlockBlackListHandler, - NetworkWatcher: tpn.Messenger, + NetworkWatcher: tpn.MainMessenger, BootStorer: tpn.BootstrapStorer, StorageBootstrapper: tpn.StorageBootstrapper, EpochHandler: tpn.EpochStartTrigger, @@ -205,7 +205,7 @@ func (tpn *TestProcessorNode) createMetaChainBootstrapper() (TestBootstrapper, e ShardCoordinator: tpn.ShardCoordinator, Accounts: tpn.AccntState, BlackListHandler: tpn.BlockBlackListHandler, - NetworkWatcher: tpn.Messenger, + NetworkWatcher: 
tpn.MainMessenger, BootStorer: tpn.BootstrapStorer, StorageBootstrapper: tpn.StorageBootstrapper, EpochHandler: tpn.EpochStartTrigger, diff --git a/integrationTests/vm/esdt/nft/esdtNFT/esdtNft_test.go b/integrationTests/vm/esdt/nft/esdtNFT/esdtNft_test.go index 717615301b6..0117b84fd3d 100644 --- a/integrationTests/vm/esdt/nft/esdtNFT/esdtNft_test.go +++ b/integrationTests/vm/esdt/nft/esdtNFT/esdtNft_test.go @@ -779,7 +779,7 @@ func testESDTSemiFungibleTokenTransferRole(t *testing.T, numOfShards int) { defer func() { for _, n := range nodes { - _ = n.Messenger.Close() + _ = n.MainMessenger.Close() } }() @@ -929,7 +929,7 @@ func TestESDTSFTWithEnhancedTransferRole(t *testing.T) { defer func() { for _, n := range nodes { - _ = n.Messenger.Close() + _ = n.MainMessenger.Close() } }() diff --git a/node/nodeHelper.go b/node/nodeHelper.go index f04ba91bbcb..7685c44f600 100644 --- a/node/nodeHelper.go +++ b/node/nodeHelper.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/node/nodeDebugFactory" + "github.com/multiversx/mx-chain-go/p2p" procFactory "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/process/throttle/antiflood/blackList" "github.com/multiversx/mx-chain-go/sharding" @@ -22,13 +23,25 @@ func prepareOpenTopics( ) { selfID := shardCoordinator.SelfId() selfShardHeartbeatV2Topic := common.HeartbeatV2Topic + core.CommunicationIdentifierBetweenShards(selfID, selfID) + selfShardHeartbeatV2TopicFullArchive := common.FullArchiveTopicPrefix + common.HeartbeatV2Topic + core.CommunicationIdentifierBetweenShards(selfID, selfID) if selfID == core.MetachainShardId { - antiflood.SetTopicsForAll(common.PeerAuthenticationTopic, selfShardHeartbeatV2Topic, common.ConnectionTopic) + antiflood.SetTopicsForAll( + common.PeerAuthenticationTopic, + selfShardHeartbeatV2Topic, + selfShardHeartbeatV2TopicFullArchive, + common.ConnectionTopic, + 
common.FullArchiveTopicPrefix+common.ConnectionTopic) return } selfShardTxTopic := procFactory.TransactionTopic + core.CommunicationIdentifierBetweenShards(selfID, selfID) - antiflood.SetTopicsForAll(common.PeerAuthenticationTopic, selfShardHeartbeatV2Topic, common.ConnectionTopic, selfShardTxTopic) + antiflood.SetTopicsForAll( + common.PeerAuthenticationTopic, + selfShardHeartbeatV2Topic, + selfShardHeartbeatV2TopicFullArchive, + common.ConnectionTopic, + common.FullArchiveTopicPrefix+common.ConnectionTopic, + selfShardTxTopic) } // CreateNode is the node factory @@ -50,16 +63,7 @@ func CreateNode( ) (*Node, error) { prepareOpenTopics(networkComponents.InputAntiFloodHandler(), processComponents.ShardCoordinator()) - peerDenialEvaluator, err := blackList.NewPeerDenialEvaluator( - networkComponents.PeerBlackListHandler(), - networkComponents.PubKeyCacher(), - processComponents.PeerShardMapper(), - ) - if err != nil { - return nil, err - } - - err = networkComponents.NetworkMessenger().SetPeerDenialEvaluator(peerDenialEvaluator) + peerDenialEvaluator, err := preparePeerDenialEvaluators(networkComponents, processComponents) if err != nil { return nil, err } @@ -123,3 +127,38 @@ func CreateNode( return nd, nil } + +func preparePeerDenialEvaluators( + networkComponents factory.NetworkComponentsHandler, + processComponents factory.ProcessComponentsHandler, +) (p2p.PeerDenialEvaluator, error) { + mainPeerDenialEvaluator, err := blackList.NewPeerDenialEvaluator( + networkComponents.PeerBlackListHandler(), + networkComponents.PubKeyCacher(), + processComponents.PeerShardMapper(), + ) + if err != nil { + return nil, err + } + + err = networkComponents.NetworkMessenger().SetPeerDenialEvaluator(mainPeerDenialEvaluator) + if err != nil { + return nil, err + } + + fullArchivePeerDenialEvaluator, err := blackList.NewPeerDenialEvaluator( + networkComponents.PeerBlackListHandler(), + networkComponents.PubKeyCacher(), + processComponents.FullArchivePeerShardMapper(), + ) + if err != 
nil { + return nil, err + } + + err = networkComponents.FullArchiveNetworkMessenger().SetPeerDenialEvaluator(fullArchivePeerDenialEvaluator) + if err != nil { + return nil, err + } + + return mainPeerDenialEvaluator, nil +} diff --git a/node/node_test.go b/node/node_test.go index 1a15b79cb39..5c11a0f167c 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -3752,7 +3752,7 @@ func TestNode_ShouldWork(t *testing.T) { pid2 := "pid2" processComponents := getDefaultProcessComponents() - processComponents.PeerMapper = &p2pmocks.NetworkShardingCollectorStub{ + processComponents.MainPeerMapper = &p2pmocks.NetworkShardingCollectorStub{ GetPeerInfoCalled: func(pid core.PeerID) core.P2PPeerInfo { return core.P2PPeerInfo{ PeerType: 0, @@ -5130,7 +5130,8 @@ func getDefaultProcessComponents() *factoryMock.ProcessComponentsMock { ReqHandler: &testscommon.RequestHandlerStub{}, TxLogsProcess: &mock.TxLogProcessorMock{}, HeaderConstructValidator: &mock.HeaderValidatorStub{}, - PeerMapper: &p2pmocks.NetworkShardingCollectorStub{}, + MainPeerMapper: &p2pmocks.NetworkShardingCollectorStub{}, + FullArchivePeerMapper: &p2pmocks.NetworkShardingCollectorStub{}, WhiteListHandlerInternal: &testscommon.WhiteListHandlerStub{}, WhiteListerVerifiedTxsInternal: &testscommon.WhiteListHandlerStub{}, TxsSenderHandlerField: &txsSenderMock.TxsSenderHandlerMock{}, diff --git a/process/factory/interceptorscontainer/args.go b/process/factory/interceptorscontainer/args.go index 459812df1f9..ca8b66c61fd 100644 --- a/process/factory/interceptorscontainer/args.go +++ b/process/factory/interceptorscontainer/args.go @@ -4,6 +4,7 @@ import ( crypto "github.com/multiversx/mx-chain-crypto-go" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/heartbeat" + "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" @@ -17,7 +18,8 @@ type 
CommonInterceptorsContainerFactoryArgs struct { Accounts state.AccountsAdapter ShardCoordinator sharding.Coordinator NodesCoordinator nodesCoordinator.NodesCoordinator - Messenger process.TopicHandler + MainMessenger process.TopicHandler + FullArchiveMessenger process.TopicHandler Store dataRetriever.StorageService DataPool dataRetriever.PoolsHolder MaxTxNonceDeltaAllowed int @@ -37,6 +39,8 @@ type CommonInterceptorsContainerFactoryArgs struct { PeerSignatureHandler crypto.PeerSignatureHandler SignaturesHandler process.SignaturesHandler HeartbeatExpiryTimespanInSec int64 - PeerShardMapper process.PeerShardMapper + MainPeerShardMapper process.PeerShardMapper + FullArchivePeerShardMapper process.PeerShardMapper HardforkTrigger heartbeat.HardforkTrigger + NodeOperationMode p2p.NodeOperation } diff --git a/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go b/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go index 3a8686e0713..f212b7fdc0c 100644 --- a/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go +++ b/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go @@ -1,14 +1,17 @@ package interceptorscontainer import ( + "fmt" "time" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/disabled" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/heartbeat" + "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/dataValidators" "github.com/multiversx/mx-chain-go/process/factory" @@ -18,32 +21,40 @@ import ( "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/storage" ) 
-const numGoRoutines = 100 -const chunksProcessorRequestInterval = time.Millisecond * 400 -const minTimespanDurationInSec = int64(1) +const ( + numGoRoutines = 100 + chunksProcessorRequestInterval = time.Millisecond * 400 + minTimespanDurationInSec = int64(1) + errorOnMainNetworkString = "on main network" + errorOnFullArchiveNetworkString = "on full archive network" +) type baseInterceptorsContainerFactory struct { - container process.InterceptorsContainer - shardCoordinator sharding.Coordinator - accounts state.AccountsAdapter - store dataRetriever.StorageService - dataPool dataRetriever.PoolsHolder - messenger process.TopicHandler - nodesCoordinator nodesCoordinator.NodesCoordinator - blockBlackList process.TimeCacher - argInterceptorFactory *interceptorFactory.ArgInterceptedDataFactory - globalThrottler process.InterceptorThrottler - maxTxNonceDeltaAllowed int - antifloodHandler process.P2PAntifloodHandler - whiteListHandler process.WhiteListHandler - whiteListerVerifiedTxs process.WhiteListHandler - preferredPeersHolder process.PreferredPeersHolderHandler - hasher hashing.Hasher - requestHandler process.RequestHandler - peerShardMapper process.PeerShardMapper - hardforkTrigger heartbeat.HardforkTrigger + container process.InterceptorsContainer + shardCoordinator sharding.Coordinator + accounts state.AccountsAdapter + store dataRetriever.StorageService + dataPool dataRetriever.PoolsHolder + mainMessenger process.TopicHandler + fullArchiveMessenger process.TopicHandler + nodesCoordinator nodesCoordinator.NodesCoordinator + blockBlackList process.TimeCacher + argInterceptorFactory *interceptorFactory.ArgInterceptedDataFactory + globalThrottler process.InterceptorThrottler + maxTxNonceDeltaAllowed int + antifloodHandler process.P2PAntifloodHandler + whiteListHandler process.WhiteListHandler + whiteListerVerifiedTxs process.WhiteListHandler + preferredPeersHolder process.PreferredPeersHolderHandler + hasher hashing.Hasher + requestHandler process.RequestHandler + 
mainPeerShardMapper process.PeerShardMapper + fullArchivePeerShardMapper process.PeerShardMapper + hardforkTrigger heartbeat.HardforkTrigger + nodeOperationMode p2p.NodeOperation } func checkBaseParams( @@ -53,7 +64,8 @@ func checkBaseParams( accounts state.AccountsAdapter, store dataRetriever.StorageService, dataPool dataRetriever.PoolsHolder, - messenger process.TopicHandler, + mainMessenger process.TopicHandler, + fullArchiveMessenger process.TopicHandler, nodesCoordinator nodesCoordinator.NodesCoordinator, blackList process.TimeCacher, antifloodHandler process.P2PAntifloodHandler, @@ -61,7 +73,8 @@ func checkBaseParams( whiteListerVerifiedTxs process.WhiteListHandler, preferredPeersHolder process.PreferredPeersHolderHandler, requestHandler process.RequestHandler, - peerShardMapper process.PeerShardMapper, + mainPeerShardMapper process.PeerShardMapper, + fullArchivePeerShardMapper process.PeerShardMapper, hardforkTrigger heartbeat.HardforkTrigger, ) error { if check.IfNil(coreComponents) { @@ -73,8 +86,11 @@ func checkBaseParams( if check.IfNil(shardCoordinator) { return process.ErrNilShardCoordinator } - if check.IfNil(messenger) { - return process.ErrNilMessenger + if check.IfNil(mainMessenger) { + return fmt.Errorf("%w %s", process.ErrNilMessenger, errorOnMainNetworkString) + } + if check.IfNil(fullArchiveMessenger) { + return fmt.Errorf("%w %s", process.ErrNilMessenger, errorOnFullArchiveNetworkString) } if check.IfNil(store) { return process.ErrNilStore @@ -149,8 +165,11 @@ func checkBaseParams( if check.IfNil(requestHandler) { return process.ErrNilRequestHandler } - if check.IfNil(peerShardMapper) { - return process.ErrNilPeerShardMapper + if check.IfNil(mainPeerShardMapper) { + return fmt.Errorf("%w %s", process.ErrNilPeerShardMapper, errorOnMainNetworkString) + } + if check.IfNil(fullArchivePeerShardMapper) { + return fmt.Errorf("%w %s", process.ErrNilPeerShardMapper, errorOnFullArchiveNetworkString) } if check.IfNil(hardforkTrigger) { return 
process.ErrNilHardforkTrigger @@ -165,12 +184,34 @@ func (bicf *baseInterceptorsContainerFactory) createTopicAndAssignHandler( createChannel bool, ) (process.Interceptor, error) { - err := bicf.messenger.CreateTopic(topic, createChannel) + err := createTopicAndAssignHandlerOnMessenger(topic, interceptor, createChannel, bicf.mainMessenger) if err != nil { return nil, err } - return interceptor, bicf.messenger.RegisterMessageProcessor(topic, common.DefaultInterceptorsIdentifier, interceptor) + if bicf.nodeOperationMode == p2p.FullArchiveMode { + err = createTopicAndAssignHandlerOnMessenger(topic, interceptor, createChannel, bicf.fullArchiveMessenger) + if err != nil { + return nil, err + } + } + + return interceptor, nil +} + +func createTopicAndAssignHandlerOnMessenger( + topic string, + interceptor process.Interceptor, + createChannel bool, + messenger process.TopicHandler, +) error { + + err := messenger.CreateTopic(topic, createChannel) + if err != nil { + return err + } + + return messenger.RegisterMessageProcessor(topic, common.DefaultInterceptorsIdentifier, interceptor) } // ------- Tx interceptors @@ -255,7 +296,7 @@ func (bicf *baseInterceptorsContainerFactory) createOneTxInterceptor(topic strin Throttler: bicf.globalThrottler, AntifloodHandler: bicf.antifloodHandler, WhiteListRequest: bicf.whiteListHandler, - CurrentPeerId: bicf.messenger.ID(), + CurrentPeerId: bicf.mainMessenger.ID(), PreferredPeersHolder: bicf.preferredPeersHolder, }, ) @@ -298,7 +339,7 @@ func (bicf *baseInterceptorsContainerFactory) createOneUnsignedTxInterceptor(top Throttler: bicf.globalThrottler, AntifloodHandler: bicf.antifloodHandler, WhiteListRequest: bicf.whiteListHandler, - CurrentPeerId: bicf.messenger.ID(), + CurrentPeerId: bicf.mainMessenger.ID(), PreferredPeersHolder: bicf.preferredPeersHolder, }, ) @@ -341,7 +382,7 @@ func (bicf *baseInterceptorsContainerFactory) createOneRewardTxInterceptor(topic Throttler: bicf.globalThrottler, AntifloodHandler: bicf.antifloodHandler, 
WhiteListRequest: bicf.whiteListHandler, - CurrentPeerId: bicf.messenger.ID(), + CurrentPeerId: bicf.mainMessenger.ID(), PreferredPeersHolder: bicf.preferredPeersHolder, }, ) @@ -383,7 +424,7 @@ func (bicf *baseInterceptorsContainerFactory) generateHeaderInterceptors() error Throttler: bicf.globalThrottler, AntifloodHandler: bicf.antifloodHandler, WhiteListRequest: bicf.whiteListHandler, - CurrentPeerId: bicf.messenger.ID(), + CurrentPeerId: bicf.mainMessenger.ID(), PreferredPeersHolder: bicf.preferredPeersHolder, }, ) @@ -471,7 +512,7 @@ func (bicf *baseInterceptorsContainerFactory) createOneMiniBlocksInterceptor(top Throttler: bicf.globalThrottler, AntifloodHandler: bicf.antifloodHandler, WhiteListRequest: bicf.whiteListHandler, - CurrentPeerId: bicf.messenger.ID(), + CurrentPeerId: bicf.mainMessenger.ID(), PreferredPeersHolder: bicf.preferredPeersHolder, }, ) @@ -510,7 +551,7 @@ func (bicf *baseInterceptorsContainerFactory) generateMetachainHeaderInterceptor Throttler: bicf.globalThrottler, AntifloodHandler: bicf.antifloodHandler, WhiteListRequest: bicf.whiteListHandler, - CurrentPeerId: bicf.messenger.ID(), + CurrentPeerId: bicf.mainMessenger.ID(), PreferredPeersHolder: bicf.preferredPeersHolder, }, ) @@ -547,7 +588,7 @@ func (bicf *baseInterceptorsContainerFactory) createOneTrieNodesInterceptor(topi Throttler: bicf.globalThrottler, AntifloodHandler: bicf.antifloodHandler, WhiteListRequest: bicf.whiteListHandler, - CurrentPeerId: bicf.messenger.ID(), + CurrentPeerId: bicf.mainMessenger.ID(), PreferredPeersHolder: bicf.preferredPeersHolder, }, ) @@ -615,7 +656,7 @@ func (bicf *baseInterceptorsContainerFactory) generatePeerAuthenticationIntercep internalMarshaller := bicf.argInterceptorFactory.CoreComponents.InternalMarshalizer() argProcessor := processor.ArgPeerAuthenticationInterceptorProcessor{ PeerAuthenticationCacher: bicf.dataPool.PeerAuthentications(), - PeerShardMapper: bicf.peerShardMapper, + PeerShardMapper: bicf.mainPeerShardMapper, Marshaller: 
internalMarshaller, HardforkTrigger: bicf.hardforkTrigger, } @@ -639,107 +680,162 @@ func (bicf *baseInterceptorsContainerFactory) generatePeerAuthenticationIntercep AntifloodHandler: bicf.antifloodHandler, WhiteListRequest: bicf.whiteListHandler, PreferredPeersHolder: bicf.preferredPeersHolder, - CurrentPeerId: bicf.messenger.ID(), + CurrentPeerId: bicf.mainMessenger.ID(), }, ) if err != nil { return err } - interceptor, err := bicf.createTopicAndAssignHandler(identifierPeerAuthentication, mdInterceptor, true) + err = createTopicAndAssignHandlerOnMessenger(identifierPeerAuthentication, mdInterceptor, true, bicf.mainMessenger) if err != nil { return err } - return bicf.container.Add(identifierPeerAuthentication, interceptor) + return bicf.container.Add(identifierPeerAuthentication, mdInterceptor) } -//------- Heartbeat interceptor +//------- Heartbeat interceptors -func (bicf *baseInterceptorsContainerFactory) generateHeartbeatInterceptor() error { - // TODO[Sorin]: we'll need separate interceptors for full archive heartbeats and peer authentications +func (bicf *baseInterceptorsContainerFactory) generateMainHeartbeatInterceptor() error { shardC := bicf.shardCoordinator identifierHeartbeat := common.HeartbeatV2Topic + shardC.CommunicationIdentifier(shardC.SelfId()) + interceptor, err := bicf.createOneHeartbeatV2Interceptor(identifierHeartbeat, bicf.dataPool.Heartbeats(), bicf.mainPeerShardMapper) + if err != nil { + return err + } + + err = createTopicAndAssignHandlerOnMessenger(identifierHeartbeat, interceptor, true, bicf.mainMessenger) + if err != nil { + return err + } + + return bicf.container.Add(identifierHeartbeat, interceptor) +} + +func (bicf *baseInterceptorsContainerFactory) generateFullArchiveHeartbeatInterceptor() error { + if bicf.nodeOperationMode != p2p.FullArchiveMode { + return nil + } + + shardC := bicf.shardCoordinator + identifierHeartbeat := common.FullArchiveTopicPrefix + common.HeartbeatV2Topic + 
shardC.CommunicationIdentifier(shardC.SelfId()) + + interceptor, err := bicf.createOneHeartbeatV2Interceptor(identifierHeartbeat, disabled.NewCache(), bicf.fullArchivePeerShardMapper) + if err != nil { + return err + } + + err = createTopicAndAssignHandlerOnMessenger(identifierHeartbeat, interceptor, true, bicf.fullArchiveMessenger) + if err != nil { + return err + } + + return bicf.container.Add(identifierHeartbeat, interceptor) +} + +func (bicf *baseInterceptorsContainerFactory) createOneHeartbeatV2Interceptor( + identifier string, + heartbeatCacher storage.Cacher, + peerShardMapper process.PeerShardMapper, +) (process.Interceptor, error) { argHeartbeatProcessor := processor.ArgHeartbeatInterceptorProcessor{ - HeartbeatCacher: bicf.dataPool.Heartbeats(), - ShardCoordinator: shardC, - PeerShardMapper: bicf.peerShardMapper, + HeartbeatCacher: heartbeatCacher, + ShardCoordinator: bicf.shardCoordinator, + PeerShardMapper: peerShardMapper, } heartbeatProcessor, err := processor.NewHeartbeatInterceptorProcessor(argHeartbeatProcessor) if err != nil { - return err + return nil, err } heartbeatFactory, err := interceptorFactory.NewInterceptedHeartbeatDataFactory(*bicf.argInterceptorFactory) if err != nil { - return err + return nil, err } - sdInterceptor, err := interceptors.NewSingleDataInterceptor( + return interceptors.NewSingleDataInterceptor( interceptors.ArgSingleDataInterceptor{ - Topic: identifierHeartbeat, + Topic: identifier, DataFactory: heartbeatFactory, Processor: heartbeatProcessor, Throttler: bicf.globalThrottler, AntifloodHandler: bicf.antifloodHandler, WhiteListRequest: bicf.whiteListHandler, PreferredPeersHolder: bicf.preferredPeersHolder, - CurrentPeerId: bicf.messenger.ID(), + CurrentPeerId: bicf.mainMessenger.ID(), }, ) +} + +// ------- PeerShard interceptors + +func (bicf *baseInterceptorsContainerFactory) generateMainPeerShardInterceptor() error { + identifier := common.ConnectionTopic + + interceptor, err :=
bicf.createOnePeerShardInterceptor(identifier, bicf.mainPeerShardMapper) if err != nil { return err } - interceptor, err := bicf.createTopicAndAssignHandler(identifierHeartbeat, sdInterceptor, true) + err = createTopicAndAssignHandlerOnMessenger(identifier, interceptor, true, bicf.mainMessenger) if err != nil { return err } - return bicf.container.Add(identifierHeartbeat, interceptor) + return bicf.container.Add(identifier, interceptor) } -// ------- PeerShard interceptor +func (bicf *baseInterceptorsContainerFactory) generateFullArchivePeerShardInterceptor() error { + if bicf.nodeOperationMode != p2p.FullArchiveMode { + return nil + } -func (bicf *baseInterceptorsContainerFactory) generatePeerShardInterceptor() error { - identifier := common.ConnectionTopic + identifier := common.FullArchiveTopicPrefix + common.ConnectionTopic - interceptedPeerShardFactory, err := interceptorFactory.NewInterceptedPeerShardFactory(*bicf.argInterceptorFactory) + interceptor, err := bicf.createOnePeerShardInterceptor(identifier, bicf.fullArchivePeerShardMapper) + if err != nil { + return err + } + + err = createTopicAndAssignHandlerOnMessenger(identifier, interceptor, true, bicf.fullArchiveMessenger) if err != nil { return err } + return bicf.container.Add(identifier, interceptor) +} + +func (bicf *baseInterceptorsContainerFactory) createOnePeerShardInterceptor( + identifier string, + peerShardMapper process.PeerShardMapper, +) (process.Interceptor, error) { + interceptedPeerShardFactory, err := interceptorFactory.NewInterceptedPeerShardFactory(*bicf.argInterceptorFactory) + if err != nil { + return nil, err + } + argProcessor := processor.ArgPeerShardInterceptorProcessor{ - PeerShardMapper: bicf.peerShardMapper, + PeerShardMapper: peerShardMapper, } - dciProcessor, err := processor.NewPeerShardInterceptorProcessor(argProcessor) + psiProcessor, err := processor.NewPeerShardInterceptorProcessor(argProcessor) if err != nil { - return err + return nil, err } - interceptor, err := 
interceptors.NewSingleDataInterceptor( + return interceptors.NewSingleDataInterceptor( interceptors.ArgSingleDataInterceptor{ Topic: identifier, DataFactory: interceptedPeerShardFactory, - Processor: dciProcessor, + Processor: psiProcessor, Throttler: bicf.globalThrottler, AntifloodHandler: bicf.antifloodHandler, WhiteListRequest: bicf.whiteListHandler, - CurrentPeerId: bicf.messenger.ID(), + CurrentPeerId: bicf.mainMessenger.ID(), PreferredPeersHolder: bicf.preferredPeersHolder, }, ) - if err != nil { - return err - } - - _, err = bicf.createTopicAndAssignHandler(identifier, interceptor, true) - if err != nil { - return err - } - - return bicf.container.Add(identifier, interceptor) } func (bicf *baseInterceptorsContainerFactory) generateValidatorInfoInterceptor() error { @@ -770,7 +866,7 @@ func (bicf *baseInterceptorsContainerFactory) generateValidatorInfoInterceptor() AntifloodHandler: bicf.antifloodHandler, WhiteListRequest: bicf.whiteListHandler, PreferredPeersHolder: bicf.preferredPeersHolder, - CurrentPeerId: bicf.messenger.ID(), + CurrentPeerId: bicf.mainMessenger.ID(), }, ) if err != nil { diff --git a/process/factory/interceptorscontainer/metaInterceptorsContainerFactory.go b/process/factory/interceptorscontainer/metaInterceptorsContainerFactory.go index b783556254b..bd1d3bee45a 100644 --- a/process/factory/interceptorscontainer/metaInterceptorsContainerFactory.go +++ b/process/factory/interceptorscontainer/metaInterceptorsContainerFactory.go @@ -31,7 +31,8 @@ func NewMetaInterceptorsContainerFactory( args.Accounts, args.Store, args.DataPool, - args.Messenger, + args.MainMessenger, + args.FullArchiveMessenger, args.NodesCoordinator, args.BlockBlackList, args.AntifloodHandler, @@ -39,7 +40,8 @@ func NewMetaInterceptorsContainerFactory( args.WhiteListerVerifiedTxs, args.PreferredPeersHolder, args.RequestHandler, - args.PeerShardMapper, + args.MainPeerShardMapper, + args.FullArchivePeerShardMapper, args.HardforkTrigger, ) if err != nil { @@ -96,29 +98,32 @@ 
func NewMetaInterceptorsContainerFactory( PeerSignatureHandler: args.PeerSignatureHandler, SignaturesHandler: args.SignaturesHandler, HeartbeatExpiryTimespanInSec: args.HeartbeatExpiryTimespanInSec, - PeerID: args.Messenger.ID(), + PeerID: args.MainMessenger.ID(), } container := containers.NewInterceptorsContainer() base := &baseInterceptorsContainerFactory{ - container: container, - shardCoordinator: args.ShardCoordinator, - messenger: args.Messenger, - store: args.Store, - dataPool: args.DataPool, - nodesCoordinator: args.NodesCoordinator, - blockBlackList: args.BlockBlackList, - argInterceptorFactory: argInterceptorFactory, - maxTxNonceDeltaAllowed: args.MaxTxNonceDeltaAllowed, - accounts: args.Accounts, - antifloodHandler: args.AntifloodHandler, - whiteListHandler: args.WhiteListHandler, - whiteListerVerifiedTxs: args.WhiteListerVerifiedTxs, - preferredPeersHolder: args.PreferredPeersHolder, - hasher: args.CoreComponents.Hasher(), - requestHandler: args.RequestHandler, - peerShardMapper: args.PeerShardMapper, - hardforkTrigger: args.HardforkTrigger, + container: container, + shardCoordinator: args.ShardCoordinator, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, + store: args.Store, + dataPool: args.DataPool, + nodesCoordinator: args.NodesCoordinator, + blockBlackList: args.BlockBlackList, + argInterceptorFactory: argInterceptorFactory, + maxTxNonceDeltaAllowed: args.MaxTxNonceDeltaAllowed, + accounts: args.Accounts, + antifloodHandler: args.AntifloodHandler, + whiteListHandler: args.WhiteListHandler, + whiteListerVerifiedTxs: args.WhiteListerVerifiedTxs, + preferredPeersHolder: args.PreferredPeersHolder, + hasher: args.CoreComponents.Hasher(), + requestHandler: args.RequestHandler, + mainPeerShardMapper: args.MainPeerShardMapper, + fullArchivePeerShardMapper: args.FullArchivePeerShardMapper, + hardforkTrigger: args.HardforkTrigger, + nodeOperationMode: args.NodeOperationMode, } icf := &metaInterceptorsContainerFactory{ 
@@ -175,12 +180,22 @@ func (micf *metaInterceptorsContainerFactory) Create() (process.InterceptorsCont return nil, err } - err = micf.generateHeartbeatInterceptor() + err = micf.generateMainHeartbeatInterceptor() if err != nil { return nil, err } - err = micf.generatePeerShardInterceptor() + err = micf.generateFullArchiveHeartbeatInterceptor() + if err != nil { + return nil, err + } + + err = micf.generateMainPeerShardInterceptor() + if err != nil { + return nil, err + } + + err = micf.generateFullArchivePeerShardInterceptor() if err != nil { return nil, err } @@ -264,7 +279,7 @@ func (micf *metaInterceptorsContainerFactory) createOneShardHeaderInterceptor(to Throttler: micf.globalThrottler, AntifloodHandler: micf.antifloodHandler, WhiteListRequest: micf.whiteListHandler, - CurrentPeerId: micf.messenger.ID(), + CurrentPeerId: micf.mainMessenger.ID(), PreferredPeersHolder: micf.preferredPeersHolder, }, ) diff --git a/process/factory/interceptorscontainer/metaInterceptorsContainerFactory_test.go b/process/factory/interceptorscontainer/metaInterceptorsContainerFactory_test.go index 5e636622ed3..c794c94dafb 100644 --- a/process/factory/interceptorscontainer/metaInterceptorsContainerFactory_test.go +++ b/process/factory/interceptorscontainer/metaInterceptorsContainerFactory_test.go @@ -5,6 +5,7 @@ import ( "strings" "testing" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process" @@ -143,16 +144,28 @@ func TestNewMetaInterceptorsContainerFactory_NilNodesCoordinatorShouldErr(t *tes assert.Equal(t, process.ErrNilNodesCoordinator, err) } -func TestNewMetaInterceptorsContainerFactory_NilTopicHandlerShouldErr(t *testing.T) { +func TestNewMetaInterceptorsContainerFactory_NilMainTopicHandlerShouldErr(t *testing.T) { t.Parallel() coreComp, cryptoComp := createMockComponentHolders() args := getArgumentsMeta(coreComp, cryptoComp) - args.Messenger = nil + 
args.MainMessenger = nil icf, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) assert.Nil(t, icf) - assert.Equal(t, process.ErrNilMessenger, err) + assert.True(t, errors.Is(err, process.ErrNilMessenger)) +} + +func TestNewMetaInterceptorsContainerFactory_NilFullArchiveTopicHandlerShouldErr(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createMockComponentHolders() + args := getArgumentsMeta(coreComp, cryptoComp) + args.FullArchiveMessenger = nil + icf, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) + + assert.Nil(t, icf) + assert.True(t, errors.Is(err, process.ErrNilMessenger)) } func TestNewMetaInterceptorsContainerFactory_NilStoreShouldErr(t *testing.T) { @@ -420,16 +433,28 @@ func TestNewMetaInterceptorsContainerFactory_NilRequestHandlerShouldErr(t *testi assert.Equal(t, process.ErrNilRequestHandler, err) } -func TestNewMetaInterceptorsContainerFactory_NilPeerShardMapperShouldErr(t *testing.T) { +func TestNewMetaInterceptorsContainerFactory_NilMainPeerShardMapperShouldErr(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createMockComponentHolders() + args := getArgumentsMeta(coreComp, cryptoComp) + args.MainPeerShardMapper = nil + icf, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) + + assert.Nil(t, icf) + assert.True(t, errors.Is(err, process.ErrNilPeerShardMapper)) +} + +func TestNewMetaInterceptorsContainerFactory_NilFullArchivePeerShardMapperShouldErr(t *testing.T) { t.Parallel() coreComp, cryptoComp := createMockComponentHolders() args := getArgumentsMeta(coreComp, cryptoComp) - args.PeerShardMapper = nil + args.FullArchivePeerShardMapper = nil icf, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) assert.Nil(t, icf) - assert.Equal(t, process.ErrNilPeerShardMapper, err) + assert.True(t, errors.Is(err, process.ErrNilPeerShardMapper)) } func TestNewMetaInterceptorsContainerFactory_NilHardforkTriggerShouldErr(t *testing.T) { @@ -470,74 +495,59 @@ 
func TestNewMetaInterceptorsContainerFactory_ShouldWorkWithSizeCheck(t *testing. // ------- Create -func TestMetaInterceptorsContainerFactory_CreateTopicMetablocksFailsShouldErr(t *testing.T) { +func TestMetaInterceptorsContainerFactory_CreateTopicsAndRegisterFailure(t *testing.T) { t.Parallel() - coreComp, cryptoComp := createMockComponentHolders() - args := getArgumentsMeta(coreComp, cryptoComp) - args.Messenger = createMetaStubTopicHandler(factory.MetachainBlocksTopic, "") - icf, _ := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) + testCreateMetaTopicShouldFailOnAllMessenger(t, "generateMetachainHeaderInterceptors_create", factory.MetachainBlocksTopic, "") + testCreateMetaTopicShouldFailOnAllMessenger(t, "generateMetachainHeaderInterceptors_register", "", factory.MetachainBlocksTopic) - container, err := icf.Create() + testCreateMetaTopicShouldFailOnAllMessenger(t, "generateShardHeaderInterceptors", factory.ShardBlocksTopic, "") - assert.Nil(t, container) - assert.Equal(t, errExpected, err) -} + testCreateMetaTopicShouldFailOnAllMessenger(t, "generateTxInterceptors", factory.TransactionTopic, "") -func TestMetaInterceptorsContainerFactory_CreateTopicShardHeadersForMetachainFailsShouldErr(t *testing.T) { - t.Parallel() + testCreateMetaTopicShouldFailOnAllMessenger(t, "generateUnsignedTxsInterceptors", factory.UnsignedTransactionTopic, "") - coreComp, cryptoComp := createMockComponentHolders() - args := getArgumentsMeta(coreComp, cryptoComp) - args.Messenger = createMetaStubTopicHandler(factory.ShardBlocksTopic, "") - icf, _ := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) + testCreateMetaTopicShouldFailOnAllMessenger(t, "generateRewardTxInterceptors", factory.RewardsTransactionTopic, "") - container, err := icf.Create() + testCreateMetaTopicShouldFailOnAllMessenger(t, "generateMiniBlocksInterceptors", factory.MiniBlocksTopic, "") - assert.Nil(t, container) - assert.Equal(t, errExpected, err) -} + 
testCreateMetaTopicShouldFailOnAllMessenger(t, "generateTrieNodesInterceptors_validator", factory.ValidatorTrieNodesTopic, "") -func TestMetaInterceptorsContainerFactory_CreateRegisterForMetablocksFailsShouldErr(t *testing.T) { - t.Parallel() + testCreateMetaTopicShouldFailOnAllMessenger(t, "generateTrieNodesInterceptors_account", factory.AccountTrieNodesTopic, "") - coreComp, cryptoComp := createMockComponentHolders() - args := getArgumentsMeta(coreComp, cryptoComp) - args.Messenger = createMetaStubTopicHandler("", factory.MetachainBlocksTopic) - icf, _ := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) - - container, err := icf.Create() + testCreateMetaTopicShouldFailOnAllMessenger(t, "generateValidatorInfoInterceptor", common.ValidatorInfoTopic, "") - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Run("generatePeerAuthenticationInterceptor_main", testCreateMetaTopicShouldFail(common.PeerAuthenticationTopic, "")) + t.Run("generateHeartbeatInterceptor_main", testCreateMetaTopicShouldFail(common.HeartbeatV2Topic, "")) + t.Run("generateHeartbeatInterceptor_full_archive", testCreateMetaTopicShouldFail(common.FullArchiveTopicPrefix+common.HeartbeatV2Topic, "")) + t.Run("generatePeerShardInterceptor_main", testCreateMetaTopicShouldFail(common.ConnectionTopic, "")) + t.Run("generatePeerShardInterceptor_full_archive", testCreateMetaTopicShouldFail(common.FullArchiveTopicPrefix+common.ConnectionTopic, "")) } -func TestMetaInterceptorsContainerFactory_CreateRegisterShardHeadersForMetachainFailsShouldErr(t *testing.T) { - t.Parallel() - - coreComp, cryptoComp := createMockComponentHolders() - args := getArgumentsMeta(coreComp, cryptoComp) - args.Messenger = createMetaStubTopicHandler("", factory.MetachainBlocksTopic) - icf, _ := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) - - container, err := icf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) +func testCreateMetaTopicShouldFailOnAllMessenger(t 
*testing.T, testNamePrefix string, matchStrToErrOnCreate string, matchStrToErrOnRegister string) { + t.Run(testNamePrefix+"main messenger", testCreateMetaTopicShouldFail(matchStrToErrOnCreate, matchStrToErrOnRegister)) + t.Run(testNamePrefix+"full archive messenger", testCreateMetaTopicShouldFail(matchStrToErrOnCreate, matchStrToErrOnRegister)) } -func TestMetaInterceptorsContainerFactory_CreateRegisterTrieNodesFailsShouldErr(t *testing.T) { - t.Parallel() +func testCreateMetaTopicShouldFail(matchStrToErrOnCreate string, matchStrToErrOnRegister string) func(t *testing.T) { + return func(t *testing.T) { + t.Parallel() - coreComp, cryptoComp := createMockComponentHolders() - args := getArgumentsMeta(coreComp, cryptoComp) - args.Messenger = createMetaStubTopicHandler("", factory.AccountTrieNodesTopic) - icf, _ := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) + coreComp, cryptoComp := createMockComponentHolders() + args := getArgumentsMeta(coreComp, cryptoComp) + if strings.Contains(t.Name(), "full_archive") { + args.NodeOperationMode = p2p.FullArchiveMode + args.FullArchiveMessenger = createMetaStubTopicHandler(matchStrToErrOnCreate, matchStrToErrOnRegister) + } else { + args.MainMessenger = createMetaStubTopicHandler(matchStrToErrOnCreate, matchStrToErrOnRegister) + } + icf, _ := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) - container, err := icf.Create() + container, err := icf.Create() - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + assert.Nil(t, container) + assert.Equal(t, errExpected, err) + } } func TestMetaInterceptorsContainerFactory_CreateShouldWork(t *testing.T) { @@ -545,14 +555,6 @@ func TestMetaInterceptorsContainerFactory_CreateShouldWork(t *testing.T) { coreComp, cryptoComp := createMockComponentHolders() args := getArgumentsMeta(coreComp, cryptoComp) - args.Messenger = &mock.TopicHandlerStub{ - CreateTopicCalled: func(name string, createChannelForTopic bool) error { - return nil - }, - 
RegisterMessageProcessorCalled: func(topic string, identifier string, handler p2p.MessageProcessor) error { - return nil - }, - } icf, _ := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) container, err := icf.Create() @@ -564,58 +566,103 @@ func TestMetaInterceptorsContainerFactory_CreateShouldWork(t *testing.T) { func TestMetaInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { t.Parallel() - noOfShards := 4 - - shardCoordinator := mock.NewMultipleShardsCoordinatorMock() - shardCoordinator.SetNoShards(uint32(noOfShards)) - shardCoordinator.CurrentShard = 1 - - nodesCoordinator := &shardingMocks.NodesCoordinatorMock{ - ShardConsensusSize: 1, - MetaConsensusSize: 1, - NbShards: uint32(noOfShards), - ShardId: 1, - } - - coreComp, cryptoComp := createMockComponentHolders() - args := getArgumentsMeta(coreComp, cryptoComp) - args.ShardCoordinator = shardCoordinator - args.NodesCoordinator = nodesCoordinator - args.Messenger = &mock.TopicHandlerStub{ - CreateTopicCalled: func(name string, createChannelForTopic bool) error { - return nil - }, - RegisterMessageProcessorCalled: func(topic string, identifier string, handler p2p.MessageProcessor) error { - return nil - }, - } - icf, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) - require.Nil(t, err) - - container, err := icf.Create() - - numInterceptorsMetablock := 1 - numInterceptorsShardHeadersForMetachain := noOfShards - numInterceptorsTransactionsForMetachain := noOfShards + 1 - numInterceptorsMiniBlocksForMetachain := noOfShards + 1 + 1 - numInterceptorsUnsignedTxsForMetachain := noOfShards + 1 - numInterceptorsRewardsTxsForMetachain := noOfShards - numInterceptorsTrieNodes := 2 - numInterceptorsPeerAuthForMetachain := 1 - numInterceptorsHeartbeatForMetachain := 1 - numInterceptorsShardValidatorInfoForMetachain := 1 - numInterceptorValidatorInfo := 1 - totalInterceptors := numInterceptorsMetablock + numInterceptorsShardHeadersForMetachain + numInterceptorsTrieNodes 
+ - numInterceptorsTransactionsForMetachain + numInterceptorsUnsignedTxsForMetachain + numInterceptorsMiniBlocksForMetachain + - numInterceptorsRewardsTxsForMetachain + numInterceptorsPeerAuthForMetachain + numInterceptorsHeartbeatForMetachain + - numInterceptorsShardValidatorInfoForMetachain + numInterceptorValidatorInfo - - assert.Nil(t, err) - assert.Equal(t, totalInterceptors, container.Len()) - - err = icf.AddShardTrieNodeInterceptors(container) - assert.Nil(t, err) - assert.Equal(t, totalInterceptors+noOfShards, container.Len()) + t.Run("regular mode", func(t *testing.T) { + t.Parallel() + + noOfShards := 4 + + shardCoordinator := mock.NewMultipleShardsCoordinatorMock() + shardCoordinator.SetNoShards(uint32(noOfShards)) + shardCoordinator.CurrentShard = 1 + + nodesCoordinator := &shardingMocks.NodesCoordinatorMock{ + ShardConsensusSize: 1, + MetaConsensusSize: 1, + NbShards: uint32(noOfShards), + ShardId: 1, + } + + coreComp, cryptoComp := createMockComponentHolders() + args := getArgumentsMeta(coreComp, cryptoComp) + args.ShardCoordinator = shardCoordinator + args.NodesCoordinator = nodesCoordinator + icf, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) + require.Nil(t, err) + + container, err := icf.Create() + + numInterceptorsMetablock := 1 + numInterceptorsShardHeadersForMetachain := noOfShards + numInterceptorsTransactionsForMetachain := noOfShards + 1 + numInterceptorsMiniBlocksForMetachain := noOfShards + 1 + 1 + numInterceptorsUnsignedTxsForMetachain := noOfShards + 1 + numInterceptorsRewardsTxsForMetachain := noOfShards + numInterceptorsTrieNodes := 2 + numInterceptorsPeerAuthForMetachain := 1 + numInterceptorsHeartbeatForMetachain := 1 + numInterceptorsShardValidatorInfoForMetachain := 1 + numInterceptorValidatorInfo := 1 + totalInterceptors := numInterceptorsMetablock + numInterceptorsShardHeadersForMetachain + numInterceptorsTrieNodes + + numInterceptorsTransactionsForMetachain + numInterceptorsUnsignedTxsForMetachain + 
numInterceptorsMiniBlocksForMetachain + + numInterceptorsRewardsTxsForMetachain + numInterceptorsPeerAuthForMetachain + numInterceptorsHeartbeatForMetachain + + numInterceptorsShardValidatorInfoForMetachain + numInterceptorValidatorInfo + + assert.Nil(t, err) + assert.Equal(t, totalInterceptors, container.Len()) + + err = icf.AddShardTrieNodeInterceptors(container) + assert.Nil(t, err) + assert.Equal(t, totalInterceptors+noOfShards, container.Len()) + }) + t.Run("full archive mode", func(t *testing.T) { + t.Parallel() + + noOfShards := 4 + + shardCoordinator := mock.NewMultipleShardsCoordinatorMock() + shardCoordinator.SetNoShards(uint32(noOfShards)) + shardCoordinator.CurrentShard = 1 + nodesCoordinator := &shardingMocks.NodesCoordinatorMock{ + ShardConsensusSize: 1, + MetaConsensusSize: 1, + NbShards: uint32(noOfShards), + ShardId: 1, + } + + coreComp, cryptoComp := createMockComponentHolders() + args := getArgumentsMeta(coreComp, cryptoComp) + args.NodeOperationMode = p2p.FullArchiveMode + args.ShardCoordinator = shardCoordinator + args.NodesCoordinator = nodesCoordinator + + icf, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) + require.Nil(t, err) + + container, err := icf.Create() + + numInterceptorsMetablock := 1 + numInterceptorsShardHeadersForMetachain := noOfShards + numInterceptorsTransactionsForMetachain := noOfShards + 1 + numInterceptorsMiniBlocksForMetachain := noOfShards + 1 + 1 + numInterceptorsUnsignedTxsForMetachain := noOfShards + 1 + numInterceptorsRewardsTxsForMetachain := noOfShards + numInterceptorsTrieNodes := 2 + numInterceptorsPeerAuthForMetachain := 1 + numInterceptorsHeartbeatForMetachain := 2 // one for full archive + numInterceptorsShardValidatorInfoForMetachain := 2 // one for full archive + numInterceptorValidatorInfo := 1 + totalInterceptors := numInterceptorsMetablock + numInterceptorsShardHeadersForMetachain + numInterceptorsTrieNodes + + numInterceptorsTransactionsForMetachain + 
numInterceptorsUnsignedTxsForMetachain + numInterceptorsMiniBlocksForMetachain + + numInterceptorsRewardsTxsForMetachain + numInterceptorsPeerAuthForMetachain + numInterceptorsHeartbeatForMetachain + + numInterceptorsShardValidatorInfoForMetachain + numInterceptorValidatorInfo + + assert.Nil(t, err) + assert.Equal(t, totalInterceptors, container.Len()) + + err = icf.AddShardTrieNodeInterceptors(container) + assert.Nil(t, err) + assert.Equal(t, totalInterceptors+noOfShards, container.Len()) + }) } func getArgumentsMeta( @@ -628,7 +675,8 @@ func getArgumentsMeta( Accounts: &stateMock.AccountsStub{}, ShardCoordinator: mock.NewOneShardCoordinatorMock(), NodesCoordinator: shardingMocks.NewNodesCoordinatorMock(), - Messenger: &mock.TopicHandlerStub{}, + MainMessenger: &mock.TopicHandlerStub{}, + FullArchiveMessenger: &mock.TopicHandlerStub{}, Store: createMetaStore(), DataPool: createMetaDataPools(), MaxTxNonceDeltaAllowed: maxTxNonceDeltaAllowed, @@ -647,7 +695,9 @@ func getArgumentsMeta( PeerSignatureHandler: &mock.PeerSignatureHandlerStub{}, SignaturesHandler: &mock.SignaturesHandlerStub{}, HeartbeatExpiryTimespanInSec: 30, - PeerShardMapper: &p2pmocks.NetworkShardingCollectorStub{}, + MainPeerShardMapper: &p2pmocks.NetworkShardingCollectorStub{}, + FullArchivePeerShardMapper: &p2pmocks.NetworkShardingCollectorStub{}, HardforkTrigger: &testscommon.HardforkTriggerStub{}, + NodeOperationMode: p2p.NormalOperation, } } diff --git a/process/factory/interceptorscontainer/shardInterceptorsContainerFactory.go b/process/factory/interceptorscontainer/shardInterceptorsContainerFactory.go index ccd0f9ee981..87d18138ba0 100644 --- a/process/factory/interceptorscontainer/shardInterceptorsContainerFactory.go +++ b/process/factory/interceptorscontainer/shardInterceptorsContainerFactory.go @@ -29,7 +29,8 @@ func NewShardInterceptorsContainerFactory( args.Accounts, args.Store, args.DataPool, - args.Messenger, + args.MainMessenger, + args.FullArchiveMessenger, args.NodesCoordinator, 
args.BlockBlackList, args.AntifloodHandler, @@ -37,7 +38,8 @@ func NewShardInterceptorsContainerFactory( args.WhiteListerVerifiedTxs, args.PreferredPeersHolder, args.RequestHandler, - args.PeerShardMapper, + args.MainPeerShardMapper, + args.FullArchivePeerShardMapper, args.HardforkTrigger, ) if err != nil { @@ -95,29 +97,32 @@ func NewShardInterceptorsContainerFactory( PeerSignatureHandler: args.PeerSignatureHandler, SignaturesHandler: args.SignaturesHandler, HeartbeatExpiryTimespanInSec: args.HeartbeatExpiryTimespanInSec, - PeerID: args.Messenger.ID(), + PeerID: args.MainMessenger.ID(), } container := containers.NewInterceptorsContainer() base := &baseInterceptorsContainerFactory{ - container: container, - accounts: args.Accounts, - shardCoordinator: args.ShardCoordinator, - messenger: args.Messenger, - store: args.Store, - dataPool: args.DataPool, - nodesCoordinator: args.NodesCoordinator, - argInterceptorFactory: argInterceptorFactory, - blockBlackList: args.BlockBlackList, - maxTxNonceDeltaAllowed: args.MaxTxNonceDeltaAllowed, - antifloodHandler: args.AntifloodHandler, - whiteListHandler: args.WhiteListHandler, - whiteListerVerifiedTxs: args.WhiteListerVerifiedTxs, - preferredPeersHolder: args.PreferredPeersHolder, - hasher: args.CoreComponents.Hasher(), - requestHandler: args.RequestHandler, - peerShardMapper: args.PeerShardMapper, - hardforkTrigger: args.HardforkTrigger, + container: container, + accounts: args.Accounts, + shardCoordinator: args.ShardCoordinator, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, + store: args.Store, + dataPool: args.DataPool, + nodesCoordinator: args.NodesCoordinator, + argInterceptorFactory: argInterceptorFactory, + blockBlackList: args.BlockBlackList, + maxTxNonceDeltaAllowed: args.MaxTxNonceDeltaAllowed, + antifloodHandler: args.AntifloodHandler, + whiteListHandler: args.WhiteListHandler, + whiteListerVerifiedTxs: args.WhiteListerVerifiedTxs, + preferredPeersHolder: 
args.PreferredPeersHolder, + hasher: args.CoreComponents.Hasher(), + requestHandler: args.RequestHandler, + mainPeerShardMapper: args.MainPeerShardMapper, + fullArchivePeerShardMapper: args.FullArchivePeerShardMapper, + hardforkTrigger: args.HardforkTrigger, + nodeOperationMode: args.NodeOperationMode, } icf := &shardInterceptorsContainerFactory{ @@ -174,12 +179,22 @@ func (sicf *shardInterceptorsContainerFactory) Create() (process.InterceptorsCon return nil, err } - err = sicf.generateHeartbeatInterceptor() + err = sicf.generateMainHeartbeatInterceptor() if err != nil { return nil, err } - err = sicf.generatePeerShardInterceptor() + err = sicf.generateFullArchiveHeartbeatInterceptor() + if err != nil { + return nil, err + } + + err = sicf.generateMainPeerShardInterceptor() + if err != nil { + return nil, err + } + + err = sicf.generateFullArchivePeerShardInterceptor() if err != nil { return nil, err } diff --git a/process/factory/interceptorscontainer/shardInterceptorsContainerFactory_test.go b/process/factory/interceptorscontainer/shardInterceptorsContainerFactory_test.go index afc6de41014..009cde05e94 100644 --- a/process/factory/interceptorscontainer/shardInterceptorsContainerFactory_test.go +++ b/process/factory/interceptorscontainer/shardInterceptorsContainerFactory_test.go @@ -1,10 +1,12 @@ package interceptorscontainer_test import ( + "errors" "strings" "testing" "github.com/multiversx/mx-chain-core-go/core/versioning" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process" @@ -133,16 +135,28 @@ func TestNewShardInterceptorsContainerFactory_NilNodesCoordinatorShouldErr(t *te assert.Equal(t, process.ErrNilNodesCoordinator, err) } -func TestNewShardInterceptorsContainerFactory_NilMessengerShouldErr(t *testing.T) { +func TestNewShardInterceptorsContainerFactory_NilMainMessengerShouldErr(t *testing.T) { t.Parallel() coreComp, cryptoComp := 
createMockComponentHolders() args := getArgumentsShard(coreComp, cryptoComp) - args.Messenger = nil + args.MainMessenger = nil icf, err := interceptorscontainer.NewShardInterceptorsContainerFactory(args) assert.Nil(t, icf) - assert.Equal(t, process.ErrNilMessenger, err) + assert.True(t, errors.Is(err, process.ErrNilMessenger)) +} + +func TestNewShardInterceptorsContainerFactory_NilFullArchiveMessengerShouldErr(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createMockComponentHolders() + args := getArgumentsShard(coreComp, cryptoComp) + args.FullArchiveMessenger = nil + icf, err := interceptorscontainer.NewShardInterceptorsContainerFactory(args) + + assert.Nil(t, icf) + assert.True(t, errors.Is(err, process.ErrNilMessenger)) } func TestNewShardInterceptorsContainerFactory_NilStoreShouldErr(t *testing.T) { @@ -379,16 +393,28 @@ func TestNewShardInterceptorsContainerFactory_EmptyEpochStartTriggerShouldErr(t assert.Equal(t, process.ErrNilEpochStartTrigger, err) } -func TestNewShardInterceptorsContainerFactory_NilPeerShardMapperShouldErr(t *testing.T) { +func TestNewShardInterceptorsContainerFactory_NilMainPeerShardMapperShouldErr(t *testing.T) { + t.Parallel() + + coreComp, cryptoComp := createMockComponentHolders() + args := getArgumentsShard(coreComp, cryptoComp) + args.MainPeerShardMapper = nil + icf, err := interceptorscontainer.NewShardInterceptorsContainerFactory(args) + + assert.Nil(t, icf) + assert.True(t, errors.Is(err, process.ErrNilPeerShardMapper)) +} + +func TestNewShardInterceptorsContainerFactory_NilFullArchivePeerShardMapperShouldErr(t *testing.T) { t.Parallel() coreComp, cryptoComp := createMockComponentHolders() args := getArgumentsShard(coreComp, cryptoComp) - args.PeerShardMapper = nil + args.FullArchivePeerShardMapper = nil icf, err := interceptorscontainer.NewShardInterceptorsContainerFactory(args) assert.Nil(t, icf) - assert.Equal(t, process.ErrNilPeerShardMapper, err) + assert.True(t, errors.Is(err, process.ErrNilPeerShardMapper)) } 
func TestNewShardInterceptorsContainerFactory_NilHardforkTriggerShouldErr(t *testing.T) { @@ -429,130 +455,56 @@ func TestNewShardInterceptorsContainerFactory_ShouldWorkWithSizeCheck(t *testing // ------- Create -func TestShardInterceptorsContainerFactory_CreateTopicCreationTxFailsShouldErr(t *testing.T) { - t.Parallel() - - coreComp, cryptoComp := createMockComponentHolders() - args := getArgumentsShard(coreComp, cryptoComp) - args.Messenger = createShardStubTopicHandler(factory.TransactionTopic, "") - icf, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(args) - - container, err := icf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) -} - -func TestShardInterceptorsContainerFactory_CreateTopicCreationHdrFailsShouldErr(t *testing.T) { - t.Parallel() - - coreComp, cryptoComp := createMockComponentHolders() - args := getArgumentsShard(coreComp, cryptoComp) - args.Messenger = createShardStubTopicHandler(factory.ShardBlocksTopic, "") - icf, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(args) - - container, err := icf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) -} - -func TestShardInterceptorsContainerFactory_CreateTopicCreationMiniBlocksFailsShouldErr(t *testing.T) { - t.Parallel() - - coreComp, cryptoComp := createMockComponentHolders() - args := getArgumentsShard(coreComp, cryptoComp) - args.Messenger = createShardStubTopicHandler(factory.MiniBlocksTopic, "") - icf, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(args) - - container, err := icf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) -} - -func TestShardInterceptorsContainerFactory_CreateTopicCreationMetachainHeadersFailsShouldErr(t *testing.T) { - t.Parallel() - - coreComp, cryptoComp := createMockComponentHolders() - args := getArgumentsShard(coreComp, cryptoComp) - args.Messenger = createShardStubTopicHandler(factory.MetachainBlocksTopic, "") - icf, _ := 
interceptorscontainer.NewShardInterceptorsContainerFactory(args) - - container, err := icf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) -} - -func TestShardInterceptorsContainerFactory_CreateRegisterTxFailsShouldErr(t *testing.T) { +func TestShardInterceptorsContainerFactory_CreateTopicsAndRegisterFailure(t *testing.T) { t.Parallel() - coreComp, cryptoComp := createMockComponentHolders() - args := getArgumentsShard(coreComp, cryptoComp) - args.Messenger = createShardStubTopicHandler("", factory.TransactionTopic) - icf, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(args) - - container, err := icf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) -} + testCreateShardTopicShouldFailOnAllMessenger(t, "generateTxInterceptors_create", factory.TransactionTopic, "") + testCreateShardTopicShouldFailOnAllMessenger(t, "generateTxInterceptors_register", "", factory.TransactionTopic) -func TestShardInterceptorsContainerFactory_CreateRegisterHdrFailsShouldErr(t *testing.T) { - t.Parallel() + testCreateShardTopicShouldFailOnAllMessenger(t, "generateUnsignedTxsInterceptors", factory.UnsignedTransactionTopic, "") - coreComp, cryptoComp := createMockComponentHolders() - args := getArgumentsShard(coreComp, cryptoComp) - args.Messenger = createShardStubTopicHandler("", factory.ShardBlocksTopic) - icf, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(args) + testCreateShardTopicShouldFailOnAllMessenger(t, "generateRewardTxInterceptor", factory.RewardsTransactionTopic, "") - container, err := icf.Create() + testCreateShardTopicShouldFailOnAllMessenger(t, "generateHeaderInterceptors", factory.ShardBlocksTopic, "") - assert.Nil(t, container) - assert.Equal(t, errExpected, err) -} + testCreateShardTopicShouldFailOnAllMessenger(t, "generateMiniBlocksInterceptors", factory.MiniBlocksTopic, "") -func TestShardInterceptorsContainerFactory_CreateRegisterMiniBlocksFailsShouldErr(t *testing.T) { - t.Parallel() + 
testCreateShardTopicShouldFailOnAllMessenger(t, "generateMetachainHeaderInterceptors", factory.MetachainBlocksTopic, "") - coreComp, cryptoComp := createMockComponentHolders() - args := getArgumentsShard(coreComp, cryptoComp) - args.Messenger = createShardStubTopicHandler("", factory.MiniBlocksTopic) - icf, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(args) + testCreateShardTopicShouldFailOnAllMessenger(t, "generateTrieNodesInterceptors", factory.AccountTrieNodesTopic, "") - container, err := icf.Create() + testCreateShardTopicShouldFailOnAllMessenger(t, "generateValidatorInfoInterceptor", common.ValidatorInfoTopic, "") - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + t.Run("generatePeerAuthenticationInterceptor_main", testCreateShardTopicShouldFail(common.PeerAuthenticationTopic, "")) + t.Run("generateHeartbeatInterceptor_main", testCreateShardTopicShouldFail(common.HeartbeatV2Topic, "")) + t.Run("generateHeartbeatInterceptor_full_archive", testCreateShardTopicShouldFail(common.FullArchiveTopicPrefix+common.HeartbeatV2Topic, "")) + t.Run("generatePeerShardInterceptor_main", testCreateShardTopicShouldFail(common.ConnectionTopic, "")) + t.Run("generatePeerShardInterceptor_full_archive", testCreateShardTopicShouldFail(common.FullArchiveTopicPrefix+common.ConnectionTopic, "")) } - -func TestShardInterceptorsContainerFactory_CreateRegisterMetachainHeadersShouldErr(t *testing.T) { - t.Parallel() - - coreComp, cryptoComp := createMockComponentHolders() - args := getArgumentsShard(coreComp, cryptoComp) - args.Messenger = createShardStubTopicHandler("", factory.MetachainBlocksTopic) - icf, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(args) - - container, err := icf.Create() - - assert.Nil(t, container) - assert.Equal(t, errExpected, err) +func testCreateShardTopicShouldFailOnAllMessenger(t *testing.T, testNamePrefix string, matchStrToErrOnCreate string, matchStrToErrOnRegister string) { + t.Run(testNamePrefix+"main 
messenger", testCreateShardTopicShouldFail(matchStrToErrOnCreate, matchStrToErrOnRegister)) + t.Run(testNamePrefix+"full archive messenger", testCreateShardTopicShouldFail(matchStrToErrOnCreate, matchStrToErrOnRegister)) } -func TestShardInterceptorsContainerFactory_CreateRegisterTrieNodesShouldErr(t *testing.T) { - t.Parallel() +func testCreateShardTopicShouldFail(matchStrToErrOnCreate string, matchStrToErrOnRegister string) func(t *testing.T) { + return func(t *testing.T) { + t.Parallel() - coreComp, cryptoComp := createMockComponentHolders() - args := getArgumentsShard(coreComp, cryptoComp) - args.Messenger = createShardStubTopicHandler("", factory.AccountTrieNodesTopic) - icf, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(args) + coreComp, cryptoComp := createMockComponentHolders() + args := getArgumentsShard(coreComp, cryptoComp) + if strings.Contains(t.Name(), "full_archive") { + args.NodeOperationMode = p2p.FullArchiveMode + args.FullArchiveMessenger = createShardStubTopicHandler(matchStrToErrOnCreate, matchStrToErrOnRegister) + } else { + args.MainMessenger = createShardStubTopicHandler(matchStrToErrOnCreate, matchStrToErrOnRegister) + } + icf, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(args) - container, err := icf.Create() + container, err := icf.Create() - assert.Nil(t, container) - assert.Equal(t, errExpected, err) + assert.Nil(t, container) + assert.Equal(t, errExpected, err) + } } func TestShardInterceptorsContainerFactory_NilSignaturesHandler(t *testing.T) { @@ -596,7 +548,7 @@ func TestShardInterceptorsContainerFactory_CreateShouldWork(t *testing.T) { coreComp, cryptoComp := createMockComponentHolders() args := getArgumentsShard(coreComp, cryptoComp) - args.Messenger = &mock.TopicHandlerStub{ + args.MainMessenger = &mock.TopicHandlerStub{ CreateTopicCalled: func(name string, createChannelForTopic bool) error { return nil }, @@ -617,57 +569,98 @@ func TestShardInterceptorsContainerFactory_CreateShouldWork(t 
*testing.T) { func TestShardInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { t.Parallel() - noOfShards := 4 - - shardCoordinator := mock.NewMultipleShardsCoordinatorMock() - shardCoordinator.SetNoShards(uint32(noOfShards)) - shardCoordinator.CurrentShard = 1 - - nodesCoordinator := &shardingMocks.NodesCoordinatorMock{ - ShardId: 1, - ShardConsensusSize: 1, - MetaConsensusSize: 1, - NbShards: uint32(noOfShards), - } - - messenger := &mock.TopicHandlerStub{ - CreateTopicCalled: func(name string, createChannelForTopic bool) error { - return nil - }, - RegisterMessageProcessorCalled: func(topic string, identifier string, handler p2p.MessageProcessor) error { - return nil - }, - } - - coreComp, cryptoComp := createMockComponentHolders() - coreComp.AddrPubKeyConv = testscommon.NewPubkeyConverterMock(32) - args := getArgumentsShard(coreComp, cryptoComp) - args.ShardCoordinator = shardCoordinator - args.NodesCoordinator = nodesCoordinator - args.Messenger = messenger - args.PreferredPeersHolder = &p2pmocks.PeersHolderStub{} - - icf, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(args) - - container, err := icf.Create() - - numInterceptorTxs := noOfShards + 1 - numInterceptorsUnsignedTxs := numInterceptorTxs - numInterceptorsRewardTxs := 1 - numInterceptorHeaders := 1 - numInterceptorMiniBlocks := noOfShards + 2 - numInterceptorMetachainHeaders := 1 - numInterceptorTrieNodes := 1 - numInterceptorPeerAuth := 1 - numInterceptorHeartbeat := 1 - numInterceptorsShardValidatorInfo := 1 - numInterceptorValidatorInfo := 1 - totalInterceptors := numInterceptorTxs + numInterceptorsUnsignedTxs + numInterceptorsRewardTxs + - numInterceptorHeaders + numInterceptorMiniBlocks + numInterceptorMetachainHeaders + numInterceptorTrieNodes + - numInterceptorPeerAuth + numInterceptorHeartbeat + numInterceptorsShardValidatorInfo + numInterceptorValidatorInfo - - assert.Nil(t, err) - assert.Equal(t, totalInterceptors, container.Len()) + t.Run("normal mode", 
func(t *testing.T) { + t.Parallel() + + noOfShards := 4 + + shardCoordinator := mock.NewMultipleShardsCoordinatorMock() + shardCoordinator.SetNoShards(uint32(noOfShards)) + shardCoordinator.CurrentShard = 1 + + nodesCoordinator := &shardingMocks.NodesCoordinatorMock{ + ShardId: 1, + ShardConsensusSize: 1, + MetaConsensusSize: 1, + NbShards: uint32(noOfShards), + } + + coreComp, cryptoComp := createMockComponentHolders() + coreComp.AddrPubKeyConv = testscommon.NewPubkeyConverterMock(32) + args := getArgumentsShard(coreComp, cryptoComp) + args.ShardCoordinator = shardCoordinator + args.NodesCoordinator = nodesCoordinator + args.PreferredPeersHolder = &p2pmocks.PeersHolderStub{} + + icf, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(args) + + container, err := icf.Create() + + numInterceptorTxs := noOfShards + 1 + numInterceptorsUnsignedTxs := numInterceptorTxs + numInterceptorsRewardTxs := 1 + numInterceptorHeaders := 1 + numInterceptorMiniBlocks := noOfShards + 2 + numInterceptorMetachainHeaders := 1 + numInterceptorTrieNodes := 1 + numInterceptorPeerAuth := 1 + numInterceptorHeartbeat := 1 + numInterceptorsShardValidatorInfo := 1 + numInterceptorValidatorInfo := 1 + totalInterceptors := numInterceptorTxs + numInterceptorsUnsignedTxs + numInterceptorsRewardTxs + + numInterceptorHeaders + numInterceptorMiniBlocks + numInterceptorMetachainHeaders + numInterceptorTrieNodes + + numInterceptorPeerAuth + numInterceptorHeartbeat + numInterceptorsShardValidatorInfo + numInterceptorValidatorInfo + + assert.Nil(t, err) + assert.Equal(t, totalInterceptors, container.Len()) + }) + + t.Run("full archive mode", func(t *testing.T) { + t.Parallel() + + noOfShards := 4 + + shardCoordinator := mock.NewMultipleShardsCoordinatorMock() + shardCoordinator.SetNoShards(uint32(noOfShards)) + shardCoordinator.CurrentShard = 1 + + nodesCoordinator := &shardingMocks.NodesCoordinatorMock{ + ShardId: 1, + ShardConsensusSize: 1, + MetaConsensusSize: 1, + NbShards: 
uint32(noOfShards), + } + + coreComp, cryptoComp := createMockComponentHolders() + coreComp.AddrPubKeyConv = testscommon.NewPubkeyConverterMock(32) + args := getArgumentsShard(coreComp, cryptoComp) + args.NodeOperationMode = p2p.FullArchiveMode + args.ShardCoordinator = shardCoordinator + args.NodesCoordinator = nodesCoordinator + args.PreferredPeersHolder = &p2pmocks.PeersHolderStub{} + + icf, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(args) + + container, err := icf.Create() + + numInterceptorTxs := noOfShards + 1 + numInterceptorsUnsignedTxs := numInterceptorTxs + numInterceptorsRewardTxs := 1 + numInterceptorHeaders := 1 + numInterceptorMiniBlocks := noOfShards + 2 + numInterceptorMetachainHeaders := 1 + numInterceptorTrieNodes := 1 + numInterceptorPeerAuth := 1 + numInterceptorHeartbeat := 2 // one for full archive + numInterceptorsShardValidatorInfo := 2 // one for full archive + numInterceptorValidatorInfo := 1 + totalInterceptors := numInterceptorTxs + numInterceptorsUnsignedTxs + numInterceptorsRewardTxs + + numInterceptorHeaders + numInterceptorMiniBlocks + numInterceptorMetachainHeaders + numInterceptorTrieNodes + + numInterceptorPeerAuth + numInterceptorHeartbeat + numInterceptorsShardValidatorInfo + numInterceptorValidatorInfo + + assert.Nil(t, err) + assert.Equal(t, totalInterceptors, container.Len()) + }) } func createMockComponentHolders() (*mock.CoreComponentsMock, *mock.CryptoComponentsMock) { @@ -711,7 +704,8 @@ func getArgumentsShard( Accounts: &stateMock.AccountsStub{}, ShardCoordinator: mock.NewOneShardCoordinatorMock(), NodesCoordinator: shardingMocks.NewNodesCoordinatorMock(), - Messenger: &mock.TopicHandlerStub{}, + MainMessenger: &mock.TopicHandlerStub{}, + FullArchiveMessenger: &mock.TopicHandlerStub{}, Store: createShardStore(), DataPool: createShardDataPools(), MaxTxNonceDeltaAllowed: maxTxNonceDeltaAllowed, @@ -731,7 +725,8 @@ func getArgumentsShard( PeerSignatureHandler: &mock.PeerSignatureHandlerStub{}, 
SignaturesHandler: &mock.SignaturesHandlerStub{}, HeartbeatExpiryTimespanInSec: 30, - PeerShardMapper: &p2pmocks.NetworkShardingCollectorStub{}, + MainPeerShardMapper: &p2pmocks.NetworkShardingCollectorStub{}, + FullArchivePeerShardMapper: &p2pmocks.NetworkShardingCollectorStub{}, HardforkTrigger: &testscommon.HardforkTriggerStub{}, } } diff --git a/process/mock/topicHandlerStub.go b/process/mock/topicHandlerStub.go index 7484a5f677d..7578b383de3 100644 --- a/process/mock/topicHandlerStub.go +++ b/process/mock/topicHandlerStub.go @@ -14,30 +14,39 @@ type TopicHandlerStub struct { } // HasTopic - -func (ths *TopicHandlerStub) HasTopic(name string) bool { - return ths.HasTopicCalled(name) +func (stub *TopicHandlerStub) HasTopic(name string) bool { + if stub.HasTopicCalled != nil { + return stub.HasTopicCalled(name) + } + return false } // CreateTopic - -func (ths *TopicHandlerStub) CreateTopic(name string, createChannelForTopic bool) error { - return ths.CreateTopicCalled(name, createChannelForTopic) +func (stub *TopicHandlerStub) CreateTopic(name string, createChannelForTopic bool) error { + if stub.CreateTopicCalled != nil { + return stub.CreateTopicCalled(name, createChannelForTopic) + } + return nil } // RegisterMessageProcessor - -func (ths *TopicHandlerStub) RegisterMessageProcessor(topic string, identifier string, handler p2p.MessageProcessor) error { - return ths.RegisterMessageProcessorCalled(topic, identifier, handler) +func (stub *TopicHandlerStub) RegisterMessageProcessor(topic string, identifier string, handler p2p.MessageProcessor) error { + if stub.RegisterMessageProcessorCalled != nil { + return stub.RegisterMessageProcessorCalled(topic, identifier, handler) + } + return nil } // ID - -func (ths *TopicHandlerStub) ID() core.PeerID { - if ths.IDCalled != nil { - return ths.IDCalled() +func (stub *TopicHandlerStub) ID() core.PeerID { + if stub.IDCalled != nil { + return stub.IDCalled() } return "peer ID" } // IsInterfaceNil returns true if there is no 
value under the interface -func (ths *TopicHandlerStub) IsInterfaceNil() bool { - return ths == nil +func (stub *TopicHandlerStub) IsInterfaceNil() bool { + return stub == nil } diff --git a/process/throttle/antiflood/factory/p2pAntifloodAndBlacklistFactory.go b/process/throttle/antiflood/factory/p2pAntifloodAndBlacklistFactory.go index e77e3f03494..bfbf29617c6 100644 --- a/process/throttle/antiflood/factory/p2pAntifloodAndBlacklistFactory.go +++ b/process/throttle/antiflood/factory/p2pAntifloodAndBlacklistFactory.go @@ -8,6 +8,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/config" + antifloodDebug "github.com/multiversx/mx-chain-go/debug/antiflood" "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/throttle/antiflood" @@ -130,6 +131,18 @@ func initP2PAntiFloodComponents( return nil, err } + if mainConfig.Debug.Antiflood.Enabled { + debugger, errDebugger := antifloodDebug.NewAntifloodDebugger(mainConfig.Debug.Antiflood) + if errDebugger != nil { + return nil, errDebugger + } + + err = p2pAntiflood.SetDebugger(debugger) + if err != nil { + return nil, err + } + } + startResettingTopicFloodPreventer(ctx, topicFloodPreventer, topicMaxMessages) startSweepingTimeCaches(ctx, p2pPeerBlackList, publicKeysCache) diff --git a/testscommon/components/default.go b/testscommon/components/default.go index 38df0c22211..a609909ecdf 100644 --- a/testscommon/components/default.go +++ b/testscommon/components/default.go @@ -137,7 +137,8 @@ func GetDefaultProcessComponents(shardCoordinator sharding.Coordinator) *mock.Pr ReqHandler: &testscommon.RequestHandlerStub{}, TxLogsProcess: &mock.TxLogProcessorMock{}, HeaderConstructValidator: &mock.HeaderValidatorStub{}, - PeerMapper: &p2pmocks.NetworkShardingCollectorStub{}, + MainPeerMapper: &p2pmocks.NetworkShardingCollectorStub{}, + FullArchivePeerMapper: 
&p2pmocks.NetworkShardingCollectorStub{}, FallbackHdrValidator: &testscommon.FallBackHeaderValidatorStub{}, NodeRedundancyHandlerInternal: &mock.RedundancyHandlerStub{ IsRedundancyNodeCalled: func() bool { From 694bcc57cf6ae3989940f096de079e621f22f0ee Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Wed, 14 Jun 2023 13:29:33 +0300 Subject: [PATCH 12/38] updated resolvers and requesters for full archive network added separate preferred peers holder for the new network --- cmd/node/config/prefs.toml | 9 + config/prefsConfig.go | 17 +- config/tomlConfig_test.go | 17 +- .../factory/requestersContainer/args.go | 23 +- .../baseRequestersContainerFactory.go | 87 +++--- .../metaRequestersContainerFactory.go | 31 ++- .../metaRequestersContainerFactory_test.go | 51 +++- .../shardRequestersContainerFactory.go | 31 ++- .../shardRequestersContainerFactory_test.go | 72 +++-- .../factory/resolverscontainer/args.go | 32 ++- .../baseResolversContainerFactory.go | 95 +++++-- .../metaResolversContainerFactory.go | 46 +-- .../metaResolversContainerFactory_test.go | 97 +++++-- .../shardResolversContainerFactory.go | 46 +-- .../shardResolversContainerFactory_test.go | 165 +++++++++-- dataRetriever/interface.go | 2 + dataRetriever/mock/messageHandlerStub.go | 20 ++ dataRetriever/topicSender/baseTopicSender.go | 76 +++-- .../topicSender/topicRequestSender.go | 121 ++++++-- .../topicSender/topicRequestSender_test.go | 263 ++++++++++++------ .../topicSender/topicResolverSender.go | 6 +- .../topicSender/topicResolverSender_test.go | 120 ++++++-- epochStart/bootstrap/process.go | 53 ++-- factory/disabled/preferredPeersHolder.go | 44 +++ factory/interface.go | 1 + factory/mock/networkComponentsMock.go | 6 + factory/network/networkComponents.go | 126 +++++---- factory/network/networkComponentsHandler.go | 16 +- .../network/networkComponentsHandler_test.go | 2 + factory/network/networkComponents_test.go | 2 +- factory/processing/processComponents.go | 94 ++++--- 
.../processComponentsHandler_test.go | 2 + factory/processing/processComponents_test.go | 14 +- .../mock/networkComponentsMock.go | 6 + .../multiShard/hardFork/hardFork_test.go | 12 +- .../node/heartbeatV2/heartbeatV2_test.go | 1 - .../realcomponents/processorRunner.go | 25 +- integrationTests/testHeartbeatNode.go | 37 +-- integrationTests/testProcessorNode.go | 137 ++++----- node/mock/factory/networkComponentsMock.go | 6 + node/nodeRunner.go | 25 +- p2p/disabled/networkMessenger.go | 4 +- update/errors.go | 3 + update/factory/exportHandlerFactory.go | 114 ++++---- .../fullSyncRequestersContainerFactory.go | 95 ++++--- .../fullSyncResolversContainerFactory.go | 37 ++- 46 files changed, 1518 insertions(+), 771 deletions(-) create mode 100644 factory/disabled/preferredPeersHolder.go diff --git a/cmd/node/config/prefs.toml b/cmd/node/config/prefs.toml index 98d5c02557f..1fd8d43f000 100644 --- a/cmd/node/config/prefs.toml +++ b/cmd/node/config/prefs.toml @@ -28,6 +28,15 @@ # ] PreferredConnections = [] + # PreferredFullArchiveConnections holds an array containing valid ips or peer ids from nodes to connect with (in top of other connections) + # This is only considered on FullArchive mode + # Example: + # PreferredConnections = [ + # "127.0.0.10", + # "16Uiu2HAm6yvbp1oZ6zjnWsn9FdRqBSaQkbhELyaThuq48ybdorrr" + # ] + PreferredFullArchiveConnections = [] + # ConnectionWatcherType represents the type of a connection watcher needed. 
# possible options: # - "disabled" - no connection watching should be made diff --git a/config/prefsConfig.go b/config/prefsConfig.go index 34861d647e8..ee2e7483381 100644 --- a/config/prefsConfig.go +++ b/config/prefsConfig.go @@ -9,14 +9,15 @@ type Preferences struct { // PreferencesConfig will hold the fields which are node specific such as the display name type PreferencesConfig struct { - DestinationShardAsObserver string - NodeDisplayName string - Identity string - RedundancyLevel int64 - PreferredConnections []string - ConnectionWatcherType string - OverridableConfigTomlValues []OverridableConfig - FullArchive bool + DestinationShardAsObserver string + NodeDisplayName string + Identity string + RedundancyLevel int64 + PreferredConnections []string + PreferredFullArchiveConnections []string + ConnectionWatcherType string + OverridableConfigTomlValues []OverridableConfig + FullArchive bool } // OverridableConfig holds the path and the new value to be updated in the configuration diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go index a33f910a832..2c28462c85a 100644 --- a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -315,14 +315,17 @@ func TestTomlPreferencesParser(t *testing.T) { redundancyLevel := int64(0) prefPubKey0 := "preferred pub key 0" prefPubKey1 := "preferred pub key 1" + prefFAPubKey0 := "preferred full archive pub key 0" + prefFAPubKey1 := "preferred full archive pub key 1" cfgPreferencesExpected := Preferences{ Preferences: PreferencesConfig{ - NodeDisplayName: nodeDisplayName, - DestinationShardAsObserver: destinationShardAsObs, - Identity: identity, - RedundancyLevel: redundancyLevel, - PreferredConnections: []string{prefPubKey0, prefPubKey1}, + NodeDisplayName: nodeDisplayName, + DestinationShardAsObserver: destinationShardAsObs, + Identity: identity, + RedundancyLevel: redundancyLevel, + PreferredConnections: []string{prefPubKey0, prefPubKey1}, + PreferredFullArchiveConnections: []string{prefFAPubKey0, 
prefFAPubKey1}, }, BlockProcessingCutoff: BlockProcessingCutoffConfig{ Enabled: true, @@ -342,6 +345,10 @@ func TestTomlPreferencesParser(t *testing.T) { "` + prefPubKey0 + `", "` + prefPubKey1 + `" ] + PreferredFullArchiveConnections = [ + "` + prefFAPubKey0 + `", + "` + prefFAPubKey1 + `" + ] [BlockProcessingCutoff] Enabled = true diff --git a/dataRetriever/factory/requestersContainer/args.go b/dataRetriever/factory/requestersContainer/args.go index 9e37166f9d9..76ca1fddaf7 100644 --- a/dataRetriever/factory/requestersContainer/args.go +++ b/dataRetriever/factory/requestersContainer/args.go @@ -11,14 +11,17 @@ import ( // FactoryArgs will hold the arguments for RequestersContainerFactory for both shard and meta type FactoryArgs struct { - RequesterConfig config.RequesterConfig - ShardCoordinator sharding.Coordinator - Messenger dataRetriever.TopicMessageHandler - Marshaller marshal.Marshalizer - Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter - OutputAntifloodHandler dataRetriever.P2PAntifloodHandler - CurrentNetworkEpochProvider dataRetriever.CurrentNetworkEpochProviderHandler - PreferredPeersHolder p2p.PreferredPeersHolderHandler - PeersRatingHandler dataRetriever.PeersRatingHandler - SizeCheckDelta uint32 + RequesterConfig config.RequesterConfig + ShardCoordinator sharding.Coordinator + MainMessenger dataRetriever.TopicMessageHandler + FullArchiveMessenger dataRetriever.TopicMessageHandler + Marshaller marshal.Marshalizer + Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter + OutputAntifloodHandler dataRetriever.P2PAntifloodHandler + CurrentNetworkEpochProvider dataRetriever.CurrentNetworkEpochProviderHandler + MainPreferredPeersHolder p2p.PreferredPeersHolderHandler + FullArchivePreferredPeersHolder p2p.PreferredPeersHolderHandler + MainPeersRatingHandler dataRetriever.PeersRatingHandler + FullArchivePeersRatingHandler dataRetriever.PeersRatingHandler + SizeCheckDelta uint32 } diff --git 
a/dataRetriever/factory/requestersContainer/baseRequestersContainerFactory.go b/dataRetriever/factory/requestersContainer/baseRequestersContainerFactory.go index 562405c37fb..6fddb70ac8c 100644 --- a/dataRetriever/factory/requestersContainer/baseRequestersContainerFactory.go +++ b/dataRetriever/factory/requestersContainer/baseRequestersContainerFactory.go @@ -22,29 +22,35 @@ const EmptyExcludePeersOnTopic = "" var log = logger.GetOrCreate("dataRetriever/factory/requesterscontainer") type baseRequestersContainerFactory struct { - container dataRetriever.RequestersContainer - shardCoordinator sharding.Coordinator - messenger dataRetriever.TopicMessageHandler - marshaller marshal.Marshalizer - uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter - intRandomizer dataRetriever.IntRandomizer - outputAntifloodHandler dataRetriever.P2PAntifloodHandler - intraShardTopic string - currentNetworkEpochProvider dataRetriever.CurrentNetworkEpochProviderHandler - preferredPeersHolder dataRetriever.PreferredPeersHolderHandler - peersRatingHandler dataRetriever.PeersRatingHandler - numCrossShardPeers int - numIntraShardPeers int - numTotalPeers int - numFullHistoryPeers int + container dataRetriever.RequestersContainer + shardCoordinator sharding.Coordinator + mainMessenger dataRetriever.TopicMessageHandler + fullArchiveMessenger dataRetriever.TopicMessageHandler + marshaller marshal.Marshalizer + uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter + intRandomizer dataRetriever.IntRandomizer + outputAntifloodHandler dataRetriever.P2PAntifloodHandler + intraShardTopic string + currentNetworkEpochProvider dataRetriever.CurrentNetworkEpochProviderHandler + mainPreferredPeersHolder dataRetriever.PreferredPeersHolderHandler + fullArchivePreferredPeersHolder dataRetriever.PreferredPeersHolderHandler + mainPeersRatingHandler dataRetriever.PeersRatingHandler + fullArchivePeersRatingHandler dataRetriever.PeersRatingHandler + numCrossShardPeers int + 
numIntraShardPeers int + numTotalPeers int + numFullHistoryPeers int } func (brcf *baseRequestersContainerFactory) checkParams() error { if check.IfNil(brcf.shardCoordinator) { return dataRetriever.ErrNilShardCoordinator } - if check.IfNil(brcf.messenger) { - return dataRetriever.ErrNilMessenger + if check.IfNil(brcf.mainMessenger) { + return fmt.Errorf("%w on main network", dataRetriever.ErrNilMessenger) + } + if check.IfNil(brcf.fullArchiveMessenger) { + return fmt.Errorf("%w on full archive network", dataRetriever.ErrNilMessenger) } if check.IfNil(brcf.marshaller) { return dataRetriever.ErrNilMarshalizer @@ -58,11 +64,17 @@ func (brcf *baseRequestersContainerFactory) checkParams() error { if check.IfNil(brcf.currentNetworkEpochProvider) { return dataRetriever.ErrNilCurrentNetworkEpochProvider } - if check.IfNil(brcf.preferredPeersHolder) { - return dataRetriever.ErrNilPreferredPeersHolder + if check.IfNil(brcf.mainPreferredPeersHolder) { + return fmt.Errorf("%w on main network", dataRetriever.ErrNilPreferredPeersHolder) + } + if check.IfNil(brcf.fullArchivePreferredPeersHolder) { + return fmt.Errorf("%w on full archive network", dataRetriever.ErrNilPreferredPeersHolder) + } + if check.IfNil(brcf.mainPeersRatingHandler) { + return fmt.Errorf("%w on main network", dataRetriever.ErrNilPeersRatingHandler) } - if check.IfNil(brcf.peersRatingHandler) { - return dataRetriever.ErrNilPeersRatingHandler + if check.IfNil(brcf.fullArchivePeersRatingHandler) { + return fmt.Errorf("%w on full archive network", dataRetriever.ErrNilPeersRatingHandler) } if brcf.numCrossShardPeers <= 0 { return fmt.Errorf("%w for numCrossShardPeers", dataRetriever.ErrInvalidValue) @@ -260,28 +272,31 @@ func (brcf *baseRequestersContainerFactory) createOneRequestSenderWithSpecifiedN "topic", topic, "intraShardTopic", brcf.intraShardTopic, "excludedTopic", excludedTopic, "numCrossShardPeers", numCrossShardPeers, "numIntraShardPeers", numIntraShardPeers) - peerListCreator, err := 
topicsender.NewDiffPeerListCreator(brcf.messenger, topic, brcf.intraShardTopic, excludedTopic) + peerListCreator, err := topicsender.NewDiffPeerListCreator(brcf.mainMessenger, topic, brcf.intraShardTopic, excludedTopic) if err != nil { return nil, err } arg := topicsender.ArgTopicRequestSender{ ArgBaseTopicSender: topicsender.ArgBaseTopicSender{ - Messenger: brcf.messenger, - TopicName: topic, - OutputAntiflooder: brcf.outputAntifloodHandler, - PreferredPeersHolder: brcf.preferredPeersHolder, - TargetShardId: targetShardId, + MainMessenger: brcf.mainMessenger, + FullArchiveMessenger: brcf.fullArchiveMessenger, + TopicName: topic, + OutputAntiflooder: brcf.outputAntifloodHandler, + MainPreferredPeersHolder: brcf.mainPreferredPeersHolder, + FullArchivePreferredPeersHolder: brcf.fullArchivePreferredPeersHolder, + TargetShardId: targetShardId, }, - Marshaller: brcf.marshaller, - Randomizer: brcf.intRandomizer, - PeerListCreator: peerListCreator, - NumIntraShardPeers: numIntraShardPeers, - NumCrossShardPeers: numCrossShardPeers, - NumFullHistoryPeers: brcf.numFullHistoryPeers, - CurrentNetworkEpochProvider: brcf.currentNetworkEpochProvider, - SelfShardIdProvider: brcf.shardCoordinator, - PeersRatingHandler: brcf.peersRatingHandler, + Marshaller: brcf.marshaller, + Randomizer: brcf.intRandomizer, + PeerListCreator: peerListCreator, + NumIntraShardPeers: numIntraShardPeers, + NumCrossShardPeers: numCrossShardPeers, + NumFullHistoryPeers: brcf.numFullHistoryPeers, + CurrentNetworkEpochProvider: brcf.currentNetworkEpochProvider, + SelfShardIdProvider: brcf.shardCoordinator, + MainPeersRatingHandler: brcf.mainPeersRatingHandler, + FullArchivePeersRatingHandler: brcf.fullArchivePeersRatingHandler, } return topicsender.NewTopicRequestSender(arg) } diff --git a/dataRetriever/factory/requestersContainer/metaRequestersContainerFactory.go b/dataRetriever/factory/requestersContainer/metaRequestersContainerFactory.go index 5abe87ed961..59de81ae816 100644 --- 
a/dataRetriever/factory/requestersContainer/metaRequestersContainerFactory.go +++ b/dataRetriever/factory/requestersContainer/metaRequestersContainerFactory.go @@ -27,20 +27,23 @@ func NewMetaRequestersContainerFactory( numIntraShardPeers := args.RequesterConfig.NumTotalPeers - args.RequesterConfig.NumCrossShardPeers container := containers.NewRequestersContainer() base := &baseRequestersContainerFactory{ - container: container, - shardCoordinator: args.ShardCoordinator, - messenger: args.Messenger, - marshaller: args.Marshaller, - uint64ByteSliceConverter: args.Uint64ByteSliceConverter, - intRandomizer: &random.ConcurrentSafeIntRandomizer{}, - outputAntifloodHandler: args.OutputAntifloodHandler, - currentNetworkEpochProvider: args.CurrentNetworkEpochProvider, - preferredPeersHolder: args.PreferredPeersHolder, - peersRatingHandler: args.PeersRatingHandler, - numCrossShardPeers: int(args.RequesterConfig.NumCrossShardPeers), - numIntraShardPeers: int(numIntraShardPeers), - numTotalPeers: int(args.RequesterConfig.NumTotalPeers), - numFullHistoryPeers: int(args.RequesterConfig.NumFullHistoryPeers), + container: container, + shardCoordinator: args.ShardCoordinator, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, + marshaller: args.Marshaller, + uint64ByteSliceConverter: args.Uint64ByteSliceConverter, + intRandomizer: &random.ConcurrentSafeIntRandomizer{}, + outputAntifloodHandler: args.OutputAntifloodHandler, + currentNetworkEpochProvider: args.CurrentNetworkEpochProvider, + mainPreferredPeersHolder: args.MainPreferredPeersHolder, + fullArchivePreferredPeersHolder: args.FullArchivePreferredPeersHolder, + mainPeersRatingHandler: args.MainPeersRatingHandler, + fullArchivePeersRatingHandler: args.FullArchivePeersRatingHandler, + numCrossShardPeers: int(args.RequesterConfig.NumCrossShardPeers), + numIntraShardPeers: int(numIntraShardPeers), + numTotalPeers: int(args.RequesterConfig.NumTotalPeers), + numFullHistoryPeers: 
int(args.RequesterConfig.NumFullHistoryPeers), } err := base.checkParams() diff --git a/dataRetriever/factory/requestersContainer/metaRequestersContainerFactory_test.go b/dataRetriever/factory/requestersContainer/metaRequestersContainerFactory_test.go index 69391b79efd..2f373ecc2d7 100644 --- a/dataRetriever/factory/requestersContainer/metaRequestersContainerFactory_test.go +++ b/dataRetriever/factory/requestersContainer/metaRequestersContainerFactory_test.go @@ -22,15 +22,26 @@ func TestNewMetaRequestersContainerFactory_NilShardCoordinatorShouldErr(t *testi assert.Equal(t, dataRetriever.ErrNilShardCoordinator, err) } -func TestNewMetaRequestersContainerFactory_NilMessengerShouldErr(t *testing.T) { +func TestNewMetaRequestersContainerFactory_NilMainMessengerShouldErr(t *testing.T) { t.Parallel() args := getArguments() - args.Messenger = nil + args.MainMessenger = nil rcf, err := requesterscontainer.NewMetaRequestersContainerFactory(args) assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilMessenger, err) + assert.True(t, errors.Is(err, dataRetriever.ErrNilMessenger)) +} + +func TestNewMetaRequestersContainerFactory_NilFullArchiveMessengerShouldErr(t *testing.T) { + t.Parallel() + + args := getArguments() + args.FullArchiveMessenger = nil + rcf, err := requesterscontainer.NewMetaRequestersContainerFactory(args) + + assert.Nil(t, rcf) + assert.True(t, errors.Is(err, dataRetriever.ErrNilMessenger)) } func TestNewMetaRequestersContainerFactory_NilMarshallerShouldErr(t *testing.T) { @@ -56,26 +67,48 @@ func TestNewMetaRequestersContainerFactory_NilMarshallerAndSizeCheckShouldErr(t assert.Equal(t, dataRetriever.ErrNilMarshalizer, err) } -func TestNewMetaRequestersContainerFactory_NilPreferredPeersHolderShouldErr(t *testing.T) { +func TestNewMetaRequestersContainerFactory_NilMainPreferredPeersHolderShouldErr(t *testing.T) { + t.Parallel() + + args := getArguments() + args.MainPreferredPeersHolder = nil + rcf, err := 
requesterscontainer.NewMetaRequestersContainerFactory(args) + + assert.Nil(t, rcf) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPreferredPeersHolder)) +} + +func TestNewMetaRequestersContainerFactory_NilFullArchivePreferredPeersHolderShouldErr(t *testing.T) { + t.Parallel() + + args := getArguments() + args.FullArchivePreferredPeersHolder = nil + rcf, err := requesterscontainer.NewMetaRequestersContainerFactory(args) + + assert.Nil(t, rcf) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPreferredPeersHolder)) +} + +func TestNewMetaRequestersContainerFactory_NilMainPeersRatingHandlerShouldErr(t *testing.T) { t.Parallel() args := getArguments() - args.PreferredPeersHolder = nil + args.MainPeersRatingHandler = nil rcf, err := requesterscontainer.NewMetaRequestersContainerFactory(args) assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilPreferredPeersHolder, err) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPeersRatingHandler)) } -func TestNewMetaRequestersContainerFactory_NilPeersRatingHandlerShouldErr(t *testing.T) { +func TestNewMetaRequestersContainerFactory_NilFullArchivePeersRatingHandlerShouldErr(t *testing.T) { t.Parallel() args := getArguments() - args.PeersRatingHandler = nil + args.FullArchivePeersRatingHandler = nil rcf, err := requesterscontainer.NewMetaRequestersContainerFactory(args) assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilPeersRatingHandler, err) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPeersRatingHandler)) } func TestNewMetaRequestersContainerFactory_NilUint64SliceConverterShouldErr(t *testing.T) { diff --git a/dataRetriever/factory/requestersContainer/shardRequestersContainerFactory.go b/dataRetriever/factory/requestersContainer/shardRequestersContainerFactory.go index 20ff257d738..adc945c35fb 100644 --- a/dataRetriever/factory/requestersContainer/shardRequestersContainerFactory.go +++ b/dataRetriever/factory/requestersContainer/shardRequestersContainerFactory.go @@ -26,20 +26,23 @@ func 
NewShardRequestersContainerFactory( numIntraShardPeers := args.RequesterConfig.NumTotalPeers - args.RequesterConfig.NumCrossShardPeers container := containers.NewRequestersContainer() base := &baseRequestersContainerFactory{ - container: container, - shardCoordinator: args.ShardCoordinator, - messenger: args.Messenger, - marshaller: args.Marshaller, - uint64ByteSliceConverter: args.Uint64ByteSliceConverter, - intRandomizer: &random.ConcurrentSafeIntRandomizer{}, - outputAntifloodHandler: args.OutputAntifloodHandler, - currentNetworkEpochProvider: args.CurrentNetworkEpochProvider, - preferredPeersHolder: args.PreferredPeersHolder, - peersRatingHandler: args.PeersRatingHandler, - numCrossShardPeers: int(args.RequesterConfig.NumCrossShardPeers), - numIntraShardPeers: int(numIntraShardPeers), - numTotalPeers: int(args.RequesterConfig.NumTotalPeers), - numFullHistoryPeers: int(args.RequesterConfig.NumFullHistoryPeers), + container: container, + shardCoordinator: args.ShardCoordinator, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, + marshaller: args.Marshaller, + uint64ByteSliceConverter: args.Uint64ByteSliceConverter, + intRandomizer: &random.ConcurrentSafeIntRandomizer{}, + outputAntifloodHandler: args.OutputAntifloodHandler, + currentNetworkEpochProvider: args.CurrentNetworkEpochProvider, + mainPreferredPeersHolder: args.MainPreferredPeersHolder, + fullArchivePreferredPeersHolder: args.FullArchivePreferredPeersHolder, + mainPeersRatingHandler: args.MainPeersRatingHandler, + fullArchivePeersRatingHandler: args.FullArchivePeersRatingHandler, + numCrossShardPeers: int(args.RequesterConfig.NumCrossShardPeers), + numIntraShardPeers: int(numIntraShardPeers), + numTotalPeers: int(args.RequesterConfig.NumTotalPeers), + numFullHistoryPeers: int(args.RequesterConfig.NumFullHistoryPeers), } err := base.checkParams() diff --git a/dataRetriever/factory/requestersContainer/shardRequestersContainerFactory_test.go 
b/dataRetriever/factory/requestersContainer/shardRequestersContainerFactory_test.go index 7602fd6f98d..ee3dcba7794 100644 --- a/dataRetriever/factory/requestersContainer/shardRequestersContainerFactory_test.go +++ b/dataRetriever/factory/requestersContainer/shardRequestersContainerFactory_test.go @@ -44,15 +44,26 @@ func TestNewShardRequestersContainerFactory_NilShardCoordinatorShouldErr(t *test assert.Equal(t, dataRetriever.ErrNilShardCoordinator, err) } -func TestNewShardRequestersContainerFactory_NilMessengerShouldErr(t *testing.T) { +func TestNewShardRequestersContainerFactory_NilMainMessengerShouldErr(t *testing.T) { t.Parallel() args := getArguments() - args.Messenger = nil + args.MainMessenger = nil rcf, err := requesterscontainer.NewShardRequestersContainerFactory(args) assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilMessenger, err) + assert.True(t, errors.Is(err, dataRetriever.ErrNilMessenger)) +} + +func TestNewShardRequestersContainerFactory_NilFullArchiveMessengerShouldErr(t *testing.T) { + t.Parallel() + + args := getArguments() + args.FullArchiveMessenger = nil + rcf, err := requesterscontainer.NewShardRequestersContainerFactory(args) + + assert.Nil(t, rcf) + assert.True(t, errors.Is(err, dataRetriever.ErrNilMessenger)) } func TestNewShardRequestersContainerFactory_NilMarshallerShouldErr(t *testing.T) { @@ -89,26 +100,48 @@ func TestNewShardRequestersContainerFactory_NilUint64SliceConverterShouldErr(t * assert.Equal(t, dataRetriever.ErrNilUint64ByteSliceConverter, err) } -func TestNewShardRequestersContainerFactory_NilPreferredPeersHolderShouldErr(t *testing.T) { +func TestNewShardRequestersContainerFactory_NilMainPreferredPeersHolderShouldErr(t *testing.T) { + t.Parallel() + + args := getArguments() + args.MainPreferredPeersHolder = nil + rcf, err := requesterscontainer.NewShardRequestersContainerFactory(args) + + assert.Nil(t, rcf) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPreferredPeersHolder)) +} + +func 
TestNewShardRequestersContainerFactory_NilFullArchivePreferredPeersHolderShouldErr(t *testing.T) { + t.Parallel() + + args := getArguments() + args.FullArchivePreferredPeersHolder = nil + rcf, err := requesterscontainer.NewShardRequestersContainerFactory(args) + + assert.Nil(t, rcf) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPreferredPeersHolder)) +} + +func TestNewShardRequestersContainerFactory_NilMainPeersRatingHandlerShouldErr(t *testing.T) { t.Parallel() args := getArguments() - args.PreferredPeersHolder = nil + args.MainPeersRatingHandler = nil rcf, err := requesterscontainer.NewShardRequestersContainerFactory(args) assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilPreferredPeersHolder, err) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPeersRatingHandler)) } -func TestNewShardRequestersContainerFactory_NilPeersRatingHandlerShouldErr(t *testing.T) { +func TestNewShardRequestersContainerFactory_NilFullArchivePeersRatingHandlerShouldErr(t *testing.T) { t.Parallel() args := getArguments() - args.PeersRatingHandler = nil + args.FullArchivePeersRatingHandler = nil rcf, err := requesterscontainer.NewShardRequestersContainerFactory(args) assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilPeersRatingHandler, err) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPeersRatingHandler)) } func TestNewShardRequestersContainerFactory_InvalidNumTotalPeersShouldErr(t *testing.T) { @@ -243,14 +276,17 @@ func getArguments() requesterscontainer.FactoryArgs { NumTotalPeers: 3, NumFullHistoryPeers: 3, }, - ShardCoordinator: mock.NewOneShardCoordinatorMock(), - Messenger: createStubTopicMessageHandler(""), - Marshaller: &mock.MarshalizerMock{}, - Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, - OutputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, - CurrentNetworkEpochProvider: &mock.CurrentNetworkEpochProviderStub{}, - PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - PeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, - 
SizeCheckDelta: 0, + ShardCoordinator: mock.NewOneShardCoordinatorMock(), + MainMessenger: createStubTopicMessageHandler(""), + FullArchiveMessenger: createStubTopicMessageHandler(""), + Marshaller: &mock.MarshalizerMock{}, + Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, + OutputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, + CurrentNetworkEpochProvider: &mock.CurrentNetworkEpochProviderStub{}, + MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + MainPeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, + FullArchivePeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, + SizeCheckDelta: 0, } } diff --git a/dataRetriever/factory/resolverscontainer/args.go b/dataRetriever/factory/resolverscontainer/args.go index 65ddfc326a3..e342eef57a8 100644 --- a/dataRetriever/factory/resolverscontainer/args.go +++ b/dataRetriever/factory/resolverscontainer/args.go @@ -11,19 +11,21 @@ import ( // FactoryArgs will hold the arguments for ResolversContainerFactory for both shard and meta type FactoryArgs struct { - NumConcurrentResolvingJobs int32 - ShardCoordinator sharding.Coordinator - Messenger dataRetriever.TopicMessageHandler - Store dataRetriever.StorageService - Marshalizer marshal.Marshalizer - DataPools dataRetriever.PoolsHolder - Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter - DataPacker dataRetriever.DataPacker - TriesContainer common.TriesHolder - InputAntifloodHandler dataRetriever.P2PAntifloodHandler - OutputAntifloodHandler dataRetriever.P2PAntifloodHandler - PreferredPeersHolder p2p.PreferredPeersHolderHandler - SizeCheckDelta uint32 - IsFullHistoryNode bool - PayloadValidator dataRetriever.PeerAuthenticationPayloadValidator + NumConcurrentResolvingJobs int32 + ShardCoordinator sharding.Coordinator + MainMessenger dataRetriever.TopicMessageHandler + FullArchiveMessenger dataRetriever.TopicMessageHandler + Store dataRetriever.StorageService + Marshalizer 
marshal.Marshalizer + DataPools dataRetriever.PoolsHolder + Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter + DataPacker dataRetriever.DataPacker + TriesContainer common.TriesHolder + InputAntifloodHandler dataRetriever.P2PAntifloodHandler + OutputAntifloodHandler dataRetriever.P2PAntifloodHandler + MainPreferredPeersHolder p2p.PreferredPeersHolderHandler + FullArchivePreferredPeersHolder p2p.PreferredPeersHolderHandler + SizeCheckDelta uint32 + IsFullHistoryNode bool + PayloadValidator dataRetriever.PeerAuthenticationPayloadValidator } diff --git a/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go b/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go index 45acf32e347..52e7a50c61a 100644 --- a/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go +++ b/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go @@ -22,30 +22,35 @@ const EmptyExcludePeersOnTopic = "" var log = logger.GetOrCreate("dataRetriever/factory/resolverscontainer") type baseResolversContainerFactory struct { - container dataRetriever.ResolversContainer - shardCoordinator sharding.Coordinator - messenger dataRetriever.TopicMessageHandler - store dataRetriever.StorageService - marshalizer marshal.Marshalizer - dataPools dataRetriever.PoolsHolder - uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter - dataPacker dataRetriever.DataPacker - triesContainer common.TriesHolder - inputAntifloodHandler dataRetriever.P2PAntifloodHandler - outputAntifloodHandler dataRetriever.P2PAntifloodHandler - throttler dataRetriever.ResolverThrottler - intraShardTopic string - isFullHistoryNode bool - preferredPeersHolder dataRetriever.PreferredPeersHolderHandler - payloadValidator dataRetriever.PeerAuthenticationPayloadValidator + container dataRetriever.ResolversContainer + shardCoordinator sharding.Coordinator + mainMessenger dataRetriever.TopicMessageHandler + fullArchiveMessenger 
dataRetriever.TopicMessageHandler + store dataRetriever.StorageService + marshalizer marshal.Marshalizer + dataPools dataRetriever.PoolsHolder + uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter + dataPacker dataRetriever.DataPacker + triesContainer common.TriesHolder + inputAntifloodHandler dataRetriever.P2PAntifloodHandler + outputAntifloodHandler dataRetriever.P2PAntifloodHandler + throttler dataRetriever.ResolverThrottler + intraShardTopic string + isFullHistoryNode bool + mainPreferredPeersHolder dataRetriever.PreferredPeersHolderHandler + fullArchivePreferredPeersHolder dataRetriever.PreferredPeersHolderHandler + payloadValidator dataRetriever.PeerAuthenticationPayloadValidator } func (brcf *baseResolversContainerFactory) checkParams() error { if check.IfNil(brcf.shardCoordinator) { return dataRetriever.ErrNilShardCoordinator } - if check.IfNil(brcf.messenger) { - return dataRetriever.ErrNilMessenger + if check.IfNil(brcf.mainMessenger) { + return fmt.Errorf("%w for main network", dataRetriever.ErrNilMessenger) + } + if check.IfNil(brcf.fullArchiveMessenger) { + return fmt.Errorf("%w for full archive network", dataRetriever.ErrNilMessenger) } if check.IfNil(brcf.store) { return dataRetriever.ErrNilStore @@ -74,8 +79,11 @@ func (brcf *baseResolversContainerFactory) checkParams() error { if check.IfNil(brcf.throttler) { return dataRetriever.ErrNilThrottler } - if check.IfNil(brcf.preferredPeersHolder) { - return dataRetriever.ErrNilPreferredPeersHolder + if check.IfNil(brcf.mainPreferredPeersHolder) { + return fmt.Errorf("%w for main network", dataRetriever.ErrNilPreferredPeersHolder) + } + if check.IfNil(brcf.fullArchivePreferredPeersHolder) { + return fmt.Errorf("%w for full archive network", dataRetriever.ErrNilPreferredPeersHolder) } return nil @@ -155,7 +163,12 @@ func (brcf *baseResolversContainerFactory) createTxResolver( return nil, err } - err = brcf.messenger.RegisterMessageProcessor(resolver.RequestTopic(), 
common.DefaultResolversIdentifier, resolver) + err = brcf.mainMessenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) + if err != nil { + return nil, err + } + + err = brcf.fullArchiveMessenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) if err != nil { return nil, err } @@ -237,7 +250,12 @@ func (brcf *baseResolversContainerFactory) createMiniBlocksResolver( return nil, err } - err = brcf.messenger.RegisterMessageProcessor(txBlkResolver.RequestTopic(), common.DefaultResolversIdentifier, txBlkResolver) + err = brcf.mainMessenger.RegisterMessageProcessor(txBlkResolver.RequestTopic(), common.DefaultResolversIdentifier, txBlkResolver) + if err != nil { + return nil, err + } + + err = brcf.fullArchiveMessenger.RegisterMessageProcessor(txBlkResolver.RequestTopic(), common.DefaultResolversIdentifier, txBlkResolver) if err != nil { return nil, err } @@ -269,7 +287,12 @@ func (brcf *baseResolversContainerFactory) generatePeerAuthenticationResolver() return err } - err = brcf.messenger.RegisterMessageProcessor(peerAuthResolver.RequestTopic(), common.DefaultResolversIdentifier, peerAuthResolver) + err = brcf.mainMessenger.RegisterMessageProcessor(peerAuthResolver.RequestTopic(), common.DefaultResolversIdentifier, peerAuthResolver) + if err != nil { + return err + } + + err = brcf.fullArchiveMessenger.RegisterMessageProcessor(peerAuthResolver.RequestTopic(), common.DefaultResolversIdentifier, peerAuthResolver) if err != nil { return err } @@ -288,11 +311,13 @@ func (brcf *baseResolversContainerFactory) createOneResolverSenderWithSpecifiedN arg := topicsender.ArgTopicResolverSender{ ArgBaseTopicSender: topicsender.ArgBaseTopicSender{ - Messenger: brcf.messenger, - TopicName: topic, - OutputAntiflooder: brcf.outputAntifloodHandler, - PreferredPeersHolder: brcf.preferredPeersHolder, - TargetShardId: targetShardId, + MainMessenger: brcf.mainMessenger, + FullArchiveMessenger: 
brcf.fullArchiveMessenger, + TopicName: topic, + OutputAntiflooder: brcf.outputAntifloodHandler, + MainPreferredPeersHolder: brcf.mainPreferredPeersHolder, + FullArchivePreferredPeersHolder: brcf.fullArchivePreferredPeersHolder, + TargetShardId: targetShardId, }, } // TODO instantiate topic sender resolver with the shard IDs for which this resolver is supposed to serve the data @@ -334,7 +359,12 @@ func (brcf *baseResolversContainerFactory) createTrieNodesResolver( return nil, err } - err = brcf.messenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) + err = brcf.mainMessenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) + if err != nil { + return nil, err + } + + err = brcf.fullArchiveMessenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) if err != nil { return nil, err } @@ -370,7 +400,12 @@ func (brcf *baseResolversContainerFactory) generateValidatorInfoResolver() error return err } - err = brcf.messenger.RegisterMessageProcessor(validatorInfoResolver.RequestTopic(), common.DefaultResolversIdentifier, validatorInfoResolver) + err = brcf.mainMessenger.RegisterMessageProcessor(validatorInfoResolver.RequestTopic(), common.DefaultResolversIdentifier, validatorInfoResolver) + if err != nil { + return err + } + + err = brcf.fullArchiveMessenger.RegisterMessageProcessor(validatorInfoResolver.RequestTopic(), common.DefaultResolversIdentifier, validatorInfoResolver) if err != nil { return err } diff --git a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go index 889481e9fde..426a978ae20 100644 --- a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go +++ b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go @@ -34,21 +34,23 @@ func NewMetaResolversContainerFactory( container := 
containers.NewResolversContainer() base := &baseResolversContainerFactory{ - container: container, - shardCoordinator: args.ShardCoordinator, - messenger: args.Messenger, - store: args.Store, - marshalizer: args.Marshalizer, - dataPools: args.DataPools, - uint64ByteSliceConverter: args.Uint64ByteSliceConverter, - dataPacker: args.DataPacker, - triesContainer: args.TriesContainer, - inputAntifloodHandler: args.InputAntifloodHandler, - outputAntifloodHandler: args.OutputAntifloodHandler, - throttler: thr, - isFullHistoryNode: args.IsFullHistoryNode, - preferredPeersHolder: args.PreferredPeersHolder, - payloadValidator: args.PayloadValidator, + container: container, + shardCoordinator: args.ShardCoordinator, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, + store: args.Store, + marshalizer: args.Marshalizer, + dataPools: args.DataPools, + uint64ByteSliceConverter: args.Uint64ByteSliceConverter, + dataPacker: args.DataPacker, + triesContainer: args.TriesContainer, + inputAntifloodHandler: args.InputAntifloodHandler, + outputAntifloodHandler: args.OutputAntifloodHandler, + throttler: thr, + isFullHistoryNode: args.IsFullHistoryNode, + mainPreferredPeersHolder: args.MainPreferredPeersHolder, + fullArchivePreferredPeersHolder: args.FullArchivePreferredPeersHolder, + payloadValidator: args.PayloadValidator, } err = base.checkParams() @@ -221,7 +223,12 @@ func (mrcf *metaResolversContainerFactory) createShardHeaderResolver( return nil, err } - err = mrcf.messenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) + err = mrcf.mainMessenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) + if err != nil { + return nil, err + } + + err = mrcf.fullArchiveMessenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) if err != nil { return nil, err } @@ -279,7 +286,12 @@ func (mrcf 
*metaResolversContainerFactory) createMetaChainHeaderResolver( return nil, err } - err = mrcf.messenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) + err = mrcf.mainMessenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) + if err != nil { + return nil, err + } + + err = mrcf.fullArchiveMessenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) if err != nil { return nil, err } diff --git a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go index bb396cbcb7b..6d90f550a71 100644 --- a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go +++ b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go @@ -111,15 +111,26 @@ func TestNewMetaResolversContainerFactory_NilShardCoordinatorShouldErr(t *testin assert.Equal(t, dataRetriever.ErrNilShardCoordinator, err) } -func TestNewMetaResolversContainerFactory_NilMessengerShouldErr(t *testing.T) { +func TestNewMetaResolversContainerFactory_NilMainMessengerShouldErr(t *testing.T) { t.Parallel() args := getArgumentsMeta() - args.Messenger = nil + args.MainMessenger = nil rcf, err := resolverscontainer.NewMetaResolversContainerFactory(args) assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilMessenger, err) + assert.True(t, errors.Is(err, dataRetriever.ErrNilMessenger)) +} + +func TestNewMetaResolversContainerFactory_NilFullArchiveMessengerShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsMeta() + args.FullArchiveMessenger = nil + rcf, err := resolverscontainer.NewMetaResolversContainerFactory(args) + + assert.Nil(t, rcf) + assert.True(t, errors.Is(err, dataRetriever.ErrNilMessenger)) } func TestNewMetaResolversContainerFactory_NilStoreShouldErr(t *testing.T) { @@ -167,15 +178,26 @@ func 
TestNewMetaResolversContainerFactory_NilDataPoolShouldErr(t *testing.T) { assert.Equal(t, dataRetriever.ErrNilDataPoolHolder, err) } -func TestNewMetaResolversContainerFactory_NilPreferredPeersHolderShouldErr(t *testing.T) { +func TestNewMetaResolversContainerFactory_NilMainPreferredPeersHolderShouldErr(t *testing.T) { t.Parallel() args := getArgumentsMeta() - args.PreferredPeersHolder = nil + args.MainPreferredPeersHolder = nil rcf, err := resolverscontainer.NewMetaResolversContainerFactory(args) assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilPreferredPeersHolder, err) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPreferredPeersHolder)) +} + +func TestNewMetaResolversContainerFactory_NilFullArchivePreferredPeersHolderShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsMeta() + args.FullArchivePreferredPeersHolder = nil + rcf, err := resolverscontainer.NewMetaResolversContainerFactory(args) + + assert.Nil(t, rcf) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPreferredPeersHolder)) } func TestNewMetaResolversContainerFactory_NilUint64SliceConverterShouldErr(t *testing.T) { @@ -235,11 +257,24 @@ func TestNewMetaResolversContainerFactory_NilOutputAntifloodHandlerShouldErr(t * // ------- Create -func TestMetaResolversContainerFactory_CreateRegisterShardHeadersForMetachainFailsShouldErr(t *testing.T) { +func TestMetaResolversContainerFactory_CreateRegisterShardHeadersForMetachainOnMainNetworkFailsShouldErr(t *testing.T) { t.Parallel() args := getArgumentsMeta() - args.Messenger = createStubTopicMessageHandlerForMeta("", factory.ShardBlocksTopic) + args.MainMessenger = createStubTopicMessageHandlerForMeta("", factory.ShardBlocksTopic) + rcf, _ := resolverscontainer.NewMetaResolversContainerFactory(args) + + container, err := rcf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) +} + +func TestMetaResolversContainerFactory_CreateRegisterShardHeadersForMetachainOnFullArchiveNetworkFailsShouldErr(t *testing.T) { + 
t.Parallel() + + args := getArgumentsMeta() + args.FullArchiveMessenger = createStubTopicMessageHandlerForMeta("", factory.ShardBlocksTopic) rcf, _ := resolverscontainer.NewMetaResolversContainerFactory(args) container, err := rcf.Create() @@ -269,6 +304,20 @@ func TestMetaResolversContainerFactory_With4ShardsShouldWork(t *testing.T) { shardCoordinator.CurrentShard = 1 args := getArgumentsMeta() + registerMainCnt := 0 + args.MainMessenger = &p2pmocks.MessengerStub{ + RegisterMessageProcessorCalled: func(topic string, identifier string, handler p2p.MessageProcessor) error { + registerMainCnt++ + return nil + }, + } + registerFullArchiveCnt := 0 + args.FullArchiveMessenger = &p2pmocks.MessengerStub{ + RegisterMessageProcessorCalled: func(topic string, identifier string, handler p2p.MessageProcessor) error { + registerFullArchiveCnt++ + return nil + }, + } args.ShardCoordinator = shardCoordinator rcf, _ := resolverscontainer.NewMetaResolversContainerFactory(args) @@ -286,6 +335,8 @@ func TestMetaResolversContainerFactory_With4ShardsShouldWork(t *testing.T) { numResolversUnsigned + numResolversTxs + numResolversTrieNodes + numResolversRewards + numResolversPeerAuth + numResolverValidatorInfo assert.Equal(t, totalResolvers, container.Len()) + assert.Equal(t, totalResolvers, registerMainCnt) + assert.Equal(t, totalResolvers, registerFullArchiveCnt) err := rcf.AddShardTrieNodeResolvers(container) assert.Nil(t, err) @@ -306,19 +357,21 @@ func TestMetaResolversContainerFactory_IsInterfaceNil(t *testing.T) { func getArgumentsMeta() resolverscontainer.FactoryArgs { return resolverscontainer.FactoryArgs{ - ShardCoordinator: mock.NewOneShardCoordinatorMock(), - Messenger: createStubTopicMessageHandlerForMeta("", ""), - Store: createStoreForMeta(), - Marshalizer: &mock.MarshalizerMock{}, - DataPools: createDataPoolsForMeta(), - Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, - DataPacker: &mock.DataPackerStub{}, - TriesContainer: createTriesHolderForMeta(), - 
SizeCheckDelta: 0, - InputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, - OutputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, - NumConcurrentResolvingJobs: 10, - PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - PayloadValidator: &testscommon.PeerAuthenticationPayloadValidatorStub{}, + ShardCoordinator: mock.NewOneShardCoordinatorMock(), + MainMessenger: createStubTopicMessageHandlerForMeta("", ""), + FullArchiveMessenger: createStubTopicMessageHandlerForMeta("", ""), + Store: createStoreForMeta(), + Marshalizer: &mock.MarshalizerMock{}, + DataPools: createDataPoolsForMeta(), + Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, + DataPacker: &mock.DataPackerStub{}, + TriesContainer: createTriesHolderForMeta(), + SizeCheckDelta: 0, + InputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, + OutputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, + NumConcurrentResolvingJobs: 10, + MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PayloadValidator: &testscommon.PeerAuthenticationPayloadValidatorStub{}, } } diff --git a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go index 7a4fb1a282a..28582f03bc5 100644 --- a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go +++ b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go @@ -32,21 +32,23 @@ func NewShardResolversContainerFactory( container := containers.NewResolversContainer() base := &baseResolversContainerFactory{ - container: container, - shardCoordinator: args.ShardCoordinator, - messenger: args.Messenger, - store: args.Store, - marshalizer: args.Marshalizer, - dataPools: args.DataPools, - uint64ByteSliceConverter: args.Uint64ByteSliceConverter, - dataPacker: args.DataPacker, - triesContainer: args.TriesContainer, - inputAntifloodHandler: args.InputAntifloodHandler, - 
outputAntifloodHandler: args.OutputAntifloodHandler, - throttler: thr, - isFullHistoryNode: args.IsFullHistoryNode, - preferredPeersHolder: args.PreferredPeersHolder, - payloadValidator: args.PayloadValidator, + container: container, + shardCoordinator: args.ShardCoordinator, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, + store: args.Store, + marshalizer: args.Marshalizer, + dataPools: args.DataPools, + uint64ByteSliceConverter: args.Uint64ByteSliceConverter, + dataPacker: args.DataPacker, + triesContainer: args.TriesContainer, + inputAntifloodHandler: args.InputAntifloodHandler, + outputAntifloodHandler: args.OutputAntifloodHandler, + throttler: thr, + isFullHistoryNode: args.IsFullHistoryNode, + mainPreferredPeersHolder: args.MainPreferredPeersHolder, + fullArchivePreferredPeersHolder: args.FullArchivePreferredPeersHolder, + payloadValidator: args.PayloadValidator, } err = base.checkParams() @@ -166,7 +168,12 @@ func (srcf *shardResolversContainerFactory) generateHeaderResolvers() error { return err } - err = srcf.messenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) + err = srcf.mainMessenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) + if err != nil { + return err + } + + err = srcf.fullArchiveMessenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) if err != nil { return err } @@ -214,7 +221,12 @@ func (srcf *shardResolversContainerFactory) generateMetablockHeaderResolvers() e return err } - err = srcf.messenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) + err = srcf.mainMessenger.RegisterMessageProcessor(resolver.RequestTopic(), common.DefaultResolversIdentifier, resolver) + if err != nil { + return err + } + + err = srcf.fullArchiveMessenger.RegisterMessageProcessor(resolver.RequestTopic(), 
common.DefaultResolversIdentifier, resolver) if err != nil { return err } diff --git a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go index 51195e6c5a8..89ebde60228 100644 --- a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go +++ b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go @@ -117,15 +117,26 @@ func TestNewShardResolversContainerFactory_NilShardCoordinatorShouldErr(t *testi assert.Equal(t, dataRetriever.ErrNilShardCoordinator, err) } -func TestNewShardResolversContainerFactory_NilMessengerShouldErr(t *testing.T) { +func TestNewShardResolversContainerFactory_NilMainMessengerShouldErr(t *testing.T) { t.Parallel() args := getArgumentsShard() - args.Messenger = nil + args.MainMessenger = nil rcf, err := resolverscontainer.NewShardResolversContainerFactory(args) assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilMessenger, err) + assert.True(t, errors.Is(err, dataRetriever.ErrNilMessenger)) +} + +func TestNewShardResolversContainerFactory_NilFullArchiveMessengerShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsShard() + args.FullArchiveMessenger = nil + rcf, err := resolverscontainer.NewShardResolversContainerFactory(args) + + assert.Nil(t, rcf) + assert.True(t, errors.Is(err, dataRetriever.ErrNilMessenger)) } func TestNewShardResolversContainerFactory_NilStoreShouldErr(t *testing.T) { @@ -195,15 +206,26 @@ func TestNewShardResolversContainerFactory_NilDataPackerShouldErr(t *testing.T) assert.Equal(t, dataRetriever.ErrNilDataPacker, err) } -func TestNewShardResolversContainerFactory_NilPreferredPeersHolderShouldErr(t *testing.T) { +func TestNewShardResolversContainerFactory_NilMainPreferredPeersHolderShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsShard() + args.MainPreferredPeersHolder = nil + rcf, err := 
resolverscontainer.NewShardResolversContainerFactory(args) + + assert.Nil(t, rcf) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPreferredPeersHolder)) +} + +func TestNewShardResolversContainerFactory_NilFullArchivePreferredPeersHolderShouldErr(t *testing.T) { t.Parallel() args := getArgumentsShard() - args.PreferredPeersHolder = nil + args.FullArchivePreferredPeersHolder = nil rcf, err := resolverscontainer.NewShardResolversContainerFactory(args) assert.Nil(t, rcf) - assert.Equal(t, dataRetriever.ErrNilPreferredPeersHolder, err) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPreferredPeersHolder)) } func TestNewShardResolversContainerFactory_NilTriesContainerShouldErr(t *testing.T) { @@ -241,11 +263,11 @@ func TestNewShardResolversContainerFactory_NilOutputAntifloodHandlerShouldErr(t // ------- Create -func TestShardResolversContainerFactory_CreateRegisterTxFailsShouldErr(t *testing.T) { +func TestShardResolversContainerFactory_CreateRegisterTxFailsOnMainNetworkShouldErr(t *testing.T) { t.Parallel() args := getArgumentsShard() - args.Messenger = createStubTopicMessageHandlerForShard("", factory.TransactionTopic) + args.MainMessenger = createStubTopicMessageHandlerForShard("", factory.TransactionTopic) rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) container, err := rcf.Create() @@ -254,11 +276,11 @@ func TestShardResolversContainerFactory_CreateRegisterTxFailsShouldErr(t *testin assert.Equal(t, errExpected, err) } -func TestShardResolversContainerFactory_CreateRegisterHdrFailsShouldErr(t *testing.T) { +func TestShardResolversContainerFactory_CreateRegisterTxFailsOnFullArchiveNetworkShouldErr(t *testing.T) { t.Parallel() args := getArgumentsShard() - args.Messenger = createStubTopicMessageHandlerForShard("", factory.ShardBlocksTopic) + args.FullArchiveMessenger = createStubTopicMessageHandlerForShard("", factory.TransactionTopic) rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) container, err := rcf.Create() @@ 
-267,11 +289,11 @@ func TestShardResolversContainerFactory_CreateRegisterHdrFailsShouldErr(t *testi assert.Equal(t, errExpected, err) } -func TestShardResolversContainerFactory_CreateRegisterMiniBlocksFailsShouldErr(t *testing.T) { +func TestShardResolversContainerFactory_CreateRegisterHdrFailsOnMainNetworkShouldErr(t *testing.T) { t.Parallel() args := getArgumentsShard() - args.Messenger = createStubTopicMessageHandlerForShard("", factory.MiniBlocksTopic) + args.MainMessenger = createStubTopicMessageHandlerForShard("", factory.ShardBlocksTopic) rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) container, err := rcf.Create() @@ -280,11 +302,11 @@ func TestShardResolversContainerFactory_CreateRegisterMiniBlocksFailsShouldErr(t assert.Equal(t, errExpected, err) } -func TestShardResolversContainerFactory_CreateRegisterTrieNodesFailsShouldErr(t *testing.T) { +func TestShardResolversContainerFactory_CreateRegisterHdrFailsOnFullArchiveNetworkShouldErr(t *testing.T) { t.Parallel() args := getArgumentsShard() - args.Messenger = createStubTopicMessageHandlerForShard("", factory.AccountTrieNodesTopic) + args.FullArchiveMessenger = createStubTopicMessageHandlerForShard("", factory.ShardBlocksTopic) rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) container, err := rcf.Create() @@ -293,11 +315,76 @@ func TestShardResolversContainerFactory_CreateRegisterTrieNodesFailsShouldErr(t assert.Equal(t, errExpected, err) } -func TestShardResolversContainerFactory_CreateRegisterPeerAuthenticationShouldErr(t *testing.T) { +func TestShardResolversContainerFactory_CreateRegisterMiniBlocksFailsOnMainNetworkShouldErr(t *testing.T) { t.Parallel() args := getArgumentsShard() - args.Messenger = createStubTopicMessageHandlerForShard("", common.PeerAuthenticationTopic) + args.MainMessenger = createStubTopicMessageHandlerForShard("", factory.MiniBlocksTopic) + rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) + + container, err := 
rcf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) +} + +func TestShardResolversContainerFactory_CreateRegisterMiniBlocksFailsOnFullArchiveNetworkShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsShard() + args.FullArchiveMessenger = createStubTopicMessageHandlerForShard("", factory.MiniBlocksTopic) + rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) + + container, err := rcf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) +} + +func TestShardResolversContainerFactory_CreateRegisterTrieNodesFailsOnMainNetworkShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsShard() + args.MainMessenger = createStubTopicMessageHandlerForShard("", factory.AccountTrieNodesTopic) + rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) + + container, err := rcf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) +} + +func TestShardResolversContainerFactory_CreateRegisterTrieNodesFailsOnFullArchiveNetworkShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsShard() + args.FullArchiveMessenger = createStubTopicMessageHandlerForShard("", factory.AccountTrieNodesTopic) + rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) + + container, err := rcf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) +} + +func TestShardResolversContainerFactory_CreateRegisterPeerAuthenticationOnMainNetworkShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsShard() + args.MainMessenger = createStubTopicMessageHandlerForShard("", common.PeerAuthenticationTopic) + rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) + + container, err := rcf.Create() + + assert.Nil(t, container) + assert.Equal(t, errExpected, err) +} + +func TestShardResolversContainerFactory_CreateRegisterPeerAuthenticationOnFullArchiveNetworkShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsShard() + 
args.FullArchiveMessenger = createStubTopicMessageHandlerForShard("", common.PeerAuthenticationTopic) rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) container, err := rcf.Create() @@ -328,6 +415,20 @@ func TestShardResolversContainerFactory_With4ShardsShouldWork(t *testing.T) { shardCoordinator.CurrentShard = 1 args := getArgumentsShard() + registerMainCnt := 0 + args.MainMessenger = &p2pmocks.MessengerStub{ + RegisterMessageProcessorCalled: func(topic string, identifier string, handler p2p.MessageProcessor) error { + registerMainCnt++ + return nil + }, + } + registerFullArchiveCnt := 0 + args.FullArchiveMessenger = &p2pmocks.MessengerStub{ + RegisterMessageProcessorCalled: func(topic string, identifier string, handler p2p.MessageProcessor) error { + registerFullArchiveCnt++ + return nil + }, + } args.ShardCoordinator = shardCoordinator rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) @@ -346,6 +447,8 @@ func TestShardResolversContainerFactory_With4ShardsShouldWork(t *testing.T) { numResolverSCRs + numResolverRewardTxs + numResolverTrieNodes + numResolverPeerAuth + numResolverValidatorInfo assert.Equal(t, totalResolvers, container.Len()) + assert.Equal(t, totalResolvers, registerMainCnt) + assert.Equal(t, totalResolvers, registerFullArchiveCnt) } func TestShardResolversContainerFactory_IsInterfaceNil(t *testing.T) { @@ -362,19 +465,21 @@ func TestShardResolversContainerFactory_IsInterfaceNil(t *testing.T) { func getArgumentsShard() resolverscontainer.FactoryArgs { return resolverscontainer.FactoryArgs{ - ShardCoordinator: mock.NewOneShardCoordinatorMock(), - Messenger: createStubTopicMessageHandlerForShard("", ""), - Store: createStoreForShard(), - Marshalizer: &mock.MarshalizerMock{}, - DataPools: createDataPoolsForShard(), - Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, - DataPacker: &mock.DataPackerStub{}, - TriesContainer: createTriesHolderForShard(), - SizeCheckDelta: 0, - InputAntifloodHandler: 
&mock.P2PAntifloodHandlerStub{}, - OutputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, - NumConcurrentResolvingJobs: 10, - PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - PayloadValidator: &testscommon.PeerAuthenticationPayloadValidatorStub{}, + ShardCoordinator: mock.NewOneShardCoordinatorMock(), + MainMessenger: createStubTopicMessageHandlerForShard("", ""), + FullArchiveMessenger: createStubTopicMessageHandlerForShard("", ""), + Store: createStoreForShard(), + Marshalizer: &mock.MarshalizerMock{}, + DataPools: createDataPoolsForShard(), + Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, + DataPacker: &mock.DataPackerStub{}, + TriesContainer: createTriesHolderForShard(), + SizeCheckDelta: 0, + InputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, + OutputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, + NumConcurrentResolvingJobs: 10, + MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PayloadValidator: &testscommon.PeerAuthenticationPayloadValidatorStub{}, } } diff --git a/dataRetriever/interface.go b/dataRetriever/interface.go index 4da2c3669db..3c96db90af5 100644 --- a/dataRetriever/interface.go +++ b/dataRetriever/interface.go @@ -137,6 +137,8 @@ type MessageHandler interface { ConnectedPeersOnTopic(topic string) []core.PeerID SendToConnectedPeer(topic string, buff []byte, peerID core.PeerID) error ID() core.PeerID + ConnectedPeers() []core.PeerID + IsConnected(peerID core.PeerID) bool IsInterfaceNil() bool } diff --git a/dataRetriever/mock/messageHandlerStub.go b/dataRetriever/mock/messageHandlerStub.go index 541bee0270a..3f2998efd15 100644 --- a/dataRetriever/mock/messageHandlerStub.go +++ b/dataRetriever/mock/messageHandlerStub.go @@ -9,6 +9,8 @@ type MessageHandlerStub struct { ConnectedPeersOnTopicCalled func(topic string) []core.PeerID SendToConnectedPeerCalled func(topic string, buff []byte, peerID core.PeerID) error IDCalled func() core.PeerID + 
ConnectedPeersCalled func() []core.PeerID + IsConnectedCalled func(peerID core.PeerID) bool } // ConnectedPeersOnTopic - @@ -30,6 +32,24 @@ func (mhs *MessageHandlerStub) ID() core.PeerID { return "" } +// ConnectedPeers - +func (mhs *MessageHandlerStub) ConnectedPeers() []core.PeerID { + if mhs.ConnectedPeersCalled != nil { + return mhs.ConnectedPeersCalled() + } + + return make([]core.PeerID, 0) +} + +// IsConnected - +func (mhs *MessageHandlerStub) IsConnected(peerID core.PeerID) bool { + if mhs.IsConnectedCalled != nil { + return mhs.IsConnectedCalled(peerID) + } + + return false +} + // IsInterfaceNil returns true if there is no value under the interface func (mhs *MessageHandlerStub) IsInterfaceNil() bool { return mhs == nil diff --git a/dataRetriever/topicSender/baseTopicSender.go b/dataRetriever/topicSender/baseTopicSender.go index f5470a87dfb..b9ddf469008 100644 --- a/dataRetriever/topicSender/baseTopicSender.go +++ b/dataRetriever/topicSender/baseTopicSender.go @@ -18,73 +18,95 @@ var log = logger.GetOrCreate("dataretriever/topicsender") const ( minPeersToQuery = 2 preferredPeerIndex = -1 + mainNetwork = "main" + fullArchiveNetwork = "full archive" ) // ArgBaseTopicSender is the base DTO used to create a new topic sender instance type ArgBaseTopicSender struct { - Messenger dataRetriever.MessageHandler - TopicName string - OutputAntiflooder dataRetriever.P2PAntifloodHandler - PreferredPeersHolder dataRetriever.PreferredPeersHolderHandler - TargetShardId uint32 + MainMessenger dataRetriever.MessageHandler + FullArchiveMessenger dataRetriever.MessageHandler + TopicName string + OutputAntiflooder dataRetriever.P2PAntifloodHandler + MainPreferredPeersHolder dataRetriever.PreferredPeersHolderHandler + FullArchivePreferredPeersHolder dataRetriever.PreferredPeersHolderHandler + TargetShardId uint32 } type baseTopicSender struct { - messenger dataRetriever.MessageHandler - topicName string - outputAntiflooder dataRetriever.P2PAntifloodHandler - mutDebugHandler 
sync.RWMutex - debugHandler dataRetriever.DebugHandler - preferredPeersHolderHandler dataRetriever.PreferredPeersHolderHandler - targetShardId uint32 + mainMessenger dataRetriever.MessageHandler + fullArchiveMessenger dataRetriever.MessageHandler + topicName string + outputAntiflooder dataRetriever.P2PAntifloodHandler + mutDebugHandler sync.RWMutex + debugHandler dataRetriever.DebugHandler + mainPreferredPeersHolderHandler dataRetriever.PreferredPeersHolderHandler + fullArchivePreferredPeersHolderHandler dataRetriever.PreferredPeersHolderHandler + targetShardId uint32 } func createBaseTopicSender(args ArgBaseTopicSender) *baseTopicSender { return &baseTopicSender{ - messenger: args.Messenger, - topicName: args.TopicName, - outputAntiflooder: args.OutputAntiflooder, - debugHandler: handler.NewDisabledInterceptorDebugHandler(), - preferredPeersHolderHandler: args.PreferredPeersHolder, - targetShardId: args.TargetShardId, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, + topicName: args.TopicName, + outputAntiflooder: args.OutputAntiflooder, + debugHandler: handler.NewDisabledInterceptorDebugHandler(), + mainPreferredPeersHolderHandler: args.MainPreferredPeersHolder, + fullArchivePreferredPeersHolderHandler: args.FullArchivePreferredPeersHolder, + targetShardId: args.TargetShardId, } } func checkBaseTopicSenderArgs(args ArgBaseTopicSender) error { - if check.IfNil(args.Messenger) { - return dataRetriever.ErrNilMessenger + if check.IfNil(args.MainMessenger) { + return fmt.Errorf("%w on main network", dataRetriever.ErrNilMessenger) + } + if check.IfNil(args.FullArchiveMessenger) { + return fmt.Errorf("%w on full archive network", dataRetriever.ErrNilMessenger) } if check.IfNil(args.OutputAntiflooder) { return dataRetriever.ErrNilAntifloodHandler } - if check.IfNil(args.PreferredPeersHolder) { - return dataRetriever.ErrNilPreferredPeersHolder + if check.IfNil(args.MainPreferredPeersHolder) { + return fmt.Errorf("%w on main 
network", dataRetriever.ErrNilPreferredPeersHolder) + } + if check.IfNil(args.FullArchivePreferredPeersHolder) { + return fmt.Errorf("%w on full archive network", dataRetriever.ErrNilPreferredPeersHolder) } return nil } -func (baseSender *baseTopicSender) sendToConnectedPeer(topic string, buff []byte, peer core.PeerID) error { +func (baseSender *baseTopicSender) sendToConnectedPeer( + topic string, + buff []byte, + peer core.PeerID, + messenger dataRetriever.MessageHandler, + network string, + preferredPeersHolder dataRetriever.PreferredPeersHolderHandler, +) error { msg := &factory.Message{ DataField: buff, PeerField: peer, TopicField: topic, } - shouldAvoidAntiFloodCheck := baseSender.preferredPeersHolderHandler.Contains(peer) + shouldAvoidAntiFloodCheck := preferredPeersHolder.Contains(peer) if shouldAvoidAntiFloodCheck { - return baseSender.messenger.SendToConnectedPeer(topic, buff, peer) + return messenger.SendToConnectedPeer(topic, buff, peer) } err := baseSender.outputAntiflooder.CanProcessMessage(msg, peer) if err != nil { - return fmt.Errorf("%w while sending %d bytes to peer %s", + return fmt.Errorf("%w while sending %d bytes to peer %s on network %s", err, len(buff), p2p.PeerIdToShortString(peer), + network, ) } - return baseSender.messenger.SendToConnectedPeer(topic, buff, peer) + return messenger.SendToConnectedPeer(topic, buff, peer) } // DebugHandler returns the debug handler used in resolvers diff --git a/dataRetriever/topicSender/topicRequestSender.go b/dataRetriever/topicSender/topicRequestSender.go index f09fb194f95..996f3f8a065 100644 --- a/dataRetriever/topicSender/topicRequestSender.go +++ b/dataRetriever/topicSender/topicRequestSender.go @@ -16,15 +16,16 @@ var _ dataRetriever.TopicRequestSender = (*topicRequestSender)(nil) // ArgTopicRequestSender is the argument structure used to create new topic request sender instance type ArgTopicRequestSender struct { ArgBaseTopicSender - Marshaller marshal.Marshalizer - Randomizer 
dataRetriever.IntRandomizer - PeerListCreator dataRetriever.PeerListCreator - NumIntraShardPeers int - NumCrossShardPeers int - NumFullHistoryPeers int - CurrentNetworkEpochProvider dataRetriever.CurrentNetworkEpochProviderHandler - SelfShardIdProvider dataRetriever.SelfShardIDProvider - PeersRatingHandler dataRetriever.PeersRatingHandler + Marshaller marshal.Marshalizer + Randomizer dataRetriever.IntRandomizer + PeerListCreator dataRetriever.PeerListCreator + NumIntraShardPeers int + NumCrossShardPeers int + NumFullHistoryPeers int + CurrentNetworkEpochProvider dataRetriever.CurrentNetworkEpochProviderHandler + SelfShardIdProvider dataRetriever.SelfShardIDProvider + MainPeersRatingHandler dataRetriever.PeersRatingHandler + FullArchivePeersRatingHandler dataRetriever.PeersRatingHandler } type topicRequestSender struct { @@ -37,7 +38,8 @@ type topicRequestSender struct { numCrossShardPeers int numFullHistoryPeers int currentNetworkEpochProviderHandler dataRetriever.CurrentNetworkEpochProviderHandler - peersRatingHandler dataRetriever.PeersRatingHandler + mainPeersRatingHandler dataRetriever.PeersRatingHandler + fullArchivePeersRatingHandler dataRetriever.PeersRatingHandler selfShardId uint32 } @@ -57,7 +59,8 @@ func NewTopicRequestSender(args ArgTopicRequestSender) (*topicRequestSender, err numCrossShardPeers: args.NumCrossShardPeers, numFullHistoryPeers: args.NumFullHistoryPeers, currentNetworkEpochProviderHandler: args.CurrentNetworkEpochProvider, - peersRatingHandler: args.PeersRatingHandler, + mainPeersRatingHandler: args.MainPeersRatingHandler, + fullArchivePeersRatingHandler: args.FullArchivePeersRatingHandler, selfShardId: args.SelfShardIdProvider.SelfId(), }, nil } @@ -79,8 +82,11 @@ func checkArgs(args ArgTopicRequestSender) error { if check.IfNil(args.CurrentNetworkEpochProvider) { return dataRetriever.ErrNilCurrentNetworkEpochProvider } - if check.IfNil(args.PeersRatingHandler) { - return dataRetriever.ErrNilPeersRatingHandler + if 
check.IfNil(args.MainPeersRatingHandler) { + return fmt.Errorf("%w on main network", dataRetriever.ErrNilPeersRatingHandler) + } + if check.IfNil(args.FullArchivePeersRatingHandler) { + return fmt.Errorf("%w on full archive network", dataRetriever.ErrNilPeersRatingHandler) } if check.IfNil(args.SelfShardIdProvider) { return dataRetriever.ErrNilSelfShardIDProvider @@ -120,16 +126,46 @@ func (trs *topicRequestSender) SendOnRequestTopic(rd *dataRetriever.RequestData, if trs.currentNetworkEpochProviderHandler.EpochIsActiveInNetwork(rd.Epoch) { crossPeers = trs.peerListCreator.CrossShardPeerList() preferredPeer := trs.getPreferredPeer(trs.targetShardId) - numSentCross = trs.sendOnTopic(crossPeers, preferredPeer, topicToSendRequest, buff, trs.numCrossShardPeers, core.CrossShardPeer.String()) + numSentCross = trs.sendOnTopic( + crossPeers, + preferredPeer, + topicToSendRequest, + buff, + trs.numCrossShardPeers, + core.CrossShardPeer.String(), + trs.mainMessenger, + trs.mainPeersRatingHandler, + mainNetwork, + trs.mainPreferredPeersHolderHandler) intraPeers = trs.peerListCreator.IntraShardPeerList() preferredPeer = trs.getPreferredPeer(trs.selfShardId) - numSentIntra = trs.sendOnTopic(intraPeers, preferredPeer, topicToSendRequest, buff, trs.numIntraShardPeers, core.IntraShardPeer.String()) + numSentIntra = trs.sendOnTopic( + intraPeers, + preferredPeer, + topicToSendRequest, + buff, + trs.numIntraShardPeers, + core.IntraShardPeer.String(), + trs.mainMessenger, + trs.mainPeersRatingHandler, + mainNetwork, + trs.mainPreferredPeersHolderHandler) } else { - // TODO[Sorin]: select preferred peers of type full history as well. 
- // TODO[Sorin]: replace the following line with the proper functionality from the full archive messenger - // fullHistoryPeers = trs.peerListCreator.FullHistoryList() - numSentIntra = trs.sendOnTopic(fullHistoryPeers, "", topicToSendRequest, buff, trs.numFullHistoryPeers, core.FullHistoryPeer.String()) + preferredPeer := trs.getPreferredFullArchivePeer() + fullHistoryPeers = trs.fullArchiveMessenger.ConnectedPeers() + + numSentIntra = trs.sendOnTopic( + fullHistoryPeers, + preferredPeer, + topicToSendRequest, + buff, + trs.numFullHistoryPeers, + core.FullHistoryPeer.String(), + trs.fullArchiveMessenger, + trs.fullArchivePeersRatingHandler, + fullArchiveNetwork, + trs.fullArchivePreferredPeersHolderHandler) } trs.callDebugHandler(originalHashes, numSentIntra, numSentCross) @@ -169,6 +205,10 @@ func (trs *topicRequestSender) sendOnTopic( buff []byte, maxToSend int, peerType string, + messenger dataRetriever.MessageHandler, + peersRatingHandler dataRetriever.PeersRatingHandler, + network string, + preferredPeersHolder dataRetriever.PreferredPeersHolderHandler, ) int { if len(peerList) == 0 || maxToSend == 0 { return 0 @@ -176,7 +216,7 @@ func (trs *topicRequestSender) sendOnTopic( histogramMap := make(map[string]int) - topRatedPeersList := trs.peersRatingHandler.GetTopRatedPeersFromList(peerList, maxToSend) + topRatedPeersList := peersRatingHandler.GetTopRatedPeersFromList(peerList, maxToSend) indexes := createIndexList(len(topRatedPeersList)) shuffledIndexes := random.FisherYatesShuffle(indexes, trs.randomizer) @@ -187,14 +227,16 @@ func (trs *topicRequestSender) sendOnTopic( shuffledIndexes = append([]int{preferredPeerIndex}, shuffledIndexes...) 
} + logData = append(logData, "network", network) + for idx := 0; idx < len(shuffledIndexes); idx++ { peer := getPeerID(shuffledIndexes[idx], topRatedPeersList, preferredPeer, peerType, topicToSendRequest, histogramMap) - err := trs.sendToConnectedPeer(topicToSendRequest, buff, peer) + err := trs.sendToConnectedPeer(topicToSendRequest, buff, peer, messenger, network, preferredPeersHolder) if err != nil { continue } - trs.peersRatingHandler.DecreaseRating(peer) + peersRatingHandler.DecreaseRating(peer) logData = append(logData, peerType) logData = append(logData, peer.Pretty()) @@ -204,7 +246,7 @@ func (trs *topicRequestSender) sendOnTopic( } } log.Trace("requests are sent to", logData...) - log.Trace("request peers histogram", "max peers to send", maxToSend, "topic", topicToSendRequest, "histogram", histogramMap) + log.Trace("request peers histogram", "network", network, "max peers to send", maxToSend, "topic", topicToSendRequest, "histogram", histogramMap) return msgSentCounter } @@ -227,13 +269,11 @@ func (trs *topicRequestSender) getPreferredPeer(shardID uint32) core.PeerID { return "" } - randomIdx := trs.randomizer.Intn(len(peersInShard)) - - return peersInShard[randomIdx] + return trs.getRandomPeerID(peersInShard) } func (trs *topicRequestSender) getPreferredPeersInShard(shardID uint32) ([]core.PeerID, bool) { - preferredPeers := trs.preferredPeersHolderHandler.Get() + preferredPeers := trs.mainPreferredPeersHolderHandler.Get() peers, found := preferredPeers[shardID] if !found || len(peers) == 0 { @@ -243,6 +283,33 @@ func (trs *topicRequestSender) getPreferredPeersInShard(shardID uint32) ([]core. 
return peers, true } +func (trs *topicRequestSender) getPreferredFullArchivePeer() core.PeerID { + preferredPeersMap := trs.fullArchivePreferredPeersHolderHandler.Get() + preferredPeersSlice := mapToSlice(preferredPeersMap) + + if len(preferredPeersSlice) == 0 { + return "" + } + + return trs.getRandomPeerID(preferredPeersSlice) +} + +func (trs *topicRequestSender) getRandomPeerID(peerIDs []core.PeerID) core.PeerID { + randomIdx := trs.randomizer.Intn(len(peerIDs)) + + return peerIDs[randomIdx] +} + +func mapToSlice(initialMap map[uint32][]core.PeerID) []core.PeerID { + newSlice := make([]core.PeerID, 0, len(initialMap)) + + for _, peerIDsOnShard := range initialMap { + newSlice = append(newSlice, peerIDsOnShard...) + } + + return newSlice +} + // SetNumPeersToQuery will set the number of intra shard and cross shard number of peers to query func (trs *topicRequestSender) SetNumPeersToQuery(intra int, cross int) { trs.mutNumPeersToQuery.Lock() diff --git a/dataRetriever/topicSender/topicRequestSender_test.go b/dataRetriever/topicSender/topicRequestSender_test.go index 62833ad45b1..625d4268f36 100644 --- a/dataRetriever/topicSender/topicRequestSender_test.go +++ b/dataRetriever/topicSender/topicRequestSender_test.go @@ -20,10 +20,16 @@ import ( func createMockArgBaseTopicSender() topicsender.ArgBaseTopicSender { return topicsender.ArgBaseTopicSender{ - Messenger: &mock.MessageHandlerStub{}, - TopicName: "topic", - OutputAntiflooder: &mock.P2PAntifloodHandlerStub{}, - PreferredPeersHolder: &p2pmocks.PeersHolderStub{ + MainMessenger: &mock.MessageHandlerStub{}, + FullArchiveMessenger: &mock.MessageHandlerStub{}, + TopicName: "topic", + OutputAntiflooder: &mock.P2PAntifloodHandlerStub{}, + MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{ + GetCalled: func() map[uint32][]core.PeerID { + return map[uint32][]core.PeerID{} + }, + }, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{ GetCalled: func() map[uint32][]core.PeerID { return 
map[uint32][]core.PeerID{} }, @@ -34,30 +40,40 @@ func createMockArgBaseTopicSender() topicsender.ArgBaseTopicSender { func createMockArgTopicRequestSender() topicsender.ArgTopicRequestSender { return topicsender.ArgTopicRequestSender{ - ArgBaseTopicSender: createMockArgBaseTopicSender(), - Marshaller: &mock.MarshalizerMock{}, - Randomizer: &mock.IntRandomizerStub{}, - PeerListCreator: &mock.PeerListCreatorStub{}, - NumIntraShardPeers: 2, - NumCrossShardPeers: 2, - NumFullHistoryPeers: 2, - CurrentNetworkEpochProvider: &mock.CurrentNetworkEpochProviderStub{}, - SelfShardIdProvider: mock.NewMultipleShardsCoordinatorMock(), - PeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, + ArgBaseTopicSender: createMockArgBaseTopicSender(), + Marshaller: &mock.MarshalizerMock{}, + Randomizer: &mock.IntRandomizerStub{}, + PeerListCreator: &mock.PeerListCreatorStub{}, + NumIntraShardPeers: 2, + NumCrossShardPeers: 2, + NumFullHistoryPeers: 2, + CurrentNetworkEpochProvider: &mock.CurrentNetworkEpochProviderStub{}, + SelfShardIdProvider: mock.NewMultipleShardsCoordinatorMock(), + MainPeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, + FullArchivePeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, } } func TestNewTopicRequestSender(t *testing.T) { t.Parallel() - t.Run("nil Messenger should error", func(t *testing.T) { + t.Run("nil MainMessenger should error", func(t *testing.T) { + t.Parallel() + + arg := createMockArgTopicRequestSender() + arg.MainMessenger = nil + trs, err := topicsender.NewTopicRequestSender(arg) + assert.True(t, check.IfNil(trs)) + assert.True(t, errors.Is(err, dataRetriever.ErrNilMessenger)) + }) + t.Run("nil FullArchiveMessenger should error", func(t *testing.T) { t.Parallel() arg := createMockArgTopicRequestSender() - arg.Messenger = nil + arg.FullArchiveMessenger = nil trs, err := topicsender.NewTopicRequestSender(arg) assert.True(t, check.IfNil(trs)) - assert.Equal(t, dataRetriever.ErrNilMessenger, err) + assert.True(t, errors.Is(err, 
dataRetriever.ErrNilMessenger)) }) t.Run("nil OutputAntiflooder should error", func(t *testing.T) { t.Parallel() @@ -68,14 +84,23 @@ func TestNewTopicRequestSender(t *testing.T) { assert.True(t, check.IfNil(trs)) assert.Equal(t, dataRetriever.ErrNilAntifloodHandler, err) }) - t.Run("nil PreferredPeersHolder should error", func(t *testing.T) { + t.Run("nil MainPreferredPeersHolder should error", func(t *testing.T) { + t.Parallel() + + arg := createMockArgTopicRequestSender() + arg.MainPreferredPeersHolder = nil + trs, err := topicsender.NewTopicRequestSender(arg) + assert.True(t, check.IfNil(trs)) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPreferredPeersHolder)) + }) + t.Run("nil FullArchivePreferredPeersHolder should error", func(t *testing.T) { t.Parallel() arg := createMockArgTopicRequestSender() - arg.PreferredPeersHolder = nil + arg.FullArchivePreferredPeersHolder = nil trs, err := topicsender.NewTopicRequestSender(arg) assert.True(t, check.IfNil(trs)) - assert.Equal(t, dataRetriever.ErrNilPreferredPeersHolder, err) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPreferredPeersHolder)) }) t.Run("nil Marshaller should error", func(t *testing.T) { t.Parallel() @@ -113,14 +138,23 @@ func TestNewTopicRequestSender(t *testing.T) { assert.True(t, check.IfNil(trs)) assert.Equal(t, dataRetriever.ErrNilCurrentNetworkEpochProvider, err) }) - t.Run("nil PeersRatingHandler should error", func(t *testing.T) { + t.Run("nil MainPeersRatingHandler should error", func(t *testing.T) { + t.Parallel() + + arg := createMockArgTopicRequestSender() + arg.MainPeersRatingHandler = nil + trs, err := topicsender.NewTopicRequestSender(arg) + assert.True(t, check.IfNil(trs)) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPeersRatingHandler)) + }) + t.Run("nil FullArchivePeersRatingHandler should error", func(t *testing.T) { t.Parallel() arg := createMockArgTopicRequestSender() - arg.PeersRatingHandler = nil + arg.FullArchivePeersRatingHandler = nil trs, err := 
topicsender.NewTopicRequestSender(arg) assert.True(t, check.IfNil(trs)) - assert.Equal(t, dataRetriever.ErrNilPeersRatingHandler, err) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPeersRatingHandler)) }) t.Run("nil SelfShardIdProvider should error", func(t *testing.T) { t.Parallel() @@ -230,7 +264,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { sentToPid2 := false arg := createMockArgTopicRequestSender() - arg.Messenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &mock.MessageHandlerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { if bytes.Equal(peerID.Bytes(), pID1.Bytes()) { sentToPid1 = true @@ -242,6 +276,12 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { return nil }, } + arg.FullArchiveMessenger = &mock.MessageHandlerStub{ + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + assert.Fail(t, "should have not been called") + return nil + }, + } arg.PeerListCreator = &mock.PeerListCreatorStub{ CrossShardPeerListCalled: func() []core.PeerID { return []core.PeerID{pID1} @@ -251,7 +291,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { }, } decreaseCalledCounter := 0 - arg.PeersRatingHandler = &p2pmocks.PeersRatingHandlerStub{ + arg.MainPeersRatingHandler = &p2pmocks.PeersRatingHandlerStub{ DecreaseRatingCalled: func(pid core.PeerID) { decreaseCalledCounter++ if !bytes.Equal(pid.Bytes(), pID1.Bytes()) && !bytes.Equal(pid.Bytes(), pID2.Bytes()) { @@ -268,48 +308,57 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { assert.True(t, sentToPid2) assert.Equal(t, 2, decreaseCalledCounter) }) - // TODO[Sorin]: fix this test - //t.Run("should work and send to full history", func(t *testing.T) { - // t.Parallel() - // - // pIDfullHistory := core.PeerID("full history peer") - // sentToFullHistoryPeer := false - // - // arg := createMockArgTopicRequestSender() - // arg.Messenger = &mock.MessageHandlerStub{ - // 
SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { - // if bytes.Equal(peerID.Bytes(), pIDfullHistory.Bytes()) { - // sentToFullHistoryPeer = true - // } - // - // return nil - // }, - // } - // arg.PeerListCreator = &mock.PeerListCreatorStub{ - // FullHistoryListCalled: func() []core.PeerID { - // return []core.PeerID{pIDfullHistory} - // }, - // } - // arg.CurrentNetworkEpochProvider = &mock.CurrentNetworkEpochProviderStub{ - // EpochIsActiveInNetworkCalled: func(epoch uint32) bool { - // return false - // }, - // } - // decreaseCalledCounter := 0 - // arg.PeersRatingHandler = &p2pmocks.PeersRatingHandlerStub{ - // DecreaseRatingCalled: func(pid core.PeerID) { - // decreaseCalledCounter++ - // assert.Equal(t, pIDfullHistory, pid) - // }, - // } - // trs, _ := topicsender.NewTopicRequestSender(arg) - // - // err := trs.SendOnRequestTopic(&dataRetriever.RequestData{}, defaultHashes) - // assert.Nil(t, err) - // assert.True(t, sentToFullHistoryPeer) - // assert.Equal(t, 1, decreaseCalledCounter) - //}) - t.Run("should work and send to preferred peers", func(t *testing.T) { + t.Run("should work and send to full history", func(t *testing.T) { + t.Parallel() + + pIDfullHistory := core.PeerID("full history peer") + sentToFullHistoryPeer := false + + arg := createMockArgTopicRequestSender() + arg.MainMessenger = &mock.MessageHandlerStub{ + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + assert.Fail(t, "should have not been called") + + return nil + }, + } + arg.FullArchiveMessenger = &mock.MessageHandlerStub{ + ConnectedPeersCalled: func() []core.PeerID { + return []core.PeerID{pIDfullHistory} + }, + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + if bytes.Equal(peerID.Bytes(), pIDfullHistory.Bytes()) { + sentToFullHistoryPeer = true + } + + return nil + }, + } + arg.CurrentNetworkEpochProvider = &mock.CurrentNetworkEpochProviderStub{ + 
EpochIsActiveInNetworkCalled: func(epoch uint32) bool { + return false + }, + } + arg.MainPeersRatingHandler = &p2pmocks.PeersRatingHandlerStub{ + DecreaseRatingCalled: func(pid core.PeerID) { + assert.Fail(t, "should have not been called") + }, + } + decreaseCalledCounter := 0 + arg.FullArchivePeersRatingHandler = &p2pmocks.PeersRatingHandlerStub{ + DecreaseRatingCalled: func(pid core.PeerID) { + decreaseCalledCounter++ + assert.Equal(t, pIDfullHistory, pid) + }, + } + trs, _ := topicsender.NewTopicRequestSender(arg) + + err := trs.SendOnRequestTopic(&dataRetriever.RequestData{}, defaultHashes) + assert.Nil(t, err) + assert.True(t, sentToFullHistoryPeer) + assert.Equal(t, 1, decreaseCalledCounter) + }) + t.Run("should work and send to preferred regular peers", func(t *testing.T) { t.Parallel() selfShardID := uint32(0) @@ -342,7 +391,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { return []core.PeerID{regularPeer1} }, } - arg.PreferredPeersHolder = &p2pmocks.PeersHolderStub{ + arg.MainPreferredPeersHolder = &p2pmocks.PeersHolderStub{ GetCalled: func() map[uint32][]core.PeerID { return map[uint32][]core.PeerID{ selfShardID: preferredPeersShard0, @@ -352,7 +401,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { } arg.NumCrossShardPeers = 5 arg.NumIntraShardPeers = 5 - arg.Messenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &mock.MessageHandlerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { if strings.HasPrefix(string(peerID), "prefPIDsh0") { countPrefPeersSh0++ @@ -371,7 +420,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { assert.Nil(t, err) assert.Equal(t, 1, countPrefPeersSh1) }) - t.Run("should work and send to preferred cross peer first", func(t *testing.T) { + t.Run("should work and send to preferred regular cross peer first", func(t *testing.T) { t.Parallel() targetShardID := uint32(37) @@ -391,7 +440,7 @@ func 
TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { return []core.PeerID{} }, } - arg.PreferredPeersHolder = &p2pmocks.PeersHolderStub{ + arg.MainPreferredPeersHolder = &p2pmocks.PeersHolderStub{ GetCalled: func() map[uint32][]core.PeerID { return map[uint32][]core.PeerID{ targetShardID: {pidPreferred}, @@ -399,7 +448,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { }, } - arg.Messenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &mock.MessageHandlerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { if bytes.Equal(peerID.Bytes(), pidPreferred.Bytes()) { sentToPreferredPeer = true @@ -416,7 +465,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { assert.Nil(t, err) assert.True(t, sentToPreferredPeer) }) - t.Run("should work and send to preferred intra peer first", func(t *testing.T) { + t.Run("should work and send to preferred regular intra peer first", func(t *testing.T) { t.Parallel() selfShardID := uint32(37) @@ -436,7 +485,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { return []core.PeerID{regularPeer0, regularPeer1, regularPeer0, regularPeer1} }, } - arg.PreferredPeersHolder = &p2pmocks.PeersHolderStub{ + arg.MainPreferredPeersHolder = &p2pmocks.PeersHolderStub{ GetCalled: func() map[uint32][]core.PeerID { return map[uint32][]core.PeerID{ selfShardID: {pidPreferred}, @@ -444,7 +493,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { }, } - arg.Messenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &mock.MessageHandlerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { if bytes.Equal(peerID.Bytes(), pidPreferred.Bytes()) { sentToPreferredPeer = true @@ -466,6 +515,54 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { assert.Nil(t, err) assert.True(t, sentToPreferredPeer) }) + t.Run("should work and send to preferred full archive first", func(t *testing.T) { + t.Parallel() 
+ + selfShardID := uint32(37) + pidPreferred := core.PeerID("preferred peer") + sentToPreferredPeer := false + regularPeer0, regularPeer1 := core.PeerID("peer0"), core.PeerID("peer1") + + arg := createMockArgTopicRequestSender() + arg.NumFullHistoryPeers = 2 + arg.CurrentNetworkEpochProvider = &mock.CurrentNetworkEpochProviderStub{ + EpochIsActiveInNetworkCalled: func(epoch uint32) bool { + return false + }, + } + arg.FullArchivePreferredPeersHolder = &p2pmocks.PeersHolderStub{ + GetCalled: func() map[uint32][]core.PeerID { + return map[uint32][]core.PeerID{ + selfShardID: {pidPreferred}, + } + }, + } + arg.FullArchiveMessenger = &mock.MessageHandlerStub{ + ConnectedPeersCalled: func() []core.PeerID { + return []core.PeerID{regularPeer0, regularPeer1} + }, + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + if bytes.Equal(peerID.Bytes(), pidPreferred.Bytes()) { + sentToPreferredPeer = true + } + + return nil + }, + } + arg.MainMessenger = &mock.MessageHandlerStub{ + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + assert.Fail(t, "should not have been called") + + return nil + }, + } + + trs, _ := topicsender.NewTopicRequestSender(arg) + + err := trs.SendOnRequestTopic(&dataRetriever.RequestData{}, defaultHashes) + assert.Nil(t, err) + assert.True(t, sentToPreferredPeer) + }) t.Run("should work and skip antiflood checks for preferred peers", func(t *testing.T) { t.Parallel() @@ -487,7 +584,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { return []core.PeerID{} }, } - arg.PreferredPeersHolder = &p2pmocks.PeersHolderStub{ + arg.MainPreferredPeersHolder = &p2pmocks.PeersHolderStub{ GetCalled: func() map[uint32][]core.PeerID { return map[uint32][]core.PeerID{ targetShardID: {pidPreferred}, @@ -498,7 +595,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { }, } - arg.Messenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &mock.MessageHandlerStub{ 
SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { if peerID == pidPreferred { sentToPreferredPeer = true @@ -543,7 +640,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { return []core.PeerID{} }, } - arg.PreferredPeersHolder = &p2pmocks.PeersHolderStub{ + arg.MainPreferredPeersHolder = &p2pmocks.PeersHolderStub{ GetCalled: func() map[uint32][]core.PeerID { return map[uint32][]core.PeerID{ 37: {pidPreferred}, @@ -551,7 +648,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { }, } - arg.Messenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &mock.MessageHandlerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { if bytes.Equal(peerID.Bytes(), pidPreferred.Bytes()) { sentToPreferredPeer = true @@ -575,7 +672,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { numSent := 0 arg := createMockArgTopicRequestSender() - arg.Messenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &mock.MessageHandlerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { numSent++ @@ -605,7 +702,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { numSent := 0 arg := createMockArgTopicRequestSender() - arg.Messenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &mock.MessageHandlerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { if peerID == pidNotCalled { assert.Fail(t, fmt.Sprintf("should not have called pid %s", peerID)) @@ -639,7 +736,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { numSent := 0 arg := createMockArgTopicRequestSender() - arg.Messenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &mock.MessageHandlerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { if peerID == pidNotCalled { assert.Fail(t, fmt.Sprintf("should not have called pid %s", peerID)) @@ -672,7 +769,7 @@ func 
TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { sentToPid1 := false arg := createMockArgTopicRequestSender() - arg.Messenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &mock.MessageHandlerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { if bytes.Equal(peerID.Bytes(), pID1.Bytes()) { sentToPid1 = true diff --git a/dataRetriever/topicSender/topicResolverSender.go b/dataRetriever/topicSender/topicResolverSender.go index 59f33f083e8..d4fadc87335 100644 --- a/dataRetriever/topicSender/topicResolverSender.go +++ b/dataRetriever/topicSender/topicResolverSender.go @@ -31,7 +31,11 @@ func NewTopicResolverSender(arg ArgTopicResolverSender) (*topicResolverSender, e // Send is used to send an array buffer to a connected peer // It is used when replying to a request func (trs *topicResolverSender) Send(buff []byte, peer core.PeerID) error { - return trs.sendToConnectedPeer(trs.topicName, buff, peer) + if trs.fullArchiveMessenger.IsConnected(peer) { + return trs.sendToConnectedPeer(trs.topicName, buff, peer, trs.fullArchiveMessenger, fullArchiveNetwork, trs.fullArchivePreferredPeersHolderHandler) + } + + return trs.sendToConnectedPeer(trs.topicName, buff, peer, trs.mainMessenger, mainNetwork, trs.mainPreferredPeersHolderHandler) } // IsInterfaceNil returns true if there is no value under the interface diff --git a/dataRetriever/topicSender/topicResolverSender_test.go b/dataRetriever/topicSender/topicResolverSender_test.go index e51e46dfe90..e6076ffaf14 100644 --- a/dataRetriever/topicSender/topicResolverSender_test.go +++ b/dataRetriever/topicSender/topicResolverSender_test.go @@ -22,15 +22,26 @@ func createMockArgTopicResolverSender() topicsender.ArgTopicResolverSender { } } -func TestNewTopicResolverSender_NilMessengerShouldErr(t *testing.T) { +func TestNewTopicResolverSender_NilMainMessengerShouldErr(t *testing.T) { t.Parallel() arg := createMockArgTopicResolverSender() - arg.Messenger = nil + arg.MainMessenger = 
nil trs, err := topicsender.NewTopicResolverSender(arg) assert.True(t, check.IfNil(trs)) - assert.Equal(t, dataRetriever.ErrNilMessenger, err) + assert.True(t, errors.Is(err, dataRetriever.ErrNilMessenger)) +} + +func TestNewTopicResolverSender_NilFullArchiveMessengerShouldErr(t *testing.T) { + t.Parallel() + + arg := createMockArgTopicResolverSender() + arg.FullArchiveMessenger = nil + trs, err := topicsender.NewTopicResolverSender(arg) + + assert.True(t, check.IfNil(trs)) + assert.True(t, errors.Is(err, dataRetriever.ErrNilMessenger)) } func TestNewTopicResolverSender_NilOutputAntiflooderShouldErr(t *testing.T) { @@ -44,15 +55,26 @@ func TestNewTopicResolverSender_NilOutputAntiflooderShouldErr(t *testing.T) { assert.Equal(t, dataRetriever.ErrNilAntifloodHandler, err) } -func TestNewTopicResolverSender_NilPreferredPeersHolderShouldErr(t *testing.T) { +func TestNewTopicResolverSender_NilMainPreferredPeersHolderShouldErr(t *testing.T) { t.Parallel() arg := createMockArgTopicResolverSender() - arg.PreferredPeersHolder = nil + arg.MainPreferredPeersHolder = nil trs, err := topicsender.NewTopicResolverSender(arg) assert.True(t, check.IfNil(trs)) - assert.Equal(t, dataRetriever.ErrNilPreferredPeersHolder, err) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPreferredPeersHolder)) +} + +func TestNewTopicResolverSender_NilFullArchivePreferredPeersHolderShouldErr(t *testing.T) { + t.Parallel() + + arg := createMockArgTopicResolverSender() + arg.FullArchivePreferredPeersHolder = nil + trs, err := topicsender.NewTopicResolverSender(arg) + + assert.True(t, check.IfNil(trs)) + assert.True(t, errors.Is(err, dataRetriever.ErrNilPreferredPeersHolder)) } func TestNewTopicResolverSender_OkValsShouldWork(t *testing.T) { @@ -74,7 +96,7 @@ func TestTopicResolverSender_SendOutputAntiflooderErrorsShouldNotSendButError(t expectedErr := errors.New("can not send to peer") arg := createMockArgTopicResolverSender() - arg.Messenger = &mock.MessageHandlerStub{ + arg.MainMessenger = 
&mock.MessageHandlerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { assert.Fail(t, "send shouldn't have been called") @@ -106,7 +128,7 @@ func TestTopicResolverSender_SendShouldNotCheckAntifloodForPreferred(t *testing. sendWasCalled := false arg := createMockArgTopicResolverSender() - arg.Messenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &mock.MessageHandlerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { sendWasCalled = true return nil @@ -119,7 +141,7 @@ func TestTopicResolverSender_SendShouldNotCheckAntifloodForPreferred(t *testing. return nil }, } - arg.PreferredPeersHolder = &p2pmocks.PeersHolderStub{ + arg.MainPreferredPeersHolder = &p2pmocks.PeersHolderStub{ ContainsCalled: func(peerID core.PeerID) bool { return peerID == pID1 }, @@ -137,24 +159,68 @@ func TestTopicResolverSender_SendShouldWork(t *testing.T) { pID1 := core.PeerID("peer1") sentToPid1 := false buffToSend := []byte("buff") - - arg := createMockArgTopicResolverSender() - arg.Messenger = &mock.MessageHandlerStub{ - SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { - if bytes.Equal(peerID.Bytes(), pID1.Bytes()) && - bytes.Equal(buff, buffToSend) { - sentToPid1 = true - } - - return nil - }, - } - trs, _ := topicsender.NewTopicResolverSender(arg) - - err := trs.Send(buffToSend, pID1) - - assert.Nil(t, err) - assert.True(t, sentToPid1) + t.Run("on main network", func(t *testing.T) { + t.Parallel() + + arg := createMockArgTopicResolverSender() + arg.MainMessenger = &mock.MessageHandlerStub{ + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + if bytes.Equal(peerID.Bytes(), pID1.Bytes()) && + bytes.Equal(buff, buffToSend) { + sentToPid1 = true + } + + return nil + }, + } + arg.FullArchiveMessenger = &mock.MessageHandlerStub{ + IsConnectedCalled: func(peerID core.PeerID) bool { + return false + }, + SendToConnectedPeerCalled: 
func(topic string, buff []byte, peerID core.PeerID) error { + assert.Fail(t, "should have not been called") + + return nil + }, + } + trs, _ := topicsender.NewTopicResolverSender(arg) + + err := trs.Send(buffToSend, pID1) + + assert.Nil(t, err) + assert.True(t, sentToPid1) + }) + t.Run("on full archive network", func(t *testing.T) { + t.Parallel() + + arg := createMockArgTopicResolverSender() + arg.FullArchiveMessenger = &mock.MessageHandlerStub{ + IsConnectedCalled: func(peerID core.PeerID) bool { + return true + }, + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + if bytes.Equal(peerID.Bytes(), pID1.Bytes()) && + bytes.Equal(buff, buffToSend) { + sentToPid1 = true + } + + return nil + }, + } + arg.MainMessenger = &mock.MessageHandlerStub{ + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + assert.Fail(t, "should have not been called") + + return nil + }, + } + trs, _ := topicsender.NewTopicResolverSender(arg) + + err := trs.Send(buffToSend, pID1) + + assert.Nil(t, err) + assert.True(t, sentToPid1) + }) } func TestTopicResolverSender_Topic(t *testing.T) { diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index 75e94061133..8372772a58f 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -1212,20 +1212,22 @@ func (e *epochStartBootstrap) createResolversContainer() error { // this one should only be used before determining the correct shard where the node should reside log.Debug("epochStartBootstrap.createRequestHandler", "shard", e.shardCoordinator.SelfId()) resolversContainerArgs := resolverscontainer.FactoryArgs{ - ShardCoordinator: e.shardCoordinator, - Messenger: e.mainMessenger, - Store: storageService, - Marshalizer: e.coreComponentsHolder.InternalMarshalizer(), - DataPools: e.dataPool, - Uint64ByteSliceConverter: uint64ByteSlice.NewBigEndianConverter(), - NumConcurrentResolvingJobs: 10, - DataPacker: dataPacker, - 
TriesContainer: e.trieContainer, - SizeCheckDelta: 0, - InputAntifloodHandler: disabled.NewAntiFloodHandler(), - OutputAntifloodHandler: disabled.NewAntiFloodHandler(), - PreferredPeersHolder: disabled.NewPreferredPeersHolder(), - PayloadValidator: payloadValidator, + ShardCoordinator: e.shardCoordinator, + MainMessenger: e.mainMessenger, + FullArchiveMessenger: e.fullArchiveMessenger, + Store: storageService, + Marshalizer: e.coreComponentsHolder.InternalMarshalizer(), + DataPools: e.dataPool, + Uint64ByteSliceConverter: uint64ByteSlice.NewBigEndianConverter(), + NumConcurrentResolvingJobs: 10, + DataPacker: dataPacker, + TriesContainer: e.trieContainer, + SizeCheckDelta: 0, + InputAntifloodHandler: disabled.NewAntiFloodHandler(), + OutputAntifloodHandler: disabled.NewAntiFloodHandler(), + MainPreferredPeersHolder: disabled.NewPreferredPeersHolder(), + FullArchivePreferredPeersHolder: disabled.NewPreferredPeersHolder(), + PayloadValidator: payloadValidator, } resolverFactory, err := resolverscontainer.NewMetaResolversContainerFactory(resolversContainerArgs) if err != nil { @@ -1242,16 +1244,19 @@ func (e *epochStartBootstrap) createResolversContainer() error { func (e *epochStartBootstrap) createRequestHandler() error { requestersContainerArgs := requesterscontainer.FactoryArgs{ - RequesterConfig: e.generalConfig.Requesters, - ShardCoordinator: e.shardCoordinator, - Messenger: e.mainMessenger, - Marshaller: e.coreComponentsHolder.InternalMarshalizer(), - Uint64ByteSliceConverter: uint64ByteSlice.NewBigEndianConverter(), - OutputAntifloodHandler: disabled.NewAntiFloodHandler(), - CurrentNetworkEpochProvider: disabled.NewCurrentNetworkEpochProviderHandler(), - PreferredPeersHolder: disabled.NewPreferredPeersHolder(), - PeersRatingHandler: disabled.NewDisabledPeersRatingHandler(), - SizeCheckDelta: 0, + RequesterConfig: e.generalConfig.Requesters, + ShardCoordinator: e.shardCoordinator, + MainMessenger: e.mainMessenger, + FullArchiveMessenger: e.fullArchiveMessenger, 
+ Marshaller: e.coreComponentsHolder.InternalMarshalizer(), + Uint64ByteSliceConverter: uint64ByteSlice.NewBigEndianConverter(), + OutputAntifloodHandler: disabled.NewAntiFloodHandler(), + CurrentNetworkEpochProvider: disabled.NewCurrentNetworkEpochProviderHandler(), + MainPreferredPeersHolder: disabled.NewPreferredPeersHolder(), + FullArchivePreferredPeersHolder: disabled.NewPreferredPeersHolder(), + MainPeersRatingHandler: disabled.NewDisabledPeersRatingHandler(), + FullArchivePeersRatingHandler: disabled.NewDisabledPeersRatingHandler(), + SizeCheckDelta: 0, } requestersFactory, err := requesterscontainer.NewMetaRequestersContainerFactory(requestersContainerArgs) if err != nil { diff --git a/factory/disabled/preferredPeersHolder.go b/factory/disabled/preferredPeersHolder.go new file mode 100644 index 00000000000..222a0b7393d --- /dev/null +++ b/factory/disabled/preferredPeersHolder.go @@ -0,0 +1,44 @@ +package disabled + +import ( + "github.com/multiversx/mx-chain-core-go/core" +) + +type preferredPeersHolder struct { +} + +// NewPreferredPeersHolder returns a new instance of preferredPeersHolder +func NewPreferredPeersHolder() *preferredPeersHolder { + return &preferredPeersHolder{} +} + +// PutConnectionAddress does nothing as it is disabled +func (holder *preferredPeersHolder) PutConnectionAddress(_ core.PeerID, _ string) { +} + +// PutShardID does nothing as it is disabled +func (holder *preferredPeersHolder) PutShardID(_ core.PeerID, _ uint32) { +} + +// Get does nothing as it is disabled +func (holder *preferredPeersHolder) Get() map[uint32][]core.PeerID { + return make(map[uint32][]core.PeerID) +} + +// Contains returns false +func (holder *preferredPeersHolder) Contains(_ core.PeerID) bool { + return false +} + +// Remove does nothing as it is disabled +func (holder *preferredPeersHolder) Remove(_ core.PeerID) { +} + +// Clear does nothing as it is disabled +func (holder *preferredPeersHolder) Clear() { +} + +// IsInterfaceNil returns true if there is no 
value under the interface +func (holder *preferredPeersHolder) IsInterfaceNil() bool { + return holder == nil +} diff --git a/factory/interface.go b/factory/interface.go index 9dd05c13f69..c01b0031b86 100644 --- a/factory/interface.go +++ b/factory/interface.go @@ -250,6 +250,7 @@ type NetworkComponentsHolder interface { FullArchiveNetworkMessenger() p2p.Messenger FullArchivePeersRatingHandler() p2p.PeersRatingHandler FullArchivePeersRatingMonitor() p2p.PeersRatingMonitor + FullArchivePreferredPeersHolderHandler() PreferredPeersHolderHandler IsInterfaceNil() bool } diff --git a/factory/mock/networkComponentsMock.go b/factory/mock/networkComponentsMock.go index 04e89f19067..fed47d83ffc 100644 --- a/factory/mock/networkComponentsMock.go +++ b/factory/mock/networkComponentsMock.go @@ -18,6 +18,7 @@ type NetworkComponentsMock struct { FullArchiveNetworkMessengerField p2p.Messenger FullArchivePeersRatingHandlerField p2p.PeersRatingHandler FullArchivePeersRatingMonitorField p2p.PeersRatingMonitor + FullArchivePreferredPeersHolder factory.PreferredPeersHolderHandler } // PubKeyCacher - @@ -95,6 +96,11 @@ func (ncm *NetworkComponentsMock) FullArchivePeersRatingMonitor() p2p.PeersRatin return ncm.FullArchivePeersRatingMonitorField } +// FullArchivePreferredPeersHolderHandler - +func (ncm *NetworkComponentsMock) FullArchivePreferredPeersHolderHandler() factory.PreferredPeersHolderHandler { + return ncm.FullArchivePreferredPeersHolder +} + // IsInterfaceNil - func (ncm *NetworkComponentsMock) IsInterfaceNil() bool { return ncm == nil diff --git a/factory/network/networkComponents.go b/factory/network/networkComponents.go index a3cfc467c88..d6c1b5a6492 100644 --- a/factory/network/networkComponents.go +++ b/factory/network/networkComponents.go @@ -28,40 +28,43 @@ import ( // NetworkComponentsFactoryArgs holds the arguments to create a network component handler instance type NetworkComponentsFactoryArgs struct { - MainP2pConfig p2pConfig.P2PConfig - FullArchiveP2pConfig 
p2pConfig.P2PConfig - MainConfig config.Config - RatingsConfig config.RatingsConfig - StatusHandler core.AppStatusHandler - Marshalizer marshal.Marshalizer - Syncer p2p.SyncTimer - PreferredPeersSlices []string - BootstrapWaitTime time.Duration - NodeOperationMode p2p.NodeOperation - ConnectionWatcherType string - CryptoComponents factory.CryptoComponentsHolder + MainP2pConfig p2pConfig.P2PConfig + FullArchiveP2pConfig p2pConfig.P2PConfig + MainConfig config.Config + RatingsConfig config.RatingsConfig + StatusHandler core.AppStatusHandler + Marshalizer marshal.Marshalizer + Syncer p2p.SyncTimer + MainPreferredPeersSlices []string + FullArchivePreferredPeersSlices []string + BootstrapWaitTime time.Duration + NodeOperationMode p2p.NodeOperation + ConnectionWatcherType string + CryptoComponents factory.CryptoComponentsHolder } type networkComponentsFactory struct { - mainP2PConfig p2pConfig.P2PConfig - fullArchiveP2PConfig p2pConfig.P2PConfig - mainConfig config.Config - ratingsConfig config.RatingsConfig - statusHandler core.AppStatusHandler - listenAddress string - marshalizer marshal.Marshalizer - syncer p2p.SyncTimer - preferredPeersSlices []string - bootstrapWaitTime time.Duration - nodeOperationMode p2p.NodeOperation - connectionWatcherType string - cryptoComponents factory.CryptoComponentsHolder + mainP2PConfig p2pConfig.P2PConfig + fullArchiveP2PConfig p2pConfig.P2PConfig + mainConfig config.Config + ratingsConfig config.RatingsConfig + statusHandler core.AppStatusHandler + listenAddress string + marshalizer marshal.Marshalizer + syncer p2p.SyncTimer + mainPreferredPeersSlices []string + fullArchivePreferredPeersSlices []string + bootstrapWaitTime time.Duration + nodeOperationMode p2p.NodeOperation + connectionWatcherType string + cryptoComponents factory.CryptoComponentsHolder } type networkComponentsHolder struct { - netMessenger p2p.Messenger - peersRatingHandler p2p.PeersRatingHandler - peersRatingMonitor p2p.PeersRatingMonitor + netMessenger p2p.Messenger 
+ peersRatingHandler p2p.PeersRatingHandler + peersRatingMonitor p2p.PeersRatingMonitor + preferredPeersHolder p2p.PreferredPeersHolderHandler } // networkComponents struct holds the network components @@ -76,7 +79,6 @@ type networkComponents struct { peerBlackListHandler process.PeerBlackListCacher antifloodConfig config.AntifloodConfig peerHonestyHandler consensus.PeerHonestyHandler - peersHolder factory.PreferredPeersHolderHandler closeFunc context.CancelFunc } @@ -103,35 +105,31 @@ func NewNetworkComponentsFactory( } return &networkComponentsFactory{ - mainP2PConfig: args.MainP2pConfig, - fullArchiveP2PConfig: args.FullArchiveP2pConfig, - ratingsConfig: args.RatingsConfig, - marshalizer: args.Marshalizer, - mainConfig: args.MainConfig, - statusHandler: args.StatusHandler, - listenAddress: p2p.ListenAddrWithIp4AndTcp, - syncer: args.Syncer, - bootstrapWaitTime: args.BootstrapWaitTime, - preferredPeersSlices: args.PreferredPeersSlices, - nodeOperationMode: args.NodeOperationMode, - connectionWatcherType: args.ConnectionWatcherType, - cryptoComponents: args.CryptoComponents, + mainP2PConfig: args.MainP2pConfig, + fullArchiveP2PConfig: args.FullArchiveP2pConfig, + ratingsConfig: args.RatingsConfig, + marshalizer: args.Marshalizer, + mainConfig: args.MainConfig, + statusHandler: args.StatusHandler, + listenAddress: p2p.ListenAddrWithIp4AndTcp, + syncer: args.Syncer, + bootstrapWaitTime: args.BootstrapWaitTime, + mainPreferredPeersSlices: args.MainPreferredPeersSlices, + fullArchivePreferredPeersSlices: args.FullArchivePreferredPeersSlices, + nodeOperationMode: args.NodeOperationMode, + connectionWatcherType: args.ConnectionWatcherType, + cryptoComponents: args.CryptoComponents, }, nil } // Create creates and returns the network components func (ncf *networkComponentsFactory) Create() (*networkComponents, error) { - peersHolder, err := p2pFactory.NewPeersHolder(ncf.preferredPeersSlices) + mainNetworkComp, err := ncf.createMainNetworkHolder() if err != nil { return 
nil, err } - mainNetworkComp, err := ncf.createMainNetworkHolder(peersHolder) - if err != nil { - return nil, err - } - - fullArchiveNetworkComp, err := ncf.createFullArchiveNetworkHolder(peersHolder) + fullArchiveNetworkComp, err := ncf.createFullArchiveNetworkHolder() if err != nil { return nil, err } @@ -171,7 +169,6 @@ func (ncf *networkComponentsFactory) Create() (*networkComponents, error) { peerBlackListHandler: antiFloodComponents.BlacklistHandler, antifloodConfig: ncf.mainConfig.Antiflood, peerHonestyHandler: peerHonestyHandler, - peersHolder: peersHolder, closeFunc: cancelFunc, }, nil } @@ -232,11 +229,16 @@ func (ncf *networkComponentsFactory) createPeerHonestyHandler( } func (ncf *networkComponentsFactory) createNetworkHolder( - peersHolder p2p.PreferredPeersHolderHandler, p2pConfig p2pConfig.P2PConfig, logger p2p.Logger, + preferredPeers []string, ) (networkComponentsHolder, error) { + peersHolder, err := p2pFactory.NewPeersHolder(preferredPeers) + if err != nil { + return networkComponentsHolder{}, err + } + peersRatingCfg := ncf.mainConfig.PeersRatingConfig topRatedCache, err := cache.NewLRUCache(peersRatingCfg.TopRatedCacheCapacity) if err != nil { @@ -286,29 +288,31 @@ func (ncf *networkComponentsFactory) createNetworkHolder( } return networkComponentsHolder{ - netMessenger: networkMessenger, - peersRatingHandler: peersRatingHandler, - peersRatingMonitor: peersRatingMonitor, + netMessenger: networkMessenger, + peersRatingHandler: peersRatingHandler, + peersRatingMonitor: peersRatingMonitor, + preferredPeersHolder: peersHolder, }, nil } -func (ncf *networkComponentsFactory) createMainNetworkHolder(peersHolder p2p.PreferredPeersHolderHandler) (networkComponentsHolder, error) { +func (ncf *networkComponentsFactory) createMainNetworkHolder() (networkComponentsHolder, error) { loggerInstance := logger.GetOrCreate("main/p2p") - return ncf.createNetworkHolder(peersHolder, ncf.mainP2PConfig, loggerInstance) + return 
ncf.createNetworkHolder(ncf.mainP2PConfig, loggerInstance, ncf.mainPreferredPeersSlices) } -func (ncf *networkComponentsFactory) createFullArchiveNetworkHolder(peersHolder p2p.PreferredPeersHolderHandler) (networkComponentsHolder, error) { +func (ncf *networkComponentsFactory) createFullArchiveNetworkHolder() (networkComponentsHolder, error) { if ncf.nodeOperationMode != p2p.FullArchiveMode { return networkComponentsHolder{ - netMessenger: p2pDisabled.NewNetworkMessenger(), - peersRatingHandler: disabled.NewPeersRatingHandler(), - peersRatingMonitor: disabled.NewPeersRatingMonitor(), + netMessenger: p2pDisabled.NewNetworkMessenger(), + peersRatingHandler: disabled.NewPeersRatingHandler(), + peersRatingMonitor: disabled.NewPeersRatingMonitor(), + preferredPeersHolder: disabled.NewPreferredPeersHolder(), }, nil } loggerInstance := logger.GetOrCreate("full-archive/p2p") - return ncf.createNetworkHolder(peersHolder, ncf.fullArchiveP2PConfig, loggerInstance) + return ncf.createNetworkHolder(ncf.fullArchiveP2PConfig, loggerInstance, ncf.fullArchivePreferredPeersSlices) } // Close closes all underlying components that need closing diff --git a/factory/network/networkComponentsHandler.go b/factory/network/networkComponentsHandler.go index ce018c5d057..79811f0b5ad 100644 --- a/factory/network/networkComponentsHandler.go +++ b/factory/network/networkComponentsHandler.go @@ -187,7 +187,7 @@ func (mnc *managedNetworkComponents) PeerHonestyHandler() factory.PeerHonestyHan return mnc.networkComponents.peerHonestyHandler } -// PreferredPeersHolderHandler returns the preferred peers holder +// PreferredPeersHolderHandler returns the preferred peers holder of the main network func (mnc *managedNetworkComponents) PreferredPeersHolderHandler() factory.PreferredPeersHolderHandler { mnc.mutNetworkComponents.RLock() defer mnc.mutNetworkComponents.RUnlock() @@ -196,7 +196,7 @@ func (mnc *managedNetworkComponents) PreferredPeersHolderHandler() factory.Prefe return nil } - return 
mnc.networkComponents.peersHolder + return mnc.mainNetworkHolder.preferredPeersHolder } // PeersRatingHandler returns the peers rating handler of the main network @@ -259,6 +259,18 @@ func (mnc *managedNetworkComponents) FullArchivePeersRatingMonitor() p2p.PeersRa return mnc.fullArchiveNetworkHolder.peersRatingMonitor } +// FullArchivePreferredPeersHolderHandler returns the preferred peers holder of the full archive network +func (mnc *managedNetworkComponents) FullArchivePreferredPeersHolderHandler() factory.PreferredPeersHolderHandler { + mnc.mutNetworkComponents.RLock() + defer mnc.mutNetworkComponents.RUnlock() + + if mnc.networkComponents == nil { + return nil + } + + return mnc.fullArchiveNetworkHolder.preferredPeersHolder +} + // IsInterfaceNil returns true if the value under the interface is nil func (mnc *managedNetworkComponents) IsInterfaceNil() bool { return mnc == nil } diff --git a/factory/network/networkComponentsHandler_test.go b/factory/network/networkComponentsHandler_test.go index c4383174113..84f44e3b235 100644 --- a/factory/network/networkComponentsHandler_test.go +++ b/factory/network/networkComponentsHandler_test.go @@ -64,6 +64,7 @@ func TestManagedNetworkComponents_Create(t *testing.T) { require.Nil(t, managedNetworkComponents.FullArchiveNetworkMessenger()) require.Nil(t, managedNetworkComponents.FullArchivePeersRatingHandler()) require.Nil(t, managedNetworkComponents.FullArchivePeersRatingMonitor()) + require.Nil(t, managedNetworkComponents.FullArchivePreferredPeersHolderHandler()) err = managedNetworkComponents.Create() require.NoError(t, err) @@ -78,6 +79,7 @@ func TestManagedNetworkComponents_Create(t *testing.T) { require.NotNil(t, managedNetworkComponents.FullArchiveNetworkMessenger()) require.NotNil(t, managedNetworkComponents.FullArchivePeersRatingHandler()) require.NotNil(t, managedNetworkComponents.FullArchivePeersRatingMonitor()) + require.NotNil(t, managedNetworkComponents.FullArchivePreferredPeersHolderHandler()) require.Equal(t,
factory.NetworkComponentsName, managedNetworkComponents.String()) }) diff --git a/factory/network/networkComponents_test.go b/factory/network/networkComponents_test.go index dca1e2f2d80..307614a1a5a 100644 --- a/factory/network/networkComponents_test.go +++ b/factory/network/networkComponents_test.go @@ -77,7 +77,7 @@ func TestNetworkComponentsFactory_Create(t *testing.T) { t.Parallel() args := componentsMock.GetNetworkFactoryArgs() - args.PreferredPeersSlices = []string{"invalid peer"} + args.MainPreferredPeersSlices = []string{"invalid peer"} ncf, _ := networkComp.NewNetworkComponentsFactory(args) diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index 39f1cb3412e..f75f5ab0768 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -185,10 +185,10 @@ type processComponentsFactory struct { coreData factory.CoreComponentsHolder crypto factory.CryptoComponentsHolder state factory.StateComponentsHolder - network factory.NetworkComponentsHolder bootstrapComponents factory.BootstrapComponentsHolder statusComponents factory.StatusComponentsHolder statusCoreComponents factory.StatusCoreComponentsHolder + network factory.NetworkComponentsHolder } // NewProcessComponentsFactory will return a new instance of processComponentsFactory @@ -1351,21 +1351,23 @@ func (pcf *processComponentsFactory) newShardResolverContainerFactory( } resolversContainerFactoryArgs := resolverscontainer.FactoryArgs{ - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - Messenger: pcf.network.NetworkMessenger(), - Store: pcf.data.StorageService(), - Marshalizer: pcf.coreData.InternalMarshalizer(), - DataPools: pcf.data.Datapool(), - Uint64ByteSliceConverter: pcf.coreData.Uint64ByteSliceConverter(), - DataPacker: dataPacker, - TriesContainer: pcf.state.TriesContainer(), - SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, - InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), - 
OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), - NumConcurrentResolvingJobs: pcf.config.Antiflood.NumConcurrentResolverJobs, - IsFullHistoryNode: pcf.prefConfigs.Preferences.FullArchive, - PreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), - PayloadValidator: payloadValidator, + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + MainMessenger: pcf.network.NetworkMessenger(), + FullArchiveMessenger: pcf.network.FullArchiveNetworkMessenger(), + Store: pcf.data.StorageService(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + DataPools: pcf.data.Datapool(), + Uint64ByteSliceConverter: pcf.coreData.Uint64ByteSliceConverter(), + DataPacker: dataPacker, + TriesContainer: pcf.state.TriesContainer(), + SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, + InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), + OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), + NumConcurrentResolvingJobs: pcf.config.Antiflood.NumConcurrentResolverJobs, + IsFullHistoryNode: pcf.prefConfigs.Preferences.FullArchive, + MainPreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), + FullArchivePreferredPeersHolder: pcf.network.FullArchivePreferredPeersHolderHandler(), + PayloadValidator: payloadValidator, } resolversContainerFactory, err := resolverscontainer.NewShardResolversContainerFactory(resolversContainerFactoryArgs) if err != nil { @@ -1385,21 +1387,23 @@ func (pcf *processComponentsFactory) newMetaResolverContainerFactory( } resolversContainerFactoryArgs := resolverscontainer.FactoryArgs{ - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - Messenger: pcf.network.NetworkMessenger(), - Store: pcf.data.StorageService(), - Marshalizer: pcf.coreData.InternalMarshalizer(), - DataPools: pcf.data.Datapool(), - Uint64ByteSliceConverter: pcf.coreData.Uint64ByteSliceConverter(), - DataPacker: dataPacker, - TriesContainer: pcf.state.TriesContainer(), - SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, - 
InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), - OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), - NumConcurrentResolvingJobs: pcf.config.Antiflood.NumConcurrentResolverJobs, - IsFullHistoryNode: pcf.prefConfigs.Preferences.FullArchive, - PreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), - PayloadValidator: payloadValidator, + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + MainMessenger: pcf.network.NetworkMessenger(), + FullArchiveMessenger: pcf.network.FullArchiveNetworkMessenger(), + Store: pcf.data.StorageService(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + DataPools: pcf.data.Datapool(), + Uint64ByteSliceConverter: pcf.coreData.Uint64ByteSliceConverter(), + DataPacker: dataPacker, + TriesContainer: pcf.state.TriesContainer(), + SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, + InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), + OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), + NumConcurrentResolvingJobs: pcf.config.Antiflood.NumConcurrentResolverJobs, + IsFullHistoryNode: pcf.prefConfigs.Preferences.FullArchive, + MainPreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), + FullArchivePreferredPeersHolder: pcf.network.FullArchivePreferredPeersHolderHandler(), + PayloadValidator: payloadValidator, } return resolverscontainer.NewMetaResolversContainerFactory(resolversContainerFactoryArgs) @@ -1416,16 +1420,19 @@ func (pcf *processComponentsFactory) newRequestersContainerFactory( shardCoordinator := pcf.bootstrapComponents.ShardCoordinator() requestersContainerFactoryArgs := requesterscontainer.FactoryArgs{ - RequesterConfig: pcf.config.Requesters, - ShardCoordinator: shardCoordinator, - Messenger: pcf.network.NetworkMessenger(), - Marshaller: pcf.coreData.InternalMarshalizer(), - Uint64ByteSliceConverter: pcf.coreData.Uint64ByteSliceConverter(), - OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), - CurrentNetworkEpochProvider: 
currentEpochProvider, - PreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), - PeersRatingHandler: pcf.network.PeersRatingHandler(), - SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, + RequesterConfig: pcf.config.Requesters, + ShardCoordinator: shardCoordinator, + MainMessenger: pcf.network.NetworkMessenger(), + FullArchiveMessenger: pcf.network.FullArchiveNetworkMessenger(), + Marshaller: pcf.coreData.InternalMarshalizer(), + Uint64ByteSliceConverter: pcf.coreData.Uint64ByteSliceConverter(), + OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), + CurrentNetworkEpochProvider: currentEpochProvider, + MainPreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), + FullArchivePreferredPeersHolder: pcf.network.FullArchivePreferredPeersHolderHandler(), + MainPeersRatingHandler: pcf.network.PeersRatingHandler(), + FullArchivePeersRatingHandler: pcf.network.FullArchivePeersRatingHandler(), + SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, } if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { @@ -1769,12 +1776,12 @@ func (pcf *processComponentsFactory) createExportFactoryHandler( CoreComponents: pcf.coreData, CryptoComponents: pcf.crypto, StatusCoreComponents: pcf.statusCoreComponents, + NetworkComponents: pcf.network, HeaderValidator: headerValidator, DataPool: pcf.data.Datapool(), StorageService: pcf.data.StorageService(), RequestHandler: requestHandler, ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - Messenger: pcf.network.NetworkMessenger(), ActiveAccountsDBs: accountsDBs, ExistingResolvers: resolversContainer, ExistingRequesters: requestersContainer, @@ -1790,14 +1797,11 @@ func (pcf *processComponentsFactory) createExportFactoryHandler( HeaderSigVerifier: headerSigVerifier, HeaderIntegrityVerifier: pcf.bootstrapComponents.HeaderIntegrityVerifier(), ValidityAttester: blockTracker, - InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), - OutputAntifloodHandler: 
pcf.network.OutputAntiFloodHandler(), RoundHandler: pcf.coreData.RoundHandler(), InterceptorDebugConfig: pcf.config.Debug.InterceptorResolver, MaxHardCapForMissingNodes: pcf.config.TrieSync.MaxHardCapForMissingNodes, NumConcurrentTrieSyncers: pcf.config.TrieSync.NumConcurrentTrieSyncers, TrieSyncerVersion: pcf.config.TrieSync.TrieSyncerVersion, - PeersRatingHandler: pcf.network.PeersRatingHandler(), } return updateFactory.NewExportHandlerFactory(argsExporter) } diff --git a/factory/processing/processComponentsHandler_test.go b/factory/processing/processComponentsHandler_test.go index 534fef02ec8..3bdb0aa20f7 100644 --- a/factory/processing/processComponentsHandler_test.go +++ b/factory/processing/processComponentsHandler_test.go @@ -90,6 +90,7 @@ func TestManagedProcessComponents_Create(t *testing.T) { require.True(t, check.IfNil(managedProcessComponents.ScheduledTxsExecutionHandler())) require.True(t, check.IfNil(managedProcessComponents.ESDTDataStorageHandlerForAPI())) require.True(t, check.IfNil(managedProcessComponents.ReceiptsRepository())) + require.True(t, check.IfNil(managedProcessComponents.FullArchivePeerShardMapper())) err := managedProcessComponents.Create() require.NoError(t, err) @@ -131,6 +132,7 @@ func TestManagedProcessComponents_Create(t *testing.T) { require.False(t, check.IfNil(managedProcessComponents.ScheduledTxsExecutionHandler())) require.False(t, check.IfNil(managedProcessComponents.ESDTDataStorageHandlerForAPI())) require.False(t, check.IfNil(managedProcessComponents.ReceiptsRepository())) + require.False(t, check.IfNil(managedProcessComponents.FullArchivePeerShardMapper())) require.Equal(t, factory.ProcessComponentsName, managedProcessComponents.String()) }) diff --git a/factory/processing/processComponents_test.go b/factory/processing/processComponents_test.go index af3cea67084..c5f7e218e9d 100644 --- a/factory/processing/processComponents_test.go +++ b/factory/processing/processComponents_test.go @@ -215,12 +215,14 @@ func 
createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto ManagedPeersHolderField: &testscommon.ManagedPeersHolderStub{}, }, Network: &testsMocks.NetworkComponentsStub{ - Messenger: &p2pmocks.MessengerStub{}, - FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{}, - InputAntiFlood: &testsMocks.P2PAntifloodHandlerStub{}, - OutputAntiFlood: &testsMocks.P2PAntifloodHandlerStub{}, - PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - PeersRatingHandlerField: &p2pmocks.PeersRatingHandlerStub{}, + Messenger: &p2pmocks.MessengerStub{}, + FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{}, + InputAntiFlood: &testsMocks.P2PAntifloodHandlerStub{}, + OutputAntiFlood: &testsMocks.P2PAntifloodHandlerStub{}, + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PeersRatingHandlerField: &p2pmocks.PeersRatingHandlerStub{}, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + FullArchivePeersRatingHandlerField: &p2pmocks.PeersRatingHandlerStub{}, }, BootstrapComponents: &mainFactoryMocks.BootstrapComponentsStub{ ShCoordinator: mock.NewMultiShardsCoordinatorMock(2), diff --git a/integrationTests/mock/networkComponentsMock.go b/integrationTests/mock/networkComponentsMock.go index 5cedae5b3fd..198fb751b88 100644 --- a/integrationTests/mock/networkComponentsMock.go +++ b/integrationTests/mock/networkComponentsMock.go @@ -20,6 +20,7 @@ type NetworkComponentsStub struct { FullArchiveNetworkMessengerField p2p.Messenger FullArchivePeersRatingHandlerField p2p.PeersRatingHandler FullArchivePeersRatingMonitorField p2p.PeersRatingMonitor + FullArchivePreferredPeersHolder factory.PreferredPeersHolderHandler } // PubKeyCacher - @@ -100,6 +101,11 @@ func (ncs *NetworkComponentsStub) FullArchivePeersRatingMonitor() p2p.PeersRatin return ncs.FullArchivePeersRatingMonitorField } +// FullArchivePreferredPeersHolderHandler - +func (ncs *NetworkComponentsStub) FullArchivePreferredPeersHolderHandler() factory.PreferredPeersHolderHandler { + return 
ncs.FullArchivePreferredPeersHolder +} + // String - func (ncs *NetworkComponentsStub) String() string { return "NetworkComponentsStub" diff --git a/integrationTests/multiShard/hardFork/hardFork_test.go b/integrationTests/multiShard/hardFork/hardFork_test.go index c71aa6441f0..42555d61b51 100644 --- a/integrationTests/multiShard/hardFork/hardFork_test.go +++ b/integrationTests/multiShard/hardFork/hardFork_test.go @@ -576,16 +576,23 @@ func createHardForkExporter( AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, } + networkComponents := integrationTests.GetDefaultNetworkComponents() + networkComponents.Messenger = node.MainMessenger + networkComponents.FullArchiveNetworkMessengerField = node.FullArchiveMessenger + networkComponents.PeersRatingHandlerField = node.MainPeersRatingHandler + networkComponents.FullArchivePeersRatingHandlerField = node.FullArchivePeersRatingHandler + networkComponents.InputAntiFlood = &mock.NilAntifloodHandler{} + networkComponents.OutputAntiFlood = &mock.NilAntifloodHandler{} argsExportHandler := factory.ArgsExporter{ CoreComponents: coreComponents, CryptoComponents: cryptoComponents, StatusCoreComponents: statusCoreComponents, + NetworkComponents: networkComponents, HeaderValidator: node.HeaderValidator, DataPool: node.DataPool, StorageService: node.Storage, RequestHandler: node.RequestHandler, ShardCoordinator: node.ShardCoordinator, - Messenger: node.MainMessenger, ActiveAccountsDBs: accountsDBs, ExportFolder: node.ExportFolder, ExportTriesStorageConfig: config.StorageConfig{ @@ -614,8 +621,6 @@ func createHardForkExporter( HeaderSigVerifier: node.HeaderSigVerifier, HeaderIntegrityVerifier: node.HeaderIntegrityVerifier, ValidityAttester: node.BlockTracker, - OutputAntifloodHandler: &mock.NilAntifloodHandler{}, - InputAntifloodHandler: &mock.NilAntifloodHandler{}, RoundHandler: &mock.RoundHandlerMock{}, InterceptorDebugConfig: config.InterceptorResolverDebugConfig{ Enabled: true, @@ -629,7 +634,6 @@ func 
createHardForkExporter( MaxHardCapForMissingNodes: 500, NumConcurrentTrieSyncers: 50, TrieSyncerVersion: 2, - PeersRatingHandler: node.PeersRatingHandler, CheckNodesOnDisk: false, } diff --git a/integrationTests/node/heartbeatV2/heartbeatV2_test.go b/integrationTests/node/heartbeatV2/heartbeatV2_test.go index d88c89e3e8f..c38b2d982db 100644 --- a/integrationTests/node/heartbeatV2/heartbeatV2_test.go +++ b/integrationTests/node/heartbeatV2/heartbeatV2_test.go @@ -143,7 +143,6 @@ func connectNodes(nodes []*integrationTests.TestHeartbeatNode, interactingNodes func checkMessages(t *testing.T, nodes []*integrationTests.TestHeartbeatNode, maxMessageAgeAllowed time.Duration) { numOfNodes := len(nodes) for i := 0; i < numOfNodes; i++ { - // TODO[Sorin]: check also the full archive cachers paCache := nodes[i].DataPool.PeerAuthentications() hbCache := nodes[i].DataPool.Heartbeats() diff --git a/integrationTests/realcomponents/processorRunner.go b/integrationTests/realcomponents/processorRunner.go index eb7e62c4bd9..5fe10df380e 100644 --- a/integrationTests/realcomponents/processorRunner.go +++ b/integrationTests/realcomponents/processorRunner.go @@ -167,18 +167,19 @@ func (pr *ProcessorRunner) createStatusCoreComponents(tb testing.TB) { func (pr *ProcessorRunner) createNetworkComponents(tb testing.TB) { argsNetwork := factoryNetwork.NetworkComponentsFactoryArgs{ - MainP2pConfig: *pr.Config.MainP2pConfig, - FullArchiveP2pConfig: *pr.Config.FullArchiveP2pConfig, - MainConfig: *pr.Config.GeneralConfig, - RatingsConfig: *pr.Config.RatingsConfig, - StatusHandler: pr.StatusCoreComponents.AppStatusHandler(), - Marshalizer: pr.CoreComponents.InternalMarshalizer(), - Syncer: pr.CoreComponents.SyncTimer(), - PreferredPeersSlices: make([]string, 0), - BootstrapWaitTime: 1, - NodeOperationMode: p2p.NormalOperation, - ConnectionWatcherType: "", - CryptoComponents: pr.CryptoComponents, + MainP2pConfig: *pr.Config.MainP2pConfig, + FullArchiveP2pConfig: *pr.Config.FullArchiveP2pConfig, + 
MainConfig: *pr.Config.GeneralConfig, + RatingsConfig: *pr.Config.RatingsConfig, + StatusHandler: pr.StatusCoreComponents.AppStatusHandler(), + Marshalizer: pr.CoreComponents.InternalMarshalizer(), + Syncer: pr.CoreComponents.SyncTimer(), + MainPreferredPeersSlices: make([]string, 0), + FullArchivePreferredPeersSlices: make([]string, 0), + BootstrapWaitTime: 1, + NodeOperationMode: p2p.NormalOperation, + ConnectionWatcherType: "", + CryptoComponents: pr.CryptoComponents, } networkFactory, err := factoryNetwork.NewNetworkComponentsFactory(argsNetwork) diff --git a/integrationTests/testHeartbeatNode.go b/integrationTests/testHeartbeatNode.go index da8775fb329..6d05087bd52 100644 --- a/integrationTests/testHeartbeatNode.go +++ b/integrationTests/testHeartbeatNode.go @@ -462,7 +462,8 @@ func (thn *TestHeartbeatNode) initResolversAndRequesters() { payloadValidator, _ := validator.NewPeerAuthenticationPayloadValidator(thn.heartbeatExpiryTimespanInSec) resolverContainerFactoryArgs := resolverscontainer.FactoryArgs{ ShardCoordinator: thn.ShardCoordinator, - Messenger: thn.MainMessenger, + MainMessenger: thn.MainMessenger, + FullArchiveMessenger: thn.FullArchiveMessenger, Store: thn.Storage, Marshalizer: TestMarshaller, DataPools: thn.DataPool, @@ -473,12 +474,13 @@ func (thn *TestHeartbeatNode) initResolversAndRequesters() { return &trieMock.TrieStub{} }, }, - SizeCheckDelta: 100, - InputAntifloodHandler: &mock.NilAntifloodHandler{}, - OutputAntifloodHandler: &mock.NilAntifloodHandler{}, - NumConcurrentResolvingJobs: 10, - PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - PayloadValidator: payloadValidator, + SizeCheckDelta: 100, + InputAntifloodHandler: &mock.NilAntifloodHandler{}, + OutputAntifloodHandler: &mock.NilAntifloodHandler{}, + NumConcurrentResolvingJobs: 10, + MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PayloadValidator: payloadValidator, } requestersContainerFactoryArgs := 
requesterscontainer.FactoryArgs{ @@ -486,15 +488,18 @@ func (thn *TestHeartbeatNode) initResolversAndRequesters() { NumCrossShardPeers: 2, NumTotalPeers: 3, NumFullHistoryPeers: 3}, - ShardCoordinator: thn.ShardCoordinator, - Messenger: thn.MainMessenger, - Marshaller: TestMarshaller, - Uint64ByteSliceConverter: TestUint64Converter, - OutputAntifloodHandler: &mock.NilAntifloodHandler{}, - CurrentNetworkEpochProvider: &mock.CurrentNetworkEpochProviderStub{}, - PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - PeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, - SizeCheckDelta: 0, + ShardCoordinator: thn.ShardCoordinator, + MainMessenger: thn.MainMessenger, + FullArchiveMessenger: thn.FullArchiveMessenger, + Marshaller: TestMarshaller, + Uint64ByteSliceConverter: TestUint64Converter, + OutputAntifloodHandler: &mock.NilAntifloodHandler{}, + CurrentNetworkEpochProvider: &mock.CurrentNetworkEpochProviderStub{}, + MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + MainPeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, + FullArchivePeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, + SizeCheckDelta: 0, } if thn.ShardCoordinator.SelfId() == core.MetachainShardId { diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 303bb2f7a40..45529591a96 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -389,12 +389,13 @@ type TestProcessorNode struct { EnableEpochsHandler common.EnableEpochsHandler UseValidVmBlsSigVerifier bool - TransactionLogProcessor process.TransactionLogProcessor - PeersRatingHandler p2p.PeersRatingHandler - PeersRatingMonitor p2p.PeersRatingMonitor - HardforkTrigger node.HardforkTrigger - AppStatusHandler core.AppStatusHandler - StatusMetrics external.StatusMetricsHandler + TransactionLogProcessor process.TransactionLogProcessor + MainPeersRatingHandler p2p.PeersRatingHandler + 
FullArchivePeersRatingHandler p2p.PeersRatingHandler + PeersRatingMonitor p2p.PeersRatingMonitor + HardforkTrigger node.HardforkTrigger + AppStatusHandler core.AppStatusHandler + StatusMetrics external.StatusMetricsHandler } // CreatePkBytes creates 'numShards' public key-like byte slices @@ -471,32 +472,33 @@ func newBaseTestProcessorNode(args ArgTestProcessorNode) *TestProcessorNode { logsProcessor, _ := transactionLog.NewTxLogProcessor(transactionLog.ArgTxLogProcessor{Marshalizer: TestMarshalizer}) tpn := &TestProcessorNode{ - ShardCoordinator: shardCoordinator, - MainMessenger: messenger, - FullArchiveMessenger: &p2pmocks.MessengerStub{}, // TODO[Sorin]: inject a proper messenger when all pieces are done to test this network as well, - NodeOperationMode: p2p.NormalOperation, - NodesCoordinator: nodesCoordinatorInstance, - ChainID: ChainID, - MinTransactionVersion: MinTransactionVersion, - NodesSetup: nodesSetup, - HistoryRepository: &dblookupextMock.HistoryRepositoryStub{}, - EpochNotifier: genericEpochNotifier, - EnableEpochsHandler: enableEpochsHandler, - WasmVMChangeLocker: &sync.RWMutex{}, - TransactionLogProcessor: logsProcessor, - Bootstrapper: mock.NewTestBootstrapperMock(), - PeersRatingHandler: peersRatingHandler, - MainPeerShardMapper: mock.NewNetworkShardingCollectorMock(), - FullArchivePeerShardMapper: mock.NewNetworkShardingCollectorMock(), - EnableEpochs: *epochsConfig, - UseValidVmBlsSigVerifier: args.WithBLSSigVerifier, - StorageBootstrapper: &mock.StorageBootstrapperMock{}, - BootstrapStorer: &mock.BoostrapStorerMock{}, - RatingsData: args.RatingsData, - EpochStartNotifier: args.EpochStartSubscriber, - GuardedAccountHandler: &guardianMocks.GuardedAccountHandlerStub{}, - AppStatusHandler: appStatusHandler, - PeersRatingMonitor: peersRatingMonitor, + ShardCoordinator: shardCoordinator, + MainMessenger: messenger, + FullArchiveMessenger: &p2pmocks.MessengerStub{}, // TODO[Sorin]: inject a proper messenger when all pieces are done to test this 
network as well, + NodeOperationMode: p2p.NormalOperation, + NodesCoordinator: nodesCoordinatorInstance, + ChainID: ChainID, + MinTransactionVersion: MinTransactionVersion, + NodesSetup: nodesSetup, + HistoryRepository: &dblookupextMock.HistoryRepositoryStub{}, + EpochNotifier: genericEpochNotifier, + EnableEpochsHandler: enableEpochsHandler, + WasmVMChangeLocker: &sync.RWMutex{}, + TransactionLogProcessor: logsProcessor, + Bootstrapper: mock.NewTestBootstrapperMock(), + MainPeersRatingHandler: peersRatingHandler, + FullArchivePeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, + MainPeerShardMapper: mock.NewNetworkShardingCollectorMock(), + FullArchivePeerShardMapper: mock.NewNetworkShardingCollectorMock(), + EnableEpochs: *epochsConfig, + UseValidVmBlsSigVerifier: args.WithBLSSigVerifier, + StorageBootstrapper: &mock.StorageBootstrapperMock{}, + BootstrapStorer: &mock.BoostrapStorerMock{}, + RatingsData: args.RatingsData, + EpochStartNotifier: args.EpochStartSubscriber, + GuardedAccountHandler: &guardianMocks.GuardedAccountHandlerStub{}, + AppStatusHandler: appStatusHandler, + PeersRatingMonitor: peersRatingMonitor, } tpn.NodeKeys = args.NodeKeys @@ -1351,22 +1353,25 @@ func (tpn *TestProcessorNode) initResolvers() { _ = tpn.MainMessenger.CreateTopic(common.ConsensusTopic+tpn.ShardCoordinator.CommunicationIdentifier(tpn.ShardCoordinator.SelfId()), true) payloadValidator, _ := validator.NewPeerAuthenticationPayloadValidator(60) preferredPeersHolder, _ := p2pFactory.NewPeersHolder([]string{}) + fullArchivePreferredPeersHolder, _ := p2pFactory.NewPeersHolder([]string{}) resolverContainerFactory := resolverscontainer.FactoryArgs{ - ShardCoordinator: tpn.ShardCoordinator, - Messenger: tpn.MainMessenger, - Store: tpn.Storage, - Marshalizer: TestMarshalizer, - DataPools: tpn.DataPool, - Uint64ByteSliceConverter: TestUint64Converter, - DataPacker: dataPacker, - TriesContainer: tpn.TrieContainer, - SizeCheckDelta: 100, - InputAntifloodHandler: 
&mock.NilAntifloodHandler{}, - OutputAntifloodHandler: &mock.NilAntifloodHandler{}, - NumConcurrentResolvingJobs: 10, - PreferredPeersHolder: preferredPeersHolder, - PayloadValidator: payloadValidator, + ShardCoordinator: tpn.ShardCoordinator, + MainMessenger: tpn.MainMessenger, + FullArchiveMessenger: tpn.FullArchiveMessenger, + Store: tpn.Storage, + Marshalizer: TestMarshalizer, + DataPools: tpn.DataPool, + Uint64ByteSliceConverter: TestUint64Converter, + DataPacker: dataPacker, + TriesContainer: tpn.TrieContainer, + SizeCheckDelta: 100, + InputAntifloodHandler: &mock.NilAntifloodHandler{}, + OutputAntifloodHandler: &mock.NilAntifloodHandler{}, + NumConcurrentResolvingJobs: 10, + MainPreferredPeersHolder: preferredPeersHolder, + FullArchivePreferredPeersHolder: fullArchivePreferredPeersHolder, + PayloadValidator: payloadValidator, } var err error @@ -1390,15 +1395,18 @@ func (tpn *TestProcessorNode) initRequesters() { NumTotalPeers: 3, NumFullHistoryPeers: 3, }, - ShardCoordinator: tpn.ShardCoordinator, - Messenger: tpn.MainMessenger, - Marshaller: TestMarshaller, - Uint64ByteSliceConverter: TestUint64Converter, - OutputAntifloodHandler: &mock.NilAntifloodHandler{}, - CurrentNetworkEpochProvider: &mock.CurrentNetworkEpochProviderStub{}, - PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - PeersRatingHandler: tpn.PeersRatingHandler, - SizeCheckDelta: 0, + ShardCoordinator: tpn.ShardCoordinator, + MainMessenger: tpn.MainMessenger, + FullArchiveMessenger: tpn.FullArchiveMessenger, + Marshaller: TestMarshaller, + Uint64ByteSliceConverter: TestUint64Converter, + OutputAntifloodHandler: &mock.NilAntifloodHandler{}, + CurrentNetworkEpochProvider: &mock.CurrentNetworkEpochProviderStub{}, + MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + MainPeersRatingHandler: tpn.MainPeersRatingHandler, + FullArchivePeersRatingHandler: tpn.FullArchivePeersRatingHandler, + SizeCheckDelta: 0, } if 
tpn.ShardCoordinator.SelfId() == core.MetachainShardId { @@ -2403,7 +2411,8 @@ func (tpn *TestProcessorNode) initNode() { networkComponents := GetDefaultNetworkComponents() networkComponents.Messenger = tpn.MainMessenger networkComponents.FullArchiveNetworkMessengerField = tpn.FullArchiveMessenger - networkComponents.PeersRatingHandlerField = tpn.PeersRatingHandler + networkComponents.PeersRatingHandlerField = tpn.MainPeersRatingHandler + networkComponents.FullArchivePeersRatingHandlerField = tpn.FullArchivePeersRatingHandler networkComponents.PeersRatingMonitorField = tpn.PeersRatingMonitor tpn.Node, err = node.NewNode( @@ -3253,12 +3262,16 @@ func GetDefaultStateComponents() *testFactory.StateComponentsMock { // GetDefaultNetworkComponents - func GetDefaultNetworkComponents() *mock.NetworkComponentsStub { return &mock.NetworkComponentsStub{ - Messenger: &p2pmocks.MessengerStub{}, - InputAntiFlood: &mock.P2PAntifloodHandlerStub{}, - OutputAntiFlood: &mock.P2PAntifloodHandlerStub{}, - PeerBlackList: &mock.PeerBlackListCacherStub{}, - PeersRatingHandlerField: &p2pmocks.PeersRatingHandlerStub{}, - PeersRatingMonitorField: &p2pmocks.PeersRatingMonitorStub{}, + Messenger: &p2pmocks.MessengerStub{}, + InputAntiFlood: &mock.P2PAntifloodHandlerStub{}, + OutputAntiFlood: &mock.P2PAntifloodHandlerStub{}, + PeerBlackList: &mock.PeerBlackListCacherStub{}, + PeersRatingHandlerField: &p2pmocks.PeersRatingHandlerStub{}, + PeersRatingMonitorField: &p2pmocks.PeersRatingMonitorStub{}, + FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{}, + FullArchivePeersRatingHandlerField: &p2pmocks.PeersRatingHandlerStub{}, + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, } } diff --git a/node/mock/factory/networkComponentsMock.go b/node/mock/factory/networkComponentsMock.go index ae42eeda2e7..1422633a13c 100644 --- a/node/mock/factory/networkComponentsMock.go +++ b/node/mock/factory/networkComponentsMock.go @@ -18,6 
+18,7 @@ type NetworkComponentsMock struct { FullArchiveNetworkMessengerField p2p.Messenger FullArchivePeersRatingHandlerField p2p.PeersRatingHandler FullArchivePeersRatingMonitorField p2p.PeersRatingMonitor + FullArchivePreferredPeersHolder factory.PreferredPeersHolderHandler } // PubKeyCacher - @@ -100,6 +101,11 @@ func (ncm *NetworkComponentsMock) String() string { return "NetworkComponentsMock" } +// FullArchivePreferredPeersHolderHandler - +func (ncm *NetworkComponentsMock) FullArchivePreferredPeersHolderHandler() factory.PreferredPeersHolderHandler { + return ncm.FullArchivePreferredPeersHolder +} + // IsInterfaceNil - func (ncm *NetworkComponentsMock) IsInterfaceNil() bool { return ncm == nil diff --git a/node/nodeRunner.go b/node/nodeRunner.go index 410aaf16661..3cf5a039da5 100644 --- a/node/nodeRunner.go +++ b/node/nodeRunner.go @@ -1385,18 +1385,19 @@ func (nr *nodeRunner) CreateManagedNetworkComponents( cryptoComponents mainFactory.CryptoComponentsHolder, ) (mainFactory.NetworkComponentsHandler, error) { networkComponentsFactoryArgs := networkComp.NetworkComponentsFactoryArgs{ - MainP2pConfig: *nr.configs.MainP2pConfig, - FullArchiveP2pConfig: *nr.configs.FullArchiveP2pConfig, - MainConfig: *nr.configs.GeneralConfig, - RatingsConfig: *nr.configs.RatingsConfig, - StatusHandler: statusCoreComponents.AppStatusHandler(), - Marshalizer: coreComponents.InternalMarshalizer(), - Syncer: coreComponents.SyncTimer(), - PreferredPeersSlices: nr.configs.PreferencesConfig.Preferences.PreferredConnections, - BootstrapWaitTime: common.TimeToWaitForP2PBootstrap, - NodeOperationMode: p2p.NormalOperation, - ConnectionWatcherType: nr.configs.PreferencesConfig.Preferences.ConnectionWatcherType, - CryptoComponents: cryptoComponents, + MainP2pConfig: *nr.configs.MainP2pConfig, + FullArchiveP2pConfig: *nr.configs.FullArchiveP2pConfig, + MainConfig: *nr.configs.GeneralConfig, + RatingsConfig: *nr.configs.RatingsConfig, + StatusHandler: statusCoreComponents.AppStatusHandler(), + 
Marshalizer: coreComponents.InternalMarshalizer(), + Syncer: coreComponents.SyncTimer(), + MainPreferredPeersSlices: nr.configs.PreferencesConfig.Preferences.PreferredConnections, + FullArchivePreferredPeersSlices: nr.configs.PreferencesConfig.Preferences.PreferredFullArchiveConnections, + BootstrapWaitTime: common.TimeToWaitForP2PBootstrap, + NodeOperationMode: p2p.NormalOperation, + ConnectionWatcherType: nr.configs.PreferencesConfig.Preferences.ConnectionWatcherType, + CryptoComponents: cryptoComponents, } if nr.configs.ImportDbConfig.IsImportDBMode { networkComponentsFactoryArgs.BootstrapWaitTime = 0 diff --git a/p2p/disabled/networkMessenger.go b/p2p/disabled/networkMessenger.go index 3db01a8c9bf..34364812bcb 100644 --- a/p2p/disabled/networkMessenger.go +++ b/p2p/disabled/networkMessenger.go @@ -91,9 +91,9 @@ func (netMes *networkMessenger) ConnectToPeer(_ string) error { return nil } -// IsConnected returns true as it is disabled +// IsConnected returns false as it is disabled func (netMes *networkMessenger) IsConnected(_ core.PeerID) bool { - return true + return false } // ConnectedPeers returns an empty slice as it is disabled diff --git a/update/errors.go b/update/errors.go index 938ae2020ee..8a23b6d2dce 100644 --- a/update/errors.go +++ b/update/errors.go @@ -295,3 +295,6 @@ var ErrNilAddressConverter = errors.New("nil address converter") // ErrNilEnableEpochsHandler signals that a nil enable epochs handler was provided var ErrNilEnableEpochsHandler = errors.New("nil enable epochs handler") + +// ErrNilNetworkComponents signals that a nil network components instance was provided +var ErrNilNetworkComponents = errors.New("nil network components") diff --git a/update/factory/exportHandlerFactory.go b/update/factory/exportHandlerFactory.go index bb80be0101a..46f1b679d4b 100644 --- a/update/factory/exportHandlerFactory.go +++ b/update/factory/exportHandlerFactory.go @@ -15,8 +15,8 @@ import ( "github.com/multiversx/mx-chain-go/epochStart" 
"github.com/multiversx/mx-chain-go/epochStart/notifier" "github.com/multiversx/mx-chain-go/epochStart/shardchain" + mxFactory "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/genesis/process/disabled" - "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" @@ -40,12 +40,12 @@ type ArgsExporter struct { CoreComponents process.CoreComponentsHolder CryptoComponents process.CryptoComponentsHolder StatusCoreComponents process.StatusCoreComponentsHolder + NetworkComponents mxFactory.NetworkComponentsHolder HeaderValidator epochStart.HeaderValidator DataPool dataRetriever.PoolsHolder StorageService dataRetriever.StorageService RequestHandler process.RequestHandler ShardCoordinator sharding.Coordinator - Messenger p2p.Messenger ActiveAccountsDBs map[state.AccountsDbIdentifier]state.AccountsAdapter ExistingResolvers dataRetriever.ResolversContainer ExistingRequesters dataRetriever.RequestersContainer @@ -61,10 +61,7 @@ type ArgsExporter struct { HeaderSigVerifier process.InterceptedHeaderSigVerifier HeaderIntegrityVerifier process.HeaderIntegrityVerifier ValidityAttester process.ValidityAttester - InputAntifloodHandler process.P2PAntifloodHandler - OutputAntifloodHandler process.P2PAntifloodHandler RoundHandler process.RoundHandler - PeersRatingHandler dataRetriever.PeersRatingHandler InterceptorDebugConfig config.InterceptorResolverDebugConfig MaxHardCapForMissingNodes int NumConcurrentTrieSyncers int @@ -73,15 +70,15 @@ type ArgsExporter struct { } type exportHandlerFactory struct { - CoreComponents process.CoreComponentsHolder - CryptoComponents process.CryptoComponentsHolder + coreComponents process.CoreComponentsHolder + cryptoComponents process.CryptoComponentsHolder statusCoreComponents process.StatusCoreComponentsHolder + networkComponents mxFactory.NetworkComponentsHolder headerValidator 
epochStart.HeaderValidator dataPool dataRetriever.PoolsHolder storageService dataRetriever.StorageService requestHandler process.RequestHandler shardCoordinator sharding.Coordinator - messenger p2p.Messenger activeAccountsDBs map[state.AccountsDbIdentifier]state.AccountsAdapter exportFolder string exportTriesStorageConfig config.StorageConfig @@ -101,10 +98,7 @@ type exportHandlerFactory struct { validityAttester process.ValidityAttester resolverContainer dataRetriever.ResolversContainer requestersContainer dataRetriever.RequestersContainer - inputAntifloodHandler process.P2PAntifloodHandler - outputAntifloodHandler process.P2PAntifloodHandler roundHandler process.RoundHandler - peersRatingHandler dataRetriever.PeersRatingHandler interceptorDebugConfig config.InterceptorResolverDebugConfig maxHardCapForMissingNodes int numConcurrentTrieSyncers int @@ -120,6 +114,9 @@ func NewExportHandlerFactory(args ArgsExporter) (*exportHandlerFactory, error) { if check.IfNil(args.CoreComponents) { return nil, update.ErrNilCoreComponents } + if check.IfNil(args.NetworkComponents) { + return nil, update.ErrNilNetworkComponents + } if check.IfNil(args.CryptoComponents) { return nil, update.ErrNilCryptoComponents } @@ -144,7 +141,7 @@ func NewExportHandlerFactory(args ArgsExporter) (*exportHandlerFactory, error) { if check.IfNil(args.RequestHandler) { return nil, update.ErrNilRequestHandler } - if check.IfNil(args.Messenger) { + if check.IfNil(args.NetworkComponents) { return nil, update.ErrNilMessenger } if args.ActiveAccountsDBs == nil { @@ -205,18 +202,9 @@ func NewExportHandlerFactory(args ArgsExporter) (*exportHandlerFactory, error) { if check.IfNil(args.CoreComponents.TxMarshalizer()) { return nil, update.ErrNilMarshalizer } - if check.IfNil(args.InputAntifloodHandler) { - return nil, update.ErrNilAntiFloodHandler - } - if check.IfNil(args.OutputAntifloodHandler) { - return nil, update.ErrNilAntiFloodHandler - } if check.IfNil(args.RoundHandler) { return nil, 
update.ErrNilRoundHandler } - if check.IfNil(args.PeersRatingHandler) { - return nil, update.ErrNilPeersRatingHandler - } if check.IfNil(args.CoreComponents.TxSignHasher()) { return nil, update.ErrNilHasher } @@ -238,14 +226,14 @@ func NewExportHandlerFactory(args ArgsExporter) (*exportHandlerFactory, error) { } e := &exportHandlerFactory{ - CoreComponents: args.CoreComponents, - CryptoComponents: args.CryptoComponents, + coreComponents: args.CoreComponents, + cryptoComponents: args.CryptoComponents, + networkComponents: args.NetworkComponents, headerValidator: args.HeaderValidator, dataPool: args.DataPool, storageService: args.StorageService, requestHandler: args.RequestHandler, shardCoordinator: args.ShardCoordinator, - messenger: args.Messenger, activeAccountsDBs: args.ActiveAccountsDBs, exportFolder: args.ExportFolder, exportTriesStorageConfig: args.ExportTriesStorageConfig, @@ -261,11 +249,8 @@ func NewExportHandlerFactory(args ArgsExporter) (*exportHandlerFactory, error) { headerSigVerifier: args.HeaderSigVerifier, headerIntegrityVerifier: args.HeaderIntegrityVerifier, validityAttester: args.ValidityAttester, - inputAntifloodHandler: args.InputAntifloodHandler, - outputAntifloodHandler: args.OutputAntifloodHandler, maxTrieLevelInMemory: args.MaxTrieLevelInMemory, roundHandler: args.RoundHandler, - peersRatingHandler: args.PeersRatingHandler, interceptorDebugConfig: args.InterceptorDebugConfig, maxHardCapForMissingNodes: args.MaxHardCapForMissingNodes, numConcurrentTrieSyncers: args.NumConcurrentTrieSyncers, @@ -300,10 +285,10 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, error) { return nil, err } argsEpochTrigger := shardchain.ArgsShardEpochStartTrigger{ - Marshalizer: e.CoreComponents.InternalMarshalizer(), - Hasher: e.CoreComponents.Hasher(), + Marshalizer: e.coreComponents.InternalMarshalizer(), + Hasher: e.coreComponents.Hasher(), HeaderValidator: e.headerValidator, - Uint64Converter: e.CoreComponents.Uint64ByteSliceConverter(), + 
Uint64Converter: e.coreComponents.Uint64ByteSliceConverter(), DataPool: e.dataPool, Storage: e.storageService, RequestHandler: e.requestHandler, @@ -314,7 +299,7 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, error) { PeerMiniBlocksSyncer: peerMiniBlocksSyncer, RoundHandler: e.roundHandler, AppStatusHandler: e.statusCoreComponents.AppStatusHandler(), - EnableEpochsHandler: e.CoreComponents.EnableEpochsHandler(), + EnableEpochsHandler: e.coreComponents.EnableEpochsHandler(), } epochHandler, err := shardchain.NewEpochStartTrigger(&argsEpochTrigger) if err != nil { @@ -324,11 +309,11 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, error) { argsDataTrieFactory := ArgsNewDataTrieFactory{ StorageConfig: e.exportTriesStorageConfig, SyncFolder: e.exportFolder, - Marshalizer: e.CoreComponents.InternalMarshalizer(), - Hasher: e.CoreComponents.Hasher(), + Marshalizer: e.coreComponents.InternalMarshalizer(), + Hasher: e.coreComponents.Hasher(), ShardCoordinator: e.shardCoordinator, MaxTrieLevelInMemory: e.maxTrieLevelInMemory, - EnableEpochsHandler: e.CoreComponents.EnableEpochsHandler(), + EnableEpochsHandler: e.coreComponents.EnableEpochsHandler(), } dataTriesContainerFactory, err := NewDataTrieFactory(argsDataTrieFactory) if err != nil { @@ -351,13 +336,14 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, error) { argsResolvers := ArgsNewResolversContainerFactory{ ShardCoordinator: e.shardCoordinator, - Messenger: e.messenger, - Marshalizer: e.CoreComponents.InternalMarshalizer(), + MainMessenger: e.networkComponents.NetworkMessenger(), + FullArchiveMessenger: e.networkComponents.FullArchiveNetworkMessenger(), + Marshalizer: e.coreComponents.InternalMarshalizer(), DataTrieContainer: dataTries, ExistingResolvers: e.existingResolvers, NumConcurrentResolvingJobs: 100, - InputAntifloodHandler: e.inputAntifloodHandler, - OutputAntifloodHandler: e.outputAntifloodHandler, + InputAntifloodHandler: 
e.networkComponents.InputAntiFloodHandler(), + OutputAntifloodHandler: e.networkComponents.OutputAntiFloodHandler(), } resolversFactory, err := NewResolversContainerFactory(argsResolvers) if err != nil { @@ -378,12 +364,14 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, error) { }) argsRequesters := ArgsRequestersContainerFactory{ - ShardCoordinator: e.shardCoordinator, - Messenger: e.messenger, - Marshaller: e.CoreComponents.InternalMarshalizer(), - ExistingRequesters: e.existingRequesters, - OutputAntifloodHandler: e.outputAntifloodHandler, - PeersRatingHandler: e.peersRatingHandler, + ShardCoordinator: e.shardCoordinator, + MainMessenger: e.networkComponents.NetworkMessenger(), + FullArchiveMessenger: e.networkComponents.FullArchiveNetworkMessenger(), + Marshaller: e.coreComponents.InternalMarshalizer(), + ExistingRequesters: e.existingRequesters, + OutputAntifloodHandler: e.networkComponents.OutputAntiFloodHandler(), + MainPeersRatingHandler: e.networkComponents.PeersRatingHandler(), + FullArchivePeersRatingHandler: e.networkComponents.FullArchivePeersRatingHandler(), } requestersFactory, err := NewRequestersContainerFactory(argsRequesters) if err != nil { @@ -407,8 +395,8 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, error) { TrieCacher: e.dataPool.TrieNodes(), RequestHandler: e.requestHandler, ShardCoordinator: e.shardCoordinator, - Hasher: e.CoreComponents.Hasher(), - Marshalizer: e.CoreComponents.InternalMarshalizer(), + Hasher: e.coreComponents.Hasher(), + Marshalizer: e.coreComponents.InternalMarshalizer(), TrieStorageManager: trieStorageManager, TimoutGettingTrieNode: common.TimeoutGettingTrieNodesInHardfork, MaxTrieLevelInMemory: e.maxTrieLevelInMemory, @@ -416,8 +404,8 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, error) { NumConcurrentTrieSyncers: e.numConcurrentTrieSyncers, TrieSyncerVersion: e.trieSyncerVersion, CheckNodesOnDisk: e.checkNodesOnDisk, - AddressPubKeyConverter: 
e.CoreComponents.AddressPubKeyConverter(), - EnableEpochsHandler: e.CoreComponents.EnableEpochsHandler(), + AddressPubKeyConverter: e.coreComponents.AddressPubKeyConverter(), + EnableEpochsHandler: e.coreComponents.EnableEpochsHandler(), } accountsDBSyncerFactory, err := NewAccountsDBSContainerFactory(argsAccountsSyncers) if err != nil { @@ -431,11 +419,11 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, error) { argsNewHeadersSync := sync.ArgsNewHeadersSyncHandler{ StorageService: e.storageService, Cache: e.dataPool.Headers(), - Marshalizer: e.CoreComponents.InternalMarshalizer(), - Hasher: e.CoreComponents.Hasher(), + Marshalizer: e.coreComponents.InternalMarshalizer(), + Hasher: e.coreComponents.Hasher(), EpochHandler: epochHandler, RequestHandler: e.requestHandler, - Uint64Converter: e.CoreComponents.Uint64ByteSliceConverter(), + Uint64Converter: e.coreComponents.Uint64ByteSliceConverter(), ShardCoordinator: e.shardCoordinator, } epochStartHeadersSyncer, err := sync.NewHeadersSyncHandler(argsNewHeadersSync) @@ -460,7 +448,7 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, error) { argsMiniBlockSyncer := sync.ArgsNewPendingMiniBlocksSyncer{ Storage: storer, Cache: e.dataPool.MiniBlocks(), - Marshalizer: e.CoreComponents.InternalMarshalizer(), + Marshalizer: e.coreComponents.InternalMarshalizer(), RequestHandler: e.requestHandler, } epochStartMiniBlocksSyncer, err := sync.NewPendingMiniBlocksSyncer(argsMiniBlockSyncer) @@ -471,7 +459,7 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, error) { argsPendingTransactions := sync.ArgsNewTransactionsSyncer{ DataPools: e.dataPool, Storages: e.storageService, - Marshaller: e.CoreComponents.InternalMarshalizer(), + Marshaller: e.coreComponents.InternalMarshalizer(), RequestHandler: e.requestHandler, } epochStartTransactionsSyncer, err := sync.NewTransactionsSyncer(argsPendingTransactions) @@ -516,7 +504,7 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, 
error) { arg := storing.ArgHardforkStorer{ KeysStore: keysStorer, KeyValue: keysVals, - Marshalizer: e.CoreComponents.InternalMarshalizer(), + Marshalizer: e.coreComponents.InternalMarshalizer(), } hs, err := storing.NewHardforkStorer(arg) if err != nil { @@ -526,13 +514,13 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, error) { argsExporter := genesis.ArgsNewStateExporter{ ShardCoordinator: e.shardCoordinator, StateSyncer: stateSyncer, - Marshalizer: e.CoreComponents.InternalMarshalizer(), + Marshalizer: e.coreComponents.InternalMarshalizer(), HardforkStorer: hs, - Hasher: e.CoreComponents.Hasher(), + Hasher: e.coreComponents.Hasher(), ExportFolder: e.exportFolder, - ValidatorPubKeyConverter: e.CoreComponents.ValidatorPubKeyConverter(), - AddressPubKeyConverter: e.CoreComponents.AddressPubKeyConverter(), - GenesisNodesSetupHandler: e.CoreComponents.GenesisNodesSetup(), + ValidatorPubKeyConverter: e.coreComponents.ValidatorPubKeyConverter(), + AddressPubKeyConverter: e.coreComponents.AddressPubKeyConverter(), + GenesisNodesSetupHandler: e.coreComponents.GenesisNodesSetup(), } exportHandler, err := genesis.NewStateExporter(argsExporter) if err != nil { @@ -568,12 +556,12 @@ func (e *exportHandlerFactory) prepareFolders(folder string) error { func (e *exportHandlerFactory) createInterceptors() error { argsInterceptors := ArgsNewFullSyncInterceptorsContainerFactory{ - CoreComponents: e.CoreComponents, - CryptoComponents: e.CryptoComponents, + CoreComponents: e.coreComponents, + CryptoComponents: e.cryptoComponents, Accounts: e.accounts, ShardCoordinator: e.shardCoordinator, NodesCoordinator: e.nodesCoordinator, - Messenger: e.messenger, + Messenger: e.networkComponents.NetworkMessenger(), Store: e.storageService, DataPool: e.dataPool, MaxTxNonceDeltaAllowed: math.MaxInt32, @@ -587,7 +575,7 @@ func (e *exportHandlerFactory) createInterceptors() error { WhiteListHandler: e.whiteListHandler, WhiteListerVerifiedTxs: e.whiteListerVerifiedTxs, 
InterceptorsContainer: e.interceptorsContainer, - AntifloodHandler: e.inputAntifloodHandler, + AntifloodHandler: e.networkComponents.InputAntiFloodHandler(), } fullSyncInterceptors, err := NewFullSyncInterceptorsContainerFactory(argsInterceptors) if err != nil { diff --git a/update/factory/fullSyncRequestersContainerFactory.go b/update/factory/fullSyncRequestersContainerFactory.go index 237c43ad192..e3e0d89f3aa 100644 --- a/update/factory/fullSyncRequestersContainerFactory.go +++ b/update/factory/fullSyncRequestersContainerFactory.go @@ -1,6 +1,8 @@ package factory import ( + "fmt" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/core/random" @@ -23,23 +25,27 @@ const ( ) type requestersContainerFactory struct { - shardCoordinator sharding.Coordinator - messenger dataRetriever.TopicMessageHandler - marshaller marshal.Marshalizer - intRandomizer dataRetriever.IntRandomizer - container dataRetriever.RequestersContainer - outputAntifloodHandler dataRetriever.P2PAntifloodHandler - peersRatingHandler dataRetriever.PeersRatingHandler + shardCoordinator sharding.Coordinator + mainMessenger dataRetriever.TopicMessageHandler + fullArchiveMessenger dataRetriever.TopicMessageHandler + marshaller marshal.Marshalizer + intRandomizer dataRetriever.IntRandomizer + container dataRetriever.RequestersContainer + outputAntifloodHandler dataRetriever.P2PAntifloodHandler + mainPeersRatingHandler dataRetriever.PeersRatingHandler + fullArchivePeersRatingHandler dataRetriever.PeersRatingHandler } // ArgsRequestersContainerFactory defines the arguments for the requestersContainerFactory constructor type ArgsRequestersContainerFactory struct { - ShardCoordinator sharding.Coordinator - Messenger dataRetriever.TopicMessageHandler - Marshaller marshal.Marshalizer - ExistingRequesters dataRetriever.RequestersContainer - OutputAntifloodHandler dataRetriever.P2PAntifloodHandler - PeersRatingHandler 
dataRetriever.PeersRatingHandler + ShardCoordinator sharding.Coordinator + MainMessenger dataRetriever.TopicMessageHandler + FullArchiveMessenger dataRetriever.TopicMessageHandler + Marshaller marshal.Marshalizer + ExistingRequesters dataRetriever.RequestersContainer + OutputAntifloodHandler dataRetriever.P2PAntifloodHandler + MainPeersRatingHandler dataRetriever.PeersRatingHandler + FullArchivePeersRatingHandler dataRetriever.PeersRatingHandler } // NewRequestersContainerFactory creates a new container filled with topic requesters @@ -47,8 +53,11 @@ func NewRequestersContainerFactory(args ArgsRequestersContainerFactory) (*reques if check.IfNil(args.ShardCoordinator) { return nil, update.ErrNilShardCoordinator } - if check.IfNil(args.Messenger) { - return nil, update.ErrNilMessenger + if check.IfNil(args.MainMessenger) { + return nil, fmt.Errorf("%w on main network", update.ErrNilMessenger) + } + if check.IfNil(args.FullArchiveMessenger) { + return nil, fmt.Errorf("%w on full archive network", update.ErrNilMessenger) } if check.IfNil(args.Marshaller) { return nil, update.ErrNilMarshalizer @@ -59,18 +68,23 @@ func NewRequestersContainerFactory(args ArgsRequestersContainerFactory) (*reques if check.IfNil(args.OutputAntifloodHandler) { return nil, update.ErrNilAntiFloodHandler } - if check.IfNil(args.PeersRatingHandler) { - return nil, update.ErrNilPeersRatingHandler + if check.IfNil(args.MainPeersRatingHandler) { + return nil, fmt.Errorf("%w on main network", update.ErrNilPeersRatingHandler) + } + if check.IfNil(args.FullArchivePeersRatingHandler) { + return nil, fmt.Errorf("%w on full archive network", update.ErrNilPeersRatingHandler) } return &requestersContainerFactory{ - shardCoordinator: args.ShardCoordinator, - messenger: args.Messenger, - marshaller: args.Marshaller, - intRandomizer: &random.ConcurrentSafeIntRandomizer{}, - container: args.ExistingRequesters, - outputAntifloodHandler: args.OutputAntifloodHandler, - peersRatingHandler: args.PeersRatingHandler, 
+ shardCoordinator: args.ShardCoordinator, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, + marshaller: args.Marshaller, + intRandomizer: &random.ConcurrentSafeIntRandomizer{}, + container: args.ExistingRequesters, + outputAntifloodHandler: args.OutputAntifloodHandler, + mainPeersRatingHandler: args.MainPeersRatingHandler, + fullArchivePeersRatingHandler: args.FullArchivePeersRatingHandler, }, nil } @@ -145,7 +159,7 @@ func (rcf *requestersContainerFactory) createTrieNodesRequester(baseTopic string targetConsensusTopic := common.ConsensusTopic + targetShardCoordinator.CommunicationIdentifier(targetShardID) peerListCreator, err := topicsender.NewDiffPeerListCreator( - rcf.messenger, + rcf.mainMessenger, baseTopic, targetConsensusTopic, resolverscontainer.EmptyExcludePeersOnTopic, @@ -156,21 +170,24 @@ func (rcf *requestersContainerFactory) createTrieNodesRequester(baseTopic string arg := topicsender.ArgTopicRequestSender{ ArgBaseTopicSender: topicsender.ArgBaseTopicSender{ - Messenger: rcf.messenger, - TopicName: baseTopic, - OutputAntiflooder: rcf.outputAntifloodHandler, - PreferredPeersHolder: disabled.NewPreferredPeersHolder(), - TargetShardId: defaultTargetShardID, + MainMessenger: rcf.mainMessenger, + FullArchiveMessenger: rcf.fullArchiveMessenger, + TopicName: baseTopic, + OutputAntiflooder: rcf.outputAntifloodHandler, + MainPreferredPeersHolder: disabled.NewPreferredPeersHolder(), + FullArchivePreferredPeersHolder: disabled.NewPreferredPeersHolder(), + TargetShardId: defaultTargetShardID, }, - Marshaller: rcf.marshaller, - Randomizer: rcf.intRandomizer, - PeerListCreator: peerListCreator, - NumIntraShardPeers: numIntraShardPeers, - NumCrossShardPeers: numCrossShardPeers, - NumFullHistoryPeers: numFullHistoryPeers, - CurrentNetworkEpochProvider: disabled.NewCurrentNetworkEpochProviderHandler(), - SelfShardIdProvider: rcf.shardCoordinator, - PeersRatingHandler: rcf.peersRatingHandler, + Marshaller: rcf.marshaller, + 
Randomizer: rcf.intRandomizer, + PeerListCreator: peerListCreator, + NumIntraShardPeers: numIntraShardPeers, + NumCrossShardPeers: numCrossShardPeers, + NumFullHistoryPeers: numFullHistoryPeers, + CurrentNetworkEpochProvider: disabled.NewCurrentNetworkEpochProviderHandler(), + SelfShardIdProvider: rcf.shardCoordinator, + MainPeersRatingHandler: rcf.mainPeersRatingHandler, + FullArchivePeersRatingHandler: rcf.fullArchivePeersRatingHandler, } requestSender, err := topicsender.NewTopicRequestSender(arg) if err != nil { diff --git a/update/factory/fullSyncResolversContainerFactory.go b/update/factory/fullSyncResolversContainerFactory.go index 41214051282..b6865bfb009 100644 --- a/update/factory/fullSyncResolversContainerFactory.go +++ b/update/factory/fullSyncResolversContainerFactory.go @@ -1,6 +1,8 @@ package factory import ( + "fmt" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/core/throttler" @@ -20,7 +22,8 @@ const defaultTargetShardID = uint32(0) type resolversContainerFactory struct { shardCoordinator sharding.Coordinator - messenger dataRetriever.TopicMessageHandler + mainMessenger dataRetriever.TopicMessageHandler + fullArchiveMessenger dataRetriever.TopicMessageHandler marshalizer marshal.Marshalizer dataTrieContainer common.TriesHolder container dataRetriever.ResolversContainer @@ -32,7 +35,8 @@ type resolversContainerFactory struct { // ArgsNewResolversContainerFactory defines the arguments for the resolversContainerFactory constructor type ArgsNewResolversContainerFactory struct { ShardCoordinator sharding.Coordinator - Messenger dataRetriever.TopicMessageHandler + MainMessenger dataRetriever.TopicMessageHandler + FullArchiveMessenger dataRetriever.TopicMessageHandler Marshalizer marshal.Marshalizer DataTrieContainer common.TriesHolder ExistingResolvers dataRetriever.ResolversContainer @@ -46,8 +50,11 @@ func NewResolversContainerFactory(args 
ArgsNewResolversContainerFactory) (*resol if check.IfNil(args.ShardCoordinator) { return nil, update.ErrNilShardCoordinator } - if check.IfNil(args.Messenger) { - return nil, update.ErrNilMessenger + if check.IfNil(args.MainMessenger) { + return nil, fmt.Errorf("%w on main network", update.ErrNilMessenger) + } + if check.IfNil(args.FullArchiveMessenger) { + return nil, fmt.Errorf("%w on full archive network", update.ErrNilMessenger) } if check.IfNil(args.Marshalizer) { return nil, update.ErrNilMarshalizer @@ -65,7 +72,8 @@ func NewResolversContainerFactory(args ArgsNewResolversContainerFactory) (*resol } return &resolversContainerFactory{ shardCoordinator: args.ShardCoordinator, - messenger: args.Messenger, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, marshalizer: args.Marshalizer, dataTrieContainer: args.DataTrieContainer, container: args.ExistingResolvers, @@ -143,11 +151,13 @@ func (rcf *resolversContainerFactory) createTrieNodesResolver(baseTopic string, arg := topicsender.ArgTopicResolverSender{ ArgBaseTopicSender: topicsender.ArgBaseTopicSender{ - Messenger: rcf.messenger, - TopicName: baseTopic, - OutputAntiflooder: rcf.outputAntifloodHandler, - PreferredPeersHolder: disabled.NewPreferredPeersHolder(), - TargetShardId: defaultTargetShardID, + MainMessenger: rcf.mainMessenger, + FullArchiveMessenger: rcf.fullArchiveMessenger, + TopicName: baseTopic, + OutputAntiflooder: rcf.outputAntifloodHandler, + MainPreferredPeersHolder: disabled.NewPreferredPeersHolder(), + FullArchivePreferredPeersHolder: disabled.NewPreferredPeersHolder(), + TargetShardId: defaultTargetShardID, }, } resolverSender, err := topicsender.NewTopicResolverSender(arg) @@ -170,7 +180,12 @@ func (rcf *resolversContainerFactory) createTrieNodesResolver(baseTopic string, return nil, err } - err = rcf.messenger.RegisterMessageProcessor(resolver.RequestTopic(), common.HardforkResolversIdentifier, resolver) + err = 
rcf.mainMessenger.RegisterMessageProcessor(resolver.RequestTopic(), common.HardforkResolversIdentifier, resolver) + if err != nil { + return nil, err + } + + err = rcf.fullArchiveMessenger.RegisterMessageProcessor(resolver.RequestTopic(), common.HardforkResolversIdentifier, resolver) if err != nil { return nil, err } From d0cfc108a6841644ff254beb72779402c592aaea Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Wed, 14 Jun 2023 16:51:25 +0300 Subject: [PATCH 13/38] updated comment --- cmd/node/config/prefs.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/node/config/prefs.toml b/cmd/node/config/prefs.toml index 1fd8d43f000..1db13700071 100644 --- a/cmd/node/config/prefs.toml +++ b/cmd/node/config/prefs.toml @@ -29,7 +29,7 @@ PreferredConnections = [] # PreferredFullArchiveConnections holds an array containing valid ips or peer ids from nodes to connect with (in top of other connections) - # This is only considered on FullArchive mode + # This is only considered on FullArchive mode but each full archive preferred peer must be added to PreferredConnections as well # Example: # PreferredConnections = [ # "127.0.0.10", From a971271021a3a9153916042c5d70c86b51d45cff Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Wed, 14 Jun 2023 17:51:17 +0300 Subject: [PATCH 14/38] added new integration test for full archive network + peers rating --- .../consensus/consensusSigning_test.go | 3 +- integrationTests/consensus/consensus_test.go | 6 +- .../startInEpoch/startInEpoch_test.go | 2 +- .../node/heartbeatV2/heartbeatV2_test.go | 2 +- .../p2p/peersRating/peersRating_test.go | 120 ++++++++++++++++-- .../interceptedRequestHdr_test.go | 4 +- .../interceptedRequestTxBlockBody_test.go | 2 +- .../interceptedResolvedTx_test.go | 4 +- .../interceptedResolvedUnsignedTx_test.go | 2 +- .../state/stateTrieSync/stateTrieSync_test.go | 8 +- .../sync/edgeCases/edgeCases_test.go | 2 +- integrationTests/testConsensusNode.go | 59 ++++++--- 
integrationTests/testHeartbeatNode.go | 28 +++- integrationTests/testInitializer.go | 12 +- integrationTests/testProcessorNode.go | 86 ++++++++++--- 15 files changed, 266 insertions(+), 74 deletions(-) diff --git a/integrationTests/consensus/consensusSigning_test.go b/integrationTests/consensus/consensusSigning_test.go index 7566828ada1..68f85cde15c 100644 --- a/integrationTests/consensus/consensusSigning_test.go +++ b/integrationTests/consensus/consensusSigning_test.go @@ -79,7 +79,8 @@ func TestConsensusWithInvalidSigners(t *testing.T) { defer func() { for shardID := range nodes { for _, n := range nodes[shardID] { - _ = n.Messenger.Close() + _ = n.MainMessenger.Close() + _ = n.FullArchiveMessenger.Close() } } }() diff --git a/integrationTests/consensus/consensus_test.go b/integrationTests/consensus/consensus_test.go index f2530e562a7..a94c5717efe 100644 --- a/integrationTests/consensus/consensus_test.go +++ b/integrationTests/consensus/consensus_test.go @@ -234,7 +234,8 @@ func runFullConsensusTest(t *testing.T, consensusType string, numKeysOnEachNode defer func() { for shardID := range nodes { for _, n := range nodes[shardID] { - _ = n.Messenger.Close() + _ = n.MainMessenger.Close() + _ = n.FullArchiveMessenger.Close() } } }() @@ -296,7 +297,8 @@ func runConsensusWithNotEnoughValidators(t *testing.T, consensusType string) { defer func() { for shardID := range nodes { for _, n := range nodes[shardID] { - _ = n.Messenger.Close() + _ = n.MainMessenger.Close() + _ = n.FullArchiveMessenger.Close() } } }() diff --git a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go index 538a52018a9..6c2648a56ad 100644 --- a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go +++ b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go @@ -205,7 +205,7 @@ func testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui 
nodeToJoinLate.FullArchiveMessenger = &p2pmocks.MessengerStub{} for _, n := range nodes { - _ = n.ConnectTo(nodeToJoinLate) + _ = n.ConnectOnMain(nodeToJoinLate) } roundHandler := &mock.RoundHandlerMock{IndexField: int64(round)} diff --git a/integrationTests/node/heartbeatV2/heartbeatV2_test.go b/integrationTests/node/heartbeatV2/heartbeatV2_test.go index c38b2d982db..82fb2b276d5 100644 --- a/integrationTests/node/heartbeatV2/heartbeatV2_test.go +++ b/integrationTests/node/heartbeatV2/heartbeatV2_test.go @@ -135,7 +135,7 @@ func connectNodes(nodes []*integrationTests.TestHeartbeatNode, interactingNodes for j := i + 1; j < interactingNodes; j++ { src := nodes[i] dst := nodes[j] - _ = src.ConnectTo(dst) + _ = src.ConnectOnMain(dst) } } } diff --git a/integrationTests/p2p/peersRating/peersRating_test.go b/integrationTests/p2p/peersRating/peersRating_test.go index 212476a99c8..e03ab3307f5 100644 --- a/integrationTests/p2p/peersRating/peersRating_test.go +++ b/integrationTests/p2p/peersRating/peersRating_test.go @@ -10,6 +10,8 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-go/integrationTests" + "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process/factory" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -27,9 +29,9 @@ func TestPeersRatingAndResponsiveness(t *testing.T) { var numOfShards uint32 = 1 var shardID uint32 = 0 - resolverNode := createNodeWithPeersRatingHandler(shardID, numOfShards) - maliciousNode := createNodeWithPeersRatingHandler(shardID, numOfShards) - requesterNode := createNodeWithPeersRatingHandler(core.MetachainShardId, numOfShards) + resolverNode := createNodeWithPeersRatingHandler(shardID, numOfShards, p2p.NormalOperation) + maliciousNode := createNodeWithPeersRatingHandler(shardID, numOfShards, p2p.NormalOperation) + requesterNode := 
createNodeWithPeersRatingHandler(core.MetachainShardId, numOfShards, p2p.NormalOperation) defer func() { _ = resolverNode.MainMessenger.Close() @@ -38,9 +40,9 @@ func TestPeersRatingAndResponsiveness(t *testing.T) { }() time.Sleep(time.Second) - require.Nil(t, resolverNode.ConnectTo(maliciousNode)) - require.Nil(t, resolverNode.ConnectTo(requesterNode)) - require.Nil(t, maliciousNode.ConnectTo(requesterNode)) + require.Nil(t, resolverNode.ConnectOnMain(maliciousNode)) + require.Nil(t, resolverNode.ConnectOnMain(requesterNode)) + require.Nil(t, maliciousNode.ConnectOnMain(requesterNode)) time.Sleep(time.Second) hdr, hdrHash, hdrBuff := getHeader() @@ -65,7 +67,7 @@ func TestPeersRatingAndResponsiveness(t *testing.T) { resolverNode.DataPool.Headers().AddHeader(hdrHash, hdr) requestHeader(requesterNode, numOfRequests, hdrHash, resolverNode.ShardCoordinator.SelfId()) - peerRatingsMap := getRatingsMap(t, requesterNode) + peerRatingsMap := getRatingsMap(t, requesterNode.MainPeersRatingMonitor) // resolver node should have received and responded to numOfRequests initialResolverRating, exists := peerRatingsMap[resolverNode.MainMessenger.ID().Pretty()] require.True(t, exists) @@ -81,7 +83,7 @@ func TestPeersRatingAndResponsiveness(t *testing.T) { numOfRequests = 120 requestHeader(requesterNode, numOfRequests, hdrHash, resolverNode.ShardCoordinator.SelfId()) - peerRatingsMap = getRatingsMap(t, requesterNode) + peerRatingsMap = getRatingsMap(t, requesterNode.MainPeersRatingMonitor) // Resolver should have reached max limit and timestamps still update initialResolverRating, exists = peerRatingsMap[resolverNode.MainMessenger.ID().Pretty()] require.True(t, exists) @@ -98,7 +100,7 @@ func TestPeersRatingAndResponsiveness(t *testing.T) { numOfRequests = 10 requestHeader(requesterNode, numOfRequests, hdrHash, resolverNode.ShardCoordinator.SelfId()) - peerRatingsMap = getRatingsMap(t, requesterNode) + peerRatingsMap = getRatingsMap(t, requesterNode.MainPeersRatingMonitor) // 
resolver node should have the max rating + numOfRequests that didn't answer to resolverRating, exists := peerRatingsMap[resolverNode.MainMessenger.ID().Pretty()] require.True(t, exists) @@ -111,12 +113,106 @@ func TestPeersRatingAndResponsiveness(t *testing.T) { assert.Equal(t, finalMaliciousExpectedRating, maliciousRating) } -func createNodeWithPeersRatingHandler(shardID uint32, numShards uint32) *integrationTests.TestProcessorNode { +func TestPeersRatingAndResponsivenessOnFullArchive(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + var numOfShards uint32 = 1 + var shardID uint32 = 0 + resolverFullArchiveNode := createNodeWithPeersRatingHandler(shardID, numOfShards, p2p.FullArchiveMode) + requesterFullArchiveNode := createNodeWithPeersRatingHandler(core.MetachainShardId, numOfShards, p2p.FullArchiveMode) + regularNode := createNodeWithPeersRatingHandler(shardID, numOfShards, p2p.FullArchiveMode) + + defer func() { + _ = resolverFullArchiveNode.MainMessenger.Close() + _ = resolverFullArchiveNode.FullArchiveMessenger.Close() + _ = requesterFullArchiveNode.MainMessenger.Close() + _ = requesterFullArchiveNode.FullArchiveMessenger.Close() + _ = regularNode.MainMessenger.Close() + _ = regularNode.FullArchiveMessenger.Close() + }() + + // all nodes are connected on main network, but only the full archive resolver and requester are connected on full archive network + time.Sleep(time.Second) + require.Nil(t, resolverFullArchiveNode.ConnectOnFullArchive(requesterFullArchiveNode)) + require.Nil(t, resolverFullArchiveNode.ConnectOnMain(regularNode)) + require.Nil(t, requesterFullArchiveNode.ConnectOnMain(regularNode)) + time.Sleep(time.Second) + + hdr, hdrHash, hdrBuff := getHeader() + + // Broadcasts should not be considered for peers rating and should only be available on full archive network + topic := factory.ShardBlocksTopic + 
resolverFullArchiveNode.ShardCoordinator.CommunicationIdentifier(requesterFullArchiveNode.ShardCoordinator.SelfId()) + resolverFullArchiveNode.FullArchiveMessenger.Broadcast(topic, hdrBuff) + time.Sleep(time.Second) + // check that broadcasts were successful + _, err := requesterFullArchiveNode.DataPool.Headers().GetHeaderByHash(hdrHash) + assert.Nil(t, err) + _, err = regularNode.DataPool.Headers().GetHeaderByHash(hdrHash) + assert.NotNil(t, err) + // clean the above broadcast consequences + requesterFullArchiveNode.DataPool.Headers().RemoveHeaderByHash(hdrHash) + resolverFullArchiveNode.DataPool.Headers().RemoveHeaderByHash(hdrHash) + + // Broadcast on main network should also work and reach all nodes + topic = factory.ShardBlocksTopic + regularNode.ShardCoordinator.CommunicationIdentifier(requesterFullArchiveNode.ShardCoordinator.SelfId()) + regularNode.MainMessenger.Broadcast(topic, hdrBuff) + time.Sleep(time.Second) + // check that broadcasts were successful + _, err = requesterFullArchiveNode.DataPool.Headers().GetHeaderByHash(hdrHash) + assert.Nil(t, err) + _, err = resolverFullArchiveNode.DataPool.Headers().GetHeaderByHash(hdrHash) + assert.Nil(t, err) + // clean the above broadcast consequences + requesterFullArchiveNode.DataPool.Headers().RemoveHeaderByHash(hdrHash) + resolverFullArchiveNode.DataPool.Headers().RemoveHeaderByHash(hdrHash) + regularNode.DataPool.Headers().RemoveHeaderByHash(hdrHash) + + numOfRequests := 10 + // Add header to the resolver node's cache + resolverFullArchiveNode.DataPool.Headers().AddHeader(hdrHash, hdr) + epochProviderStub, ok := requesterFullArchiveNode.EpochProvider.(*mock.CurrentNetworkEpochProviderStub) + assert.True(t, ok) + epochProviderStub.EpochIsActiveInNetworkCalled = func(epoch uint32) bool { + return false // force the full archive requester to request from full archive network + } + requestHeader(requesterFullArchiveNode, numOfRequests, hdrHash, resolverFullArchiveNode.ShardCoordinator.SelfId()) + + 
peerRatingsMap := getRatingsMap(t, requesterFullArchiveNode.FullArchivePeersRatingMonitor) + // resolver node should have received and responded to numOfRequests + initialResolverRating, exists := peerRatingsMap[resolverFullArchiveNode.MainMessenger.ID().Pretty()] + require.True(t, exists) + initialResolverExpectedRating := fmt.Sprintf("%d", numOfRequests*(decreaseFactor+increaseFactor)) + assert.Equal(t, initialResolverExpectedRating, initialResolverRating) + // main nodes should not be found in this cacher + _, exists = peerRatingsMap[regularNode.MainMessenger.ID().Pretty()] + require.False(t, exists) + + // force the full archive requester to request the header from main network + // as it does not exists on the main resolver, it should only decrease its rating + epochProviderStub.EpochIsActiveInNetworkCalled = func(epoch uint32) bool { + return true // force the full archive requester to request from main network + } + requestHeader(requesterFullArchiveNode, numOfRequests, hdrHash, regularNode.ShardCoordinator.SelfId()) + peerRatingsMap = getRatingsMap(t, requesterFullArchiveNode.MainPeersRatingMonitor) + + _, exists = peerRatingsMap[resolverFullArchiveNode.MainMessenger.ID().Pretty()] + require.False(t, exists) // should not be any request on the main monitor to the full archive resolver + + mainResolverRating, exists := peerRatingsMap[regularNode.MainMessenger.ID().Pretty()] + require.True(t, exists) + mainResolverExpectedRating := fmt.Sprintf("%d", numOfRequests*decreaseFactor) + assert.Equal(t, mainResolverExpectedRating, mainResolverRating) +} + +func createNodeWithPeersRatingHandler(shardID uint32, numShards uint32, nodeOperationMode p2p.NodeOperation) *integrationTests.TestProcessorNode { tpn := integrationTests.NewTestProcessorNode(integrationTests.ArgTestProcessorNode{ MaxShards: numShards, NodeShardId: shardID, WithPeersRatingHandler: true, + NodeOperationMode: nodeOperationMode, }) return tpn @@ -147,8 +243,8 @@ func getHeader() (*block.Header, 
[]byte, []byte) { return hdr, hdrHash, hdrBuff } -func getRatingsMap(t *testing.T, node *integrationTests.TestProcessorNode) map[string]string { - peerRatingsStr := node.PeersRatingMonitor.GetConnectedPeersRatings() +func getRatingsMap(t *testing.T, monitor p2p.PeersRatingMonitor) map[string]string { + peerRatingsStr := monitor.GetConnectedPeersRatings() peerRatingsMap := make(map[string]string) err := json.Unmarshal([]byte(peerRatingsStr), &peerRatingsMap) diff --git a/integrationTests/singleShard/block/interceptedRequestHdr/interceptedRequestHdr_test.go b/integrationTests/singleShard/block/interceptedRequestHdr/interceptedRequestHdr_test.go index 89c75c645ff..375ce48ffce 100644 --- a/integrationTests/singleShard/block/interceptedRequestHdr/interceptedRequestHdr_test.go +++ b/integrationTests/singleShard/block/interceptedRequestHdr/interceptedRequestHdr_test.go @@ -55,7 +55,7 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithNetMessenger(t *testing.T) { //connect messengers together time.Sleep(time.Second) - err := nResolver.ConnectTo(nRequester) + err := nResolver.ConnectOnMain(nRequester) require.Nil(t, err) time.Sleep(time.Second) @@ -123,7 +123,7 @@ func TestNode_InterceptedHeaderWithWrongChainIDShouldBeDiscarded(t *testing.T) { //connect messengers together time.Sleep(time.Second) - err := nResolver.ConnectTo(nRequester) + err := nResolver.ConnectOnMain(nRequester) require.Nil(t, err) time.Sleep(time.Second) diff --git a/integrationTests/singleShard/block/interceptedRequestTxBlockBody/interceptedRequestTxBlockBody_test.go b/integrationTests/singleShard/block/interceptedRequestTxBlockBody/interceptedRequestTxBlockBody_test.go index ea2da120a5c..066264f5869 100644 --- a/integrationTests/singleShard/block/interceptedRequestTxBlockBody/interceptedRequestTxBlockBody_test.go +++ b/integrationTests/singleShard/block/interceptedRequestTxBlockBody/interceptedRequestTxBlockBody_test.go @@ -48,7 +48,7 @@ func 
TestNode_GenerateSendInterceptTxBlockBodyWithNetMessenger(t *testing.T) { //connect messengers together time.Sleep(time.Second) - err := nRequester.ConnectTo(nResolver) + err := nRequester.ConnectOnMain(nResolver) require.Nil(t, err) time.Sleep(time.Second) diff --git a/integrationTests/singleShard/transaction/interceptedResolvedTx/interceptedResolvedTx_test.go b/integrationTests/singleShard/transaction/interceptedResolvedTx/interceptedResolvedTx_test.go index 45a6dc18e00..068a245a7c0 100644 --- a/integrationTests/singleShard/transaction/interceptedResolvedTx/interceptedResolvedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedResolvedTx/interceptedResolvedTx_test.go @@ -46,7 +46,7 @@ func TestNode_RequestInterceptTransactionWithMessengerAndWhitelist(t *testing.T) //connect messengers together time.Sleep(time.Second) - err := nRequester.ConnectTo(nResolver) + err := nRequester.ConnectOnMain(nResolver) require.Nil(t, err) time.Sleep(time.Second) @@ -142,7 +142,7 @@ func TestNode_RequestInterceptRewardTransactionWithMessenger(t *testing.T) { //connect messengers together time.Sleep(time.Second) - err := nRequester.ConnectTo(nResolver) + err := nRequester.ConnectOnMain(nResolver) require.Nil(t, err) time.Sleep(time.Second) diff --git a/integrationTests/singleShard/transaction/interceptedResolvedUnsignedTx/interceptedResolvedUnsignedTx_test.go b/integrationTests/singleShard/transaction/interceptedResolvedUnsignedTx/interceptedResolvedUnsignedTx_test.go index ef2abacd76e..9a0510fd8d2 100644 --- a/integrationTests/singleShard/transaction/interceptedResolvedUnsignedTx/interceptedResolvedUnsignedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedResolvedUnsignedTx/interceptedResolvedUnsignedTx_test.go @@ -44,7 +44,7 @@ func TestNode_RequestInterceptUnsignedTransactionWithMessenger(t *testing.T) { //connect messengers together time.Sleep(time.Second) - err := nRequester.ConnectTo(nResolver) + err := nRequester.ConnectOnMain(nResolver) 
require.Nil(t, err) time.Sleep(time.Second) diff --git a/integrationTests/state/stateTrieSync/stateTrieSync_test.go b/integrationTests/state/stateTrieSync/stateTrieSync_test.go index 39b144366d8..f57c9b80c79 100644 --- a/integrationTests/state/stateTrieSync/stateTrieSync_test.go +++ b/integrationTests/state/stateTrieSync/stateTrieSync_test.go @@ -92,7 +92,7 @@ func testNodeRequestInterceptTrieNodesWithMessenger(t *testing.T, version int) { }() time.Sleep(time.Second) - err := nRequester.ConnectTo(nResolver) + err := nRequester.ConnectOnMain(nResolver) require.Nil(t, err) time.Sleep(integrationTests.SyncDelay) @@ -213,7 +213,7 @@ func testNodeRequestInterceptTrieNodesWithMessengerNotSyncingShouldErr(t *testin }() time.Sleep(time.Second) - err := nRequester.ConnectTo(nResolver) + err := nRequester.ConnectOnMain(nResolver) require.Nil(t, err) time.Sleep(integrationTests.SyncDelay) @@ -321,7 +321,7 @@ func testMultipleDataTriesSync(t *testing.T, numAccounts int, numDataTrieLeaves }() time.Sleep(time.Second) - err := nRequester.ConnectTo(nResolver) + err := nRequester.ConnectOnMain(nResolver) require.Nil(t, err) time.Sleep(integrationTests.SyncDelay) @@ -454,7 +454,7 @@ func testSyncMissingSnapshotNodes(t *testing.T, version int) { nRequester := nodes[0] nResolver := nodes[1] - err := nRequester.ConnectTo(nResolver) + err := nRequester.ConnectOnMain(nResolver) require.Nil(t, err) time.Sleep(integrationTests.SyncDelay) diff --git a/integrationTests/sync/edgeCases/edgeCases_test.go b/integrationTests/sync/edgeCases/edgeCases_test.go index b02c2e39140..f3167b0528e 100644 --- a/integrationTests/sync/edgeCases/edgeCases_test.go +++ b/integrationTests/sync/edgeCases/edgeCases_test.go @@ -89,7 +89,7 @@ func TestSyncMetaNodeIsSyncingReceivedHigherRoundBlockFromShard(t *testing.T) { syncNodesSlice := []*integrationTests.TestProcessorNode{syncMetaNode} for _, n := range nodes { for _, sn := range syncNodesSlice { - _ = sn.ConnectTo(n) + _ = sn.ConnectOnMain(n) } } 
integrationTests.BootstrapDelay() diff --git a/integrationTests/testConsensusNode.go b/integrationTests/testConsensusNode.go index fb4348620cf..049e660a8dc 100644 --- a/integrationTests/testConsensusNode.go +++ b/integrationTests/testConsensusNode.go @@ -44,6 +44,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" testFactory "github.com/multiversx/mx-chain-go/testscommon/factory" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" @@ -76,16 +77,17 @@ type ArgsTestConsensusNode struct { // TestConsensusNode represents a structure used in integration tests used for consensus tests type TestConsensusNode struct { - Node *node.Node - Messenger p2p.Messenger - NodesCoordinator nodesCoordinator.NodesCoordinator - ShardCoordinator sharding.Coordinator - ChainHandler data.ChainHandler - BlockProcessor *mock.BlockProcessorMock - RequestersFinder dataRetriever.RequestersFinder - AccountsDB *state.AccountsDB - NodeKeys *TestKeyPair - MultiSigner *cryptoMocks.MultisignerMock + Node *node.Node + MainMessenger p2p.Messenger + FullArchiveMessenger p2p.Messenger + NodesCoordinator nodesCoordinator.NodesCoordinator + ShardCoordinator sharding.Coordinator + ChainHandler data.ChainHandler + BlockProcessor *mock.BlockProcessorMock + RequestersFinder dataRetriever.RequestersFinder + AccountsDB *state.AccountsDB + NodeKeys *TestKeyPair + MultiSigner *cryptoMocks.MultisignerMock } // NewTestConsensusNode returns a new TestConsensusNode @@ -181,7 +183,8 @@ func (tcn *TestConsensusNode) initNode(args ArgsTestConsensusNode) { pkBytes, _ := tcn.NodeKeys.Pk.ToByteArray() tcn.initNodesCoordinator(args.ConsensusSize, testHasher, epochStartRegistrationHandler, args.EligibleMap, 
args.WaitingMap, pkBytes, consensusCache) - tcn.Messenger = CreateMessengerWithNoDiscovery() + tcn.MainMessenger = CreateMessengerWithNoDiscovery() + tcn.FullArchiveMessenger = &p2pmocks.MessengerStub{} tcn.initBlockChain(testHasher) tcn.initBlockProcessor() @@ -271,7 +274,7 @@ func (tcn *TestConsensusNode) initNode(args ArgsTestConsensusNode) { argsKeysHandler := keysManagement.ArgsKeysHandler{ ManagedPeersHolder: keysHolder, PrivateKey: tcn.NodeKeys.Sk, - Pid: tcn.Messenger.ID(), + Pid: tcn.MainMessenger.ID(), } keysHandler, _ := keysManagement.NewKeysHandler(argsKeysHandler) @@ -285,7 +288,7 @@ func (tcn *TestConsensusNode) initNode(args ArgsTestConsensusNode) { sigHandler, _ := cryptoFactory.NewSigningHandler(signingHandlerArgs) networkComponents := GetDefaultNetworkComponents() - networkComponents.Messenger = tcn.Messenger + networkComponents.Messenger = tcn.MainMessenger networkComponents.InputAntiFlood = &mock.NilAntifloodHandler{} networkComponents.PeerHonesty = &mock.PeerHonestyHandlerStub{} @@ -512,22 +515,40 @@ func createTestStore() dataRetriever.StorageService { return store } -// ConnectTo will try to initiate a connection to the provided parameter -func (tcn *TestConsensusNode) ConnectTo(connectable Connectable) error { +// ConnectOnMain will try to initiate a connection to the provided parameter on the main messenger +func (tcn *TestConsensusNode) ConnectOnMain(connectable Connectable) error { if check.IfNil(connectable) { return fmt.Errorf("trying to connect to a nil Connectable parameter") } - return tcn.Messenger.ConnectToPeer(connectable.GetConnectableAddress()) + return tcn.MainMessenger.ConnectToPeer(connectable.GetMainConnectableAddress()) } -// GetConnectableAddress returns a non circuit, non windows default connectable p2p address -func (tcn *TestConsensusNode) GetConnectableAddress() string { +// ConnectOnFullArchive will try to initiate a connection to the provided parameter on the full archive messenger +func (tcn *TestConsensusNode) 
ConnectOnFullArchive(connectable Connectable) error { + if check.IfNil(connectable) { + return fmt.Errorf("trying to connect to a nil Connectable parameter") + } + + return tcn.FullArchiveMessenger.ConnectToPeer(connectable.GetMainConnectableAddress()) +} + +// GetMainConnectableAddress returns a non circuit, non windows default connectable p2p address +func (tcn *TestConsensusNode) GetMainConnectableAddress() string { + if tcn == nil { + return "nil" + } + + return GetConnectableAddress(tcn.MainMessenger) +} + +// GetFullArchiveConnectableAddress returns a non circuit, non windows default connectable p2p address of the full archive network +func (tcn *TestConsensusNode) GetFullArchiveConnectableAddress() string { if tcn == nil { return "nil" } - return GetConnectableAddress(tcn.Messenger) + return GetConnectableAddress(tcn.FullArchiveMessenger) } // IsInterfaceNil returns true if there is no value under the interface diff --git a/integrationTests/testHeartbeatNode.go b/integrationTests/testHeartbeatNode.go index 6d05087bd52..28cc76590dd 100644 --- a/integrationTests/testHeartbeatNode.go +++ b/integrationTests/testHeartbeatNode.go @@ -741,17 +741,26 @@ func (thn *TestHeartbeatNode) initCrossShardPeerTopicNotifier(tb testing.TB) { } -// ConnectTo will try to initiate a connection to the provided parameter -func (thn *TestHeartbeatNode) ConnectTo(connectable Connectable) error { +// ConnectOnMain will try to initiate a connection to the provided parameter on the main messenger +func (thn *TestHeartbeatNode) ConnectOnMain(connectable Connectable) error { if check.IfNil(connectable) { return fmt.Errorf("trying to connect to a nil Connectable parameter") } - return thn.MainMessenger.ConnectToPeer(connectable.GetConnectableAddress()) + return thn.MainMessenger.ConnectToPeer(connectable.GetMainConnectableAddress()) } -// GetConnectableAddress returns a non circuit, non windows default connectable p2p address -func (thn *TestHeartbeatNode) GetConnectableAddress() string { 
+// ConnectOnFullArchive will try to initiate a connection to the provided parameter on the full archive messenger +func (thn *TestHeartbeatNode) ConnectOnFullArchive(connectable Connectable) error { + if check.IfNil(connectable) { + return fmt.Errorf("trying to connect to a nil Connectable parameter") + } + + return thn.FullArchiveMessenger.ConnectToPeer(connectable.GetMainConnectableAddress()) +} + +// GetMainConnectableAddress returns a non circuit, non windows default connectable p2p address +func (thn *TestHeartbeatNode) GetMainConnectableAddress() string { if thn == nil { return "nil" } @@ -759,6 +768,15 @@ func (thn *TestHeartbeatNode) GetConnectableAddress() string { return GetConnectableAddress(thn.MainMessenger) } +// GetFullArchiveConnectableAddress returns a non circuit, non windows default connectable p2p address of the full archive network +func (thn *TestHeartbeatNode) GetFullArchiveConnectableAddress() string { + if thn == nil { + return "nil" + } + + return GetConnectableAddress(thn.FullArchiveMessenger) +} + // MakeDisplayTableForHeartbeatNodes returns a string containing counters for received messages for all provided test nodes func MakeDisplayTableForHeartbeatNodes(nodes map[uint32][]*TestHeartbeatNode) string { header := []string{"pk", "pid", "shard ID", "messages global", "messages intra", "messages cross", "conns Total/IntraVal/CrossVal/IntraObs/CrossObs/FullObs/Unk/Sed"} diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 6b91a5fe769..713611d8ced 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -198,7 +198,7 @@ func CreateMessengerFromConfig(p2pConfig p2pConfig.P2PConfig) p2p.Messenger { } // CreateMessengerFromConfigWithPeersRatingHandler creates a new libp2p messenger with provided configuration -func CreateMessengerFromConfigWithPeersRatingHandler(p2pConfig p2pConfig.P2PConfig, peersRatingHandler p2p.PeersRatingHandler) p2p.Messenger { +func 
CreateMessengerFromConfigWithPeersRatingHandler(p2pConfig p2pConfig.P2PConfig, peersRatingHandler p2p.PeersRatingHandler, p2pKey crypto.PrivateKey) p2p.Messenger { arg := p2pFactory.ArgsNetworkMessenger{ Marshaller: TestMarshalizer, ListenAddress: p2p.ListenLocalhostAddrWithIp4AndTcp, @@ -207,7 +207,7 @@ func CreateMessengerFromConfigWithPeersRatingHandler(p2pConfig p2pConfig.P2PConf PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, PeersRatingHandler: peersRatingHandler, ConnectionWatcherType: p2p.ConnectionWatcherTypePrint, - P2pPrivateKey: mock.NewPrivateKeyMock(), + P2pPrivateKey: p2pKey, P2pSingleSigner: &mock.SignerMock{}, P2pKeyGenerator: &mock.KeyGenMock{}, Logger: logger.GetOrCreate("tests/p2p"), @@ -242,7 +242,7 @@ func CreateMessengerWithNoDiscovery() p2p.Messenger { } // CreateMessengerWithNoDiscoveryAndPeersRatingHandler creates a new libp2p messenger with no peer discovery -func CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHanlder p2p.PeersRatingHandler) p2p.Messenger { +func CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHanlder p2p.PeersRatingHandler, p2pKey crypto.PrivateKey) p2p.Messenger { p2pCfg := p2pConfig.P2PConfig{ Node: p2pConfig.NodeConfig{ Port: "0", @@ -255,7 +255,7 @@ func CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHanlder p2p. 
}, } - return CreateMessengerFromConfigWithPeersRatingHandler(p2pCfg, peersRatingHanlder) + return CreateMessengerFromConfigWithPeersRatingHandler(p2pCfg, peersRatingHanlder, p2pKey) } // CreateFixedNetworkOf8Peers assembles a network as following: @@ -1368,10 +1368,10 @@ func ConnectNodes(nodes []Connectable) { for j := i + 1; j < len(nodes); j++ { src := nodes[i] dst := nodes[j] - err := src.ConnectTo(dst) + err := src.ConnectOnMain(dst) if err != nil { encounteredErrors = append(encounteredErrors, - fmt.Errorf("%w while %s was connecting to %s", err, src.GetConnectableAddress(), dst.GetConnectableAddress())) + fmt.Errorf("%w while %s was connecting to %s", err, src.GetMainConnectableAddress(), dst.GetMainConnectableAddress())) } } } diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 45529591a96..291e52139b5 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -252,8 +252,10 @@ type CryptoParams struct { // Connectable defines the operations for a struct to become connectable by other struct // In other words, all instances that implement this interface are able to connect with each other type Connectable interface { - ConnectTo(connectable Connectable) error - GetConnectableAddress() string + ConnectOnMain(connectable Connectable) error + ConnectOnFullArchive(connectable Connectable) error + GetMainConnectableAddress() string + GetFullArchiveConnectableAddress() string IsInterfaceNil() bool } @@ -285,6 +287,7 @@ type ArgTestProcessorNode struct { AppStatusHandler core.AppStatusHandler StatusMetrics external.StatusMetricsHandler WithPeersRatingHandler bool + NodeOperationMode p2p.NodeOperation } // TestProcessorNode represents a container type of class used in integration tests @@ -355,6 +358,7 @@ type TestProcessorNode struct { EpochStartTrigger TestEpochStartTrigger EpochStartNotifier notifier.EpochStartNotifier + EpochProvider 
dataRetriever.CurrentNetworkEpochProviderHandler MultiSigner crypto.MultiSigner HeaderSigVerifier process.InterceptedHeaderSigVerifier @@ -392,7 +396,8 @@ type TestProcessorNode struct { TransactionLogProcessor process.TransactionLogProcessor MainPeersRatingHandler p2p.PeersRatingHandler FullArchivePeersRatingHandler p2p.PeersRatingHandler - PeersRatingMonitor p2p.PeersRatingMonitor + MainPeersRatingMonitor p2p.PeersRatingMonitor + FullArchivePeersRatingMonitor p2p.PeersRatingMonitor HardforkTrigger node.HardforkTrigger AppStatusHandler core.AppStatusHandler StatusMetrics external.StatusMetricsHandler @@ -441,6 +446,10 @@ func newBaseTestProcessorNode(args ArgTestProcessorNode) *TestProcessorNode { peersRatingHandler = &p2pmocks.PeersRatingHandlerStub{} topRatedCache := testscommon.NewCacherMock() badRatedCache := testscommon.NewCacherMock() + var fullArchivePeersRatingHandler p2p.PeersRatingHandler + fullArchivePeersRatingHandler = &p2pmocks.PeersRatingHandlerStub{} + fullArchiveTopRatedCache := testscommon.NewCacherMock() + fullArchiveBadRatedCache := testscommon.NewCacherMock() if args.WithPeersRatingHandler { peersRatingHandler, _ = p2pFactory.NewPeersRatingHandler( p2pFactory.ArgPeersRatingHandler{ @@ -448,12 +457,23 @@ func newBaseTestProcessorNode(args ArgTestProcessorNode) *TestProcessorNode { BadRatedCache: badRatedCache, Logger: &testscommon.LoggerStub{}, }) + + fullArchivePeersRatingHandler, _ = p2pFactory.NewPeersRatingHandler( + p2pFactory.ArgPeersRatingHandler{ + TopRatedCache: fullArchiveTopRatedCache, + BadRatedCache: fullArchiveBadRatedCache, + Logger: &testscommon.LoggerStub{}, + }) } - messenger := CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHandler) + p2pKey := mock.NewPrivateKeyMock() + messenger := CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHandler, p2pKey) + fullArchiveMessenger := CreateMessengerWithNoDiscoveryAndPeersRatingHandler(fullArchivePeersRatingHandler, p2pKey) var peersRatingMonitor 
p2p.PeersRatingMonitor peersRatingMonitor = &p2pmocks.PeersRatingMonitorStub{} + var fullArchivePeersRatingMonitor p2p.PeersRatingMonitor + fullArchivePeersRatingMonitor = &p2pmocks.PeersRatingMonitorStub{} if args.WithPeersRatingHandler { peersRatingMonitor, _ = p2pFactory.NewPeersRatingMonitor( p2pFactory.ArgPeersRatingMonitor{ @@ -461,6 +481,12 @@ func newBaseTestProcessorNode(args ArgTestProcessorNode) *TestProcessorNode { BadRatedCache: badRatedCache, ConnectionsProvider: messenger, }) + fullArchivePeersRatingMonitor, _ = p2pFactory.NewPeersRatingMonitor( + p2pFactory.ArgPeersRatingMonitor{ + TopRatedCache: fullArchiveTopRatedCache, + BadRatedCache: fullArchiveBadRatedCache, + ConnectionsProvider: fullArchiveMessenger, + }) } genericEpochNotifier := forking.NewGenericEpochNotifier() @@ -470,12 +496,17 @@ func newBaseTestProcessorNode(args ArgTestProcessorNode) *TestProcessorNode { } enableEpochsHandler, _ := enablers.NewEnableEpochsHandler(*epochsConfig, genericEpochNotifier) + nodeOperationMode := p2p.NormalOperation + if len(args.NodeOperationMode) != 0 { + nodeOperationMode = args.NodeOperationMode + } + logsProcessor, _ := transactionLog.NewTxLogProcessor(transactionLog.ArgTxLogProcessor{Marshalizer: TestMarshalizer}) tpn := &TestProcessorNode{ ShardCoordinator: shardCoordinator, MainMessenger: messenger, - FullArchiveMessenger: &p2pmocks.MessengerStub{}, // TODO[Sorin]: inject a proper messenger when all pieces are done to test this network as well, - NodeOperationMode: p2p.NormalOperation, + FullArchiveMessenger: fullArchiveMessenger, + NodeOperationMode: nodeOperationMode, NodesCoordinator: nodesCoordinatorInstance, ChainID: ChainID, MinTransactionVersion: MinTransactionVersion, @@ -483,11 +514,12 @@ func newBaseTestProcessorNode(args ArgTestProcessorNode) *TestProcessorNode { HistoryRepository: &dblookupextMock.HistoryRepositoryStub{}, EpochNotifier: genericEpochNotifier, EnableEpochsHandler: enableEpochsHandler, + EpochProvider: 
&mock.CurrentNetworkEpochProviderStub{}, WasmVMChangeLocker: &sync.RWMutex{}, TransactionLogProcessor: logsProcessor, Bootstrapper: mock.NewTestBootstrapperMock(), MainPeersRatingHandler: peersRatingHandler, - FullArchivePeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, + FullArchivePeersRatingHandler: fullArchivePeersRatingHandler, MainPeerShardMapper: mock.NewNetworkShardingCollectorMock(), FullArchivePeerShardMapper: mock.NewNetworkShardingCollectorMock(), EnableEpochs: *epochsConfig, @@ -498,7 +530,8 @@ func newBaseTestProcessorNode(args ArgTestProcessorNode) *TestProcessorNode { EpochStartNotifier: args.EpochStartSubscriber, GuardedAccountHandler: &guardianMocks.GuardedAccountHandlerStub{}, AppStatusHandler: appStatusHandler, - PeersRatingMonitor: peersRatingMonitor, + MainPeersRatingMonitor: peersRatingMonitor, + FullArchivePeersRatingMonitor: fullArchivePeersRatingMonitor, } tpn.NodeKeys = args.NodeKeys @@ -549,17 +582,26 @@ func NewTestProcessorNode(args ArgTestProcessorNode) *TestProcessorNode { return tpn } -// ConnectTo will try to initiate a connection to the provided parameter -func (tpn *TestProcessorNode) ConnectTo(connectable Connectable) error { +// ConnectOnMain will try to initiate a connection to the provided parameter on the main messenger +func (tpn *TestProcessorNode) ConnectOnMain(connectable Connectable) error { + if check.IfNil(connectable) { + return fmt.Errorf("trying to connect to a nil Connectable parameter") + } + + return tpn.MainMessenger.ConnectToPeer(connectable.GetMainConnectableAddress()) +} + +// ConnectOnFullArchive will try to initiate a connection to the provided parameter on the full archive messenger +func (tpn *TestProcessorNode) ConnectOnFullArchive(connectable Connectable) error { if check.IfNil(connectable) { return fmt.Errorf("trying to connect to a nil Connectable parameter") } - return tpn.MainMessenger.ConnectToPeer(connectable.GetConnectableAddress()) + return 
tpn.FullArchiveMessenger.ConnectToPeer(connectable.GetFullArchiveConnectableAddress()) } -// GetConnectableAddress returns a non circuit, non windows default connectable p2p address -func (tpn *TestProcessorNode) GetConnectableAddress() string { +// GetMainConnectableAddress returns a non circuit, non windows default connectable p2p address main network +func (tpn *TestProcessorNode) GetMainConnectableAddress() string { if tpn == nil { return "nil" } @@ -567,6 +609,15 @@ func (tpn *TestProcessorNode) GetConnectableAddress() string { return GetConnectableAddress(tpn.MainMessenger) } +// GetFullArchiveConnectableAddress returns a non circuit, non windows default connectable p2p address of the full archive network +func (tpn *TestProcessorNode) GetFullArchiveConnectableAddress() string { + if tpn == nil { + return "nil" + } + + return GetConnectableAddress(tpn.FullArchiveMessenger) +} + // Close - func (tpn *TestProcessorNode) Close() { _ = tpn.MainMessenger.Close() @@ -1350,7 +1401,9 @@ func (tpn *TestProcessorNode) createHardforkTrigger(heartbeatPk string) []byte { func (tpn *TestProcessorNode) initResolvers() { dataPacker, _ := partitioning.NewSimpleDataPacker(TestMarshalizer) - _ = tpn.MainMessenger.CreateTopic(common.ConsensusTopic+tpn.ShardCoordinator.CommunicationIdentifier(tpn.ShardCoordinator.SelfId()), true) + consensusTopic := common.ConsensusTopic + tpn.ShardCoordinator.CommunicationIdentifier(tpn.ShardCoordinator.SelfId()) + _ = tpn.MainMessenger.CreateTopic(consensusTopic, true) + _ = tpn.FullArchiveMessenger.CreateTopic(consensusTopic, true) payloadValidator, _ := validator.NewPeerAuthenticationPayloadValidator(60) preferredPeersHolder, _ := p2pFactory.NewPeersHolder([]string{}) fullArchivePreferredPeersHolder, _ := p2pFactory.NewPeersHolder([]string{}) @@ -1401,7 +1454,7 @@ func (tpn *TestProcessorNode) initRequesters() { Marshaller: TestMarshaller, Uint64ByteSliceConverter: TestUint64Converter, OutputAntifloodHandler: &mock.NilAntifloodHandler{}, - 
CurrentNetworkEpochProvider: &mock.CurrentNetworkEpochProviderStub{}, + CurrentNetworkEpochProvider: tpn.EpochProvider, MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, MainPeersRatingHandler: tpn.MainPeersRatingHandler, @@ -2413,7 +2466,8 @@ func (tpn *TestProcessorNode) initNode() { networkComponents.FullArchiveNetworkMessengerField = tpn.FullArchiveMessenger networkComponents.PeersRatingHandlerField = tpn.MainPeersRatingHandler networkComponents.FullArchivePeersRatingHandlerField = tpn.FullArchivePeersRatingHandler - networkComponents.PeersRatingMonitorField = tpn.PeersRatingMonitor + networkComponents.PeersRatingMonitorField = tpn.MainPeersRatingMonitor + networkComponents.FullArchivePeersRatingMonitorField = tpn.FullArchivePeersRatingMonitor tpn.Node, err = node.NewNode( node.WithAddressSignatureSize(64), From 633fdf712031aec220cc3b47b89d98be07b2c1db Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Thu, 15 Jun 2023 13:01:07 +0300 Subject: [PATCH 15/38] fixes after review, added new interceptors container --- common/constants.go | 3 - .../epochStartInterceptorsContainerFactory.go | 30 +- epochStart/bootstrap/fromLocalStorage.go | 13 +- epochStart/bootstrap/process.go | 49 +-- epochStart/bootstrap/storageProcess.go | 5 - factory/heartbeat/heartbeatV2Components.go | 4 +- factory/interface.go | 1 + factory/mock/processComponentsStub.go | 6 + factory/processing/processComponents.go | 263 ++++++++-------- .../processing/processComponentsHandler.go | 23 +- .../processComponentsHandler_test.go | 4 + .../mock/processComponentsStub.go | 6 + .../multiShard/hardFork/hardFork_test.go | 35 ++- .../networkSharding_test.go | 3 +- .../state/stateTrieSync/stateTrieSync_test.go | 13 +- integrationTests/testHeartbeatNode.go | 1 - integrationTests/testProcessorNode.go | 35 ++- node/nodeHelper.go | 11 +- .../baseInterceptorsContainerFactory.go | 116 +++---- .../metaInterceptorsContainerFactory.go | 50 ++- 
.../metaInterceptorsContainerFactory_test.go | 44 +-- .../shardInterceptorsContainerFactory.go | 48 ++- .../shardInterceptorsContainerFactory_test.go | 32 +- process/interface.go | 2 +- {common => storage}/disabled/cache.go | 0 update/factory/exportHandlerFactory.go | 285 ++++++++++-------- update/factory/fullSyncInterceptors.go | 167 ++++++---- 27 files changed, 664 insertions(+), 585 deletions(-) rename {common => storage}/disabled/cache.go (100%) diff --git a/common/constants.go b/common/constants.go index 828540fc28d..5cc7e7ccd4c 100644 --- a/common/constants.go +++ b/common/constants.go @@ -75,9 +75,6 @@ const ConnectionTopic = "connection" // ValidatorInfoTopic is the topic used for validatorInfo signaling const ValidatorInfoTopic = "validatorInfo" -// FullArchiveTopicPrefix is the topic prefix used for specific topics that have different interceptors on full archive network -const FullArchiveTopicPrefix = "full_archive_" - // MetricCurrentRound is the metric for monitoring the current round of a node const MetricCurrentRound = "erd_current_round" diff --git a/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go b/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go index d17c2b67f8b..095f85e5c70 100644 --- a/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go +++ b/epochStart/bootstrap/factory/epochStartInterceptorsContainerFactory.go @@ -46,21 +46,21 @@ type ArgsEpochStartInterceptorContainer struct { } // NewEpochStartInterceptorsContainer will return a real interceptors container factory, but with many disabled components -func NewEpochStartInterceptorsContainer(args ArgsEpochStartInterceptorContainer) (process.InterceptorsContainer, error) { +func NewEpochStartInterceptorsContainer(args ArgsEpochStartInterceptorContainer) (process.InterceptorsContainer, process.InterceptorsContainer, error) { if check.IfNil(args.CoreComponents) { - return nil, epochStart.ErrNilCoreComponentsHolder + return nil, nil, 
epochStart.ErrNilCoreComponentsHolder } if check.IfNil(args.CryptoComponents) { - return nil, epochStart.ErrNilCryptoComponentsHolder + return nil, nil, epochStart.ErrNilCryptoComponentsHolder } if check.IfNil(args.CoreComponents.AddressPubKeyConverter()) { - return nil, epochStart.ErrNilPubkeyConverter + return nil, nil, epochStart.ErrNilPubkeyConverter } cryptoComponents := args.CryptoComponents.Clone().(process.CryptoComponentsHolder) err := cryptoComponents.SetMultiSignerContainer(disabled.NewMultiSignerContainer()) if err != nil { - return nil, err + return nil, nil, err } nodesCoordinator := disabled.NewNodesCoordinator() @@ -108,22 +108,30 @@ func NewEpochStartInterceptorsContainer(args ArgsEpochStartInterceptorContainer) MainPeerShardMapper: peerShardMapper, FullArchivePeerShardMapper: fullArchivePeerShardMapper, HardforkTrigger: hardforkTrigger, + NodeOperationMode: args.NodeOperationMode, } interceptorsContainerFactory, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(containerFactoryArgs) if err != nil { - return nil, err + return nil, nil, err } - container, err := interceptorsContainerFactory.Create() + mainContainer, fullArchiveContainer, err := interceptorsContainerFactory.Create() if err != nil { - return nil, err + return nil, nil, err } - err = interceptorsContainerFactory.AddShardTrieNodeInterceptors(container) + err = interceptorsContainerFactory.AddShardTrieNodeInterceptors(mainContainer) if err != nil { - return nil, err + return nil, nil, err } - return container, nil + if args.NodeOperationMode == p2p.FullArchiveMode { + err = interceptorsContainerFactory.AddShardTrieNodeInterceptors(fullArchiveContainer) + if err != nil { + return nil, nil, err + } + } + + return mainContainer, fullArchiveContainer, nil } diff --git a/epochStart/bootstrap/fromLocalStorage.go b/epochStart/bootstrap/fromLocalStorage.go index 4f19f07a90a..0c3f2d6544d 100644 --- a/epochStart/bootstrap/fromLocalStorage.go +++ 
b/epochStart/bootstrap/fromLocalStorage.go @@ -138,9 +138,13 @@ func (e *epochStartBootstrap) prepareEpochFromStorage() (Parameters, error) { } defer func() { - errClose := e.interceptorContainer.Close() + errClose := e.mainInterceptorContainer.Close() if errClose != nil { - log.Warn("prepareEpochFromStorage interceptorContainer.Close()", "error", errClose) + log.Warn("prepareEpochFromStorage mainInterceptorContainer.Close()", "error", errClose) + } + errClose = e.fullArchiveInterceptorContainer.Close() + if errClose != nil { + log.Warn("prepareEpochFromStorage fullArchiveInterceptorContainer.Close()", "error", errClose) } }() @@ -167,11 +171,6 @@ func (e *epochStartBootstrap) prepareEpochFromStorage() (Parameters, error) { return Parameters{}, err } - err = e.fullArchiveMessenger.CreateTopic(consensusTopic, true) - if err != nil { - return Parameters{}, err - } - emptyPeerMiniBlocksSlice := make([]*block.MiniBlock, 0) // empty slice since we have bootstrapped from storage if e.shardCoordinator.SelfId() == core.MetachainShardId { err = e.requestAndProcessForMeta(emptyPeerMiniBlocksSlice) diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index 75e94061133..6040ca4c6a6 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -120,22 +120,23 @@ type epochStartBootstrap struct { nodeProcessingMode common.NodeProcessingMode nodeOperationMode p2p.NodeOperation // created components - requestHandler process.RequestHandler - interceptorContainer process.InterceptorsContainer - dataPool dataRetriever.PoolsHolder - miniBlocksSyncer epochStart.PendingMiniBlocksSyncHandler - headersSyncer epochStart.HeadersByHashSyncer - txSyncerForScheduled update.TransactionsSyncHandler - epochStartMetaBlockSyncer epochStart.StartOfEpochMetaSyncer - nodesConfigHandler StartOfEpochNodesConfigHandler - whiteListHandler update.WhiteListHandler - whiteListerVerifiedTxs update.WhiteListHandler - storageOpenerHandler 
storage.UnitOpenerHandler - latestStorageDataProvider storage.LatestStorageDataProviderHandler - argumentsParser process.ArgumentsParser - dataSyncerFactory types.ScheduledDataSyncerCreator - dataSyncerWithScheduled types.ScheduledDataSyncer - storageService dataRetriever.StorageService + requestHandler process.RequestHandler + mainInterceptorContainer process.InterceptorsContainer + fullArchiveInterceptorContainer process.InterceptorsContainer + dataPool dataRetriever.PoolsHolder + miniBlocksSyncer epochStart.PendingMiniBlocksSyncHandler + headersSyncer epochStart.HeadersByHashSyncer + txSyncerForScheduled update.TransactionsSyncHandler + epochStartMetaBlockSyncer epochStart.StartOfEpochMetaSyncer + nodesConfigHandler StartOfEpochNodesConfigHandler + whiteListHandler update.WhiteListHandler + whiteListerVerifiedTxs update.WhiteListHandler + storageOpenerHandler storage.UnitOpenerHandler + latestStorageDataProvider storage.LatestStorageDataProviderHandler + argumentsParser process.ArgumentsParser + dataSyncerFactory types.ScheduledDataSyncerCreator + dataSyncerWithScheduled types.ScheduledDataSyncer + storageService dataRetriever.StorageService // gathered data epochStartMeta data.MetaHeaderHandler @@ -379,9 +380,14 @@ func (e *epochStartBootstrap) Bootstrap() (Parameters, error) { } defer func() { - errClose := e.interceptorContainer.Close() + errClose := e.mainInterceptorContainer.Close() if errClose != nil { - log.Warn("prepareEpochFromStorage interceptorContainer.Close()", "error", errClose) + log.Warn("prepareEpochFromStorage mainInterceptorContainer.Close()", "error", errClose) + } + + errClose = e.fullArchiveInterceptorContainer.Close() + if errClose != nil { + log.Warn("prepareEpochFromStorage fullArchiveInterceptorContainer.Close()", "error", errClose) } }() @@ -576,7 +582,7 @@ func (e *epochStartBootstrap) createSyncers() error { NodeOperationMode: e.nodeOperationMode, } - e.interceptorContainer, err = 
factoryInterceptors.NewEpochStartInterceptorsContainer(args) + e.mainInterceptorContainer, e.fullArchiveInterceptorContainer, err = factoryInterceptors.NewEpochStartInterceptorsContainer(args) if err != nil { return err } @@ -694,11 +700,6 @@ func (e *epochStartBootstrap) requestAndProcessing() (Parameters, error) { return Parameters{}, err } - err = e.fullArchiveMessenger.CreateTopic(consensusTopic, true) - if err != nil { - return Parameters{}, err - } - err = e.createHeartbeatSender() if err != nil { return Parameters{}, err diff --git a/epochStart/bootstrap/storageProcess.go b/epochStart/bootstrap/storageProcess.go index 898af0ffb17..6560549f2e3 100644 --- a/epochStart/bootstrap/storageProcess.go +++ b/epochStart/bootstrap/storageProcess.go @@ -335,11 +335,6 @@ func (sesb *storageEpochStartBootstrap) requestAndProcessFromStorage() (Paramete return Parameters{}, err } - err = sesb.fullArchiveMessenger.CreateTopic(consensusTopic, true) - if err != nil { - return Parameters{}, err - } - emptyPeerMiniBlocksSlice := make([]*block.MiniBlock, 0) // empty slice since we have bootstrapped from storage if sesb.shardCoordinator.SelfId() == core.MetachainShardId { err = sesb.requestAndProcessForMeta(emptyPeerMiniBlocksSlice) diff --git a/factory/heartbeat/heartbeatV2Components.go b/factory/heartbeat/heartbeatV2Components.go index b1ac0180b30..a551f22e869 100644 --- a/factory/heartbeat/heartbeatV2Components.go +++ b/factory/heartbeat/heartbeatV2Components.go @@ -251,7 +251,7 @@ func (hcf *heartbeatV2ComponentsFactory) Create() (*heartbeatV2Components, error Messenger: hcf.networkComponents.NetworkMessenger(), PeerShardMapper: hcf.processComponents.PeerShardMapper(), ShardCoordinator: hcf.processComponents.ShardCoordinator(), - BaseIntraShardTopic: common.ConsensusTopic, + BaseIntraShardTopic: common.HeartbeatV2Topic, BaseCrossShardTopic: processFactory.MiniBlocksTopic, } mainDirectConnectionProcessor, err := 
processor.NewDirectConnectionProcessor(argsMainDirectConnectionProcessor) @@ -264,7 +264,7 @@ func (hcf *heartbeatV2ComponentsFactory) Create() (*heartbeatV2Components, error Messenger: hcf.networkComponents.FullArchiveNetworkMessenger(), PeerShardMapper: hcf.processComponents.FullArchivePeerShardMapper(), ShardCoordinator: hcf.processComponents.ShardCoordinator(), - BaseIntraShardTopic: common.ConsensusTopic, + BaseIntraShardTopic: common.HeartbeatV2Topic, BaseCrossShardTopic: processFactory.MiniBlocksTopic, } fullArchiveDirectConnectionProcessor, err := processor.NewDirectConnectionProcessor(argsFullArchiveDirectConnectionProcessor) diff --git a/factory/interface.go b/factory/interface.go index 9dd05c13f69..466b3637384 100644 --- a/factory/interface.go +++ b/factory/interface.go @@ -270,6 +270,7 @@ type ProcessComponentsHolder interface { NodesCoordinator() nodesCoordinator.NodesCoordinator ShardCoordinator() sharding.Coordinator InterceptorsContainer() process.InterceptorsContainer + FullArchiveInterceptorsContainer() process.InterceptorsContainer ResolversContainer() dataRetriever.ResolversContainer RequestersFinder() dataRetriever.RequestersFinder RoundHandler() consensus.RoundHandler diff --git a/factory/mock/processComponentsStub.go b/factory/mock/processComponentsStub.go index 5f90716cea4..b12e7cc083b 100644 --- a/factory/mock/processComponentsStub.go +++ b/factory/mock/processComponentsStub.go @@ -19,6 +19,7 @@ type ProcessComponentsMock struct { NodesCoord nodesCoordinator.NodesCoordinator ShardCoord sharding.Coordinator IntContainer process.InterceptorsContainer + FullArchiveIntContainer process.InterceptorsContainer ResContainer dataRetriever.ResolversContainer ReqFinder dataRetriever.RequestersFinder RoundHandlerField consensus.RoundHandler @@ -87,6 +88,11 @@ func (pcm *ProcessComponentsMock) InterceptorsContainer() process.InterceptorsCo return pcm.IntContainer } +// FullArchiveInterceptorsContainer - +func (pcm *ProcessComponentsMock) 
FullArchiveInterceptorsContainer() process.InterceptorsContainer { + return pcm.FullArchiveIntContainer +} + // ResolversContainer - func (pcm *ProcessComponentsMock) ResolversContainer() dataRetriever.ResolversContainer { return pcm.ResContainer diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index 39f1cb3412e..db89edbe3f5 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -84,50 +84,51 @@ var timeSpanForBadHeaders = time.Minute * 2 // processComponents struct holds the process components type processComponents struct { - nodesCoordinator nodesCoordinator.NodesCoordinator - shardCoordinator sharding.Coordinator - interceptorsContainer process.InterceptorsContainer - resolversContainer dataRetriever.ResolversContainer - requestersFinder dataRetriever.RequestersFinder - roundHandler consensus.RoundHandler - epochStartTrigger epochStart.TriggerHandler - epochStartNotifier factory.EpochStartNotifier - forkDetector process.ForkDetector - blockProcessor process.BlockProcessor - blackListHandler process.TimeCacher - bootStorer process.BootStorer - headerSigVerifier process.InterceptedHeaderSigVerifier - headerIntegrityVerifier nodeFactory.HeaderIntegrityVerifierHandler - validatorsStatistics process.ValidatorStatisticsProcessor - validatorsProvider process.ValidatorsProvider - blockTracker process.BlockTracker - pendingMiniBlocksHandler process.PendingMiniBlocksHandler - requestHandler process.RequestHandler - txLogsProcessor process.TransactionLogProcessorDatabase - headerConstructionValidator process.HeaderConstructionValidator - mainPeerShardMapper process.NetworkShardingCollector - fullArchivePeerShardMapper process.NetworkShardingCollector - txSimulatorProcessor factory.TransactionSimulatorProcessor - miniBlocksPoolCleaner process.PoolsCleaner - txsPoolCleaner process.PoolsCleaner - fallbackHeaderValidator process.FallbackHeaderValidator - whiteListHandler 
process.WhiteListHandler - whiteListerVerifiedTxs process.WhiteListHandler - historyRepository dblookupext.HistoryRepository - importStartHandler update.ImportStartHandler - requestedItemsHandler dataRetriever.RequestedItemsHandler - importHandler update.ImportHandler - nodeRedundancyHandler consensus.NodeRedundancyHandler - currentEpochProvider dataRetriever.CurrentNetworkEpochProviderHandler - vmFactoryForTxSimulator process.VirtualMachinesContainerFactory - vmFactoryForProcessing process.VirtualMachinesContainerFactory - scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler - txsSender process.TxsSenderHandler - hardforkTrigger factory.HardforkTrigger - processedMiniBlocksTracker process.ProcessedMiniBlocksTracker - esdtDataStorageForApi vmcommon.ESDTNFTStorageHandler - accountsParser genesis.AccountsParser - receiptsRepository mainFactory.ReceiptsRepository + nodesCoordinator nodesCoordinator.NodesCoordinator + shardCoordinator sharding.Coordinator + mainInterceptorsContainer process.InterceptorsContainer + fullArchiveInterceptorsContainer process.InterceptorsContainer + resolversContainer dataRetriever.ResolversContainer + requestersFinder dataRetriever.RequestersFinder + roundHandler consensus.RoundHandler + epochStartTrigger epochStart.TriggerHandler + epochStartNotifier factory.EpochStartNotifier + forkDetector process.ForkDetector + blockProcessor process.BlockProcessor + blackListHandler process.TimeCacher + bootStorer process.BootStorer + headerSigVerifier process.InterceptedHeaderSigVerifier + headerIntegrityVerifier nodeFactory.HeaderIntegrityVerifierHandler + validatorsStatistics process.ValidatorStatisticsProcessor + validatorsProvider process.ValidatorsProvider + blockTracker process.BlockTracker + pendingMiniBlocksHandler process.PendingMiniBlocksHandler + requestHandler process.RequestHandler + txLogsProcessor process.TransactionLogProcessorDatabase + headerConstructionValidator process.HeaderConstructionValidator + mainPeerShardMapper 
process.NetworkShardingCollector + fullArchivePeerShardMapper process.NetworkShardingCollector + txSimulatorProcessor factory.TransactionSimulatorProcessor + miniBlocksPoolCleaner process.PoolsCleaner + txsPoolCleaner process.PoolsCleaner + fallbackHeaderValidator process.FallbackHeaderValidator + whiteListHandler process.WhiteListHandler + whiteListerVerifiedTxs process.WhiteListHandler + historyRepository dblookupext.HistoryRepository + importStartHandler update.ImportStartHandler + requestedItemsHandler dataRetriever.RequestedItemsHandler + importHandler update.ImportHandler + nodeRedundancyHandler consensus.NodeRedundancyHandler + currentEpochProvider dataRetriever.CurrentNetworkEpochProviderHandler + vmFactoryForTxSimulator process.VirtualMachinesContainerFactory + vmFactoryForProcessing process.VirtualMachinesContainerFactory + scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler + txsSender process.TxsSenderHandler + hardforkTrigger factory.HardforkTrigger + processedMiniBlocksTracker process.ProcessedMiniBlocksTracker + esdtDataStorageForApi vmcommon.ESDTNFTStorageHandler + accountsParser genesis.AccountsParser + receiptsRepository mainFactory.ReceiptsRepository } // ProcessComponentsFactoryArgs holds the arguments needed to create a process components factory @@ -266,7 +267,6 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { return nil, err } - // TODO: maybe move PeerShardMapper to network components mainPeerShardMapper, err := pcf.prepareNetworkShardingCollectorForMessenger(pcf.network.NetworkMessenger()) if err != nil { return nil, err @@ -513,7 +513,7 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { } // TODO refactor all these factory calls - interceptorsContainer, err := interceptorContainerFactory.Create() + mainInterceptorsContainer, fullArchiveInterceptorsContainer, err := interceptorContainerFactory.Create() if err != nil { return nil, err } @@ -523,7 +523,8 @@ func (pcf 
*processComponentsFactory) Create() (*processComponents, error) { requestHandler, resolversContainer, requestersContainer, - interceptorsContainer, + mainInterceptorsContainer, + fullArchiveInterceptorsContainer, headerSigVerifier, blockTracker, ) @@ -680,50 +681,51 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { } return &processComponents{ - nodesCoordinator: pcf.nodesCoordinator, - shardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - interceptorsContainer: interceptorsContainer, - resolversContainer: resolversContainer, - requestersFinder: requestersFinder, - roundHandler: pcf.coreData.RoundHandler(), - forkDetector: forkDetector, - blockProcessor: blockProcessorComponents.blockProcessor, - epochStartTrigger: epochStartTrigger, - epochStartNotifier: pcf.coreData.EpochStartNotifierWithConfirm(), - blackListHandler: blackListHandler, - bootStorer: bootStorer, - headerSigVerifier: headerSigVerifier, - validatorsStatistics: validatorStatisticsProcessor, - validatorsProvider: validatorsProvider, - blockTracker: blockTracker, - pendingMiniBlocksHandler: pendingMiniBlocksHandler, - requestHandler: requestHandler, - txLogsProcessor: txLogsProcessor, - headerConstructionValidator: headerValidator, - headerIntegrityVerifier: pcf.bootstrapComponents.HeaderIntegrityVerifier(), - mainPeerShardMapper: mainPeerShardMapper, - fullArchivePeerShardMapper: fullArchivePeerShardMapper, - txSimulatorProcessor: txSimulatorProcessor, - miniBlocksPoolCleaner: mbsPoolsCleaner, - txsPoolCleaner: txsPoolsCleaner, - fallbackHeaderValidator: fallbackHeaderValidator, - whiteListHandler: pcf.whiteListHandler, - whiteListerVerifiedTxs: pcf.whiteListerVerifiedTxs, - historyRepository: pcf.historyRepo, - importStartHandler: pcf.importStartHandler, - requestedItemsHandler: pcf.requestedItemsHandler, - importHandler: pcf.importHandler, - nodeRedundancyHandler: nodeRedundancyHandler, - currentEpochProvider: currentEpochProvider, - vmFactoryForTxSimulator: 
vmFactoryForTxSimulate, - vmFactoryForProcessing: blockProcessorComponents.vmFactoryForProcessing, - scheduledTxsExecutionHandler: scheduledTxsExecutionHandler, - txsSender: txsSenderWithAccumulator, - hardforkTrigger: hardforkTrigger, - processedMiniBlocksTracker: processedMiniBlocksTracker, - esdtDataStorageForApi: pcf.esdtNftStorage, - accountsParser: pcf.accountsParser, - receiptsRepository: receiptsRepository, + nodesCoordinator: pcf.nodesCoordinator, + shardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + mainInterceptorsContainer: mainInterceptorsContainer, + fullArchiveInterceptorsContainer: fullArchiveInterceptorsContainer, + resolversContainer: resolversContainer, + requestersFinder: requestersFinder, + roundHandler: pcf.coreData.RoundHandler(), + forkDetector: forkDetector, + blockProcessor: blockProcessorComponents.blockProcessor, + epochStartTrigger: epochStartTrigger, + epochStartNotifier: pcf.coreData.EpochStartNotifierWithConfirm(), + blackListHandler: blackListHandler, + bootStorer: bootStorer, + headerSigVerifier: headerSigVerifier, + validatorsStatistics: validatorStatisticsProcessor, + validatorsProvider: validatorsProvider, + blockTracker: blockTracker, + pendingMiniBlocksHandler: pendingMiniBlocksHandler, + requestHandler: requestHandler, + txLogsProcessor: txLogsProcessor, + headerConstructionValidator: headerValidator, + headerIntegrityVerifier: pcf.bootstrapComponents.HeaderIntegrityVerifier(), + mainPeerShardMapper: mainPeerShardMapper, + fullArchivePeerShardMapper: fullArchivePeerShardMapper, + txSimulatorProcessor: txSimulatorProcessor, + miniBlocksPoolCleaner: mbsPoolsCleaner, + txsPoolCleaner: txsPoolsCleaner, + fallbackHeaderValidator: fallbackHeaderValidator, + whiteListHandler: pcf.whiteListHandler, + whiteListerVerifiedTxs: pcf.whiteListerVerifiedTxs, + historyRepository: pcf.historyRepo, + importStartHandler: pcf.importStartHandler, + requestedItemsHandler: pcf.requestedItemsHandler, + importHandler: pcf.importHandler, + 
nodeRedundancyHandler: nodeRedundancyHandler, + currentEpochProvider: currentEpochProvider, + vmFactoryForTxSimulator: vmFactoryForTxSimulate, + vmFactoryForProcessing: blockProcessorComponents.vmFactoryForProcessing, + scheduledTxsExecutionHandler: scheduledTxsExecutionHandler, + txsSender: txsSenderWithAccumulator, + hardforkTrigger: hardforkTrigger, + processedMiniBlocksTracker: processedMiniBlocksTracker, + esdtDataStorageForApi: pcf.esdtNftStorage, + accountsParser: pcf.accountsParser, + receiptsRepository: receiptsRepository, }, nil } @@ -1755,7 +1757,8 @@ func (pcf *processComponentsFactory) createExportFactoryHandler( requestHandler process.RequestHandler, resolversContainer dataRetriever.ResolversContainer, requestersContainer dataRetriever.RequestersContainer, - interceptorsContainer process.InterceptorsContainer, + mainInterceptorsContainer process.InterceptorsContainer, + fullArchiveInterceptorsContainer process.InterceptorsContainer, headerSigVerifier process.InterceptedHeaderSigVerifier, blockTracker process.ValidityAttester, ) (update.ExportFactoryHandler, error) { @@ -1765,39 +1768,46 @@ func (pcf *processComponentsFactory) createExportFactoryHandler( accountsDBs[state.UserAccountsState] = pcf.state.AccountsAdapter() accountsDBs[state.PeerAccountsState] = pcf.state.PeerAccounts() exportFolder := filepath.Join(pcf.flagsConfig.WorkingDir, hardforkConfig.ImportFolder) + nodeOperationMode := p2p.NormalOperation + if pcf.prefConfigs.Preferences.FullArchive { + nodeOperationMode = p2p.FullArchiveMode + } argsExporter := updateFactory.ArgsExporter{ - CoreComponents: pcf.coreData, - CryptoComponents: pcf.crypto, - StatusCoreComponents: pcf.statusCoreComponents, - HeaderValidator: headerValidator, - DataPool: pcf.data.Datapool(), - StorageService: pcf.data.StorageService(), - RequestHandler: requestHandler, - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - Messenger: pcf.network.NetworkMessenger(), - ActiveAccountsDBs: accountsDBs, - 
ExistingResolvers: resolversContainer, - ExistingRequesters: requestersContainer, - ExportFolder: exportFolder, - ExportTriesStorageConfig: hardforkConfig.ExportTriesStorageConfig, - ExportStateStorageConfig: hardforkConfig.ExportStateStorageConfig, - ExportStateKeysConfig: hardforkConfig.ExportKeysStorageConfig, - MaxTrieLevelInMemory: pcf.config.StateTriesConfig.MaxStateTrieLevelInMemory, - WhiteListHandler: pcf.whiteListHandler, - WhiteListerVerifiedTxs: pcf.whiteListerVerifiedTxs, - InterceptorsContainer: interceptorsContainer, - NodesCoordinator: pcf.nodesCoordinator, - HeaderSigVerifier: headerSigVerifier, - HeaderIntegrityVerifier: pcf.bootstrapComponents.HeaderIntegrityVerifier(), - ValidityAttester: blockTracker, - InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), - OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), - RoundHandler: pcf.coreData.RoundHandler(), - InterceptorDebugConfig: pcf.config.Debug.InterceptorResolver, - MaxHardCapForMissingNodes: pcf.config.TrieSync.MaxHardCapForMissingNodes, - NumConcurrentTrieSyncers: pcf.config.TrieSync.NumConcurrentTrieSyncers, - TrieSyncerVersion: pcf.config.TrieSync.TrieSyncerVersion, - PeersRatingHandler: pcf.network.PeersRatingHandler(), + CoreComponents: pcf.coreData, + CryptoComponents: pcf.crypto, + StatusCoreComponents: pcf.statusCoreComponents, + HeaderValidator: headerValidator, + DataPool: pcf.data.Datapool(), + StorageService: pcf.data.StorageService(), + RequestHandler: requestHandler, + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + MainMessenger: pcf.network.NetworkMessenger(), + FullArchiveMessenger: pcf.network.FullArchiveNetworkMessenger(), + ActiveAccountsDBs: accountsDBs, + ExistingResolvers: resolversContainer, + ExistingRequesters: requestersContainer, + ExportFolder: exportFolder, + ExportTriesStorageConfig: hardforkConfig.ExportTriesStorageConfig, + ExportStateStorageConfig: hardforkConfig.ExportStateStorageConfig, + ExportStateKeysConfig: 
hardforkConfig.ExportKeysStorageConfig, + MaxTrieLevelInMemory: pcf.config.StateTriesConfig.MaxStateTrieLevelInMemory, + WhiteListHandler: pcf.whiteListHandler, + WhiteListerVerifiedTxs: pcf.whiteListerVerifiedTxs, + MainInterceptorsContainer: mainInterceptorsContainer, + FullArchiveInterceptorsContainer: fullArchiveInterceptorsContainer, + NodesCoordinator: pcf.nodesCoordinator, + HeaderSigVerifier: headerSigVerifier, + HeaderIntegrityVerifier: pcf.bootstrapComponents.HeaderIntegrityVerifier(), + ValidityAttester: blockTracker, + InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), + OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), + RoundHandler: pcf.coreData.RoundHandler(), + InterceptorDebugConfig: pcf.config.Debug.InterceptorResolver, + MaxHardCapForMissingNodes: pcf.config.TrieSync.MaxHardCapForMissingNodes, + NumConcurrentTrieSyncers: pcf.config.TrieSync.NumConcurrentTrieSyncers, + TrieSyncerVersion: pcf.config.TrieSync.TrieSyncerVersion, + PeersRatingHandler: pcf.network.PeersRatingHandler(), + NodeOperationMode: nodeOperationMode, } return updateFactory.NewExportHandlerFactory(argsExporter) } @@ -1979,8 +1989,9 @@ func (pc *processComponents) Close() error { if !check.IfNil(pc.importHandler) { log.LogIfError(pc.importHandler.Close()) } - if !check.IfNil(pc.interceptorsContainer) { - log.LogIfError(pc.interceptorsContainer.Close()) + // only calling close on the mainInterceptorsContainer as it should be the same interceptors on full archive + if !check.IfNil(pc.mainInterceptorsContainer) { + log.LogIfError(pc.mainInterceptorsContainer.Close()) } if !check.IfNil(pc.vmFactoryForTxSimulator) { log.LogIfError(pc.vmFactoryForTxSimulator.Close()) diff --git a/factory/processing/processComponentsHandler.go b/factory/processing/processComponentsHandler.go index 49c7ac40444..c29985caa29 100644 --- a/factory/processing/processComponentsHandler.go +++ b/factory/processing/processComponentsHandler.go @@ -87,8 +87,11 @@ func (m 
*managedProcessComponents) CheckSubcomponents() error { if check.IfNil(m.processComponents.shardCoordinator) { return errors.ErrNilShardCoordinator } - if check.IfNil(m.processComponents.interceptorsContainer) { - return errors.ErrNilInterceptorsContainer + if check.IfNil(m.processComponents.mainInterceptorsContainer) { + return fmt.Errorf("%w on main network", errors.ErrNilInterceptorsContainer) + } + if check.IfNil(m.processComponents.fullArchiveInterceptorsContainer) { + return fmt.Errorf("%w on full archive network", errors.ErrNilInterceptorsContainer) } if check.IfNil(m.processComponents.resolversContainer) { return errors.ErrNilResolversContainer @@ -199,7 +202,7 @@ func (m *managedProcessComponents) ShardCoordinator() sharding.Coordinator { return m.processComponents.shardCoordinator } -// InterceptorsContainer returns the interceptors container +// InterceptorsContainer returns the interceptors container on the main network func (m *managedProcessComponents) InterceptorsContainer() process.InterceptorsContainer { m.mutProcessComponents.RLock() defer m.mutProcessComponents.RUnlock() @@ -208,7 +211,19 @@ func (m *managedProcessComponents) InterceptorsContainer() process.InterceptorsC return nil } - return m.processComponents.interceptorsContainer + return m.processComponents.mainInterceptorsContainer +} + +// FullArchiveInterceptorsContainer returns the interceptors container on the full archive network +func (m *managedProcessComponents) FullArchiveInterceptorsContainer() process.InterceptorsContainer { + m.mutProcessComponents.RLock() + defer m.mutProcessComponents.RUnlock() + + if m.processComponents == nil { + return nil + } + + return m.processComponents.fullArchiveInterceptorsContainer } // ResolversContainer returns the resolvers container diff --git a/factory/processing/processComponentsHandler_test.go b/factory/processing/processComponentsHandler_test.go index 534fef02ec8..d2081e48b6f 100644 --- a/factory/processing/processComponentsHandler_test.go 
+++ b/factory/processing/processComponentsHandler_test.go @@ -90,6 +90,8 @@ func TestManagedProcessComponents_Create(t *testing.T) { require.True(t, check.IfNil(managedProcessComponents.ScheduledTxsExecutionHandler())) require.True(t, check.IfNil(managedProcessComponents.ESDTDataStorageHandlerForAPI())) require.True(t, check.IfNil(managedProcessComponents.ReceiptsRepository())) + require.True(t, check.IfNil(managedProcessComponents.FullArchivePeerShardMapper())) + require.True(t, check.IfNil(managedProcessComponents.FullArchiveInterceptorsContainer())) err := managedProcessComponents.Create() require.NoError(t, err) @@ -131,6 +133,8 @@ func TestManagedProcessComponents_Create(t *testing.T) { require.False(t, check.IfNil(managedProcessComponents.ScheduledTxsExecutionHandler())) require.False(t, check.IfNil(managedProcessComponents.ESDTDataStorageHandlerForAPI())) require.False(t, check.IfNil(managedProcessComponents.ReceiptsRepository())) + require.False(t, check.IfNil(managedProcessComponents.FullArchivePeerShardMapper())) + require.False(t, check.IfNil(managedProcessComponents.FullArchiveInterceptorsContainer())) require.Equal(t, factory.ProcessComponentsName, managedProcessComponents.String()) }) diff --git a/integrationTests/mock/processComponentsStub.go b/integrationTests/mock/processComponentsStub.go index f12a3a1d59b..40d296129ed 100644 --- a/integrationTests/mock/processComponentsStub.go +++ b/integrationTests/mock/processComponentsStub.go @@ -21,6 +21,7 @@ type ProcessComponentsStub struct { ShardCoord sharding.Coordinator ShardCoordinatorCalled func() sharding.Coordinator IntContainer process.InterceptorsContainer + FullArchiveIntContainer process.InterceptorsContainer ResContainer dataRetriever.ResolversContainer ReqFinder dataRetriever.RequestersFinder RoundHandlerField consensus.RoundHandler @@ -96,6 +97,11 @@ func (pcs *ProcessComponentsStub) InterceptorsContainer() process.InterceptorsCo return pcs.IntContainer } +// FullArchiveInterceptorsContainer - 
+func (pcs *ProcessComponentsStub) FullArchiveInterceptorsContainer() process.InterceptorsContainer { + return pcs.FullArchiveIntContainer +} + // ResolversContainer - func (pcs *ProcessComponentsStub) ResolversContainer() dataRetriever.ResolversContainer { return pcs.ResContainer diff --git a/integrationTests/multiShard/hardFork/hardFork_test.go b/integrationTests/multiShard/hardFork/hardFork_test.go index c71aa6441f0..6cbf58bc4ae 100644 --- a/integrationTests/multiShard/hardFork/hardFork_test.go +++ b/integrationTests/multiShard/hardFork/hardFork_test.go @@ -585,7 +585,8 @@ func createHardForkExporter( StorageService: node.Storage, RequestHandler: node.RequestHandler, ShardCoordinator: node.ShardCoordinator, - Messenger: node.MainMessenger, + MainMessenger: node.MainMessenger, + FullArchiveMessenger: node.FullArchiveMessenger, ActiveAccountsDBs: accountsDBs, ExportFolder: node.ExportFolder, ExportTriesStorageConfig: config.StorageConfig{ @@ -602,21 +603,22 @@ func createHardForkExporter( MaxOpenFiles: 10, }, }, - ExportStateStorageConfig: exportConfig, - ExportStateKeysConfig: keysConfig, - MaxTrieLevelInMemory: uint(5), - WhiteListHandler: node.WhiteListHandler, - WhiteListerVerifiedTxs: node.WhiteListerVerifiedTxs, - InterceptorsContainer: node.InterceptorsContainer, - ExistingResolvers: node.ResolversContainer, - ExistingRequesters: node.RequestersContainer, - NodesCoordinator: node.NodesCoordinator, - HeaderSigVerifier: node.HeaderSigVerifier, - HeaderIntegrityVerifier: node.HeaderIntegrityVerifier, - ValidityAttester: node.BlockTracker, - OutputAntifloodHandler: &mock.NilAntifloodHandler{}, - InputAntifloodHandler: &mock.NilAntifloodHandler{}, - RoundHandler: &mock.RoundHandlerMock{}, + ExportStateStorageConfig: exportConfig, + ExportStateKeysConfig: keysConfig, + MaxTrieLevelInMemory: uint(5), + WhiteListHandler: node.WhiteListHandler, + WhiteListerVerifiedTxs: node.WhiteListerVerifiedTxs, + MainInterceptorsContainer: node.MainInterceptorsContainer, + 
FullArchiveInterceptorsContainer: node.FullArchiveInterceptorsContainer, + ExistingResolvers: node.ResolversContainer, + ExistingRequesters: node.RequestersContainer, + NodesCoordinator: node.NodesCoordinator, + HeaderSigVerifier: node.HeaderSigVerifier, + HeaderIntegrityVerifier: node.HeaderIntegrityVerifier, + ValidityAttester: node.BlockTracker, + OutputAntifloodHandler: &mock.NilAntifloodHandler{}, + InputAntifloodHandler: &mock.NilAntifloodHandler{}, + RoundHandler: &mock.RoundHandlerMock{}, InterceptorDebugConfig: config.InterceptorResolverDebugConfig{ Enabled: true, EnablePrint: true, @@ -631,6 +633,7 @@ func createHardForkExporter( TrieSyncerVersion: 2, PeersRatingHandler: node.PeersRatingHandler, CheckNodesOnDisk: false, + NodeOperationMode: node.NodeOperationMode, } exportHandler, err := factory.NewExportHandlerFactory(argsExportHandler) diff --git a/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go b/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go index 644223672cb..03679fb4201 100644 --- a/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go +++ b/integrationTests/p2p/networkSharding-hbv2/networkSharding_test.go @@ -119,8 +119,7 @@ func stopNodes(advertiser p2p.Messenger, nodesMap map[uint32][]*integrationTests _ = advertiser.Close() for _, nodes := range nodesMap { for _, n := range nodes { - _ = n.MainMessenger.Close() - _ = n.FullArchiveMessenger.Close() + n.Close() } } } diff --git a/integrationTests/state/stateTrieSync/stateTrieSync_test.go b/integrationTests/state/stateTrieSync/stateTrieSync_test.go index 39b144366d8..0b38e697d5d 100644 --- a/integrationTests/state/stateTrieSync/stateTrieSync_test.go +++ b/integrationTests/state/stateTrieSync/stateTrieSync_test.go @@ -54,7 +54,6 @@ func createTestProcessorNodeAndTrieStorage( GasScheduleMap: createTestGasMap(), }) _ = node.MainMessenger.CreateTopic(common.ConsensusTopic+node.ShardCoordinator.CommunicationIdentifier(node.ShardCoordinator.SelfId()), 
true) - _ = node.FullArchiveMessenger.CreateTopic(common.ConsensusTopic+node.ShardCoordinator.CommunicationIdentifier(node.ShardCoordinator.SelfId()), true) return node, mainStorer } @@ -87,8 +86,8 @@ func testNodeRequestInterceptTrieNodesWithMessenger(t *testing.T, version int) { _ = trieStorageRequester.DestroyUnit() _ = trieStorageResolver.DestroyUnit() - _ = nRequester.MainMessenger.Close() - _ = nResolver.MainMessenger.Close() + nRequester.Close() + nResolver.Close() }() time.Sleep(time.Second) @@ -208,8 +207,8 @@ func testNodeRequestInterceptTrieNodesWithMessengerNotSyncingShouldErr(t *testin _ = trieStorageRequester.DestroyUnit() _ = trieStorageResolver.DestroyUnit() - _ = nRequester.MainMessenger.Close() - _ = nResolver.MainMessenger.Close() + nRequester.Close() + nResolver.Close() }() time.Sleep(time.Second) @@ -316,8 +315,8 @@ func testMultipleDataTriesSync(t *testing.T, numAccounts int, numDataTrieLeaves _ = trieStorageRequester.DestroyUnit() _ = trieStorageResolver.DestroyUnit() - _ = nRequester.MainMessenger.Close() - _ = nResolver.MainMessenger.Close() + nRequester.Close() + nResolver.Close() }() time.Sleep(time.Second) diff --git a/integrationTests/testHeartbeatNode.go b/integrationTests/testHeartbeatNode.go index da8775fb329..530a1313b93 100644 --- a/integrationTests/testHeartbeatNode.go +++ b/integrationTests/testHeartbeatNode.go @@ -457,7 +457,6 @@ func (thn *TestHeartbeatNode) initResolversAndRequesters() { dataPacker, _ := partitioning.NewSimpleDataPacker(TestMarshaller) _ = thn.MainMessenger.CreateTopic(common.ConsensusTopic+thn.ShardCoordinator.CommunicationIdentifier(thn.ShardCoordinator.SelfId()), true) - _ = thn.FullArchiveMessenger.CreateTopic(common.ConsensusTopic+thn.ShardCoordinator.CommunicationIdentifier(thn.ShardCoordinator.SelfId()), true) payloadValidator, _ := validator.NewPeerAuthenticationPayloadValidator(thn.heartbeatExpiryTimespanInSec) resolverContainerFactoryArgs := resolverscontainer.FactoryArgs{ diff --git 
a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 303bb2f7a40..601b2481961 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -315,15 +315,16 @@ type TestProcessorNode struct { EconomicsData *economics.TestEconomicsData RatingsData *rating.RatingsData - BlockBlackListHandler process.TimeCacher - HeaderValidator process.HeaderConstructionValidator - BlockTracker process.BlockTracker - InterceptorsContainer process.InterceptorsContainer - ResolversContainer dataRetriever.ResolversContainer - RequestersContainer dataRetriever.RequestersContainer - RequestersFinder dataRetriever.RequestersFinder - RequestHandler process.RequestHandler - WasmVMChangeLocker common.Locker + BlockBlackListHandler process.TimeCacher + HeaderValidator process.HeaderConstructionValidator + BlockTracker process.BlockTracker + MainInterceptorsContainer process.InterceptorsContainer + FullArchiveInterceptorsContainer process.InterceptorsContainer + ResolversContainer dataRetriever.ResolversContainer + RequestersContainer dataRetriever.RequestersContainer + RequestersFinder dataRetriever.RequestersFinder + RequestHandler process.RequestHandler + WasmVMChangeLocker common.Locker InterimProcContainer process.IntermediateProcessorContainer TxProcessor process.TransactionProcessor @@ -776,7 +777,7 @@ func (tpn *TestProcessorNode) initTestNodeWithArgs(args ArgTestProcessorNode) { tpn.ShardCoordinator, tpn.OwnAccount.PeerSigHandler, tpn.DataPool.Headers(), - tpn.InterceptorsContainer, + tpn.MainInterceptorsContainer, &testscommon.AlarmSchedulerStub{}, testscommon.NewKeysHandlerSingleSignerMock( tpn.NodeKeys.MainKey.Sk, @@ -965,7 +966,7 @@ func (tpn *TestProcessorNode) InitializeProcessors(gasMap map[string]map[string] tpn.ShardCoordinator, tpn.OwnAccount.PeerSigHandler, tpn.DataPool.Headers(), - tpn.InterceptorsContainer, + tpn.MainInterceptorsContainer, &testscommon.AlarmSchedulerStub{}, 
testscommon.NewKeysHandlerSingleSignerMock( tpn.NodeKeys.MainKey.Sk, @@ -1242,7 +1243,7 @@ func (tpn *TestProcessorNode) initInterceptors(heartbeatPk string) { } interceptorContainerFactory, _ := interceptorscontainer.NewMetaInterceptorsContainerFactory(metaInterceptorContainerFactoryArgs) - tpn.InterceptorsContainer, err = interceptorContainerFactory.Create() + tpn.MainInterceptorsContainer, tpn.FullArchiveInterceptorsContainer, err = interceptorContainerFactory.Create() if err != nil { log.Debug("interceptor container factory Create", "error", err.Error()) } @@ -1310,7 +1311,7 @@ func (tpn *TestProcessorNode) initInterceptors(heartbeatPk string) { } interceptorContainerFactory, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(shardIntereptorContainerFactoryArgs) - tpn.InterceptorsContainer, err = interceptorContainerFactory.Create() + tpn.MainInterceptorsContainer, tpn.FullArchiveInterceptorsContainer, err = interceptorContainerFactory.Create() if err != nil { fmt.Println(err.Error()) } @@ -2365,7 +2366,8 @@ func (tpn *TestProcessorNode) initNode() { processComponents.BlackListHdl = tpn.BlockBlackListHandler processComponents.NodesCoord = tpn.NodesCoordinator processComponents.ShardCoord = tpn.ShardCoordinator - processComponents.IntContainer = tpn.InterceptorsContainer + processComponents.IntContainer = tpn.MainInterceptorsContainer + processComponents.FullArchiveIntContainer = tpn.FullArchiveInterceptorsContainer processComponents.HistoryRepositoryInternal = tpn.HistoryRepository processComponents.WhiteListHandlerInternal = tpn.WhiteListHandler processComponents.WhiteListerVerifiedTxsInternal = tpn.WhiteListerVerifiedTxs @@ -2423,7 +2425,7 @@ func (tpn *TestProcessorNode) initNode() { err = nodeDebugFactory.CreateInterceptedDebugHandler( tpn.Node, - tpn.InterceptorsContainer, + tpn.MainInterceptorsContainer, tpn.ResolversContainer, tpn.RequestersFinder, config.InterceptorResolverDebugConfig{ @@ -2960,7 +2962,8 @@ func (tpn *TestProcessorNode) 
createHeartbeatWithHardforkTrigger() { processComponents.BlackListHdl = tpn.BlockBlackListHandler processComponents.NodesCoord = tpn.NodesCoordinator processComponents.ShardCoord = tpn.ShardCoordinator - processComponents.IntContainer = tpn.InterceptorsContainer + processComponents.IntContainer = tpn.MainInterceptorsContainer + processComponents.FullArchiveIntContainer = tpn.FullArchiveInterceptorsContainer processComponents.ValidatorStatistics = &mock.ValidatorStatisticsProcessorStub{ GetValidatorInfoForRootHashCalled: func(_ []byte) (map[uint32][]*state.ValidatorInfo, error) { return map[uint32][]*state.ValidatorInfo{ diff --git a/node/nodeHelper.go b/node/nodeHelper.go index 7685c44f600..b1b5a27c816 100644 --- a/node/nodeHelper.go +++ b/node/nodeHelper.go @@ -23,14 +23,11 @@ func prepareOpenTopics( ) { selfID := shardCoordinator.SelfId() selfShardHeartbeatV2Topic := common.HeartbeatV2Topic + core.CommunicationIdentifierBetweenShards(selfID, selfID) - selfShardHeartbeatV2TopicFullArchive := common.FullArchiveTopicPrefix + common.HeartbeatV2Topic + core.CommunicationIdentifierBetweenShards(selfID, selfID) if selfID == core.MetachainShardId { antiflood.SetTopicsForAll( common.PeerAuthenticationTopic, selfShardHeartbeatV2Topic, - selfShardHeartbeatV2TopicFullArchive, - common.ConnectionTopic, - common.FullArchiveTopicPrefix+common.ConnectionTopic) + common.ConnectionTopic) return } @@ -38,9 +35,7 @@ func prepareOpenTopics( antiflood.SetTopicsForAll( common.PeerAuthenticationTopic, selfShardHeartbeatV2Topic, - selfShardHeartbeatV2TopicFullArchive, common.ConnectionTopic, - common.FullArchiveTopicPrefix+common.ConnectionTopic, selfShardTxTopic) } @@ -63,7 +58,7 @@ func CreateNode( ) (*Node, error) { prepareOpenTopics(networkComponents.InputAntiFloodHandler(), processComponents.ShardCoordinator()) - peerDenialEvaluator, err := preparePeerDenialEvaluators(networkComponents, processComponents) + peerDenialEvaluator, err := 
createAndAttachPeerDenialEvaluators(networkComponents, processComponents) if err != nil { return nil, err } @@ -128,7 +123,7 @@ func CreateNode( return nd, nil } -func preparePeerDenialEvaluators( +func createAndAttachPeerDenialEvaluators( networkComponents factory.NetworkComponentsHandler, processComponents factory.ProcessComponentsHandler, ) (p2p.PeerDenialEvaluator, error) { diff --git a/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go b/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go index f212b7fdc0c..cc7061d7158 100644 --- a/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go +++ b/process/factory/interceptorscontainer/baseInterceptorsContainerFactory.go @@ -8,7 +8,6 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/common/disabled" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/heartbeat" "github.com/multiversx/mx-chain-go/p2p" @@ -33,7 +32,8 @@ const ( ) type baseInterceptorsContainerFactory struct { - container process.InterceptorsContainer + mainContainer process.InterceptorsContainer + fullArchiveContainer process.InterceptorsContainer shardCoordinator sharding.Coordinator accounts state.AccountsAdapter store dataRetriever.StorageService @@ -247,7 +247,7 @@ func (bicf *baseInterceptorsContainerFactory) generateTxInterceptors() error { keys = append(keys, identifierTx) interceptorSlice = append(interceptorSlice, interceptor) - return bicf.container.AddMultiple(keys, interceptorSlice) + return bicf.addInterceptorsToContainers(keys, interceptorSlice) } func (bicf *baseInterceptorsContainerFactory) createOneTxInterceptor(topic string) (process.Interceptor, error) { @@ -437,7 +437,7 @@ func (bicf *baseInterceptorsContainerFactory) generateHeaderInterceptors() error return err } - return 
bicf.container.Add(identifierHdr, interceptor) + return bicf.addInterceptorsToContainers([]string{identifierHdr}, []process.Interceptor{interceptor}) } // ------- MiniBlocks interceptors @@ -472,15 +472,15 @@ func (bicf *baseInterceptorsContainerFactory) generateMiniBlocksInterceptors() e identifierAllShardsMiniBlocks := factory.MiniBlocksTopic + shardC.CommunicationIdentifier(core.AllShardId) - allShardsMiniBlocksInterceptorinterceptor, err := bicf.createOneMiniBlocksInterceptor(identifierAllShardsMiniBlocks) + allShardsMiniBlocksInterceptor, err := bicf.createOneMiniBlocksInterceptor(identifierAllShardsMiniBlocks) if err != nil { return err } keys[noOfShards+1] = identifierAllShardsMiniBlocks - interceptorsSlice[noOfShards+1] = allShardsMiniBlocksInterceptorinterceptor + interceptorsSlice[noOfShards+1] = allShardsMiniBlocksInterceptor - return bicf.container.AddMultiple(keys, interceptorsSlice) + return bicf.addInterceptorsToContainers(keys, interceptorsSlice) } func (bicf *baseInterceptorsContainerFactory) createOneMiniBlocksInterceptor(topic string) (process.Interceptor, error) { @@ -564,7 +564,7 @@ func (bicf *baseInterceptorsContainerFactory) generateMetachainHeaderInterceptor return err } - return bicf.container.Add(identifierHdr, interceptor) + return bicf.addInterceptorsToContainers([]string{identifierHdr}, []process.Interceptor{interceptor}) } func (bicf *baseInterceptorsContainerFactory) createOneTrieNodesInterceptor(topic string) (process.Interceptor, error) { @@ -645,7 +645,7 @@ func (bicf *baseInterceptorsContainerFactory) generateUnsignedTxsInterceptors() keys = append(keys, identifierScr) interceptorsSlice = append(interceptorsSlice, interceptor) - return bicf.container.AddMultiple(keys, interceptorsSlice) + return bicf.addInterceptorsToContainers(keys, interceptorsSlice) } //------- PeerAuthentication interceptor @@ -692,50 +692,24 @@ func (bicf *baseInterceptorsContainerFactory) generatePeerAuthenticationIntercep return err } - return 
bicf.container.Add(identifierPeerAuthentication, mdInterceptor) + return bicf.mainContainer.Add(identifierPeerAuthentication, mdInterceptor) } -//------- Heartbeat interceptors +//------- Heartbeat interceptor -func (bicf *baseInterceptorsContainerFactory) generateMainHeartbeatInterceptor() error { +func (bicf *baseInterceptorsContainerFactory) generateHeartbeatInterceptor() error { shardC := bicf.shardCoordinator identifierHeartbeat := common.HeartbeatV2Topic + shardC.CommunicationIdentifier(shardC.SelfId()) - interceptor, err := bicf.createOneHeartbeatV2Interceptor(identifierHeartbeat, bicf.dataPool.Heartbeats(), bicf.mainPeerShardMapper) + interceptor, err := bicf.createHeartbeatV2Interceptor(identifierHeartbeat, bicf.dataPool.Heartbeats(), bicf.mainPeerShardMapper) if err != nil { return err } - err = createTopicAndAssignHandlerOnMessenger(identifierHeartbeat, interceptor, true, bicf.mainMessenger) - if err != nil { - return err - } - - return bicf.container.Add(identifierHeartbeat, interceptor) -} - -func (bicf *baseInterceptorsContainerFactory) generateFullArchiveHeartbeatInterceptor() error { - if bicf.nodeOperationMode != p2p.FullArchiveMode { - return nil - } - - shardC := bicf.shardCoordinator - identifierHeartbeat := common.FullArchiveTopicPrefix + common.HeartbeatV2Topic + shardC.CommunicationIdentifier(shardC.SelfId()) - - interceptor, err := bicf.createOneHeartbeatV2Interceptor(identifierHeartbeat, disabled.NewCache(), bicf.fullArchivePeerShardMapper) - if err != nil { - return err - } - - err = createTopicAndAssignHandlerOnMessenger(identifierHeartbeat, interceptor, true, bicf.fullArchiveMessenger) - if err != nil { - return err - } - - return bicf.container.Add(identifierHeartbeat, interceptor) + return bicf.addInterceptorsToContainers([]string{identifierHeartbeat}, []process.Interceptor{interceptor}) } -func (bicf *baseInterceptorsContainerFactory) createOneHeartbeatV2Interceptor( +func (bicf *baseInterceptorsContainerFactory) 
createHeartbeatV2Interceptor( identifier string, heartbeatCahcer storage.Cacher, peerShardMapper process.PeerShardMapper, @@ -755,7 +729,7 @@ func (bicf *baseInterceptorsContainerFactory) createOneHeartbeatV2Interceptor( return nil, err } - return interceptors.NewSingleDataInterceptor( + interceptor, err := interceptors.NewSingleDataInterceptor( interceptors.ArgSingleDataInterceptor{ Topic: identifier, DataFactory: heartbeatFactory, @@ -767,47 +741,27 @@ func (bicf *baseInterceptorsContainerFactory) createOneHeartbeatV2Interceptor( CurrentPeerId: bicf.mainMessenger.ID(), }, ) -} - -// ------- PeerShard interceptors - -func (bicf *baseInterceptorsContainerFactory) generateMainPeerShardInterceptor() error { - identifier := common.ConnectionTopic - - interceptor, err := bicf.createOnePeerShardInterceptor(identifier, bicf.mainPeerShardMapper) if err != nil { - return err - } - - err = createTopicAndAssignHandlerOnMessenger(identifier, interceptor, true, bicf.mainMessenger) - if err != nil { - return err + return nil, err } - return bicf.container.Add(identifier, interceptor) + return bicf.createTopicAndAssignHandler(identifier, interceptor, true) } -func (bicf *baseInterceptorsContainerFactory) generateFullArchivePeerShardInterceptor() error { - if bicf.nodeOperationMode != p2p.FullArchiveMode { - return nil - } - - identifier := common.FullArchiveTopicPrefix + common.ConnectionTopic +// ------- PeerShard interceptor - interceptor, err := bicf.createOnePeerShardInterceptor(identifier, bicf.fullArchivePeerShardMapper) - if err != nil { - return err - } +func (bicf *baseInterceptorsContainerFactory) generatePeerShardInterceptor() error { + identifier := common.ConnectionTopic - err = createTopicAndAssignHandlerOnMessenger(identifier, interceptor, true, bicf.fullArchiveMessenger) + interceptor, err := bicf.createPeerShardInterceptor(identifier, bicf.mainPeerShardMapper) if err != nil { return err } - return bicf.container.Add(identifier, interceptor) + return 
bicf.addInterceptorsToContainers([]string{identifier}, []process.Interceptor{interceptor}) } -func (bicf *baseInterceptorsContainerFactory) createOnePeerShardInterceptor( +func (bicf *baseInterceptorsContainerFactory) createPeerShardInterceptor( identifier string, peerShardMapper process.PeerShardMapper, ) (process.Interceptor, error) { @@ -824,7 +778,7 @@ func (bicf *baseInterceptorsContainerFactory) createOnePeerShardInterceptor( return nil, err } - return interceptors.NewSingleDataInterceptor( + interceptor, err := interceptors.NewSingleDataInterceptor( interceptors.ArgSingleDataInterceptor{ Topic: identifier, DataFactory: interceptedPeerShardFactory, @@ -836,6 +790,11 @@ func (bicf *baseInterceptorsContainerFactory) createOnePeerShardInterceptor( PreferredPeersHolder: bicf.preferredPeersHolder, }, ) + if err != nil { + return nil, err + } + + return bicf.createTopicAndAssignHandler(identifier, interceptor, true) } func (bicf *baseInterceptorsContainerFactory) generateValidatorInfoInterceptor() error { @@ -878,5 +837,18 @@ func (bicf *baseInterceptorsContainerFactory) generateValidatorInfoInterceptor() return err } - return bicf.container.Add(identifier, interceptor) + return bicf.addInterceptorsToContainers([]string{identifier}, []process.Interceptor{interceptor}) +} + +func (bicf *baseInterceptorsContainerFactory) addInterceptorsToContainers(keys []string, interceptors []process.Interceptor) error { + err := bicf.mainContainer.AddMultiple(keys, interceptors) + if err != nil { + return err + } + + if bicf.nodeOperationMode != p2p.FullArchiveMode { + return nil + } + + return bicf.fullArchiveContainer.AddMultiple(keys, interceptors) } diff --git a/process/factory/interceptorscontainer/metaInterceptorsContainerFactory.go b/process/factory/interceptorscontainer/metaInterceptorsContainerFactory.go index bd1d3bee45a..38d3e460bce 100644 --- a/process/factory/interceptorscontainer/metaInterceptorsContainerFactory.go +++ 
b/process/factory/interceptorscontainer/metaInterceptorsContainerFactory.go @@ -101,9 +101,9 @@ func NewMetaInterceptorsContainerFactory( PeerID: args.MainMessenger.ID(), } - container := containers.NewInterceptorsContainer() base := &baseInterceptorsContainerFactory{ - container: container, + mainContainer: containers.NewInterceptorsContainer(), + fullArchiveContainer: containers.NewInterceptorsContainer(), shardCoordinator: args.ShardCoordinator, mainMessenger: args.MainMessenger, fullArchiveMessenger: args.FullArchiveMessenger, @@ -139,73 +139,63 @@ func NewMetaInterceptorsContainerFactory( } // Create returns an interceptor container that will hold all interceptors in the system -func (micf *metaInterceptorsContainerFactory) Create() (process.InterceptorsContainer, error) { +func (micf *metaInterceptorsContainerFactory) Create() (process.InterceptorsContainer, process.InterceptorsContainer, error) { err := micf.generateMetachainHeaderInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = micf.generateShardHeaderInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = micf.generateTxInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = micf.generateUnsignedTxsInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = micf.generateRewardTxInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = micf.generateMiniBlocksInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = micf.generateTrieNodesInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = micf.generatePeerAuthenticationInterceptor() if err != nil { - return nil, err + return nil, nil, err } - err = micf.generateMainHeartbeatInterceptor() + err = micf.generateHeartbeatInterceptor() if err != nil { - return nil, err + return nil, nil, err } - err = micf.generateFullArchiveHeartbeatInterceptor() + err = micf.generatePeerShardInterceptor() if 
err != nil { - return nil, err - } - - err = micf.generateMainPeerShardInterceptor() - if err != nil { - return nil, err - } - - err = micf.generateFullArchivePeerShardInterceptor() - if err != nil { - return nil, err + return nil, nil, err } err = micf.generateValidatorInfoInterceptor() if err != nil { - return nil, err + return nil, nil, err } - return micf.container, nil + return micf.mainContainer, micf.fullArchiveContainer, nil } // AddShardTrieNodeInterceptors will add the shard trie node interceptors into the existing container @@ -253,7 +243,7 @@ func (micf *metaInterceptorsContainerFactory) generateShardHeaderInterceptors() interceptorsSlice[int(idx)] = interceptor } - return micf.container.AddMultiple(keys, interceptorsSlice) + return micf.addInterceptorsToContainers(keys, interceptorsSlice) } func (micf *metaInterceptorsContainerFactory) createOneShardHeaderInterceptor(topic string) (process.Interceptor, error) { @@ -312,7 +302,7 @@ func (micf *metaInterceptorsContainerFactory) generateTrieNodesInterceptors() er keys = append(keys, identifierTrieNodes) trieInterceptors = append(trieInterceptors, interceptor) - return micf.container.AddMultiple(keys, trieInterceptors) + return micf.addInterceptorsToContainers(keys, trieInterceptors) } //------- Reward transactions interceptors @@ -336,7 +326,7 @@ func (micf *metaInterceptorsContainerFactory) generateRewardTxInterceptors() err interceptorSlice[int(idx)] = interceptor } - return micf.container.AddMultiple(keys, interceptorSlice) + return micf.addInterceptorsToContainers(keys, interceptorSlice) } // IsInterfaceNil returns true if there is no value under the interface diff --git a/process/factory/interceptorscontainer/metaInterceptorsContainerFactory_test.go b/process/factory/interceptorscontainer/metaInterceptorsContainerFactory_test.go index c794c94dafb..34e96b201da 100644 --- a/process/factory/interceptorscontainer/metaInterceptorsContainerFactory_test.go +++ 
b/process/factory/interceptorscontainer/metaInterceptorsContainerFactory_test.go @@ -517,11 +517,11 @@ func TestMetaInterceptorsContainerFactory_CreateTopicsAndRegisterFailure(t *test testCreateMetaTopicShouldFailOnAllMessenger(t, "generateValidatorInfoInterceptor", common.ValidatorInfoTopic, "") + testCreateMetaTopicShouldFailOnAllMessenger(t, "generateHeartbeatInterceptor", common.HeartbeatV2Topic, "") + + testCreateMetaTopicShouldFailOnAllMessenger(t, "generatePeerShardInterceptor", common.ConnectionTopic, "") + t.Run("generatePeerAuthenticationInterceptor_main", testCreateMetaTopicShouldFail(common.PeerAuthenticationTopic, "")) - t.Run("generateHeartbeatInterceptor_main", testCreateMetaTopicShouldFail(common.HeartbeatV2Topic, "")) - t.Run("generateHeartbeatInterceptor_full_archive", testCreateMetaTopicShouldFail(common.FullArchiveTopicPrefix+common.HeartbeatV2Topic, "")) - t.Run("generatePeerShardInterceptor_main", testCreateMetaTopicShouldFail(common.ConnectionTopic, "")) - t.Run("generatePeerShardInterceptor_full_archive", testCreateMetaTopicShouldFail(common.FullArchiveTopicPrefix+common.ConnectionTopic, "")) } func testCreateMetaTopicShouldFailOnAllMessenger(t *testing.T, testNamePrefix string, matchStrToErrOnCreate string, matchStrToErrOnRegister string) { @@ -543,9 +543,10 @@ func testCreateMetaTopicShouldFail(matchStrToErrOnCreate string, matchStrToErrOn } icf, _ := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) - container, err := icf.Create() + mainContainer, fullArchiveConatiner, err := icf.Create() - assert.Nil(t, container) + assert.Nil(t, mainContainer) + assert.Nil(t, fullArchiveConatiner) assert.Equal(t, errExpected, err) } } @@ -557,9 +558,10 @@ func TestMetaInterceptorsContainerFactory_CreateShouldWork(t *testing.T) { args := getArgumentsMeta(coreComp, cryptoComp) icf, _ := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) - container, err := icf.Create() + mainContainer, fullArchiveContainer, err := icf.Create() 
- assert.NotNil(t, container) + assert.NotNil(t, mainContainer) + assert.NotNil(t, fullArchiveContainer) assert.Nil(t, err) } @@ -589,7 +591,7 @@ func TestMetaInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { icf, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) require.Nil(t, err) - container, err := icf.Create() + mainContainer, fullArchiveContainer, err := icf.Create() numInterceptorsMetablock := 1 numInterceptorsShardHeadersForMetachain := noOfShards @@ -608,11 +610,12 @@ func TestMetaInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { numInterceptorsShardValidatorInfoForMetachain + numInterceptorValidatorInfo assert.Nil(t, err) - assert.Equal(t, totalInterceptors, container.Len()) + assert.Equal(t, totalInterceptors, mainContainer.Len()) + assert.Equal(t, 0, fullArchiveContainer.Len()) - err = icf.AddShardTrieNodeInterceptors(container) + err = icf.AddShardTrieNodeInterceptors(mainContainer) assert.Nil(t, err) - assert.Equal(t, totalInterceptors+noOfShards, container.Len()) + assert.Equal(t, totalInterceptors+noOfShards, mainContainer.Len()) }) t.Run("full archive mode", func(t *testing.T) { t.Parallel() @@ -638,7 +641,7 @@ func TestMetaInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { icf, err := interceptorscontainer.NewMetaInterceptorsContainerFactory(args) require.Nil(t, err) - container, err := icf.Create() + mainContainer, fullArchiveContainer, err := icf.Create() numInterceptorsMetablock := 1 numInterceptorsShardHeadersForMetachain := noOfShards @@ -648,8 +651,8 @@ func TestMetaInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { numInterceptorsRewardsTxsForMetachain := noOfShards numInterceptorsTrieNodes := 2 numInterceptorsPeerAuthForMetachain := 1 - numInterceptorsHeartbeatForMetachain := 2 // one for full archive - numInterceptorsShardValidatorInfoForMetachain := 2 // one for full archive + numInterceptorsHeartbeatForMetachain := 1 + 
numInterceptorsShardValidatorInfoForMetachain := 1 numInterceptorValidatorInfo := 1 totalInterceptors := numInterceptorsMetablock + numInterceptorsShardHeadersForMetachain + numInterceptorsTrieNodes + numInterceptorsTransactionsForMetachain + numInterceptorsUnsignedTxsForMetachain + numInterceptorsMiniBlocksForMetachain + @@ -657,11 +660,16 @@ func TestMetaInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { numInterceptorsShardValidatorInfoForMetachain + numInterceptorValidatorInfo assert.Nil(t, err) - assert.Equal(t, totalInterceptors, container.Len()) + assert.Equal(t, totalInterceptors, mainContainer.Len()) + assert.Equal(t, totalInterceptors-1, fullArchiveContainer.Len()) // no peerAuthentication needed + + err = icf.AddShardTrieNodeInterceptors(mainContainer) + assert.Nil(t, err) + assert.Equal(t, totalInterceptors+noOfShards, mainContainer.Len()) - err = icf.AddShardTrieNodeInterceptors(container) + err = icf.AddShardTrieNodeInterceptors(fullArchiveContainer) assert.Nil(t, err) - assert.Equal(t, totalInterceptors+noOfShards, container.Len()) + assert.Equal(t, totalInterceptors-1+noOfShards, fullArchiveContainer.Len()) }) } diff --git a/process/factory/interceptorscontainer/shardInterceptorsContainerFactory.go b/process/factory/interceptorscontainer/shardInterceptorsContainerFactory.go index 87d18138ba0..beef288c54c 100644 --- a/process/factory/interceptorscontainer/shardInterceptorsContainerFactory.go +++ b/process/factory/interceptorscontainer/shardInterceptorsContainerFactory.go @@ -100,9 +100,9 @@ func NewShardInterceptorsContainerFactory( PeerID: args.MainMessenger.ID(), } - container := containers.NewInterceptorsContainer() base := &baseInterceptorsContainerFactory{ - container: container, + mainContainer: containers.NewInterceptorsContainer(), + fullArchiveContainer: containers.NewInterceptorsContainer(), accounts: args.Accounts, shardCoordinator: args.ShardCoordinator, mainMessenger: args.MainMessenger, @@ -138,73 +138,63 @@ func 
NewShardInterceptorsContainerFactory( } // Create returns an interceptor container that will hold all interceptors in the system -func (sicf *shardInterceptorsContainerFactory) Create() (process.InterceptorsContainer, error) { +func (sicf *shardInterceptorsContainerFactory) Create() (process.InterceptorsContainer, process.InterceptorsContainer, error) { err := sicf.generateTxInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = sicf.generateUnsignedTxsInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = sicf.generateRewardTxInterceptor() if err != nil { - return nil, err + return nil, nil, err } err = sicf.generateHeaderInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = sicf.generateMiniBlocksInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = sicf.generateMetachainHeaderInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = sicf.generateTrieNodesInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = sicf.generatePeerAuthenticationInterceptor() if err != nil { - return nil, err + return nil, nil, err } - err = sicf.generateMainHeartbeatInterceptor() + err = sicf.generateHeartbeatInterceptor() if err != nil { - return nil, err + return nil, nil, err } - err = sicf.generateFullArchiveHeartbeatInterceptor() + err = sicf.generatePeerShardInterceptor() if err != nil { - return nil, err - } - - err = sicf.generateMainPeerShardInterceptor() - if err != nil { - return nil, err - } - - err = sicf.generateFullArchivePeerShardInterceptor() - if err != nil { - return nil, err + return nil, nil, err } err = sicf.generateValidatorInfoInterceptor() if err != nil { - return nil, err + return nil, nil, err } - return sicf.container, nil + return sicf.mainContainer, sicf.fullArchiveContainer, nil } func (sicf *shardInterceptorsContainerFactory) generateTrieNodesInterceptors() error { @@ -222,7 +212,7 @@ func (sicf 
*shardInterceptorsContainerFactory) generateTrieNodesInterceptors() e keys = append(keys, identifierTrieNodes) interceptorsSlice = append(interceptorsSlice, interceptor) - return sicf.container.AddMultiple(keys, interceptorsSlice) + return sicf.addInterceptorsToContainers(keys, interceptorsSlice) } // ------- Reward transactions interceptors @@ -242,7 +232,7 @@ func (sicf *shardInterceptorsContainerFactory) generateRewardTxInterceptor() err keys = append(keys, identifierTx) interceptorSlice = append(interceptorSlice, interceptor) - return sicf.container.AddMultiple(keys, interceptorSlice) + return sicf.addInterceptorsToContainers(keys, interceptorSlice) } // IsInterfaceNil returns true if there is no value under the interface diff --git a/process/factory/interceptorscontainer/shardInterceptorsContainerFactory_test.go b/process/factory/interceptorscontainer/shardInterceptorsContainerFactory_test.go index 009cde05e94..4f1ec24c12d 100644 --- a/process/factory/interceptorscontainer/shardInterceptorsContainerFactory_test.go +++ b/process/factory/interceptorscontainer/shardInterceptorsContainerFactory_test.go @@ -475,11 +475,11 @@ func TestShardInterceptorsContainerFactory_CreateTopicsAndRegisterFailure(t *tes testCreateShardTopicShouldFailOnAllMessenger(t, "generateValidatorInfoInterceptor", common.ValidatorInfoTopic, "") + testCreateShardTopicShouldFailOnAllMessenger(t, "generateHeartbeatInterceptor", common.HeartbeatV2Topic, "") + + testCreateShardTopicShouldFailOnAllMessenger(t, "generatePeerShardInterceptor", common.ConnectionTopic, "") + t.Run("generatePeerAuthenticationInterceptor_main", testCreateShardTopicShouldFail(common.PeerAuthenticationTopic, "")) - t.Run("generateHeartbeatInterceptor_main", testCreateShardTopicShouldFail(common.HeartbeatV2Topic, "")) - t.Run("generateHeartbeatInterceptor_full_archive", testCreateShardTopicShouldFail(common.FullArchiveTopicPrefix+common.HeartbeatV2Topic, "")) - t.Run("generatePeerShardInterceptor_main",
testCreateShardTopicShouldFail(common.ConnectionTopic, "")) - t.Run("generatePeerShardInterceptor_full_archive", testCreateShardTopicShouldFail(common.FullArchiveTopicPrefix+common.ConnectionTopic, "")) } func testCreateShardTopicShouldFailOnAllMessenger(t *testing.T, testNamePrefix string, matchStrToErrOnCreate string, matchStrToErrOnRegister string) { t.Run(testNamePrefix+"main messenger", testCreateShardTopicShouldFail(matchStrToErrOnCreate, matchStrToErrOnRegister)) @@ -500,9 +500,10 @@ func testCreateShardTopicShouldFail(matchStrToErrOnCreate string, matchStrToErrO } icf, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(args) - container, err := icf.Create() + mainContainer, fullArchiveContainer, err := icf.Create() - assert.Nil(t, container) + assert.Nil(t, mainContainer) + assert.Nil(t, fullArchiveContainer) assert.Equal(t, errExpected, err) } } @@ -560,9 +561,10 @@ func TestShardInterceptorsContainerFactory_CreateShouldWork(t *testing.T) { icf, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(args) - container, err := icf.Create() + mainContainer, fullArchiveContainer, err := icf.Create() - assert.NotNil(t, container) + assert.NotNil(t, mainContainer) + assert.NotNil(t, fullArchiveContainer) assert.Nil(t, err) } @@ -594,7 +596,7 @@ func TestShardInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { icf, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(args) - container, err := icf.Create() + mainContainer, fullArchiveContainer, err := icf.Create() numInterceptorTxs := noOfShards + 1 numInterceptorsUnsignedTxs := numInterceptorTxs @@ -612,7 +614,8 @@ func TestShardInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { numInterceptorPeerAuth + numInterceptorHeartbeat + numInterceptorsShardValidatorInfo + numInterceptorValidatorInfo assert.Nil(t, err) - assert.Equal(t, totalInterceptors, container.Len()) + assert.Equal(t, totalInterceptors, mainContainer.Len()) + assert.Equal(t, 0, 
fullArchiveContainer.Len()) }) t.Run("full archive mode", func(t *testing.T) { @@ -641,7 +644,7 @@ func TestShardInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { icf, _ := interceptorscontainer.NewShardInterceptorsContainerFactory(args) - container, err := icf.Create() + mainContainer, fullArchiveContainer, err := icf.Create() numInterceptorTxs := noOfShards + 1 numInterceptorsUnsignedTxs := numInterceptorTxs @@ -651,15 +654,16 @@ func TestShardInterceptorsContainerFactory_With4ShardsShouldWork(t *testing.T) { numInterceptorMetachainHeaders := 1 numInterceptorTrieNodes := 1 numInterceptorPeerAuth := 1 - numInterceptorHeartbeat := 2 // one for full archive - numInterceptorsShardValidatorInfo := 2 // one for full archive + numInterceptorHeartbeat := 1 + numInterceptorsShardValidatorInfo := 1 numInterceptorValidatorInfo := 1 totalInterceptors := numInterceptorTxs + numInterceptorsUnsignedTxs + numInterceptorsRewardTxs + numInterceptorHeaders + numInterceptorMiniBlocks + numInterceptorMetachainHeaders + numInterceptorTrieNodes + numInterceptorPeerAuth + numInterceptorHeartbeat + numInterceptorsShardValidatorInfo + numInterceptorValidatorInfo assert.Nil(t, err) - assert.Equal(t, totalInterceptors, container.Len()) + assert.Equal(t, totalInterceptors, mainContainer.Len()) + assert.Equal(t, totalInterceptors-1, fullArchiveContainer.Len()) // no peerAuthentication needed }) } diff --git a/process/interface.go b/process/interface.go index 806fd3f84de..8b131f1d7da 100644 --- a/process/interface.go +++ b/process/interface.go @@ -385,7 +385,7 @@ type InterceptorsContainer interface { // InterceptorsContainerFactory defines the functionality to create an interceptors container type InterceptorsContainerFactory interface { - Create() (InterceptorsContainer, error) + Create() (InterceptorsContainer, InterceptorsContainer, error) IsInterfaceNil() bool } diff --git a/common/disabled/cache.go b/storage/disabled/cache.go similarity index 100% rename from 
common/disabled/cache.go rename to storage/disabled/cache.go diff --git a/update/factory/exportHandlerFactory.go b/update/factory/exportHandlerFactory.go index bb80be0101a..8b8a02237fa 100644 --- a/update/factory/exportHandlerFactory.go +++ b/update/factory/exportHandlerFactory.go @@ -37,79 +37,85 @@ var log = logger.GetOrCreate("update/factory") // ArgsExporter is the argument structure to create a new exporter type ArgsExporter struct { - CoreComponents process.CoreComponentsHolder - CryptoComponents process.CryptoComponentsHolder - StatusCoreComponents process.StatusCoreComponentsHolder - HeaderValidator epochStart.HeaderValidator - DataPool dataRetriever.PoolsHolder - StorageService dataRetriever.StorageService - RequestHandler process.RequestHandler - ShardCoordinator sharding.Coordinator - Messenger p2p.Messenger - ActiveAccountsDBs map[state.AccountsDbIdentifier]state.AccountsAdapter - ExistingResolvers dataRetriever.ResolversContainer - ExistingRequesters dataRetriever.RequestersContainer - ExportFolder string - ExportTriesStorageConfig config.StorageConfig - ExportStateStorageConfig config.StorageConfig - ExportStateKeysConfig config.StorageConfig - MaxTrieLevelInMemory uint - WhiteListHandler process.WhiteListHandler - WhiteListerVerifiedTxs process.WhiteListHandler - InterceptorsContainer process.InterceptorsContainer - NodesCoordinator nodesCoordinator.NodesCoordinator - HeaderSigVerifier process.InterceptedHeaderSigVerifier - HeaderIntegrityVerifier process.HeaderIntegrityVerifier - ValidityAttester process.ValidityAttester - InputAntifloodHandler process.P2PAntifloodHandler - OutputAntifloodHandler process.P2PAntifloodHandler - RoundHandler process.RoundHandler - PeersRatingHandler dataRetriever.PeersRatingHandler - InterceptorDebugConfig config.InterceptorResolverDebugConfig - MaxHardCapForMissingNodes int - NumConcurrentTrieSyncers int - TrieSyncerVersion int - CheckNodesOnDisk bool + CoreComponents process.CoreComponentsHolder + CryptoComponents 
process.CryptoComponentsHolder + StatusCoreComponents process.StatusCoreComponentsHolder + HeaderValidator epochStart.HeaderValidator + DataPool dataRetriever.PoolsHolder + StorageService dataRetriever.StorageService + RequestHandler process.RequestHandler + ShardCoordinator sharding.Coordinator + MainMessenger p2p.Messenger + FullArchiveMessenger p2p.Messenger + ActiveAccountsDBs map[state.AccountsDbIdentifier]state.AccountsAdapter + ExistingResolvers dataRetriever.ResolversContainer + ExistingRequesters dataRetriever.RequestersContainer + ExportFolder string + ExportTriesStorageConfig config.StorageConfig + ExportStateStorageConfig config.StorageConfig + ExportStateKeysConfig config.StorageConfig + MaxTrieLevelInMemory uint + WhiteListHandler process.WhiteListHandler + WhiteListerVerifiedTxs process.WhiteListHandler + MainInterceptorsContainer process.InterceptorsContainer + FullArchiveInterceptorsContainer process.InterceptorsContainer + NodesCoordinator nodesCoordinator.NodesCoordinator + HeaderSigVerifier process.InterceptedHeaderSigVerifier + HeaderIntegrityVerifier process.HeaderIntegrityVerifier + ValidityAttester process.ValidityAttester + InputAntifloodHandler process.P2PAntifloodHandler + OutputAntifloodHandler process.P2PAntifloodHandler + RoundHandler process.RoundHandler + PeersRatingHandler dataRetriever.PeersRatingHandler + InterceptorDebugConfig config.InterceptorResolverDebugConfig + MaxHardCapForMissingNodes int + NumConcurrentTrieSyncers int + TrieSyncerVersion int + CheckNodesOnDisk bool + NodeOperationMode p2p.NodeOperation } type exportHandlerFactory struct { - CoreComponents process.CoreComponentsHolder - CryptoComponents process.CryptoComponentsHolder - statusCoreComponents process.StatusCoreComponentsHolder - headerValidator epochStart.HeaderValidator - dataPool dataRetriever.PoolsHolder - storageService dataRetriever.StorageService - requestHandler process.RequestHandler - shardCoordinator sharding.Coordinator - messenger p2p.Messenger - 
activeAccountsDBs map[state.AccountsDbIdentifier]state.AccountsAdapter - exportFolder string - exportTriesStorageConfig config.StorageConfig - exportStateStorageConfig config.StorageConfig - exportStateKeysConfig config.StorageConfig - maxTrieLevelInMemory uint - whiteListHandler process.WhiteListHandler - whiteListerVerifiedTxs process.WhiteListHandler - interceptorsContainer process.InterceptorsContainer - existingResolvers dataRetriever.ResolversContainer - existingRequesters dataRetriever.RequestersContainer - epochStartTrigger epochStart.TriggerHandler - accounts state.AccountsAdapter - nodesCoordinator nodesCoordinator.NodesCoordinator - headerSigVerifier process.InterceptedHeaderSigVerifier - headerIntegrityVerifier process.HeaderIntegrityVerifier - validityAttester process.ValidityAttester - resolverContainer dataRetriever.ResolversContainer - requestersContainer dataRetriever.RequestersContainer - inputAntifloodHandler process.P2PAntifloodHandler - outputAntifloodHandler process.P2PAntifloodHandler - roundHandler process.RoundHandler - peersRatingHandler dataRetriever.PeersRatingHandler - interceptorDebugConfig config.InterceptorResolverDebugConfig - maxHardCapForMissingNodes int - numConcurrentTrieSyncers int - trieSyncerVersion int - checkNodesOnDisk bool + CoreComponents process.CoreComponentsHolder + CryptoComponents process.CryptoComponentsHolder + statusCoreComponents process.StatusCoreComponentsHolder + headerValidator epochStart.HeaderValidator + dataPool dataRetriever.PoolsHolder + storageService dataRetriever.StorageService + requestHandler process.RequestHandler + shardCoordinator sharding.Coordinator + mainMessenger p2p.Messenger + fullArchiveMessenger p2p.Messenger + activeAccountsDBs map[state.AccountsDbIdentifier]state.AccountsAdapter + exportFolder string + exportTriesStorageConfig config.StorageConfig + exportStateStorageConfig config.StorageConfig + exportStateKeysConfig config.StorageConfig + maxTrieLevelInMemory uint + whiteListHandler 
process.WhiteListHandler + whiteListerVerifiedTxs process.WhiteListHandler + mainInterceptorsContainer process.InterceptorsContainer + fullArchiveInterceptorsContainer process.InterceptorsContainer + existingResolvers dataRetriever.ResolversContainer + existingRequesters dataRetriever.RequestersContainer + epochStartTrigger epochStart.TriggerHandler + accounts state.AccountsAdapter + nodesCoordinator nodesCoordinator.NodesCoordinator + headerSigVerifier process.InterceptedHeaderSigVerifier + headerIntegrityVerifier process.HeaderIntegrityVerifier + validityAttester process.ValidityAttester + resolverContainer dataRetriever.ResolversContainer + requestersContainer dataRetriever.RequestersContainer + inputAntifloodHandler process.P2PAntifloodHandler + outputAntifloodHandler process.P2PAntifloodHandler + roundHandler process.RoundHandler + peersRatingHandler dataRetriever.PeersRatingHandler + interceptorDebugConfig config.InterceptorResolverDebugConfig + maxHardCapForMissingNodes int + numConcurrentTrieSyncers int + trieSyncerVersion int + checkNodesOnDisk bool + nodeOperationMode p2p.NodeOperation } // NewExportHandlerFactory creates an exporter factory @@ -144,8 +150,11 @@ func NewExportHandlerFactory(args ArgsExporter) (*exportHandlerFactory, error) { if check.IfNil(args.RequestHandler) { return nil, update.ErrNilRequestHandler } - if check.IfNil(args.Messenger) { - return nil, update.ErrNilMessenger + if check.IfNil(args.MainMessenger) { + return nil, fmt.Errorf("%w on main network", update.ErrNilMessenger) + } + if check.IfNil(args.FullArchiveMessenger) { + return nil, fmt.Errorf("%w on full archive network", update.ErrNilMessenger) } if args.ActiveAccountsDBs == nil { return nil, update.ErrNilAccounts @@ -156,8 +165,11 @@ func NewExportHandlerFactory(args ArgsExporter) (*exportHandlerFactory, error) { if check.IfNil(args.WhiteListerVerifiedTxs) { return nil, update.ErrNilWhiteListHandler } - if check.IfNil(args.InterceptorsContainer) { - return nil, 
update.ErrNilInterceptorsContainer + if check.IfNil(args.MainInterceptorsContainer) { + return nil, fmt.Errorf("%w on main network", update.ErrNilInterceptorsContainer) + } + if check.IfNil(args.FullArchiveInterceptorsContainer) { + return nil, fmt.Errorf("%w on full archive network", update.ErrNilInterceptorsContainer) } if check.IfNil(args.ExistingResolvers) { return nil, update.ErrNilResolverContainer @@ -238,40 +250,43 @@ func NewExportHandlerFactory(args ArgsExporter) (*exportHandlerFactory, error) { } e := &exportHandlerFactory{ - CoreComponents: args.CoreComponents, - CryptoComponents: args.CryptoComponents, - headerValidator: args.HeaderValidator, - dataPool: args.DataPool, - storageService: args.StorageService, - requestHandler: args.RequestHandler, - shardCoordinator: args.ShardCoordinator, - messenger: args.Messenger, - activeAccountsDBs: args.ActiveAccountsDBs, - exportFolder: args.ExportFolder, - exportTriesStorageConfig: args.ExportTriesStorageConfig, - exportStateStorageConfig: args.ExportStateStorageConfig, - exportStateKeysConfig: args.ExportStateKeysConfig, - interceptorsContainer: args.InterceptorsContainer, - whiteListHandler: args.WhiteListHandler, - whiteListerVerifiedTxs: args.WhiteListerVerifiedTxs, - existingResolvers: args.ExistingResolvers, - existingRequesters: args.ExistingRequesters, - accounts: args.ActiveAccountsDBs[state.UserAccountsState], - nodesCoordinator: args.NodesCoordinator, - headerSigVerifier: args.HeaderSigVerifier, - headerIntegrityVerifier: args.HeaderIntegrityVerifier, - validityAttester: args.ValidityAttester, - inputAntifloodHandler: args.InputAntifloodHandler, - outputAntifloodHandler: args.OutputAntifloodHandler, - maxTrieLevelInMemory: args.MaxTrieLevelInMemory, - roundHandler: args.RoundHandler, - peersRatingHandler: args.PeersRatingHandler, - interceptorDebugConfig: args.InterceptorDebugConfig, - maxHardCapForMissingNodes: args.MaxHardCapForMissingNodes, - numConcurrentTrieSyncers: args.NumConcurrentTrieSyncers, 
- trieSyncerVersion: args.TrieSyncerVersion, - checkNodesOnDisk: args.CheckNodesOnDisk, - statusCoreComponents: args.StatusCoreComponents, + CoreComponents: args.CoreComponents, + CryptoComponents: args.CryptoComponents, + headerValidator: args.HeaderValidator, + dataPool: args.DataPool, + storageService: args.StorageService, + requestHandler: args.RequestHandler, + shardCoordinator: args.ShardCoordinator, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, + activeAccountsDBs: args.ActiveAccountsDBs, + exportFolder: args.ExportFolder, + exportTriesStorageConfig: args.ExportTriesStorageConfig, + exportStateStorageConfig: args.ExportStateStorageConfig, + exportStateKeysConfig: args.ExportStateKeysConfig, + mainInterceptorsContainer: args.MainInterceptorsContainer, + fullArchiveInterceptorsContainer: args.FullArchiveInterceptorsContainer, + whiteListHandler: args.WhiteListHandler, + whiteListerVerifiedTxs: args.WhiteListerVerifiedTxs, + existingResolvers: args.ExistingResolvers, + existingRequesters: args.ExistingRequesters, + accounts: args.ActiveAccountsDBs[state.UserAccountsState], + nodesCoordinator: args.NodesCoordinator, + headerSigVerifier: args.HeaderSigVerifier, + headerIntegrityVerifier: args.HeaderIntegrityVerifier, + validityAttester: args.ValidityAttester, + inputAntifloodHandler: args.InputAntifloodHandler, + outputAntifloodHandler: args.OutputAntifloodHandler, + maxTrieLevelInMemory: args.MaxTrieLevelInMemory, + roundHandler: args.RoundHandler, + peersRatingHandler: args.PeersRatingHandler, + interceptorDebugConfig: args.InterceptorDebugConfig, + maxHardCapForMissingNodes: args.MaxHardCapForMissingNodes, + numConcurrentTrieSyncers: args.NumConcurrentTrieSyncers, + trieSyncerVersion: args.TrieSyncerVersion, + checkNodesOnDisk: args.CheckNodesOnDisk, + statusCoreComponents: args.StatusCoreComponents, + nodeOperationMode: args.NodeOperationMode, } return e, nil @@ -351,7 +366,7 @@ func (e *exportHandlerFactory) Create() 
(update.ExportHandler, error) { argsResolvers := ArgsNewResolversContainerFactory{ ShardCoordinator: e.shardCoordinator, - Messenger: e.messenger, + Messenger: e.mainMessenger, Marshalizer: e.CoreComponents.InternalMarshalizer(), DataTrieContainer: dataTries, ExistingResolvers: e.existingResolvers, @@ -379,7 +394,7 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, error) { argsRequesters := ArgsRequestersContainerFactory{ ShardCoordinator: e.shardCoordinator, - Messenger: e.messenger, + Messenger: e.mainMessenger, Marshaller: e.CoreComponents.InternalMarshalizer(), ExistingRequesters: e.existingRequesters, OutputAntifloodHandler: e.outputAntifloodHandler, @@ -545,7 +560,7 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, error) { return nil, err } - e.interceptorsContainer.Iterate(func(key string, interceptor process.Interceptor) bool { + e.mainInterceptorsContainer.Iterate(func(key string, interceptor process.Interceptor) bool { errNotCritical = interceptor.SetInterceptedDebugHandler(debugger) if errNotCritical != nil { log.Warn("error setting debugger", "interceptor", key, "error", errNotCritical) @@ -568,38 +583,42 @@ func (e *exportHandlerFactory) prepareFolders(folder string) error { func (e *exportHandlerFactory) createInterceptors() error { argsInterceptors := ArgsNewFullSyncInterceptorsContainerFactory{ - CoreComponents: e.CoreComponents, - CryptoComponents: e.CryptoComponents, - Accounts: e.accounts, - ShardCoordinator: e.shardCoordinator, - NodesCoordinator: e.nodesCoordinator, - Messenger: e.messenger, - Store: e.storageService, - DataPool: e.dataPool, - MaxTxNonceDeltaAllowed: math.MaxInt32, - TxFeeHandler: &disabled.FeeHandler{}, - BlockBlackList: cache.NewTimeCache(time.Second), - HeaderSigVerifier: e.headerSigVerifier, - HeaderIntegrityVerifier: e.headerIntegrityVerifier, - SizeCheckDelta: math.MaxUint32, - ValidityAttester: e.validityAttester, - EpochStartTrigger: e.epochStartTrigger, - WhiteListHandler: 
e.whiteListHandler, - WhiteListerVerifiedTxs: e.whiteListerVerifiedTxs, - InterceptorsContainer: e.interceptorsContainer, - AntifloodHandler: e.inputAntifloodHandler, + CoreComponents: e.CoreComponents, + CryptoComponents: e.CryptoComponents, + Accounts: e.accounts, + ShardCoordinator: e.shardCoordinator, + NodesCoordinator: e.nodesCoordinator, + MainMessenger: e.mainMessenger, + FullArchiveMessenger: e.fullArchiveMessenger, + Store: e.storageService, + DataPool: e.dataPool, + MaxTxNonceDeltaAllowed: math.MaxInt32, + TxFeeHandler: &disabled.FeeHandler{}, + BlockBlackList: cache.NewTimeCache(time.Second), + HeaderSigVerifier: e.headerSigVerifier, + HeaderIntegrityVerifier: e.headerIntegrityVerifier, + SizeCheckDelta: math.MaxUint32, + ValidityAttester: e.validityAttester, + EpochStartTrigger: e.epochStartTrigger, + WhiteListHandler: e.whiteListHandler, + WhiteListerVerifiedTxs: e.whiteListerVerifiedTxs, + MainInterceptorsContainer: e.mainInterceptorsContainer, + FullArchiveInterceptorsContainer: e.fullArchiveInterceptorsContainer, + NodeOperationMode: e.nodeOperationMode, + AntifloodHandler: e.inputAntifloodHandler, } fullSyncInterceptors, err := NewFullSyncInterceptorsContainerFactory(argsInterceptors) if err != nil { return err } - interceptorsContainer, err := fullSyncInterceptors.Create() + mainInterceptorsContainer, fullArchiveInterceptorsContainer, err := fullSyncInterceptors.Create() if err != nil { return err } - e.interceptorsContainer = interceptorsContainer + e.mainInterceptorsContainer = mainInterceptorsContainer + e.fullArchiveInterceptorsContainer = fullArchiveInterceptorsContainer return nil } diff --git a/update/factory/fullSyncInterceptors.go b/update/factory/fullSyncInterceptors.go index 545b4114f51..4c88c26945b 100644 --- a/update/factory/fullSyncInterceptors.go +++ b/update/factory/fullSyncInterceptors.go @@ -1,12 +1,15 @@ package factory import ( + "fmt" + "github.com/multiversx/mx-chain-core-go/core" 
"github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/core/throttler" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/dataValidators" "github.com/multiversx/mx-chain-go/process/factory" @@ -27,12 +30,14 @@ const numGoRoutines = 2000 // fullSyncInterceptorsContainerFactory will handle the creation the interceptors container for shards type fullSyncInterceptorsContainerFactory struct { - container process.InterceptorsContainer + mainContainer process.InterceptorsContainer + fullArchiveContainer process.InterceptorsContainer shardCoordinator sharding.Coordinator accounts state.AccountsAdapter store dataRetriever.StorageService dataPool dataRetriever.PoolsHolder - messenger process.TopicHandler + mainMessenger process.TopicHandler + fullArchiveMessenger process.TopicHandler nodesCoordinator nodesCoordinator.NodesCoordinator blockBlackList process.TimeCacher argInterceptorFactory *interceptorFactory.ArgInterceptedDataFactory @@ -43,30 +48,34 @@ type fullSyncInterceptorsContainerFactory struct { whiteListerVerifiedTxs update.WhiteListHandler antifloodHandler process.P2PAntifloodHandler preferredPeersHolder update.PreferredPeersHolderHandler + nodeOperationMode p2p.NodeOperation } // ArgsNewFullSyncInterceptorsContainerFactory holds the arguments needed for fullSyncInterceptorsContainerFactory type ArgsNewFullSyncInterceptorsContainerFactory struct { - CoreComponents process.CoreComponentsHolder - CryptoComponents process.CryptoComponentsHolder - Accounts state.AccountsAdapter - ShardCoordinator sharding.Coordinator - NodesCoordinator nodesCoordinator.NodesCoordinator - Messenger process.TopicHandler - Store dataRetriever.StorageService - DataPool dataRetriever.PoolsHolder - MaxTxNonceDeltaAllowed int - TxFeeHandler 
process.FeeHandler - BlockBlackList process.TimeCacher - HeaderSigVerifier process.InterceptedHeaderSigVerifier - HeaderIntegrityVerifier process.HeaderIntegrityVerifier - SizeCheckDelta uint32 - ValidityAttester process.ValidityAttester - EpochStartTrigger process.EpochStartTriggerHandler - WhiteListHandler update.WhiteListHandler - WhiteListerVerifiedTxs update.WhiteListHandler - InterceptorsContainer process.InterceptorsContainer - AntifloodHandler process.P2PAntifloodHandler + CoreComponents process.CoreComponentsHolder + CryptoComponents process.CryptoComponentsHolder + Accounts state.AccountsAdapter + ShardCoordinator sharding.Coordinator + NodesCoordinator nodesCoordinator.NodesCoordinator + MainMessenger process.TopicHandler + FullArchiveMessenger process.TopicHandler + Store dataRetriever.StorageService + DataPool dataRetriever.PoolsHolder + MaxTxNonceDeltaAllowed int + TxFeeHandler process.FeeHandler + BlockBlackList process.TimeCacher + HeaderSigVerifier process.InterceptedHeaderSigVerifier + HeaderIntegrityVerifier process.HeaderIntegrityVerifier + SizeCheckDelta uint32 + ValidityAttester process.ValidityAttester + EpochStartTrigger process.EpochStartTriggerHandler + WhiteListHandler update.WhiteListHandler + WhiteListerVerifiedTxs update.WhiteListHandler + MainInterceptorsContainer process.InterceptorsContainer + FullArchiveInterceptorsContainer process.InterceptorsContainer + AntifloodHandler process.P2PAntifloodHandler + NodeOperationMode p2p.NodeOperation } // NewFullSyncInterceptorsContainerFactory is responsible for creating a new interceptors factory object @@ -80,7 +89,8 @@ func NewFullSyncInterceptorsContainerFactory( args.Accounts, args.Store, args.DataPool, - args.Messenger, + args.MainMessenger, + args.FullArchiveMessenger, args.NodesCoordinator, args.BlockBlackList, args.WhiteListerVerifiedTxs, @@ -111,8 +121,11 @@ func NewFullSyncInterceptorsContainerFactory( if check.IfNil(args.EpochStartTrigger) { return nil, 
process.ErrNilEpochStartTrigger } - if check.IfNil(args.InterceptorsContainer) { - return nil, update.ErrNilInterceptorsContainer + if check.IfNil(args.MainInterceptorsContainer) { + return nil, fmt.Errorf("%w on main network", update.ErrNilInterceptorsContainer) + } + if check.IfNil(args.FullArchiveInterceptorsContainer) { + return nil, fmt.Errorf("%w on full archive network", update.ErrNilInterceptorsContainer) } if check.IfNil(args.WhiteListHandler) { return nil, update.ErrNilWhiteListHandler @@ -136,10 +149,12 @@ func NewFullSyncInterceptorsContainerFactory( } icf := &fullSyncInterceptorsContainerFactory{ - container: args.InterceptorsContainer, + mainContainer: args.MainInterceptorsContainer, + fullArchiveContainer: args.FullArchiveInterceptorsContainer, accounts: args.Accounts, shardCoordinator: args.ShardCoordinator, - messenger: args.Messenger, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, store: args.Store, dataPool: args.DataPool, nodesCoordinator: args.NodesCoordinator, @@ -151,6 +166,7 @@ func NewFullSyncInterceptorsContainerFactory( antifloodHandler: args.AntifloodHandler, //TODO: inject the real peers holder once we have the peers mapping before epoch bootstrap finishes preferredPeersHolder: disabled.NewPreferredPeersHolder(), + nodeOperationMode: args.NodeOperationMode, } icf.globalThrottler, err = throttler.NewNumGoRoutinesThrottler(numGoRoutines) @@ -162,43 +178,43 @@ func NewFullSyncInterceptorsContainerFactory( } // Create returns an interceptor container that will hold all interceptors in the system -func (ficf *fullSyncInterceptorsContainerFactory) Create() (process.InterceptorsContainer, error) { +func (ficf *fullSyncInterceptorsContainerFactory) Create() (process.InterceptorsContainer, process.InterceptorsContainer, error) { err := ficf.generateTxInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = ficf.generateUnsignedTxsInterceptors() if err != nil { - return nil, err + 
return nil, nil, err } err = ficf.generateRewardTxInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = ficf.generateMiniBlocksInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = ficf.generateMetachainHeaderInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = ficf.generateShardHeaderInterceptors() if err != nil { - return nil, err + return nil, nil, err } err = ficf.generateTrieNodesInterceptors() if err != nil { - return nil, err + return nil, nil, err } - return ficf.container, nil + return ficf.mainContainer, ficf.fullArchiveContainer, nil } func checkBaseParams( @@ -208,7 +224,8 @@ func checkBaseParams( accounts state.AccountsAdapter, store dataRetriever.StorageService, dataPool dataRetriever.PoolsHolder, - messenger process.TopicHandler, + mainMessenger process.TopicHandler, + fullArchiveMessenger process.TopicHandler, nodesCoordinator nodesCoordinator.NodesCoordinator, blockBlackList process.TimeCacher, whiteListerVerifiedTxs update.WhiteListHandler, @@ -256,8 +273,11 @@ func checkBaseParams( if check.IfNil(shardCoordinator) { return process.ErrNilShardCoordinator } - if check.IfNil(messenger) { - return process.ErrNilMessenger + if check.IfNil(mainMessenger) { + return fmt.Errorf("%w on main network", process.ErrNilMessenger) + } + if check.IfNil(fullArchiveMessenger) { + return fmt.Errorf("%w on full archive network", process.ErrNilMessenger) } if check.IfNil(store) { return process.ErrNilStore @@ -282,7 +302,7 @@ func checkBaseParams( } func (ficf *fullSyncInterceptorsContainerFactory) checkIfInterceptorExists(identifier string) bool { - _, err := ficf.container.Get(identifier) + _, err := ficf.mainContainer.Get(identifier) return err == nil } @@ -312,7 +332,7 @@ func (ficf *fullSyncInterceptorsContainerFactory) generateShardHeaderInterceptor interceptorsSlice[int(idx)] = interceptor } - return ficf.container.AddMultiple(keys, interceptorsSlice) + return 
ficf.addInterceptorsToContainers(keys, interceptorsSlice) } func (ficf *fullSyncInterceptorsContainerFactory) createOneShardHeaderInterceptor(topic string) (process.Interceptor, error) { @@ -338,7 +358,7 @@ func (ficf *fullSyncInterceptorsContainerFactory) createOneShardHeaderIntercepto Throttler: ficf.globalThrottler, AntifloodHandler: ficf.antifloodHandler, WhiteListRequest: ficf.whiteListHandler, - CurrentPeerId: ficf.messenger.ID(), + CurrentPeerId: ficf.mainMessenger.ID(), }, ) if err != nil { @@ -382,7 +402,7 @@ func (ficf *fullSyncInterceptorsContainerFactory) generateUnsignedTxsInterceptor interceptorsSlice[numShards] = interceptor } - return ficf.container.AddMultiple(keys, interceptorsSlice) + return ficf.addInterceptorsToContainers(keys, interceptorsSlice) } func (ficf *fullSyncInterceptorsContainerFactory) generateTrieNodesInterceptors() error { @@ -428,7 +448,7 @@ func (ficf *fullSyncInterceptorsContainerFactory) generateTrieNodesInterceptors( trieInterceptors = append(trieInterceptors, interceptor) } - return ficf.container.AddMultiple(keys, trieInterceptors) + return ficf.addInterceptorsToContainers(keys, trieInterceptors) } func (ficf *fullSyncInterceptorsContainerFactory) createTopicAndAssignHandler( @@ -437,12 +457,34 @@ func (ficf *fullSyncInterceptorsContainerFactory) createTopicAndAssignHandler( createChannel bool, ) (process.Interceptor, error) { - err := ficf.messenger.CreateTopic(topic, createChannel) + err := createTopicAndAssignHandlerOnMessenger(topic, interceptor, createChannel, ficf.mainMessenger) if err != nil { return nil, err } - return interceptor, ficf.messenger.RegisterMessageProcessor(topic, common.HardforkInterceptorsIdentifier, interceptor) + if ficf.nodeOperationMode == p2p.FullArchiveMode { + err = createTopicAndAssignHandlerOnMessenger(topic, interceptor, createChannel, ficf.fullArchiveMessenger) + if err != nil { + return nil, err + } + } + + return interceptor, nil +} + +func createTopicAndAssignHandlerOnMessenger( + topic 
string, + interceptor process.Interceptor, + createChannel bool, + messenger process.TopicHandler, +) error { + + err := messenger.CreateTopic(topic, createChannel) + if err != nil { + return err + } + + return messenger.RegisterMessageProcessor(topic, common.HardforkInterceptorsIdentifier, interceptor) } func (ficf *fullSyncInterceptorsContainerFactory) generateTxInterceptors() error { @@ -480,7 +522,7 @@ func (ficf *fullSyncInterceptorsContainerFactory) generateTxInterceptors() error interceptorSlice = append(interceptorSlice, interceptor) } - return ficf.container.AddMultiple(keys, interceptorSlice) + return ficf.addInterceptorsToContainers(keys, interceptorSlice) } func (ficf *fullSyncInterceptorsContainerFactory) createOneTxInterceptor(topic string) (process.Interceptor, error) { @@ -519,7 +561,7 @@ func (ficf *fullSyncInterceptorsContainerFactory) createOneTxInterceptor(topic s Throttler: ficf.globalThrottler, AntifloodHandler: ficf.antifloodHandler, WhiteListRequest: ficf.whiteListHandler, - CurrentPeerId: ficf.messenger.ID(), + CurrentPeerId: ficf.mainMessenger.ID(), PreferredPeersHolder: ficf.preferredPeersHolder, }, ) @@ -554,7 +596,7 @@ func (ficf *fullSyncInterceptorsContainerFactory) createOneUnsignedTxInterceptor Throttler: ficf.globalThrottler, AntifloodHandler: ficf.antifloodHandler, WhiteListRequest: ficf.whiteListHandler, - CurrentPeerId: ficf.messenger.ID(), + CurrentPeerId: ficf.mainMessenger.ID(), PreferredPeersHolder: ficf.preferredPeersHolder, }, ) @@ -589,7 +631,7 @@ func (ficf *fullSyncInterceptorsContainerFactory) createOneRewardTxInterceptor(t Throttler: ficf.globalThrottler, AntifloodHandler: ficf.antifloodHandler, WhiteListRequest: ficf.whiteListHandler, - CurrentPeerId: ficf.messenger.ID(), + CurrentPeerId: ficf.mainMessenger.ID(), PreferredPeersHolder: ficf.preferredPeersHolder, }, ) @@ -632,7 +674,7 @@ func (ficf *fullSyncInterceptorsContainerFactory) generateMiniBlocksInterceptors interceptorsSlice[numShards] = interceptor } - 
return ficf.container.AddMultiple(keys, interceptorsSlice) + return ficf.addInterceptorsToContainers(keys, interceptorsSlice) } func (ficf *fullSyncInterceptorsContainerFactory) createOneMiniBlocksInterceptor(topic string) (process.Interceptor, error) { @@ -661,7 +703,7 @@ func (ficf *fullSyncInterceptorsContainerFactory) createOneMiniBlocksInterceptor Throttler: ficf.globalThrottler, AntifloodHandler: ficf.antifloodHandler, WhiteListRequest: ficf.whiteListHandler, - CurrentPeerId: ficf.messenger.ID(), + CurrentPeerId: ficf.mainMessenger.ID(), PreferredPeersHolder: ficf.preferredPeersHolder, }, ) @@ -701,7 +743,7 @@ func (ficf *fullSyncInterceptorsContainerFactory) generateMetachainHeaderInterce Throttler: ficf.globalThrottler, AntifloodHandler: ficf.antifloodHandler, WhiteListRequest: ficf.whiteListHandler, - CurrentPeerId: ficf.messenger.ID(), + CurrentPeerId: ficf.mainMessenger.ID(), PreferredPeersHolder: ficf.preferredPeersHolder, }, ) @@ -714,7 +756,7 @@ func (ficf *fullSyncInterceptorsContainerFactory) generateMetachainHeaderInterce return err } - return ficf.container.Add(identifierHdr, interceptor) + return ficf.addInterceptorsToContainers([]string{identifierHdr}, []process.Interceptor{interceptor}) } func (ficf *fullSyncInterceptorsContainerFactory) createOneTrieNodesInterceptor(topic string) (process.Interceptor, error) { @@ -737,7 +779,7 @@ func (ficf *fullSyncInterceptorsContainerFactory) createOneTrieNodesInterceptor( Throttler: ficf.globalThrottler, AntifloodHandler: ficf.antifloodHandler, WhiteListRequest: ficf.whiteListHandler, - CurrentPeerId: ficf.messenger.ID(), + CurrentPeerId: ficf.mainMessenger.ID(), PreferredPeersHolder: ficf.preferredPeersHolder, }, ) @@ -775,7 +817,20 @@ func (ficf *fullSyncInterceptorsContainerFactory) generateRewardTxInterceptors() interceptorSlice[int(idx)] = interceptor } - return ficf.container.AddMultiple(keys, interceptorSlice) + return ficf.addInterceptorsToContainers(keys, interceptorSlice) +} + +func (ficf 
*fullSyncInterceptorsContainerFactory) addInterceptorsToContainers(keys []string, interceptors []process.Interceptor) error { + err := ficf.mainContainer.AddMultiple(keys, interceptors) + if err != nil { + return err + } + + if ficf.nodeOperationMode != p2p.FullArchiveMode { + return nil + } + + return ficf.fullArchiveContainer.AddMultiple(keys, interceptors) } // IsInterfaceNil returns true if there is no value under the interface From 67ae3be181475f5a10449107bb692ef59810d7d4 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Thu, 15 Jun 2023 14:35:38 +0300 Subject: [PATCH 16/38] fixes after merge --- factory/processing/processComponents.go | 58 +++-- .../multiShard/hardFork/hardFork_test.go | 26 +- update/factory/exportHandlerFactory.go | 223 +++++++++--------- 3 files changed, 152 insertions(+), 155 deletions(-) diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index bde74960a06..f8f622c54cb 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -1780,38 +1780,36 @@ func (pcf *processComponentsFactory) createExportFactoryHandler( nodeOperationMode = p2p.FullArchiveMode } argsExporter := updateFactory.ArgsExporter{ - CoreComponents: pcf.coreData, - CryptoComponents: pcf.crypto, - StatusCoreComponents: pcf.statusCoreComponents, - NetworkComponents: pcf.network, - HeaderValidator: headerValidator, - DataPool: pcf.data.Datapool(), - StorageService: pcf.data.StorageService(), - RequestHandler: requestHandler, - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - ActiveAccountsDBs: accountsDBs, - ExistingResolvers: resolversContainer, - ExistingRequesters: requestersContainer, - ExportFolder: exportFolder, - ExportTriesStorageConfig: hardforkConfig.ExportTriesStorageConfig, - ExportStateStorageConfig: hardforkConfig.ExportStateStorageConfig, - ExportStateKeysConfig: hardforkConfig.ExportKeysStorageConfig, - MaxTrieLevelInMemory: 
pcf.config.StateTriesConfig.MaxStateTrieLevelInMemory, - WhiteListHandler: pcf.whiteListHandler, - WhiteListerVerifiedTxs: pcf.whiteListerVerifiedTxs, + CoreComponents: pcf.coreData, + CryptoComponents: pcf.crypto, + StatusCoreComponents: pcf.statusCoreComponents, + NetworkComponents: pcf.network, + HeaderValidator: headerValidator, + DataPool: pcf.data.Datapool(), + StorageService: pcf.data.StorageService(), + RequestHandler: requestHandler, + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + ActiveAccountsDBs: accountsDBs, + ExistingResolvers: resolversContainer, + ExistingRequesters: requestersContainer, + ExportFolder: exportFolder, + ExportTriesStorageConfig: hardforkConfig.ExportTriesStorageConfig, + ExportStateStorageConfig: hardforkConfig.ExportStateStorageConfig, + ExportStateKeysConfig: hardforkConfig.ExportKeysStorageConfig, + MaxTrieLevelInMemory: pcf.config.StateTriesConfig.MaxStateTrieLevelInMemory, + WhiteListHandler: pcf.whiteListHandler, + WhiteListerVerifiedTxs: pcf.whiteListerVerifiedTxs, MainInterceptorsContainer: mainInterceptorsContainer, FullArchiveInterceptorsContainer: fullArchiveInterceptorsContainer, - NodesCoordinator: pcf.nodesCoordinator, - HeaderSigVerifier: headerSigVerifier, - HeaderIntegrityVerifier: pcf.bootstrapComponents.HeaderIntegrityVerifier(), - ValidityAttester: blockTracker, - InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), - OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), - RoundHandler: pcf.coreData.RoundHandler(), - InterceptorDebugConfig: pcf.config.Debug.InterceptorResolver, - MaxHardCapForMissingNodes: pcf.config.TrieSync.MaxHardCapForMissingNodes, - NumConcurrentTrieSyncers: pcf.config.TrieSync.NumConcurrentTrieSyncers, - TrieSyncerVersion: pcf.config.TrieSync.TrieSyncerVersion, + NodesCoordinator: pcf.nodesCoordinator, + HeaderSigVerifier: headerSigVerifier, + HeaderIntegrityVerifier: pcf.bootstrapComponents.HeaderIntegrityVerifier(), + ValidityAttester: blockTracker, + 
RoundHandler: pcf.coreData.RoundHandler(), + InterceptorDebugConfig: pcf.config.Debug.InterceptorResolver, + MaxHardCapForMissingNodes: pcf.config.TrieSync.MaxHardCapForMissingNodes, + NumConcurrentTrieSyncers: pcf.config.TrieSync.NumConcurrentTrieSyncers, + TrieSyncerVersion: pcf.config.TrieSync.TrieSyncerVersion, NodeOperationMode: nodeOperationMode, } return updateFactory.NewExportHandlerFactory(argsExporter) diff --git a/integrationTests/multiShard/hardFork/hardFork_test.go b/integrationTests/multiShard/hardFork/hardFork_test.go index 356aa036b2d..98314294e5d 100644 --- a/integrationTests/multiShard/hardFork/hardFork_test.go +++ b/integrationTests/multiShard/hardFork/hardFork_test.go @@ -609,22 +609,20 @@ func createHardForkExporter( MaxOpenFiles: 10, }, }, - ExportStateStorageConfig: exportConfig, - ExportStateKeysConfig: keysConfig, - MaxTrieLevelInMemory: uint(5), - WhiteListHandler: node.WhiteListHandler, - WhiteListerVerifiedTxs: node.WhiteListerVerifiedTxs, + ExportStateStorageConfig: exportConfig, + ExportStateKeysConfig: keysConfig, + MaxTrieLevelInMemory: uint(5), + WhiteListHandler: node.WhiteListHandler, + WhiteListerVerifiedTxs: node.WhiteListerVerifiedTxs, MainInterceptorsContainer: node.MainInterceptorsContainer, FullArchiveInterceptorsContainer: node.FullArchiveInterceptorsContainer, - ExistingResolvers: node.ResolversContainer, - ExistingRequesters: node.RequestersContainer, - NodesCoordinator: node.NodesCoordinator, - HeaderSigVerifier: node.HeaderSigVerifier, - HeaderIntegrityVerifier: node.HeaderIntegrityVerifier, - ValidityAttester: node.BlockTracker, - OutputAntifloodHandler: &mock.NilAntifloodHandler{}, - InputAntifloodHandler: &mock.NilAntifloodHandler{}, - RoundHandler: &mock.RoundHandlerMock{}, + ExistingResolvers: node.ResolversContainer, + ExistingRequesters: node.RequestersContainer, + NodesCoordinator: node.NodesCoordinator, + HeaderSigVerifier: node.HeaderSigVerifier, + HeaderIntegrityVerifier: node.HeaderIntegrityVerifier, + 
ValidityAttester: node.BlockTracker, + RoundHandler: &mock.RoundHandlerMock{}, InterceptorDebugConfig: config.InterceptorResolverDebugConfig{ Enabled: true, EnablePrint: true, diff --git a/update/factory/exportHandlerFactory.go b/update/factory/exportHandlerFactory.go index 8984625759e..5692ada6c23 100644 --- a/update/factory/exportHandlerFactory.go +++ b/update/factory/exportHandlerFactory.go @@ -38,76 +38,76 @@ var log = logger.GetOrCreate("update/factory") // ArgsExporter is the argument structure to create a new exporter type ArgsExporter struct { - CoreComponents process.CoreComponentsHolder - CryptoComponents process.CryptoComponentsHolder - StatusCoreComponents process.StatusCoreComponentsHolder - NetworkComponents mxFactory.NetworkComponentsHolder - HeaderValidator epochStart.HeaderValidator - DataPool dataRetriever.PoolsHolder - StorageService dataRetriever.StorageService - RequestHandler process.RequestHandler - ShardCoordinator sharding.Coordinator - ActiveAccountsDBs map[state.AccountsDbIdentifier]state.AccountsAdapter - ExistingResolvers dataRetriever.ResolversContainer - ExistingRequesters dataRetriever.RequestersContainer - ExportFolder string - ExportTriesStorageConfig config.StorageConfig - ExportStateStorageConfig config.StorageConfig - ExportStateKeysConfig config.StorageConfig - MaxTrieLevelInMemory uint - WhiteListHandler process.WhiteListHandler - WhiteListerVerifiedTxs process.WhiteListHandler + CoreComponents process.CoreComponentsHolder + CryptoComponents process.CryptoComponentsHolder + StatusCoreComponents process.StatusCoreComponentsHolder + NetworkComponents mxFactory.NetworkComponentsHolder + HeaderValidator epochStart.HeaderValidator + DataPool dataRetriever.PoolsHolder + StorageService dataRetriever.StorageService + RequestHandler process.RequestHandler + ShardCoordinator sharding.Coordinator + ActiveAccountsDBs map[state.AccountsDbIdentifier]state.AccountsAdapter + ExistingResolvers dataRetriever.ResolversContainer + 
ExistingRequesters dataRetriever.RequestersContainer + ExportFolder string + ExportTriesStorageConfig config.StorageConfig + ExportStateStorageConfig config.StorageConfig + ExportStateKeysConfig config.StorageConfig + MaxTrieLevelInMemory uint + WhiteListHandler process.WhiteListHandler + WhiteListerVerifiedTxs process.WhiteListHandler MainInterceptorsContainer process.InterceptorsContainer FullArchiveInterceptorsContainer process.InterceptorsContainer - NodesCoordinator nodesCoordinator.NodesCoordinator - HeaderSigVerifier process.InterceptedHeaderSigVerifier - HeaderIntegrityVerifier process.HeaderIntegrityVerifier - ValidityAttester process.ValidityAttester - RoundHandler process.RoundHandler - InterceptorDebugConfig config.InterceptorResolverDebugConfig - MaxHardCapForMissingNodes int - NumConcurrentTrieSyncers int - TrieSyncerVersion int - CheckNodesOnDisk bool + NodesCoordinator nodesCoordinator.NodesCoordinator + HeaderSigVerifier process.InterceptedHeaderSigVerifier + HeaderIntegrityVerifier process.HeaderIntegrityVerifier + ValidityAttester process.ValidityAttester + RoundHandler process.RoundHandler + InterceptorDebugConfig config.InterceptorResolverDebugConfig + MaxHardCapForMissingNodes int + NumConcurrentTrieSyncers int + TrieSyncerVersion int + CheckNodesOnDisk bool NodeOperationMode p2p.NodeOperation } type exportHandlerFactory struct { - coreComponents process.CoreComponentsHolder - cryptoComponents process.CryptoComponentsHolder - statusCoreComponents process.StatusCoreComponentsHolder - networkComponents mxFactory.NetworkComponentsHolder - headerValidator epochStart.HeaderValidator - dataPool dataRetriever.PoolsHolder - storageService dataRetriever.StorageService - requestHandler process.RequestHandler - shardCoordinator sharding.Coordinator - activeAccountsDBs map[state.AccountsDbIdentifier]state.AccountsAdapter - exportFolder string - exportTriesStorageConfig config.StorageConfig - exportStateStorageConfig config.StorageConfig - 
exportStateKeysConfig config.StorageConfig - maxTrieLevelInMemory uint - whiteListHandler process.WhiteListHandler - whiteListerVerifiedTxs process.WhiteListHandler + coreComponents process.CoreComponentsHolder + cryptoComponents process.CryptoComponentsHolder + statusCoreComponents process.StatusCoreComponentsHolder + networkComponents mxFactory.NetworkComponentsHolder + headerValidator epochStart.HeaderValidator + dataPool dataRetriever.PoolsHolder + storageService dataRetriever.StorageService + requestHandler process.RequestHandler + shardCoordinator sharding.Coordinator + activeAccountsDBs map[state.AccountsDbIdentifier]state.AccountsAdapter + exportFolder string + exportTriesStorageConfig config.StorageConfig + exportStateStorageConfig config.StorageConfig + exportStateKeysConfig config.StorageConfig + maxTrieLevelInMemory uint + whiteListHandler process.WhiteListHandler + whiteListerVerifiedTxs process.WhiteListHandler mainInterceptorsContainer process.InterceptorsContainer fullArchiveInterceptorsContainer process.InterceptorsContainer - existingResolvers dataRetriever.ResolversContainer - existingRequesters dataRetriever.RequestersContainer - epochStartTrigger epochStart.TriggerHandler - accounts state.AccountsAdapter - nodesCoordinator nodesCoordinator.NodesCoordinator - headerSigVerifier process.InterceptedHeaderSigVerifier - headerIntegrityVerifier process.HeaderIntegrityVerifier - validityAttester process.ValidityAttester - resolverContainer dataRetriever.ResolversContainer - requestersContainer dataRetriever.RequestersContainer - roundHandler process.RoundHandler - interceptorDebugConfig config.InterceptorResolverDebugConfig - maxHardCapForMissingNodes int - numConcurrentTrieSyncers int - trieSyncerVersion int - checkNodesOnDisk bool + existingResolvers dataRetriever.ResolversContainer + existingRequesters dataRetriever.RequestersContainer + epochStartTrigger epochStart.TriggerHandler + accounts state.AccountsAdapter + nodesCoordinator 
nodesCoordinator.NodesCoordinator + headerSigVerifier process.InterceptedHeaderSigVerifier + headerIntegrityVerifier process.HeaderIntegrityVerifier + validityAttester process.ValidityAttester + resolverContainer dataRetriever.ResolversContainer + requestersContainer dataRetriever.RequestersContainer + roundHandler process.RoundHandler + interceptorDebugConfig config.InterceptorResolverDebugConfig + maxHardCapForMissingNodes int + numConcurrentTrieSyncers int + trieSyncerVersion int + checkNodesOnDisk bool nodeOperationMode p2p.NodeOperation } @@ -234,38 +234,38 @@ func NewExportHandlerFactory(args ArgsExporter) (*exportHandlerFactory, error) { } e := &exportHandlerFactory{ - coreComponents: args.CoreComponents, - cryptoComponents: args.CryptoComponents, - networkComponents: args.NetworkComponents, - headerValidator: args.HeaderValidator, - dataPool: args.DataPool, - storageService: args.StorageService, - requestHandler: args.RequestHandler, - shardCoordinator: args.ShardCoordinator, - activeAccountsDBs: args.ActiveAccountsDBs, - exportFolder: args.ExportFolder, - exportTriesStorageConfig: args.ExportTriesStorageConfig, - exportStateStorageConfig: args.ExportStateStorageConfig, - exportStateKeysConfig: args.ExportStateKeysConfig, + coreComponents: args.CoreComponents, + cryptoComponents: args.CryptoComponents, + networkComponents: args.NetworkComponents, + headerValidator: args.HeaderValidator, + dataPool: args.DataPool, + storageService: args.StorageService, + requestHandler: args.RequestHandler, + shardCoordinator: args.ShardCoordinator, + activeAccountsDBs: args.ActiveAccountsDBs, + exportFolder: args.ExportFolder, + exportTriesStorageConfig: args.ExportTriesStorageConfig, + exportStateStorageConfig: args.ExportStateStorageConfig, + exportStateKeysConfig: args.ExportStateKeysConfig, mainInterceptorsContainer: args.MainInterceptorsContainer, fullArchiveInterceptorsContainer: args.FullArchiveInterceptorsContainer, - whiteListHandler: args.WhiteListHandler, - 
whiteListerVerifiedTxs: args.WhiteListerVerifiedTxs, - existingResolvers: args.ExistingResolvers, - existingRequesters: args.ExistingRequesters, - accounts: args.ActiveAccountsDBs[state.UserAccountsState], - nodesCoordinator: args.NodesCoordinator, - headerSigVerifier: args.HeaderSigVerifier, - headerIntegrityVerifier: args.HeaderIntegrityVerifier, - validityAttester: args.ValidityAttester, - maxTrieLevelInMemory: args.MaxTrieLevelInMemory, - roundHandler: args.RoundHandler, - interceptorDebugConfig: args.InterceptorDebugConfig, - maxHardCapForMissingNodes: args.MaxHardCapForMissingNodes, - numConcurrentTrieSyncers: args.NumConcurrentTrieSyncers, - trieSyncerVersion: args.TrieSyncerVersion, - checkNodesOnDisk: args.CheckNodesOnDisk, - statusCoreComponents: args.StatusCoreComponents, + whiteListHandler: args.WhiteListHandler, + whiteListerVerifiedTxs: args.WhiteListerVerifiedTxs, + existingResolvers: args.ExistingResolvers, + existingRequesters: args.ExistingRequesters, + accounts: args.ActiveAccountsDBs[state.UserAccountsState], + nodesCoordinator: args.NodesCoordinator, + headerSigVerifier: args.HeaderSigVerifier, + headerIntegrityVerifier: args.HeaderIntegrityVerifier, + validityAttester: args.ValidityAttester, + maxTrieLevelInMemory: args.MaxTrieLevelInMemory, + roundHandler: args.RoundHandler, + interceptorDebugConfig: args.InterceptorDebugConfig, + maxHardCapForMissingNodes: args.MaxHardCapForMissingNodes, + numConcurrentTrieSyncers: args.NumConcurrentTrieSyncers, + trieSyncerVersion: args.TrieSyncerVersion, + checkNodesOnDisk: args.CheckNodesOnDisk, + statusCoreComponents: args.StatusCoreComponents, nodeOperationMode: args.NodeOperationMode, } @@ -566,27 +566,28 @@ func (e *exportHandlerFactory) prepareFolders(folder string) error { func (e *exportHandlerFactory) createInterceptors() error { argsInterceptors := ArgsNewFullSyncInterceptorsContainerFactory{ - CoreComponents: e.coreComponents, - CryptoComponents: e.cryptoComponents, - Accounts: e.accounts, - 
ShardCoordinator: e.shardCoordinator, - NodesCoordinator: e.nodesCoordinator, - Messenger: e.networkComponents.NetworkMessenger(), - Store: e.storageService, - DataPool: e.dataPool, - MaxTxNonceDeltaAllowed: math.MaxInt32, - TxFeeHandler: &disabled.FeeHandler{}, - BlockBlackList: cache.NewTimeCache(time.Second), - HeaderSigVerifier: e.headerSigVerifier, - HeaderIntegrityVerifier: e.headerIntegrityVerifier, - SizeCheckDelta: math.MaxUint32, - ValidityAttester: e.validityAttester, - EpochStartTrigger: e.epochStartTrigger, - WhiteListHandler: e.whiteListHandler, - WhiteListerVerifiedTxs: e.whiteListerVerifiedTxs, + CoreComponents: e.coreComponents, + CryptoComponents: e.cryptoComponents, + Accounts: e.accounts, + ShardCoordinator: e.shardCoordinator, + NodesCoordinator: e.nodesCoordinator, + MainMessenger: e.networkComponents.NetworkMessenger(), + FullArchiveMessenger: e.networkComponents.FullArchiveNetworkMessenger(), + Store: e.storageService, + DataPool: e.dataPool, + MaxTxNonceDeltaAllowed: math.MaxInt32, + TxFeeHandler: &disabled.FeeHandler{}, + BlockBlackList: cache.NewTimeCache(time.Second), + HeaderSigVerifier: e.headerSigVerifier, + HeaderIntegrityVerifier: e.headerIntegrityVerifier, + SizeCheckDelta: math.MaxUint32, + ValidityAttester: e.validityAttester, + EpochStartTrigger: e.epochStartTrigger, + WhiteListHandler: e.whiteListHandler, + WhiteListerVerifiedTxs: e.whiteListerVerifiedTxs, MainInterceptorsContainer: e.mainInterceptorsContainer, FullArchiveInterceptorsContainer: e.fullArchiveInterceptorsContainer, - AntifloodHandler: e.networkComponents.InputAntiFloodHandler(), + AntifloodHandler: e.networkComponents.InputAntiFloodHandler(), NodeOperationMode: e.nodeOperationMode, } fullSyncInterceptors, err := NewFullSyncInterceptorsContainerFactory(argsInterceptors) From 0ac33ddc8de5e83d8f0dfb8ff8d6901ba9c68496 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Thu, 15 Jun 2023 16:25:19 +0300 Subject: [PATCH 17/38] updated the defer func --- 
.../p2p/peersRating/peersRating_test.go | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/integrationTests/p2p/peersRating/peersRating_test.go b/integrationTests/p2p/peersRating/peersRating_test.go index e03ab3307f5..32128c77d10 100644 --- a/integrationTests/p2p/peersRating/peersRating_test.go +++ b/integrationTests/p2p/peersRating/peersRating_test.go @@ -34,9 +34,9 @@ func TestPeersRatingAndResponsiveness(t *testing.T) { requesterNode := createNodeWithPeersRatingHandler(core.MetachainShardId, numOfShards, p2p.NormalOperation) defer func() { - _ = resolverNode.MainMessenger.Close() - _ = maliciousNode.MainMessenger.Close() - _ = requesterNode.MainMessenger.Close() + resolverNode.Close() + maliciousNode.Close() + requesterNode.Close() }() time.Sleep(time.Second) @@ -125,12 +125,9 @@ func TestPeersRatingAndResponsivenessOnFullArchive(t *testing.T) { regularNode := createNodeWithPeersRatingHandler(shardID, numOfShards, p2p.FullArchiveMode) defer func() { - _ = resolverFullArchiveNode.MainMessenger.Close() - _ = resolverFullArchiveNode.FullArchiveMessenger.Close() - _ = requesterFullArchiveNode.MainMessenger.Close() - _ = requesterFullArchiveNode.FullArchiveMessenger.Close() - _ = regularNode.MainMessenger.Close() - _ = regularNode.FullArchiveMessenger.Close() + resolverFullArchiveNode.Close() + requesterFullArchiveNode.Close() + regularNode.Close() }() // all nodes are connected on main network, but only the full archive resolver and requester are connected on full archive network From 11cdfc96671b254334ee877959981a3ce211dd08 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Thu, 15 Jun 2023 18:01:59 +0300 Subject: [PATCH 18/38] replaced messenger.Close with node.Close on tests --- .../multiShard/hardFork/hardFork_test.go | 4 +-- integrationTests/oneNodeNetwork.go | 3 +- .../p2p/peersRating/peersRating_test.go | 6 ++-- .../resolvers/headers/headers_test.go | 24 +++++++------- .../resolvers/metablock/metablock_test.go | 24 
+++++++------- .../resolvers/miniblocks/miniblocks_test.go | 32 +++++++++---------- .../resolvers/rewards/rewards_test.go | 4 +-- .../smartContractsResults/scrs_test.go | 4 +-- .../interceptedRequestHdr_test.go | 8 ++--- .../interceptedRequestTxBlockBody_test.go | 4 +-- .../interceptedBulkTx_test.go | 2 +- .../interceptedResolvedTx_test.go | 8 ++--- .../interceptedResolvedUnsignedTx_test.go | 4 +-- .../state/stateTrieSync/stateTrieSync_test.go | 2 +- 14 files changed, 64 insertions(+), 65 deletions(-) diff --git a/integrationTests/multiShard/hardFork/hardFork_test.go b/integrationTests/multiShard/hardFork/hardFork_test.go index 6cbf58bc4ae..4971bcc084e 100644 --- a/integrationTests/multiShard/hardFork/hardFork_test.go +++ b/integrationTests/multiShard/hardFork/hardFork_test.go @@ -73,7 +73,7 @@ func TestHardForkWithoutTransactionInMultiShardedEnvironment(t *testing.T) { n.Close() } - _ = hardforkTriggerNode.MainMessenger.Close() + hardforkTriggerNode.Close() }() round := uint64(0) @@ -144,7 +144,7 @@ func TestHardForkWithContinuousTransactionsInMultiShardedEnvironment(t *testing. 
n.Close() } - _ = hardforkTriggerNode.MainMessenger.Close() + hardforkTriggerNode.Close() }() initialVal := big.NewInt(1000000000) diff --git a/integrationTests/oneNodeNetwork.go b/integrationTests/oneNodeNetwork.go index 6a69dd0f0d5..720ff0529c6 100644 --- a/integrationTests/oneNodeNetwork.go +++ b/integrationTests/oneNodeNetwork.go @@ -32,8 +32,7 @@ func NewOneNodeNetwork() *oneNodeNetwork { // Stop stops the test network func (n *oneNodeNetwork) Stop() { - _ = n.Node.MainMessenger.Close() - _ = n.Node.VMContainer.Close() + n.Node.Close() } // Mint mints the given address diff --git a/integrationTests/p2p/peersRating/peersRating_test.go b/integrationTests/p2p/peersRating/peersRating_test.go index 212476a99c8..2e0dc7374cf 100644 --- a/integrationTests/p2p/peersRating/peersRating_test.go +++ b/integrationTests/p2p/peersRating/peersRating_test.go @@ -32,9 +32,9 @@ func TestPeersRatingAndResponsiveness(t *testing.T) { requesterNode := createNodeWithPeersRatingHandler(core.MetachainShardId, numOfShards) defer func() { - _ = resolverNode.MainMessenger.Close() - _ = maliciousNode.MainMessenger.Close() - _ = requesterNode.MainMessenger.Close() + resolverNode.Close() + maliciousNode.Close() + requesterNode.Close() }() time.Sleep(time.Second) diff --git a/integrationTests/resolvers/headers/headers_test.go b/integrationTests/resolvers/headers/headers_test.go index 74fb94cf5c0..e686225bbc6 100644 --- a/integrationTests/resolvers/headers/headers_test.go +++ b/integrationTests/resolvers/headers/headers_test.go @@ -24,8 +24,8 @@ func TestRequestResolveShardHeadersByHashRequestingShardResolvingShard(t *testin shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, shardId) defer func() { - _ = nRequester.MainMessenger.Close() - _ = nResolver.MainMessenger.Close() + nRequester.Close() + nResolver.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateShardHeader(headerNonce, integrationTests.ChainID) @@ -61,8 +61,8 @@ func 
TestRequestResolveShardHeadersByHashRequestingMetaResolvingShard(t *testing shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, core.MetachainShardId) defer func() { - _ = nRequester.MainMessenger.Close() - _ = nResolver.MainMessenger.Close() + nRequester.Close() + nResolver.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateShardHeader(headerNonce, integrationTests.ChainID) @@ -98,8 +98,8 @@ func TestRequestResolveShardHeadersByHashRequestingShardResolvingMeta(t *testing shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(core.MetachainShardId, shardId) defer func() { - _ = nRequester.MainMessenger.Close() - _ = nResolver.MainMessenger.Close() + nRequester.Close() + nResolver.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateShardHeader(headerNonce, integrationTests.ChainID) @@ -137,8 +137,8 @@ func TestRequestResolveShardHeadersByNonceRequestingShardResolvingShard(t *testi shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, shardId) defer func() { - _ = nRequester.MainMessenger.Close() - _ = nResolver.MainMessenger.Close() + nRequester.Close() + nResolver.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateShardHeader(headerNonce, integrationTests.ChainID) @@ -176,8 +176,8 @@ func TestRequestResolveShardHeadersByNonceRequestingMetaResolvingShard(t *testin shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, core.MetachainShardId) defer func() { - _ = nRequester.MainMessenger.Close() - _ = nResolver.MainMessenger.Close() + nRequester.Close() + nResolver.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateShardHeader(headerNonce, integrationTests.ChainID) @@ -215,8 +215,8 @@ func TestRequestResolveShardHeadersByNonceRequestingShardResolvingMeta(t *testin shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(core.MetachainShardId, 
shardId) defer func() { - _ = nRequester.MainMessenger.Close() - _ = nResolver.MainMessenger.Close() + nRequester.Close() + nResolver.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateShardHeader(headerNonce, integrationTests.ChainID) diff --git a/integrationTests/resolvers/metablock/metablock_test.go b/integrationTests/resolvers/metablock/metablock_test.go index 00deff89238..957fffb7fa2 100644 --- a/integrationTests/resolvers/metablock/metablock_test.go +++ b/integrationTests/resolvers/metablock/metablock_test.go @@ -24,8 +24,8 @@ func TestRequestResolveMetaHeadersByHashRequestingShardResolvingShard(t *testing shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, shardId) defer func() { - _ = nRequester.MainMessenger.Close() - _ = nResolver.MainMessenger.Close() + nRequester.Close() + nResolver.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateMetaHeader(headerNonce, integrationTests.ChainID) @@ -61,8 +61,8 @@ func TestRequestResolveMetaHeadersByHashRequestingMetaResolvingShard(t *testing. shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, core.MetachainShardId) defer func() { - _ = nRequester.MainMessenger.Close() - _ = nResolver.MainMessenger.Close() + nRequester.Close() + nResolver.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateMetaHeader(headerNonce, integrationTests.ChainID) @@ -98,8 +98,8 @@ func TestRequestResolveMetaHeadersByHashRequestingShardResolvingMeta(t *testing. 
shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(core.MetachainShardId, shardId) defer func() { - _ = nRequester.MainMessenger.Close() - _ = nResolver.MainMessenger.Close() + nRequester.Close() + nResolver.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateMetaHeader(headerNonce, integrationTests.ChainID) @@ -137,8 +137,8 @@ func TestRequestResolveMetaHeadersByNonceRequestingShardResolvingShard(t *testin shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, shardId) defer func() { - _ = nRequester.MainMessenger.Close() - _ = nResolver.MainMessenger.Close() + nRequester.Close() + nResolver.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateMetaHeader(headerNonce, integrationTests.ChainID) @@ -176,8 +176,8 @@ func TestRequestResolveMetaHeadersByNonceRequestingMetaResolvingShard(t *testing shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, core.MetachainShardId) defer func() { - _ = nRequester.MainMessenger.Close() - _ = nResolver.MainMessenger.Close() + nRequester.Close() + nResolver.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateMetaHeader(headerNonce, integrationTests.ChainID) @@ -215,8 +215,8 @@ func TestRequestResolveMetaHeadersByNonceRequestingShardResolvingMeta(t *testing shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(core.MetachainShardId, shardId) defer func() { - _ = nRequester.MainMessenger.Close() - _ = nResolver.MainMessenger.Close() + nRequester.Close() + nResolver.Close() }() headerNonce := uint64(0) header, hash := resolvers.CreateMetaHeader(headerNonce, integrationTests.ChainID) diff --git a/integrationTests/resolvers/miniblocks/miniblocks_test.go b/integrationTests/resolvers/miniblocks/miniblocks_test.go index 12046e157d4..989dd239ec6 100644 --- a/integrationTests/resolvers/miniblocks/miniblocks_test.go +++ 
b/integrationTests/resolvers/miniblocks/miniblocks_test.go @@ -18,8 +18,8 @@ func TestRequestResolveMiniblockByHashRequestingShardResolvingSameShard(t *testi shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, shardId) defer func() { - _ = nRequester.MainMessenger.Close() - _ = nResolver.MainMessenger.Close() + nRequester.Close() + nResolver.Close() }() miniblock, hash := resolvers.CreateMiniblock(shardId, shardId) @@ -54,8 +54,8 @@ func TestRequestResolveMiniblockByHashRequestingShardResolvingOtherShard(t *test shardIdRequester := uint32(1) nResolver, nRequester := resolvers.CreateResolverRequester(shardIdResolver, shardIdRequester) defer func() { - _ = nRequester.MainMessenger.Close() - _ = nResolver.MainMessenger.Close() + nRequester.Close() + nResolver.Close() }() miniblock, hash := resolvers.CreateMiniblock(shardIdResolver, shardIdRequester) @@ -89,8 +89,8 @@ func TestRequestResolveMiniblockByHashRequestingShardResolvingMeta(t *testing.T) shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(core.MetachainShardId, shardId) defer func() { - _ = nRequester.MainMessenger.Close() - _ = nResolver.MainMessenger.Close() + nRequester.Close() + nResolver.Close() }() miniblock, hash := resolvers.CreateMiniblock(shardId, shardId) @@ -124,8 +124,8 @@ func TestRequestResolveMiniblockByHashRequestingMetaResolvingShard(t *testing.T) shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, core.MetachainShardId) defer func() { - _ = nRequester.MainMessenger.Close() - _ = nResolver.MainMessenger.Close() + nRequester.Close() + nResolver.Close() }() miniblock, hash := resolvers.CreateMiniblock(shardId, core.MetachainShardId) @@ -159,8 +159,8 @@ func TestRequestResolvePeerMiniblockByHashRequestingShardResolvingSameShard(t *t shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, shardId) defer func() { - _ = nRequester.MainMessenger.Close() - _ = 
nResolver.MainMessenger.Close() + nRequester.Close() + nResolver.Close() }() miniblock, hash := resolvers.CreateMiniblock(core.MetachainShardId, core.AllShardId) @@ -195,8 +195,8 @@ func TestRequestResolvePeerMiniblockByHashRequestingShardResolvingOtherShard(t * shardIdRequester := uint32(1) nResolver, nRequester := resolvers.CreateResolverRequester(shardIdResolver, shardIdRequester) defer func() { - _ = nRequester.MainMessenger.Close() - _ = nResolver.MainMessenger.Close() + nRequester.Close() + nResolver.Close() }() miniblock, hash := resolvers.CreateMiniblock(shardIdResolver, core.AllShardId) @@ -230,8 +230,8 @@ func TestRequestResolvePeerMiniblockByHashRequestingShardResolvingMeta(t *testin shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(core.MetachainShardId, shardId) defer func() { - _ = nRequester.MainMessenger.Close() - _ = nResolver.MainMessenger.Close() + nRequester.Close() + nResolver.Close() }() miniblock, hash := resolvers.CreateMiniblock(shardId, core.AllShardId) @@ -265,8 +265,8 @@ func TestRequestResolvePeerMiniblockByHashRequestingMetaResolvingShard(t *testin shardId := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardId, core.MetachainShardId) defer func() { - _ = nRequester.MainMessenger.Close() - _ = nResolver.MainMessenger.Close() + nRequester.Close() + nResolver.Close() }() miniblock, hash := resolvers.CreateMiniblock(shardId, core.AllShardId) diff --git a/integrationTests/resolvers/rewards/rewards_test.go b/integrationTests/resolvers/rewards/rewards_test.go index bc7f63f9524..c0cf4cea66d 100644 --- a/integrationTests/resolvers/rewards/rewards_test.go +++ b/integrationTests/resolvers/rewards/rewards_test.go @@ -20,8 +20,8 @@ func TestRequestResolveRewardsByHashRequestingShardResolvingOtherShard(t *testin shardIdRequester := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardIdResolver, shardIdRequester) defer func() { - _ = nRequester.MainMessenger.Close() - _ = 
nResolver.MainMessenger.Close() + nRequester.Close() + nResolver.Close() }() headerNonce := uint64(0) reward, hash := resolvers.CreateReward(headerNonce) diff --git a/integrationTests/resolvers/smartContractsResults/scrs_test.go b/integrationTests/resolvers/smartContractsResults/scrs_test.go index 9db1cf8c5a0..75ac19c693c 100644 --- a/integrationTests/resolvers/smartContractsResults/scrs_test.go +++ b/integrationTests/resolvers/smartContractsResults/scrs_test.go @@ -20,8 +20,8 @@ func TestRequestResolveLargeSCRByHashRequestingShardResolvingOtherShard(t *testi shardIdRequester := uint32(0) nResolver, nRequester := resolvers.CreateResolverRequester(shardIdResolver, shardIdRequester) defer func() { - _ = nRequester.MainMessenger.Close() - _ = nResolver.MainMessenger.Close() + nRequester.Close() + nResolver.Close() }() scr, hash := resolvers.CreateLargeSmartContractResults() diff --git a/integrationTests/singleShard/block/interceptedRequestHdr/interceptedRequestHdr_test.go b/integrationTests/singleShard/block/interceptedRequestHdr/interceptedRequestHdr_test.go index 89c75c645ff..62c2d00733c 100644 --- a/integrationTests/singleShard/block/interceptedRequestHdr/interceptedRequestHdr_test.go +++ b/integrationTests/singleShard/block/interceptedRequestHdr/interceptedRequestHdr_test.go @@ -49,8 +49,8 @@ func TestNode_GenerateSendInterceptHeaderByNonceWithNetMessenger(t *testing.T) { }) defer func() { - _ = nRequester.MainMessenger.Close() - _ = nResolver.MainMessenger.Close() + nRequester.Close() + nResolver.Close() }() //connect messengers together @@ -117,8 +117,8 @@ func TestNode_InterceptedHeaderWithWrongChainIDShouldBeDiscarded(t *testing.T) { }) defer func() { - _ = nRequester.MainMessenger.Close() - _ = nResolver.MainMessenger.Close() + nRequester.Close() + nResolver.Close() }() //connect messengers together diff --git a/integrationTests/singleShard/block/interceptedRequestTxBlockBody/interceptedRequestTxBlockBody_test.go 
b/integrationTests/singleShard/block/interceptedRequestTxBlockBody/interceptedRequestTxBlockBody_test.go index ea2da120a5c..b6c31bba12e 100644 --- a/integrationTests/singleShard/block/interceptedRequestTxBlockBody/interceptedRequestTxBlockBody_test.go +++ b/integrationTests/singleShard/block/interceptedRequestTxBlockBody/interceptedRequestTxBlockBody_test.go @@ -42,8 +42,8 @@ func TestNode_GenerateSendInterceptTxBlockBodyWithNetMessenger(t *testing.T) { }) defer func() { - _ = nRequester.MainMessenger.Close() - _ = nResolver.MainMessenger.Close() + nRequester.Close() + nResolver.Close() }() //connect messengers together diff --git a/integrationTests/singleShard/transaction/interceptedBulkTx/interceptedBulkTx_test.go b/integrationTests/singleShard/transaction/interceptedBulkTx/interceptedBulkTx_test.go index 3db8bb8faf9..fab7310acb5 100644 --- a/integrationTests/singleShard/transaction/interceptedBulkTx/interceptedBulkTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedBulkTx/interceptedBulkTx_test.go @@ -136,7 +136,7 @@ func TestNode_SendTransactionFromAnUnmintedAccountShouldReturnErrorAtApiLevel(t }) defer func() { - _ = node.MainMessenger.Close() + node.Close() }() tx := &transaction.Transaction{ diff --git a/integrationTests/singleShard/transaction/interceptedResolvedTx/interceptedResolvedTx_test.go b/integrationTests/singleShard/transaction/interceptedResolvedTx/interceptedResolvedTx_test.go index 45a6dc18e00..5c303e485fa 100644 --- a/integrationTests/singleShard/transaction/interceptedResolvedTx/interceptedResolvedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedResolvedTx/interceptedResolvedTx_test.go @@ -40,8 +40,8 @@ func TestNode_RequestInterceptTransactionWithMessengerAndWhitelist(t *testing.T) TxSignPrivKeyShardId: txSignPrivKeyShardId, }) defer func() { - _ = nRequester.MainMessenger.Close() - _ = nResolver.MainMessenger.Close() + nRequester.Close() + nResolver.Close() }() //connect messengers together @@ -136,8 
+136,8 @@ func TestNode_RequestInterceptRewardTransactionWithMessenger(t *testing.T) { TxSignPrivKeyShardId: txSignPrivKeyShardId, }) defer func() { - _ = nRequester.MainMessenger.Close() - _ = nResolver.MainMessenger.Close() + nRequester.Close() + nResolver.Close() }() //connect messengers together diff --git a/integrationTests/singleShard/transaction/interceptedResolvedUnsignedTx/interceptedResolvedUnsignedTx_test.go b/integrationTests/singleShard/transaction/interceptedResolvedUnsignedTx/interceptedResolvedUnsignedTx_test.go index ef2abacd76e..9bb1fddd292 100644 --- a/integrationTests/singleShard/transaction/interceptedResolvedUnsignedTx/interceptedResolvedUnsignedTx_test.go +++ b/integrationTests/singleShard/transaction/interceptedResolvedUnsignedTx/interceptedResolvedUnsignedTx_test.go @@ -38,8 +38,8 @@ func TestNode_RequestInterceptUnsignedTransactionWithMessenger(t *testing.T) { TxSignPrivKeyShardId: txSignPrivKeyShardId, }) defer func() { - _ = nRequester.MainMessenger.Close() - _ = nResolver.MainMessenger.Close() + nRequester.Close() + nResolver.Close() }() //connect messengers together diff --git a/integrationTests/state/stateTrieSync/stateTrieSync_test.go b/integrationTests/state/stateTrieSync/stateTrieSync_test.go index 0b38e697d5d..b6b93e5c845 100644 --- a/integrationTests/state/stateTrieSync/stateTrieSync_test.go +++ b/integrationTests/state/stateTrieSync/stateTrieSync_test.go @@ -254,7 +254,7 @@ func testNodeRequestInterceptTrieNodesWithMessengerNotSyncingShouldErr(t *testin go func() { // sudden close of the resolver node after just 2 seconds time.Sleep(time.Second * 2) - _ = nResolver.MainMessenger.Close() + nResolver.Close() log.Info("resolver node closed, the requester should soon fail in error") }() From 8618e387722dfaebb43e8eb2f276db27ba87cd15 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Tue, 20 Jun 2023 14:05:01 +0300 Subject: [PATCH 19/38] fixes after review --- cmd/node/config/prefs.toml | 9 -- config/prefsConfig.go | 17 ++-- 
config/tomlConfig_test.go | 17 ++-- .../topicSender/topicResolverSender.go | 1 + factory/disabled/preferredPeersHolder.go | 2 +- factory/network/networkComponents.go | 86 +++++++++---------- factory/network/networkComponentsHandler.go | 2 +- factory/network/networkComponents_test.go | 2 +- .../realcomponents/processorRunner.go | 25 +++--- node/nodeRunner.go | 25 +++--- 10 files changed, 82 insertions(+), 104 deletions(-) diff --git a/cmd/node/config/prefs.toml b/cmd/node/config/prefs.toml index 1db13700071..98d5c02557f 100644 --- a/cmd/node/config/prefs.toml +++ b/cmd/node/config/prefs.toml @@ -28,15 +28,6 @@ # ] PreferredConnections = [] - # PreferredFullArchiveConnections holds an array containing valid ips or peer ids from nodes to connect with (in top of other connections) - # This is only considered on FullArchive mode but each full archive preferred peer must be added to PreferredConnections as well - # Example: - # PreferredConnections = [ - # "127.0.0.10", - # "16Uiu2HAm6yvbp1oZ6zjnWsn9FdRqBSaQkbhELyaThuq48ybdorrr" - # ] - PreferredFullArchiveConnections = [] - # ConnectionWatcherType represents the type of a connection watcher needed. 
# possible options: # - "disabled" - no connection watching should be made diff --git a/config/prefsConfig.go b/config/prefsConfig.go index ee2e7483381..34861d647e8 100644 --- a/config/prefsConfig.go +++ b/config/prefsConfig.go @@ -9,15 +9,14 @@ type Preferences struct { // PreferencesConfig will hold the fields which are node specific such as the display name type PreferencesConfig struct { - DestinationShardAsObserver string - NodeDisplayName string - Identity string - RedundancyLevel int64 - PreferredConnections []string - PreferredFullArchiveConnections []string - ConnectionWatcherType string - OverridableConfigTomlValues []OverridableConfig - FullArchive bool + DestinationShardAsObserver string + NodeDisplayName string + Identity string + RedundancyLevel int64 + PreferredConnections []string + ConnectionWatcherType string + OverridableConfigTomlValues []OverridableConfig + FullArchive bool } // OverridableConfig holds the path and the new value to be updated in the configuration diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go index 2c28462c85a..a33f910a832 100644 --- a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -315,17 +315,14 @@ func TestTomlPreferencesParser(t *testing.T) { redundancyLevel := int64(0) prefPubKey0 := "preferred pub key 0" prefPubKey1 := "preferred pub key 1" - prefFAPubKey0 := "preferred full archive pub key 0" - prefFAPubKey1 := "preferred full archive pub key 1" cfgPreferencesExpected := Preferences{ Preferences: PreferencesConfig{ - NodeDisplayName: nodeDisplayName, - DestinationShardAsObserver: destinationShardAsObs, - Identity: identity, - RedundancyLevel: redundancyLevel, - PreferredConnections: []string{prefPubKey0, prefPubKey1}, - PreferredFullArchiveConnections: []string{prefFAPubKey0, prefFAPubKey1}, + NodeDisplayName: nodeDisplayName, + DestinationShardAsObserver: destinationShardAsObs, + Identity: identity, + RedundancyLevel: redundancyLevel, + PreferredConnections: []string{prefPubKey0, 
prefPubKey1}, }, BlockProcessingCutoff: BlockProcessingCutoffConfig{ Enabled: true, @@ -345,10 +342,6 @@ func TestTomlPreferencesParser(t *testing.T) { "` + prefPubKey0 + `", "` + prefPubKey1 + `" ] - PreferredFullArchiveConnections = [ - "` + prefFAPubKey0 + `", - "` + prefFAPubKey1 + `" - ] [BlockProcessingCutoff] Enabled = true diff --git a/dataRetriever/topicSender/topicResolverSender.go b/dataRetriever/topicSender/topicResolverSender.go index d4fadc87335..f8ba1c1ad54 100644 --- a/dataRetriever/topicSender/topicResolverSender.go +++ b/dataRetriever/topicSender/topicResolverSender.go @@ -31,6 +31,7 @@ func NewTopicResolverSender(arg ArgTopicResolverSender) (*topicResolverSender, e // Send is used to send an array buffer to a connected peer // It is used when replying to a request func (trs *topicResolverSender) Send(buff []byte, peer core.PeerID) error { + // TODO[Sorin]: add a new field on MessageP2P for the network the message should be on if trs.fullArchiveMessenger.IsConnected(peer) { return trs.sendToConnectedPeer(trs.topicName, buff, peer, trs.fullArchiveMessenger, fullArchiveNetwork, trs.fullArchivePreferredPeersHolderHandler) } diff --git a/factory/disabled/preferredPeersHolder.go b/factory/disabled/preferredPeersHolder.go index 222a0b7393d..5e0eeefb856 100644 --- a/factory/disabled/preferredPeersHolder.go +++ b/factory/disabled/preferredPeersHolder.go @@ -20,7 +20,7 @@ func (holder *preferredPeersHolder) PutConnectionAddress(_ core.PeerID, _ string func (holder *preferredPeersHolder) PutShardID(_ core.PeerID, _ uint32) { } -// Get does nothing as it is disabled +// Get returns an empty map as it is disabled func (holder *preferredPeersHolder) Get() map[uint32][]core.PeerID { return make(map[uint32][]core.PeerID) } diff --git a/factory/network/networkComponents.go b/factory/network/networkComponents.go index d6c1b5a6492..b153d86c62c 100644 --- a/factory/network/networkComponents.go +++ b/factory/network/networkComponents.go @@ -28,36 +28,34 @@ import ( 
// NetworkComponentsFactoryArgs holds the arguments to create a network component handler instance type NetworkComponentsFactoryArgs struct { - MainP2pConfig p2pConfig.P2PConfig - FullArchiveP2pConfig p2pConfig.P2PConfig - MainConfig config.Config - RatingsConfig config.RatingsConfig - StatusHandler core.AppStatusHandler - Marshalizer marshal.Marshalizer - Syncer p2p.SyncTimer - MainPreferredPeersSlices []string - FullArchivePreferredPeersSlices []string - BootstrapWaitTime time.Duration - NodeOperationMode p2p.NodeOperation - ConnectionWatcherType string - CryptoComponents factory.CryptoComponentsHolder + MainP2pConfig p2pConfig.P2PConfig + FullArchiveP2pConfig p2pConfig.P2PConfig + MainConfig config.Config + RatingsConfig config.RatingsConfig + StatusHandler core.AppStatusHandler + Marshalizer marshal.Marshalizer + Syncer p2p.SyncTimer + PreferredPeersSlices []string + BootstrapWaitTime time.Duration + NodeOperationMode p2p.NodeOperation + ConnectionWatcherType string + CryptoComponents factory.CryptoComponentsHolder } type networkComponentsFactory struct { - mainP2PConfig p2pConfig.P2PConfig - fullArchiveP2PConfig p2pConfig.P2PConfig - mainConfig config.Config - ratingsConfig config.RatingsConfig - statusHandler core.AppStatusHandler - listenAddress string - marshalizer marshal.Marshalizer - syncer p2p.SyncTimer - mainPreferredPeersSlices []string - fullArchivePreferredPeersSlices []string - bootstrapWaitTime time.Duration - nodeOperationMode p2p.NodeOperation - connectionWatcherType string - cryptoComponents factory.CryptoComponentsHolder + mainP2PConfig p2pConfig.P2PConfig + fullArchiveP2PConfig p2pConfig.P2PConfig + mainConfig config.Config + ratingsConfig config.RatingsConfig + statusHandler core.AppStatusHandler + listenAddress string + marshalizer marshal.Marshalizer + syncer p2p.SyncTimer + preferredPeersSlices []string + bootstrapWaitTime time.Duration + nodeOperationMode p2p.NodeOperation + connectionWatcherType string + cryptoComponents 
factory.CryptoComponentsHolder } type networkComponentsHolder struct { @@ -105,20 +103,19 @@ func NewNetworkComponentsFactory( } return &networkComponentsFactory{ - mainP2PConfig: args.MainP2pConfig, - fullArchiveP2PConfig: args.FullArchiveP2pConfig, - ratingsConfig: args.RatingsConfig, - marshalizer: args.Marshalizer, - mainConfig: args.MainConfig, - statusHandler: args.StatusHandler, - listenAddress: p2p.ListenAddrWithIp4AndTcp, - syncer: args.Syncer, - bootstrapWaitTime: args.BootstrapWaitTime, - mainPreferredPeersSlices: args.MainPreferredPeersSlices, - fullArchivePreferredPeersSlices: args.FullArchivePreferredPeersSlices, - nodeOperationMode: args.NodeOperationMode, - connectionWatcherType: args.ConnectionWatcherType, - cryptoComponents: args.CryptoComponents, + mainP2PConfig: args.MainP2pConfig, + fullArchiveP2PConfig: args.FullArchiveP2pConfig, + ratingsConfig: args.RatingsConfig, + marshalizer: args.Marshalizer, + mainConfig: args.MainConfig, + statusHandler: args.StatusHandler, + listenAddress: p2p.ListenAddrWithIp4AndTcp, + syncer: args.Syncer, + bootstrapWaitTime: args.BootstrapWaitTime, + preferredPeersSlices: args.PreferredPeersSlices, + nodeOperationMode: args.NodeOperationMode, + connectionWatcherType: args.ConnectionWatcherType, + cryptoComponents: args.CryptoComponents, }, nil } @@ -231,10 +228,9 @@ func (ncf *networkComponentsFactory) createPeerHonestyHandler( func (ncf *networkComponentsFactory) createNetworkHolder( p2pConfig p2pConfig.P2PConfig, logger p2p.Logger, - preferredPeers []string, ) (networkComponentsHolder, error) { - peersHolder, err := p2pFactory.NewPeersHolder(preferredPeers) + peersHolder, err := p2pFactory.NewPeersHolder(ncf.preferredPeersSlices) if err != nil { return networkComponentsHolder{}, err } @@ -297,7 +293,7 @@ func (ncf *networkComponentsFactory) createNetworkHolder( func (ncf *networkComponentsFactory) createMainNetworkHolder() (networkComponentsHolder, error) { loggerInstance := logger.GetOrCreate("main/p2p") - 
return ncf.createNetworkHolder(ncf.mainP2PConfig, loggerInstance, ncf.mainPreferredPeersSlices) + return ncf.createNetworkHolder(ncf.mainP2PConfig, loggerInstance) } func (ncf *networkComponentsFactory) createFullArchiveNetworkHolder() (networkComponentsHolder, error) { @@ -312,7 +308,7 @@ func (ncf *networkComponentsFactory) createFullArchiveNetworkHolder() (networkCo loggerInstance := logger.GetOrCreate("full-archive/p2p") - return ncf.createNetworkHolder(ncf.fullArchiveP2PConfig, loggerInstance, ncf.fullArchivePreferredPeersSlices) + return ncf.createNetworkHolder(ncf.fullArchiveP2PConfig, loggerInstance) } // Close closes all underlying components that need closing diff --git a/factory/network/networkComponentsHandler.go b/factory/network/networkComponentsHandler.go index 79811f0b5ad..d76a18cf322 100644 --- a/factory/network/networkComponentsHandler.go +++ b/factory/network/networkComponentsHandler.go @@ -268,7 +268,7 @@ func (mnc *managedNetworkComponents) FullArchivePreferredPeersHolderHandler() fa return nil } - return mnc.mainNetworkHolder.preferredPeersHolder + return mnc.fullArchiveNetworkHolder.preferredPeersHolder } // IsInterfaceNil returns true if the value under the interface is nil diff --git a/factory/network/networkComponents_test.go b/factory/network/networkComponents_test.go index 307614a1a5a..dca1e2f2d80 100644 --- a/factory/network/networkComponents_test.go +++ b/factory/network/networkComponents_test.go @@ -77,7 +77,7 @@ func TestNetworkComponentsFactory_Create(t *testing.T) { t.Parallel() args := componentsMock.GetNetworkFactoryArgs() - args.MainPreferredPeersSlices = []string{"invalid peer"} + args.PreferredPeersSlices = []string{"invalid peer"} ncf, _ := networkComp.NewNetworkComponentsFactory(args) diff --git a/integrationTests/realcomponents/processorRunner.go b/integrationTests/realcomponents/processorRunner.go index 5fe10df380e..eb7e62c4bd9 100644 --- a/integrationTests/realcomponents/processorRunner.go +++ 
b/integrationTests/realcomponents/processorRunner.go @@ -167,19 +167,18 @@ func (pr *ProcessorRunner) createStatusCoreComponents(tb testing.TB) { func (pr *ProcessorRunner) createNetworkComponents(tb testing.TB) { argsNetwork := factoryNetwork.NetworkComponentsFactoryArgs{ - MainP2pConfig: *pr.Config.MainP2pConfig, - FullArchiveP2pConfig: *pr.Config.FullArchiveP2pConfig, - MainConfig: *pr.Config.GeneralConfig, - RatingsConfig: *pr.Config.RatingsConfig, - StatusHandler: pr.StatusCoreComponents.AppStatusHandler(), - Marshalizer: pr.CoreComponents.InternalMarshalizer(), - Syncer: pr.CoreComponents.SyncTimer(), - MainPreferredPeersSlices: make([]string, 0), - FullArchivePreferredPeersSlices: make([]string, 0), - BootstrapWaitTime: 1, - NodeOperationMode: p2p.NormalOperation, - ConnectionWatcherType: "", - CryptoComponents: pr.CryptoComponents, + MainP2pConfig: *pr.Config.MainP2pConfig, + FullArchiveP2pConfig: *pr.Config.FullArchiveP2pConfig, + MainConfig: *pr.Config.GeneralConfig, + RatingsConfig: *pr.Config.RatingsConfig, + StatusHandler: pr.StatusCoreComponents.AppStatusHandler(), + Marshalizer: pr.CoreComponents.InternalMarshalizer(), + Syncer: pr.CoreComponents.SyncTimer(), + PreferredPeersSlices: make([]string, 0), + BootstrapWaitTime: 1, + NodeOperationMode: p2p.NormalOperation, + ConnectionWatcherType: "", + CryptoComponents: pr.CryptoComponents, } networkFactory, err := factoryNetwork.NewNetworkComponentsFactory(argsNetwork) diff --git a/node/nodeRunner.go b/node/nodeRunner.go index 3cf5a039da5..410aaf16661 100644 --- a/node/nodeRunner.go +++ b/node/nodeRunner.go @@ -1385,19 +1385,18 @@ func (nr *nodeRunner) CreateManagedNetworkComponents( cryptoComponents mainFactory.CryptoComponentsHolder, ) (mainFactory.NetworkComponentsHandler, error) { networkComponentsFactoryArgs := networkComp.NetworkComponentsFactoryArgs{ - MainP2pConfig: *nr.configs.MainP2pConfig, - FullArchiveP2pConfig: *nr.configs.FullArchiveP2pConfig, - MainConfig: *nr.configs.GeneralConfig, - 
RatingsConfig: *nr.configs.RatingsConfig, - StatusHandler: statusCoreComponents.AppStatusHandler(), - Marshalizer: coreComponents.InternalMarshalizer(), - Syncer: coreComponents.SyncTimer(), - MainPreferredPeersSlices: nr.configs.PreferencesConfig.Preferences.PreferredConnections, - FullArchivePreferredPeersSlices: nr.configs.PreferencesConfig.Preferences.PreferredFullArchiveConnections, - BootstrapWaitTime: common.TimeToWaitForP2PBootstrap, - NodeOperationMode: p2p.NormalOperation, - ConnectionWatcherType: nr.configs.PreferencesConfig.Preferences.ConnectionWatcherType, - CryptoComponents: cryptoComponents, + MainP2pConfig: *nr.configs.MainP2pConfig, + FullArchiveP2pConfig: *nr.configs.FullArchiveP2pConfig, + MainConfig: *nr.configs.GeneralConfig, + RatingsConfig: *nr.configs.RatingsConfig, + StatusHandler: statusCoreComponents.AppStatusHandler(), + Marshalizer: coreComponents.InternalMarshalizer(), + Syncer: coreComponents.SyncTimer(), + PreferredPeersSlices: nr.configs.PreferencesConfig.Preferences.PreferredConnections, + BootstrapWaitTime: common.TimeToWaitForP2PBootstrap, + NodeOperationMode: p2p.NormalOperation, + ConnectionWatcherType: nr.configs.PreferencesConfig.Preferences.ConnectionWatcherType, + CryptoComponents: cryptoComponents, } if nr.configs.ImportDbConfig.IsImportDBMode { networkComponentsFactoryArgs.BootstrapWaitTime = 0 From d02bbe59522a7a75c91bb6b9bc2a42ac0eccc714 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Wed, 21 Jun 2023 11:33:03 +0300 Subject: [PATCH 20/38] update mx-chain-communication-go which --- cmd/seednode/main.go | 1 + dataRetriever/interface.go | 2 +- dataRetriever/mock/topicResolverSenderStub.go | 7 +- dataRetriever/resolvers/headerResolver.go | 2 +- .../resolvers/headerResolver_test.go | 22 +-- dataRetriever/resolvers/miniblockResolver.go | 12 +- .../resolvers/miniblockResolver_test.go | 10 +- .../resolvers/peerAuthenticationResolver.go | 10 +- .../peerAuthenticationResolver_test.go | 12 +- 
.../resolvers/transactionResolver.go | 12 +- .../resolvers/transactionResolver_test.go | 10 +- dataRetriever/resolvers/trieNodeResolver.go | 4 +- .../resolvers/trieNodeResolver_test.go | 12 +- .../resolvers/validatorInfoResolver.go | 20 +-- .../resolvers/validatorInfoResolver_test.go | 10 +- dataRetriever/topicSender/baseTopicSender.go | 11 +- .../topicSender/topicRequestSender.go | 9 +- .../topicSender/topicResolverSender.go | 25 ++- .../topicSender/topicResolverSender_test.go | 8 +- factory/network/networkComponents.go | 6 +- go.mod | 2 +- go.sum | 158 +++++++++++++++++- integrationTests/testInitializer.go | 9 +- integrationTests/testProcessorNode.go | 4 +- p2p/constants.go | 9 + p2p/errors.go | 3 + testscommon/p2pmocks/p2pMessageMock.go | 6 + 27 files changed, 295 insertions(+), 101 deletions(-) diff --git a/cmd/seednode/main.go b/cmd/seednode/main.go index c76756357d5..95a12ac9302 100644 --- a/cmd/seednode/main.go +++ b/cmd/seednode/main.go @@ -269,6 +269,7 @@ func createNode( P2pSingleSigner: p2pSingleSigner, P2pKeyGenerator: p2pKeyGen, Logger: logger.GetOrCreate("seed/p2p"), + Network: p2p.MainNetwork, } return p2pFactory.NewNetworkMessenger(arg) diff --git a/dataRetriever/interface.go b/dataRetriever/interface.go index 3c96db90af5..52cb7a58f75 100644 --- a/dataRetriever/interface.go +++ b/dataRetriever/interface.go @@ -50,7 +50,7 @@ type HeaderRequester interface { // TopicResolverSender defines what sending operations are allowed for a topic resolver type TopicResolverSender interface { - Send(buff []byte, peer core.PeerID) error + Send(buff []byte, peer core.PeerID, network p2p.Network) error RequestTopic() string TargetShardID() uint32 SetDebugHandler(handler DebugHandler) error diff --git a/dataRetriever/mock/topicResolverSenderStub.go b/dataRetriever/mock/topicResolverSenderStub.go index 9188a9d99ef..948679d40eb 100644 --- a/dataRetriever/mock/topicResolverSenderStub.go +++ b/dataRetriever/mock/topicResolverSenderStub.go @@ -4,11 +4,12 @@ import ( 
"github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/p2p" ) // TopicResolverSenderStub - type TopicResolverSenderStub struct { - SendCalled func(buff []byte, peer core.PeerID) error + SendCalled func(buff []byte, peer core.PeerID, network p2p.Network) error TargetShardIDCalled func() uint32 debugHandler dataRetriever.DebugHandler } @@ -19,9 +20,9 @@ func (trss *TopicResolverSenderStub) RequestTopic() string { } // Send - -func (trss *TopicResolverSenderStub) Send(buff []byte, peer core.PeerID) error { +func (trss *TopicResolverSenderStub) Send(buff []byte, peer core.PeerID, network p2p.Network) error { if trss.SendCalled != nil { - return trss.SendCalled(buff, peer) + return trss.SendCalled(buff, peer, network) } return nil diff --git a/dataRetriever/resolvers/headerResolver.go b/dataRetriever/resolvers/headerResolver.go index 59216068c2f..675303b7c60 100644 --- a/dataRetriever/resolvers/headerResolver.go +++ b/dataRetriever/resolvers/headerResolver.go @@ -158,7 +158,7 @@ func (hdrRes *HeaderResolver) ProcessReceivedMessage(message p2p.MessageP2P, fro hdrRes.DebugHandler().LogSucceededToResolveData(hdrRes.topic, rd.Value) - return hdrRes.Send(buff, message.Peer()) + return hdrRes.Send(buff, message.Peer(), message.Network()) } func (hdrRes *HeaderResolver) resolveHeaderFromNonce(rd *dataRetriever.RequestData) ([]byte, error) { diff --git a/dataRetriever/resolvers/headerResolver_test.go b/dataRetriever/resolvers/headerResolver_test.go index e71fff039bd..c8470e434d5 100644 --- a/dataRetriever/resolvers/headerResolver_test.go +++ b/dataRetriever/resolvers/headerResolver_test.go @@ -207,7 +207,7 @@ func TestHeaderResolver_ProcessReceivedMessageEpochTypeUnknownEpochShouldWork(t } wasSent := false arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer 
core.PeerID, network p2p.Network) error { wasSent = true return nil }, @@ -272,7 +272,7 @@ func TestHeaderResolver_ValidateRequestHashTypeFoundInHdrPoolShouldSearchAndSend arg := createMockArgHeaderResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { sendWasCalled = true return nil }, @@ -309,7 +309,7 @@ func TestHeaderResolver_ValidateRequestHashTypeFoundInHdrPoolShouldSearchAndSend arg := createMockArgHeaderResolver() arg.IsFullHistoryNode = true arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { sendWasCalled = true return nil }, @@ -352,7 +352,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestHashTypeFoundInHdrPoolMarsh arg := createMockArgHeaderResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { return nil }, } @@ -391,7 +391,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestRetFromStorageShouldRetValA arg := createMockArgHeaderResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { wasSent = true return nil }, @@ -460,7 +460,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeNotFoundInHdrNonce arg := createMockArgHeaderResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { wasSent = true return nil }, @@ -514,7 +514,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo arg := 
createMockArgHeaderResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { wasSent = true return nil }, @@ -575,7 +575,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo arg := createMockArgHeaderResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { wasSend = true return nil }, @@ -625,7 +625,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo arg := createMockArgHeaderResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { assert.Fail(t, "should not have been called") return nil }, @@ -679,7 +679,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeNotFoundInHdrNonce } arg := createMockArgHeaderResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { wasSend = true return nil }, @@ -729,7 +729,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo arg := createMockArgHeaderResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { return nil }, TargetShardIDCalled: func() uint32 { diff --git a/dataRetriever/resolvers/miniblockResolver.go b/dataRetriever/resolvers/miniblockResolver.go index c67f0f4502b..aad1b986453 100644 --- a/dataRetriever/resolvers/miniblockResolver.go +++ b/dataRetriever/resolvers/miniblockResolver.go @@ -93,9 +93,9 @@ func 
(mbRes *miniblockResolver) ProcessReceivedMessage(message p2p.MessageP2P, f switch rd.Type { case dataRetriever.HashType: - err = mbRes.resolveMbRequestByHash(rd.Value, message.Peer(), rd.Epoch) + err = mbRes.resolveMbRequestByHash(rd.Value, message.Peer(), rd.Epoch, message.Network()) case dataRetriever.HashArrayType: - err = mbRes.resolveMbRequestByHashArray(rd.Value, message.Peer(), rd.Epoch) + err = mbRes.resolveMbRequestByHashArray(rd.Value, message.Peer(), rd.Epoch, message.Network()) default: err = dataRetriever.ErrRequestTypeNotImplemented } @@ -107,7 +107,7 @@ func (mbRes *miniblockResolver) ProcessReceivedMessage(message p2p.MessageP2P, f return err } -func (mbRes *miniblockResolver) resolveMbRequestByHash(hash []byte, pid core.PeerID, epoch uint32) error { +func (mbRes *miniblockResolver) resolveMbRequestByHash(hash []byte, pid core.PeerID, epoch uint32, network p2p.Network) error { mb, err := mbRes.fetchMbAsByteSlice(hash, epoch) if err != nil { return err @@ -121,7 +121,7 @@ func (mbRes *miniblockResolver) resolveMbRequestByHash(hash []byte, pid core.Pee return err } - return mbRes.Send(buffToSend, pid) + return mbRes.Send(buffToSend, pid, network) } func (mbRes *miniblockResolver) fetchMbAsByteSlice(hash []byte, epoch uint32) ([]byte, error) { @@ -146,7 +146,7 @@ func (mbRes *miniblockResolver) fetchMbAsByteSlice(hash []byte, epoch uint32) ([ return buff, nil } -func (mbRes *miniblockResolver) resolveMbRequestByHashArray(mbBuff []byte, pid core.PeerID, epoch uint32) error { +func (mbRes *miniblockResolver) resolveMbRequestByHashArray(mbBuff []byte, pid core.PeerID, epoch uint32, network p2p.Network) error { b := batch.Batch{} err := mbRes.marshalizer.Unmarshal(&b, mbBuff) if err != nil { @@ -177,7 +177,7 @@ func (mbRes *miniblockResolver) resolveMbRequestByHashArray(mbBuff []byte, pid c } for _, buff := range buffsToSend { - errSend := mbRes.Send(buff, pid) + errSend := mbRes.Send(buff, pid, network) if errSend != nil { return errSend } diff --git 
a/dataRetriever/resolvers/miniblockResolver_test.go b/dataRetriever/resolvers/miniblockResolver_test.go index 1b336c50396..f4c52448236 100644 --- a/dataRetriever/resolvers/miniblockResolver_test.go +++ b/dataRetriever/resolvers/miniblockResolver_test.go @@ -184,7 +184,7 @@ func TestMiniblockResolver_ProcessReceivedMessageFoundInPoolShouldRetValAndSend( arg := createMockArgMiniblockResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { wasSent = true return nil }, @@ -386,7 +386,7 @@ func TestMiniblockResolver_ProcessReceivedMessageSendFails(t *testing.T) { } arg.Marshaller = goodMarshalizer arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { return expectedErr }, } @@ -428,7 +428,7 @@ func TestMiniblockResolver_ProcessReceivedMessageNotFoundInPoolShouldRetFromStor arg := createMockArgMiniblockResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { wasSend = true return nil }, @@ -474,7 +474,7 @@ func TestMiniblockResolver_ProcessReceivedMessageMarshalFails(t *testing.T) { arg := createMockArgMiniblockResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { assert.Fail(t, "should have not been called") return nil }, @@ -523,7 +523,7 @@ func TestMiniblockResolver_ProcessReceivedMessageMissingDataShouldNotSend(t *tes arg := createMockArgMiniblockResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer 
core.PeerID, network p2p.Network) error { wasSent = true return nil }, diff --git a/dataRetriever/resolvers/peerAuthenticationResolver.go b/dataRetriever/resolvers/peerAuthenticationResolver.go index 3a93101b4c2..867ca34c4b9 100644 --- a/dataRetriever/resolvers/peerAuthenticationResolver.go +++ b/dataRetriever/resolvers/peerAuthenticationResolver.go @@ -91,7 +91,7 @@ func (res *peerAuthenticationResolver) ProcessReceivedMessage(message p2p.Messag switch rd.Type { case dataRetriever.HashArrayType: - return res.resolveMultipleHashesRequest(rd.Value, message.Peer()) + return res.resolveMultipleHashesRequest(rd.Value, message.Peer(), message.Network()) default: err = dataRetriever.ErrRequestTypeNotImplemented } @@ -103,7 +103,7 @@ func (res *peerAuthenticationResolver) ProcessReceivedMessage(message p2p.Messag } // resolveMultipleHashesRequest sends the response for multiple hashes request -func (res *peerAuthenticationResolver) resolveMultipleHashesRequest(hashesBuff []byte, pid core.PeerID) error { +func (res *peerAuthenticationResolver) resolveMultipleHashesRequest(hashesBuff []byte, pid core.PeerID, network p2p.Network) error { b := batch.Batch{} err := res.marshalizer.Unmarshal(&b, hashesBuff) if err != nil { @@ -116,18 +116,18 @@ func (res *peerAuthenticationResolver) resolveMultipleHashesRequest(hashesBuff [ return fmt.Errorf("resolveMultipleHashesRequest error %w from buff %x", err, hashesBuff) } - return res.sendPeerAuthsForHashes(peerAuthsForHashes, pid) + return res.sendPeerAuthsForHashes(peerAuthsForHashes, pid, network) } // sendPeerAuthsForHashes sends multiple peer authentication messages for specific hashes -func (res *peerAuthenticationResolver) sendPeerAuthsForHashes(dataBuff [][]byte, pid core.PeerID) error { +func (res *peerAuthenticationResolver) sendPeerAuthsForHashes(dataBuff [][]byte, pid core.PeerID, network p2p.Network) error { buffsToSend, err := res.dataPacker.PackDataInChunks(dataBuff, maxBuffToSendPeerAuthentications) if err != nil { 
return err } for _, buff := range buffsToSend { - err = res.Send(buff, pid) + err = res.Send(buff, pid, network) if err != nil { return err } diff --git a/dataRetriever/resolvers/peerAuthenticationResolver_test.go b/dataRetriever/resolvers/peerAuthenticationResolver_test.go index 962d50be2ec..c55e00900d5 100644 --- a/dataRetriever/resolvers/peerAuthenticationResolver_test.go +++ b/dataRetriever/resolvers/peerAuthenticationResolver_test.go @@ -241,7 +241,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { arg.PeerAuthenticationPool = cache wasSent := false arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { wasSent = true return nil }, @@ -270,7 +270,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { arg.PeerAuthenticationPool = cache wasSent := false arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { wasSent = true return nil }, @@ -306,7 +306,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { arg.PeerAuthenticationPool = cache wasSent := false arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { wasSent = true return nil }, @@ -360,7 +360,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { arg.PeerAuthenticationPool = cache wasSent := false arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { b := &batch.Batch{} err = arg.Marshaller.Unmarshal(b, buff) assert.Nil(t, err) @@ -426,7 +426,7 @@ func 
TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { arg := createMockArgPeerAuthenticationResolver() arg.PeerAuthenticationPool = cache arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { return expectedErr }, } @@ -463,7 +463,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { messagesSent := 0 hashesReceived := 0 arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { b := &batch.Batch{} err := arg.Marshaller.Unmarshal(b, buff) assert.Nil(t, err) diff --git a/dataRetriever/resolvers/transactionResolver.go b/dataRetriever/resolvers/transactionResolver.go index cbf83d9fe04..d91fe82d497 100644 --- a/dataRetriever/resolvers/transactionResolver.go +++ b/dataRetriever/resolvers/transactionResolver.go @@ -98,9 +98,9 @@ func (txRes *TxResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConn switch rd.Type { case dataRetriever.HashType: - err = txRes.resolveTxRequestByHash(rd.Value, message.Peer(), rd.Epoch) + err = txRes.resolveTxRequestByHash(rd.Value, message.Peer(), rd.Epoch, message.Network()) case dataRetriever.HashArrayType: - err = txRes.resolveTxRequestByHashArray(rd.Value, message.Peer(), rd.Epoch) + err = txRes.resolveTxRequestByHashArray(rd.Value, message.Peer(), rd.Epoch, message.Network()) default: err = dataRetriever.ErrRequestTypeNotImplemented } @@ -112,7 +112,7 @@ func (txRes *TxResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConn return err } -func (txRes *TxResolver) resolveTxRequestByHash(hash []byte, pid core.PeerID, epoch uint32) error { +func (txRes *TxResolver) resolveTxRequestByHash(hash []byte, pid core.PeerID, epoch uint32, network p2p.Network) error { // TODO this can be optimized by searching in corresponding 
datapool (taken by topic name) tx, err := txRes.fetchTxAsByteSlice(hash, epoch) if err != nil { @@ -127,7 +127,7 @@ func (txRes *TxResolver) resolveTxRequestByHash(hash []byte, pid core.PeerID, ep return err } - return txRes.Send(buff, pid) + return txRes.Send(buff, pid, network) } func (txRes *TxResolver) fetchTxAsByteSlice(hash []byte, epoch uint32) ([]byte, error) { @@ -152,7 +152,7 @@ func (txRes *TxResolver) fetchTxAsByteSlice(hash []byte, epoch uint32) ([]byte, return buff, nil } -func (txRes *TxResolver) resolveTxRequestByHashArray(hashesBuff []byte, pid core.PeerID, epoch uint32) error { +func (txRes *TxResolver) resolveTxRequestByHashArray(hashesBuff []byte, pid core.PeerID, epoch uint32, network p2p.Network) error { // TODO this can be optimized by searching in corresponding datapool (taken by topic name) b := batch.Batch{} err := txRes.marshalizer.Unmarshal(&b, hashesBuff) @@ -186,7 +186,7 @@ func (txRes *TxResolver) resolveTxRequestByHashArray(hashesBuff []byte, pid core } for _, buff := range buffsToSend { - errSend := txRes.Send(buff, pid) + errSend := txRes.Send(buff, pid, network) if errSend != nil { return errSend } diff --git a/dataRetriever/resolvers/transactionResolver_test.go b/dataRetriever/resolvers/transactionResolver_test.go index d75d2192789..4cf0bb0e86e 100644 --- a/dataRetriever/resolvers/transactionResolver_test.go +++ b/dataRetriever/resolvers/transactionResolver_test.go @@ -206,7 +206,7 @@ func TestTxResolver_ProcessReceivedMessageFoundInTxPoolShouldSearchAndSend(t *te arg := createMockArgTxResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { sendWasCalled = true return nil }, @@ -342,7 +342,7 @@ func TestTxResolver_ProcessReceivedMessageFoundInTxStorageShouldRetValAndSend(t arg := createMockArgTxResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, 
peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { sendWasCalled = true return nil }, @@ -432,7 +432,7 @@ func TestTxResolver_ProcessReceivedMessageRequestedTwoSmallTransactionsShouldCal sendWasCalled := false arg := createMockArgTxResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { sendWasCalled = true return nil }, @@ -488,7 +488,7 @@ func TestTxResolver_ProcessReceivedMessageRequestedTwoSmallTransactionsFoundOnly sendWasCalled := false arg := createMockArgTxResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { sendWasCalled = true return nil }, @@ -585,7 +585,7 @@ func TestTxResolver_ProcessReceivedMessageHashArraySendFails(t *testing.T) { arg := createMockArgTxResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { return expectedErr }, } diff --git a/dataRetriever/resolvers/trieNodeResolver.go b/dataRetriever/resolvers/trieNodeResolver.go index 7086eb35605..ae8ec7f2db8 100644 --- a/dataRetriever/resolvers/trieNodeResolver.go +++ b/dataRetriever/resolvers/trieNodeResolver.go @@ -214,7 +214,7 @@ func (tnRes *TrieNodeResolver) sendResponse( return err } - return tnRes.Send(buff, message.Peer()) + return tnRes.Send(buff, message.Peer(), message.Network()) } func (tnRes *TrieNodeResolver) sendLargeMessage( @@ -248,7 +248,7 @@ func (tnRes *TrieNodeResolver) sendLargeMessage( return err } - return tnRes.Send(buff, message.Peer()) + return tnRes.Send(buff, message.Peer(), message.Network()) } // IsInterfaceNil returns true if there is no value under the interface diff --git 
a/dataRetriever/resolvers/trieNodeResolver_test.go b/dataRetriever/resolvers/trieNodeResolver_test.go index dd7325d533b..349fa0cd20e 100644 --- a/dataRetriever/resolvers/trieNodeResolver_test.go +++ b/dataRetriever/resolvers/trieNodeResolver_test.go @@ -183,7 +183,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageShouldGetFromTrieAndSend(t *test arg := createMockArgTrieNodeResolver() arg.TrieDataGetter = tr arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { sendWasCalled = true return nil }, @@ -297,7 +297,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesGetSerializedNodeE arg := createMockArgTrieNodeResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { assert.Fail(t, "should have not called send") return nil }, @@ -337,7 +337,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesGetSerializedNodes var receivedNodes [][]byte arg := createMockArgTrieNodeResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { b := &batch.Batch{} err := arg.Marshaller.Unmarshal(b, buff) require.Nil(t, err) @@ -392,7 +392,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesNotEnoughSpaceShou var receivedNodes [][]byte arg := createMockArgTrieNodeResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { b := &batch.Batch{} err := arg.Marshaller.Unmarshal(b, buff) require.Nil(t, err) @@ -449,7 +449,7 @@ func 
TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesShouldWorkWithSubt var receivedNodes [][]byte arg := createMockArgTrieNodeResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { b := &batch.Batch{} err := arg.Marshaller.Unmarshal(b, buff) require.Nil(t, err) @@ -519,7 +519,7 @@ func testTrieNodeResolverProcessReceivedMessageLargeTrieNode( sendWasCalled := false arg := createMockArgTrieNodeResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { b := &batch.Batch{} err := arg.Marshaller.Unmarshal(b, buff) require.Nil(t, err) diff --git a/dataRetriever/resolvers/validatorInfoResolver.go b/dataRetriever/resolvers/validatorInfoResolver.go index 47e1e21baeb..505a6ae2903 100644 --- a/dataRetriever/resolvers/validatorInfoResolver.go +++ b/dataRetriever/resolvers/validatorInfoResolver.go @@ -105,26 +105,26 @@ func (res *validatorInfoResolver) ProcessReceivedMessage(message p2p.MessageP2P, switch rd.Type { case dataRetriever.HashType: - return res.resolveHashRequest(rd.Value, rd.Epoch, fromConnectedPeer) + return res.resolveHashRequest(rd.Value, rd.Epoch, fromConnectedPeer, message.Network()) case dataRetriever.HashArrayType: - return res.resolveMultipleHashesRequest(rd.Value, rd.Epoch, fromConnectedPeer) + return res.resolveMultipleHashesRequest(rd.Value, rd.Epoch, fromConnectedPeer, message.Network()) } return fmt.Errorf("%w for value %s", dataRetriever.ErrRequestTypeNotImplemented, logger.DisplayByteSlice(rd.Value)) } // resolveHashRequest sends the response for a hash request -func (res *validatorInfoResolver) resolveHashRequest(hash []byte, epoch uint32, pid core.PeerID) error { +func (res *validatorInfoResolver) resolveHashRequest(hash []byte, epoch uint32, pid core.PeerID, 
network p2p.Network) error { data, err := res.fetchValidatorInfoByteSlice(hash, epoch) if err != nil { return err } - return res.marshalAndSend(data, pid) + return res.marshalAndSend(data, pid, network) } // resolveMultipleHashesRequest sends the response for a hash array type request -func (res *validatorInfoResolver) resolveMultipleHashesRequest(hashesBuff []byte, epoch uint32, pid core.PeerID) error { +func (res *validatorInfoResolver) resolveMultipleHashesRequest(hashesBuff []byte, epoch uint32, pid core.PeerID, network p2p.Network) error { b := batch.Batch{} err := res.marshalizer.Unmarshal(&b, hashesBuff) if err != nil { @@ -141,17 +141,17 @@ func (res *validatorInfoResolver) resolveMultipleHashesRequest(hashesBuff []byte return fmt.Errorf("resolveMultipleHashesRequest error %w from buff %s", err, outputHashes) } - return res.sendValidatorInfoForHashes(validatorInfoForHashes, pid) + return res.sendValidatorInfoForHashes(validatorInfoForHashes, pid, network) } -func (res *validatorInfoResolver) sendValidatorInfoForHashes(validatorInfoForHashes [][]byte, pid core.PeerID) error { +func (res *validatorInfoResolver) sendValidatorInfoForHashes(validatorInfoForHashes [][]byte, pid core.PeerID, network p2p.Network) error { buffsToSend, err := res.dataPacker.PackDataInChunks(validatorInfoForHashes, maxBuffToSendValidatorsInfo) if err != nil { return err } for _, buff := range buffsToSend { - err = res.Send(buff, pid) + err = res.Send(buff, pid, network) if err != nil { return err } @@ -197,7 +197,7 @@ func (res *validatorInfoResolver) fetchValidatorInfoByteSlice(hash []byte, epoch return buff, nil } -func (res *validatorInfoResolver) marshalAndSend(data []byte, pid core.PeerID) error { +func (res *validatorInfoResolver) marshalAndSend(data []byte, pid core.PeerID, network p2p.Network) error { b := &batch.Batch{ Data: [][]byte{data}, } @@ -206,7 +206,7 @@ func (res *validatorInfoResolver) marshalAndSend(data []byte, pid core.PeerID) e return err } - return 
res.Send(buff, pid) + return res.Send(buff, pid, network) } // SetDebugHandler sets a debug handler diff --git a/dataRetriever/resolvers/validatorInfoResolver_test.go b/dataRetriever/resolvers/validatorInfoResolver_test.go index 19f659660f9..97f8b85720d 100644 --- a/dataRetriever/resolvers/validatorInfoResolver_test.go +++ b/dataRetriever/resolvers/validatorInfoResolver_test.go @@ -272,7 +272,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { }, } args.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { marshMock := marshallerMock.MarshalizerMock{} b := &batch.Batch{} _ = marshMock.Unmarshal(b, buff) @@ -311,7 +311,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { }, } args.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { marshMock := marshallerMock.MarshalizerMock{} b := &batch.Batch{} _ = marshMock.Unmarshal(b, buff) @@ -431,7 +431,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { }, } args.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { return expectedErr }, } @@ -466,7 +466,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { }, } args.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { marshMock := marshallerMock.MarshalizerMock{} b := &batch.Batch{} _ = marshMock.Unmarshal(b, buff) @@ -524,7 +524,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { } numOfCallsSend := 0 args.SenderResolver = 
&mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID) error { + SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { marshMock := marshallerMock.MarshalizerMock{} b := &batch.Batch{} _ = marshMock.Unmarshal(b, buff) diff --git a/dataRetriever/topicSender/baseTopicSender.go b/dataRetriever/topicSender/baseTopicSender.go index b9ddf469008..4de5075bf3c 100644 --- a/dataRetriever/topicSender/baseTopicSender.go +++ b/dataRetriever/topicSender/baseTopicSender.go @@ -18,8 +18,6 @@ var log = logger.GetOrCreate("dataretriever/topicsender") const ( minPeersToQuery = 2 preferredPeerIndex = -1 - mainNetwork = "main" - fullArchiveNetwork = "full archive" ) // ArgBaseTopicSender is the base DTO used to create a new topic sender instance @@ -82,13 +80,14 @@ func (baseSender *baseTopicSender) sendToConnectedPeer( buff []byte, peer core.PeerID, messenger dataRetriever.MessageHandler, - network string, + network p2p.Network, preferredPeersHolder dataRetriever.PreferredPeersHolderHandler, ) error { msg := &factory.Message{ - DataField: buff, - PeerField: peer, - TopicField: topic, + DataField: buff, + PeerField: peer, + TopicField: topic, + NetworkField: network, } shouldAvoidAntiFloodCheck := preferredPeersHolder.Contains(peer) diff --git a/dataRetriever/topicSender/topicRequestSender.go b/dataRetriever/topicSender/topicRequestSender.go index 996f3f8a065..fed557a8af0 100644 --- a/dataRetriever/topicSender/topicRequestSender.go +++ b/dataRetriever/topicSender/topicRequestSender.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/random" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/p2p" ) var _ dataRetriever.TopicRequestSender = (*topicRequestSender)(nil) @@ -135,7 +136,7 @@ func (trs *topicRequestSender) SendOnRequestTopic(rd *dataRetriever.RequestData, core.CrossShardPeer.String(), trs.mainMessenger, 
trs.mainPeersRatingHandler, - mainNetwork, + p2p.MainNetwork, trs.mainPreferredPeersHolderHandler) intraPeers = trs.peerListCreator.IntraShardPeerList() @@ -149,7 +150,7 @@ func (trs *topicRequestSender) SendOnRequestTopic(rd *dataRetriever.RequestData, core.IntraShardPeer.String(), trs.mainMessenger, trs.mainPeersRatingHandler, - mainNetwork, + p2p.MainNetwork, trs.mainPreferredPeersHolderHandler) } else { preferredPeer := trs.getPreferredFullArchivePeer() @@ -164,7 +165,7 @@ func (trs *topicRequestSender) SendOnRequestTopic(rd *dataRetriever.RequestData, core.FullHistoryPeer.String(), trs.fullArchiveMessenger, trs.fullArchivePeersRatingHandler, - fullArchiveNetwork, + p2p.FullArchiveNetwork, trs.fullArchivePreferredPeersHolderHandler) } @@ -207,7 +208,7 @@ func (trs *topicRequestSender) sendOnTopic( peerType string, messenger dataRetriever.MessageHandler, peersRatingHandler dataRetriever.PeersRatingHandler, - network string, + network p2p.Network, preferredPeersHolder dataRetriever.PreferredPeersHolderHandler, ) int { if len(peerList) == 0 || maxToSend == 0 { diff --git a/dataRetriever/topicSender/topicResolverSender.go b/dataRetriever/topicSender/topicResolverSender.go index f8ba1c1ad54..5f97a100a03 100644 --- a/dataRetriever/topicSender/topicResolverSender.go +++ b/dataRetriever/topicSender/topicResolverSender.go @@ -3,6 +3,7 @@ package topicsender import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/p2p" ) var _ dataRetriever.TopicResolverSender = (*topicResolverSender)(nil) @@ -30,13 +31,27 @@ func NewTopicResolverSender(arg ArgTopicResolverSender) (*topicResolverSender, e // Send is used to send an array buffer to a connected peer // It is used when replying to a request -func (trs *topicResolverSender) Send(buff []byte, peer core.PeerID) error { - // TODO[Sorin]: add a new field on MessageP2P for the network the message should be on - if 
trs.fullArchiveMessenger.IsConnected(peer) { - return trs.sendToConnectedPeer(trs.topicName, buff, peer, trs.fullArchiveMessenger, fullArchiveNetwork, trs.fullArchivePreferredPeersHolderHandler) +func (trs *topicResolverSender) Send(buff []byte, peer core.PeerID, network p2p.Network) error { + switch network { + case p2p.MainNetwork: + return trs.sendToConnectedPeer( + trs.topicName, + buff, + peer, + trs.mainMessenger, + network, + trs.mainPreferredPeersHolderHandler) + case p2p.FullArchiveNetwork: + return trs.sendToConnectedPeer( + trs.topicName, + buff, + peer, + trs.fullArchiveMessenger, + network, + trs.fullArchivePreferredPeersHolderHandler) } - return trs.sendToConnectedPeer(trs.topicName, buff, peer, trs.mainMessenger, mainNetwork, trs.mainPreferredPeersHolderHandler) + return p2p.ErrUnknownNetwork } // IsInterfaceNil returns true if there is no value under the interface diff --git a/dataRetriever/topicSender/topicResolverSender_test.go b/dataRetriever/topicSender/topicResolverSender_test.go index e6076ffaf14..d78d6709ce2 100644 --- a/dataRetriever/topicSender/topicResolverSender_test.go +++ b/dataRetriever/topicSender/topicResolverSender_test.go @@ -115,7 +115,7 @@ func TestTopicResolverSender_SendOutputAntiflooderErrorsShouldNotSendButError(t } trs, _ := topicsender.NewTopicResolverSender(arg) - err := trs.Send(buffToSend, pID1) + err := trs.Send(buffToSend, pID1, p2p.MainNetwork) assert.True(t, errors.Is(err, expectedErr)) } @@ -148,7 +148,7 @@ func TestTopicResolverSender_SendShouldNotCheckAntifloodForPreferred(t *testing. 
} trs, _ := topicsender.NewTopicResolverSender(arg) - err := trs.Send(buffToSend, pID1) + err := trs.Send(buffToSend, pID1, p2p.MainNetwork) require.NoError(t, err) require.True(t, sendWasCalled) } @@ -185,7 +185,7 @@ func TestTopicResolverSender_SendShouldWork(t *testing.T) { } trs, _ := topicsender.NewTopicResolverSender(arg) - err := trs.Send(buffToSend, pID1) + err := trs.Send(buffToSend, pID1, p2p.MainNetwork) assert.Nil(t, err) assert.True(t, sentToPid1) @@ -216,7 +216,7 @@ func TestTopicResolverSender_SendShouldWork(t *testing.T) { } trs, _ := topicsender.NewTopicResolverSender(arg) - err := trs.Send(buffToSend, pID1) + err := trs.Send(buffToSend, pID1, p2p.FullArchiveNetwork) assert.Nil(t, err) assert.True(t, sentToPid1) diff --git a/factory/network/networkComponents.go b/factory/network/networkComponents.go index b153d86c62c..eb3c0fb4b8d 100644 --- a/factory/network/networkComponents.go +++ b/factory/network/networkComponents.go @@ -228,6 +228,7 @@ func (ncf *networkComponentsFactory) createPeerHonestyHandler( func (ncf *networkComponentsFactory) createNetworkHolder( p2pConfig p2pConfig.P2PConfig, logger p2p.Logger, + network p2p.Network, ) (networkComponentsHolder, error) { peersHolder, err := p2pFactory.NewPeersHolder(ncf.preferredPeersSlices) @@ -267,6 +268,7 @@ func (ncf *networkComponentsFactory) createNetworkHolder( P2pSingleSigner: ncf.cryptoComponents.P2pSingleSigner(), P2pKeyGenerator: ncf.cryptoComponents.P2pKeyGen(), Logger: logger, + Network: network, } networkMessenger, err := p2pFactory.NewNetworkMessenger(argsMessenger) if err != nil { @@ -293,7 +295,7 @@ func (ncf *networkComponentsFactory) createNetworkHolder( func (ncf *networkComponentsFactory) createMainNetworkHolder() (networkComponentsHolder, error) { loggerInstance := logger.GetOrCreate("main/p2p") - return ncf.createNetworkHolder(ncf.mainP2PConfig, loggerInstance) + return ncf.createNetworkHolder(ncf.mainP2PConfig, loggerInstance, p2p.MainNetwork) } func (ncf 
*networkComponentsFactory) createFullArchiveNetworkHolder() (networkComponentsHolder, error) { @@ -308,7 +310,7 @@ func (ncf *networkComponentsFactory) createFullArchiveNetworkHolder() (networkCo loggerInstance := logger.GetOrCreate("full-archive/p2p") - return ncf.createNetworkHolder(ncf.fullArchiveP2PConfig, loggerInstance) + return ncf.createNetworkHolder(ncf.fullArchiveP2PConfig, loggerInstance, p2p.FullArchiveNetwork) } // Close closes all underlying components that need closing diff --git a/go.mod b/go.mod index 17f33caa31a..9fb7735eb98 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230608110322-586e89326c74 + github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230620155758-5319791b9ab6 github.com/multiversx/mx-chain-core-go v1.2.6 github.com/multiversx/mx-chain-crypto-go v1.2.6 github.com/multiversx/mx-chain-es-indexer-go v1.4.4 diff --git a/go.sum b/go.sum index 187cf8b78ce..4c48a37aee0 100644 --- a/go.sum +++ b/go.sum @@ -37,8 +37,10 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +filippo.io/edwards25519 v1.0.0/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod 
h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= @@ -61,11 +63,15 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M= +github.com/btcsuite/btcd v0.22.1/go.mod h1:wqgTSL29+50LRkmOVknEdmt8ZojIzhuWvgu/iptuN7Y= +github.com/btcsuite/btcd v0.23.0 h1:V2/ZgjfDFIygAX3ZapeigkVBoVUtOJKSwrhZdlpSvaA= github.com/btcsuite/btcd v0.23.0/go.mod h1:0QJIIN1wwIXF/3G/m87gIwGniDMDQqjVn4SZgnFpsYY= github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA= github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= +github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU= github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A= @@ -77,6 +83,7 @@ github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOF 
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= @@ -89,6 +96,7 @@ github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1 github.com/bytedance/sonic v1.8.0 h1:ea0Xadu+sHlu7x5O3gKhRpQ1IKiMrSiHttPF0ybECuA= github.com/bytedance/sonic v1.8.0/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= @@ -102,6 +110,7 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/cilium/ebpf v0.4.0/go.mod 
h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= @@ -111,10 +120,12 @@ github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d h1:t5Wuyh53qYyg9eqn4BbnlIT+vmhyww0TatL+zT3uWgI= github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= @@ -134,7 +145,9 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2U github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= github.com/denisbrodbeck/machineid v1.0.1 h1:geKr9qtkB876mXguW2X6TU4ZynleN6ezuMSRhl4D7AQ= github.com/denisbrodbeck/machineid v1.0.1/go.mod 
h1:dJUwb7PTidGDeYyUBmXZ2GphQBbjJCrnectwCyxcUSI= +github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU= +github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= @@ -154,7 +167,9 @@ github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= @@ -172,6 +187,7 @@ github.com/gin-gonic/gin v1.9.0/go.mod h1:W1Me9+hsUSyj3CePGrd1/QrKJMSJ1Tu/0hFEH8 github.com/gizak/termui/v3 v3.1.0 h1:ZZmVDgwHl7gR7elfKf1xc4IudXZ5qqfDh4wExk4Iajc= github.com/gizak/termui/v3 v3.1.0/go.mod h1:bXQEBkJpzxUAKf0+xq9MSWAvWZlE7c+aidmyFlkYTrY= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= 
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -184,11 +200,15 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= @@ -256,7 +276,10 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp 
v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -287,12 +310,16 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGa github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod 
h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -310,19 +337,30 @@ github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixH github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/ipfs/go-cid v0.2.0/go.mod h1:P+HXFDF4CVhaVayiEb4wkAy7zBHxBwsJyt0Y5U6MLro= github.com/ipfs/go-cid v0.3.2 h1:OGgOd+JCFM+y1DjWPmVH+2/4POtpDzwcr7VgnB7mZXc= github.com/ipfs/go-cid v0.3.2/go.mod h1:gQ8pKqT/sUxGY+tIwy1RPpAojYu7jAyCp5Tz1svoupw= +github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= github.com/ipfs/go-datastore v0.5.1 h1:WkRhLuISI+XPD0uk3OskB0fYFSyqK8Ob5ZYew9Qa1nQ= 
github.com/ipfs/go-datastore v0.5.1/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= +github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk= github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= +github.com/ipfs/go-ds-badger v0.3.0/go.mod h1:1ke6mXNqeV8K3y5Ak2bAA0osoTfmxUdupVCGm4QUIek= +github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8= github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= +github.com/ipfs/go-ds-leveldb v0.5.0/go.mod h1:d3XG9RUDzQ6V4SHi8+Xgj9j1XuEk1z82lquxrVbml/Q= github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= github.com/ipfs/go-ipns v0.2.0 h1:BgmNtQhqOw5XEZ8RAfWEpK4DhqaYiuP6h71MhIp7xXU= @@ -335,8 +373,10 @@ github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JP github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= +github.com/ipfs/go-log/v2 v2.5.0/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= +github.com/ipld/go-ipld-prime v0.9.0/go.mod 
h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= github.com/ipld/go-ipld-prime v0.19.0 h1:5axC7rJmPc17Emw6TelxGwnzALk0PdupZ2oj2roDj04= github.com/ipld/go-ipld-prime v0.19.0/go.mod h1:Q9j3BaVXwaA3o5JUDNvptDDr/x8+F7FG6XJ8WI3ILg4= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= @@ -345,6 +385,7 @@ github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5D github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= +github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= @@ -363,6 +404,7 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= github.com/keybase/go-ps v0.0.0-20190827175125-91aafc93ba19/go.mod h1:hY+WOq6m2FpbvyrI93sMaypsttvaIL5nhVR92dTMUcQ= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= @@ -397,6 +439,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 
h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= +github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8= +github.com/libp2p/go-flow-metrics v0.0.2/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= @@ -405,22 +449,36 @@ github.com/libp2p/go-libp2p v0.22.0/go.mod h1:UDolmweypBSjQb2f7xutPnwZ/fxioLbMBx github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052/go.mod h1:nRMRTab+kZuk0LnKZpxhOVH/ndsdr2Nr//Zltc/vwgo= github.com/libp2p/go-libp2p-asn-util v0.2.0 h1:rg3+Os8jbnO5DxkC7K/Utdi+DkY3q/d1/1q+8WeNAsw= github.com/libp2p/go-libp2p-asn-util v0.2.0/go.mod h1:WoaWxbHKBymSN41hWSq/lGKJEca7TNm58+gGJi2WsLI= +github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g= +github.com/libp2p/go-libp2p-core v0.2.5/go.mod h1:6+5zJmKhsf7yHn1RbmYDu08qDUpIUxGdqHuEZckmZOA= +github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw= +github.com/libp2p/go-libp2p-core v0.5.3/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= github.com/libp2p/go-libp2p-core v0.5.4/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= github.com/libp2p/go-libp2p-core v0.6.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= +github.com/libp2p/go-libp2p-core v0.19.0/go.mod h1:AkA+FUKQfYt1FLNef5fOPlo/naAWjKy/RCjkcPjqzYg= github.com/libp2p/go-libp2p-core v0.20.0 h1:PGKM74+T+O/FaZNARNW32i90RMBHCcgd/hkum2UQ5eY= 
github.com/libp2p/go-libp2p-core v0.20.0/go.mod h1:6zR8H7CvQWgYLsbG4on6oLNSGcyKaYFSEYyDt51+bIY= github.com/libp2p/go-libp2p-kad-dht v0.18.0 h1:akqO3gPMwixR7qFSFq70ezRun97g5hrA/lBW9jrjUYM= github.com/libp2p/go-libp2p-kad-dht v0.18.0/go.mod h1:Gb92MYIPm3K2pJLGn8wl0m8wiKDvHrYpg+rOd0GzzPA= +github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio= github.com/libp2p/go-libp2p-kbucket v0.4.7 h1:spZAcgxifvFZHBD8tErvppbnNiKA5uokDu3CV7axu70= github.com/libp2p/go-libp2p-kbucket v0.4.7/go.mod h1:XyVo99AfQH0foSf176k4jY1xUJ2+jUJIZCSDm7r2YKk= +github.com/libp2p/go-libp2p-peerstore v0.1.4/go.mod h1:+4BDbDiiKf4PzpANZDAT+knVdLxvqh7hXOujessqdzs= github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-peerstore v0.8.0 h1:bzTG693TA1Ju/zKmUCQzDLSqiJnyRFVwPpuloZ/OZtI= +github.com/libp2p/go-libp2p-peerstore v0.8.0/go.mod h1:9geHWmNA3YDlQBjL/uPEJD6vpDK12aDNlUNHJ6kio/s= github.com/libp2p/go-libp2p-pubsub v0.8.1 h1:hSw09NauFUaA0FLgQPBJp6QOy0a2n+HSkb8IeOx8OnY= github.com/libp2p/go-libp2p-pubsub v0.8.1/go.mod h1:e4kT+DYjzPUYGZeWk4I+oxCSYTXizzXii5LDRRhjKSw= +github.com/libp2p/go-libp2p-record v0.1.2/go.mod h1:pal0eNcT5nqZaTV7UGhqeGqxFgGdsU/9W//C8dqjQDk= github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk= +github.com/libp2p/go-libp2p-routing-helpers v0.2.3/go.mod h1:795bh+9YeoFl99rMASoiVgHdi5bjack0N1+AFAdbvBw= +github.com/libp2p/go-libp2p-testing v0.11.0/go.mod h1:qG4sF27dfKFoK9KlVzK2y52LQKhp0VEmLjV5aDqr1Hg= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= +github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= +github.com/libp2p/go-libp2p-xor v0.1.0/go.mod h1:LSTM5yRnjGZbWNTA/hRwq2gGFrvRIbQJscoIL/u6InY= github.com/libp2p/go-maddr-filter v0.1.0/go.mod 
h1:VzZhTXkMucEGGEOSKddrwGiOv0tUhgnKqNEmIAz/bPU= +github.com/libp2p/go-mplex v0.7.0/go.mod h1:rW8ThnRcYWft/Jb2jeORBmPd6xuG3dGxWN/W168L9EU= github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.6/go.mod h1:4ecVB6d9f4BDSL5fqvPiC4A3KivjWn+Venn/1ALLMWA= github.com/libp2p/go-msgio v0.2.0 h1:W6shmB+FeynDrUVl2dgFQvzfBZcXiyqY4VmpQLu9FqU= @@ -430,6 +488,7 @@ github.com/libp2p/go-nat v0.1.0/go.mod h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-netroute v0.2.0 h1:0FpsbsvuSnAhXFnCY0VLFbJOzaK0VnP0r1QT/o4nWRE= github.com/libp2p/go-netroute v0.2.0/go.mod h1:Vio7LTzZ+6hoT4CMZi5/6CpY3Snzh2vgZhWgxMNwlQI= +github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-openssl v0.1.0 h1:LBkKEcUv6vtZIQLVTegAil8jbNpJErQ9AnT+bWV+Ooo= @@ -439,10 +498,12 @@ github.com/libp2p/go-reuseport v0.2.0/go.mod h1:bvVho6eLMm6Bz5hmU0LYN3ixd3nPPvtI github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= github.com/libp2p/go-yamux/v3 v3.1.2 h1:lNEy28MBk1HavUAlzKgShp+F6mn/ea1nDYWftZhFW9Q= github.com/libp2p/go-yamux/v3 v3.1.2/go.mod h1:jeLEQgLXqE2YqX1ilAClIfCMDY+0uXQUKmmb/qp0gT4= +github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs= github.com/lucas-clemente/quic-go v0.28.1 h1:Uo0lvVxWg5la9gflIF9lwa39ONq85Xq2D91YNEIslzU= github.com/lucas-clemente/quic-go v0.28.1/go.mod h1:oGz5DKK41cJt5+773+BSO9BXDsREY4HLf7+0odGAPO0= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson 
v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= github.com/marten-seemann/qtls-go1-16 v0.1.5 h1:o9JrYPPco/Nukd/HpOHMHZoBDXQqoNtUCmny98/1uqQ= @@ -456,9 +517,12 @@ github.com/marten-seemann/qtls-go1-19 v0.1.0 h1:rLFKD/9mp/uq1SYGYuVZhm83wkmU95pK github.com/marten-seemann/qtls-go1-19 v0.1.0/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-pointer v0.0.1 h1:n+XhsuGeVO6MEAp7xyEukFINEa+Quek5psIR/ylA6o0= @@ -467,8 +531,10 @@ github.com/mattn/go-runewidth v0.0.2 h1:UnlwIPBGaTZfPQ6T1IGzPI0EkYAQmT9fAEJ/poFC github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod 
h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= @@ -478,6 +544,7 @@ github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKo github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= @@ -486,6 +553,7 @@ github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrk github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzCv8LZP15IdmG+YdwD2luVPHITV96TkirNBM= github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= 
+github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -505,29 +573,38 @@ github.com/multiformats/go-base32 v0.0.4 h1:+qMh4a2f37b4xTNs6mqitDinryCI+tfO2dRV github.com/multiformats/go-base32 v0.0.4/go.mod h1:jNLFzjPZtp3aIARHbJRZIaPuspdH0J6q39uUM5pnABM= github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= +github.com/multiformats/go-multiaddr v0.1.0/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= github.com/multiformats/go-multiaddr v0.2.1/go.mod h1:s/Apk6IyxfvMjDafnhJgJ3/46z7tZ04iMk5wP4QMGGE= github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u0xW5UouOmQQrn6a3Y= github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI= github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc= +github.com/multiformats/go-multiaddr v0.4.1/go.mod h1:3afI9HfVW8csiF8UZqtpYRiDyew8pRX7qLIGHu9FLuM= github.com/multiformats/go-multiaddr v0.6.0 h1:qMnoOPj2s8xxPU5kZ57Cqdr0hHhARz7mFsPMIiYNqzg= github.com/multiformats/go-multiaddr v0.6.0/go.mod h1:F4IpaKZuPP360tOMn2Tpyu0At8w23aRyVqeK0DbFeGM= github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= 
github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= +github.com/multiformats/go-multiaddr-net v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= github.com/multiformats/go-multiaddr-net v0.1.4/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= github.com/multiformats/go-multiaddr-net v0.2.0/go.mod h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA= github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI= github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8= +github.com/multiformats/go-multicodec v0.4.1/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= +github.com/multiformats/go-multicodec v0.5.0/go.mod h1:DiY2HFaEp5EhEXb/iYzVAunmyX/aSFMxq2KMKfWEues= github.com/multiformats/go-multicodec v0.6.0 h1:KhH2kSuCARyuJraYMFxrNO3DqIaYhOdS039kbhgVwpE= github.com/multiformats/go-multicodec v0.6.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw= +github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg= 
github.com/multiformats/go-multihash v0.2.1 h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108= github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc= github.com/multiformats/go-multistream v0.3.3 h1:d5PZpjwRgVlbwfdTDjife7XszfZd8KYWfROYFlGcR8o= @@ -539,8 +616,14 @@ github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2 github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= -github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230608110322-586e89326c74 h1:jf3bWYdUku19843q7KwBKBjIOQNi/OTLyjbsE1Yfra8= -github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230608110322-586e89326c74/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= +github.com/multiversx/mx-chain-communication-go v1.0.2/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= +github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230620155758-5319791b9ab6 h1:DaU6fOeOwhTb2GF+bSLJwtco+R5Y/8+J2EDt/Z0kGtc= +github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230620155758-5319791b9ab6/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= +github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= +github.com/multiversx/mx-chain-core-go v1.2.1-0.20230510143029-ab37792342df/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= +github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= +github.com/multiversx/mx-chain-core-go v1.2.4/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= +github.com/multiversx/mx-chain-core-go v1.2.5/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= github.com/multiversx/mx-chain-core-go v1.2.6 h1:fD5cMsByM1kgvNI+uGCQGlhvr+TrV7FPvJlXT4ubYdg= 
github.com/multiversx/mx-chain-core-go v1.2.6/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= github.com/multiversx/mx-chain-crypto-go v1.2.6 h1:yxsjAQGh62los+iYmORMfh3w9qen0xbYlmwU0juNSeg= @@ -549,8 +632,10 @@ github.com/multiversx/mx-chain-es-indexer-go v1.4.4 h1:3k8pB1AEILlNXL2ggSnP43uqV github.com/multiversx/mx-chain-es-indexer-go v1.4.4/go.mod h1:IAFuU3LhjVfs3+Sf4T3BlNjY1TmZHWovHRhV7tfR8cw= github.com/multiversx/mx-chain-logger-go v1.0.11 h1:DFsHa+sc5fKwhDR50I8uBM99RTDTEW68ESyr5ALRDwE= github.com/multiversx/mx-chain-logger-go v1.0.11/go.mod h1:1srDkP0DQucWQ+rYfaq0BX2qLnULsUdRPADpYUTM6dA= +github.com/multiversx/mx-chain-storage-go v1.0.8/go.mod h1:lEkFYFe6taKYxqO1einNgT1esY3K9Pj6vPnoCwV9C3U= github.com/multiversx/mx-chain-storage-go v1.0.11 h1:u4ZsfIXEU3nJWRUxyAswhBn2pT6tJkKRwf9pra4CpzA= github.com/multiversx/mx-chain-storage-go v1.0.11/go.mod h1:VP9fwyFBmbmDzahUuu0IeGX/dKG3iBWjN6FSQ6YtVaI= +github.com/multiversx/mx-chain-vm-common-go v1.4.1/go.mod h1:K6yCdro8VohzYI6GwjGzTO+fJiPgO5coo2sgQb+zA24= github.com/multiversx/mx-chain-vm-common-go v1.4.7 h1:7I1FQ2W1c9CMT2kOiroPD0je5RpiiaUO/G2HkajXMnU= github.com/multiversx/mx-chain-vm-common-go v1.4.7/go.mod h1:cnMvZN8+4oDkjloTZVExlf8ShkMGWbbDb5/D//wLT/k= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.57 h1:7jzLRq/IcKpb/qWT3YglXY4RIM4oG6aSNnAUBdItjvk= @@ -608,6 +693,7 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e h1:ZOcivgkkFRnjfoTcGsDq3UQYiBmekwLA+qg0OjyB/ls= github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod 
h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -643,6 +729,7 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -678,9 +765,13 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.13.1 h1:Ef7KhSmjZcK6AVf9YbJdvPYG9avaF0ZxudX+ThRdWfU= github.com/smartystreets/assertions v1.13.1/go.mod h1:cXr/IwVfSo/RbCSPhoAPv73p3hlSdrBH/b3SdnW/LMY= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= github.com/sourcegraph/annotate 
v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= @@ -694,6 +785,7 @@ github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tL github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -725,22 +817,28 @@ github.com/tklauser/numcpus v0.2.1 h1:ct88eFm+Q7m2ZfXJdan1xYoXKlmwsfP+k88q05KvlZ github.com/tklauser/numcpus v0.2.1/go.mod h1:9aU+wOc6WjUIZEwWMP62PL/41d65P+iks1gBkr4QyP8= github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= +github.com/ugorji/go v1.2.7 h1:qYhyWUUd6WbiM+C6JZAUkIJt/1WrjzNHY9+KCIjVqTo= github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= github.com/ugorji/go/codec v1.2.9 h1:rmenucSohSTiyL09Y+l2OCk+FrMxGMzho2+tjr5ticU= github.com/ugorji/go/codec v1.2.9/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= 
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.10 h1:p8Fspmz3iTctJstry1PYS3HVdllxnEzTEsgIgtxTrCk= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/wangjia184/sortedset v0.0.0-20160527075905-f5d03557ba30/go.mod h1:YkocrP2K2tcw938x9gCOmT5G5eCD6jsTz0SZuyAqwIE= +github.com/warpfork/go-testmark v0.10.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a h1:G++j5e0OC488te356JvdhaM8YS6nMsjLAYF7JxCv07w= +github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee h1:lYbXeSvJi5zk5GLKVuid9TVjS9a0OmLIDKTfoZBL6Ow= github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:m2aV4LZI4Aez7dP5PMyVKEHhUyEJ/RjmPEDOpDvudHg= +github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -748,16 
+846,21 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= +go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -765,7 +868,9 @@ go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod 
h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= @@ -786,17 +891,26 @@ golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -809,6 +923,7 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod 
h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -831,6 +946,9 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -879,9 +997,18 @@ golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220812174116-3211cb980234/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -905,6 +1032,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= 
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -915,6 +1044,7 @@ golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -962,6 +1092,7 @@ golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210217105451-b926d437f341/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -970,16 +1101,28 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc 
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -987,6 +1130,10 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -997,11 +1144,13 @@ golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -1043,6 +1192,8 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1153,6 +1304,8 @@ gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8 gopkg.in/go-playground/validator.v8 v8.18.2 h1:lFB4DoMU6B626w8ny76MV7VX6W2VHct2GVOI3xgiMrQ= gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8= +gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1176,6 +1329,7 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 713611d8ced..f27804b2d24 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -167,6 +167,7 @@ func CreateMessengerWithKadDht(initialAddr string) p2p.Messenger { P2pSingleSigner: &mock.SignerMock{}, 
P2pKeyGenerator: &mock.KeyGenMock{}, Logger: logger.GetOrCreate("tests/p2p"), + Network: p2p.MainNetwork, } libP2PMes, err := p2pFactory.NewNetworkMessenger(arg) @@ -189,6 +190,7 @@ func CreateMessengerFromConfig(p2pConfig p2pConfig.P2PConfig) p2p.Messenger { P2pSingleSigner: &mock.SignerMock{}, P2pKeyGenerator: &mock.KeyGenMock{}, Logger: logger.GetOrCreate("tests/p2p"), + Network: p2p.MainNetwork, } libP2PMes, err := p2pFactory.NewNetworkMessenger(arg) @@ -198,7 +200,7 @@ func CreateMessengerFromConfig(p2pConfig p2pConfig.P2PConfig) p2p.Messenger { } // CreateMessengerFromConfigWithPeersRatingHandler creates a new libp2p messenger with provided configuration -func CreateMessengerFromConfigWithPeersRatingHandler(p2pConfig p2pConfig.P2PConfig, peersRatingHandler p2p.PeersRatingHandler, p2pKey crypto.PrivateKey) p2p.Messenger { +func CreateMessengerFromConfigWithPeersRatingHandler(p2pConfig p2pConfig.P2PConfig, peersRatingHandler p2p.PeersRatingHandler, p2pKey crypto.PrivateKey, network p2p.Network) p2p.Messenger { arg := p2pFactory.ArgsNetworkMessenger{ Marshaller: TestMarshalizer, ListenAddress: p2p.ListenLocalhostAddrWithIp4AndTcp, @@ -211,6 +213,7 @@ func CreateMessengerFromConfigWithPeersRatingHandler(p2pConfig p2pConfig.P2PConf P2pSingleSigner: &mock.SignerMock{}, P2pKeyGenerator: &mock.KeyGenMock{}, Logger: logger.GetOrCreate("tests/p2p"), + Network: network, } libP2PMes, err := p2pFactory.NewNetworkMessenger(arg) @@ -242,7 +245,7 @@ func CreateMessengerWithNoDiscovery() p2p.Messenger { } // CreateMessengerWithNoDiscoveryAndPeersRatingHandler creates a new libp2p messenger with no peer discovery -func CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHanlder p2p.PeersRatingHandler, p2pKey crypto.PrivateKey) p2p.Messenger { +func CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHanlder p2p.PeersRatingHandler, p2pKey crypto.PrivateKey, network p2p.Network) p2p.Messenger { p2pCfg := p2pConfig.P2PConfig{ Node: p2pConfig.NodeConfig{ 
Port: "0", @@ -255,7 +258,7 @@ func CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHanlder p2p. }, } - return CreateMessengerFromConfigWithPeersRatingHandler(p2pCfg, peersRatingHanlder, p2pKey) + return CreateMessengerFromConfigWithPeersRatingHandler(p2pCfg, peersRatingHanlder, p2pKey, network) } // CreateFixedNetworkOf8Peers assembles a network as following: diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 57d9d47941b..4eb195a2ad5 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -468,8 +468,8 @@ func newBaseTestProcessorNode(args ArgTestProcessorNode) *TestProcessorNode { } p2pKey := mock.NewPrivateKeyMock() - messenger := CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHandler, p2pKey) - fullArchiveMessenger := CreateMessengerWithNoDiscoveryAndPeersRatingHandler(fullArchivePeersRatingHandler, p2pKey) + messenger := CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHandler, p2pKey, p2p.MainNetwork) + fullArchiveMessenger := CreateMessengerWithNoDiscoveryAndPeersRatingHandler(fullArchivePeersRatingHandler, p2pKey, p2p.FullArchiveNetwork) var peersRatingMonitor p2p.PeersRatingMonitor peersRatingMonitor = &p2pmocks.PeersRatingMonitorStub{} diff --git a/p2p/constants.go b/p2p/constants.go index 4f0807484b7..d1ac7efdf1d 100644 --- a/p2p/constants.go +++ b/p2p/constants.go @@ -30,3 +30,12 @@ const ListenLocalhostAddrWithIp4AndTcp = "/ip4/127.0.0.1/tcp/" // BroadcastMethod defines the broadcast method of the message type BroadcastMethod = p2p.BroadcastMethod + +// Network defines the network a message belongs to +type Network = p2p.Network + +// MainNetwork defines the main network +const MainNetwork = p2p.MainNetwork + +// FullArchiveNetwork defines the full archive network +const FullArchiveNetwork = p2p.FullArchiveNetwork diff --git a/p2p/errors.go b/p2p/errors.go index d80b9445433..e25d2dd12d7 100644 --- a/p2p/errors.go +++ 
b/p2p/errors.go @@ -14,3 +14,6 @@ var ErrNilPreferredPeersHolder = p2p.ErrNilPreferredPeersHolder // ErrNilStatusHandler signals that a nil status handler has been provided var ErrNilStatusHandler = errors.New("nil status handler") + +// ErrUnknownNetwork signals that an unknown network has been provided +var ErrUnknownNetwork = errors.New("unknown network") diff --git a/testscommon/p2pmocks/p2pMessageMock.go b/testscommon/p2pmocks/p2pMessageMock.go index a6e09016606..575c101712d 100644 --- a/testscommon/p2pmocks/p2pMessageMock.go +++ b/testscommon/p2pmocks/p2pMessageMock.go @@ -17,6 +17,7 @@ type P2PMessageMock struct { PayloadField []byte TimestampField int64 BroadcastMethodField p2p.BroadcastMethod + NetworkField p2p.Network } // From - @@ -69,6 +70,11 @@ func (msg *P2PMessageMock) BroadcastMethod() p2p.BroadcastMethod { return msg.BroadcastMethodField } +// Network - +func (msg *P2PMessageMock) Network() p2p.Network { + return msg.NetworkField +} + // IsInterfaceNil returns true if there is no value under the interface func (msg *P2PMessageMock) IsInterfaceNil() bool { return msg == nil From 998b8ce2e311ec73144cbbc510472a038079d603 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Wed, 21 Jun 2023 13:46:38 +0300 Subject: [PATCH 21/38] added test for heartbeat on full archive --- go.mod | 2 +- go.sum | 4 +- heartbeat/sender/multikeyHeartbeatSender.go | 2 +- .../node/heartbeatV2/heartbeatV2_test.go | 82 ++++++++++++++++ integrationTests/testHeartbeatNode.go | 94 +++++++++++++------ 5 files changed, 149 insertions(+), 35 deletions(-) diff --git a/go.mod b/go.mod index 9fb7735eb98..dedf0f30856 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230620155758-5319791b9ab6 + github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230621085139-36073f41ef03 
github.com/multiversx/mx-chain-core-go v1.2.6 github.com/multiversx/mx-chain-crypto-go v1.2.6 github.com/multiversx/mx-chain-es-indexer-go v1.4.4 diff --git a/go.sum b/go.sum index 4c48a37aee0..8a70a68d9f4 100644 --- a/go.sum +++ b/go.sum @@ -617,8 +617,8 @@ github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXS github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= github.com/multiversx/mx-chain-communication-go v1.0.2/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= -github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230620155758-5319791b9ab6 h1:DaU6fOeOwhTb2GF+bSLJwtco+R5Y/8+J2EDt/Z0kGtc= -github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230620155758-5319791b9ab6/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= +github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230621085139-36073f41ef03 h1:NOlfuZNjiEquyMM0N40mvkug4jgj2Q6jCPjSW8Ksn2Q= +github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230621085139-36073f41ef03/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1-0.20230510143029-ab37792342df/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= diff --git a/heartbeat/sender/multikeyHeartbeatSender.go b/heartbeat/sender/multikeyHeartbeatSender.go index 4ac99e9d7e1..e97c64e4ea4 100644 --- a/heartbeat/sender/multikeyHeartbeatSender.go +++ b/heartbeat/sender/multikeyHeartbeatSender.go @@ -186,7 +186,7 @@ func (sender *multikeyHeartbeatSender) sendMessageForKey(pkBytes []byte) error { } sender.mainMessenger.BroadcastUsingPrivateKey(sender.topic, buff, pid, p2pSk) - 
sender.fullArchiveMessenger.BroadcastUsingPrivateKey(sender.topic, buff, pid, p2pSk) // TODO[Sorin]: rethink if we need to send this + sender.fullArchiveMessenger.BroadcastUsingPrivateKey(sender.topic, buff, pid, p2pSk) return nil } diff --git a/integrationTests/node/heartbeatV2/heartbeatV2_test.go b/integrationTests/node/heartbeatV2/heartbeatV2_test.go index 82fb2b276d5..90a6173ea42 100644 --- a/integrationTests/node/heartbeatV2/heartbeatV2_test.go +++ b/integrationTests/node/heartbeatV2/heartbeatV2_test.go @@ -1,9 +1,11 @@ package heartbeatV2 import ( + "fmt" "testing" "time" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/heartbeat" "github.com/multiversx/mx-chain-go/integrationTests" logger "github.com/multiversx/mx-chain-logger-go" @@ -130,12 +132,92 @@ func TestHeartbeatV2_PeerAuthenticationMessageExpiration(t *testing.T) { assert.Equal(t, interactingNodes-2, nodes[0].DataPool.PeerAuthentications().Len()) } +func TestHeartbeatV2_AllPeersSendMessagesOnAllNetworks(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + + interactingNodes := 3 + nodes := make([]*integrationTests.TestHeartbeatNode, interactingNodes) + p2pConfig := integrationTests.CreateP2PConfigWithNoDiscovery() + for i := 0; i < interactingNodes; i++ { + nodes[i] = integrationTests.NewTestHeartbeatNode(t, 3, 0, interactingNodes, p2pConfig, 60) + } + assert.Equal(t, interactingNodes, len(nodes)) + + // connect nodes on main network only + for i := 0; i < interactingNodes-1; i++ { + for j := i + 1; j < interactingNodes; j++ { + src := nodes[i] + dst := nodes[j] + _ = src.ConnectOnMain(dst) + } + } + + // Wait for messages to broadcast + time.Sleep(time.Second * 15) + + // check peer shard mappers + // full archive should not be updated at this point + for i := 0; i < interactingNodes; i++ { + for j := 0; j < interactingNodes; j++ { + if i == j { + continue + } + + peerInfo := 
nodes[i].FullArchivePeerShardMapper.GetPeerInfo(nodes[j].MainMessenger.ID()) + assert.Equal(t, core.UnknownPeer, peerInfo.PeerType) + + peerInfoMain := nodes[i].MainPeerShardMapper.GetPeerInfo(nodes[j].MainMessenger.ID()) + assert.Equal(t, nodes[j].ShardCoordinator.SelfId(), peerInfoMain.ShardID) + assert.Equal(t, core.ValidatorPeer, peerInfoMain.PeerType) // on main network they are all validators, but on full archive peerAuthentication message is not sent + } + } + + // connect nodes on full archive as well network only + for i := 0; i < interactingNodes-1; i++ { + for j := i + 1; j < interactingNodes; j++ { + src := nodes[i] + dst := nodes[j] + _ = src.ConnectOnFullArchive(dst) + } + } + + // Wait for messages to broadcast + time.Sleep(time.Second * 15) + + // check peer shard mappers + // full archive should be updated at this point + for i := 0; i < interactingNodes; i++ { + for j := 0; j < interactingNodes; j++ { + if i == j { + continue + } + + peerInfo := nodes[i].FullArchivePeerShardMapper.GetPeerInfo(nodes[j].MainMessenger.ID()) + assert.Equal(t, nodes[j].ShardCoordinator.SelfId(), peerInfo.ShardID) + println(fmt.Sprintf("main %s - %d", nodes[j].MainMessenger.ID(), peerInfo.ShardID)) + assert.Equal(t, core.ObserverPeer, peerInfo.PeerType) + + peerInfoMain := nodes[i].MainPeerShardMapper.GetPeerInfo(nodes[j].MainMessenger.ID()) + assert.Equal(t, nodes[j].ShardCoordinator.SelfId(), peerInfoMain.ShardID) + println(fmt.Sprintf("main %s - %d", nodes[j].MainMessenger.ID(), peerInfoMain.ShardID)) + assert.Equal(t, core.ValidatorPeer, peerInfoMain.PeerType) + } + } + + for i := 0; i < len(nodes); i++ { + nodes[i].Close() + } +} + func connectNodes(nodes []*integrationTests.TestHeartbeatNode, interactingNodes int) { for i := 0; i < interactingNodes-1; i++ { for j := i + 1; j < interactingNodes; j++ { src := nodes[i] dst := nodes[j] _ = src.ConnectOnMain(dst) + _ = src.ConnectOnFullArchive(dst) } } } diff --git a/integrationTests/testHeartbeatNode.go 
b/integrationTests/testHeartbeatNode.go index 308fcc32f6b..8fd5a02a709 100644 --- a/integrationTests/testHeartbeatNode.go +++ b/integrationTests/testHeartbeatNode.go @@ -95,7 +95,9 @@ type TestHeartbeatNode struct { Sender update.Closer PeerAuthInterceptor *interceptors.MultiDataInterceptor HeartbeatInterceptor *interceptors.SingleDataInterceptor + FullArchiveHeartbeatInterceptor *interceptors.SingleDataInterceptor PeerShardInterceptor *interceptors.SingleDataInterceptor + FullArchivePeerShardInterceptor *interceptors.SingleDataInterceptor PeerSigHandler crypto.PeerSignatureHandler WhiteListHandler process.WhiteListHandler Storage dataRetriever.StorageService @@ -124,21 +126,7 @@ func NewTestHeartbeatNode( keygen := signing.NewKeyGenerator(mcl.NewSuiteBLS12()) sk, pk := keygen.GeneratePair() - pksBytes := make(map[uint32][]byte, maxShards) - pksBytes[nodeShardId], _ = pk.ToByteArray() - nodesCoordinatorInstance := &shardingMocks.NodesCoordinatorStub{ - GetAllValidatorsPublicKeysCalled: func() (map[uint32][][]byte, error) { - keys := make(map[uint32][][]byte) - for shardID := uint32(0); shardID < maxShards; shardID++ { - keys[shardID] = append(keys[shardID], pksBytes[shardID]) - } - - shardID := core.MetachainShardId - keys[shardID] = append(keys[shardID], pksBytes[shardID]) - - return keys, nil - }, GetValidatorWithPublicKeyCalled: func(publicKey []byte) (nodesCoordinator.Validator, uint32, error) { validatorInstance, _ := nodesCoordinator.NewValidator(publicKey, defaultChancesSelection, 1) return validatorInstance, 0, nil @@ -161,7 +149,8 @@ func NewTestHeartbeatNode( shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) - messenger := CreateMessengerFromConfig(p2pConfig) + p2pKey := mock.NewPrivateKeyMock() + messenger := CreateMessengerFromConfigWithPeersRatingHandler(p2pConfig, &p2pmocks.PeersRatingHandlerStub{}, p2pKey, p2p.MainNetwork) pidPk, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 
1000}) pkShardId, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 1000}) pidShardId, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 1000}) @@ -181,14 +170,34 @@ func NewTestHeartbeatNode( log.Error("error setting NewPeerShardMapper in p2p messenger", "error", err) } + fullArchiveMessenger := CreateMessengerFromConfigWithPeersRatingHandler(p2pConfig, &p2pmocks.PeersRatingHandlerStub{}, p2pKey, p2p.FullArchiveNetwork) + pidPkFullArch, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 1000}) + pkShardIdFullArch, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 1000}) + pidShardIdFullArch, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 1000}) + argFullArch := networksharding.ArgPeerShardMapper{ + PeerIdPkCache: pidPkFullArch, + FallbackPkShardCache: pkShardIdFullArch, + FallbackPidShardCache: pidShardIdFullArch, + NodesCoordinator: nodesCoordinatorInstance, + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + } + peerShardMapperFullArch, err := networksharding.NewPeerShardMapper(argFullArch) + if err != nil { + log.Error("error creating NewPeerShardMapper for full archive network", "error", err) + } + err = fullArchiveMessenger.SetPeerShardResolver(peerShardMapperFullArch) + if err != nil { + log.Error("error setting NewPeerShardMapper in p2p messenger for full archive network", "error", err) + } + thn := &TestHeartbeatNode{ ShardCoordinator: shardCoordinator, NodesCoordinator: nodesCoordinatorInstance, MainMessenger: messenger, - FullArchiveMessenger: &p2pmocks.MessengerStub{}, // TODO[Sorin]: inject a proper messenger when all pieces are done to test this network as well + FullArchiveMessenger: fullArchiveMessenger, PeerSigHandler: peerSigHandler, MainPeerShardMapper: peerShardMapper, - FullArchivePeerShardMapper: &mock.PeerShardMapperStub{}, + 
FullArchivePeerShardMapper: peerShardMapperFullArch, heartbeatExpiryTimespanInSec: heartbeatExpiryTimespanInSec, } @@ -575,7 +584,9 @@ func (thn *TestHeartbeatNode) initInterceptors() { thn.createPeerAuthInterceptor(argsFactory) thn.createHeartbeatInterceptor(argsFactory) + thn.createFullArchiveHeartbeatInterceptor(argsFactory) thn.createPeerShardInterceptor(argsFactory) + thn.createFullArchivePeerShardInterceptor(argsFactory) } func (thn *TestHeartbeatNode) createPeerAuthInterceptor(argsFactory interceptorFactory.ArgInterceptedDataFactory) { @@ -599,7 +610,19 @@ func (thn *TestHeartbeatNode) createHeartbeatInterceptor(argsFactory interceptor hbProcessor, _ := interceptorsProcessor.NewHeartbeatInterceptorProcessor(args) hbFactory, _ := interceptorFactory.NewInterceptedHeartbeatDataFactory(argsFactory) identifierHeartbeat := common.HeartbeatV2Topic + thn.ShardCoordinator.CommunicationIdentifier(thn.ShardCoordinator.SelfId()) - thn.HeartbeatInterceptor = thn.initSingleDataInterceptor(identifierHeartbeat, hbFactory, hbProcessor) + thn.HeartbeatInterceptor = thn.initSingleDataInterceptor(identifierHeartbeat, hbFactory, hbProcessor, thn.MainMessenger) +} + +func (thn *TestHeartbeatNode) createFullArchiveHeartbeatInterceptor(argsFactory interceptorFactory.ArgInterceptedDataFactory) { + args := interceptorsProcessor.ArgHeartbeatInterceptorProcessor{ + HeartbeatCacher: thn.DataPool.Heartbeats(), + ShardCoordinator: thn.ShardCoordinator, + PeerShardMapper: thn.FullArchivePeerShardMapper, + } + hbProcessor, _ := interceptorsProcessor.NewHeartbeatInterceptorProcessor(args) + hbFactory, _ := interceptorFactory.NewInterceptedHeartbeatDataFactory(argsFactory) + identifierHeartbeat := common.HeartbeatV2Topic + thn.ShardCoordinator.CommunicationIdentifier(thn.ShardCoordinator.SelfId()) + thn.FullArchiveHeartbeatInterceptor = thn.initSingleDataInterceptor(identifierHeartbeat, hbFactory, hbProcessor, thn.FullArchiveMessenger) } func (thn *TestHeartbeatNode) 
createPeerShardInterceptor(argsFactory interceptorFactory.ArgInterceptedDataFactory) { @@ -608,7 +631,16 @@ func (thn *TestHeartbeatNode) createPeerShardInterceptor(argsFactory interceptor } dciProcessor, _ := interceptorsProcessor.NewPeerShardInterceptorProcessor(args) dciFactory, _ := interceptorFactory.NewInterceptedPeerShardFactory(argsFactory) - thn.PeerShardInterceptor = thn.initSingleDataInterceptor(common.ConnectionTopic, dciFactory, dciProcessor) + thn.PeerShardInterceptor = thn.initSingleDataInterceptor(common.ConnectionTopic, dciFactory, dciProcessor, thn.MainMessenger) +} + +func (thn *TestHeartbeatNode) createFullArchivePeerShardInterceptor(argsFactory interceptorFactory.ArgInterceptedDataFactory) { + args := interceptorsProcessor.ArgPeerShardInterceptorProcessor{ + PeerShardMapper: thn.FullArchivePeerShardMapper, + } + dciProcessor, _ := interceptorsProcessor.NewPeerShardInterceptorProcessor(args) + dciFactory, _ := interceptorFactory.NewInterceptedPeerShardFactory(argsFactory) + thn.FullArchivePeerShardInterceptor = thn.initSingleDataInterceptor(common.ConnectionTopic, dciFactory, dciProcessor, thn.FullArchiveMessenger) } func (thn *TestHeartbeatNode) initMultiDataInterceptor(topic string, dataFactory process.InterceptedDataFactory, processor process.InterceptorProcessor) *interceptors.MultiDataInterceptor { @@ -630,12 +662,12 @@ func (thn *TestHeartbeatNode) initMultiDataInterceptor(topic string, dataFactory }, ) - thn.registerTopicValidator(topic, mdInterceptor) + thn.registerTopicValidator(topic, mdInterceptor, thn.MainMessenger) return mdInterceptor } -func (thn *TestHeartbeatNode) initSingleDataInterceptor(topic string, dataFactory process.InterceptedDataFactory, processor process.InterceptorProcessor) *interceptors.SingleDataInterceptor { +func (thn *TestHeartbeatNode) initSingleDataInterceptor(topic string, dataFactory process.InterceptedDataFactory, processor process.InterceptorProcessor, messenger p2p.Messenger) 
*interceptors.SingleDataInterceptor { sdInterceptor, _ := interceptors.NewSingleDataInterceptor( interceptors.ArgSingleDataInterceptor{ Topic: topic, @@ -653,7 +685,7 @@ func (thn *TestHeartbeatNode) initSingleDataInterceptor(topic string, dataFactor }, ) - thn.registerTopicValidator(topic, sdInterceptor) + thn.registerTopicValidator(topic, sdInterceptor, messenger) return sdInterceptor } @@ -732,10 +764,10 @@ func (thn *TestHeartbeatNode) initCrossShardPeerTopicNotifier(tb testing.TB) { ShardCoordinator: thn.ShardCoordinator, PeerShardMapper: thn.FullArchivePeerShardMapper, } - crossShardPeerTopicNotifier, err = monitor.NewCrossShardPeerTopicNotifier(argsCrossShardPeerTopicNotifier) + fullArchiveCrossShardPeerTopicNotifier, err := monitor.NewCrossShardPeerTopicNotifier(argsCrossShardPeerTopicNotifier) require.Nil(tb, err) - err = thn.FullArchiveMessenger.AddPeerTopicNotifier(crossShardPeerTopicNotifier) + err = thn.FullArchiveMessenger.AddPeerTopicNotifier(fullArchiveCrossShardPeerTopicNotifier) require.Nil(tb, err) } @@ -755,7 +787,7 @@ func (thn *TestHeartbeatNode) ConnectOnFullArchive(connectable Connectable) erro return fmt.Errorf("trying to connect to a nil Connectable parameter") } - return thn.FullArchiveMessenger.ConnectToPeer(connectable.GetMainConnectableAddress()) + return thn.FullArchiveMessenger.ConnectToPeer(connectable.GetFullArchiveConnectableAddress()) } // GetMainConnectableAddress returns a non circuit, non windows default connectable p2p address @@ -818,14 +850,14 @@ func MakeDisplayTableForHeartbeatNodes(nodes map[uint32][]*TestHeartbeatNode) st } // registerTopicValidator registers a message processor instance on the provided topic -func (thn *TestHeartbeatNode) registerTopicValidator(topic string, processor p2p.MessageProcessor) { - err := thn.MainMessenger.CreateTopic(topic, true) +func (thn *TestHeartbeatNode) registerTopicValidator(topic string, processor p2p.MessageProcessor, messenger p2p.Messenger) { + err := 
messenger.CreateTopic(topic, true) if err != nil { fmt.Printf("error while creating topic %s: %s\n", topic, err.Error()) return } - err = thn.MainMessenger.RegisterMessageProcessor(topic, "test", processor) + err = messenger.RegisterMessageProcessor(topic, "test", processor) if err != nil { fmt.Printf("error while registering topic validator %s: %s\n", topic, err.Error()) return @@ -834,14 +866,14 @@ func (thn *TestHeartbeatNode) registerTopicValidator(topic string, processor p2p // CreateTestInterceptors creates test interceptors that count the number of received messages func (thn *TestHeartbeatNode) CreateTestInterceptors() { - thn.registerTopicValidator(GlobalTopic, thn.Interceptor) + thn.registerTopicValidator(GlobalTopic, thn.Interceptor, thn.MainMessenger) metaIdentifier := ShardTopic + thn.ShardCoordinator.CommunicationIdentifier(core.MetachainShardId) - thn.registerTopicValidator(metaIdentifier, thn.Interceptor) + thn.registerTopicValidator(metaIdentifier, thn.Interceptor, thn.MainMessenger) for i := uint32(0); i < thn.ShardCoordinator.NumberOfShards(); i++ { identifier := ShardTopic + thn.ShardCoordinator.CommunicationIdentifier(i) - thn.registerTopicValidator(identifier, thn.Interceptor) + thn.registerTopicValidator(identifier, thn.Interceptor, thn.MainMessenger) } } From 21adf31de970a9216a8954d177473995ba0601b1 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Wed, 21 Jun 2023 14:44:19 +0300 Subject: [PATCH 22/38] fixed some test comments --- integrationTests/node/heartbeatV2/heartbeatV2_test.go | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/integrationTests/node/heartbeatV2/heartbeatV2_test.go b/integrationTests/node/heartbeatV2/heartbeatV2_test.go index 90a6173ea42..51f21c17e1a 100644 --- a/integrationTests/node/heartbeatV2/heartbeatV2_test.go +++ b/integrationTests/node/heartbeatV2/heartbeatV2_test.go @@ -1,7 +1,6 @@ package heartbeatV2 import ( - "fmt" "testing" "time" @@ -166,15 +165,15 @@ func 
TestHeartbeatV2_AllPeersSendMessagesOnAllNetworks(t *testing.T) { } peerInfo := nodes[i].FullArchivePeerShardMapper.GetPeerInfo(nodes[j].MainMessenger.ID()) - assert.Equal(t, core.UnknownPeer, peerInfo.PeerType) + assert.Equal(t, core.UnknownPeer, peerInfo.PeerType) // nodes not connected on this network peerInfoMain := nodes[i].MainPeerShardMapper.GetPeerInfo(nodes[j].MainMessenger.ID()) assert.Equal(t, nodes[j].ShardCoordinator.SelfId(), peerInfoMain.ShardID) - assert.Equal(t, core.ValidatorPeer, peerInfoMain.PeerType) // on main network they are all validators, but on full archive peerAuthentication message is not sent + assert.Equal(t, core.ValidatorPeer, peerInfoMain.PeerType) // on main network they are all validators } } - // connect nodes on full archive as well network only + // connect nodes on full archive network as well for i := 0; i < interactingNodes-1; i++ { for j := i + 1; j < interactingNodes; j++ { src := nodes[i] @@ -196,12 +195,10 @@ func TestHeartbeatV2_AllPeersSendMessagesOnAllNetworks(t *testing.T) { peerInfo := nodes[i].FullArchivePeerShardMapper.GetPeerInfo(nodes[j].MainMessenger.ID()) assert.Equal(t, nodes[j].ShardCoordinator.SelfId(), peerInfo.ShardID) - println(fmt.Sprintf("main %s - %d", nodes[j].MainMessenger.ID(), peerInfo.ShardID)) - assert.Equal(t, core.ObserverPeer, peerInfo.PeerType) + assert.Equal(t, core.ObserverPeer, peerInfo.PeerType) // observers because the peerAuth is not sent on this network peerInfoMain := nodes[i].MainPeerShardMapper.GetPeerInfo(nodes[j].MainMessenger.ID()) assert.Equal(t, nodes[j].ShardCoordinator.SelfId(), peerInfoMain.ShardID) - println(fmt.Sprintf("main %s - %d", nodes[j].MainMessenger.ID(), peerInfoMain.ShardID)) assert.Equal(t, core.ValidatorPeer, peerInfoMain.PeerType) } } From d6541a02e875a82c62cc5f3baaa023a0c1a932a0 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Thu, 22 Jun 2023 10:45:09 +0300 Subject: [PATCH 23/38] fix after review, new approach with messenger as param on 
ProcessReceivedMessage --- cmd/seednode/main.go | 2 +- consensus/mock/sposWorkerMock.go | 2 +- consensus/spos/interface.go | 2 +- consensus/spos/worker.go | 2 +- consensus/spos/worker_test.go | 38 +++++++----- dataRetriever/errors.go | 3 + .../factory/requestersContainer/args.go | 4 +- .../baseRequestersContainerFactory.go | 5 +- .../shardRequestersContainerFactory_test.go | 13 ++-- .../factory/resolverscontainer/args.go | 4 +- .../baseResolversContainerFactory.go | 5 +- .../metaResolversContainerFactory_test.go | 18 +++--- .../shardResolversContainerFactory_test.go | 34 +++++----- .../storageRequestersContainer/args.go | 3 +- .../baseRequestersContainerFactory.go | 19 +++--- .../metaRequestersContainerFactory_test.go | 13 ++-- .../shardRequestersContainerFactory_test.go | 13 ++-- dataRetriever/interface.go | 11 +--- dataRetriever/mock/headerResolverStub.go | 2 +- dataRetriever/mock/resolverStub.go | 2 +- dataRetriever/mock/topicHandlerStub.go | 32 ---------- dataRetriever/mock/topicMessageHandlerStub.go | 19 ------ dataRetriever/mock/topicResolverSenderStub.go | 6 +- dataRetriever/resolvers/disabled/resolver.go | 2 +- dataRetriever/resolvers/export_test.go | 4 +- dataRetriever/resolvers/headerResolver.go | 4 +- .../resolvers/headerResolver_test.go | 54 +++++++++------- dataRetriever/resolvers/miniblockResolver.go | 14 ++--- .../resolvers/miniblockResolver_test.go | 25 +++++--- .../resolvers/peerAuthenticationResolver.go | 12 ++-- .../peerAuthenticationResolver_test.go | 37 +++++------ .../resolvers/transactionResolver.go | 14 ++--- .../resolvers/transactionResolver_test.go | 38 ++++++------ dataRetriever/resolvers/trieNodeResolver.go | 24 +++---- .../resolvers/trieNodeResolver_test.go | 38 ++++++------ .../resolvers/validatorInfoResolver.go | 22 +++---- .../resolvers/validatorInfoResolver_test.go | 41 ++++++------ dataRetriever/topicSender/baseTopicSender.go | 21 +++---- .../topicSender/topicRequestSender.go | 12 +--- .../topicSender/topicRequestSender_test.go | 
34 +++++----- .../topicSender/topicResolverSender.go | 26 +++----- .../topicSender/topicResolverSender_test.go | 62 ++++++++++++++++--- epochStart/bootstrap/process.go | 8 +-- factory/interface.go | 2 +- factory/network/networkComponents.go | 6 +- go.mod | 2 +- go.sum | 4 +- integrationTests/countInterceptor.go | 2 +- .../antiflooding/messageProcessor.go | 2 +- .../p2p/antiflood/messageProcessor.go | 2 +- integrationTests/testHeartbeatNode.go | 4 +- integrationTests/testInitializer.go | 12 ++-- integrationTests/testProcessorNode.go | 4 +- p2p/constants.go | 12 ++-- p2p/disabled/networkMessenger.go | 10 +++ p2p/errors.go | 3 - p2p/interface.go | 3 + .../epochStartMetaBlockInterceptor.go | 2 +- .../epochStartMetaBlockInterceptor_test.go | 12 ++-- process/interceptors/multiDataInterceptor.go | 2 +- .../interceptors/multiDataInterceptor_test.go | 26 ++++---- process/interceptors/singleDataInterceptor.go | 2 +- .../singleDataInterceptor_test.go | 14 ++--- process/interface.go | 2 +- testscommon/interceptorStub.go | 2 +- testscommon/p2pmocks/messengerStub.go | 18 ++++++ testscommon/p2pmocks/p2pMessageMock.go | 6 -- .../fullSyncRequestersContainerFactory.go | 9 +-- .../fullSyncResolversContainerFactory.go | 9 +-- 69 files changed, 470 insertions(+), 446 deletions(-) delete mode 100644 dataRetriever/mock/topicHandlerStub.go delete mode 100644 dataRetriever/mock/topicMessageHandlerStub.go diff --git a/cmd/seednode/main.go b/cmd/seednode/main.go index 95a12ac9302..6c184d3e4cc 100644 --- a/cmd/seednode/main.go +++ b/cmd/seednode/main.go @@ -269,7 +269,7 @@ func createNode( P2pSingleSigner: p2pSingleSigner, P2pKeyGenerator: p2pKeyGen, Logger: logger.GetOrCreate("seed/p2p"), - Network: p2p.MainNetwork, + MessageHandlerType: p2p.RegularMessageHandler, } return p2pFactory.NewNetworkMessenger(arg) diff --git a/consensus/mock/sposWorkerMock.go b/consensus/mock/sposWorkerMock.go index 47e7b9e196c..0454370bedf 100644 --- a/consensus/mock/sposWorkerMock.go +++ 
b/consensus/mock/sposWorkerMock.go @@ -49,7 +49,7 @@ func (sposWorkerMock *SposWorkerMock) RemoveAllReceivedMessagesCalls() { } // ProcessReceivedMessage - -func (sposWorkerMock *SposWorkerMock) ProcessReceivedMessage(message p2p.MessageP2P, _ core.PeerID) error { +func (sposWorkerMock *SposWorkerMock) ProcessReceivedMessage(message p2p.MessageP2P, _ core.PeerID, _ p2p.MessageHandler) error { return sposWorkerMock.ProcessReceivedMessageCalled(message) } diff --git a/consensus/spos/interface.go b/consensus/spos/interface.go index e9e31f6d202..1bb1eada421 100644 --- a/consensus/spos/interface.go +++ b/consensus/spos/interface.go @@ -126,7 +126,7 @@ type WorkerHandler interface { // RemoveAllReceivedMessagesCalls removes all the functions handlers RemoveAllReceivedMessagesCalls() // ProcessReceivedMessage method redirects the received message to the channel which should handle it - ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error + ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error // Extend does an extension for the subround with subroundId Extend(subroundId int) // GetConsensusStateChangedChannel gets the channel for the consensusStateChanged diff --git a/consensus/spos/worker.go b/consensus/spos/worker.go index e91ac9c2bda..7dd1776308e 100644 --- a/consensus/spos/worker.go +++ b/consensus/spos/worker.go @@ -334,7 +334,7 @@ func (wrk *Worker) getCleanedList(cnsDataList []*consensus.Message) []*consensus } // ProcessReceivedMessage method redirects the received message to the channel which should handle it -func (wrk *Worker) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { +func (wrk *Worker) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, _ p2p.MessageHandler) error { if check.IfNil(message) { return ErrNilMessage } diff --git a/consensus/spos/worker_test.go b/consensus/spos/worker_test.go index 
47eaec4a9d2..37cc36f33c1 100644 --- a/consensus/spos/worker_test.go +++ b/consensus/spos/worker_test.go @@ -400,7 +400,7 @@ func TestWorker_ProcessReceivedMessageShouldErrIfFloodIsDetectedOnTopic(t *testi TopicField: "topic1", SignatureField: []byte("signature"), } - err := wrk.ProcessReceivedMessage(msg, "peer") + err := wrk.ProcessReceivedMessage(msg, "peer", &p2pmocks.MessengerStub{}) assert.Equal(t, expectedErr, err) } @@ -515,7 +515,7 @@ func TestWorker_ProcessReceivedMessageTxBlockBodyShouldRetNil(t *testing.T) { PeerField: currentPid, SignatureField: []byte("signature"), } - err := wrk.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := wrk.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Nil(t, err) } @@ -523,7 +523,7 @@ func TestWorker_ProcessReceivedMessageTxBlockBodyShouldRetNil(t *testing.T) { func TestWorker_ProcessReceivedMessageNilMessageShouldErr(t *testing.T) { t.Parallel() wrk := *initWorker(&statusHandlerMock.AppStatusHandlerStub{}) - err := wrk.ProcessReceivedMessage(nil, fromConnectedPeerId) + err := wrk.ProcessReceivedMessage(nil, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) assert.Equal(t, 0, len(wrk.ReceivedMessages()[bls.MtBlockBody])) @@ -533,7 +533,7 @@ func TestWorker_ProcessReceivedMessageNilMessageShouldErr(t *testing.T) { func TestWorker_ProcessReceivedMessageNilMessageDataFieldShouldErr(t *testing.T) { t.Parallel() wrk := *initWorker(&statusHandlerMock.AppStatusHandlerStub{}) - err := wrk.ProcessReceivedMessage(&p2pmocks.P2PMessageMock{}, fromConnectedPeerId) + err := wrk.ProcessReceivedMessage(&p2pmocks.P2PMessageMock{}, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) assert.Equal(t, 0, len(wrk.ReceivedMessages()[bls.MtBlockBody])) @@ -548,6 +548,7 @@ func TestWorker_ProcessReceivedMessageEmptySignatureFieldShouldErr(t *testing.T) DataField: []byte("data field"), }, fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) time.Sleep(time.Second) @@ 
-575,6 +576,7 @@ func TestWorker_ProcessReceivedMessageRedundancyNodeShouldResetInactivityIfNeede SignatureField: []byte("signature"), }, fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) assert.True(t, wasCalled) @@ -608,6 +610,7 @@ func TestWorker_ProcessReceivedMessageNodeNotInEligibleListShouldErr(t *testing. SignatureField: []byte("signature"), }, fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) time.Sleep(time.Second) @@ -720,7 +723,7 @@ func testWorkerProcessReceivedMessageComputeReceivedProposedBlockMetric( PeerField: currentPid, SignatureField: []byte("signature"), } - _ = wrk.ProcessReceivedMessage(msg, "") + _ = wrk.ProcessReceivedMessage(msg, "", &p2pmocks.MessengerStub{}) return receivedValue } @@ -754,6 +757,7 @@ func TestWorker_ProcessReceivedMessageInconsistentChainIDInConsensusMessageShoul SignatureField: []byte("signature"), }, fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) assert.True(t, errors.Is(err, spos.ErrInvalidChainID)) @@ -787,6 +791,7 @@ func TestWorker_ProcessReceivedMessageTypeInvalidShouldErr(t *testing.T) { SignatureField: []byte("signature"), }, fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) time.Sleep(time.Second) @@ -822,6 +827,7 @@ func TestWorker_ProcessReceivedHeaderHashSizeInvalidShouldErr(t *testing.T) { SignatureField: []byte("signature"), }, fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) time.Sleep(time.Second) @@ -857,6 +863,7 @@ func TestWorker_ProcessReceivedMessageForFutureRoundShouldErr(t *testing.T) { SignatureField: []byte("signature"), }, fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) time.Sleep(time.Second) @@ -892,6 +899,7 @@ func TestWorker_ProcessReceivedMessageForPastRoundShouldErr(t *testing.T) { SignatureField: []byte("signature"), }, fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) time.Sleep(time.Second) @@ -927,17 +935,17 @@ func TestWorker_ProcessReceivedMessageTypeLimitReachedShouldErr(t *testing.T) { SignatureField: []byte("signature"), } - err := 
wrk.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := wrk.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) assert.Equal(t, 1, len(wrk.ReceivedMessages()[bls.MtBlockBody])) assert.Nil(t, err) - err = wrk.ProcessReceivedMessage(msg, fromConnectedPeerId) + err = wrk.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) assert.Equal(t, 1, len(wrk.ReceivedMessages()[bls.MtBlockBody])) assert.True(t, errors.Is(err, spos.ErrMessageTypeLimitReached)) - err = wrk.ProcessReceivedMessage(msg, fromConnectedPeerId) + err = wrk.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) assert.Equal(t, 1, len(wrk.ReceivedMessages()[bls.MtBlockBody])) assert.True(t, errors.Is(err, spos.ErrMessageTypeLimitReached)) @@ -971,6 +979,7 @@ func TestWorker_ProcessReceivedMessageInvalidSignatureShouldErr(t *testing.T) { SignatureField: []byte("signature"), }, fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) time.Sleep(time.Second) @@ -1005,7 +1014,7 @@ func TestWorker_ProcessReceivedMessageReceivedMessageIsFromSelfShouldRetNilAndNo PeerField: currentPid, SignatureField: []byte("signature"), } - err := wrk.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := wrk.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) assert.Equal(t, 0, len(wrk.ReceivedMessages()[bls.MtBlockBody])) @@ -1040,7 +1049,7 @@ func TestWorker_ProcessReceivedMessageWhenRoundIsCanceledShouldRetNilAndNotProce PeerField: currentPid, SignatureField: []byte("signature"), } - err := wrk.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := wrk.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) assert.Equal(t, 0, len(wrk.ReceivedMessages()[bls.MtBlockBody])) @@ -1092,6 +1101,7 @@ func TestWorker_ProcessReceivedMessageWrongChainIDInProposedBlockShouldError(t * SignatureField: 
[]byte("signature"), }, fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) time.Sleep(time.Second) @@ -1146,7 +1156,7 @@ func TestWorker_ProcessReceivedMessageWithABadOriginatorShouldErr(t *testing.T) PeerField: "other originator", SignatureField: []byte("signature"), } - err := wrk.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := wrk.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) assert.Equal(t, 0, len(wrk.ReceivedMessages()[bls.MtBlockHeader])) @@ -1215,7 +1225,7 @@ func TestWorker_ProcessReceivedMessageOkValsShouldWork(t *testing.T) { PeerField: currentPid, SignatureField: []byte("signature"), } - err := wrk.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := wrk.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) assert.Equal(t, 1, len(wrk.ReceivedMessages()[bls.MtBlockHeader])) @@ -1741,7 +1751,7 @@ func TestWorker_ProcessReceivedMessageWrongHeaderShouldErr(t *testing.T) { PeerField: currentPid, SignatureField: []byte("signature"), } - err := wrk.ProcessReceivedMessage(msg, "") + err := wrk.ProcessReceivedMessage(msg, "", &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, spos.ErrInvalidHeader)) } @@ -1786,7 +1796,7 @@ func TestWorker_ProcessReceivedMessageWithSignature(t *testing.T) { PeerField: currentPid, SignatureField: []byte("signature"), } - err = wrk.ProcessReceivedMessage(msg, "") + err = wrk.ProcessReceivedMessage(msg, "", &p2pmocks.MessengerStub{}) assert.Nil(t, err) p2pMsgWithSignature, ok := wrk.ConsensusState().GetMessageWithSignature(string(pubKey)) diff --git a/dataRetriever/errors.go b/dataRetriever/errors.go index a015e6e10ed..99f542b422c 100644 --- a/dataRetriever/errors.go +++ b/dataRetriever/errors.go @@ -265,3 +265,6 @@ var ErrNilValidatorInfoStorage = errors.New("nil validator info storage") // ErrValidatorInfoNotFound signals that no validator info was found var ErrValidatorInfoNotFound = errors.New("validator info 
not found") + +// ErrUnknownMessageHandlerType signals that an unknown message handler has been provided +var ErrUnknownMessageHandlerType = errors.New("unknown message handler type") diff --git a/dataRetriever/factory/requestersContainer/args.go b/dataRetriever/factory/requestersContainer/args.go index 76ca1fddaf7..6963c975696 100644 --- a/dataRetriever/factory/requestersContainer/args.go +++ b/dataRetriever/factory/requestersContainer/args.go @@ -13,8 +13,8 @@ import ( type FactoryArgs struct { RequesterConfig config.RequesterConfig ShardCoordinator sharding.Coordinator - MainMessenger dataRetriever.TopicMessageHandler - FullArchiveMessenger dataRetriever.TopicMessageHandler + MainMessenger p2p.Messenger + FullArchiveMessenger p2p.Messenger Marshaller marshal.Marshalizer Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter OutputAntifloodHandler dataRetriever.P2PAntifloodHandler diff --git a/dataRetriever/factory/requestersContainer/baseRequestersContainerFactory.go b/dataRetriever/factory/requestersContainer/baseRequestersContainerFactory.go index 6fddb70ac8c..f668eb895c8 100644 --- a/dataRetriever/factory/requestersContainer/baseRequestersContainerFactory.go +++ b/dataRetriever/factory/requestersContainer/baseRequestersContainerFactory.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/requestHandlers/requesters" topicsender "github.com/multiversx/mx-chain-go/dataRetriever/topicSender" + "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/sharding" logger "github.com/multiversx/mx-chain-logger-go" @@ -24,8 +25,8 @@ var log = logger.GetOrCreate("dataRetriever/factory/requesterscontainer") type baseRequestersContainerFactory struct { container dataRetriever.RequestersContainer shardCoordinator sharding.Coordinator - mainMessenger dataRetriever.TopicMessageHandler - fullArchiveMessenger 
dataRetriever.TopicMessageHandler + mainMessenger p2p.Messenger + fullArchiveMessenger p2p.Messenger marshaller marshal.Marshalizer uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter intRandomizer dataRetriever.IntRandomizer diff --git a/dataRetriever/factory/requestersContainer/shardRequestersContainerFactory_test.go b/dataRetriever/factory/requestersContainer/shardRequestersContainerFactory_test.go index ee3dcba7794..42c8300184b 100644 --- a/dataRetriever/factory/requestersContainer/shardRequestersContainerFactory_test.go +++ b/dataRetriever/factory/requestersContainer/shardRequestersContainerFactory_test.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/factory/requestersContainer" "github.com/multiversx/mx-chain-go/dataRetriever/mock" + "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -16,10 +17,10 @@ import ( var errExpected = errors.New("expected error") -func createStubTopicMessageHandler(matchStrToErrOnCreate string) dataRetriever.TopicMessageHandler { - tmhs := mock.NewTopicMessageHandlerStub() +func createMessengerStub(matchStrToErrOnCreate string) p2p.Messenger { + stub := &p2pmocks.MessengerStub{} - tmhs.CreateTopicCalled = func(name string, createChannelForTopic bool) error { + stub.CreateTopicCalled = func(name string, createChannelForTopic bool) error { if matchStrToErrOnCreate == "" { return nil } @@ -30,7 +31,7 @@ func createStubTopicMessageHandler(matchStrToErrOnCreate string) dataRetriever.T return nil } - return tmhs + return stub } func TestNewShardRequestersContainerFactory_NilShardCoordinatorShouldErr(t *testing.T) { @@ -277,8 +278,8 @@ func getArguments() requesterscontainer.FactoryArgs { NumFullHistoryPeers: 3, }, ShardCoordinator: mock.NewOneShardCoordinatorMock(), - MainMessenger: createStubTopicMessageHandler(""), - 
FullArchiveMessenger: createStubTopicMessageHandler(""), + MainMessenger: createMessengerStub(""), + FullArchiveMessenger: createMessengerStub(""), Marshaller: &mock.MarshalizerMock{}, Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, OutputAntifloodHandler: &mock.P2PAntifloodHandlerStub{}, diff --git a/dataRetriever/factory/resolverscontainer/args.go b/dataRetriever/factory/resolverscontainer/args.go index e342eef57a8..1446af01b97 100644 --- a/dataRetriever/factory/resolverscontainer/args.go +++ b/dataRetriever/factory/resolverscontainer/args.go @@ -13,8 +13,8 @@ import ( type FactoryArgs struct { NumConcurrentResolvingJobs int32 ShardCoordinator sharding.Coordinator - MainMessenger dataRetriever.TopicMessageHandler - FullArchiveMessenger dataRetriever.TopicMessageHandler + MainMessenger p2p.Messenger + FullArchiveMessenger p2p.Messenger Store dataRetriever.StorageService Marshalizer marshal.Marshalizer DataPools dataRetriever.PoolsHolder diff --git a/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go b/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go index 52e7a50c61a..c1fc1e3a16b 100644 --- a/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go +++ b/dataRetriever/factory/resolverscontainer/baseResolversContainerFactory.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/resolvers" "github.com/multiversx/mx-chain-go/dataRetriever/topicSender" + "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/sharding" logger "github.com/multiversx/mx-chain-logger-go" @@ -24,8 +25,8 @@ var log = logger.GetOrCreate("dataRetriever/factory/resolverscontainer") type baseResolversContainerFactory struct { container dataRetriever.ResolversContainer shardCoordinator sharding.Coordinator - mainMessenger dataRetriever.TopicMessageHandler - 
fullArchiveMessenger dataRetriever.TopicMessageHandler + mainMessenger p2p.Messenger + fullArchiveMessenger p2p.Messenger store dataRetriever.StorageService marshalizer marshal.Marshalizer dataPools dataRetriever.PoolsHolder diff --git a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go index 6d90f550a71..c6659693d79 100644 --- a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go +++ b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go @@ -22,10 +22,10 @@ import ( "github.com/stretchr/testify/assert" ) -func createStubTopicMessageHandlerForMeta(matchStrToErrOnCreate string, matchStrToErrOnRegister string) dataRetriever.TopicMessageHandler { - tmhs := mock.NewTopicMessageHandlerStub() +func createStubMessengerForMeta(matchStrToErrOnCreate string, matchStrToErrOnRegister string) p2p.Messenger { + stub := &p2pmocks.MessengerStub{} - tmhs.CreateTopicCalled = func(name string, createChannelForTopic bool) error { + stub.CreateTopicCalled = func(name string, createChannelForTopic bool) error { if matchStrToErrOnCreate == "" { return nil } @@ -36,7 +36,7 @@ func createStubTopicMessageHandlerForMeta(matchStrToErrOnCreate string, matchStr return nil } - tmhs.RegisterMessageProcessorCalled = func(topic string, identifier string, handler p2p.MessageProcessor) error { + stub.RegisterMessageProcessorCalled = func(topic string, identifier string, handler p2p.MessageProcessor) error { if matchStrToErrOnRegister == "" { return nil } @@ -47,7 +47,7 @@ func createStubTopicMessageHandlerForMeta(matchStrToErrOnCreate string, matchStr return nil } - return tmhs + return stub } func createDataPoolsForMeta() dataRetriever.PoolsHolder { @@ -261,7 +261,7 @@ func TestMetaResolversContainerFactory_CreateRegisterShardHeadersForMetachainOnM t.Parallel() args := getArgumentsMeta() - args.MainMessenger = 
createStubTopicMessageHandlerForMeta("", factory.ShardBlocksTopic) + args.MainMessenger = createStubMessengerForMeta("", factory.ShardBlocksTopic) rcf, _ := resolverscontainer.NewMetaResolversContainerFactory(args) container, err := rcf.Create() @@ -274,7 +274,7 @@ func TestMetaResolversContainerFactory_CreateRegisterShardHeadersForMetachainOnF t.Parallel() args := getArgumentsMeta() - args.FullArchiveMessenger = createStubTopicMessageHandlerForMeta("", factory.ShardBlocksTopic) + args.FullArchiveMessenger = createStubMessengerForMeta("", factory.ShardBlocksTopic) rcf, _ := resolverscontainer.NewMetaResolversContainerFactory(args) container, err := rcf.Create() @@ -358,8 +358,8 @@ func TestMetaResolversContainerFactory_IsInterfaceNil(t *testing.T) { func getArgumentsMeta() resolverscontainer.FactoryArgs { return resolverscontainer.FactoryArgs{ ShardCoordinator: mock.NewOneShardCoordinatorMock(), - MainMessenger: createStubTopicMessageHandlerForMeta("", ""), - FullArchiveMessenger: createStubTopicMessageHandlerForMeta("", ""), + MainMessenger: createStubMessengerForMeta("", ""), + FullArchiveMessenger: createStubMessengerForMeta("", ""), Store: createStoreForMeta(), Marshalizer: &mock.MarshalizerMock{}, DataPools: createDataPoolsForMeta(), diff --git a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go index 89ebde60228..4d6ca351195 100644 --- a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go +++ b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go @@ -24,10 +24,10 @@ import ( var errExpected = errors.New("expected error") -func createStubTopicMessageHandlerForShard(matchStrToErrOnCreate string, matchStrToErrOnRegister string) dataRetriever.TopicMessageHandler { - tmhs := mock.NewTopicMessageHandlerStub() +func createMessengerStubForShard(matchStrToErrOnCreate string, matchStrToErrOnRegister 
string) p2p.Messenger { + stub := &p2pmocks.MessengerStub{} - tmhs.CreateTopicCalled = func(name string, createChannelForTopic bool) error { + stub.CreateTopicCalled = func(name string, createChannelForTopic bool) error { if matchStrToErrOnCreate == "" { return nil } @@ -39,7 +39,7 @@ func createStubTopicMessageHandlerForShard(matchStrToErrOnCreate string, matchSt return nil } - tmhs.RegisterMessageProcessorCalled = func(topic string, identifier string, handler p2p.MessageProcessor) error { + stub.RegisterMessageProcessorCalled = func(topic string, identifier string, handler p2p.MessageProcessor) error { if matchStrToErrOnRegister == "" { return nil } @@ -51,7 +51,7 @@ func createStubTopicMessageHandlerForShard(matchStrToErrOnCreate string, matchSt return nil } - return tmhs + return stub } func createDataPoolsForShard() dataRetriever.PoolsHolder { @@ -267,7 +267,7 @@ func TestShardResolversContainerFactory_CreateRegisterTxFailsOnMainNetworkShould t.Parallel() args := getArgumentsShard() - args.MainMessenger = createStubTopicMessageHandlerForShard("", factory.TransactionTopic) + args.MainMessenger = createMessengerStubForShard("", factory.TransactionTopic) rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) container, err := rcf.Create() @@ -280,7 +280,7 @@ func TestShardResolversContainerFactory_CreateRegisterTxFailsOnFullArchiveNetwor t.Parallel() args := getArgumentsShard() - args.FullArchiveMessenger = createStubTopicMessageHandlerForShard("", factory.TransactionTopic) + args.FullArchiveMessenger = createMessengerStubForShard("", factory.TransactionTopic) rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) container, err := rcf.Create() @@ -293,7 +293,7 @@ func TestShardResolversContainerFactory_CreateRegisterHdrFailsOnMainNetworkShoul t.Parallel() args := getArgumentsShard() - args.MainMessenger = createStubTopicMessageHandlerForShard("", factory.ShardBlocksTopic) + args.MainMessenger = createMessengerStubForShard("", 
factory.ShardBlocksTopic) rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) container, err := rcf.Create() @@ -306,7 +306,7 @@ func TestShardResolversContainerFactory_CreateRegisterHdrFailsOnFullArchiveNetwo t.Parallel() args := getArgumentsShard() - args.FullArchiveMessenger = createStubTopicMessageHandlerForShard("", factory.ShardBlocksTopic) + args.FullArchiveMessenger = createMessengerStubForShard("", factory.ShardBlocksTopic) rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) container, err := rcf.Create() @@ -319,7 +319,7 @@ func TestShardResolversContainerFactory_CreateRegisterMiniBlocksFailsOnMainNetwo t.Parallel() args := getArgumentsShard() - args.MainMessenger = createStubTopicMessageHandlerForShard("", factory.MiniBlocksTopic) + args.MainMessenger = createMessengerStubForShard("", factory.MiniBlocksTopic) rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) container, err := rcf.Create() @@ -332,7 +332,7 @@ func TestShardResolversContainerFactory_CreateRegisterMiniBlocksFailsOnFullArchi t.Parallel() args := getArgumentsShard() - args.FullArchiveMessenger = createStubTopicMessageHandlerForShard("", factory.MiniBlocksTopic) + args.FullArchiveMessenger = createMessengerStubForShard("", factory.MiniBlocksTopic) rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) container, err := rcf.Create() @@ -345,7 +345,7 @@ func TestShardResolversContainerFactory_CreateRegisterTrieNodesFailsOnMainNetwor t.Parallel() args := getArgumentsShard() - args.MainMessenger = createStubTopicMessageHandlerForShard("", factory.AccountTrieNodesTopic) + args.MainMessenger = createMessengerStubForShard("", factory.AccountTrieNodesTopic) rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) container, err := rcf.Create() @@ -358,7 +358,7 @@ func TestShardResolversContainerFactory_CreateRegisterTrieNodesFailsOnFullArchiv t.Parallel() args := getArgumentsShard() - args.FullArchiveMessenger = 
createStubTopicMessageHandlerForShard("", factory.AccountTrieNodesTopic) + args.FullArchiveMessenger = createMessengerStubForShard("", factory.AccountTrieNodesTopic) rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) container, err := rcf.Create() @@ -371,7 +371,7 @@ func TestShardResolversContainerFactory_CreateRegisterPeerAuthenticationOnMainNe t.Parallel() args := getArgumentsShard() - args.MainMessenger = createStubTopicMessageHandlerForShard("", common.PeerAuthenticationTopic) + args.MainMessenger = createMessengerStubForShard("", common.PeerAuthenticationTopic) rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) container, err := rcf.Create() @@ -384,7 +384,7 @@ func TestShardResolversContainerFactory_CreateRegisterPeerAuthenticationOnFullAr t.Parallel() args := getArgumentsShard() - args.FullArchiveMessenger = createStubTopicMessageHandlerForShard("", common.PeerAuthenticationTopic) + args.FullArchiveMessenger = createMessengerStubForShard("", common.PeerAuthenticationTopic) rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) container, err := rcf.Create() @@ -466,8 +466,8 @@ func TestShardResolversContainerFactory_IsInterfaceNil(t *testing.T) { func getArgumentsShard() resolverscontainer.FactoryArgs { return resolverscontainer.FactoryArgs{ ShardCoordinator: mock.NewOneShardCoordinatorMock(), - MainMessenger: createStubTopicMessageHandlerForShard("", ""), - FullArchiveMessenger: createStubTopicMessageHandlerForShard("", ""), + MainMessenger: createMessengerStubForShard("", ""), + FullArchiveMessenger: createMessengerStubForShard("", ""), Store: createStoreForShard(), Marshalizer: &mock.MarshalizerMock{}, DataPools: createDataPoolsForShard(), diff --git a/dataRetriever/factory/storageRequestersContainer/args.go b/dataRetriever/factory/storageRequestersContainer/args.go index 2e498ba6f15..ae060183cff 100644 --- a/dataRetriever/factory/storageRequestersContainer/args.go +++ 
b/dataRetriever/factory/storageRequestersContainer/args.go @@ -8,6 +8,7 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/sharding" ) @@ -19,7 +20,7 @@ type FactoryArgs struct { WorkingDirectory string Hasher hashing.Hasher ShardCoordinator sharding.Coordinator - Messenger dataRetriever.TopicMessageHandler + Messenger p2p.Messenger Store dataRetriever.StorageService Marshalizer marshal.Marshalizer Uint64ByteSliceConverter typeConverters.Uint64ByteSliceConverter diff --git a/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go b/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go index e0bccf04e75..f57929d6633 100644 --- a/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go +++ b/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go @@ -16,6 +16,7 @@ import ( disabledRequesters "github.com/multiversx/mx-chain-go/dataRetriever/requestHandlers/requesters/disabled" "github.com/multiversx/mx-chain-go/dataRetriever/storageRequesters" "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/storage" @@ -28,7 +29,7 @@ const defaultBeforeGracefulClose = time.Minute type baseRequestersContainerFactory struct { container dataRetriever.RequestersContainer shardCoordinator sharding.Coordinator - messenger dataRetriever.TopicMessageHandler + messenger p2p.Messenger store dataRetriever.StorageService marshalizer marshal.Marshalizer hasher hashing.Hasher @@ -261,14 +262,14 @@ func (brcf *baseRequestersContainerFactory) newImportDBTrieStorage( } args := trieFactory.TrieCreateArgs{ - MainStorer: mainStorer, - CheckpointsStorer: 
checkpointsStorer, - PruningEnabled: brcf.generalConfig.StateTriesConfig.AccountsStatePruningEnabled, - CheckpointsEnabled: brcf.generalConfig.StateTriesConfig.CheckpointsEnabled, - MaxTrieLevelInMem: brcf.generalConfig.StateTriesConfig.MaxStateTrieLevelInMemory, - SnapshotsEnabled: brcf.snapshotsEnabled, - IdleProvider: disabled.NewProcessStatusHandler(), - Identifier: storageIdentifier.String(), + MainStorer: mainStorer, + CheckpointsStorer: checkpointsStorer, + PruningEnabled: brcf.generalConfig.StateTriesConfig.AccountsStatePruningEnabled, + CheckpointsEnabled: brcf.generalConfig.StateTriesConfig.CheckpointsEnabled, + MaxTrieLevelInMem: brcf.generalConfig.StateTriesConfig.MaxStateTrieLevelInMemory, + SnapshotsEnabled: brcf.snapshotsEnabled, + IdleProvider: disabled.NewProcessStatusHandler(), + Identifier: storageIdentifier.String(), EnableEpochsHandler: handler, } return trieFactoryInstance.Create(args) diff --git a/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory_test.go b/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory_test.go index 6711a3d58c4..41a05d17350 100644 --- a/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory_test.go +++ b/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory_test.go @@ -14,15 +14,16 @@ import ( "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func createStubTopicMessageHandlerForMeta(matchStrToErrOnCreate string, matchStrToErrOnRegister string) dataRetriever.TopicMessageHandler { - tmhs := mock.NewTopicMessageHandlerStub() +func createMessengerStubForMeta(matchStrToErrOnCreate string, 
matchStrToErrOnRegister string) p2p.Messenger { + stub := &p2pmocks.MessengerStub{} - tmhs.CreateTopicCalled = func(name string, createChannelForTopic bool) error { + stub.CreateTopicCalled = func(name string, createChannelForTopic bool) error { if matchStrToErrOnCreate == "" { return nil } @@ -33,7 +34,7 @@ func createStubTopicMessageHandlerForMeta(matchStrToErrOnCreate string, matchStr return nil } - tmhs.RegisterMessageProcessorCalled = func(topic string, identifier string, handler p2p.MessageProcessor) error { + stub.RegisterMessageProcessorCalled = func(topic string, identifier string, handler p2p.MessageProcessor) error { if matchStrToErrOnRegister == "" { return nil } @@ -44,7 +45,7 @@ func createStubTopicMessageHandlerForMeta(matchStrToErrOnCreate string, matchStr return nil } - return tmhs + return stub } func createStoreForMeta() dataRetriever.StorageService { @@ -217,7 +218,7 @@ func getArgumentsMeta() storagerequesterscontainer.FactoryArgs { WorkingDirectory: "", Hasher: &hashingMocks.HasherMock{}, ShardCoordinator: mock.NewOneShardCoordinatorMock(), - Messenger: createStubTopicMessageHandlerForMeta("", ""), + Messenger: createMessengerStubForMeta("", ""), Store: createStoreForMeta(), Marshalizer: &mock.MarshalizerMock{}, Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, diff --git a/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory_test.go b/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory_test.go index cc7a22af6c8..8695828a509 100644 --- a/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory_test.go +++ b/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory_test.go @@ -14,6 +14,7 @@ import ( "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/testscommon/enableEpochsHandlerMock" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + 
"github.com/multiversx/mx-chain-go/testscommon/p2pmocks" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -21,10 +22,10 @@ import ( var errExpected = errors.New("expected error") -func createStubTopicMessageHandlerForShard(matchStrToErrOnCreate string, matchStrToErrOnRegister string) dataRetriever.TopicMessageHandler { - tmhs := mock.NewTopicMessageHandlerStub() +func createMessengerStubForShard(matchStrToErrOnCreate string, matchStrToErrOnRegister string) p2p.Messenger { + stub := &p2pmocks.MessengerStub{} - tmhs.CreateTopicCalled = func(name string, createChannelForTopic bool) error { + stub.CreateTopicCalled = func(name string, createChannelForTopic bool) error { if matchStrToErrOnCreate == "" { return nil } @@ -36,7 +37,7 @@ func createStubTopicMessageHandlerForShard(matchStrToErrOnCreate string, matchSt return nil } - tmhs.RegisterMessageProcessorCalled = func(topic string, identifier string, handler p2p.MessageProcessor) error { + stub.RegisterMessageProcessorCalled = func(topic string, identifier string, handler p2p.MessageProcessor) error { if matchStrToErrOnRegister == "" { return nil } @@ -48,7 +49,7 @@ func createStubTopicMessageHandlerForShard(matchStrToErrOnCreate string, matchSt return nil } - return tmhs + return stub } func createStoreForShard() dataRetriever.StorageService { @@ -202,7 +203,7 @@ func getArgumentsShard() storagerequesterscontainer.FactoryArgs { WorkingDirectory: "", Hasher: &hashingMocks.HasherMock{}, ShardCoordinator: mock.NewOneShardCoordinatorMock(), - Messenger: createStubTopicMessageHandlerForShard("", ""), + Messenger: createMessengerStubForShard("", ""), Store: createStoreForShard(), Marshalizer: &mock.MarshalizerMock{}, Uint64ByteSliceConverter: &mock.Uint64ByteSliceConverterMock{}, diff --git a/dataRetriever/interface.go b/dataRetriever/interface.go index 52cb7a58f75..930b6aca124 100644 --- a/dataRetriever/interface.go +++ 
b/dataRetriever/interface.go @@ -21,7 +21,7 @@ type ResolverThrottler interface { // Resolver defines what a data resolver should do type Resolver interface { - ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error + ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error SetDebugHandler(handler DebugHandler) error Close() error IsInterfaceNil() bool @@ -50,7 +50,7 @@ type HeaderRequester interface { // TopicResolverSender defines what sending operations are allowed for a topic resolver type TopicResolverSender interface { - Send(buff []byte, peer core.PeerID, network p2p.Network) error + Send(buff []byte, peer core.PeerID, destination p2p.MessageHandler) error RequestTopic() string TargetShardID() uint32 SetDebugHandler(handler DebugHandler) error @@ -149,13 +149,6 @@ type TopicHandler interface { RegisterMessageProcessor(topic string, identifier string, handler p2p.MessageProcessor) error } -// TopicMessageHandler defines the functionality needed by structs to manage topics, message processors and to send data -// to other peers -type TopicMessageHandler interface { - MessageHandler - TopicHandler -} - // IntRandomizer interface provides functionality over generating integer numbers type IntRandomizer interface { Intn(n int) int diff --git a/dataRetriever/mock/headerResolverStub.go b/dataRetriever/mock/headerResolverStub.go index 3bf8bfb0028..fa87219b082 100644 --- a/dataRetriever/mock/headerResolverStub.go +++ b/dataRetriever/mock/headerResolverStub.go @@ -26,7 +26,7 @@ func (hrs *HeaderResolverStub) SetEpochHandler(epochHandler dataRetriever.EpochH } // ProcessReceivedMessage - -func (hrs *HeaderResolverStub) ProcessReceivedMessage(message p2p.MessageP2P, _ core.PeerID) error { +func (hrs *HeaderResolverStub) ProcessReceivedMessage(message p2p.MessageP2P, _ core.PeerID, _ p2p.MessageHandler) error { if hrs.ProcessReceivedMessageCalled != nil { return 
hrs.ProcessReceivedMessageCalled(message) } diff --git a/dataRetriever/mock/resolverStub.go b/dataRetriever/mock/resolverStub.go index 15e698042e4..c667c9459b2 100644 --- a/dataRetriever/mock/resolverStub.go +++ b/dataRetriever/mock/resolverStub.go @@ -14,7 +14,7 @@ type ResolverStub struct { } // ProcessReceivedMessage - -func (rs *ResolverStub) ProcessReceivedMessage(message p2p.MessageP2P, _ core.PeerID) error { +func (rs *ResolverStub) ProcessReceivedMessage(message p2p.MessageP2P, _ core.PeerID, _ p2p.MessageHandler) error { return rs.ProcessReceivedMessageCalled(message) } diff --git a/dataRetriever/mock/topicHandlerStub.go b/dataRetriever/mock/topicHandlerStub.go deleted file mode 100644 index 8d9095b300d..00000000000 --- a/dataRetriever/mock/topicHandlerStub.go +++ /dev/null @@ -1,32 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-go/p2p" -) - -// TopicHandlerStub - -type TopicHandlerStub struct { - HasTopicCalled func(name string) bool - CreateTopicCalled func(name string, createChannelForTopic bool) error - RegisterMessageProcessorCalled func(topic string, identifier string, handler p2p.MessageProcessor) error -} - -// HasTopic - -func (ths *TopicHandlerStub) HasTopic(name string) bool { - return ths.HasTopicCalled(name) -} - -// CreateTopic - -func (ths *TopicHandlerStub) CreateTopic(name string, createChannelForTopic bool) error { - return ths.CreateTopicCalled(name, createChannelForTopic) -} - -// RegisterMessageProcessor - -func (ths *TopicHandlerStub) RegisterMessageProcessor(topic string, identifier string, handler p2p.MessageProcessor) error { - return ths.RegisterMessageProcessorCalled(topic, identifier, handler) -} - -// IsInterfaceNil returns true if there is no value under the interface -func (ths *TopicHandlerStub) IsInterfaceNil() bool { - return ths == nil -} diff --git a/dataRetriever/mock/topicMessageHandlerStub.go b/dataRetriever/mock/topicMessageHandlerStub.go deleted file mode 100644 index 6b47a577048..00000000000 
--- a/dataRetriever/mock/topicMessageHandlerStub.go +++ /dev/null @@ -1,19 +0,0 @@ -package mock - -type topicMessageHandlerStub struct { - *TopicHandlerStub - *MessageHandlerStub -} - -// NewTopicMessageHandlerStub - -func NewTopicMessageHandlerStub() *topicMessageHandlerStub { - return &topicMessageHandlerStub{ - TopicHandlerStub: &TopicHandlerStub{}, - MessageHandlerStub: &MessageHandlerStub{}, - } -} - -// IsInterfaceNil returns true if there is no value under the interface -func (s *topicMessageHandlerStub) IsInterfaceNil() bool { - return s == nil -} diff --git a/dataRetriever/mock/topicResolverSenderStub.go b/dataRetriever/mock/topicResolverSenderStub.go index 948679d40eb..744d0e6fef8 100644 --- a/dataRetriever/mock/topicResolverSenderStub.go +++ b/dataRetriever/mock/topicResolverSenderStub.go @@ -9,7 +9,7 @@ import ( // TopicResolverSenderStub - type TopicResolverSenderStub struct { - SendCalled func(buff []byte, peer core.PeerID, network p2p.Network) error + SendCalled func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error TargetShardIDCalled func() uint32 debugHandler dataRetriever.DebugHandler } @@ -20,9 +20,9 @@ func (trss *TopicResolverSenderStub) RequestTopic() string { } // Send - -func (trss *TopicResolverSenderStub) Send(buff []byte, peer core.PeerID, network p2p.Network) error { +func (trss *TopicResolverSenderStub) Send(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { if trss.SendCalled != nil { - return trss.SendCalled(buff, peer, network) + return trss.SendCalled(buff, peer, source) } return nil diff --git a/dataRetriever/resolvers/disabled/resolver.go b/dataRetriever/resolvers/disabled/resolver.go index 077c98d8f97..ac51a954260 100644 --- a/dataRetriever/resolvers/disabled/resolver.go +++ b/dataRetriever/resolvers/disabled/resolver.go @@ -15,7 +15,7 @@ func NewDisabledResolver() *resolver { } // ProcessReceivedMessage returns nil as it is disabled -func (r *resolver) ProcessReceivedMessage(_ p2p.MessageP2P, _ 
core.PeerID) error { +func (r *resolver) ProcessReceivedMessage(_ p2p.MessageP2P, _ core.PeerID, _ p2p.MessageHandler) error { return nil } diff --git a/dataRetriever/resolvers/export_test.go b/dataRetriever/resolvers/export_test.go index b13879a7d0e..360342be58a 100644 --- a/dataRetriever/resolvers/export_test.go +++ b/dataRetriever/resolvers/export_test.go @@ -11,6 +11,6 @@ func (hdrRes *HeaderResolver) EpochHandler() dataRetriever.EpochHandler { } // ResolveMultipleHashes - -func (tnRes *TrieNodeResolver) ResolveMultipleHashes(hashesBuff []byte, message p2p.MessageP2P) error { - return tnRes.resolveMultipleHashes(hashesBuff, message) +func (tnRes *TrieNodeResolver) ResolveMultipleHashes(hashesBuff []byte, message p2p.MessageP2P, source p2p.MessageHandler) error { + return tnRes.resolveMultipleHashes(hashesBuff, message, source) } diff --git a/dataRetriever/resolvers/headerResolver.go b/dataRetriever/resolvers/headerResolver.go index 675303b7c60..877c57a31da 100644 --- a/dataRetriever/resolvers/headerResolver.go +++ b/dataRetriever/resolvers/headerResolver.go @@ -109,7 +109,7 @@ func (hdrRes *HeaderResolver) SetEpochHandler(epochHandler dataRetriever.EpochHa // ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to, usually a request topic) -func (hdrRes *HeaderResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { +func (hdrRes *HeaderResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error { err := hdrRes.canProcessMessage(message, fromConnectedPeer) if err != nil { return err @@ -158,7 +158,7 @@ func (hdrRes *HeaderResolver) ProcessReceivedMessage(message p2p.MessageP2P, fro hdrRes.DebugHandler().LogSucceededToResolveData(hdrRes.topic, rd.Value) - return hdrRes.Send(buff, message.Peer(), message.Network()) + return hdrRes.Send(buff, 
message.Peer(), source) } func (hdrRes *HeaderResolver) resolveHeaderFromNonce(rd *dataRetriever.RequestData) ([]byte, error) { diff --git a/dataRetriever/resolvers/headerResolver_test.go b/dataRetriever/resolvers/headerResolver_test.go index c8470e434d5..f50606a244e 100644 --- a/dataRetriever/resolvers/headerResolver_test.go +++ b/dataRetriever/resolvers/headerResolver_test.go @@ -165,7 +165,7 @@ func TestHeaderResolver_ProcessReceivedCanProcessMessageErrorsShouldErr(t *testi } hdrRes, _ := resolvers.NewHeaderResolver(arg) - err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, nil), fromConnectedPeerId) + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, nil), fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, expectedErr)) assert.False(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.False(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -177,7 +177,7 @@ func TestHeaderResolver_ProcessReceivedMessageNilValueShouldErr(t *testing.T) { arg := createMockArgHeaderResolver() hdrRes, _ := resolvers.NewHeaderResolver(arg) - err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, nil), fromConnectedPeerId) + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, nil), fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Equal(t, dataRetriever.ErrNilValue, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -190,7 +190,7 @@ func TestHeaderResolver_ProcessReceivedMessage_WrongIdentifierStartBlock(t *test hdrRes, _ := resolvers.NewHeaderResolver(arg) requestedData := []byte("request") - err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.EpochType, requestedData), "") + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.EpochType, requestedData), "", &p2pmocks.MessengerStub{}) assert.Equal(t, 
core.ErrInvalidIdentifierForEpochStartBlockRequest, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -207,7 +207,7 @@ func TestHeaderResolver_ProcessReceivedMessageEpochTypeUnknownEpochShouldWork(t } wasSent := false arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { wasSent = true return nil }, @@ -215,7 +215,7 @@ func TestHeaderResolver_ProcessReceivedMessageEpochTypeUnknownEpochShouldWork(t hdrRes, _ := resolvers.NewHeaderResolver(arg) requestedData := []byte(fmt.Sprintf("epoch_%d", math.MaxUint32)) - err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.EpochType, requestedData), "") + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.EpochType, requestedData), "", &p2pmocks.MessengerStub{}) assert.NoError(t, err) assert.True(t, wasSent) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) @@ -234,7 +234,7 @@ func TestHeaderResolver_ProcessReceivedMessage_Ok(t *testing.T) { hdrRes, _ := resolvers.NewHeaderResolver(arg) requestedData := []byte("request_1") - err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.EpochType, requestedData), "") + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.EpochType, requestedData), "", &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -246,7 +246,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestUnknownTypeShouldErr(t *tes arg := createMockArgHeaderResolver() hdrRes, _ := resolvers.NewHeaderResolver(arg) - err := hdrRes.ProcessReceivedMessage(createRequestMsg(254, make([]byte, 0)), fromConnectedPeerId) + err := 
hdrRes.ProcessReceivedMessage(createRequestMsg(254, make([]byte, 0)), fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Equal(t, dataRetriever.ErrResolveTypeUnknown, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -272,7 +272,7 @@ func TestHeaderResolver_ValidateRequestHashTypeFoundInHdrPoolShouldSearchAndSend arg := createMockArgHeaderResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { sendWasCalled = true return nil }, @@ -280,7 +280,7 @@ func TestHeaderResolver_ValidateRequestHashTypeFoundInHdrPoolShouldSearchAndSend arg.Headers = headers hdrRes, _ := resolvers.NewHeaderResolver(arg) - err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedData), fromConnectedPeerId) + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedData), fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, searchWasCalled) assert.True(t, sendWasCalled) @@ -309,7 +309,7 @@ func TestHeaderResolver_ValidateRequestHashTypeFoundInHdrPoolShouldSearchAndSend arg := createMockArgHeaderResolver() arg.IsFullHistoryNode = true arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { sendWasCalled = true return nil }, @@ -317,7 +317,7 @@ func TestHeaderResolver_ValidateRequestHashTypeFoundInHdrPoolShouldSearchAndSend arg.Headers = headers hdrRes, _ := resolvers.NewHeaderResolver(arg) - err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedData), fromConnectedPeerId) + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, 
requestedData), fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, searchWasCalled) assert.True(t, sendWasCalled) @@ -352,7 +352,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestHashTypeFoundInHdrPoolMarsh arg := createMockArgHeaderResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { return nil }, } @@ -360,7 +360,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestHashTypeFoundInHdrPoolMarsh arg.Headers = headers hdrRes, _ := resolvers.NewHeaderResolver(arg) - err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedData), fromConnectedPeerId) + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedData), fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Equal(t, errExpected, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -391,7 +391,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestRetFromStorageShouldRetValA arg := createMockArgHeaderResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { wasSent = true return nil }, @@ -400,7 +400,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestRetFromStorageShouldRetValA arg.HdrStorage = store hdrRes, _ := resolvers.NewHeaderResolver(arg) - err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedData), fromConnectedPeerId) + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedData), fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, wasGotFromStorage) assert.True(t, 
wasSent) @@ -419,7 +419,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeInvalidSliceShould } hdrRes, _ := resolvers.NewHeaderResolver(arg) - err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, []byte("aaa")), fromConnectedPeerId) + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, []byte("aaa")), fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Equal(t, dataRetriever.ErrInvalidNonceByteSlice, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -446,7 +446,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceShouldCallWithTheCorre }, ) msg := &p2pmocks.P2PMessageMock{DataField: buff} - _ = hdrRes.ProcessReceivedMessage(msg, "") + _ = hdrRes.ProcessReceivedMessage(msg, "", &p2pmocks.MessengerStub{}) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) } @@ -460,7 +460,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeNotFoundInHdrNonce arg := createMockArgHeaderResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { wasSent = true return nil }, @@ -491,6 +491,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeNotFoundInHdrNonce err := hdrRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.NonceType, arg.NonceConverter.ToByteSlice(requestedNonce)), fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) assert.Equal(t, expectedErr, err) assert.False(t, wasSent) @@ -514,7 +515,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo arg := createMockArgHeaderResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network 
p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { wasSent = true return nil }, @@ -536,6 +537,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo err := hdrRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.NonceType, arg.NonceConverter.ToByteSlice(requestedNonce)), fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) assert.Nil(t, err) @@ -575,7 +577,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo arg := createMockArgHeaderResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { wasSend = true return nil }, @@ -598,6 +600,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo err := hdrRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.NonceType, arg.NonceConverter.ToByteSlice(requestedNonce)), fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) assert.Nil(t, err) @@ -625,7 +628,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo arg := createMockArgHeaderResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { assert.Fail(t, "should not have been called") return nil }, @@ -654,6 +657,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo err := hdrRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.NonceType, arg.NonceConverter.ToByteSlice(requestedNonce)), fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) assert.True(t, errors.Is(err, expectedErr)) @@ -679,7 +683,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeNotFoundInHdrNonce } arg := createMockArgHeaderResolver() 
arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { wasSend = true return nil }, @@ -695,6 +699,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeNotFoundInHdrNonce err := hdrRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.NonceType, arg.NonceConverter.ToByteSlice(requestedNonce)), fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) assert.Nil(t, err) @@ -729,7 +734,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo arg := createMockArgHeaderResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { return nil }, TargetShardIDCalled: func() uint32 { @@ -751,6 +756,7 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo err := hdrRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.NonceType, arg.NonceConverter.ToByteSlice(requestedNonce)), fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) assert.Equal(t, errExpected, err) @@ -810,7 +816,7 @@ func TestHeaderResolver_SetEpochHandlerConcurrency(t *testing.T) { assert.Nil(t, err) return } - err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.EpochType, []byte("request_1")), fromConnectedPeerId) + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.EpochType, []byte("request_1")), fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Nil(t, err) }(i) } diff --git a/dataRetriever/resolvers/miniblockResolver.go b/dataRetriever/resolvers/miniblockResolver.go index aad1b986453..0c1a1460074 100644 --- a/dataRetriever/resolvers/miniblockResolver.go +++ b/dataRetriever/resolvers/miniblockResolver.go @@ -77,7 +77,7 @@ func checkArgMiniblockResolver(arg 
ArgMiniblockResolver) error { // ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to, usually a request topic) -func (mbRes *miniblockResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { +func (mbRes *miniblockResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error { err := mbRes.canProcessMessage(message, fromConnectedPeer) if err != nil { return err @@ -93,9 +93,9 @@ func (mbRes *miniblockResolver) ProcessReceivedMessage(message p2p.MessageP2P, f switch rd.Type { case dataRetriever.HashType: - err = mbRes.resolveMbRequestByHash(rd.Value, message.Peer(), rd.Epoch, message.Network()) + err = mbRes.resolveMbRequestByHash(rd.Value, message.Peer(), rd.Epoch, source) case dataRetriever.HashArrayType: - err = mbRes.resolveMbRequestByHashArray(rd.Value, message.Peer(), rd.Epoch, message.Network()) + err = mbRes.resolveMbRequestByHashArray(rd.Value, message.Peer(), rd.Epoch, source) default: err = dataRetriever.ErrRequestTypeNotImplemented } @@ -107,7 +107,7 @@ func (mbRes *miniblockResolver) ProcessReceivedMessage(message p2p.MessageP2P, f return err } -func (mbRes *miniblockResolver) resolveMbRequestByHash(hash []byte, pid core.PeerID, epoch uint32, network p2p.Network) error { +func (mbRes *miniblockResolver) resolveMbRequestByHash(hash []byte, pid core.PeerID, epoch uint32, source p2p.MessageHandler) error { mb, err := mbRes.fetchMbAsByteSlice(hash, epoch) if err != nil { return err @@ -121,7 +121,7 @@ func (mbRes *miniblockResolver) resolveMbRequestByHash(hash []byte, pid core.Pee return err } - return mbRes.Send(buffToSend, pid, network) + return mbRes.Send(buffToSend, pid, source) } func (mbRes *miniblockResolver) fetchMbAsByteSlice(hash []byte, epoch uint32) ([]byte, error) { @@ -146,7 +146,7 @@ func (mbRes *miniblockResolver) 
fetchMbAsByteSlice(hash []byte, epoch uint32) ([ return buff, nil } -func (mbRes *miniblockResolver) resolveMbRequestByHashArray(mbBuff []byte, pid core.PeerID, epoch uint32, network p2p.Network) error { +func (mbRes *miniblockResolver) resolveMbRequestByHashArray(mbBuff []byte, pid core.PeerID, epoch uint32, source p2p.MessageHandler) error { b := batch.Batch{} err := mbRes.marshalizer.Unmarshal(&b, mbBuff) if err != nil { @@ -177,7 +177,7 @@ func (mbRes *miniblockResolver) resolveMbRequestByHashArray(mbBuff []byte, pid c } for _, buff := range buffsToSend { - errSend := mbRes.Send(buff, pid, network) + errSend := mbRes.Send(buff, pid, source) if errSend != nil { return errSend } diff --git a/dataRetriever/resolvers/miniblockResolver_test.go b/dataRetriever/resolvers/miniblockResolver_test.go index f4c52448236..35588e9d6a9 100644 --- a/dataRetriever/resolvers/miniblockResolver_test.go +++ b/dataRetriever/resolvers/miniblockResolver_test.go @@ -14,6 +14,7 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever/resolvers" "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/stretchr/testify/assert" ) @@ -127,7 +128,7 @@ func TestMiniblockResolver_ProcessReceivedAntifloodErrorsShouldErr(t *testing.T) } mbRes, _ := resolvers.NewMiniblockResolver(arg) - err := mbRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, nil), fromConnectedPeerId) + err := mbRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, nil), fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, expectedErr)) assert.False(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.False(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -139,7 +140,7 @@ func TestMiniblockResolver_ProcessReceivedMessageNilValueShouldErr(t *testing.T) arg := 
createMockArgMiniblockResolver() mbRes, _ := resolvers.NewMiniblockResolver(arg) - err := mbRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, nil), fromConnectedPeerId) + err := mbRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, nil), fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Equal(t, dataRetriever.ErrNilValue, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -151,7 +152,7 @@ func TestMiniblockResolver_ProcessReceivedMessageWrongTypeShouldErr(t *testing.T arg := createMockArgMiniblockResolver() mbRes, _ := resolvers.NewMiniblockResolver(arg) - err := mbRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, make([]byte, 0)), fromConnectedPeerId) + err := mbRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, make([]byte, 0)), fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, dataRetriever.ErrRequestTypeNotImplemented)) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) @@ -184,7 +185,7 @@ func TestMiniblockResolver_ProcessReceivedMessageFoundInPoolShouldRetValAndSend( arg := createMockArgMiniblockResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { wasSent = true return nil }, @@ -200,6 +201,7 @@ func TestMiniblockResolver_ProcessReceivedMessageFoundInPoolShouldRetValAndSend( err := mbRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.HashArrayType, requestedBuff), fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) assert.Nil(t, err) @@ -254,6 +256,7 @@ func TestMiniblockResolver_ProcessReceivedMessageFoundInPoolMarshalizerFailShoul err := mbRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.HashArrayType, requestedBuff), fromConnectedPeerId, + 
&p2pmocks.MessengerStub{}, ) assert.True(t, errors.Is(err, errExpected)) @@ -309,6 +312,7 @@ func TestMiniblockResolver_ProcessReceivedMessageUnmarshalFails(t *testing.T) { err := mbRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.HashArrayType, requestedBuff), fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) assert.True(t, errors.Is(err, expectedErr)) @@ -352,6 +356,7 @@ func TestMiniblockResolver_ProcessReceivedMessagePackDataInChunksFails(t *testin err := mbRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.HashArrayType, requestedBuff), fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) assert.True(t, errors.Is(err, expectedErr)) @@ -386,7 +391,7 @@ func TestMiniblockResolver_ProcessReceivedMessageSendFails(t *testing.T) { } arg.Marshaller = goodMarshalizer arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { return expectedErr }, } @@ -395,6 +400,7 @@ func TestMiniblockResolver_ProcessReceivedMessageSendFails(t *testing.T) { err := mbRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.HashArrayType, requestedBuff), fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) assert.True(t, errors.Is(err, expectedErr)) @@ -428,7 +434,7 @@ func TestMiniblockResolver_ProcessReceivedMessageNotFoundInPoolShouldRetFromStor arg := createMockArgMiniblockResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { wasSend = true return nil }, @@ -440,6 +446,7 @@ func TestMiniblockResolver_ProcessReceivedMessageNotFoundInPoolShouldRetFromStor err := mbRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.HashType, requestedBuff), fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) assert.Nil(t, err) @@ -474,7 +481,7 @@ 
func TestMiniblockResolver_ProcessReceivedMessageMarshalFails(t *testing.T) { arg := createMockArgMiniblockResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { assert.Fail(t, "should have not been called") return nil }, @@ -492,6 +499,7 @@ func TestMiniblockResolver_ProcessReceivedMessageMarshalFails(t *testing.T) { err := mbRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.HashType, requestedBuff), fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) assert.True(t, errors.Is(err, expectedErr)) @@ -523,7 +531,7 @@ func TestMiniblockResolver_ProcessReceivedMessageMissingDataShouldNotSend(t *tes arg := createMockArgMiniblockResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { wasSent = true return nil }, @@ -535,6 +543,7 @@ func TestMiniblockResolver_ProcessReceivedMessageMissingDataShouldNotSend(t *tes _ = mbRes.ProcessReceivedMessage( createRequestMsg(dataRetriever.HashType, requestedBuff), fromConnectedPeerId, + &p2pmocks.MessengerStub{}, ) assert.False(t, wasSent) diff --git a/dataRetriever/resolvers/peerAuthenticationResolver.go b/dataRetriever/resolvers/peerAuthenticationResolver.go index 867ca34c4b9..dc2a45892c2 100644 --- a/dataRetriever/resolvers/peerAuthenticationResolver.go +++ b/dataRetriever/resolvers/peerAuthenticationResolver.go @@ -75,7 +75,7 @@ func checkArgPeerAuthenticationResolver(arg ArgPeerAuthenticationResolver) error // ProcessReceivedMessage represents the callback func from the p2p.Messenger that is called each time a new message is received // (for the topic this validator was registered to, usually a request topic) -func (res *peerAuthenticationResolver) ProcessReceivedMessage(message 
p2p.MessageP2P, fromConnectedPeer core.PeerID) error { +func (res *peerAuthenticationResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error { err := res.canProcessMessage(message, fromConnectedPeer) if err != nil { return err @@ -91,7 +91,7 @@ func (res *peerAuthenticationResolver) ProcessReceivedMessage(message p2p.Messag switch rd.Type { case dataRetriever.HashArrayType: - return res.resolveMultipleHashesRequest(rd.Value, message.Peer(), message.Network()) + return res.resolveMultipleHashesRequest(rd.Value, message.Peer(), source) default: err = dataRetriever.ErrRequestTypeNotImplemented } @@ -103,7 +103,7 @@ func (res *peerAuthenticationResolver) ProcessReceivedMessage(message p2p.Messag } // resolveMultipleHashesRequest sends the response for multiple hashes request -func (res *peerAuthenticationResolver) resolveMultipleHashesRequest(hashesBuff []byte, pid core.PeerID, network p2p.Network) error { +func (res *peerAuthenticationResolver) resolveMultipleHashesRequest(hashesBuff []byte, pid core.PeerID, source p2p.MessageHandler) error { b := batch.Batch{} err := res.marshalizer.Unmarshal(&b, hashesBuff) if err != nil { @@ -116,18 +116,18 @@ func (res *peerAuthenticationResolver) resolveMultipleHashesRequest(hashesBuff [ return fmt.Errorf("resolveMultipleHashesRequest error %w from buff %x", err, hashesBuff) } - return res.sendPeerAuthsForHashes(peerAuthsForHashes, pid, network) + return res.sendPeerAuthsForHashes(peerAuthsForHashes, pid, source) } // sendPeerAuthsForHashes sends multiple peer authentication messages for specific hashes -func (res *peerAuthenticationResolver) sendPeerAuthsForHashes(dataBuff [][]byte, pid core.PeerID, network p2p.Network) error { +func (res *peerAuthenticationResolver) sendPeerAuthsForHashes(dataBuff [][]byte, pid core.PeerID, source p2p.MessageHandler) error { buffsToSend, err := res.dataPacker.PackDataInChunks(dataBuff, maxBuffToSendPeerAuthentications) if err != 
nil { return err } for _, buff := range buffsToSend { - err = res.Send(buff, pid, network) + err = res.Send(buff, pid, source) if err != nil { return err } diff --git a/dataRetriever/resolvers/peerAuthenticationResolver_test.go b/dataRetriever/resolvers/peerAuthenticationResolver_test.go index c55e00900d5..188c29d7e3f 100644 --- a/dataRetriever/resolvers/peerAuthenticationResolver_test.go +++ b/dataRetriever/resolvers/peerAuthenticationResolver_test.go @@ -19,6 +19,7 @@ import ( "github.com/multiversx/mx-chain-go/heartbeat" "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -163,7 +164,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { assert.Nil(t, err) assert.False(t, res.IsInterfaceNil()) - err = res.ProcessReceivedMessage(nil, fromConnectedPeer) + err = res.ProcessReceivedMessage(nil, fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Equal(t, dataRetriever.ErrNilMessage, err) }) t.Run("canProcessMessage due to antiflood handler error", func(t *testing.T) { @@ -179,7 +180,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { assert.Nil(t, err) assert.False(t, res.IsInterfaceNil()) - err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.ChunkType, nil), fromConnectedPeer) + err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.ChunkType, nil), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, expectedErr)) assert.False(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.False(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -197,7 +198,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { assert.Nil(t, err) assert.False(t, res.IsInterfaceNil()) - err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.ChunkType, nil), 
fromConnectedPeer) + err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.ChunkType, nil), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, expectedErr)) }) t.Run("invalid request type should error", func(t *testing.T) { @@ -212,7 +213,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { assert.Nil(t, err) assert.False(t, res.IsInterfaceNil()) - err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedBuff), fromConnectedPeer) + err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedBuff), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, dataRetriever.ErrRequestTypeNotImplemented)) }) @@ -226,7 +227,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { assert.Nil(t, err) assert.False(t, res.IsInterfaceNil()) - err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, []byte("invalid data")), fromConnectedPeer) + err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, []byte("invalid data")), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.NotNil(t, err) }) t.Run("resolveMultipleHashesRequest: all hashes missing from cache should error", func(t *testing.T) { @@ -241,7 +242,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { arg.PeerAuthenticationPool = cache wasSent := false arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { wasSent = true return nil }, @@ -253,7 +254,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { hashes := getKeysSlice() providedHashes, err := arg.Marshaller.Marshal(batch.Batch{Data: hashes}) assert.Nil(t, err) - err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, providedHashes), 
fromConnectedPeer) + err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, providedHashes), fromConnectedPeer, &p2pmocks.MessengerStub{}) expectedSubstrErr := fmt.Sprintf("%s %x", "from buff", providedHashes) assert.True(t, strings.Contains(fmt.Sprintf("%s", err), expectedSubstrErr)) assert.False(t, wasSent) @@ -270,7 +271,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { arg.PeerAuthenticationPool = cache wasSent := false arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { wasSent = true return nil }, @@ -282,7 +283,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { hashes := getKeysSlice() providedHashes, err := arg.Marshaller.Marshal(batch.Batch{Data: hashes}) assert.Nil(t, err) - err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, providedHashes), fromConnectedPeer) + err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, providedHashes), fromConnectedPeer, &p2pmocks.MessengerStub{}) expectedSubstrErr := fmt.Sprintf("%s %x", "from buff", providedHashes) assert.True(t, strings.Contains(fmt.Sprintf("%s", err), expectedSubstrErr)) assert.False(t, wasSent) @@ -306,7 +307,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { arg.PeerAuthenticationPool = cache wasSent := false arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { wasSent = true return nil }, @@ -318,7 +319,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { hashes := getKeysSlice() providedHashes, err := arg.Marshaller.Marshal(batch.Batch{Data: hashes}) assert.Nil(t, err) - err = 
res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, providedHashes), fromConnectedPeer) + err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, providedHashes), fromConnectedPeer, &p2pmocks.MessengerStub{}) expectedSubstrErr := fmt.Sprintf("%s %x", "from buff", providedHashes) assert.True(t, strings.Contains(fmt.Sprintf("%s", err), expectedSubstrErr)) assert.False(t, wasSent) @@ -360,7 +361,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { arg.PeerAuthenticationPool = cache wasSent := false arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { b := &batch.Batch{} err = arg.Marshaller.Unmarshal(b, buff) assert.Nil(t, err) @@ -386,7 +387,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { assert.Nil(t, err) assert.False(t, res.IsInterfaceNil()) - err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, providedHashes), fromConnectedPeer) + err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, providedHashes), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, wasSent) }) @@ -412,7 +413,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { hashes := getKeysSlice() providedHashes, err := arg.Marshaller.Marshal(batch.Batch{Data: hashes}) assert.Nil(t, err) - err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, providedHashes), fromConnectedPeer) + err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, providedHashes), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, expectedErr)) }) t.Run("resolveMultipleHashesRequest: Send returns error", func(t *testing.T) { @@ -426,7 +427,7 @@ func 
TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { arg := createMockArgPeerAuthenticationResolver() arg.PeerAuthenticationPool = cache arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { return expectedErr }, } @@ -437,7 +438,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { hashes := getKeysSlice() providedHashes, err := arg.Marshaller.Marshal(batch.Batch{Data: hashes}) assert.Nil(t, err) - err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, providedHashes), fromConnectedPeer) + err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, providedHashes), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, expectedErr)) }) t.Run("resolveMultipleHashesRequest: send large data buff", func(t *testing.T) { @@ -463,7 +464,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { messagesSent := 0 hashesReceived := 0 arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { b := &batch.Batch{} err := arg.Marshaller.Unmarshal(b, buff) assert.Nil(t, err) @@ -500,7 +501,7 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { chunkIndex := uint32(0) providedHashes, err := arg.Marshaller.Marshal(&batch.Batch{Data: providedKeys}) assert.Nil(t, err) - err = res.ProcessReceivedMessage(createRequestMsgWithChunkIndex(dataRetriever.HashArrayType, providedHashes, epoch, chunkIndex), fromConnectedPeer) + err = res.ProcessReceivedMessage(createRequestMsgWithChunkIndex(dataRetriever.HashArrayType, providedHashes, epoch, chunkIndex), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Nil(t, err) 
assert.Equal(t, 2, messagesSent) assert.Equal(t, expectedLen, hashesReceived) diff --git a/dataRetriever/resolvers/transactionResolver.go b/dataRetriever/resolvers/transactionResolver.go index d91fe82d497..3a88bd13c15 100644 --- a/dataRetriever/resolvers/transactionResolver.go +++ b/dataRetriever/resolvers/transactionResolver.go @@ -82,7 +82,7 @@ func checkArgTxResolver(arg ArgTxResolver) error { // ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to, usually a request topic) -func (txRes *TxResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { +func (txRes *TxResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error { err := txRes.canProcessMessage(message, fromConnectedPeer) if err != nil { return err @@ -98,9 +98,9 @@ func (txRes *TxResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConn switch rd.Type { case dataRetriever.HashType: - err = txRes.resolveTxRequestByHash(rd.Value, message.Peer(), rd.Epoch, message.Network()) + err = txRes.resolveTxRequestByHash(rd.Value, message.Peer(), rd.Epoch, source) case dataRetriever.HashArrayType: - err = txRes.resolveTxRequestByHashArray(rd.Value, message.Peer(), rd.Epoch, message.Network()) + err = txRes.resolveTxRequestByHashArray(rd.Value, message.Peer(), rd.Epoch, source) default: err = dataRetriever.ErrRequestTypeNotImplemented } @@ -112,7 +112,7 @@ func (txRes *TxResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConn return err } -func (txRes *TxResolver) resolveTxRequestByHash(hash []byte, pid core.PeerID, epoch uint32, network p2p.Network) error { +func (txRes *TxResolver) resolveTxRequestByHash(hash []byte, pid core.PeerID, epoch uint32, source p2p.MessageHandler) error { // TODO this can be optimized by searching in corresponding datapool (taken by topic name) tx, 
err := txRes.fetchTxAsByteSlice(hash, epoch) if err != nil { @@ -127,7 +127,7 @@ func (txRes *TxResolver) resolveTxRequestByHash(hash []byte, pid core.PeerID, ep return err } - return txRes.Send(buff, pid, network) + return txRes.Send(buff, pid, source) } func (txRes *TxResolver) fetchTxAsByteSlice(hash []byte, epoch uint32) ([]byte, error) { @@ -152,7 +152,7 @@ func (txRes *TxResolver) fetchTxAsByteSlice(hash []byte, epoch uint32) ([]byte, return buff, nil } -func (txRes *TxResolver) resolveTxRequestByHashArray(hashesBuff []byte, pid core.PeerID, epoch uint32, network p2p.Network) error { +func (txRes *TxResolver) resolveTxRequestByHashArray(hashesBuff []byte, pid core.PeerID, epoch uint32, source p2p.MessageHandler) error { // TODO this can be optimized by searching in corresponding datapool (taken by topic name) b := batch.Batch{} err := txRes.marshalizer.Unmarshal(&b, hashesBuff) @@ -186,7 +186,7 @@ func (txRes *TxResolver) resolveTxRequestByHashArray(hashesBuff []byte, pid core } for _, buff := range buffsToSend { - errSend := txRes.Send(buff, pid, network) + errSend := txRes.Send(buff, pid, source) if errSend != nil { return errSend } diff --git a/dataRetriever/resolvers/transactionResolver_test.go b/dataRetriever/resolvers/transactionResolver_test.go index 4cf0bb0e86e..2af167aae70 100644 --- a/dataRetriever/resolvers/transactionResolver_test.go +++ b/dataRetriever/resolvers/transactionResolver_test.go @@ -131,7 +131,7 @@ func TestTxResolver_ProcessReceivedMessageCanProcessMessageErrorsShouldErr(t *te } txRes, _ := resolvers.NewTxResolver(arg) - err := txRes.ProcessReceivedMessage(&p2pmocks.P2PMessageMock{}, connectedPeerId) + err := txRes.ProcessReceivedMessage(&p2pmocks.P2PMessageMock{}, connectedPeerId, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, expectedErr)) assert.False(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) @@ -144,7 +144,7 @@ func TestTxResolver_ProcessReceivedMessageNilMessageShouldErr(t *testing.T) { arg := 
createMockArgTxResolver() txRes, _ := resolvers.NewTxResolver(arg) - err := txRes.ProcessReceivedMessage(nil, connectedPeerId) + err := txRes.ProcessReceivedMessage(nil, connectedPeerId, &p2pmocks.MessengerStub{}) assert.Equal(t, dataRetriever.ErrNilMessage, err) assert.False(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) @@ -161,7 +161,7 @@ func TestTxResolver_ProcessReceivedMessageWrongTypeShouldErr(t *testing.T) { msg := &p2pmocks.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, connectedPeerId) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, dataRetriever.ErrRequestTypeNotImplemented)) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) @@ -178,7 +178,7 @@ func TestTxResolver_ProcessReceivedMessageNilValueShouldErr(t *testing.T) { msg := &p2pmocks.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, connectedPeerId) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId, &p2pmocks.MessengerStub{}) assert.Equal(t, dataRetriever.ErrNilValue, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) @@ -206,7 +206,7 @@ func TestTxResolver_ProcessReceivedMessageFoundInTxPoolShouldSearchAndSend(t *te arg := createMockArgTxResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { sendWasCalled = true return nil }, @@ -218,7 +218,7 @@ func TestTxResolver_ProcessReceivedMessageFoundInTxPoolShouldSearchAndSend(t *te msg := &p2pmocks.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, connectedPeerId) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, searchWasCalled) @@ -262,7 +262,7 @@ func 
TestTxResolver_ProcessReceivedMessageFoundInTxPoolMarshalizerFailShouldRetN msg := &p2pmocks.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, connectedPeerId) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, errExpected)) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) @@ -307,7 +307,7 @@ func TestTxResolver_ProcessReceivedMessageBatchMarshalFailShouldRetNilAndErr(t * msg := &p2pmocks.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, connectedPeerId) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, expectedErr)) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) @@ -342,7 +342,7 @@ func TestTxResolver_ProcessReceivedMessageFoundInTxStorageShouldRetValAndSend(t arg := createMockArgTxResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { sendWasCalled = true return nil }, @@ -355,7 +355,7 @@ func TestTxResolver_ProcessReceivedMessageFoundInTxStorageShouldRetValAndSend(t msg := &p2pmocks.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, connectedPeerId) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, searchWasCalled) @@ -395,7 +395,7 @@ func TestTxResolver_ProcessReceivedMessageFoundInTxStorageCheckRetError(t *testi msg := &p2pmocks.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, connectedPeerId) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, errExpected)) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) @@ -432,7 +432,7 @@ func 
TestTxResolver_ProcessReceivedMessageRequestedTwoSmallTransactionsShouldCal sendWasCalled := false arg := createMockArgTxResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { sendWasCalled = true return nil }, @@ -455,7 +455,7 @@ func TestTxResolver_ProcessReceivedMessageRequestedTwoSmallTransactionsShouldCal msg := &p2pmocks.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, connectedPeerId) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, splitSliceWasCalled) @@ -488,7 +488,7 @@ func TestTxResolver_ProcessReceivedMessageRequestedTwoSmallTransactionsFoundOnly sendWasCalled := false arg := createMockArgTxResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { sendWasCalled = true return nil }, @@ -516,7 +516,7 @@ func TestTxResolver_ProcessReceivedMessageRequestedTwoSmallTransactionsFoundOnly msg := &p2pmocks.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, connectedPeerId) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId, &p2pmocks.MessengerStub{}) assert.NotNil(t, err) assert.True(t, splitSliceWasCalled) @@ -545,7 +545,7 @@ func TestTxResolver_ProcessReceivedMessageHashArrayUnmarshalFails(t *testing.T) data, _ := marshalizer.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashArrayType, Value: []byte("buff")}) msg := &p2pmocks.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, connectedPeerId) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, expectedErr)) assert.True(t, 
arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) @@ -570,7 +570,7 @@ func TestTxResolver_ProcessReceivedMessageHashArrayPackDataInChunksFails(t *test data, _ := arg.Marshaller.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashArrayType, Value: buff}) msg := &p2pmocks.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, connectedPeerId) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, expectedErr)) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) @@ -585,7 +585,7 @@ func TestTxResolver_ProcessReceivedMessageHashArraySendFails(t *testing.T) { arg := createMockArgTxResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { return expectedErr }, } @@ -595,7 +595,7 @@ func TestTxResolver_ProcessReceivedMessageHashArraySendFails(t *testing.T) { data, _ := arg.Marshaller.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashArrayType, Value: buff}) msg := &p2pmocks.P2PMessageMock{DataField: data} - err := txRes.ProcessReceivedMessage(msg, connectedPeerId) + err := txRes.ProcessReceivedMessage(msg, connectedPeerId, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, expectedErr)) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) diff --git a/dataRetriever/resolvers/trieNodeResolver.go b/dataRetriever/resolvers/trieNodeResolver.go index ae8ec7f2db8..871ed85fee5 100644 --- a/dataRetriever/resolvers/trieNodeResolver.go +++ b/dataRetriever/resolvers/trieNodeResolver.go @@ -59,7 +59,7 @@ func checkArgTrieNodeResolver(arg ArgTrieNodeResolver) error { // ProcessReceivedMessage will be the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to, usually a request topic) -func 
(tnRes *TrieNodeResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { +func (tnRes *TrieNodeResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error { err := tnRes.canProcessMessage(message, fromConnectedPeer) if err != nil { return err @@ -75,15 +75,15 @@ func (tnRes *TrieNodeResolver) ProcessReceivedMessage(message p2p.MessageP2P, fr switch rd.Type { case dataRetriever.HashType: - return tnRes.resolveOneHash(rd.Value, rd.ChunkIndex, message) + return tnRes.resolveOneHash(rd.Value, rd.ChunkIndex, message, source) case dataRetriever.HashArrayType: - return tnRes.resolveMultipleHashes(rd.Value, message) + return tnRes.resolveMultipleHashes(rd.Value, message, source) default: return dataRetriever.ErrRequestTypeNotImplemented } } -func (tnRes *TrieNodeResolver) resolveMultipleHashes(hashesBuff []byte, message p2p.MessageP2P) error { +func (tnRes *TrieNodeResolver) resolveMultipleHashes(hashesBuff []byte, message p2p.MessageP2P, source p2p.MessageHandler) error { b := batch.Batch{} err := tnRes.marshalizer.Unmarshal(&b, hashesBuff) if err != nil { @@ -95,12 +95,12 @@ func (tnRes *TrieNodeResolver) resolveMultipleHashes(hashesBuff []byte, message nodes := make(map[string]struct{}) spaceUsed, usedAllSpace := tnRes.resolveOnlyRequestedHashes(hashes, nodes) if usedAllSpace { - return tnRes.sendResponse(convertMapToSlice(nodes), hashes, supportedChunkIndex, message) + return tnRes.sendResponse(convertMapToSlice(nodes), hashes, supportedChunkIndex, message, source) } tnRes.resolveSubTries(hashes, nodes, spaceUsed) - return tnRes.sendResponse(convertMapToSlice(nodes), hashes, supportedChunkIndex, message) + return tnRes.sendResponse(convertMapToSlice(nodes), hashes, supportedChunkIndex, message, source) } func (tnRes *TrieNodeResolver) resolveOnlyRequestedHashes(hashes [][]byte, nodes map[string]struct{}) (int, bool) { @@ -167,13 +167,13 @@ func convertMapToSlice(m 
map[string]struct{}) [][]byte { return buff } -func (tnRes *TrieNodeResolver) resolveOneHash(hash []byte, chunkIndex uint32, message p2p.MessageP2P) error { +func (tnRes *TrieNodeResolver) resolveOneHash(hash []byte, chunkIndex uint32, message p2p.MessageP2P, source p2p.MessageHandler) error { serializedNode, err := tnRes.trieDataGetter.GetSerializedNode(hash) if err != nil { return err } - return tnRes.sendResponse([][]byte{serializedNode}, [][]byte{hash}, chunkIndex, message) + return tnRes.sendResponse([][]byte{serializedNode}, [][]byte{hash}, chunkIndex, message, source) } func (tnRes *TrieNodeResolver) getSubTrie(hash []byte, remainingSpace uint64) ([][]byte, uint64, error) { @@ -198,6 +198,7 @@ func (tnRes *TrieNodeResolver) sendResponse( hashes [][]byte, chunkIndex uint32, message p2p.MessageP2P, + source p2p.MessageHandler, ) error { if len(serializedNodes) == 0 { @@ -206,7 +207,7 @@ func (tnRes *TrieNodeResolver) sendResponse( } if len(serializedNodes) == 1 && len(serializedNodes[0]) > core.MaxBufferSizeToSendTrieNodes { - return tnRes.sendLargeMessage(serializedNodes[0], hashes[0], int(chunkIndex), message) + return tnRes.sendLargeMessage(serializedNodes[0], hashes[0], int(chunkIndex), message, source) } buff, err := tnRes.marshalizer.Marshal(&batch.Batch{Data: serializedNodes}) @@ -214,7 +215,7 @@ func (tnRes *TrieNodeResolver) sendResponse( return err } - return tnRes.Send(buff, message.Peer(), message.Network()) + return tnRes.Send(buff, message.Peer(), source) } func (tnRes *TrieNodeResolver) sendLargeMessage( @@ -222,6 +223,7 @@ func (tnRes *TrieNodeResolver) sendLargeMessage( reference []byte, chunkIndex int, message p2p.MessageP2P, + source p2p.MessageHandler, ) error { logTrieNodes.Trace("assembling chunk", "reference", reference, "len", len(largeBuff)) @@ -248,7 +250,7 @@ func (tnRes *TrieNodeResolver) sendLargeMessage( return err } - return tnRes.Send(buff, message.Peer(), message.Network()) + return tnRes.Send(buff, message.Peer(), source) } // 
IsInterfaceNil returns true if there is no value under the interface diff --git a/dataRetriever/resolvers/trieNodeResolver_test.go b/dataRetriever/resolvers/trieNodeResolver_test.go index 349fa0cd20e..b2706f02b36 100644 --- a/dataRetriever/resolvers/trieNodeResolver_test.go +++ b/dataRetriever/resolvers/trieNodeResolver_test.go @@ -108,7 +108,7 @@ func TestTrieNodeResolver_ProcessReceivedAntiflooderCanProcessMessageErrShouldEr } tnRes, _ := resolvers.NewTrieNodeResolver(arg) - err := tnRes.ProcessReceivedMessage(&p2pmocks.P2PMessageMock{}, fromConnectedPeer) + err := tnRes.ProcessReceivedMessage(&p2pmocks.P2PMessageMock{}, fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, expectedErr)) assert.False(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.False(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -120,7 +120,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageNilMessageShouldErr(t *testing.T arg := createMockArgTrieNodeResolver() tnRes, _ := resolvers.NewTrieNodeResolver(arg) - err := tnRes.ProcessReceivedMessage(nil, fromConnectedPeer) + err := tnRes.ProcessReceivedMessage(nil, fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Equal(t, dataRetriever.ErrNilMessage, err) assert.False(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.False(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -137,7 +137,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageWrongTypeShouldErr(t *testing.T) data, _ := marshalizer.Marshal(&dataRetriever.RequestData{Type: dataRetriever.NonceType, Value: []byte("aaa")}) msg := &p2pmocks.P2PMessageMock{DataField: data} - err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer) + err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Equal(t, dataRetriever.ErrRequestTypeNotImplemented, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) 
@@ -154,7 +154,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageNilValueShouldErr(t *testing.T) data, _ := marshalizer.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashType, Value: nil}) msg := &p2pmocks.P2PMessageMock{DataField: data} - err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer) + err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Equal(t, dataRetriever.ErrNilValue, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -183,7 +183,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageShouldGetFromTrieAndSend(t *test arg := createMockArgTrieNodeResolver() arg.TrieDataGetter = tr arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { sendWasCalled = true return nil }, @@ -193,7 +193,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageShouldGetFromTrieAndSend(t *test data, _ := marshalizer.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashType, Value: []byte("node1")}) msg := &p2pmocks.P2PMessageMock{DataField: data} - err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer) + err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, getSerializedNodesWasCalled) @@ -223,7 +223,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageShouldGetFromTrieAndMarshalizerF data, _ := marshalizerMock.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashType, Value: []byte("node1")}) msg := &p2pmocks.P2PMessageMock{DataField: data} - err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer) + err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Equal(t, errExpected, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) 
assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -243,7 +243,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageTrieErrorsShouldErr(t *testing.T data, _ := arg.Marshaller.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashType, Value: []byte("node1")}) msg := &p2pmocks.P2PMessageMock{DataField: data} - err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer) + err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Equal(t, expectedErr, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -286,7 +286,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesUnmarshalFails(t * ) msg := &p2pmocks.P2PMessageMock{DataField: data} - err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer) + err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Equal(t, expectedErr, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -297,7 +297,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesGetSerializedNodeE arg := createMockArgTrieNodeResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { assert.Fail(t, "should have not called send") return nil }, @@ -322,7 +322,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesGetSerializedNodeE ) msg := &p2pmocks.P2PMessageMock{DataField: data} - err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer) + err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, 
arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -337,7 +337,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesGetSerializedNodes var receivedNodes [][]byte arg := createMockArgTrieNodeResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { b := &batch.Batch{} err := arg.Marshaller.Unmarshal(b, buff) require.Nil(t, err) @@ -375,7 +375,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesGetSerializedNodes ) msg := &p2pmocks.P2PMessageMock{DataField: data} - err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer) + err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -392,7 +392,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesNotEnoughSpaceShou var receivedNodes [][]byte arg := createMockArgTrieNodeResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { b := &batch.Batch{} err := arg.Marshaller.Unmarshal(b, buff) require.Nil(t, err) @@ -431,7 +431,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesNotEnoughSpaceShou ) msg := &p2pmocks.P2PMessageMock{DataField: data} - err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer) + err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -449,7 +449,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesShouldWorkWithSubt 
var receivedNodes [][]byte arg := createMockArgTrieNodeResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { b := &batch.Batch{} err := arg.Marshaller.Unmarshal(b, buff) require.Nil(t, err) @@ -492,7 +492,7 @@ func TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesShouldWorkWithSubt ) msg := &p2pmocks.P2PMessageMock{DataField: data} - err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer) + err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -519,7 +519,7 @@ func testTrieNodeResolverProcessReceivedMessageLargeTrieNode( sendWasCalled := false arg := createMockArgTrieNodeResolver() arg.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { b := &batch.Batch{} err := arg.Marshaller.Unmarshal(b, buff) require.Nil(t, err) @@ -558,7 +558,7 @@ func testTrieNodeResolverProcessReceivedMessageLargeTrieNode( ) msg := &p2pmocks.P2PMessageMock{DataField: data} - err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer) + err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) diff --git a/dataRetriever/resolvers/validatorInfoResolver.go b/dataRetriever/resolvers/validatorInfoResolver.go index 505a6ae2903..9f7e5a6bb1a 100644 --- a/dataRetriever/resolvers/validatorInfoResolver.go +++ b/dataRetriever/resolvers/validatorInfoResolver.go @@ -89,7 +89,7 @@ func checkArgs(args 
ArgValidatorInfoResolver) error { // ProcessReceivedMessage represents the callback func from the p2p.Messenger that is called each time a new message is received // (for the topic this validator was registered to, usually a request topic) -func (res *validatorInfoResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { +func (res *validatorInfoResolver) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error { err := res.canProcessMessage(message, fromConnectedPeer) if err != nil { return err @@ -105,26 +105,26 @@ func (res *validatorInfoResolver) ProcessReceivedMessage(message p2p.MessageP2P, switch rd.Type { case dataRetriever.HashType: - return res.resolveHashRequest(rd.Value, rd.Epoch, fromConnectedPeer, message.Network()) + return res.resolveHashRequest(rd.Value, rd.Epoch, fromConnectedPeer, source) case dataRetriever.HashArrayType: - return res.resolveMultipleHashesRequest(rd.Value, rd.Epoch, fromConnectedPeer, message.Network()) + return res.resolveMultipleHashesRequest(rd.Value, rd.Epoch, fromConnectedPeer, source) } return fmt.Errorf("%w for value %s", dataRetriever.ErrRequestTypeNotImplemented, logger.DisplayByteSlice(rd.Value)) } // resolveHashRequest sends the response for a hash request -func (res *validatorInfoResolver) resolveHashRequest(hash []byte, epoch uint32, pid core.PeerID, network p2p.Network) error { +func (res *validatorInfoResolver) resolveHashRequest(hash []byte, epoch uint32, pid core.PeerID, source p2p.MessageHandler) error { data, err := res.fetchValidatorInfoByteSlice(hash, epoch) if err != nil { return err } - return res.marshalAndSend(data, pid, network) + return res.marshalAndSend(data, pid, source) } // resolveMultipleHashesRequest sends the response for a hash array type request -func (res *validatorInfoResolver) resolveMultipleHashesRequest(hashesBuff []byte, epoch uint32, pid core.PeerID, network p2p.Network) error { +func (res 
*validatorInfoResolver) resolveMultipleHashesRequest(hashesBuff []byte, epoch uint32, pid core.PeerID, source p2p.MessageHandler) error { b := batch.Batch{} err := res.marshalizer.Unmarshal(&b, hashesBuff) if err != nil { @@ -141,17 +141,17 @@ func (res *validatorInfoResolver) resolveMultipleHashesRequest(hashesBuff []byte return fmt.Errorf("resolveMultipleHashesRequest error %w from buff %s", err, outputHashes) } - return res.sendValidatorInfoForHashes(validatorInfoForHashes, pid, network) + return res.sendValidatorInfoForHashes(validatorInfoForHashes, pid, source) } -func (res *validatorInfoResolver) sendValidatorInfoForHashes(validatorInfoForHashes [][]byte, pid core.PeerID, network p2p.Network) error { +func (res *validatorInfoResolver) sendValidatorInfoForHashes(validatorInfoForHashes [][]byte, pid core.PeerID, source p2p.MessageHandler) error { buffsToSend, err := res.dataPacker.PackDataInChunks(validatorInfoForHashes, maxBuffToSendValidatorsInfo) if err != nil { return err } for _, buff := range buffsToSend { - err = res.Send(buff, pid, network) + err = res.Send(buff, pid, source) if err != nil { return err } @@ -197,7 +197,7 @@ func (res *validatorInfoResolver) fetchValidatorInfoByteSlice(hash []byte, epoch return buff, nil } -func (res *validatorInfoResolver) marshalAndSend(data []byte, pid core.PeerID, network p2p.Network) error { +func (res *validatorInfoResolver) marshalAndSend(data []byte, pid core.PeerID, source p2p.MessageHandler) error { b := &batch.Batch{ Data: [][]byte{data}, } @@ -206,7 +206,7 @@ func (res *validatorInfoResolver) marshalAndSend(data []byte, pid core.PeerID, n return err } - return res.Send(buff, pid, network) + return res.Send(buff, pid, source) } // SetDebugHandler sets a debug handler diff --git a/dataRetriever/resolvers/validatorInfoResolver_test.go b/dataRetriever/resolvers/validatorInfoResolver_test.go index 97f8b85720d..d17fd1aedb4 100644 --- a/dataRetriever/resolvers/validatorInfoResolver_test.go +++ 
b/dataRetriever/resolvers/validatorInfoResolver_test.go @@ -19,6 +19,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/marshallerMock" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -140,7 +141,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { res, _ := resolvers.NewValidatorInfoResolver(createMockArgValidatorInfoResolver()) require.False(t, check.IfNil(res)) - err := res.ProcessReceivedMessage(nil, fromConnectedPeer) + err := res.ProcessReceivedMessage(nil, fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Equal(t, dataRetriever.ErrNilMessage, err) }) t.Run("canProcessMessage due to antiflood handler error", func(t *testing.T) { @@ -155,7 +156,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { res, _ := resolvers.NewValidatorInfoResolver(args) require.False(t, check.IfNil(res)) - err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, nil), fromConnectedPeer) + err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, nil), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, expectedErr)) assert.False(t, args.Throttler.(*mock.ThrottlerStub).StartWasCalled()) assert.False(t, args.Throttler.(*mock.ThrottlerStub).EndWasCalled()) @@ -172,7 +173,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { res, _ := resolvers.NewValidatorInfoResolver(args) require.False(t, check.IfNil(res)) - err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, nil), fromConnectedPeer) + err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, nil), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, expectedErr)) }) @@ -182,7 +183,7 @@ 
func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { res, _ := resolvers.NewValidatorInfoResolver(createMockArgValidatorInfoResolver()) require.False(t, check.IfNil(res)) - err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, []byte("hash")), fromConnectedPeer) + err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.NonceType, []byte("hash")), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.True(t, errors.Is(err, dataRetriever.ErrRequestTypeNotImplemented)) }) @@ -204,7 +205,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { res, _ := resolvers.NewValidatorInfoResolver(args) require.False(t, check.IfNil(res)) - err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, []byte("hash")), fromConnectedPeer) + err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, []byte("hash")), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Equal(t, expectedErr, err) }) t.Run("data found in cache but marshal fails", func(t *testing.T) { @@ -228,7 +229,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { res, _ := resolvers.NewValidatorInfoResolver(args) require.False(t, check.IfNil(res)) - err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, []byte("hash")), fromConnectedPeer) + err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, []byte("hash")), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.NotNil(t, err) }) t.Run("data found in storage but marshal fails", func(t *testing.T) { @@ -257,7 +258,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { res, _ := resolvers.NewValidatorInfoResolver(args) require.False(t, check.IfNil(res)) - err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, []byte("hash")), fromConnectedPeer) + err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, []byte("hash")), fromConnectedPeer, 
&p2pmocks.MessengerStub{}) assert.NotNil(t, err) }) t.Run("should work, data from cache", func(t *testing.T) { @@ -272,7 +273,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { }, } args.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { marshMock := marshallerMock.MarshalizerMock{} b := &batch.Batch{} _ = marshMock.Unmarshal(b, buff) @@ -289,7 +290,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { res, _ := resolvers.NewValidatorInfoResolver(args) require.False(t, check.IfNil(res)) - err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, []byte("hash")), fromConnectedPeer) + err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, []byte("hash")), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, wasCalled) }) @@ -311,7 +312,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { }, } args.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { marshMock := marshallerMock.MarshalizerMock{} b := &batch.Batch{} _ = marshMock.Unmarshal(b, buff) @@ -328,7 +329,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { res, _ := resolvers.NewValidatorInfoResolver(args) require.False(t, check.IfNil(res)) - err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, []byte("hash")), fromConnectedPeer) + err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, []byte("hash")), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, wasCalled) }) @@ -352,7 +353,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { res, _ := 
resolvers.NewValidatorInfoResolver(args) require.False(t, check.IfNil(res)) - err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, []byte("hash")), fromConnectedPeer) + err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, []byte("hash")), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Equal(t, expectedErr, err) }) t.Run("no hash found", func(t *testing.T) { @@ -376,7 +377,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { Data: [][]byte{[]byte("hash")}, } buff, _ := args.Marshaller.Marshal(b) - err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, buff), fromConnectedPeer) + err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, buff), fromConnectedPeer, &p2pmocks.MessengerStub{}) require.NotNil(t, err) assert.True(t, strings.Contains(err.Error(), dataRetriever.ErrValidatorInfoNotFound.Error())) }) @@ -406,7 +407,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { Data: [][]byte{[]byte("hash")}, } buff, _ := args.Marshaller.Marshal(b) - err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, buff), fromConnectedPeer) + err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, buff), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Equal(t, expectedErr, err) }) t.Run("send returns error", func(t *testing.T) { @@ -431,7 +432,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { }, } args.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { return expectedErr }, } @@ -440,7 +441,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { require.False(t, check.IfNil(res)) buff, _ := args.Marshaller.Marshal(&batch.Batch{Data: providedHashes}) - err := 
res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, buff), fromConnectedPeer) + err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, buff), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Equal(t, expectedErr, err) }) t.Run("all hashes in one chunk should work", func(t *testing.T) { @@ -466,7 +467,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { }, } args.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { marshMock := marshallerMock.MarshalizerMock{} b := &batch.Batch{} _ = marshMock.Unmarshal(b, buff) @@ -488,7 +489,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { require.False(t, check.IfNil(res)) buff, _ := args.Marshaller.Marshal(&batch.Batch{Data: providedHashes}) - err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, buff), fromConnectedPeer) + err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, buff), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.True(t, wasCalled) }) @@ -524,7 +525,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { } numOfCallsSend := 0 args.SenderResolver = &mock.TopicResolverSenderStub{ - SendCalled: func(buff []byte, peer core.PeerID, network p2p.Network) error { + SendCalled: func(buff []byte, peer core.PeerID, source p2p.MessageHandler) error { marshMock := marshallerMock.MarshalizerMock{} b := &batch.Batch{} _ = marshMock.Unmarshal(b, buff) @@ -550,7 +551,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { require.False(t, check.IfNil(res)) buff, _ := args.Marshaller.Marshal(&batch.Batch{Data: providedHashes}) - err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, buff), fromConnectedPeer) + err := 
res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, buff), fromConnectedPeer, &p2pmocks.MessengerStub{}) assert.Nil(t, err) assert.Equal(t, 2, numOfCallsSend) // ~677 messages in a chunk assert.Equal(t, 0, len(providedDataMap)) // all items should have been deleted on Send diff --git a/dataRetriever/topicSender/baseTopicSender.go b/dataRetriever/topicSender/baseTopicSender.go index 4de5075bf3c..e6aeed6972a 100644 --- a/dataRetriever/topicSender/baseTopicSender.go +++ b/dataRetriever/topicSender/baseTopicSender.go @@ -22,8 +22,8 @@ const ( // ArgBaseTopicSender is the base DTO used to create a new topic sender instance type ArgBaseTopicSender struct { - MainMessenger dataRetriever.MessageHandler - FullArchiveMessenger dataRetriever.MessageHandler + MainMessenger p2p.Messenger + FullArchiveMessenger p2p.Messenger TopicName string OutputAntiflooder dataRetriever.P2PAntifloodHandler MainPreferredPeersHolder dataRetriever.PreferredPeersHolderHandler @@ -32,8 +32,8 @@ type ArgBaseTopicSender struct { } type baseTopicSender struct { - mainMessenger dataRetriever.MessageHandler - fullArchiveMessenger dataRetriever.MessageHandler + mainMessenger p2p.Messenger + fullArchiveMessenger p2p.Messenger topicName string outputAntiflooder dataRetriever.P2PAntifloodHandler mutDebugHandler sync.RWMutex @@ -79,15 +79,13 @@ func (baseSender *baseTopicSender) sendToConnectedPeer( topic string, buff []byte, peer core.PeerID, - messenger dataRetriever.MessageHandler, - network p2p.Network, + messenger p2p.MessageHandler, preferredPeersHolder dataRetriever.PreferredPeersHolderHandler, ) error { msg := &factory.Message{ - DataField: buff, - PeerField: peer, - TopicField: topic, - NetworkField: network, + DataField: buff, + PeerField: peer, + TopicField: topic, } shouldAvoidAntiFloodCheck := preferredPeersHolder.Contains(peer) @@ -97,11 +95,10 @@ func (baseSender *baseTopicSender) sendToConnectedPeer( err := baseSender.outputAntiflooder.CanProcessMessage(msg, peer) if err 
!= nil { - return fmt.Errorf("%w while sending %d bytes to peer %s on network %s", + return fmt.Errorf("%w while sending %d bytes to peer %s", err, len(buff), p2p.PeerIdToShortString(peer), - network, ) } diff --git a/dataRetriever/topicSender/topicRequestSender.go b/dataRetriever/topicSender/topicRequestSender.go index fed557a8af0..0f1569391d8 100644 --- a/dataRetriever/topicSender/topicRequestSender.go +++ b/dataRetriever/topicSender/topicRequestSender.go @@ -136,7 +136,6 @@ func (trs *topicRequestSender) SendOnRequestTopic(rd *dataRetriever.RequestData, core.CrossShardPeer.String(), trs.mainMessenger, trs.mainPeersRatingHandler, - p2p.MainNetwork, trs.mainPreferredPeersHolderHandler) intraPeers = trs.peerListCreator.IntraShardPeerList() @@ -150,7 +149,6 @@ func (trs *topicRequestSender) SendOnRequestTopic(rd *dataRetriever.RequestData, core.IntraShardPeer.String(), trs.mainMessenger, trs.mainPeersRatingHandler, - p2p.MainNetwork, trs.mainPreferredPeersHolderHandler) } else { preferredPeer := trs.getPreferredFullArchivePeer() @@ -165,7 +163,6 @@ func (trs *topicRequestSender) SendOnRequestTopic(rd *dataRetriever.RequestData, core.FullHistoryPeer.String(), trs.fullArchiveMessenger, trs.fullArchivePeersRatingHandler, - p2p.FullArchiveNetwork, trs.fullArchivePreferredPeersHolderHandler) } @@ -206,9 +203,8 @@ func (trs *topicRequestSender) sendOnTopic( buff []byte, maxToSend int, peerType string, - messenger dataRetriever.MessageHandler, + messenger p2p.MessageHandler, peersRatingHandler dataRetriever.PeersRatingHandler, - network p2p.Network, preferredPeersHolder dataRetriever.PreferredPeersHolderHandler, ) int { if len(peerList) == 0 || maxToSend == 0 { @@ -228,12 +224,10 @@ func (trs *topicRequestSender) sendOnTopic( shuffledIndexes = append([]int{preferredPeerIndex}, shuffledIndexes...) 
} - logData = append(logData, "network", network) - for idx := 0; idx < len(shuffledIndexes); idx++ { peer := getPeerID(shuffledIndexes[idx], topRatedPeersList, preferredPeer, peerType, topicToSendRequest, histogramMap) - err := trs.sendToConnectedPeer(topicToSendRequest, buff, peer, messenger, network, preferredPeersHolder) + err := trs.sendToConnectedPeer(topicToSendRequest, buff, peer, messenger, preferredPeersHolder) if err != nil { continue } @@ -247,7 +241,7 @@ func (trs *topicRequestSender) sendOnTopic( } } log.Trace("requests are sent to", logData...) - log.Trace("request peers histogram", "network", network, "max peers to send", maxToSend, "topic", topicToSendRequest, "histogram", histogramMap) + log.Trace("request peers histogram", "max peers to send", maxToSend, "topic", topicToSendRequest, "histogram", histogramMap) return msgSentCounter } diff --git a/dataRetriever/topicSender/topicRequestSender_test.go b/dataRetriever/topicSender/topicRequestSender_test.go index 625d4268f36..aa83c03f2b3 100644 --- a/dataRetriever/topicSender/topicRequestSender_test.go +++ b/dataRetriever/topicSender/topicRequestSender_test.go @@ -20,8 +20,8 @@ import ( func createMockArgBaseTopicSender() topicsender.ArgBaseTopicSender { return topicsender.ArgBaseTopicSender{ - MainMessenger: &mock.MessageHandlerStub{}, - FullArchiveMessenger: &mock.MessageHandlerStub{}, + MainMessenger: &p2pmocks.MessengerStub{}, + FullArchiveMessenger: &p2pmocks.MessengerStub{}, TopicName: "topic", OutputAntiflooder: &mock.P2PAntifloodHandlerStub{}, MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{ @@ -264,7 +264,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { sentToPid2 := false arg := createMockArgTopicRequestSender() - arg.MainMessenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &p2pmocks.MessengerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { if bytes.Equal(peerID.Bytes(), pID1.Bytes()) { sentToPid1 = true @@ -276,7 
+276,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { return nil }, } - arg.FullArchiveMessenger = &mock.MessageHandlerStub{ + arg.FullArchiveMessenger = &p2pmocks.MessengerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { assert.Fail(t, "should have not been called") return nil @@ -315,14 +315,14 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { sentToFullHistoryPeer := false arg := createMockArgTopicRequestSender() - arg.MainMessenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &p2pmocks.MessengerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { assert.Fail(t, "should have not been called") return nil }, } - arg.FullArchiveMessenger = &mock.MessageHandlerStub{ + arg.FullArchiveMessenger = &p2pmocks.MessengerStub{ ConnectedPeersCalled: func() []core.PeerID { return []core.PeerID{pIDfullHistory} }, @@ -401,7 +401,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { } arg.NumCrossShardPeers = 5 arg.NumIntraShardPeers = 5 - arg.MainMessenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &p2pmocks.MessengerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { if strings.HasPrefix(string(peerID), "prefPIDsh0") { countPrefPeersSh0++ @@ -448,7 +448,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { }, } - arg.MainMessenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &p2pmocks.MessengerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { if bytes.Equal(peerID.Bytes(), pidPreferred.Bytes()) { sentToPreferredPeer = true @@ -493,7 +493,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { }, } - arg.MainMessenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &p2pmocks.MessengerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { if bytes.Equal(peerID.Bytes(), 
pidPreferred.Bytes()) { sentToPreferredPeer = true @@ -537,7 +537,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { } }, } - arg.FullArchiveMessenger = &mock.MessageHandlerStub{ + arg.FullArchiveMessenger = &p2pmocks.MessengerStub{ ConnectedPeersCalled: func() []core.PeerID { return []core.PeerID{regularPeer0, regularPeer1} }, @@ -549,7 +549,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { return nil }, } - arg.MainMessenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &p2pmocks.MessengerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { assert.Fail(t, "should not have been called") @@ -595,7 +595,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { }, } - arg.MainMessenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &p2pmocks.MessengerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { if peerID == pidPreferred { sentToPreferredPeer = true @@ -648,7 +648,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { }, } - arg.MainMessenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &p2pmocks.MessengerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { if bytes.Equal(peerID.Bytes(), pidPreferred.Bytes()) { sentToPreferredPeer = true @@ -672,7 +672,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { numSent := 0 arg := createMockArgTopicRequestSender() - arg.MainMessenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &p2pmocks.MessengerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { numSent++ @@ -702,7 +702,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { numSent := 0 arg := createMockArgTopicRequestSender() - arg.MainMessenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &p2pmocks.MessengerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) 
error { if peerID == pidNotCalled { assert.Fail(t, fmt.Sprintf("should not have called pid %s", peerID)) @@ -736,7 +736,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { numSent := 0 arg := createMockArgTopicRequestSender() - arg.MainMessenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &p2pmocks.MessengerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { if peerID == pidNotCalled { assert.Fail(t, fmt.Sprintf("should not have called pid %s", peerID)) @@ -769,7 +769,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { sentToPid1 := false arg := createMockArgTopicRequestSender() - arg.MainMessenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &p2pmocks.MessengerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { if bytes.Equal(peerID.Bytes(), pID1.Bytes()) { sentToPid1 = true diff --git a/dataRetriever/topicSender/topicResolverSender.go b/dataRetriever/topicSender/topicResolverSender.go index 5f97a100a03..fee483190cb 100644 --- a/dataRetriever/topicSender/topicResolverSender.go +++ b/dataRetriever/topicSender/topicResolverSender.go @@ -31,27 +31,15 @@ func NewTopicResolverSender(arg ArgTopicResolverSender) (*topicResolverSender, e // Send is used to send an array buffer to a connected peer // It is used when replying to a request -func (trs *topicResolverSender) Send(buff []byte, peer core.PeerID, network p2p.Network) error { - switch network { - case p2p.MainNetwork: - return trs.sendToConnectedPeer( - trs.topicName, - buff, - peer, - trs.mainMessenger, - network, - trs.mainPreferredPeersHolderHandler) - case p2p.FullArchiveNetwork: - return trs.sendToConnectedPeer( - trs.topicName, - buff, - peer, - trs.fullArchiveMessenger, - network, - trs.fullArchivePreferredPeersHolderHandler) +func (trs *topicResolverSender) Send(buff []byte, peer core.PeerID, destination p2p.MessageHandler) error { + switch destination.Type() { + case 
p2p.RegularMessageHandler: + return trs.sendToConnectedPeer(trs.topicName, buff, peer, destination, trs.mainPreferredPeersHolderHandler) + case p2p.FullArchiveMessageHandler: + return trs.sendToConnectedPeer(trs.topicName, buff, peer, destination, trs.fullArchivePreferredPeersHolderHandler) } - return p2p.ErrUnknownNetwork + return dataRetriever.ErrUnknownMessageHandlerType } // IsInterfaceNil returns true if there is no value under the interface diff --git a/dataRetriever/topicSender/topicResolverSender_test.go b/dataRetriever/topicSender/topicResolverSender_test.go index d78d6709ce2..5b51964e1d7 100644 --- a/dataRetriever/topicSender/topicResolverSender_test.go +++ b/dataRetriever/topicSender/topicResolverSender_test.go @@ -96,7 +96,7 @@ func TestTopicResolverSender_SendOutputAntiflooderErrorsShouldNotSendButError(t expectedErr := errors.New("can not send to peer") arg := createMockArgTopicResolverSender() - arg.MainMessenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &p2pmocks.MessengerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { assert.Fail(t, "send shouldn't have been called") @@ -115,7 +115,7 @@ func TestTopicResolverSender_SendOutputAntiflooderErrorsShouldNotSendButError(t } trs, _ := topicsender.NewTopicResolverSender(arg) - err := trs.Send(buffToSend, pID1, p2p.MainNetwork) + err := trs.Send(buffToSend, pID1, arg.MainMessenger) assert.True(t, errors.Is(err, expectedErr)) } @@ -128,7 +128,7 @@ func TestTopicResolverSender_SendShouldNotCheckAntifloodForPreferred(t *testing. sendWasCalled := false arg := createMockArgTopicResolverSender() - arg.MainMessenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &p2pmocks.MessengerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { sendWasCalled = true return nil @@ -148,7 +148,7 @@ func TestTopicResolverSender_SendShouldNotCheckAntifloodForPreferred(t *testing. 
} trs, _ := topicsender.NewTopicResolverSender(arg) - err := trs.Send(buffToSend, pID1, p2p.MainNetwork) + err := trs.Send(buffToSend, pID1, arg.MainMessenger) require.NoError(t, err) require.True(t, sendWasCalled) } @@ -163,7 +163,7 @@ func TestTopicResolverSender_SendShouldWork(t *testing.T) { t.Parallel() arg := createMockArgTopicResolverSender() - arg.MainMessenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &p2pmocks.MessengerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { if bytes.Equal(peerID.Bytes(), pID1.Bytes()) && bytes.Equal(buff, buffToSend) { @@ -172,8 +172,11 @@ func TestTopicResolverSender_SendShouldWork(t *testing.T) { return nil }, + TypeCalled: func() p2p.MessageHandlerType { + return p2p.RegularMessageHandler + }, } - arg.FullArchiveMessenger = &mock.MessageHandlerStub{ + arg.FullArchiveMessenger = &p2pmocks.MessengerStub{ IsConnectedCalled: func(peerID core.PeerID) bool { return false }, @@ -182,19 +185,37 @@ func TestTopicResolverSender_SendShouldWork(t *testing.T) { return nil }, + TypeCalled: func() p2p.MessageHandlerType { + return p2p.FullArchiveMessageHandler + }, + } + wasCalled := false + arg.MainPreferredPeersHolder = &p2pmocks.PeersHolderStub{ + ContainsCalled: func(peerID core.PeerID) bool { + wasCalled = true + return false + }, + } + arg.FullArchivePreferredPeersHolder = &p2pmocks.PeersHolderStub{ + ContainsCalled: func(peerID core.PeerID) bool { + assert.Fail(t, "should have not been called") + + return false + }, } trs, _ := topicsender.NewTopicResolverSender(arg) - err := trs.Send(buffToSend, pID1, p2p.MainNetwork) + err := trs.Send(buffToSend, pID1, arg.MainMessenger) assert.Nil(t, err) assert.True(t, sentToPid1) + assert.True(t, wasCalled) }) t.Run("on full archive network", func(t *testing.T) { t.Parallel() arg := createMockArgTopicResolverSender() - arg.FullArchiveMessenger = &mock.MessageHandlerStub{ + arg.FullArchiveMessenger = &p2pmocks.MessengerStub{ IsConnectedCalled: 
func(peerID core.PeerID) bool { return true }, @@ -206,20 +227,41 @@ func TestTopicResolverSender_SendShouldWork(t *testing.T) { return nil }, + TypeCalled: func() p2p.MessageHandlerType { + return p2p.FullArchiveMessageHandler + }, } - arg.MainMessenger = &mock.MessageHandlerStub{ + arg.MainMessenger = &p2pmocks.MessengerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { assert.Fail(t, "should have not been called") return nil }, + TypeCalled: func() p2p.MessageHandlerType { + return p2p.RegularMessageHandler + }, + } + wasCalled := false + arg.FullArchivePreferredPeersHolder = &p2pmocks.PeersHolderStub{ + ContainsCalled: func(peerID core.PeerID) bool { + wasCalled = true + return false + }, + } + arg.MainPreferredPeersHolder = &p2pmocks.PeersHolderStub{ + ContainsCalled: func(peerID core.PeerID) bool { + assert.Fail(t, "should have not been called") + + return false + }, } trs, _ := topicsender.NewTopicResolverSender(arg) - err := trs.Send(buffToSend, pID1, p2p.FullArchiveNetwork) + err := trs.Send(buffToSend, pID1, arg.FullArchiveMessenger) assert.Nil(t, err) assert.True(t, sentToPid1) + assert.True(t, wasCalled) }) } diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index ae0f243467b..542cf6bb66f 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -93,8 +93,8 @@ type epochStartBootstrap struct { destinationShardAsObserver uint32 coreComponentsHolder process.CoreComponentsHolder cryptoComponentsHolder process.CryptoComponentsHolder - mainMessenger Messenger - fullArchiveMessenger Messenger + mainMessenger p2p.Messenger + fullArchiveMessenger p2p.Messenger generalConfig config.Config prefsConfig config.PreferencesConfig flagsConfig config.ContextFlagsConfig @@ -164,8 +164,8 @@ type ArgsEpochStartBootstrap struct { CoreComponentsHolder process.CoreComponentsHolder CryptoComponentsHolder process.CryptoComponentsHolder DestinationShardAsObserver uint32 - 
MainMessenger Messenger - FullArchiveMessenger Messenger + MainMessenger p2p.Messenger + FullArchiveMessenger p2p.Messenger GeneralConfig config.Config PrefsConfig config.PreferencesConfig FlagsConfig config.ContextFlagsConfig diff --git a/factory/interface.go b/factory/interface.go index ebbaf0c889f..4221af84544 100644 --- a/factory/interface.go +++ b/factory/interface.go @@ -381,7 +381,7 @@ type ConsensusWorker interface { // RemoveAllReceivedMessagesCalls removes all the functions handlers RemoveAllReceivedMessagesCalls() // ProcessReceivedMessage method redirects the received message to the channel which should handle it - ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error + ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error // Extend does an extension for the subround with subroundId Extend(subroundId int) // GetConsensusStateChangedChannel gets the channel for the consensusStateChanged diff --git a/factory/network/networkComponents.go b/factory/network/networkComponents.go index eb3c0fb4b8d..b153d86c62c 100644 --- a/factory/network/networkComponents.go +++ b/factory/network/networkComponents.go @@ -228,7 +228,6 @@ func (ncf *networkComponentsFactory) createPeerHonestyHandler( func (ncf *networkComponentsFactory) createNetworkHolder( p2pConfig p2pConfig.P2PConfig, logger p2p.Logger, - network p2p.Network, ) (networkComponentsHolder, error) { peersHolder, err := p2pFactory.NewPeersHolder(ncf.preferredPeersSlices) @@ -268,7 +267,6 @@ func (ncf *networkComponentsFactory) createNetworkHolder( P2pSingleSigner: ncf.cryptoComponents.P2pSingleSigner(), P2pKeyGenerator: ncf.cryptoComponents.P2pKeyGen(), Logger: logger, - Network: network, } networkMessenger, err := p2pFactory.NewNetworkMessenger(argsMessenger) if err != nil { @@ -295,7 +293,7 @@ func (ncf *networkComponentsFactory) createNetworkHolder( func (ncf *networkComponentsFactory) createMainNetworkHolder() 
(networkComponentsHolder, error) { loggerInstance := logger.GetOrCreate("main/p2p") - return ncf.createNetworkHolder(ncf.mainP2PConfig, loggerInstance, p2p.MainNetwork) + return ncf.createNetworkHolder(ncf.mainP2PConfig, loggerInstance) } func (ncf *networkComponentsFactory) createFullArchiveNetworkHolder() (networkComponentsHolder, error) { @@ -310,7 +308,7 @@ func (ncf *networkComponentsFactory) createFullArchiveNetworkHolder() (networkCo loggerInstance := logger.GetOrCreate("full-archive/p2p") - return ncf.createNetworkHolder(ncf.fullArchiveP2PConfig, loggerInstance, p2p.FullArchiveNetwork) + return ncf.createNetworkHolder(ncf.fullArchiveP2PConfig, loggerInstance) } // Close closes all underlying components that need closing diff --git a/go.mod b/go.mod index dedf0f30856..4b8749a4d5c 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230621085139-36073f41ef03 + github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230622065724-f6aa2cc5035a github.com/multiversx/mx-chain-core-go v1.2.6 github.com/multiversx/mx-chain-crypto-go v1.2.6 github.com/multiversx/mx-chain-es-indexer-go v1.4.4 diff --git a/go.sum b/go.sum index 8a70a68d9f4..0ec1a04e641 100644 --- a/go.sum +++ b/go.sum @@ -617,8 +617,8 @@ github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXS github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= github.com/multiversx/mx-chain-communication-go v1.0.2/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= -github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230621085139-36073f41ef03 h1:NOlfuZNjiEquyMM0N40mvkug4jgj2Q6jCPjSW8Ksn2Q= -github.com/multiversx/mx-chain-communication-go 
v1.0.3-0.20230621085139-36073f41ef03/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= +github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230622065724-f6aa2cc5035a h1:wEp5/0ri6h7R9EfCGBH5A9R/iEjbZCtNuMPxwQn7jHg= +github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230622065724-f6aa2cc5035a/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1-0.20230510143029-ab37792342df/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= diff --git a/integrationTests/countInterceptor.go b/integrationTests/countInterceptor.go index ccca0752248..fba328de387 100644 --- a/integrationTests/countInterceptor.go +++ b/integrationTests/countInterceptor.go @@ -21,7 +21,7 @@ func NewCountInterceptor() *CountInterceptor { } // ProcessReceivedMessage is called each time a new message is received -func (ci *CountInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, _ core.PeerID) error { +func (ci *CountInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, _ core.PeerID, _ p2p.MessageHandler) error { ci.mutMessagesCount.Lock() ci.messagesCount[message.Topic()]++ ci.mutMessagesCount.Unlock() diff --git a/integrationTests/longTests/antiflooding/messageProcessor.go b/integrationTests/longTests/antiflooding/messageProcessor.go index 3e0ae8963da..5c3838dea61 100644 --- a/integrationTests/longTests/antiflooding/messageProcessor.go +++ b/integrationTests/longTests/antiflooding/messageProcessor.go @@ -31,7 +31,7 @@ func NewMessageProcessor(antiflooder process.P2PAntifloodHandler, messenger p2p. 
} // ProcessReceivedMessage is the callback function from the p2p side whenever a new message is received -func (mp *messageProcessor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { +func (mp *messageProcessor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, _ p2p.MessageHandler) error { atomic.AddUint32(&mp.numMessagesReceived, 1) atomic.AddUint64(&mp.sizeMessagesReceived, uint64(len(message.Data()))) atomic.AddUint32(&mp.numMessagesReceivedPerInterval, 1) diff --git a/integrationTests/p2p/antiflood/messageProcessor.go b/integrationTests/p2p/antiflood/messageProcessor.go index bf04257b2c5..5f56985861f 100644 --- a/integrationTests/p2p/antiflood/messageProcessor.go +++ b/integrationTests/p2p/antiflood/messageProcessor.go @@ -30,7 +30,7 @@ func newMessageProcessor() *MessageProcessor { } // ProcessReceivedMessage is the callback function from the p2p side whenever a new message is received -func (mp *MessageProcessor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { +func (mp *MessageProcessor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, _ p2p.MessageHandler) error { atomic.AddUint32(&mp.numMessagesReceived, 1) atomic.AddUint64(&mp.sizeMessagesReceived, uint64(len(message.Data()))) diff --git a/integrationTests/testHeartbeatNode.go b/integrationTests/testHeartbeatNode.go index 8fd5a02a709..482b7dbb43c 100644 --- a/integrationTests/testHeartbeatNode.go +++ b/integrationTests/testHeartbeatNode.go @@ -150,7 +150,7 @@ func NewTestHeartbeatNode( shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) p2pKey := mock.NewPrivateKeyMock() - messenger := CreateMessengerFromConfigWithPeersRatingHandler(p2pConfig, &p2pmocks.PeersRatingHandlerStub{}, p2pKey, p2p.MainNetwork) + messenger := CreateMessengerFromConfigWithPeersRatingHandler(p2pConfig, &p2pmocks.PeersRatingHandlerStub{}, p2pKey, p2p.RegularMessageHandler) 
pidPk, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 1000}) pkShardId, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 1000}) pidShardId, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 1000}) @@ -170,7 +170,7 @@ func NewTestHeartbeatNode( log.Error("error setting NewPeerShardMapper in p2p messenger", "error", err) } - fullArchiveMessenger := CreateMessengerFromConfigWithPeersRatingHandler(p2pConfig, &p2pmocks.PeersRatingHandlerStub{}, p2pKey, p2p.FullArchiveNetwork) + fullArchiveMessenger := CreateMessengerFromConfigWithPeersRatingHandler(p2pConfig, &p2pmocks.PeersRatingHandlerStub{}, p2pKey, p2p.FullArchiveMessageHandler) pidPkFullArch, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 1000}) pkShardIdFullArch, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 1000}) pidShardIdFullArch, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 1000}) diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index f27804b2d24..8855ae6e8ee 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -167,7 +167,7 @@ func CreateMessengerWithKadDht(initialAddr string) p2p.Messenger { P2pSingleSigner: &mock.SignerMock{}, P2pKeyGenerator: &mock.KeyGenMock{}, Logger: logger.GetOrCreate("tests/p2p"), - Network: p2p.MainNetwork, + MessageHandlerType: p2p.RegularMessageHandler, } libP2PMes, err := p2pFactory.NewNetworkMessenger(arg) @@ -190,7 +190,7 @@ func CreateMessengerFromConfig(p2pConfig p2pConfig.P2PConfig) p2p.Messenger { P2pSingleSigner: &mock.SignerMock{}, P2pKeyGenerator: &mock.KeyGenMock{}, Logger: logger.GetOrCreate("tests/p2p"), - Network: p2p.MainNetwork, + MessageHandlerType: p2p.RegularMessageHandler, } libP2PMes, err := p2pFactory.NewNetworkMessenger(arg) @@ -200,7 +200,7 
@@ func CreateMessengerFromConfig(p2pConfig p2pConfig.P2PConfig) p2p.Messenger { } // CreateMessengerFromConfigWithPeersRatingHandler creates a new libp2p messenger with provided configuration -func CreateMessengerFromConfigWithPeersRatingHandler(p2pConfig p2pConfig.P2PConfig, peersRatingHandler p2p.PeersRatingHandler, p2pKey crypto.PrivateKey, network p2p.Network) p2p.Messenger { +func CreateMessengerFromConfigWithPeersRatingHandler(p2pConfig p2pConfig.P2PConfig, peersRatingHandler p2p.PeersRatingHandler, p2pKey crypto.PrivateKey, messageHandlerType p2p.MessageHandlerType) p2p.Messenger { arg := p2pFactory.ArgsNetworkMessenger{ Marshaller: TestMarshalizer, ListenAddress: p2p.ListenLocalhostAddrWithIp4AndTcp, @@ -213,7 +213,7 @@ func CreateMessengerFromConfigWithPeersRatingHandler(p2pConfig p2pConfig.P2PConf P2pSingleSigner: &mock.SignerMock{}, P2pKeyGenerator: &mock.KeyGenMock{}, Logger: logger.GetOrCreate("tests/p2p"), - Network: network, + MessageHandlerType: messageHandlerType, } libP2PMes, err := p2pFactory.NewNetworkMessenger(arg) @@ -245,7 +245,7 @@ func CreateMessengerWithNoDiscovery() p2p.Messenger { } // CreateMessengerWithNoDiscoveryAndPeersRatingHandler creates a new libp2p messenger with no peer discovery -func CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHanlder p2p.PeersRatingHandler, p2pKey crypto.PrivateKey, network p2p.Network) p2p.Messenger { +func CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHanlder p2p.PeersRatingHandler, p2pKey crypto.PrivateKey, messageHandlerType p2p.MessageHandlerType) p2p.Messenger { p2pCfg := p2pConfig.P2PConfig{ Node: p2pConfig.NodeConfig{ Port: "0", @@ -258,7 +258,7 @@ func CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHanlder p2p. 
}, } - return CreateMessengerFromConfigWithPeersRatingHandler(p2pCfg, peersRatingHanlder, p2pKey, network) + return CreateMessengerFromConfigWithPeersRatingHandler(p2pCfg, peersRatingHanlder, p2pKey, messageHandlerType) } // CreateFixedNetworkOf8Peers assembles a network as following: diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 4eb195a2ad5..ab288c603ac 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -468,8 +468,8 @@ func newBaseTestProcessorNode(args ArgTestProcessorNode) *TestProcessorNode { } p2pKey := mock.NewPrivateKeyMock() - messenger := CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHandler, p2pKey, p2p.MainNetwork) - fullArchiveMessenger := CreateMessengerWithNoDiscoveryAndPeersRatingHandler(fullArchivePeersRatingHandler, p2pKey, p2p.FullArchiveNetwork) + messenger := CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHandler, p2pKey, p2p.RegularMessageHandler) + fullArchiveMessenger := CreateMessengerWithNoDiscoveryAndPeersRatingHandler(fullArchivePeersRatingHandler, p2pKey, p2p.FullArchiveMessageHandler) var peersRatingMonitor p2p.PeersRatingMonitor peersRatingMonitor = &p2pmocks.PeersRatingMonitorStub{} diff --git a/p2p/constants.go b/p2p/constants.go index d1ac7efdf1d..8e26c13b889 100644 --- a/p2p/constants.go +++ b/p2p/constants.go @@ -31,11 +31,11 @@ const ListenLocalhostAddrWithIp4AndTcp = "/ip4/127.0.0.1/tcp/" // BroadcastMethod defines the broadcast method of the message type BroadcastMethod = p2p.BroadcastMethod -// Network defines the network a message belongs to -type Network = p2p.Network +// MessageHandlerType defines the type of the message handler +type MessageHandlerType = p2p.MessageHandlerType -// MainNetwork defines the main network -const MainNetwork = p2p.MainNetwork +// RegularMessageHandler defines a message handler for the main network +const RegularMessageHandler = p2p.RegularMessageHandler -// 
FullArchiveNetwork defines the full archive network -const FullArchiveNetwork = p2p.FullArchiveNetwork +// FullArchiveMessageHandler defines a message handler for the full archive network +const FullArchiveMessageHandler = p2p.FullArchiveMessageHandler diff --git a/p2p/disabled/networkMessenger.go b/p2p/disabled/networkMessenger.go index 34364812bcb..029f4280c7b 100644 --- a/p2p/disabled/networkMessenger.go +++ b/p2p/disabled/networkMessenger.go @@ -180,6 +180,16 @@ func (netMes *networkMessenger) AddPeerTopicNotifier(_ p2p.PeerTopicNotifier) er return nil } +// ProcessReceivedMessage returns nil as it is disabled +func (netMes *networkMessenger) ProcessReceivedMessage(_ p2p.MessageP2P, _ core.PeerID, _ p2p.MessageHandler) error { + return nil +} + +// Type returns regular message handler as it is disabled +func (netMes *networkMessenger) Type() p2p.MessageHandlerType { + return p2p.RegularMessageHandler +} + // IsInterfaceNil returns true if there is no value under the interface func (netMes *networkMessenger) IsInterfaceNil() bool { return netMes == nil diff --git a/p2p/errors.go b/p2p/errors.go index e25d2dd12d7..d80b9445433 100644 --- a/p2p/errors.go +++ b/p2p/errors.go @@ -14,6 +14,3 @@ var ErrNilPreferredPeersHolder = p2p.ErrNilPreferredPeersHolder // ErrNilStatusHandler signals that a nil status handler has been provided var ErrNilStatusHandler = errors.New("nil status handler") - -// ErrUnknownNetwork signals that an unknown network has been provided -var ErrUnknownNetwork = errors.New("unknown network") diff --git a/p2p/interface.go b/p2p/interface.go index 808f22b77c3..2fbfb86e169 100644 --- a/p2p/interface.go +++ b/p2p/interface.go @@ -26,6 +26,9 @@ type Messenger = p2p.Messenger // MessageP2P defines what a p2p message can do (should return) type MessageP2P = p2p.MessageP2P +// MessageHandler defines the behaviour of a component able to send and process messages +type MessageHandler = p2p.MessageHandler + // ChannelLoadBalancer defines what a load 
balancer that uses chans should do type ChannelLoadBalancer interface { AddChannel(channel string) error diff --git a/process/interceptors/epochStartMetaBlockInterceptor.go b/process/interceptors/epochStartMetaBlockInterceptor.go index 3dd033d17ec..36bfc121988 100644 --- a/process/interceptors/epochStartMetaBlockInterceptor.go +++ b/process/interceptors/epochStartMetaBlockInterceptor.go @@ -56,7 +56,7 @@ func NewEpochStartMetaBlockInterceptor(args ArgsEpochStartMetaBlockInterceptor) } // ProcessReceivedMessage will handle received messages containing epoch start meta blocks -func (e *epochStartMetaBlockInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { +func (e *epochStartMetaBlockInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, _ p2p.MessageHandler) error { var epochStartMb block.MetaBlock err := e.marshalizer.Unmarshal(&epochStartMb, message.Data()) if err != nil { diff --git a/process/interceptors/epochStartMetaBlockInterceptor_test.go b/process/interceptors/epochStartMetaBlockInterceptor_test.go index 0d62cc89543..6958be19f8c 100644 --- a/process/interceptors/epochStartMetaBlockInterceptor_test.go +++ b/process/interceptors/epochStartMetaBlockInterceptor_test.go @@ -100,7 +100,7 @@ func TestEpochStartMetaBlockInterceptor_ProcessReceivedMessageUnmarshalError(t * require.NotNil(t, esmbi) message := &p2pmocks.P2PMessageMock{DataField: []byte("wrong meta block bytes")} - err := esmbi.ProcessReceivedMessage(message, "") + err := esmbi.ProcessReceivedMessage(message, "", &p2pmocks.MessengerStub{}) require.Error(t, err) } @@ -144,23 +144,23 @@ func TestEpochStartMetaBlockInterceptor_EntireFlowShouldWorkAndSetTheEpoch(t *te wrongMetaBlock := &block.MetaBlock{Epoch: 0} wrongMetaBlockBytes, _ := args.Marshalizer.Marshal(wrongMetaBlock) - err := esmbi.ProcessReceivedMessage(&p2pmocks.P2PMessageMock{DataField: metaBlockBytes}, "peer0") + err := 
esmbi.ProcessReceivedMessage(&p2pmocks.P2PMessageMock{DataField: metaBlockBytes}, "peer0", &p2pmocks.MessengerStub{}) require.NoError(t, err) require.False(t, wasCalled) - _ = esmbi.ProcessReceivedMessage(&p2pmocks.P2PMessageMock{DataField: metaBlockBytes}, "peer1") + _ = esmbi.ProcessReceivedMessage(&p2pmocks.P2PMessageMock{DataField: metaBlockBytes}, "peer1", &p2pmocks.MessengerStub{}) require.False(t, wasCalled) // send again from peer1 => should not be taken into account - _ = esmbi.ProcessReceivedMessage(&p2pmocks.P2PMessageMock{DataField: metaBlockBytes}, "peer1") + _ = esmbi.ProcessReceivedMessage(&p2pmocks.P2PMessageMock{DataField: metaBlockBytes}, "peer1", &p2pmocks.MessengerStub{}) require.False(t, wasCalled) // send another meta block - _ = esmbi.ProcessReceivedMessage(&p2pmocks.P2PMessageMock{DataField: wrongMetaBlockBytes}, "peer2") + _ = esmbi.ProcessReceivedMessage(&p2pmocks.P2PMessageMock{DataField: wrongMetaBlockBytes}, "peer2", &p2pmocks.MessengerStub{}) require.False(t, wasCalled) // send the last needed metablock from a new peer => should fetch the epoch - _ = esmbi.ProcessReceivedMessage(&p2pmocks.P2PMessageMock{DataField: metaBlockBytes}, "peer3") + _ = esmbi.ProcessReceivedMessage(&p2pmocks.P2PMessageMock{DataField: metaBlockBytes}, "peer3", &p2pmocks.MessengerStub{}) require.True(t, wasCalled) } diff --git a/process/interceptors/multiDataInterceptor.go b/process/interceptors/multiDataInterceptor.go index f785b931fc2..9e0197ea741 100644 --- a/process/interceptors/multiDataInterceptor.go +++ b/process/interceptors/multiDataInterceptor.go @@ -91,7 +91,7 @@ func NewMultiDataInterceptor(arg ArgMultiDataInterceptor) (*MultiDataInterceptor // ProcessReceivedMessage is the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to) -func (mdi *MultiDataInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { +func (mdi 
*MultiDataInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, _ p2p.MessageHandler) error { err := mdi.preProcessMesage(message, fromConnectedPeer) if err != nil { return err diff --git a/process/interceptors/multiDataInterceptor_test.go b/process/interceptors/multiDataInterceptor_test.go index af8511f1f74..6ca244409b7 100644 --- a/process/interceptors/multiDataInterceptor_test.go +++ b/process/interceptors/multiDataInterceptor_test.go @@ -153,7 +153,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageNilMessageShouldErr(t *testi arg := createMockArgMultiDataInterceptor() mdi, _ := interceptors.NewMultiDataInterceptor(arg) - err := mdi.ProcessReceivedMessage(nil, fromConnectedPeerId) + err := mdi.ProcessReceivedMessage(nil, fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Equal(t, process.ErrNilMessage, err) } @@ -188,7 +188,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageUnmarshalFailsShouldErr(t *t DataField: []byte("data to be processed"), PeerField: originatorPid, } - err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Equal(t, errExpeced, err) assert.True(t, originatorBlackListed) @@ -209,7 +209,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageUnmarshalReturnsEmptySliceSh msg := &p2pmocks.P2PMessageMock{ DataField: []byte("data to be processed"), } - err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Equal(t, process.ErrNoDataInMessage, err) } @@ -251,7 +251,7 @@ func TestMultiDataInterceptor_ProcessReceivedCreateFailsShouldErr(t *testing.T) DataField: dataField, PeerField: originatorPid, } - err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) @@ -301,7 +301,7 @@ func 
TestMultiDataInterceptor_ProcessReceivedPartiallyCorrectDataShouldErr(t *te msg := &p2pmocks.P2PMessageMock{ DataField: dataField, } - err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) @@ -360,7 +360,7 @@ func testProcessReceiveMessageMultiData(t *testing.T, isForCurrentShard bool, ex msg := &p2pmocks.P2PMessageMock{ DataField: dataField, } - err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) @@ -401,7 +401,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageCheckBatchErrors(t *testing. msg := &p2pmocks.P2PMessageMock{ DataField: dataField, } - err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) @@ -443,7 +443,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageCheckBatchIsIncomplete(t *te msg := &p2pmocks.P2PMessageMock{ DataField: dataField, } - err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) @@ -496,7 +496,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageCheckBatchIsComplete(t *test msg := &p2pmocks.P2PMessageMock{ DataField: dataField, } - err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) @@ -541,7 +541,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageWhitelistedShouldRetNil(t *t msg := &p2pmocks.P2PMessageMock{ DataField: dataField, } - err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) @@ -611,7 +611,7 @@ func 
processReceivedMessageMultiDataInvalidVersion(t *testing.T, expectedErr err PeerField: originator, } - err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Equal(t, expectedErr, err) assert.True(t, isFromConnectedPeerBlackListed) assert.True(t, isOriginatorBlackListed) @@ -686,7 +686,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageIsOriginatorNotOkButWhiteLis msg := &p2pmocks.P2PMessageMock{ DataField: dataField, } - err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) @@ -699,7 +699,7 @@ func TestMultiDataInterceptor_ProcessReceivedMessageIsOriginatorNotOkButWhiteLis whiteListHandler.IsWhiteListedCalled = func(interceptedData process.InterceptedData) bool { return false } - err = mdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err = mdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) assert.Equal(t, err, errOriginator) diff --git a/process/interceptors/singleDataInterceptor.go b/process/interceptors/singleDataInterceptor.go index 1fee5493cb6..84f3296acd7 100644 --- a/process/interceptors/singleDataInterceptor.go +++ b/process/interceptors/singleDataInterceptor.go @@ -74,7 +74,7 @@ func NewSingleDataInterceptor(arg ArgSingleDataInterceptor) (*SingleDataIntercep // ProcessReceivedMessage is the callback func from the p2p.Messenger and will be called each time a new message was received // (for the topic this validator was registered to) -func (sdi *SingleDataInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { +func (sdi *SingleDataInterceptor) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, _ p2p.MessageHandler) error { err := sdi.preProcessMesage(message, fromConnectedPeer) if err != nil { return err 
diff --git a/process/interceptors/singleDataInterceptor_test.go b/process/interceptors/singleDataInterceptor_test.go index c95a43db238..515c2a8724c 100644 --- a/process/interceptors/singleDataInterceptor_test.go +++ b/process/interceptors/singleDataInterceptor_test.go @@ -164,7 +164,7 @@ func TestSingleDataInterceptor_ProcessReceivedMessageNilMessageShouldErr(t *test arg := createMockArgSingleDataInterceptor() sdi, _ := interceptors.NewSingleDataInterceptor(arg) - err := sdi.ProcessReceivedMessage(nil, fromConnectedPeerId) + err := sdi.ProcessReceivedMessage(nil, fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Equal(t, process.ErrNilMessage, err) } @@ -198,7 +198,7 @@ func TestSingleDataInterceptor_ProcessReceivedMessageFactoryCreationErrorShouldE DataField: []byte("data to be processed"), PeerField: originatorPid, } - err := sdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := sdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Equal(t, errExpected, err) assert.True(t, originatorBlackListed) @@ -250,7 +250,7 @@ func testProcessReceiveMessage(t *testing.T, isForCurrentShard bool, validityErr msg := &p2pmocks.P2PMessageMock{ DataField: []byte("data to be processed"), } - err := sdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := sdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) @@ -294,7 +294,7 @@ func TestSingleDataInterceptor_ProcessReceivedMessageWhitelistedShouldWork(t *te msg := &p2pmocks.P2PMessageMock{ DataField: []byte("data to be processed"), } - err := sdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := sdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) @@ -362,7 +362,7 @@ func processReceivedMessageSingleDataInvalidVersion(t *testing.T, expectedErr er DataField: []byte("data to be processed"), PeerField: originator, } - err := sdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + 
err := sdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) assert.Equal(t, expectedErr, err) assert.True(t, isFromConnectedPeerBlackListed) assert.True(t, isOriginatorBlackListed) @@ -407,7 +407,7 @@ func TestSingleDataInterceptor_ProcessReceivedMessageWithOriginator(t *testing.T msg := &p2pmocks.P2PMessageMock{ DataField: []byte("data to be processed"), } - err := sdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err := sdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) @@ -421,7 +421,7 @@ func TestSingleDataInterceptor_ProcessReceivedMessageWithOriginator(t *testing.T return false } - err = sdi.ProcessReceivedMessage(msg, fromConnectedPeerId) + err = sdi.ProcessReceivedMessage(msg, fromConnectedPeerId, &p2pmocks.MessengerStub{}) time.Sleep(time.Second) diff --git a/process/interface.go b/process/interface.go index 8b131f1d7da..5f0e4cb2ef8 100644 --- a/process/interface.go +++ b/process/interface.go @@ -527,7 +527,7 @@ type BlockChainHookHandler interface { // Interceptor defines what a data interceptor should do // It should also adhere to the p2p.MessageProcessor interface so it can wire to a p2p.Messenger type Interceptor interface { - ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error + ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error SetInterceptedDebugHandler(handler InterceptedDebugger) error RegisterHandler(handler func(topic string, hash []byte, data interface{})) Close() error diff --git a/testscommon/interceptorStub.go b/testscommon/interceptorStub.go index db346803d2a..54fc5be30af 100644 --- a/testscommon/interceptorStub.go +++ b/testscommon/interceptorStub.go @@ -15,7 +15,7 @@ type InterceptorStub struct { } // ProcessReceivedMessage - -func (is *InterceptorStub) ProcessReceivedMessage(message p2p.MessageP2P, _ core.PeerID) error { +func (is *InterceptorStub) 
ProcessReceivedMessage(message p2p.MessageP2P, _ core.PeerID, _ p2p.MessageHandler) error { if is.ProcessReceivedMessageCalled != nil { return is.ProcessReceivedMessageCalled(message) } diff --git a/testscommon/p2pmocks/messengerStub.go b/testscommon/p2pmocks/messengerStub.go index 5dd1722402d..01d7c90823d 100644 --- a/testscommon/p2pmocks/messengerStub.go +++ b/testscommon/p2pmocks/messengerStub.go @@ -44,6 +44,8 @@ type MessengerStub struct { BroadcastUsingPrivateKeyCalled func(topic string, buff []byte, pid core.PeerID, skBytes []byte) BroadcastOnChannelUsingPrivateKeyCalled func(channel string, topic string, buff []byte, pid core.PeerID, skBytes []byte) SignUsingPrivateKeyCalled func(skBytes []byte, payload []byte) ([]byte, error) + ProcessReceivedMessageCalled func(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error + TypeCalled func() p2p.MessageHandlerType } // ID - @@ -351,6 +353,22 @@ func (ms *MessengerStub) SignUsingPrivateKey(skBytes []byte, payload []byte) ([] return make([]byte, 0), nil } +// ProcessReceivedMessage - +func (ms *MessengerStub) ProcessReceivedMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error { + if ms.ProcessReceivedMessageCalled != nil { + return ms.ProcessReceivedMessageCalled(message, fromConnectedPeer, source) + } + return nil +} + +// Type - +func (ms *MessengerStub) Type() p2p.MessageHandlerType { + if ms.TypeCalled != nil { + return ms.TypeCalled() + } + return p2p.RegularMessageHandler +} + // IsInterfaceNil returns true if there is no value under the interface func (ms *MessengerStub) IsInterfaceNil() bool { return ms == nil diff --git a/testscommon/p2pmocks/p2pMessageMock.go b/testscommon/p2pmocks/p2pMessageMock.go index 575c101712d..a6e09016606 100644 --- a/testscommon/p2pmocks/p2pMessageMock.go +++ b/testscommon/p2pmocks/p2pMessageMock.go @@ -17,7 +17,6 @@ type P2PMessageMock struct { PayloadField []byte TimestampField int64 
BroadcastMethodField p2p.BroadcastMethod - NetworkField p2p.Network } // From - @@ -70,11 +69,6 @@ func (msg *P2PMessageMock) BroadcastMethod() p2p.BroadcastMethod { return msg.BroadcastMethodField } -// Network - -func (msg *P2PMessageMock) Network() p2p.Network { - return msg.NetworkField -} - // IsInterfaceNil returns true if there is no value under the interface func (msg *P2PMessageMock) IsInterfaceNil() bool { return msg == nil diff --git a/update/factory/fullSyncRequestersContainerFactory.go b/update/factory/fullSyncRequestersContainerFactory.go index e3e0d89f3aa..ede951b8a64 100644 --- a/update/factory/fullSyncRequestersContainerFactory.go +++ b/update/factory/fullSyncRequestersContainerFactory.go @@ -13,6 +13,7 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever/requestHandlers/requesters" "github.com/multiversx/mx-chain-go/dataRetriever/topicSender" "github.com/multiversx/mx-chain-go/epochStart/bootstrap/disabled" + "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/update" @@ -26,8 +27,8 @@ const ( type requestersContainerFactory struct { shardCoordinator sharding.Coordinator - mainMessenger dataRetriever.TopicMessageHandler - fullArchiveMessenger dataRetriever.TopicMessageHandler + mainMessenger p2p.Messenger + fullArchiveMessenger p2p.Messenger marshaller marshal.Marshalizer intRandomizer dataRetriever.IntRandomizer container dataRetriever.RequestersContainer @@ -39,8 +40,8 @@ type requestersContainerFactory struct { // ArgsRequestersContainerFactory defines the arguments for the requestersContainerFactory constructor type ArgsRequestersContainerFactory struct { ShardCoordinator sharding.Coordinator - MainMessenger dataRetriever.TopicMessageHandler - FullArchiveMessenger dataRetriever.TopicMessageHandler + MainMessenger p2p.Messenger + FullArchiveMessenger p2p.Messenger Marshaller marshal.Marshalizer ExistingRequesters 
dataRetriever.RequestersContainer OutputAntifloodHandler dataRetriever.P2PAntifloodHandler diff --git a/update/factory/fullSyncResolversContainerFactory.go b/update/factory/fullSyncResolversContainerFactory.go index b6865bfb009..e2227993ef9 100644 --- a/update/factory/fullSyncResolversContainerFactory.go +++ b/update/factory/fullSyncResolversContainerFactory.go @@ -12,6 +12,7 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever/resolvers" "github.com/multiversx/mx-chain-go/dataRetriever/topicSender" "github.com/multiversx/mx-chain-go/epochStart/bootstrap/disabled" + "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/update" @@ -22,8 +23,8 @@ const defaultTargetShardID = uint32(0) type resolversContainerFactory struct { shardCoordinator sharding.Coordinator - mainMessenger dataRetriever.TopicMessageHandler - fullArchiveMessenger dataRetriever.TopicMessageHandler + mainMessenger p2p.Messenger + fullArchiveMessenger p2p.Messenger marshalizer marshal.Marshalizer dataTrieContainer common.TriesHolder container dataRetriever.ResolversContainer @@ -35,8 +36,8 @@ type resolversContainerFactory struct { // ArgsNewResolversContainerFactory defines the arguments for the resolversContainerFactory constructor type ArgsNewResolversContainerFactory struct { ShardCoordinator sharding.Coordinator - MainMessenger dataRetriever.TopicMessageHandler - FullArchiveMessenger dataRetriever.TopicMessageHandler + MainMessenger p2p.Messenger + FullArchiveMessenger p2p.Messenger Marshalizer marshal.Marshalizer DataTrieContainer common.TriesHolder ExistingResolvers dataRetriever.ResolversContainer From 82e208b33a9eea49f0a876e486b35390b20224d2 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Fri, 23 Jun 2023 14:58:47 +0300 Subject: [PATCH 24/38] updated mx-chain-communication-go to latest and removed the message handler type concept --- cmd/seednode/main.go | 1 - 
dataRetriever/errors.go | 3 --- dataRetriever/topicSender/baseTopicSender.go | 5 +++-- dataRetriever/topicSender/topicRequestSender.go | 12 ++++-------- dataRetriever/topicSender/topicResolverSender.go | 9 +-------- .../topicSender/topicResolverSender_test.go | 12 ------------ go.mod | 2 +- go.sum | 4 ++-- heartbeat/sender/multikeyHeartbeatSender.go | 1 - integrationTests/testHeartbeatNode.go | 4 ++-- integrationTests/testInitializer.go | 9 +++------ integrationTests/testProcessorNode.go | 4 ++-- p2p/constants.go | 9 --------- p2p/disabled/networkMessenger.go | 5 ----- testscommon/p2pmocks/messengerStub.go | 9 --------- 15 files changed, 18 insertions(+), 71 deletions(-) diff --git a/cmd/seednode/main.go b/cmd/seednode/main.go index 6c184d3e4cc..c76756357d5 100644 --- a/cmd/seednode/main.go +++ b/cmd/seednode/main.go @@ -269,7 +269,6 @@ func createNode( P2pSingleSigner: p2pSingleSigner, P2pKeyGenerator: p2pKeyGen, Logger: logger.GetOrCreate("seed/p2p"), - MessageHandlerType: p2p.RegularMessageHandler, } return p2pFactory.NewNetworkMessenger(arg) diff --git a/dataRetriever/errors.go b/dataRetriever/errors.go index 99f542b422c..a015e6e10ed 100644 --- a/dataRetriever/errors.go +++ b/dataRetriever/errors.go @@ -265,6 +265,3 @@ var ErrNilValidatorInfoStorage = errors.New("nil validator info storage") // ErrValidatorInfoNotFound signals that no validator info was found var ErrValidatorInfoNotFound = errors.New("validator info not found") - -// ErrUnknownMessageHandlerType signals that an unknown message handler has been provided -var ErrUnknownMessageHandlerType = errors.New("unknown message handler type") diff --git a/dataRetriever/topicSender/baseTopicSender.go b/dataRetriever/topicSender/baseTopicSender.go index e6aeed6972a..c301995fe57 100644 --- a/dataRetriever/topicSender/baseTopicSender.go +++ b/dataRetriever/topicSender/baseTopicSender.go @@ -80,7 +80,6 @@ func (baseSender *baseTopicSender) sendToConnectedPeer( buff []byte, peer core.PeerID, messenger 
p2p.MessageHandler, - preferredPeersHolder dataRetriever.PreferredPeersHolderHandler, ) error { msg := &factory.Message{ DataField: buff, @@ -88,7 +87,9 @@ func (baseSender *baseTopicSender) sendToConnectedPeer( TopicField: topic, } - shouldAvoidAntiFloodCheck := preferredPeersHolder.Contains(peer) + isPreferredOnMain := baseSender.mainPreferredPeersHolderHandler.Contains(peer) + isPreferredOnFullArchive := baseSender.fullArchivePreferredPeersHolderHandler.Contains(peer) + shouldAvoidAntiFloodCheck := isPreferredOnMain || isPreferredOnFullArchive if shouldAvoidAntiFloodCheck { return messenger.SendToConnectedPeer(topic, buff, peer) } diff --git a/dataRetriever/topicSender/topicRequestSender.go b/dataRetriever/topicSender/topicRequestSender.go index 0f1569391d8..0662f9240ed 100644 --- a/dataRetriever/topicSender/topicRequestSender.go +++ b/dataRetriever/topicSender/topicRequestSender.go @@ -135,8 +135,7 @@ func (trs *topicRequestSender) SendOnRequestTopic(rd *dataRetriever.RequestData, trs.numCrossShardPeers, core.CrossShardPeer.String(), trs.mainMessenger, - trs.mainPeersRatingHandler, - trs.mainPreferredPeersHolderHandler) + trs.mainPeersRatingHandler) intraPeers = trs.peerListCreator.IntraShardPeerList() preferredPeer = trs.getPreferredPeer(trs.selfShardId) @@ -148,8 +147,7 @@ func (trs *topicRequestSender) SendOnRequestTopic(rd *dataRetriever.RequestData, trs.numIntraShardPeers, core.IntraShardPeer.String(), trs.mainMessenger, - trs.mainPeersRatingHandler, - trs.mainPreferredPeersHolderHandler) + trs.mainPeersRatingHandler) } else { preferredPeer := trs.getPreferredFullArchivePeer() fullHistoryPeers = trs.fullArchiveMessenger.ConnectedPeers() @@ -162,8 +160,7 @@ func (trs *topicRequestSender) SendOnRequestTopic(rd *dataRetriever.RequestData, trs.numFullHistoryPeers, core.FullHistoryPeer.String(), trs.fullArchiveMessenger, - trs.fullArchivePeersRatingHandler, - trs.fullArchivePreferredPeersHolderHandler) + trs.fullArchivePeersRatingHandler) } 
trs.callDebugHandler(originalHashes, numSentIntra, numSentCross) @@ -205,7 +202,6 @@ func (trs *topicRequestSender) sendOnTopic( peerType string, messenger p2p.MessageHandler, peersRatingHandler dataRetriever.PeersRatingHandler, - preferredPeersHolder dataRetriever.PreferredPeersHolderHandler, ) int { if len(peerList) == 0 || maxToSend == 0 { return 0 @@ -227,7 +223,7 @@ func (trs *topicRequestSender) sendOnTopic( for idx := 0; idx < len(shuffledIndexes); idx++ { peer := getPeerID(shuffledIndexes[idx], topRatedPeersList, preferredPeer, peerType, topicToSendRequest, histogramMap) - err := trs.sendToConnectedPeer(topicToSendRequest, buff, peer, messenger, preferredPeersHolder) + err := trs.sendToConnectedPeer(topicToSendRequest, buff, peer, messenger) if err != nil { continue } diff --git a/dataRetriever/topicSender/topicResolverSender.go b/dataRetriever/topicSender/topicResolverSender.go index fee483190cb..6c65afed900 100644 --- a/dataRetriever/topicSender/topicResolverSender.go +++ b/dataRetriever/topicSender/topicResolverSender.go @@ -32,14 +32,7 @@ func NewTopicResolverSender(arg ArgTopicResolverSender) (*topicResolverSender, e // Send is used to send an array buffer to a connected peer // It is used when replying to a request func (trs *topicResolverSender) Send(buff []byte, peer core.PeerID, destination p2p.MessageHandler) error { - switch destination.Type() { - case p2p.RegularMessageHandler: - return trs.sendToConnectedPeer(trs.topicName, buff, peer, destination, trs.mainPreferredPeersHolderHandler) - case p2p.FullArchiveMessageHandler: - return trs.sendToConnectedPeer(trs.topicName, buff, peer, destination, trs.fullArchivePreferredPeersHolderHandler) - } - - return dataRetriever.ErrUnknownMessageHandlerType + return trs.sendToConnectedPeer(trs.topicName, buff, peer, destination) } // IsInterfaceNil returns true if there is no value under the interface diff --git a/dataRetriever/topicSender/topicResolverSender_test.go 
b/dataRetriever/topicSender/topicResolverSender_test.go index 5b51964e1d7..d12716274ec 100644 --- a/dataRetriever/topicSender/topicResolverSender_test.go +++ b/dataRetriever/topicSender/topicResolverSender_test.go @@ -172,9 +172,6 @@ func TestTopicResolverSender_SendShouldWork(t *testing.T) { return nil }, - TypeCalled: func() p2p.MessageHandlerType { - return p2p.RegularMessageHandler - }, } arg.FullArchiveMessenger = &p2pmocks.MessengerStub{ IsConnectedCalled: func(peerID core.PeerID) bool { @@ -185,9 +182,6 @@ func TestTopicResolverSender_SendShouldWork(t *testing.T) { return nil }, - TypeCalled: func() p2p.MessageHandlerType { - return p2p.FullArchiveMessageHandler - }, } wasCalled := false arg.MainPreferredPeersHolder = &p2pmocks.PeersHolderStub{ @@ -227,9 +221,6 @@ func TestTopicResolverSender_SendShouldWork(t *testing.T) { return nil }, - TypeCalled: func() p2p.MessageHandlerType { - return p2p.FullArchiveMessageHandler - }, } arg.MainMessenger = &p2pmocks.MessengerStub{ SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { @@ -237,9 +228,6 @@ func TestTopicResolverSender_SendShouldWork(t *testing.T) { return nil }, - TypeCalled: func() p2p.MessageHandlerType { - return p2p.RegularMessageHandler - }, } wasCalled := false arg.FullArchivePreferredPeersHolder = &p2pmocks.PeersHolderStub{ diff --git a/go.mod b/go.mod index 4b8749a4d5c..a5a0d11b8f0 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230622065724-f6aa2cc5035a + github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230623111526-bad086eb28f9 github.com/multiversx/mx-chain-core-go v1.2.6 github.com/multiversx/mx-chain-crypto-go v1.2.6 github.com/multiversx/mx-chain-es-indexer-go v1.4.4 diff --git a/go.sum b/go.sum index 0ec1a04e641..4b5956a4e9b 100644 --- a/go.sum +++ b/go.sum @@ -617,8 
+617,8 @@ github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXS github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= github.com/multiversx/mx-chain-communication-go v1.0.2/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= -github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230622065724-f6aa2cc5035a h1:wEp5/0ri6h7R9EfCGBH5A9R/iEjbZCtNuMPxwQn7jHg= -github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230622065724-f6aa2cc5035a/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= +github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230623111526-bad086eb28f9 h1:gvCgL3dDNc1I1AzCRzW2fKlsppYBOTtIVPLO2fssiA0= +github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230623111526-bad086eb28f9/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1-0.20230510143029-ab37792342df/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= diff --git a/heartbeat/sender/multikeyHeartbeatSender.go b/heartbeat/sender/multikeyHeartbeatSender.go index e97c64e4ea4..7f14c9be905 100644 --- a/heartbeat/sender/multikeyHeartbeatSender.go +++ b/heartbeat/sender/multikeyHeartbeatSender.go @@ -186,7 +186,6 @@ func (sender *multikeyHeartbeatSender) sendMessageForKey(pkBytes []byte) error { } sender.mainMessenger.BroadcastUsingPrivateKey(sender.topic, buff, pid, p2pSk) - sender.fullArchiveMessenger.BroadcastUsingPrivateKey(sender.topic, buff, pid, p2pSk) return nil } diff --git a/integrationTests/testHeartbeatNode.go b/integrationTests/testHeartbeatNode.go index 482b7dbb43c..c833954ad53 100644 --- a/integrationTests/testHeartbeatNode.go +++ 
b/integrationTests/testHeartbeatNode.go @@ -150,7 +150,7 @@ func NewTestHeartbeatNode( shardCoordinator, _ := sharding.NewMultiShardCoordinator(maxShards, nodeShardId) p2pKey := mock.NewPrivateKeyMock() - messenger := CreateMessengerFromConfigWithPeersRatingHandler(p2pConfig, &p2pmocks.PeersRatingHandlerStub{}, p2pKey, p2p.RegularMessageHandler) + messenger := CreateMessengerFromConfigWithPeersRatingHandler(p2pConfig, &p2pmocks.PeersRatingHandlerStub{}, p2pKey) pidPk, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 1000}) pkShardId, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 1000}) pidShardId, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 1000}) @@ -170,7 +170,7 @@ func NewTestHeartbeatNode( log.Error("error setting NewPeerShardMapper in p2p messenger", "error", err) } - fullArchiveMessenger := CreateMessengerFromConfigWithPeersRatingHandler(p2pConfig, &p2pmocks.PeersRatingHandlerStub{}, p2pKey, p2p.FullArchiveMessageHandler) + fullArchiveMessenger := CreateMessengerFromConfigWithPeersRatingHandler(p2pConfig, &p2pmocks.PeersRatingHandlerStub{}, p2pKey) pidPkFullArch, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 1000}) pkShardIdFullArch, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 1000}) pidShardIdFullArch, _ := storageunit.NewCache(storageunit.CacheConfig{Type: storageunit.LRUCache, Capacity: 1000}) diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 8855ae6e8ee..713611d8ced 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -167,7 +167,6 @@ func CreateMessengerWithKadDht(initialAddr string) p2p.Messenger { P2pSingleSigner: &mock.SignerMock{}, P2pKeyGenerator: &mock.KeyGenMock{}, Logger: logger.GetOrCreate("tests/p2p"), - MessageHandlerType: p2p.RegularMessageHandler, } 
libP2PMes, err := p2pFactory.NewNetworkMessenger(arg) @@ -190,7 +189,6 @@ func CreateMessengerFromConfig(p2pConfig p2pConfig.P2PConfig) p2p.Messenger { P2pSingleSigner: &mock.SignerMock{}, P2pKeyGenerator: &mock.KeyGenMock{}, Logger: logger.GetOrCreate("tests/p2p"), - MessageHandlerType: p2p.RegularMessageHandler, } libP2PMes, err := p2pFactory.NewNetworkMessenger(arg) @@ -200,7 +198,7 @@ func CreateMessengerFromConfig(p2pConfig p2pConfig.P2PConfig) p2p.Messenger { } // CreateMessengerFromConfigWithPeersRatingHandler creates a new libp2p messenger with provided configuration -func CreateMessengerFromConfigWithPeersRatingHandler(p2pConfig p2pConfig.P2PConfig, peersRatingHandler p2p.PeersRatingHandler, p2pKey crypto.PrivateKey, messageHandlerType p2p.MessageHandlerType) p2p.Messenger { +func CreateMessengerFromConfigWithPeersRatingHandler(p2pConfig p2pConfig.P2PConfig, peersRatingHandler p2p.PeersRatingHandler, p2pKey crypto.PrivateKey) p2p.Messenger { arg := p2pFactory.ArgsNetworkMessenger{ Marshaller: TestMarshalizer, ListenAddress: p2p.ListenLocalhostAddrWithIp4AndTcp, @@ -213,7 +211,6 @@ func CreateMessengerFromConfigWithPeersRatingHandler(p2pConfig p2pConfig.P2PConf P2pSingleSigner: &mock.SignerMock{}, P2pKeyGenerator: &mock.KeyGenMock{}, Logger: logger.GetOrCreate("tests/p2p"), - MessageHandlerType: messageHandlerType, } libP2PMes, err := p2pFactory.NewNetworkMessenger(arg) @@ -245,7 +242,7 @@ func CreateMessengerWithNoDiscovery() p2p.Messenger { } // CreateMessengerWithNoDiscoveryAndPeersRatingHandler creates a new libp2p messenger with no peer discovery -func CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHanlder p2p.PeersRatingHandler, p2pKey crypto.PrivateKey, messageHandlerType p2p.MessageHandlerType) p2p.Messenger { +func CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHanlder p2p.PeersRatingHandler, p2pKey crypto.PrivateKey) p2p.Messenger { p2pCfg := p2pConfig.P2PConfig{ Node: p2pConfig.NodeConfig{ Port: "0", @@ -258,7 
+255,7 @@ func CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHanlder p2p. }, } - return CreateMessengerFromConfigWithPeersRatingHandler(p2pCfg, peersRatingHanlder, p2pKey, messageHandlerType) + return CreateMessengerFromConfigWithPeersRatingHandler(p2pCfg, peersRatingHanlder, p2pKey) } // CreateFixedNetworkOf8Peers assembles a network as following: diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index ab288c603ac..57d9d47941b 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -468,8 +468,8 @@ func newBaseTestProcessorNode(args ArgTestProcessorNode) *TestProcessorNode { } p2pKey := mock.NewPrivateKeyMock() - messenger := CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHandler, p2pKey, p2p.RegularMessageHandler) - fullArchiveMessenger := CreateMessengerWithNoDiscoveryAndPeersRatingHandler(fullArchivePeersRatingHandler, p2pKey, p2p.FullArchiveMessageHandler) + messenger := CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHandler, p2pKey) + fullArchiveMessenger := CreateMessengerWithNoDiscoveryAndPeersRatingHandler(fullArchivePeersRatingHandler, p2pKey) var peersRatingMonitor p2p.PeersRatingMonitor peersRatingMonitor = &p2pmocks.PeersRatingMonitorStub{} diff --git a/p2p/constants.go b/p2p/constants.go index 8e26c13b889..4f0807484b7 100644 --- a/p2p/constants.go +++ b/p2p/constants.go @@ -30,12 +30,3 @@ const ListenLocalhostAddrWithIp4AndTcp = "/ip4/127.0.0.1/tcp/" // BroadcastMethod defines the broadcast method of the message type BroadcastMethod = p2p.BroadcastMethod - -// MessageHandlerType defines the type of the message handler -type MessageHandlerType = p2p.MessageHandlerType - -// RegularMessageHandler defines a message handler for the main network -const RegularMessageHandler = p2p.RegularMessageHandler - -// FullArchiveMessageHandler defines a message handler for the full archive network -const FullArchiveMessageHandler = 
p2p.FullArchiveMessageHandler diff --git a/p2p/disabled/networkMessenger.go b/p2p/disabled/networkMessenger.go index 029f4280c7b..cd64969f476 100644 --- a/p2p/disabled/networkMessenger.go +++ b/p2p/disabled/networkMessenger.go @@ -185,11 +185,6 @@ func (netMes *networkMessenger) ProcessReceivedMessage(_ p2p.MessageP2P, _ core. return nil } -// Type returns regular message handler as it is disabled -func (netMes *networkMessenger) Type() p2p.MessageHandlerType { - return p2p.RegularMessageHandler -} - // IsInterfaceNil returns true if there is no value under the interface func (netMes *networkMessenger) IsInterfaceNil() bool { return netMes == nil diff --git a/testscommon/p2pmocks/messengerStub.go b/testscommon/p2pmocks/messengerStub.go index 01d7c90823d..51721307898 100644 --- a/testscommon/p2pmocks/messengerStub.go +++ b/testscommon/p2pmocks/messengerStub.go @@ -45,7 +45,6 @@ type MessengerStub struct { BroadcastOnChannelUsingPrivateKeyCalled func(channel string, topic string, buff []byte, pid core.PeerID, skBytes []byte) SignUsingPrivateKeyCalled func(skBytes []byte, payload []byte) ([]byte, error) ProcessReceivedMessageCalled func(message p2p.MessageP2P, fromConnectedPeer core.PeerID, source p2p.MessageHandler) error - TypeCalled func() p2p.MessageHandlerType } // ID - @@ -361,14 +360,6 @@ func (ms *MessengerStub) ProcessReceivedMessage(message p2p.MessageP2P, fromConn return nil } -// Type - -func (ms *MessengerStub) Type() p2p.MessageHandlerType { - if ms.TypeCalled != nil { - return ms.TypeCalled() - } - return p2p.RegularMessageHandler -} - // IsInterfaceNil returns true if there is no value under the interface func (ms *MessengerStub) IsInterfaceNil() bool { return ms == nil From 6a46c685a3ffef027e9e7ba201310e3aae2518e7 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Fri, 23 Jun 2023 15:17:55 +0300 Subject: [PATCH 25/38] fixed tests --- .../topicSender/topicResolverSender_test.go | 28 +++++------ .../sender/multikeyHeartbeatSender_test.go | 46 
+------------------ 2 files changed, 13 insertions(+), 61 deletions(-) diff --git a/dataRetriever/topicSender/topicResolverSender_test.go b/dataRetriever/topicSender/topicResolverSender_test.go index d12716274ec..2e8df0f47c9 100644 --- a/dataRetriever/topicSender/topicResolverSender_test.go +++ b/dataRetriever/topicSender/topicResolverSender_test.go @@ -174,26 +174,23 @@ func TestTopicResolverSender_SendShouldWork(t *testing.T) { }, } arg.FullArchiveMessenger = &p2pmocks.MessengerStub{ - IsConnectedCalled: func(peerID core.PeerID) bool { - return false - }, SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { assert.Fail(t, "should have not been called") return nil }, } - wasCalled := false + wasMainCalled := false arg.MainPreferredPeersHolder = &p2pmocks.PeersHolderStub{ ContainsCalled: func(peerID core.PeerID) bool { - wasCalled = true + wasMainCalled = true return false }, } + wasFullArchiveCalled := false arg.FullArchivePreferredPeersHolder = &p2pmocks.PeersHolderStub{ ContainsCalled: func(peerID core.PeerID) bool { - assert.Fail(t, "should have not been called") - + wasFullArchiveCalled = true return false }, } @@ -203,16 +200,14 @@ func TestTopicResolverSender_SendShouldWork(t *testing.T) { assert.Nil(t, err) assert.True(t, sentToPid1) - assert.True(t, wasCalled) + assert.True(t, wasMainCalled) + assert.True(t, wasFullArchiveCalled) }) t.Run("on full archive network", func(t *testing.T) { t.Parallel() arg := createMockArgTopicResolverSender() arg.FullArchiveMessenger = &p2pmocks.MessengerStub{ - IsConnectedCalled: func(peerID core.PeerID) bool { - return true - }, SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { if bytes.Equal(peerID.Bytes(), pID1.Bytes()) && bytes.Equal(buff, buffToSend) { @@ -229,17 +224,17 @@ func TestTopicResolverSender_SendShouldWork(t *testing.T) { return nil }, } - wasCalled := false + wasFullArchiveCalled := false arg.FullArchivePreferredPeersHolder = 
&p2pmocks.PeersHolderStub{ ContainsCalled: func(peerID core.PeerID) bool { - wasCalled = true + wasFullArchiveCalled = true return false }, } + wasMainCalled := false arg.MainPreferredPeersHolder = &p2pmocks.PeersHolderStub{ ContainsCalled: func(peerID core.PeerID) bool { - assert.Fail(t, "should have not been called") - + wasMainCalled = true return false }, } @@ -249,7 +244,8 @@ func TestTopicResolverSender_SendShouldWork(t *testing.T) { assert.Nil(t, err) assert.True(t, sentToPid1) - assert.True(t, wasCalled) + assert.True(t, wasMainCalled) + assert.True(t, wasFullArchiveCalled) }) } diff --git a/heartbeat/sender/multikeyHeartbeatSender_test.go b/heartbeat/sender/multikeyHeartbeatSender_test.go index 74c1f0014d9..5d341bd9de7 100644 --- a/heartbeat/sender/multikeyHeartbeatSender_test.go +++ b/heartbeat/sender/multikeyHeartbeatSender_test.go @@ -332,7 +332,6 @@ func TestMultikeyHeartbeatSender_execute(t *testing.T) { args := createMockMultikeyHeartbeatSenderArgs(createMockBaseArgs()) recordedMainMessages := make(map[core.PeerID][][]byte) - recordedFullArchiveMessages := make(map[core.PeerID][][]byte) args.mainMessenger = &p2pmocks.MessengerStub{ BroadcastCalled: func(topic string, buff []byte) { assert.Equal(t, args.topic, topic) @@ -343,16 +342,6 @@ func TestMultikeyHeartbeatSender_execute(t *testing.T) { recordedMainMessages[pid] = append(recordedMainMessages[pid], buff) }, } - args.fullArchiveMessenger = &p2pmocks.MessengerStub{ - BroadcastCalled: func(topic string, buff []byte) { - assert.Equal(t, args.topic, topic) - recordedFullArchiveMessages[args.mainMessenger.ID()] = append(recordedFullArchiveMessages[args.mainMessenger.ID()], buff) - }, - BroadcastUsingPrivateKeyCalled: func(topic string, buff []byte, pid core.PeerID, skBytes []byte) { - assert.Equal(t, args.topic, topic) - recordedFullArchiveMessages[pid] = append(recordedFullArchiveMessages[pid], buff) - }, - } args.managedPeersHolder = &testscommon.ManagedPeersHolderStub{ 
IsKeyManagedByCurrentNodeCalled: func(pkBytes []byte) bool { return string(pkBytes) != "dd" @@ -390,8 +379,7 @@ func TestMultikeyHeartbeatSender_execute(t *testing.T) { err := senderInstance.execute() assert.Nil(t, err) - assert.Equal(t, 4, len(recordedMainMessages)) // current pid, aa, bb, cc - assert.Equal(t, 4, len(recordedFullArchiveMessages)) // current pid, aa, bb, cc + assert.Equal(t, 4, len(recordedMainMessages)) // current pid, aa, bb, cc checkRecordedMessages(t, recordedMainMessages, @@ -425,38 +413,6 @@ func TestMultikeyHeartbeatSender_execute(t *testing.T) { "cc_pid", core.RegularPeer) - checkRecordedMessages(t, - recordedFullArchiveMessages, - args, - args.versionNumber, - args.nodeDisplayName, - args.mainMessenger.ID(), - core.FullHistoryObserver) - - checkRecordedMessages(t, - recordedFullArchiveMessages, - args, - args.baseVersionNumber+"/aa_machineID", - "aa_name", - "aa_pid", - core.RegularPeer) - - checkRecordedMessages(t, - recordedFullArchiveMessages, - args, - args.baseVersionNumber+"/bb_machineID", - "bb_name", - "bb_pid", - core.RegularPeer) - - checkRecordedMessages(t, - recordedFullArchiveMessages, - args, - args.baseVersionNumber+"/cc_machineID", - "cc_name", - "cc_pid", - core.RegularPeer) - assert.Equal(t, uint64(1), args.currentBlockProvider.GetCurrentBlockHeader().GetNonce()) }) } From ea1fe3c3e25786f3a23b9eaf6c43f6d8a3399728 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Fri, 23 Jun 2023 16:29:25 +0300 Subject: [PATCH 26/38] refactored to only use one peersRatingHandler and peersRatingMonitor --- api/groups/nodeGroup.go | 16 +- api/groups/nodeGroup_test.go | 56 +++---- api/mock/facadeStub.go | 4 +- api/shared/interface.go | 2 +- .../factory/requestersContainer/args.go | 3 +- .../baseRequestersContainerFactory.go | 29 ++-- .../metaRequestersContainerFactory.go | 3 +- .../metaRequestersContainerFactory_test.go | 17 +-- .../shardRequestersContainerFactory.go | 3 +- .../shardRequestersContainerFactory_test.go | 18 +-- 
.../topicSender/topicRequestSender.go | 46 +++--- .../topicSender/topicRequestSender_test.go | 45 ++---- epochStart/bootstrap/process.go | 3 +- facade/initial/initialNodeFacade.go | 6 +- facade/initial/initialNodeFacade_test.go | 3 +- facade/interface.go | 2 +- facade/mock/nodeStub.go | 6 +- facade/nodeFacade.go | 4 +- facade/nodeFacade_test.go | 7 +- factory/disabled/peersRatingHandler.go | 29 ---- factory/disabled/peersRatingMonitor.go | 19 --- factory/interface.go | 2 - factory/mock/networkComponentsMock.go | 30 ++-- factory/network/networkComponents.go | 92 ++++++------ factory/network/networkComponentsHandler.go | 42 +----- .../network/networkComponentsHandler_test.go | 4 - factory/processing/processComponents.go | 3 +- factory/processing/processComponents_test.go | 15 +- go.mod | 2 +- go.sum | 4 +- integrationTests/interface.go | 2 +- .../mock/networkComponentsMock.go | 34 ++--- .../multiShard/hardFork/hardFork_test.go | 3 +- .../p2p/peersRating/peersRating_test.go | 19 +-- integrationTests/testHeartbeatNode.go | 3 +- integrationTests/testProcessorNode.go | 139 +++++++----------- node/mock/factory/networkComponentsMock.go | 30 ++-- node/node.go | 6 +- p2p/interface.go | 8 +- .../p2pmocks/peersRatingMonitorStub.go | 10 +- update/factory/exportHandlerFactory.go | 15 +- .../fullSyncRequestersContainerFactory.go | 75 +++++----- 42 files changed, 335 insertions(+), 524 deletions(-) delete mode 100644 factory/disabled/peersRatingHandler.go delete mode 100644 factory/disabled/peersRatingMonitor.go diff --git a/api/groups/nodeGroup.go b/api/groups/nodeGroup.go index 9225d110b32..997cfbc90e9 100644 --- a/api/groups/nodeGroup.go +++ b/api/groups/nodeGroup.go @@ -36,7 +36,7 @@ type nodeFacadeHandler interface { GetQueryHandler(name string) (debug.QueryHandler, error) GetEpochStartDataAPI(epoch uint32) (*common.EpochStartDataAPI, error) GetPeerInfo(pid string) ([]core.QueryP2PPeerInfo, error) - GetConnectedPeersRatings() string + GetConnectedPeersRatings() (string, 
error) IsInterfaceNil() bool } @@ -327,7 +327,19 @@ func (ng *nodeGroup) bootstrapMetrics(c *gin.Context) { // connectedPeersRatings returns the node's connected peers ratings func (ng *nodeGroup) connectedPeersRatings(c *gin.Context) { - ratings := ng.getFacade().GetConnectedPeersRatings() + ratings, err := ng.getFacade().GetConnectedPeersRatings() + if err != nil { + c.JSON( + http.StatusInternalServerError, + shared.GenericAPIResponse{ + Data: nil, + Error: err.Error(), + Code: shared.ReturnCodeInternalError, + }, + ) + return + } + c.JSON( http.StatusOK, shared.GenericAPIResponse{ diff --git a/api/groups/nodeGroup_test.go b/api/groups/nodeGroup_test.go index eb21fe40bd1..383dd099f8d 100644 --- a/api/groups/nodeGroup_test.go +++ b/api/groups/nodeGroup_test.go @@ -252,35 +252,41 @@ func TestBootstrapStatusMetrics_ShouldWork(t *testing.T) { assert.True(t, valuesFound) } -func TestBootstrapGetConnectedPeersRatings_ShouldWork(t *testing.T) { - providedRatings := map[string]string{ - "pid1": "100", - "pid2": "-50", - "pid3": "-5", - } - buff, _ := json.Marshal(providedRatings) - facade := mock.FacadeStub{ - GetConnectedPeersRatingsCalled: func() string { - return string(buff) - }, - } +func TestNodeGroup_GetConnectedPeersRatings(t *testing.T) { + t.Parallel() - nodeGroup, err := groups.NewNodeGroup(&facade) - require.NoError(t, err) + t.Run("should work", func(t *testing.T) { + t.Parallel() - ws := startWebServer(nodeGroup, "node", getNodeRoutesConfig()) + providedRatings := map[string]string{ + "pid1": "100", + "pid2": "-50", + "pid3": "-5", + } + buff, _ := json.Marshal(providedRatings) + facade := mock.FacadeStub{ + GetConnectedPeersRatingsCalled: func() (string, error) { + return string(buff), nil + }, + } - req, _ := http.NewRequest("GET", "/node/connected-peers-ratings", nil) - resp := httptest.NewRecorder() - ws.ServeHTTP(resp, req) + nodeGroup, err := groups.NewNodeGroup(&facade) + require.NoError(t, err) - response := &shared.GenericAPIResponse{} - 
loadResponse(resp.Body, response) - respMap, ok := response.Data.(map[string]interface{}) - assert.True(t, ok) - ratings, ok := respMap["ratings"].(string) - assert.True(t, ok) - assert.Equal(t, string(buff), ratings) + ws := startWebServer(nodeGroup, "node", getNodeRoutesConfig()) + + req, _ := http.NewRequest("GET", "/node/connected-peers-ratings", nil) + resp := httptest.NewRecorder() + ws.ServeHTTP(resp, req) + + response := &shared.GenericAPIResponse{} + loadResponse(resp.Body, response) + respMap, ok := response.Data.(map[string]interface{}) + assert.True(t, ok) + ratings, ok := respMap["ratings"].(string) + assert.True(t, ok) + assert.Equal(t, string(buff), ratings) + }) } func TestStatusMetrics_ShouldDisplayNonP2pMetrics(t *testing.T) { diff --git a/api/mock/facadeStub.go b/api/mock/facadeStub.go index 76e52faf1a9..8841e2244a4 100644 --- a/api/mock/facadeStub.go +++ b/api/mock/facadeStub.go @@ -42,7 +42,7 @@ type FacadeStub struct { GetValueForKeyCalled func(address string, key string, options api.AccountQueryOptions) (string, api.BlockInfo, error) GetGuardianDataCalled func(address string, options api.AccountQueryOptions) (api.GuardianData, api.BlockInfo, error) GetPeerInfoCalled func(pid string) ([]core.QueryP2PPeerInfo, error) - GetConnectedPeersRatingsCalled func() string + GetConnectedPeersRatingsCalled func() (string, error) GetEpochStartDataAPICalled func(epoch uint32) (*common.EpochStartDataAPI, error) GetThrottlerForEndpointCalled func(endpoint string) (core.Throttler, bool) GetUsernameCalled func(address string, options api.AccountQueryOptions) (string, api.BlockInfo, error) @@ -384,7 +384,7 @@ func (f *FacadeStub) GetPeerInfo(pid string) ([]core.QueryP2PPeerInfo, error) { } // GetConnectedPeersRatings - -func (f *FacadeStub) GetConnectedPeersRatings() string { +func (f *FacadeStub) GetConnectedPeersRatings() (string, error) { return f.GetConnectedPeersRatingsCalled() } diff --git a/api/shared/interface.go b/api/shared/interface.go index 
c9c69e3c009..806de769fa5 100644 --- a/api/shared/interface.go +++ b/api/shared/interface.go @@ -99,7 +99,7 @@ type FacadeHandler interface { GetQueryHandler(name string) (debug.QueryHandler, error) GetEpochStartDataAPI(epoch uint32) (*common.EpochStartDataAPI, error) GetPeerInfo(pid string) ([]core.QueryP2PPeerInfo, error) - GetConnectedPeersRatings() string + GetConnectedPeersRatings() (string, error) GetProof(rootHash string, address string) (*common.GetProofResponse, error) GetProofDataTrie(rootHash string, address string, key string) (*common.GetProofResponse, *common.GetProofResponse, error) GetProofCurrentRootHash(address string) (*common.GetProofResponse, error) diff --git a/dataRetriever/factory/requestersContainer/args.go b/dataRetriever/factory/requestersContainer/args.go index 6963c975696..96f09453cb9 100644 --- a/dataRetriever/factory/requestersContainer/args.go +++ b/dataRetriever/factory/requestersContainer/args.go @@ -21,7 +21,6 @@ type FactoryArgs struct { CurrentNetworkEpochProvider dataRetriever.CurrentNetworkEpochProviderHandler MainPreferredPeersHolder p2p.PreferredPeersHolderHandler FullArchivePreferredPeersHolder p2p.PreferredPeersHolderHandler - MainPeersRatingHandler dataRetriever.PeersRatingHandler - FullArchivePeersRatingHandler dataRetriever.PeersRatingHandler + PeersRatingHandler dataRetriever.PeersRatingHandler SizeCheckDelta uint32 } diff --git a/dataRetriever/factory/requestersContainer/baseRequestersContainerFactory.go b/dataRetriever/factory/requestersContainer/baseRequestersContainerFactory.go index f668eb895c8..2ec10054d8d 100644 --- a/dataRetriever/factory/requestersContainer/baseRequestersContainerFactory.go +++ b/dataRetriever/factory/requestersContainer/baseRequestersContainerFactory.go @@ -35,8 +35,7 @@ type baseRequestersContainerFactory struct { currentNetworkEpochProvider dataRetriever.CurrentNetworkEpochProviderHandler mainPreferredPeersHolder dataRetriever.PreferredPeersHolderHandler fullArchivePreferredPeersHolder 
dataRetriever.PreferredPeersHolderHandler - mainPeersRatingHandler dataRetriever.PeersRatingHandler - fullArchivePeersRatingHandler dataRetriever.PeersRatingHandler + peersRatingHandler dataRetriever.PeersRatingHandler numCrossShardPeers int numIntraShardPeers int numTotalPeers int @@ -71,11 +70,8 @@ func (brcf *baseRequestersContainerFactory) checkParams() error { if check.IfNil(brcf.fullArchivePreferredPeersHolder) { return fmt.Errorf("%w on full archive network", dataRetriever.ErrNilPreferredPeersHolder) } - if check.IfNil(brcf.mainPeersRatingHandler) { - return fmt.Errorf("%w on main network", dataRetriever.ErrNilPeersRatingHandler) - } - if check.IfNil(brcf.fullArchivePeersRatingHandler) { - return fmt.Errorf("%w on full archive network", dataRetriever.ErrNilPeersRatingHandler) + if check.IfNil(brcf.peersRatingHandler) { + return dataRetriever.ErrNilPeersRatingHandler } if brcf.numCrossShardPeers <= 0 { return fmt.Errorf("%w for numCrossShardPeers", dataRetriever.ErrInvalidValue) @@ -288,16 +284,15 @@ func (brcf *baseRequestersContainerFactory) createOneRequestSenderWithSpecifiedN FullArchivePreferredPeersHolder: brcf.fullArchivePreferredPeersHolder, TargetShardId: targetShardId, }, - Marshaller: brcf.marshaller, - Randomizer: brcf.intRandomizer, - PeerListCreator: peerListCreator, - NumIntraShardPeers: numIntraShardPeers, - NumCrossShardPeers: numCrossShardPeers, - NumFullHistoryPeers: brcf.numFullHistoryPeers, - CurrentNetworkEpochProvider: brcf.currentNetworkEpochProvider, - SelfShardIdProvider: brcf.shardCoordinator, - MainPeersRatingHandler: brcf.mainPeersRatingHandler, - FullArchivePeersRatingHandler: brcf.fullArchivePeersRatingHandler, + Marshaller: brcf.marshaller, + Randomizer: brcf.intRandomizer, + PeerListCreator: peerListCreator, + NumIntraShardPeers: numIntraShardPeers, + NumCrossShardPeers: numCrossShardPeers, + NumFullHistoryPeers: brcf.numFullHistoryPeers, + CurrentNetworkEpochProvider: brcf.currentNetworkEpochProvider, + SelfShardIdProvider: 
brcf.shardCoordinator, + PeersRatingHandler: brcf.peersRatingHandler, } return topicsender.NewTopicRequestSender(arg) } diff --git a/dataRetriever/factory/requestersContainer/metaRequestersContainerFactory.go b/dataRetriever/factory/requestersContainer/metaRequestersContainerFactory.go index 59de81ae816..c718f5b22a1 100644 --- a/dataRetriever/factory/requestersContainer/metaRequestersContainerFactory.go +++ b/dataRetriever/factory/requestersContainer/metaRequestersContainerFactory.go @@ -38,8 +38,7 @@ func NewMetaRequestersContainerFactory( currentNetworkEpochProvider: args.CurrentNetworkEpochProvider, mainPreferredPeersHolder: args.MainPreferredPeersHolder, fullArchivePreferredPeersHolder: args.FullArchivePreferredPeersHolder, - mainPeersRatingHandler: args.MainPeersRatingHandler, - fullArchivePeersRatingHandler: args.FullArchivePeersRatingHandler, + peersRatingHandler: args.PeersRatingHandler, numCrossShardPeers: int(args.RequesterConfig.NumCrossShardPeers), numIntraShardPeers: int(numIntraShardPeers), numTotalPeers: int(args.RequesterConfig.NumTotalPeers), diff --git a/dataRetriever/factory/requestersContainer/metaRequestersContainerFactory_test.go b/dataRetriever/factory/requestersContainer/metaRequestersContainerFactory_test.go index 2f373ecc2d7..e68f4c7e5a5 100644 --- a/dataRetriever/factory/requestersContainer/metaRequestersContainerFactory_test.go +++ b/dataRetriever/factory/requestersContainer/metaRequestersContainerFactory_test.go @@ -89,26 +89,15 @@ func TestNewMetaRequestersContainerFactory_NilFullArchivePreferredPeersHolderSho assert.True(t, errors.Is(err, dataRetriever.ErrNilPreferredPeersHolder)) } -func TestNewMetaRequestersContainerFactory_NilMainPeersRatingHandlerShouldErr(t *testing.T) { +func TestNewMetaRequestersContainerFactory_NilPeersRatingHandlerShouldErr(t *testing.T) { t.Parallel() args := getArguments() - args.MainPeersRatingHandler = nil + args.PeersRatingHandler = nil rcf, err := 
requesterscontainer.NewMetaRequestersContainerFactory(args) assert.Nil(t, rcf) - assert.True(t, errors.Is(err, dataRetriever.ErrNilPeersRatingHandler)) -} - -func TestNewMetaRequestersContainerFactory_NilFullArchivePeersRatingHandlerShouldErr(t *testing.T) { - t.Parallel() - - args := getArguments() - args.FullArchivePeersRatingHandler = nil - rcf, err := requesterscontainer.NewMetaRequestersContainerFactory(args) - - assert.Nil(t, rcf) - assert.True(t, errors.Is(err, dataRetriever.ErrNilPeersRatingHandler)) + assert.Equal(t, dataRetriever.ErrNilPeersRatingHandler, err) } func TestNewMetaRequestersContainerFactory_NilUint64SliceConverterShouldErr(t *testing.T) { diff --git a/dataRetriever/factory/requestersContainer/shardRequestersContainerFactory.go b/dataRetriever/factory/requestersContainer/shardRequestersContainerFactory.go index adc945c35fb..d7468d5302d 100644 --- a/dataRetriever/factory/requestersContainer/shardRequestersContainerFactory.go +++ b/dataRetriever/factory/requestersContainer/shardRequestersContainerFactory.go @@ -37,8 +37,7 @@ func NewShardRequestersContainerFactory( currentNetworkEpochProvider: args.CurrentNetworkEpochProvider, mainPreferredPeersHolder: args.MainPreferredPeersHolder, fullArchivePreferredPeersHolder: args.FullArchivePreferredPeersHolder, - mainPeersRatingHandler: args.MainPeersRatingHandler, - fullArchivePeersRatingHandler: args.FullArchivePeersRatingHandler, + peersRatingHandler: args.PeersRatingHandler, numCrossShardPeers: int(args.RequesterConfig.NumCrossShardPeers), numIntraShardPeers: int(numIntraShardPeers), numTotalPeers: int(args.RequesterConfig.NumTotalPeers), diff --git a/dataRetriever/factory/requestersContainer/shardRequestersContainerFactory_test.go b/dataRetriever/factory/requestersContainer/shardRequestersContainerFactory_test.go index 42c8300184b..e4c94491487 100644 --- a/dataRetriever/factory/requestersContainer/shardRequestersContainerFactory_test.go +++ 
b/dataRetriever/factory/requestersContainer/shardRequestersContainerFactory_test.go @@ -123,22 +123,11 @@ func TestNewShardRequestersContainerFactory_NilFullArchivePreferredPeersHolderSh assert.True(t, errors.Is(err, dataRetriever.ErrNilPreferredPeersHolder)) } -func TestNewShardRequestersContainerFactory_NilMainPeersRatingHandlerShouldErr(t *testing.T) { +func TestNewShardRequestersContainerFactory_NilPeersRatingHandlerShouldErr(t *testing.T) { t.Parallel() args := getArguments() - args.MainPeersRatingHandler = nil - rcf, err := requesterscontainer.NewShardRequestersContainerFactory(args) - - assert.Nil(t, rcf) - assert.True(t, errors.Is(err, dataRetriever.ErrNilPeersRatingHandler)) -} - -func TestNewShardRequestersContainerFactory_NilFullArchivePeersRatingHandlerShouldErr(t *testing.T) { - t.Parallel() - - args := getArguments() - args.FullArchivePeersRatingHandler = nil + args.PeersRatingHandler = nil rcf, err := requesterscontainer.NewShardRequestersContainerFactory(args) assert.Nil(t, rcf) @@ -286,8 +275,7 @@ func getArguments() requesterscontainer.FactoryArgs { CurrentNetworkEpochProvider: &mock.CurrentNetworkEpochProviderStub{}, MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - MainPeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, - FullArchivePeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, + PeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, SizeCheckDelta: 0, } } diff --git a/dataRetriever/topicSender/topicRequestSender.go b/dataRetriever/topicSender/topicRequestSender.go index 0662f9240ed..4358cfe5c1d 100644 --- a/dataRetriever/topicSender/topicRequestSender.go +++ b/dataRetriever/topicSender/topicRequestSender.go @@ -17,16 +17,15 @@ var _ dataRetriever.TopicRequestSender = (*topicRequestSender)(nil) // ArgTopicRequestSender is the argument structure used to create new topic request sender instance type ArgTopicRequestSender struct { ArgBaseTopicSender - Marshaller 
marshal.Marshalizer - Randomizer dataRetriever.IntRandomizer - PeerListCreator dataRetriever.PeerListCreator - NumIntraShardPeers int - NumCrossShardPeers int - NumFullHistoryPeers int - CurrentNetworkEpochProvider dataRetriever.CurrentNetworkEpochProviderHandler - SelfShardIdProvider dataRetriever.SelfShardIDProvider - MainPeersRatingHandler dataRetriever.PeersRatingHandler - FullArchivePeersRatingHandler dataRetriever.PeersRatingHandler + Marshaller marshal.Marshalizer + Randomizer dataRetriever.IntRandomizer + PeerListCreator dataRetriever.PeerListCreator + NumIntraShardPeers int + NumCrossShardPeers int + NumFullHistoryPeers int + CurrentNetworkEpochProvider dataRetriever.CurrentNetworkEpochProviderHandler + SelfShardIdProvider dataRetriever.SelfShardIDProvider + PeersRatingHandler dataRetriever.PeersRatingHandler } type topicRequestSender struct { @@ -39,8 +38,7 @@ type topicRequestSender struct { numCrossShardPeers int numFullHistoryPeers int currentNetworkEpochProviderHandler dataRetriever.CurrentNetworkEpochProviderHandler - mainPeersRatingHandler dataRetriever.PeersRatingHandler - fullArchivePeersRatingHandler dataRetriever.PeersRatingHandler + peersRatingHandler dataRetriever.PeersRatingHandler selfShardId uint32 } @@ -60,8 +58,7 @@ func NewTopicRequestSender(args ArgTopicRequestSender) (*topicRequestSender, err numCrossShardPeers: args.NumCrossShardPeers, numFullHistoryPeers: args.NumFullHistoryPeers, currentNetworkEpochProviderHandler: args.CurrentNetworkEpochProvider, - mainPeersRatingHandler: args.MainPeersRatingHandler, - fullArchivePeersRatingHandler: args.FullArchivePeersRatingHandler, + peersRatingHandler: args.PeersRatingHandler, selfShardId: args.SelfShardIdProvider.SelfId(), }, nil } @@ -83,11 +80,8 @@ func checkArgs(args ArgTopicRequestSender) error { if check.IfNil(args.CurrentNetworkEpochProvider) { return dataRetriever.ErrNilCurrentNetworkEpochProvider } - if check.IfNil(args.MainPeersRatingHandler) { - return fmt.Errorf("%w on main 
network", dataRetriever.ErrNilPeersRatingHandler) - } - if check.IfNil(args.FullArchivePeersRatingHandler) { - return fmt.Errorf("%w on full archive network", dataRetriever.ErrNilPeersRatingHandler) + if check.IfNil(args.PeersRatingHandler) { + return dataRetriever.ErrNilPeersRatingHandler } if check.IfNil(args.SelfShardIdProvider) { return dataRetriever.ErrNilSelfShardIDProvider @@ -134,8 +128,7 @@ func (trs *topicRequestSender) SendOnRequestTopic(rd *dataRetriever.RequestData, buff, trs.numCrossShardPeers, core.CrossShardPeer.String(), - trs.mainMessenger, - trs.mainPeersRatingHandler) + trs.mainMessenger) intraPeers = trs.peerListCreator.IntraShardPeerList() preferredPeer = trs.getPreferredPeer(trs.selfShardId) @@ -146,8 +139,7 @@ func (trs *topicRequestSender) SendOnRequestTopic(rd *dataRetriever.RequestData, buff, trs.numIntraShardPeers, core.IntraShardPeer.String(), - trs.mainMessenger, - trs.mainPeersRatingHandler) + trs.mainMessenger) } else { preferredPeer := trs.getPreferredFullArchivePeer() fullHistoryPeers = trs.fullArchiveMessenger.ConnectedPeers() @@ -159,8 +151,7 @@ func (trs *topicRequestSender) SendOnRequestTopic(rd *dataRetriever.RequestData, buff, trs.numFullHistoryPeers, core.FullHistoryPeer.String(), - trs.fullArchiveMessenger, - trs.fullArchivePeersRatingHandler) + trs.fullArchiveMessenger) } trs.callDebugHandler(originalHashes, numSentIntra, numSentCross) @@ -201,7 +192,6 @@ func (trs *topicRequestSender) sendOnTopic( maxToSend int, peerType string, messenger p2p.MessageHandler, - peersRatingHandler dataRetriever.PeersRatingHandler, ) int { if len(peerList) == 0 || maxToSend == 0 { return 0 @@ -209,7 +199,7 @@ func (trs *topicRequestSender) sendOnTopic( histogramMap := make(map[string]int) - topRatedPeersList := peersRatingHandler.GetTopRatedPeersFromList(peerList, maxToSend) + topRatedPeersList := trs.peersRatingHandler.GetTopRatedPeersFromList(peerList, maxToSend) indexes := createIndexList(len(topRatedPeersList)) shuffledIndexes := 
random.FisherYatesShuffle(indexes, trs.randomizer) @@ -227,7 +217,7 @@ func (trs *topicRequestSender) sendOnTopic( if err != nil { continue } - peersRatingHandler.DecreaseRating(peer) + trs.peersRatingHandler.DecreaseRating(peer) logData = append(logData, peerType) logData = append(logData, peer.Pretty()) diff --git a/dataRetriever/topicSender/topicRequestSender_test.go b/dataRetriever/topicSender/topicRequestSender_test.go index aa83c03f2b3..cff654b3fe4 100644 --- a/dataRetriever/topicSender/topicRequestSender_test.go +++ b/dataRetriever/topicSender/topicRequestSender_test.go @@ -40,17 +40,16 @@ func createMockArgBaseTopicSender() topicsender.ArgBaseTopicSender { func createMockArgTopicRequestSender() topicsender.ArgTopicRequestSender { return topicsender.ArgTopicRequestSender{ - ArgBaseTopicSender: createMockArgBaseTopicSender(), - Marshaller: &mock.MarshalizerMock{}, - Randomizer: &mock.IntRandomizerStub{}, - PeerListCreator: &mock.PeerListCreatorStub{}, - NumIntraShardPeers: 2, - NumCrossShardPeers: 2, - NumFullHistoryPeers: 2, - CurrentNetworkEpochProvider: &mock.CurrentNetworkEpochProviderStub{}, - SelfShardIdProvider: mock.NewMultipleShardsCoordinatorMock(), - MainPeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, - FullArchivePeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, + ArgBaseTopicSender: createMockArgBaseTopicSender(), + Marshaller: &mock.MarshalizerMock{}, + Randomizer: &mock.IntRandomizerStub{}, + PeerListCreator: &mock.PeerListCreatorStub{}, + NumIntraShardPeers: 2, + NumCrossShardPeers: 2, + NumFullHistoryPeers: 2, + CurrentNetworkEpochProvider: &mock.CurrentNetworkEpochProviderStub{}, + SelfShardIdProvider: mock.NewMultipleShardsCoordinatorMock(), + PeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, } } @@ -138,23 +137,14 @@ func TestNewTopicRequestSender(t *testing.T) { assert.True(t, check.IfNil(trs)) assert.Equal(t, dataRetriever.ErrNilCurrentNetworkEpochProvider, err) }) - t.Run("nil MainPeersRatingHandler should error", 
func(t *testing.T) { + t.Run("nil PeersRatingHandler should error", func(t *testing.T) { t.Parallel() arg := createMockArgTopicRequestSender() - arg.MainPeersRatingHandler = nil + arg.PeersRatingHandler = nil trs, err := topicsender.NewTopicRequestSender(arg) assert.True(t, check.IfNil(trs)) - assert.True(t, errors.Is(err, dataRetriever.ErrNilPeersRatingHandler)) - }) - t.Run("nil FullArchivePeersRatingHandler should error", func(t *testing.T) { - t.Parallel() - - arg := createMockArgTopicRequestSender() - arg.FullArchivePeersRatingHandler = nil - trs, err := topicsender.NewTopicRequestSender(arg) - assert.True(t, check.IfNil(trs)) - assert.True(t, errors.Is(err, dataRetriever.ErrNilPeersRatingHandler)) + assert.Equal(t, dataRetriever.ErrNilPeersRatingHandler, err) }) t.Run("nil SelfShardIdProvider should error", func(t *testing.T) { t.Parallel() @@ -291,7 +281,7 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { }, } decreaseCalledCounter := 0 - arg.MainPeersRatingHandler = &p2pmocks.PeersRatingHandlerStub{ + arg.PeersRatingHandler = &p2pmocks.PeersRatingHandlerStub{ DecreaseRatingCalled: func(pid core.PeerID) { decreaseCalledCounter++ if !bytes.Equal(pid.Bytes(), pID1.Bytes()) && !bytes.Equal(pid.Bytes(), pID2.Bytes()) { @@ -339,13 +329,8 @@ func TestTopicResolverSender_SendOnRequestTopic(t *testing.T) { return false }, } - arg.MainPeersRatingHandler = &p2pmocks.PeersRatingHandlerStub{ - DecreaseRatingCalled: func(pid core.PeerID) { - assert.Fail(t, "should have not been called") - }, - } decreaseCalledCounter := 0 - arg.FullArchivePeersRatingHandler = &p2pmocks.PeersRatingHandlerStub{ + arg.PeersRatingHandler = &p2pmocks.PeersRatingHandlerStub{ DecreaseRatingCalled: func(pid core.PeerID) { decreaseCalledCounter++ assert.Equal(t, pIDfullHistory, pid) diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index 542cf6bb66f..faf51dc185c 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ 
-1255,8 +1255,7 @@ func (e *epochStartBootstrap) createRequestHandler() error { CurrentNetworkEpochProvider: disabled.NewCurrentNetworkEpochProviderHandler(), MainPreferredPeersHolder: disabled.NewPreferredPeersHolder(), FullArchivePreferredPeersHolder: disabled.NewPreferredPeersHolder(), - MainPeersRatingHandler: disabled.NewDisabledPeersRatingHandler(), - FullArchivePeersRatingHandler: disabled.NewDisabledPeersRatingHandler(), + PeersRatingHandler: disabled.NewDisabledPeersRatingHandler(), SizeCheckDelta: 0, } requestersFactory, err := requesterscontainer.NewMetaRequestersContainerFactory(requestersContainerArgs) diff --git a/facade/initial/initialNodeFacade.go b/facade/initial/initialNodeFacade.go index 05ebfd68ba9..341654aee9f 100644 --- a/facade/initial/initialNodeFacade.go +++ b/facade/initial/initialNodeFacade.go @@ -240,9 +240,9 @@ func (inf *initialNodeFacade) GetPeerInfo(_ string) ([]core.QueryP2PPeerInfo, er return nil, errNodeStarting } -// GetConnectedPeersRatings returns empty string -func (inf *initialNodeFacade) GetConnectedPeersRatings() string { - return "" +// GetConnectedPeersRatings returns empty string and error +func (inf *initialNodeFacade) GetConnectedPeersRatings() (string, error) { + return "", errNodeStarting } // GetEpochStartDataAPI returns nil and error diff --git a/facade/initial/initialNodeFacade_test.go b/facade/initial/initialNodeFacade_test.go index 70ebf524359..19c88e17030 100644 --- a/facade/initial/initialNodeFacade_test.go +++ b/facade/initial/initialNodeFacade_test.go @@ -255,8 +255,9 @@ func TestInitialNodeFacade_AllMethodsShouldNotPanic(t *testing.T) { assert.Nil(t, stakeValue) assert.Equal(t, errNodeStarting, err) - ratings := inf.GetConnectedPeersRatings() + ratings, err := inf.GetConnectedPeersRatings() assert.Equal(t, "", ratings) + assert.Equal(t, errNodeStarting, err) epochStartData, err := inf.GetEpochStartDataAPI(0) assert.Nil(t, epochStartData) diff --git a/facade/interface.go b/facade/interface.go index 
d3d7f883fc8..7426767bd9c 100644 --- a/facade/interface.go +++ b/facade/interface.go @@ -93,7 +93,7 @@ type NodeHandler interface { GetQueryHandler(name string) (debug.QueryHandler, error) GetPeerInfo(pid string) ([]core.QueryP2PPeerInfo, error) - GetConnectedPeersRatings() string + GetConnectedPeersRatings() (string, error) GetEpochStartDataAPI(epoch uint32) (*common.EpochStartDataAPI, error) diff --git a/facade/mock/nodeStub.go b/facade/mock/nodeStub.go index f4b4d643ebf..b4263791b7f 100644 --- a/facade/mock/nodeStub.go +++ b/facade/mock/nodeStub.go @@ -38,7 +38,7 @@ type NodeStub struct { GetValueForKeyCalled func(address string, key string, options api.AccountQueryOptions) (string, api.BlockInfo, error) GetGuardianDataCalled func(address string, options api.AccountQueryOptions) (api.GuardianData, api.BlockInfo, error) GetPeerInfoCalled func(pid string) ([]core.QueryP2PPeerInfo, error) - GetConnectedPeersRatingsCalled func() string + GetConnectedPeersRatingsCalled func() (string, error) GetEpochStartDataAPICalled func(epoch uint32) (*common.EpochStartDataAPI, error) GetUsernameCalled func(address string, options api.AccountQueryOptions) (string, api.BlockInfo, error) GetCodeHashCalled func(address string, options api.AccountQueryOptions) ([]byte, api.BlockInfo, error) @@ -216,12 +216,12 @@ func (ns *NodeStub) GetPeerInfo(pid string) ([]core.QueryP2PPeerInfo, error) { } // GetConnectedPeersRatings - -func (ns *NodeStub) GetConnectedPeersRatings() string { +func (ns *NodeStub) GetConnectedPeersRatings() (string, error) { if ns.GetConnectedPeersRatingsCalled != nil { return ns.GetConnectedPeersRatingsCalled() } - return "" + return "", nil } // GetEpochStartDataAPI - diff --git a/facade/nodeFacade.go b/facade/nodeFacade.go index c577b882e7d..200eb6cc1a4 100644 --- a/facade/nodeFacade.go +++ b/facade/nodeFacade.go @@ -466,8 +466,8 @@ func (nf *nodeFacade) GetPeerInfo(pid string) ([]core.QueryP2PPeerInfo, error) { return nf.node.GetPeerInfo(pid) } -// 
GetConnectedPeersRatings returns the connected peers ratings -func (nf *nodeFacade) GetConnectedPeersRatings() string { +// GetConnectedPeersRatings returns the connected peers ratings on the main network +func (nf *nodeFacade) GetConnectedPeersRatings() (string, error) { return nf.node.GetConnectedPeersRatings() } diff --git a/facade/nodeFacade_test.go b/facade/nodeFacade_test.go index 71505fa6ddc..abf9ce35a59 100644 --- a/facade/nodeFacade_test.go +++ b/facade/nodeFacade_test.go @@ -2118,13 +2118,14 @@ func TestNodeFacade_GetConnectedPeersRatings(t *testing.T) { providedResponse := "ratings" args := createMockArguments() args.Node = &mock.NodeStub{ - GetConnectedPeersRatingsCalled: func() string { - return providedResponse + GetConnectedPeersRatingsCalled: func() (string, error) { + return providedResponse, nil }, } nf, _ := NewNodeFacade(args) - response := nf.GetConnectedPeersRatings() + response, err := nf.GetConnectedPeersRatings() + require.NoError(t, err) require.Equal(t, providedResponse, response) } diff --git a/factory/disabled/peersRatingHandler.go b/factory/disabled/peersRatingHandler.go deleted file mode 100644 index 4bccd2bd7da..00000000000 --- a/factory/disabled/peersRatingHandler.go +++ /dev/null @@ -1,29 +0,0 @@ -package disabled - -import "github.com/multiversx/mx-chain-core-go/core" - -type peersRatingHandler struct { -} - -// NewPeersRatingHandler returns a new disabled PeersRatingHandler implementation -func NewPeersRatingHandler() *peersRatingHandler { - return &peersRatingHandler{} -} - -// IncreaseRating does nothing as it is disabled -func (handler *peersRatingHandler) IncreaseRating(_ core.PeerID) { -} - -// DecreaseRating does nothing as it is disabled -func (handler *peersRatingHandler) DecreaseRating(_ core.PeerID) { -} - -// GetTopRatedPeersFromList returns the provided peers list as it is disabled -func (handler *peersRatingHandler) GetTopRatedPeersFromList(peers []core.PeerID, _ int) []core.PeerID { - return peers -} - -// 
IsInterfaceNil returns true if there is no value under the interface -func (handler *peersRatingHandler) IsInterfaceNil() bool { - return handler == nil -} diff --git a/factory/disabled/peersRatingMonitor.go b/factory/disabled/peersRatingMonitor.go deleted file mode 100644 index c846d4dde4d..00000000000 --- a/factory/disabled/peersRatingMonitor.go +++ /dev/null @@ -1,19 +0,0 @@ -package disabled - -type peersRatingMonitor struct { -} - -// NewPeersRatingMonitor returns a new disabled PeersRatingMonitor implementation -func NewPeersRatingMonitor() *peersRatingMonitor { - return &peersRatingMonitor{} -} - -// GetConnectedPeersRatings returns an empty string as it is disabled -func (monitor *peersRatingMonitor) GetConnectedPeersRatings() string { - return "" -} - -// IsInterfaceNil returns true if there is no value under the interface -func (monitor *peersRatingMonitor) IsInterfaceNil() bool { - return monitor == nil -} diff --git a/factory/interface.go b/factory/interface.go index 4221af84544..3579ea08535 100644 --- a/factory/interface.go +++ b/factory/interface.go @@ -248,8 +248,6 @@ type NetworkComponentsHolder interface { PeersRatingHandler() p2p.PeersRatingHandler PeersRatingMonitor() p2p.PeersRatingMonitor FullArchiveNetworkMessenger() p2p.Messenger - FullArchivePeersRatingHandler() p2p.PeersRatingHandler - FullArchivePeersRatingMonitor() p2p.PeersRatingMonitor FullArchivePreferredPeersHolderHandler() PreferredPeersHolderHandler IsInterfaceNil() bool } diff --git a/factory/mock/networkComponentsMock.go b/factory/mock/networkComponentsMock.go index fed47d83ffc..a5677c75db9 100644 --- a/factory/mock/networkComponentsMock.go +++ b/factory/mock/networkComponentsMock.go @@ -8,17 +8,15 @@ import ( // NetworkComponentsMock - type NetworkComponentsMock struct { - Messenger p2p.Messenger - InputAntiFlood factory.P2PAntifloodHandler - OutputAntiFlood factory.P2PAntifloodHandler - PeerBlackList process.PeerBlackListCacher - PreferredPeersHolder 
factory.PreferredPeersHolderHandler - PeersRatingHandlerField p2p.PeersRatingHandler - PeersRatingMonitorField p2p.PeersRatingMonitor - FullArchiveNetworkMessengerField p2p.Messenger - FullArchivePeersRatingHandlerField p2p.PeersRatingHandler - FullArchivePeersRatingMonitorField p2p.PeersRatingMonitor - FullArchivePreferredPeersHolder factory.PreferredPeersHolderHandler + Messenger p2p.Messenger + InputAntiFlood factory.P2PAntifloodHandler + OutputAntiFlood factory.P2PAntifloodHandler + PeerBlackList process.PeerBlackListCacher + PreferredPeersHolder factory.PreferredPeersHolderHandler + PeersRatingHandlerField p2p.PeersRatingHandler + PeersRatingMonitorField p2p.PeersRatingMonitor + FullArchiveNetworkMessengerField p2p.Messenger + FullArchivePreferredPeersHolder factory.PreferredPeersHolderHandler } // PubKeyCacher - @@ -86,16 +84,6 @@ func (ncm *NetworkComponentsMock) FullArchiveNetworkMessenger() p2p.Messenger { return ncm.FullArchiveNetworkMessengerField } -// FullArchivePeersRatingHandler - -func (ncm *NetworkComponentsMock) FullArchivePeersRatingHandler() p2p.PeersRatingHandler { - return ncm.FullArchivePeersRatingHandlerField -} - -// FullArchivePeersRatingMonitor - -func (ncm *NetworkComponentsMock) FullArchivePeersRatingMonitor() p2p.PeersRatingMonitor { - return ncm.FullArchivePeersRatingMonitorField -} - // FullArchivePreferredPeersHolderHandler - func (ncm *NetworkComponentsMock) FullArchivePreferredPeersHolderHandler() factory.PreferredPeersHolderHandler { return ncm.FullArchivePreferredPeersHolder diff --git a/factory/network/networkComponents.go b/factory/network/networkComponents.go index b153d86c62c..713cd182b5e 100644 --- a/factory/network/networkComponents.go +++ b/factory/network/networkComponents.go @@ -60,8 +60,6 @@ type networkComponentsFactory struct { type networkComponentsHolder struct { netMessenger p2p.Messenger - peersRatingHandler p2p.PeersRatingHandler - peersRatingMonitor p2p.PeersRatingMonitor preferredPeersHolder 
p2p.PreferredPeersHolderHandler } @@ -69,6 +67,8 @@ type networkComponentsHolder struct { type networkComponents struct { mainNetworkHolder networkComponentsHolder fullArchiveNetworkHolder networkComponentsHolder + peersRatingHandler p2p.PeersRatingHandler + peersRatingMonitor p2p.PeersRatingMonitor inputAntifloodHandler factory.P2PAntifloodHandler outputAntifloodHandler factory.P2PAntifloodHandler pubKeyTimeCacher process.TimeCacher @@ -121,12 +121,17 @@ func NewNetworkComponentsFactory( // Create creates and returns the network components func (ncf *networkComponentsFactory) Create() (*networkComponents, error) { - mainNetworkComp, err := ncf.createMainNetworkHolder() + peersRatingHandler, peersRatingMonitor, err := ncf.createPeersRatingComponents() if err != nil { return nil, err } - fullArchiveNetworkComp, err := ncf.createFullArchiveNetworkHolder() + mainNetworkComp, err := ncf.createMainNetworkHolder(peersRatingHandler) + if err != nil { + return nil, err + } + + fullArchiveNetworkComp, err := ncf.createFullArchiveNetworkHolder(peersRatingHandler) if err != nil { return nil, err } @@ -158,6 +163,8 @@ func (ncf *networkComponentsFactory) Create() (*networkComponents, error) { return &networkComponents{ mainNetworkHolder: mainNetworkComp, fullArchiveNetworkHolder: fullArchiveNetworkComp, + peersRatingHandler: peersRatingHandler, + peersRatingMonitor: peersRatingMonitor, inputAntifloodHandler: inputAntifloodHandler, outputAntifloodHandler: outputAntifloodHandler, pubKeyTimeCacher: antiFloodComponents.PubKeysCacher, @@ -228,6 +235,7 @@ func (ncf *networkComponentsFactory) createPeerHonestyHandler( func (ncf *networkComponentsFactory) createNetworkHolder( p2pConfig p2pConfig.P2PConfig, logger p2p.Logger, + peersRatingHandler p2p.PeersRatingHandler, ) (networkComponentsHolder, error) { peersHolder, err := p2pFactory.NewPeersHolder(ncf.preferredPeersSlices) @@ -235,26 +243,6 @@ func (ncf *networkComponentsFactory) createNetworkHolder( return 
networkComponentsHolder{}, err } - peersRatingCfg := ncf.mainConfig.PeersRatingConfig - topRatedCache, err := cache.NewLRUCache(peersRatingCfg.TopRatedCacheCapacity) - if err != nil { - return networkComponentsHolder{}, err - } - badRatedCache, err := cache.NewLRUCache(peersRatingCfg.BadRatedCacheCapacity) - if err != nil { - return networkComponentsHolder{}, err - } - - argsPeersRatingHandler := p2pFactory.ArgPeersRatingHandler{ - TopRatedCache: topRatedCache, - BadRatedCache: badRatedCache, - Logger: logger, - } - peersRatingHandler, err := p2pFactory.NewPeersRatingHandler(argsPeersRatingHandler) - if err != nil { - return networkComponentsHolder{}, err - } - argsMessenger := p2pFactory.ArgsNetworkMessenger{ ListenAddress: ncf.listenAddress, Marshaller: ncf.marshalizer, @@ -273,42 +261,62 @@ func (ncf *networkComponentsFactory) createNetworkHolder( return networkComponentsHolder{}, err } - argsPeersRatingMonitor := p2pFactory.ArgPeersRatingMonitor{ - TopRatedCache: topRatedCache, - BadRatedCache: badRatedCache, - ConnectionsProvider: networkMessenger, - } - peersRatingMonitor, err := p2pFactory.NewPeersRatingMonitor(argsPeersRatingMonitor) - if err != nil { - return networkComponentsHolder{}, err - } - return networkComponentsHolder{ netMessenger: networkMessenger, - peersRatingHandler: peersRatingHandler, - peersRatingMonitor: peersRatingMonitor, preferredPeersHolder: peersHolder, }, nil } -func (ncf *networkComponentsFactory) createMainNetworkHolder() (networkComponentsHolder, error) { +func (ncf *networkComponentsFactory) createMainNetworkHolder(peersRatingHandler p2p.PeersRatingHandler) (networkComponentsHolder, error) { loggerInstance := logger.GetOrCreate("main/p2p") - return ncf.createNetworkHolder(ncf.mainP2PConfig, loggerInstance) + return ncf.createNetworkHolder(ncf.mainP2PConfig, loggerInstance, peersRatingHandler) } -func (ncf *networkComponentsFactory) createFullArchiveNetworkHolder() (networkComponentsHolder, error) { +func (ncf 
*networkComponentsFactory) createFullArchiveNetworkHolder(peersRatingHandler p2p.PeersRatingHandler) (networkComponentsHolder, error) { if ncf.nodeOperationMode != p2p.FullArchiveMode { return networkComponentsHolder{ netMessenger: p2pDisabled.NewNetworkMessenger(), - peersRatingHandler: disabled.NewPeersRatingHandler(), - peersRatingMonitor: disabled.NewPeersRatingMonitor(), preferredPeersHolder: disabled.NewPreferredPeersHolder(), }, nil } loggerInstance := logger.GetOrCreate("full-archive/p2p") - return ncf.createNetworkHolder(ncf.fullArchiveP2PConfig, loggerInstance) + return ncf.createNetworkHolder(ncf.fullArchiveP2PConfig, loggerInstance, peersRatingHandler) +} + +func (ncf *networkComponentsFactory) createPeersRatingComponents() (p2p.PeersRatingHandler, p2p.PeersRatingMonitor, error) { + peersRatingCfg := ncf.mainConfig.PeersRatingConfig + topRatedCache, err := cache.NewLRUCache(peersRatingCfg.TopRatedCacheCapacity) + if err != nil { + return nil, nil, err + } + badRatedCache, err := cache.NewLRUCache(peersRatingCfg.BadRatedCacheCapacity) + if err != nil { + return nil, nil, err + } + + peersRatingLogger := logger.GetOrCreate("peersRating") + argsPeersRatingHandler := p2pFactory.ArgPeersRatingHandler{ + TopRatedCache: topRatedCache, + BadRatedCache: badRatedCache, + Logger: peersRatingLogger, + } + peersRatingHandler, err := p2pFactory.NewPeersRatingHandler(argsPeersRatingHandler) + if err != nil { + return nil, nil, err + } + + argsPeersRatingMonitor := p2pFactory.ArgPeersRatingMonitor{ + TopRatedCache: topRatedCache, + BadRatedCache: badRatedCache, + } + peersRatingMonitor, err := p2pFactory.NewPeersRatingMonitor(argsPeersRatingMonitor) + if err != nil { + return nil, nil, err + } + + return peersRatingHandler, peersRatingMonitor, nil } // Close closes all underlying components that need closing diff --git a/factory/network/networkComponentsHandler.go b/factory/network/networkComponentsHandler.go index d76a18cf322..eda76bb8f28 100644 --- 
a/factory/network/networkComponentsHandler.go +++ b/factory/network/networkComponentsHandler.go @@ -82,22 +82,16 @@ func (mnc *managedNetworkComponents) CheckSubcomponents() error { if check.IfNil(mnc.mainNetworkHolder.netMessenger) { return fmt.Errorf("%w %s", errors.ErrNilMessenger, errorOnMainNetworkString) } - if check.IfNil(mnc.mainNetworkHolder.peersRatingHandler) { - return fmt.Errorf("%w %s", errors.ErrNilPeersRatingHandler, errorOnMainNetworkString) + if check.IfNil(mnc.peersRatingHandler) { + return errors.ErrNilPeersRatingHandler } - if check.IfNil(mnc.mainNetworkHolder.peersRatingMonitor) { - return fmt.Errorf("%w %s", errors.ErrNilPeersRatingMonitor, errorOnMainNetworkString) + if check.IfNil(mnc.peersRatingMonitor) { + return errors.ErrNilPeersRatingMonitor } if check.IfNil(mnc.fullArchiveNetworkHolder.netMessenger) { return fmt.Errorf("%w %s", errors.ErrNilMessenger, errorOnFullArchiveNetworkString) } - if check.IfNil(mnc.fullArchiveNetworkHolder.peersRatingHandler) { - return fmt.Errorf("%w %s", errors.ErrNilPeersRatingHandler, errorOnFullArchiveNetworkString) - } - if check.IfNil(mnc.fullArchiveNetworkHolder.peersRatingMonitor) { - return fmt.Errorf("%w %s", errors.ErrNilPeersRatingMonitor, errorOnFullArchiveNetworkString) - } if check.IfNil(mnc.inputAntifloodHandler) { return errors.ErrNilInputAntiFloodHandler @@ -208,7 +202,7 @@ func (mnc *managedNetworkComponents) PeersRatingHandler() p2p.PeersRatingHandler return nil } - return mnc.mainNetworkHolder.peersRatingHandler + return mnc.peersRatingHandler } // PeersRatingMonitor returns the peers rating monitor of the main network @@ -220,7 +214,7 @@ func (mnc *managedNetworkComponents) PeersRatingMonitor() p2p.PeersRatingMonitor return nil } - return mnc.mainNetworkHolder.peersRatingMonitor + return mnc.peersRatingMonitor } // FullArchiveNetworkMessenger returns the p2p messenger of the full archive network @@ -235,30 +229,6 @@ func (mnc *managedNetworkComponents) FullArchiveNetworkMessenger() 
p2p.Messenger return mnc.fullArchiveNetworkHolder.netMessenger } -// FullArchivePeersRatingHandler returns the peers rating handler of the full archive network -func (mnc *managedNetworkComponents) FullArchivePeersRatingHandler() p2p.PeersRatingHandler { - mnc.mutNetworkComponents.RLock() - defer mnc.mutNetworkComponents.RUnlock() - - if mnc.networkComponents == nil { - return nil - } - - return mnc.fullArchiveNetworkHolder.peersRatingHandler -} - -// FullArchivePeersRatingMonitor returns the peers rating monitor of the full archive network -func (mnc *managedNetworkComponents) FullArchivePeersRatingMonitor() p2p.PeersRatingMonitor { - mnc.mutNetworkComponents.RLock() - defer mnc.mutNetworkComponents.RUnlock() - - if mnc.networkComponents == nil { - return nil - } - - return mnc.fullArchiveNetworkHolder.peersRatingMonitor -} - // FullArchivePreferredPeersHolderHandler returns the preferred peers holder of the full archive network func (mnc *managedNetworkComponents) FullArchivePreferredPeersHolderHandler() factory.PreferredPeersHolderHandler { mnc.mutNetworkComponents.RLock() diff --git a/factory/network/networkComponentsHandler_test.go b/factory/network/networkComponentsHandler_test.go index 84f44e3b235..811af70cc30 100644 --- a/factory/network/networkComponentsHandler_test.go +++ b/factory/network/networkComponentsHandler_test.go @@ -62,8 +62,6 @@ func TestManagedNetworkComponents_Create(t *testing.T) { require.Nil(t, managedNetworkComponents.PeerHonestyHandler()) require.Nil(t, managedNetworkComponents.PeersRatingHandler()) require.Nil(t, managedNetworkComponents.FullArchiveNetworkMessenger()) - require.Nil(t, managedNetworkComponents.FullArchivePeersRatingHandler()) - require.Nil(t, managedNetworkComponents.FullArchivePeersRatingMonitor()) require.Nil(t, managedNetworkComponents.FullArchivePreferredPeersHolderHandler()) err = managedNetworkComponents.Create() @@ -77,8 +75,6 @@ func TestManagedNetworkComponents_Create(t *testing.T) { require.NotNil(t, 
managedNetworkComponents.PeerHonestyHandler()) require.NotNil(t, managedNetworkComponents.PeersRatingHandler()) require.NotNil(t, managedNetworkComponents.FullArchiveNetworkMessenger()) - require.NotNil(t, managedNetworkComponents.FullArchivePeersRatingHandler()) - require.NotNil(t, managedNetworkComponents.FullArchivePeersRatingMonitor()) require.NotNil(t, managedNetworkComponents.FullArchivePreferredPeersHolderHandler()) require.Equal(t, factory.NetworkComponentsName, managedNetworkComponents.String()) diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index f8f622c54cb..bd0d8ae8c4a 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -1432,8 +1432,7 @@ func (pcf *processComponentsFactory) newRequestersContainerFactory( CurrentNetworkEpochProvider: currentEpochProvider, MainPreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), FullArchivePreferredPeersHolder: pcf.network.FullArchivePreferredPeersHolderHandler(), - MainPeersRatingHandler: pcf.network.PeersRatingHandler(), - FullArchivePeersRatingHandler: pcf.network.FullArchivePeersRatingHandler(), + PeersRatingHandler: pcf.network.PeersRatingHandler(), SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, } diff --git a/factory/processing/processComponents_test.go b/factory/processing/processComponents_test.go index c5f7e218e9d..08eac3d4af9 100644 --- a/factory/processing/processComponents_test.go +++ b/factory/processing/processComponents_test.go @@ -215,14 +215,13 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto ManagedPeersHolderField: &testscommon.ManagedPeersHolderStub{}, }, Network: &testsMocks.NetworkComponentsStub{ - Messenger: &p2pmocks.MessengerStub{}, - FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{}, - InputAntiFlood: &testsMocks.P2PAntifloodHandlerStub{}, - OutputAntiFlood: &testsMocks.P2PAntifloodHandlerStub{}, - PreferredPeersHolder: 
&p2pmocks.PeersHolderStub{}, - PeersRatingHandlerField: &p2pmocks.PeersRatingHandlerStub{}, - FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - FullArchivePeersRatingHandlerField: &p2pmocks.PeersRatingHandlerStub{}, + Messenger: &p2pmocks.MessengerStub{}, + FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{}, + InputAntiFlood: &testsMocks.P2PAntifloodHandlerStub{}, + OutputAntiFlood: &testsMocks.P2PAntifloodHandlerStub{}, + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PeersRatingHandlerField: &p2pmocks.PeersRatingHandlerStub{}, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, }, BootstrapComponents: &mainFactoryMocks.BootstrapComponentsStub{ ShCoordinator: mock.NewMultiShardsCoordinatorMock(2), diff --git a/go.mod b/go.mod index a5a0d11b8f0..11cfe7c45c5 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230623111526-bad086eb28f9 + github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230623125633-31ec97de0b72 github.com/multiversx/mx-chain-core-go v1.2.6 github.com/multiversx/mx-chain-crypto-go v1.2.6 github.com/multiversx/mx-chain-es-indexer-go v1.4.4 diff --git a/go.sum b/go.sum index 4b5956a4e9b..15a0b579614 100644 --- a/go.sum +++ b/go.sum @@ -617,8 +617,8 @@ github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXS github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= github.com/multiversx/mx-chain-communication-go v1.0.2/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= -github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230623111526-bad086eb28f9 h1:gvCgL3dDNc1I1AzCRzW2fKlsppYBOTtIVPLO2fssiA0= -github.com/multiversx/mx-chain-communication-go 
v1.0.3-0.20230623111526-bad086eb28f9/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= +github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230623125633-31ec97de0b72 h1:o5v7pDhjDtUT5MUn/+RlnNiy+tdA++xT+MbK89+1fh8= +github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230623125633-31ec97de0b72/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1-0.20230510143029-ab37792342df/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= diff --git a/integrationTests/interface.go b/integrationTests/interface.go index 634e1eb447e..69142d6ee91 100644 --- a/integrationTests/interface.go +++ b/integrationTests/interface.go @@ -85,7 +85,7 @@ type Facade interface { GetQueryHandler(name string) (debug.QueryHandler, error) GetEpochStartDataAPI(epoch uint32) (*common.EpochStartDataAPI, error) GetPeerInfo(pid string) ([]core.QueryP2PPeerInfo, error) - GetConnectedPeersRatings() string + GetConnectedPeersRatings() (string, error) CreateTransaction(txArgs *external.ArgsCreateTransaction) (*transaction.Transaction, []byte, error) ValidateTransaction(tx *transaction.Transaction) error ValidateTransactionForSimulation(tx *transaction.Transaction, bypassSignature bool) error diff --git a/integrationTests/mock/networkComponentsMock.go b/integrationTests/mock/networkComponentsMock.go index 198fb751b88..8c4e849907f 100644 --- a/integrationTests/mock/networkComponentsMock.go +++ b/integrationTests/mock/networkComponentsMock.go @@ -8,19 +8,17 @@ import ( // NetworkComponentsStub - type NetworkComponentsStub struct { - Messenger p2p.Messenger - MessengerCalled func() p2p.Messenger - InputAntiFlood factory.P2PAntifloodHandler - OutputAntiFlood factory.P2PAntifloodHandler - PeerBlackList process.PeerBlackListCacher - PeerHonesty 
factory.PeerHonestyHandler - PreferredPeersHolder factory.PreferredPeersHolderHandler - PeersRatingHandlerField p2p.PeersRatingHandler - PeersRatingMonitorField p2p.PeersRatingMonitor - FullArchiveNetworkMessengerField p2p.Messenger - FullArchivePeersRatingHandlerField p2p.PeersRatingHandler - FullArchivePeersRatingMonitorField p2p.PeersRatingMonitor - FullArchivePreferredPeersHolder factory.PreferredPeersHolderHandler + Messenger p2p.Messenger + MessengerCalled func() p2p.Messenger + InputAntiFlood factory.P2PAntifloodHandler + OutputAntiFlood factory.P2PAntifloodHandler + PeerBlackList process.PeerBlackListCacher + PeerHonesty factory.PeerHonestyHandler + PreferredPeersHolder factory.PreferredPeersHolderHandler + PeersRatingHandlerField p2p.PeersRatingHandler + PeersRatingMonitorField p2p.PeersRatingMonitor + FullArchiveNetworkMessengerField p2p.Messenger + FullArchivePreferredPeersHolder factory.PreferredPeersHolderHandler } // PubKeyCacher - @@ -91,16 +89,6 @@ func (ncs *NetworkComponentsStub) FullArchiveNetworkMessenger() p2p.Messenger { return ncs.FullArchiveNetworkMessengerField } -// FullArchivePeersRatingHandler - -func (ncs *NetworkComponentsStub) FullArchivePeersRatingHandler() p2p.PeersRatingHandler { - return ncs.FullArchivePeersRatingHandlerField -} - -// FullArchivePeersRatingMonitor - -func (ncs *NetworkComponentsStub) FullArchivePeersRatingMonitor() p2p.PeersRatingMonitor { - return ncs.FullArchivePeersRatingMonitorField -} - // FullArchivePreferredPeersHolderHandler - func (ncs *NetworkComponentsStub) FullArchivePreferredPeersHolderHandler() factory.PreferredPeersHolderHandler { return ncs.FullArchivePreferredPeersHolder diff --git a/integrationTests/multiShard/hardFork/hardFork_test.go b/integrationTests/multiShard/hardFork/hardFork_test.go index 47d55f5078e..506ef9bf8fb 100644 --- a/integrationTests/multiShard/hardFork/hardFork_test.go +++ b/integrationTests/multiShard/hardFork/hardFork_test.go @@ -579,8 +579,7 @@ func createHardForkExporter( 
networkComponents := integrationTests.GetDefaultNetworkComponents() networkComponents.Messenger = node.MainMessenger networkComponents.FullArchiveNetworkMessengerField = node.FullArchiveMessenger - networkComponents.PeersRatingHandlerField = node.MainPeersRatingHandler - networkComponents.FullArchivePeersRatingHandlerField = node.FullArchivePeersRatingHandler + networkComponents.PeersRatingHandlerField = node.PeersRatingHandler networkComponents.InputAntiFlood = &mock.NilAntifloodHandler{} networkComponents.OutputAntiFlood = &mock.NilAntifloodHandler{} argsExportHandler := factory.ArgsExporter{ diff --git a/integrationTests/p2p/peersRating/peersRating_test.go b/integrationTests/p2p/peersRating/peersRating_test.go index 32128c77d10..6b8ca67f86a 100644 --- a/integrationTests/p2p/peersRating/peersRating_test.go +++ b/integrationTests/p2p/peersRating/peersRating_test.go @@ -67,7 +67,7 @@ func TestPeersRatingAndResponsiveness(t *testing.T) { resolverNode.DataPool.Headers().AddHeader(hdrHash, hdr) requestHeader(requesterNode, numOfRequests, hdrHash, resolverNode.ShardCoordinator.SelfId()) - peerRatingsMap := getRatingsMap(t, requesterNode.MainPeersRatingMonitor) + peerRatingsMap := getRatingsMap(t, requesterNode.PeersRatingMonitor, requesterNode.MainMessenger) // resolver node should have received and responded to numOfRequests initialResolverRating, exists := peerRatingsMap[resolverNode.MainMessenger.ID().Pretty()] require.True(t, exists) @@ -83,7 +83,7 @@ func TestPeersRatingAndResponsiveness(t *testing.T) { numOfRequests = 120 requestHeader(requesterNode, numOfRequests, hdrHash, resolverNode.ShardCoordinator.SelfId()) - peerRatingsMap = getRatingsMap(t, requesterNode.MainPeersRatingMonitor) + peerRatingsMap = getRatingsMap(t, requesterNode.PeersRatingMonitor, requesterNode.MainMessenger) // Resolver should have reached max limit and timestamps still update initialResolverRating, exists = peerRatingsMap[resolverNode.MainMessenger.ID().Pretty()] require.True(t, exists) 
@@ -100,7 +100,7 @@ func TestPeersRatingAndResponsiveness(t *testing.T) { numOfRequests = 10 requestHeader(requesterNode, numOfRequests, hdrHash, resolverNode.ShardCoordinator.SelfId()) - peerRatingsMap = getRatingsMap(t, requesterNode.MainPeersRatingMonitor) + peerRatingsMap = getRatingsMap(t, requesterNode.PeersRatingMonitor, requesterNode.MainMessenger) // resolver node should have the max rating + numOfRequests that didn't answer to resolverRating, exists := peerRatingsMap[resolverNode.MainMessenger.ID().Pretty()] require.True(t, exists) @@ -176,7 +176,7 @@ func TestPeersRatingAndResponsivenessOnFullArchive(t *testing.T) { } requestHeader(requesterFullArchiveNode, numOfRequests, hdrHash, resolverFullArchiveNode.ShardCoordinator.SelfId()) - peerRatingsMap := getRatingsMap(t, requesterFullArchiveNode.FullArchivePeersRatingMonitor) + peerRatingsMap := getRatingsMap(t, requesterFullArchiveNode.PeersRatingMonitor, requesterFullArchiveNode.FullArchiveMessenger) // resolver node should have received and responded to numOfRequests initialResolverRating, exists := peerRatingsMap[resolverFullArchiveNode.MainMessenger.ID().Pretty()] require.True(t, exists) @@ -192,10 +192,10 @@ func TestPeersRatingAndResponsivenessOnFullArchive(t *testing.T) { return true // force the full archive requester to request from main network } requestHeader(requesterFullArchiveNode, numOfRequests, hdrHash, regularNode.ShardCoordinator.SelfId()) - peerRatingsMap = getRatingsMap(t, requesterFullArchiveNode.MainPeersRatingMonitor) + peerRatingsMap = getRatingsMap(t, requesterFullArchiveNode.PeersRatingMonitor, requesterFullArchiveNode.MainMessenger) _, exists = peerRatingsMap[resolverFullArchiveNode.MainMessenger.ID().Pretty()] - require.False(t, exists) // should not be any request on the main monitor to the full archive resolver + require.False(t, exists) // resolverFullArchiveNode is not even connected to requesterFullArchiveNode on main network mainResolverRating, exists := 
peerRatingsMap[regularNode.MainMessenger.ID().Pretty()] require.True(t, exists) @@ -240,11 +240,12 @@ func getHeader() (*block.Header, []byte, []byte) { return hdr, hdrHash, hdrBuff } -func getRatingsMap(t *testing.T, monitor p2p.PeersRatingMonitor) map[string]string { - peerRatingsStr := monitor.GetConnectedPeersRatings() +func getRatingsMap(t *testing.T, monitor p2p.PeersRatingMonitor, connectionsHandler p2p.ConnectionsHandler) map[string]string { + peerRatingsStr, err := monitor.GetConnectedPeersRatings(connectionsHandler) + require.Nil(t, err) peerRatingsMap := make(map[string]string) - err := json.Unmarshal([]byte(peerRatingsStr), &peerRatingsMap) + err = json.Unmarshal([]byte(peerRatingsStr), &peerRatingsMap) require.Nil(t, err) return peerRatingsMap diff --git a/integrationTests/testHeartbeatNode.go b/integrationTests/testHeartbeatNode.go index c833954ad53..e87316442b6 100644 --- a/integrationTests/testHeartbeatNode.go +++ b/integrationTests/testHeartbeatNode.go @@ -505,8 +505,7 @@ func (thn *TestHeartbeatNode) initResolversAndRequesters() { CurrentNetworkEpochProvider: &mock.CurrentNetworkEpochProviderStub{}, MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - MainPeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, - FullArchivePeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, + PeersRatingHandler: &p2pmocks.PeersRatingHandlerStub{}, SizeCheckDelta: 0, } diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 57d9d47941b..0fc87b9b26f 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -394,14 +394,12 @@ type TestProcessorNode struct { EnableEpochsHandler common.EnableEpochsHandler UseValidVmBlsSigVerifier bool - TransactionLogProcessor process.TransactionLogProcessor - MainPeersRatingHandler p2p.PeersRatingHandler - FullArchivePeersRatingHandler p2p.PeersRatingHandler - MainPeersRatingMonitor 
p2p.PeersRatingMonitor - FullArchivePeersRatingMonitor p2p.PeersRatingMonitor - HardforkTrigger node.HardforkTrigger - AppStatusHandler core.AppStatusHandler - StatusMetrics external.StatusMetricsHandler + TransactionLogProcessor process.TransactionLogProcessor + PeersRatingHandler p2p.PeersRatingHandler + PeersRatingMonitor p2p.PeersRatingMonitor + HardforkTrigger node.HardforkTrigger + AppStatusHandler core.AppStatusHandler + StatusMetrics external.StatusMetricsHandler } // CreatePkBytes creates 'numShards' public key-like byte slices @@ -445,50 +443,27 @@ func newBaseTestProcessorNode(args ArgTestProcessorNode) *TestProcessorNode { var peersRatingHandler p2p.PeersRatingHandler peersRatingHandler = &p2pmocks.PeersRatingHandlerStub{} - topRatedCache := testscommon.NewCacherMock() - badRatedCache := testscommon.NewCacherMock() - var fullArchivePeersRatingHandler p2p.PeersRatingHandler - fullArchivePeersRatingHandler = &p2pmocks.PeersRatingHandlerStub{} - fullArchiveTopRatedCache := testscommon.NewCacherMock() - fullArchiveBadRatedCache := testscommon.NewCacherMock() + var peersRatingMonitor p2p.PeersRatingMonitor + peersRatingMonitor = &p2pmocks.PeersRatingMonitorStub{} if args.WithPeersRatingHandler { + topRatedCache := testscommon.NewCacherMock() + badRatedCache := testscommon.NewCacherMock() peersRatingHandler, _ = p2pFactory.NewPeersRatingHandler( p2pFactory.ArgPeersRatingHandler{ TopRatedCache: topRatedCache, BadRatedCache: badRatedCache, Logger: &testscommon.LoggerStub{}, }) - - fullArchivePeersRatingHandler, _ = p2pFactory.NewPeersRatingHandler( - p2pFactory.ArgPeersRatingHandler{ - TopRatedCache: fullArchiveTopRatedCache, - BadRatedCache: fullArchiveBadRatedCache, - Logger: &testscommon.LoggerStub{}, + peersRatingMonitor, _ = p2pFactory.NewPeersRatingMonitor( + p2pFactory.ArgPeersRatingMonitor{ + TopRatedCache: topRatedCache, + BadRatedCache: badRatedCache, }) } p2pKey := mock.NewPrivateKeyMock() messenger := 
CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHandler, p2pKey) - fullArchiveMessenger := CreateMessengerWithNoDiscoveryAndPeersRatingHandler(fullArchivePeersRatingHandler, p2pKey) - - var peersRatingMonitor p2p.PeersRatingMonitor - peersRatingMonitor = &p2pmocks.PeersRatingMonitorStub{} - var fullArchivePeersRatingMonitor p2p.PeersRatingMonitor - fullArchivePeersRatingMonitor = &p2pmocks.PeersRatingMonitorStub{} - if args.WithPeersRatingHandler { - peersRatingMonitor, _ = p2pFactory.NewPeersRatingMonitor( - p2pFactory.ArgPeersRatingMonitor{ - TopRatedCache: topRatedCache, - BadRatedCache: badRatedCache, - ConnectionsProvider: messenger, - }) - fullArchivePeersRatingMonitor, _ = p2pFactory.NewPeersRatingMonitor( - p2pFactory.ArgPeersRatingMonitor{ - TopRatedCache: fullArchiveTopRatedCache, - BadRatedCache: fullArchiveBadRatedCache, - ConnectionsProvider: fullArchiveMessenger, - }) - } + fullArchiveMessenger := CreateMessengerWithNoDiscoveryAndPeersRatingHandler(peersRatingHandler, p2pKey) genericEpochNotifier := forking.NewGenericEpochNotifier() epochsConfig := args.EpochsConfig @@ -504,35 +479,33 @@ func newBaseTestProcessorNode(args ArgTestProcessorNode) *TestProcessorNode { logsProcessor, _ := transactionLog.NewTxLogProcessor(transactionLog.ArgTxLogProcessor{Marshalizer: TestMarshalizer}) tpn := &TestProcessorNode{ - ShardCoordinator: shardCoordinator, - MainMessenger: messenger, - FullArchiveMessenger: fullArchiveMessenger, - NodeOperationMode: nodeOperationMode, - NodesCoordinator: nodesCoordinatorInstance, - ChainID: ChainID, - MinTransactionVersion: MinTransactionVersion, - NodesSetup: nodesSetup, - HistoryRepository: &dblookupextMock.HistoryRepositoryStub{}, - EpochNotifier: genericEpochNotifier, - EnableEpochsHandler: enableEpochsHandler, - EpochProvider: &mock.CurrentNetworkEpochProviderStub{}, - WasmVMChangeLocker: &sync.RWMutex{}, - TransactionLogProcessor: logsProcessor, - Bootstrapper: mock.NewTestBootstrapperMock(), - 
MainPeersRatingHandler: peersRatingHandler, - FullArchivePeersRatingHandler: fullArchivePeersRatingHandler, - MainPeerShardMapper: mock.NewNetworkShardingCollectorMock(), - FullArchivePeerShardMapper: mock.NewNetworkShardingCollectorMock(), - EnableEpochs: *epochsConfig, - UseValidVmBlsSigVerifier: args.WithBLSSigVerifier, - StorageBootstrapper: &mock.StorageBootstrapperMock{}, - BootstrapStorer: &mock.BoostrapStorerMock{}, - RatingsData: args.RatingsData, - EpochStartNotifier: args.EpochStartSubscriber, - GuardedAccountHandler: &guardianMocks.GuardedAccountHandlerStub{}, - AppStatusHandler: appStatusHandler, - MainPeersRatingMonitor: peersRatingMonitor, - FullArchivePeersRatingMonitor: fullArchivePeersRatingMonitor, + ShardCoordinator: shardCoordinator, + MainMessenger: messenger, + FullArchiveMessenger: fullArchiveMessenger, + NodeOperationMode: nodeOperationMode, + NodesCoordinator: nodesCoordinatorInstance, + ChainID: ChainID, + MinTransactionVersion: MinTransactionVersion, + NodesSetup: nodesSetup, + HistoryRepository: &dblookupextMock.HistoryRepositoryStub{}, + EpochNotifier: genericEpochNotifier, + EnableEpochsHandler: enableEpochsHandler, + EpochProvider: &mock.CurrentNetworkEpochProviderStub{}, + WasmVMChangeLocker: &sync.RWMutex{}, + TransactionLogProcessor: logsProcessor, + Bootstrapper: mock.NewTestBootstrapperMock(), + PeersRatingHandler: peersRatingHandler, + MainPeerShardMapper: mock.NewNetworkShardingCollectorMock(), + FullArchivePeerShardMapper: mock.NewNetworkShardingCollectorMock(), + EnableEpochs: *epochsConfig, + UseValidVmBlsSigVerifier: args.WithBLSSigVerifier, + StorageBootstrapper: &mock.StorageBootstrapperMock{}, + BootstrapStorer: &mock.BoostrapStorerMock{}, + RatingsData: args.RatingsData, + EpochStartNotifier: args.EpochStartSubscriber, + GuardedAccountHandler: &guardianMocks.GuardedAccountHandlerStub{}, + AppStatusHandler: appStatusHandler, + PeersRatingMonitor: peersRatingMonitor, } tpn.NodeKeys = args.NodeKeys @@ -1458,8 +1431,7 @@ 
func (tpn *TestProcessorNode) initRequesters() { CurrentNetworkEpochProvider: tpn.EpochProvider, MainPreferredPeersHolder: &p2pmocks.PeersHolderStub{}, FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - MainPeersRatingHandler: tpn.MainPeersRatingHandler, - FullArchivePeersRatingHandler: tpn.FullArchivePeersRatingHandler, + PeersRatingHandler: tpn.PeersRatingHandler, SizeCheckDelta: 0, } @@ -2466,10 +2438,8 @@ func (tpn *TestProcessorNode) initNode() { networkComponents := GetDefaultNetworkComponents() networkComponents.Messenger = tpn.MainMessenger networkComponents.FullArchiveNetworkMessengerField = tpn.FullArchiveMessenger - networkComponents.PeersRatingHandlerField = tpn.MainPeersRatingHandler - networkComponents.FullArchivePeersRatingHandlerField = tpn.FullArchivePeersRatingHandler - networkComponents.PeersRatingMonitorField = tpn.MainPeersRatingMonitor - networkComponents.FullArchivePeersRatingMonitorField = tpn.FullArchivePeersRatingMonitor + networkComponents.PeersRatingHandlerField = tpn.PeersRatingHandler + networkComponents.PeersRatingMonitorField = tpn.PeersRatingMonitor tpn.Node, err = node.NewNode( node.WithAddressSignatureSize(64), @@ -3319,16 +3289,15 @@ func GetDefaultStateComponents() *testFactory.StateComponentsMock { // GetDefaultNetworkComponents - func GetDefaultNetworkComponents() *mock.NetworkComponentsStub { return &mock.NetworkComponentsStub{ - Messenger: &p2pmocks.MessengerStub{}, - InputAntiFlood: &mock.P2PAntifloodHandlerStub{}, - OutputAntiFlood: &mock.P2PAntifloodHandlerStub{}, - PeerBlackList: &mock.PeerBlackListCacherStub{}, - PeersRatingHandlerField: &p2pmocks.PeersRatingHandlerStub{}, - PeersRatingMonitorField: &p2pmocks.PeersRatingMonitorStub{}, - FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{}, - FullArchivePeersRatingHandlerField: &p2pmocks.PeersRatingHandlerStub{}, - PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, - FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + Messenger: 
&p2pmocks.MessengerStub{}, + InputAntiFlood: &mock.P2PAntifloodHandlerStub{}, + OutputAntiFlood: &mock.P2PAntifloodHandlerStub{}, + PeerBlackList: &mock.PeerBlackListCacherStub{}, + PeersRatingHandlerField: &p2pmocks.PeersRatingHandlerStub{}, + PeersRatingMonitorField: &p2pmocks.PeersRatingMonitorStub{}, + FullArchiveNetworkMessengerField: &p2pmocks.MessengerStub{}, + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + FullArchivePreferredPeersHolder: &p2pmocks.PeersHolderStub{}, } } diff --git a/node/mock/factory/networkComponentsMock.go b/node/mock/factory/networkComponentsMock.go index 1422633a13c..88acf5468ea 100644 --- a/node/mock/factory/networkComponentsMock.go +++ b/node/mock/factory/networkComponentsMock.go @@ -8,17 +8,15 @@ import ( // NetworkComponentsMock - type NetworkComponentsMock struct { - Messenger p2p.Messenger - InputAntiFlood factory.P2PAntifloodHandler - OutputAntiFlood factory.P2PAntifloodHandler - PeerBlackList process.PeerBlackListCacher - PreferredPeersHolder factory.PreferredPeersHolderHandler - PeersRatingHandlerField p2p.PeersRatingHandler - PeersRatingMonitorField p2p.PeersRatingMonitor - FullArchiveNetworkMessengerField p2p.Messenger - FullArchivePeersRatingHandlerField p2p.PeersRatingHandler - FullArchivePeersRatingMonitorField p2p.PeersRatingMonitor - FullArchivePreferredPeersHolder factory.PreferredPeersHolderHandler + Messenger p2p.Messenger + InputAntiFlood factory.P2PAntifloodHandler + OutputAntiFlood factory.P2PAntifloodHandler + PeerBlackList process.PeerBlackListCacher + PreferredPeersHolder factory.PreferredPeersHolderHandler + PeersRatingHandlerField p2p.PeersRatingHandler + PeersRatingMonitorField p2p.PeersRatingMonitor + FullArchiveNetworkMessengerField p2p.Messenger + FullArchivePreferredPeersHolder factory.PreferredPeersHolderHandler } // PubKeyCacher - @@ -86,16 +84,6 @@ func (ncm *NetworkComponentsMock) FullArchiveNetworkMessenger() p2p.Messenger { return ncm.FullArchiveNetworkMessengerField } -// 
FullArchivePeersRatingHandler - -func (ncm *NetworkComponentsMock) FullArchivePeersRatingHandler() p2p.PeersRatingHandler { - return ncm.FullArchivePeersRatingHandlerField -} - -// FullArchivePeersRatingMonitor - -func (ncm *NetworkComponentsMock) FullArchivePeersRatingMonitor() p2p.PeersRatingMonitor { - return ncm.FullArchivePeersRatingMonitorField -} - // String - func (ncm *NetworkComponentsMock) String() string { return "NetworkComponentsMock" diff --git a/node/node.go b/node/node.go index b08a4b8b925..91623de8ed6 100644 --- a/node/node.go +++ b/node/node.go @@ -1091,9 +1091,9 @@ func (n *Node) GetPeerInfo(pid string) ([]core.QueryP2PPeerInfo, error) { return peerInfoSlice, nil } -// GetConnectedPeersRatings returns the connected peers ratings -func (n *Node) GetConnectedPeersRatings() string { - return n.networkComponents.PeersRatingMonitor().GetConnectedPeersRatings() +// GetConnectedPeersRatings returns the connected peers ratings on the main network +func (n *Node) GetConnectedPeersRatings() (string, error) { + return n.networkComponents.PeersRatingMonitor().GetConnectedPeersRatings(n.networkComponents.NetworkMessenger()) } // GetEpochStartDataAPI returns epoch start data of a given epoch diff --git a/p2p/interface.go b/p2p/interface.go index 2fbfb86e169..aeb39a33689 100644 --- a/p2p/interface.go +++ b/p2p/interface.go @@ -100,10 +100,7 @@ type PeersRatingHandler interface { } // PeersRatingMonitor represents an entity able to provide peers ratings -type PeersRatingMonitor interface { - GetConnectedPeersRatings() string - IsInterfaceNil() bool -} +type PeersRatingMonitor = p2p.PeersRatingMonitor // PeerTopicNotifier represents an entity able to handle new notifications on a new peer on a topic type PeerTopicNotifier = p2p.PeerTopicNotifier @@ -131,3 +128,6 @@ type P2PKeyConverter interface { // Logger defines the behavior of a data logger component type Logger = p2p.Logger + +// ConnectionsHandler defines the behaviour of a component able to handle 
connections +type ConnectionsHandler = p2p.ConnectionsHandler diff --git a/testscommon/p2pmocks/peersRatingMonitorStub.go b/testscommon/p2pmocks/peersRatingMonitorStub.go index 22325a03d05..35e9cc83304 100644 --- a/testscommon/p2pmocks/peersRatingMonitorStub.go +++ b/testscommon/p2pmocks/peersRatingMonitorStub.go @@ -1,16 +1,18 @@ package p2pmocks +import "github.com/multiversx/mx-chain-go/p2p" + // PeersRatingMonitorStub - type PeersRatingMonitorStub struct { - GetConnectedPeersRatingsCalled func() string + GetConnectedPeersRatingsCalled func(connectionsHandler p2p.ConnectionsHandler) (string, error) } // GetConnectedPeersRatings - -func (stub *PeersRatingMonitorStub) GetConnectedPeersRatings() string { +func (stub *PeersRatingMonitorStub) GetConnectedPeersRatings(connectionsHandler p2p.ConnectionsHandler) (string, error) { if stub.GetConnectedPeersRatingsCalled != nil { - return stub.GetConnectedPeersRatingsCalled() + return stub.GetConnectedPeersRatingsCalled(connectionsHandler) } - return "" + return "", nil } // IsInterfaceNil - diff --git a/update/factory/exportHandlerFactory.go b/update/factory/exportHandlerFactory.go index 5692ada6c23..7fde050935b 100644 --- a/update/factory/exportHandlerFactory.go +++ b/update/factory/exportHandlerFactory.go @@ -374,14 +374,13 @@ func (e *exportHandlerFactory) Create() (update.ExportHandler, error) { }) argsRequesters := ArgsRequestersContainerFactory{ - ShardCoordinator: e.shardCoordinator, - MainMessenger: e.networkComponents.NetworkMessenger(), - FullArchiveMessenger: e.networkComponents.FullArchiveNetworkMessenger(), - Marshaller: e.coreComponents.InternalMarshalizer(), - ExistingRequesters: e.existingRequesters, - OutputAntifloodHandler: e.networkComponents.OutputAntiFloodHandler(), - MainPeersRatingHandler: e.networkComponents.PeersRatingHandler(), - FullArchivePeersRatingHandler: e.networkComponents.FullArchivePeersRatingHandler(), + ShardCoordinator: e.shardCoordinator, + MainMessenger: 
e.networkComponents.NetworkMessenger(), + FullArchiveMessenger: e.networkComponents.FullArchiveNetworkMessenger(), + Marshaller: e.coreComponents.InternalMarshalizer(), + ExistingRequesters: e.existingRequesters, + OutputAntifloodHandler: e.networkComponents.OutputAntiFloodHandler(), + PeersRatingHandler: e.networkComponents.PeersRatingHandler(), } requestersFactory, err := NewRequestersContainerFactory(argsRequesters) if err != nil { diff --git a/update/factory/fullSyncRequestersContainerFactory.go b/update/factory/fullSyncRequestersContainerFactory.go index ede951b8a64..cce6ee407d7 100644 --- a/update/factory/fullSyncRequestersContainerFactory.go +++ b/update/factory/fullSyncRequestersContainerFactory.go @@ -26,27 +26,25 @@ const ( ) type requestersContainerFactory struct { - shardCoordinator sharding.Coordinator - mainMessenger p2p.Messenger - fullArchiveMessenger p2p.Messenger - marshaller marshal.Marshalizer - intRandomizer dataRetriever.IntRandomizer - container dataRetriever.RequestersContainer - outputAntifloodHandler dataRetriever.P2PAntifloodHandler - mainPeersRatingHandler dataRetriever.PeersRatingHandler - fullArchivePeersRatingHandler dataRetriever.PeersRatingHandler + shardCoordinator sharding.Coordinator + mainMessenger p2p.Messenger + fullArchiveMessenger p2p.Messenger + marshaller marshal.Marshalizer + intRandomizer dataRetriever.IntRandomizer + container dataRetriever.RequestersContainer + outputAntifloodHandler dataRetriever.P2PAntifloodHandler + peersRatingHandler dataRetriever.PeersRatingHandler } // ArgsRequestersContainerFactory defines the arguments for the requestersContainerFactory constructor type ArgsRequestersContainerFactory struct { - ShardCoordinator sharding.Coordinator - MainMessenger p2p.Messenger - FullArchiveMessenger p2p.Messenger - Marshaller marshal.Marshalizer - ExistingRequesters dataRetriever.RequestersContainer - OutputAntifloodHandler dataRetriever.P2PAntifloodHandler - MainPeersRatingHandler 
dataRetriever.PeersRatingHandler - FullArchivePeersRatingHandler dataRetriever.PeersRatingHandler + ShardCoordinator sharding.Coordinator + MainMessenger p2p.Messenger + FullArchiveMessenger p2p.Messenger + Marshaller marshal.Marshalizer + ExistingRequesters dataRetriever.RequestersContainer + OutputAntifloodHandler dataRetriever.P2PAntifloodHandler + PeersRatingHandler dataRetriever.PeersRatingHandler } // NewRequestersContainerFactory creates a new container filled with topic requesters @@ -69,23 +67,19 @@ func NewRequestersContainerFactory(args ArgsRequestersContainerFactory) (*reques if check.IfNil(args.OutputAntifloodHandler) { return nil, update.ErrNilAntiFloodHandler } - if check.IfNil(args.MainPeersRatingHandler) { - return nil, fmt.Errorf("%w on main network", update.ErrNilPeersRatingHandler) - } - if check.IfNil(args.FullArchivePeersRatingHandler) { - return nil, fmt.Errorf("%w on full archive network", update.ErrNilPeersRatingHandler) + if check.IfNil(args.PeersRatingHandler) { + return nil, update.ErrNilPeersRatingHandler } return &requestersContainerFactory{ - shardCoordinator: args.ShardCoordinator, - mainMessenger: args.MainMessenger, - fullArchiveMessenger: args.FullArchiveMessenger, - marshaller: args.Marshaller, - intRandomizer: &random.ConcurrentSafeIntRandomizer{}, - container: args.ExistingRequesters, - outputAntifloodHandler: args.OutputAntifloodHandler, - mainPeersRatingHandler: args.MainPeersRatingHandler, - fullArchivePeersRatingHandler: args.FullArchivePeersRatingHandler, + shardCoordinator: args.ShardCoordinator, + mainMessenger: args.MainMessenger, + fullArchiveMessenger: args.FullArchiveMessenger, + marshaller: args.Marshaller, + intRandomizer: &random.ConcurrentSafeIntRandomizer{}, + container: args.ExistingRequesters, + outputAntifloodHandler: args.OutputAntifloodHandler, + peersRatingHandler: args.PeersRatingHandler, }, nil } @@ -179,16 +173,15 @@ func (rcf *requestersContainerFactory) createTrieNodesRequester(baseTopic string 
FullArchivePreferredPeersHolder: disabled.NewPreferredPeersHolder(), TargetShardId: defaultTargetShardID, }, - Marshaller: rcf.marshaller, - Randomizer: rcf.intRandomizer, - PeerListCreator: peerListCreator, - NumIntraShardPeers: numIntraShardPeers, - NumCrossShardPeers: numCrossShardPeers, - NumFullHistoryPeers: numFullHistoryPeers, - CurrentNetworkEpochProvider: disabled.NewCurrentNetworkEpochProviderHandler(), - SelfShardIdProvider: rcf.shardCoordinator, - MainPeersRatingHandler: rcf.mainPeersRatingHandler, - FullArchivePeersRatingHandler: rcf.fullArchivePeersRatingHandler, + Marshaller: rcf.marshaller, + Randomizer: rcf.intRandomizer, + PeerListCreator: peerListCreator, + NumIntraShardPeers: numIntraShardPeers, + NumCrossShardPeers: numCrossShardPeers, + NumFullHistoryPeers: numFullHistoryPeers, + CurrentNetworkEpochProvider: disabled.NewCurrentNetworkEpochProviderHandler(), + SelfShardIdProvider: rcf.shardCoordinator, + PeersRatingHandler: rcf.peersRatingHandler, } requestSender, err := topicsender.NewTopicRequestSender(arg) if err != nil { From 177c5ffcc078c24500f4265d9d6bc2f740a9523f Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Mon, 26 Jun 2023 12:20:15 +0300 Subject: [PATCH 27/38] fixes after review --- go.mod | 2 +- go.sum | 4 ++-- integrationTests/testHeartbeatNode.go | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index a5a0d11b8f0..0f817b1097b 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230623111526-bad086eb28f9 + github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230626090737-03f29a193aa3 github.com/multiversx/mx-chain-core-go v1.2.6 github.com/multiversx/mx-chain-crypto-go v1.2.6 github.com/multiversx/mx-chain-es-indexer-go v1.4.4 diff --git a/go.sum b/go.sum index 4b5956a4e9b..4d660690e9c 100644 --- a/go.sum 
+++ b/go.sum @@ -617,8 +617,8 @@ github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXS github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= github.com/multiversx/mx-chain-communication-go v1.0.2/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= -github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230623111526-bad086eb28f9 h1:gvCgL3dDNc1I1AzCRzW2fKlsppYBOTtIVPLO2fssiA0= -github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230623111526-bad086eb28f9/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= +github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230626090737-03f29a193aa3 h1:BZhjdS4dQrVpMwShGmymGJCjGAklZjJwhvVhhVhJ+tQ= +github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230626090737-03f29a193aa3/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1-0.20230510143029-ab37792342df/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= diff --git a/integrationTests/testHeartbeatNode.go b/integrationTests/testHeartbeatNode.go index c833954ad53..6889e48237f 100644 --- a/integrationTests/testHeartbeatNode.go +++ b/integrationTests/testHeartbeatNode.go @@ -167,7 +167,7 @@ func NewTestHeartbeatNode( } err = messenger.SetPeerShardResolver(peerShardMapper) if err != nil { - log.Error("error setting NewPeerShardMapper in p2p messenger", "error", err) + log.Error("error setting the peer shard mapper for the main p2p messenger", "error", err) } fullArchiveMessenger := CreateMessengerFromConfigWithPeersRatingHandler(p2pConfig, &p2pmocks.PeersRatingHandlerStub{}, p2pKey) @@ -187,7 +187,7 @@ func NewTestHeartbeatNode( } err = 
fullArchiveMessenger.SetPeerShardResolver(peerShardMapperFullArch) if err != nil { - log.Error("error setting NewPeerShardMapper in p2p messenger for full archive network", "error", err) + log.Error("error setting the peer shard mapper for the full archive p2p messenger", "error", err) } thn := &TestHeartbeatNode{ @@ -262,7 +262,7 @@ func NewTestHeartbeatNodeWithCoordinator( } err = messenger.SetPeerShardResolver(peerShardMapper) if err != nil { - log.Error("error setting NewPeerShardMapper in p2p messenger", "error", err) + log.Error("error setting the peer shard mapper for the main p2p messenger", "error", err) } thn := &TestHeartbeatNode{ From d6e8b7e851e2c205b6166e7d2f21e83ec6da4171 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Mon, 26 Jun 2023 12:33:27 +0300 Subject: [PATCH 28/38] updated mx-chain-communication-go --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 0f817b1097b..0ad75fcf80c 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230626090737-03f29a193aa3 + github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230626092825-e1c46638c379 github.com/multiversx/mx-chain-core-go v1.2.6 github.com/multiversx/mx-chain-crypto-go v1.2.6 github.com/multiversx/mx-chain-es-indexer-go v1.4.4 diff --git a/go.sum b/go.sum index 4d660690e9c..9f0fa174da8 100644 --- a/go.sum +++ b/go.sum @@ -617,8 +617,8 @@ github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXS github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= github.com/multiversx/mx-chain-communication-go v1.0.2/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= 
-github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230626090737-03f29a193aa3 h1:BZhjdS4dQrVpMwShGmymGJCjGAklZjJwhvVhhVhJ+tQ= -github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230626090737-03f29a193aa3/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= +github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230626092825-e1c46638c379 h1:uVlyjbBXWg+LhY3fvGhbseY44yQNo+HtjZMegRE0y4o= +github.com/multiversx/mx-chain-communication-go v1.0.3-0.20230626092825-e1c46638c379/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1-0.20230510143029-ab37792342df/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= From 5c591f576a0a2a3e236dbbfc4ce4be725d98ea23 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Mon, 26 Jun 2023 12:50:34 +0300 Subject: [PATCH 29/38] fix after review, GetConnectedPeersRatings -> GetConnectedPeersRatingsOnMainNetwork --- api/groups/nodeGroup.go | 4 ++-- api/groups/nodeGroup_test.go | 2 +- api/mock/facadeStub.go | 8 ++++---- api/shared/interface.go | 2 +- facade/initial/initialNodeFacade.go | 4 ++-- facade/initial/initialNodeFacade_test.go | 2 +- facade/interface.go | 2 +- facade/mock/nodeStub.go | 10 +++++----- facade/nodeFacade.go | 6 +++--- facade/nodeFacade_test.go | 6 +++--- integrationTests/interface.go | 2 +- node/node.go | 4 ++-- 12 files changed, 26 insertions(+), 26 deletions(-) diff --git a/api/groups/nodeGroup.go b/api/groups/nodeGroup.go index 997cfbc90e9..08dfb3706c1 100644 --- a/api/groups/nodeGroup.go +++ b/api/groups/nodeGroup.go @@ -36,7 +36,7 @@ type nodeFacadeHandler interface { GetQueryHandler(name string) (debug.QueryHandler, error) GetEpochStartDataAPI(epoch uint32) (*common.EpochStartDataAPI, error) GetPeerInfo(pid string) ([]core.QueryP2PPeerInfo, 
error) - GetConnectedPeersRatings() (string, error) + GetConnectedPeersRatingsOnMainNetwork() (string, error) IsInterfaceNil() bool } @@ -327,7 +327,7 @@ func (ng *nodeGroup) bootstrapMetrics(c *gin.Context) { // connectedPeersRatings returns the node's connected peers ratings func (ng *nodeGroup) connectedPeersRatings(c *gin.Context) { - ratings, err := ng.getFacade().GetConnectedPeersRatings() + ratings, err := ng.getFacade().GetConnectedPeersRatingsOnMainNetwork() if err != nil { c.JSON( http.StatusInternalServerError, diff --git a/api/groups/nodeGroup_test.go b/api/groups/nodeGroup_test.go index 383dd099f8d..c212cacfa9a 100644 --- a/api/groups/nodeGroup_test.go +++ b/api/groups/nodeGroup_test.go @@ -265,7 +265,7 @@ func TestNodeGroup_GetConnectedPeersRatings(t *testing.T) { } buff, _ := json.Marshal(providedRatings) facade := mock.FacadeStub{ - GetConnectedPeersRatingsCalled: func() (string, error) { + GetConnectedPeersRatingsOnMainNetworkCalled: func() (string, error) { return string(buff), nil }, } diff --git a/api/mock/facadeStub.go b/api/mock/facadeStub.go index 8841e2244a4..094e617e2e2 100644 --- a/api/mock/facadeStub.go +++ b/api/mock/facadeStub.go @@ -42,7 +42,7 @@ type FacadeStub struct { GetValueForKeyCalled func(address string, key string, options api.AccountQueryOptions) (string, api.BlockInfo, error) GetGuardianDataCalled func(address string, options api.AccountQueryOptions) (api.GuardianData, api.BlockInfo, error) GetPeerInfoCalled func(pid string) ([]core.QueryP2PPeerInfo, error) - GetConnectedPeersRatingsCalled func() (string, error) + GetConnectedPeersRatingsOnMainNetworkCalled func() (string, error) GetEpochStartDataAPICalled func(epoch uint32) (*common.EpochStartDataAPI, error) GetThrottlerForEndpointCalled func(endpoint string) (core.Throttler, bool) GetUsernameCalled func(address string, options api.AccountQueryOptions) (string, api.BlockInfo, error) @@ -383,9 +383,9 @@ func (f *FacadeStub) GetPeerInfo(pid string) ([]core.QueryP2PPeerInfo, 
error) { return f.GetPeerInfoCalled(pid) } -// GetConnectedPeersRatings - -func (f *FacadeStub) GetConnectedPeersRatings() (string, error) { - return f.GetConnectedPeersRatingsCalled() +// GetConnectedPeersRatingsOnMainNetwork - +func (f *FacadeStub) GetConnectedPeersRatingsOnMainNetwork() (string, error) { + return f.GetConnectedPeersRatingsOnMainNetworkCalled() } // GetEpochStartDataAPI - diff --git a/api/shared/interface.go b/api/shared/interface.go index 806de769fa5..5bd36a95bd5 100644 --- a/api/shared/interface.go +++ b/api/shared/interface.go @@ -99,7 +99,7 @@ type FacadeHandler interface { GetQueryHandler(name string) (debug.QueryHandler, error) GetEpochStartDataAPI(epoch uint32) (*common.EpochStartDataAPI, error) GetPeerInfo(pid string) ([]core.QueryP2PPeerInfo, error) - GetConnectedPeersRatings() (string, error) + GetConnectedPeersRatingsOnMainNetwork() (string, error) GetProof(rootHash string, address string) (*common.GetProofResponse, error) GetProofDataTrie(rootHash string, address string, key string) (*common.GetProofResponse, *common.GetProofResponse, error) GetProofCurrentRootHash(address string) (*common.GetProofResponse, error) diff --git a/facade/initial/initialNodeFacade.go b/facade/initial/initialNodeFacade.go index 341654aee9f..6b3137e9f29 100644 --- a/facade/initial/initialNodeFacade.go +++ b/facade/initial/initialNodeFacade.go @@ -240,8 +240,8 @@ func (inf *initialNodeFacade) GetPeerInfo(_ string) ([]core.QueryP2PPeerInfo, er return nil, errNodeStarting } -// GetConnectedPeersRatings returns empty string and error -func (inf *initialNodeFacade) GetConnectedPeersRatings() (string, error) { +// GetConnectedPeersRatingsOnMainNetwork returns empty string and error +func (inf *initialNodeFacade) GetConnectedPeersRatingsOnMainNetwork() (string, error) { return "", errNodeStarting } diff --git a/facade/initial/initialNodeFacade_test.go b/facade/initial/initialNodeFacade_test.go index 19c88e17030..7ee2e26de2e 100644 --- 
a/facade/initial/initialNodeFacade_test.go +++ b/facade/initial/initialNodeFacade_test.go @@ -255,7 +255,7 @@ func TestInitialNodeFacade_AllMethodsShouldNotPanic(t *testing.T) { assert.Nil(t, stakeValue) assert.Equal(t, errNodeStarting, err) - ratings, err := inf.GetConnectedPeersRatings() + ratings, err := inf.GetConnectedPeersRatingsOnMainNetwork() assert.Equal(t, "", ratings) assert.Equal(t, errNodeStarting, err) diff --git a/facade/interface.go b/facade/interface.go index 7426767bd9c..09497830e51 100644 --- a/facade/interface.go +++ b/facade/interface.go @@ -93,7 +93,7 @@ type NodeHandler interface { GetQueryHandler(name string) (debug.QueryHandler, error) GetPeerInfo(pid string) ([]core.QueryP2PPeerInfo, error) - GetConnectedPeersRatings() (string, error) + GetConnectedPeersRatingsOnMainNetwork() (string, error) GetEpochStartDataAPI(epoch uint32) (*common.EpochStartDataAPI, error) diff --git a/facade/mock/nodeStub.go b/facade/mock/nodeStub.go index b4263791b7f..c74363c4ebb 100644 --- a/facade/mock/nodeStub.go +++ b/facade/mock/nodeStub.go @@ -38,7 +38,7 @@ type NodeStub struct { GetValueForKeyCalled func(address string, key string, options api.AccountQueryOptions) (string, api.BlockInfo, error) GetGuardianDataCalled func(address string, options api.AccountQueryOptions) (api.GuardianData, api.BlockInfo, error) GetPeerInfoCalled func(pid string) ([]core.QueryP2PPeerInfo, error) - GetConnectedPeersRatingsCalled func() (string, error) + GetConnectedPeersRatingsOnMainNetworkCalled func() (string, error) GetEpochStartDataAPICalled func(epoch uint32) (*common.EpochStartDataAPI, error) GetUsernameCalled func(address string, options api.AccountQueryOptions) (string, api.BlockInfo, error) GetCodeHashCalled func(address string, options api.AccountQueryOptions) ([]byte, api.BlockInfo, error) @@ -215,10 +215,10 @@ func (ns *NodeStub) GetPeerInfo(pid string) ([]core.QueryP2PPeerInfo, error) { return make([]core.QueryP2PPeerInfo, 0), nil } -// GetConnectedPeersRatings - 
-func (ns *NodeStub) GetConnectedPeersRatings() (string, error) { - if ns.GetConnectedPeersRatingsCalled != nil { - return ns.GetConnectedPeersRatingsCalled() +// GetConnectedPeersRatingsOnMainNetwork - +func (ns *NodeStub) GetConnectedPeersRatingsOnMainNetwork() (string, error) { + if ns.GetConnectedPeersRatingsOnMainNetworkCalled != nil { + return ns.GetConnectedPeersRatingsOnMainNetworkCalled() } return "", nil diff --git a/facade/nodeFacade.go b/facade/nodeFacade.go index 200eb6cc1a4..6a8d317614c 100644 --- a/facade/nodeFacade.go +++ b/facade/nodeFacade.go @@ -466,9 +466,9 @@ func (nf *nodeFacade) GetPeerInfo(pid string) ([]core.QueryP2PPeerInfo, error) { return nf.node.GetPeerInfo(pid) } -// GetConnectedPeersRatings returns the connected peers ratings on the main network -func (nf *nodeFacade) GetConnectedPeersRatings() (string, error) { - return nf.node.GetConnectedPeersRatings() +// GetConnectedPeersRatingsOnMainNetwork returns the connected peers ratings on the main network +func (nf *nodeFacade) GetConnectedPeersRatingsOnMainNetwork() (string, error) { + return nf.node.GetConnectedPeersRatingsOnMainNetwork() } // GetThrottlerForEndpoint returns the throttler for a given endpoint if found diff --git a/facade/nodeFacade_test.go b/facade/nodeFacade_test.go index abf9ce35a59..80849ff7546 100644 --- a/facade/nodeFacade_test.go +++ b/facade/nodeFacade_test.go @@ -2112,19 +2112,19 @@ func TestNodeFacade_GetEpochStartDataAPI(t *testing.T) { require.Equal(t, providedResponse, response) } -func TestNodeFacade_GetConnectedPeersRatings(t *testing.T) { +func TestNodeFacade_GetConnectedPeersRatingsOnMainNetwork(t *testing.T) { t.Parallel() providedResponse := "ratings" args := createMockArguments() args.Node = &mock.NodeStub{ - GetConnectedPeersRatingsCalled: func() (string, error) { + GetConnectedPeersRatingsOnMainNetworkCalled: func() (string, error) { return providedResponse, nil }, } nf, _ := NewNodeFacade(args) - response, err := nf.GetConnectedPeersRatings() + 
response, err := nf.GetConnectedPeersRatingsOnMainNetwork() require.NoError(t, err) require.Equal(t, providedResponse, response) } diff --git a/integrationTests/interface.go b/integrationTests/interface.go index 69142d6ee91..571b83f4b09 100644 --- a/integrationTests/interface.go +++ b/integrationTests/interface.go @@ -85,7 +85,7 @@ type Facade interface { GetQueryHandler(name string) (debug.QueryHandler, error) GetEpochStartDataAPI(epoch uint32) (*common.EpochStartDataAPI, error) GetPeerInfo(pid string) ([]core.QueryP2PPeerInfo, error) - GetConnectedPeersRatings() (string, error) + GetConnectedPeersRatingsOnMainNetwork() (string, error) CreateTransaction(txArgs *external.ArgsCreateTransaction) (*transaction.Transaction, []byte, error) ValidateTransaction(tx *transaction.Transaction) error ValidateTransactionForSimulation(tx *transaction.Transaction, bypassSignature bool) error diff --git a/node/node.go b/node/node.go index 91623de8ed6..62a93dc34fa 100644 --- a/node/node.go +++ b/node/node.go @@ -1091,8 +1091,8 @@ func (n *Node) GetPeerInfo(pid string) ([]core.QueryP2PPeerInfo, error) { return peerInfoSlice, nil } -// GetConnectedPeersRatings returns the connected peers ratings on the main network -func (n *Node) GetConnectedPeersRatings() (string, error) { +// GetConnectedPeersRatingsOnMainNetwork returns the connected peers ratings on the main network +func (n *Node) GetConnectedPeersRatingsOnMainNetwork() (string, error) { return n.networkComponents.PeersRatingMonitor().GetConnectedPeersRatings(n.networkComponents.NetworkMessenger()) } From d337cc328b2654690b7d7e82296c48840416ab18 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Tue, 27 Jun 2023 11:31:56 +0300 Subject: [PATCH 30/38] fixes after merge + update mx-chain-communication-go --- go.mod | 2 +- go.sum | 3 ++- integrationTests/testProcessorNode.go | 5 +++-- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index d084da1b44d..40face3af92 100644 --- a/go.mod +++ b/go.mod @@ 
-13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-communication-go v1.0.3 + github.com/multiversx/mx-chain-communication-go v1.0.4-0.20230627075501-f62c0874a0a6 github.com/multiversx/mx-chain-core-go v1.2.7 github.com/multiversx/mx-chain-crypto-go v1.2.7 github.com/multiversx/mx-chain-es-indexer-go v1.4.5 diff --git a/go.sum b/go.sum index 51e7403ffeb..20e243c5843 100644 --- a/go.sum +++ b/go.sum @@ -621,8 +621,9 @@ github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2 github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= -github.com/multiversx/mx-chain-communication-go v1.0.3 h1:42LkNQX+PdT4UEDO2/kbOMT/iDpEMfE0ncNldFTyFD4= github.com/multiversx/mx-chain-communication-go v1.0.3/go.mod h1:7oTI77XfWmRWwVEbCq+pjH5CO3mJ6vEiHGMvQv6vF3Y= +github.com/multiversx/mx-chain-communication-go v1.0.4-0.20230627075501-f62c0874a0a6 h1:WiXLEeICPQNUnoGbfo9d8JewlN2JIUYGtD4geuRb6pE= +github.com/multiversx/mx-chain-communication-go v1.0.4-0.20230627075501-f62c0874a0a6/go.mod h1:7oTI77XfWmRWwVEbCq+pjH5CO3mJ6vEiHGMvQv6vF3Y= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.5/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 2844cec0498..07d2ce3697d 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -502,8 +502,8 @@ func newBaseTestProcessorNode(args ArgTestProcessorNode) 
*TestProcessorNode { NodesSetup: nodesSetup, HistoryRepository: &dblookupextMock.HistoryRepositoryStub{}, EpochNotifier: genericEpochNotifier, - RoundNotifier: genericRoundNotifier, - EnableRoundsHandler: enableRoundsHandler, + RoundNotifier: genericRoundNotifier, + EnableRoundsHandler: enableRoundsHandler, EnableEpochsHandler: enableEpochsHandler, EpochProvider: &mock.CurrentNetworkEpochProviderStub{}, WasmVMChangeLocker: &sync.RWMutex{}, @@ -2121,6 +2121,7 @@ func (tpn *TestProcessorNode) initBlockProcessor(stateCheckpointModulus uint) { coreComponents.EnableEpochsHandlerField = tpn.EnableEpochsHandler coreComponents.EpochNotifierField = tpn.EpochNotifier coreComponents.EconomicsDataField = tpn.EconomicsData + coreComponents.RoundNotifierField = tpn.RoundNotifier dataComponents := GetDefaultDataComponents() dataComponents.Store = tpn.Storage From afb667529407ae93cf206753eae6d3190b25eccd Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Tue, 27 Jun 2023 12:29:25 +0300 Subject: [PATCH 31/38] fix race in tests --- process/transaction/shardProcess_test.go | 44 ++++++++++++++---------- 1 file changed, 25 insertions(+), 19 deletions(-) diff --git a/process/transaction/shardProcess_test.go b/process/transaction/shardProcess_test.go index 98aa46d7727..ecaace3207d 100644 --- a/process/transaction/shardProcess_test.go +++ b/process/transaction/shardProcess_test.go @@ -90,9 +90,9 @@ func createArgsForTxProcessor() txproc.ArgsNewTxProcessor { EnableEpochsHandler: &enableEpochsHandlerMock.EnableEpochsHandlerStub{ IsPenalizedTooMuchGasFlagEnabledField: true, }, - GuardianChecker: &guardianMocks.GuardedAccountHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, - TxLogsProcessor: &mock.TxLogsProcessorStub{}, + GuardianChecker: &guardianMocks.GuardedAccountHandlerStub{}, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + TxLogsProcessor: &mock.TxLogsProcessorStub{}, EnableRoundsHandler: &testscommon.EnableRoundsHandlerStub{}, } return args @@ 
-3258,37 +3258,43 @@ func TestTxProcessor_shouldIncreaseNonce(t *testing.T) { func TestTxProcessor_AddNonExecutableLog(t *testing.T) { t.Parallel() - args := createArgsForTxProcessor() - sender := []byte("sender") - relayer := []byte("relayer") - originalTx := &transaction.Transaction{ - SndAddr: relayer, - RcvAddr: sender, - } - originalTxHash, err := core.CalculateHash(args.Marshalizer, args.Hasher, originalTx) - assert.Nil(t, err) - t.Run("not a non-executable error should not record log", func(t *testing.T) { t.Parallel() - argsLocal := args - argsLocal.TxLogsProcessor = &mock.TxLogsProcessorStub{ + args := createArgsForTxProcessor() + sender := []byte("sender") + relayer := []byte("relayer") + originalTx := &transaction.Transaction{ + SndAddr: relayer, + RcvAddr: sender, + } + originalTxHash, err := core.CalculateHash(args.Marshalizer, args.Hasher, originalTx) + assert.Nil(t, err) + args.TxLogsProcessor = &mock.TxLogsProcessorStub{ SaveLogCalled: func(txHash []byte, tx data.TransactionHandler, vmLogs []*vmcommon.LogEntry) error { assert.Fail(t, "should have not called SaveLog") return nil }, } - txProc, _ := txproc.NewTxProcessor(argsLocal) + txProc, _ := txproc.NewTxProcessor(args) err = txProc.AddNonExecutableLog(errors.New("random error"), originalTxHash, originalTx) assert.Nil(t, err) }) t.Run("is non executable tx error should record log", func(t *testing.T) { t.Parallel() - argsLocal := args + args := createArgsForTxProcessor() + sender := []byte("sender") + relayer := []byte("relayer") + originalTx := &transaction.Transaction{ + SndAddr: relayer, + RcvAddr: sender, + } + originalTxHash, err := core.CalculateHash(args.Marshalizer, args.Hasher, originalTx) + assert.Nil(t, err) numLogsSaved := 0 - argsLocal.TxLogsProcessor = &mock.TxLogsProcessorStub{ + args.TxLogsProcessor = &mock.TxLogsProcessorStub{ SaveLogCalled: func(txHash []byte, tx data.TransactionHandler, vmLogs []*vmcommon.LogEntry) error { assert.Equal(t, originalTxHash, txHash) assert.Equal(t, 
originalTx, tx) @@ -3304,7 +3310,7 @@ func TestTxProcessor_AddNonExecutableLog(t *testing.T) { }, } - txProc, _ := txproc.NewTxProcessor(argsLocal) + txProc, _ := txproc.NewTxProcessor(args) err = txProc.AddNonExecutableLog(process.ErrLowerNonceInTransaction, originalTxHash, originalTx) assert.Nil(t, err) From e3483b46d51bd3162c6d80b5368278c544f636e7 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Mon, 10 Jul 2023 10:25:42 +0300 Subject: [PATCH 32/38] fix after merge --- cmd/node/CLI.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmd/node/CLI.md b/cmd/node/CLI.md index a3b51892e1c..05a3be078c8 100644 --- a/cmd/node/CLI.md +++ b/cmd/node/CLI.md @@ -26,6 +26,7 @@ GLOBAL OPTIONS: --config-preferences [path] The [path] for the preferences configuration file. This TOML file contains preferences configurations, such as the node display name or the shard to start in when starting as observer (default: "./config/prefs.toml") --config-external [path] The [path] for the external configuration file. This TOML file contains external configurations such as ElasticSearch's URL and login information (default: "./config/external.toml") --p2p-config [path] The [path] for the p2p configuration file. This TOML file contains peer-to-peer configurations such as port, target peer count or KadDHT settings (default: "./config/p2p.toml") + --full-archive-p2p-config [path] The [path] for the p2p configuration file for the full archive network. This TOML file contains peer-to-peer configurations such as port, target peer count or KadDHT settings (default: "./config/fullArchiveP2P.toml") --epoch-config [path] The [path] for the epoch configuration file. This TOML file contains activation epochs configurations (default: "./config/enableEpochs.toml") --round-config [path] The [path] for the round configuration file. 
This TOML file contains activation round configurations (default: "./config/enableRounds.toml") --gas-costs-config [path] The [path] for the gas costs configuration directory. (default: "./config/gasSchedules") @@ -33,6 +34,7 @@ GLOBAL OPTIONS: --validator-key-pem-file filepath The filepath for the PEM file which contains the secret keys for the validator key. (default: "./config/validatorKey.pem") --all-validator-keys-pem-file filepath The filepath for the PEM file which contains all the secret keys managed by the current node. (default: "./config/allValidatorsKeys.pem") --port [p2p port] The [p2p port] number on which the application will start. Can use single values such as `0, 10230, 15670` or range of ports such as `5000-10000` (default: "0") + --full-archive-port [p2p port] The [p2p port] number on which the application will start the second network when running in full archive mode. Can use single values such as `0, 10230, 15670` or range of ports such as `5000-10000` (default: "0") --profile-mode Boolean option for enabling the profiling mode. If set, the /debug/pprof routes will be available on the node for profiling the application. --use-health-service Boolean option for enabling the health service. --storage-cleanup Boolean option for starting the node with clean storage. If set, the Node will empty its storage before starting, otherwise it will start from the last state stored on disk.. 
From 74fbb2f4e5389ff28d3dfb8214e87dc4a3f364b3 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Tue, 11 Jul 2023 18:19:45 +0300 Subject: [PATCH 33/38] updated mx-chain-communication-go to latest --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index cad6799d423..b887ce7b3bc 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-communication-go v1.0.4-0.20230627075501-f62c0874a0a6 + github.com/multiversx/mx-chain-communication-go v1.0.4-0.20230711151847-0a3789df9544 github.com/multiversx/mx-chain-core-go v1.2.8 github.com/multiversx/mx-chain-crypto-go v1.2.7 github.com/multiversx/mx-chain-es-indexer-go v1.4.5 diff --git a/go.sum b/go.sum index aa2775310b0..8212e8f2555 100644 --- a/go.sum +++ b/go.sum @@ -622,8 +622,8 @@ github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXS github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= github.com/multiversx/mx-chain-communication-go v1.0.3/go.mod h1:7oTI77XfWmRWwVEbCq+pjH5CO3mJ6vEiHGMvQv6vF3Y= -github.com/multiversx/mx-chain-communication-go v1.0.4-0.20230627075501-f62c0874a0a6 h1:WiXLEeICPQNUnoGbfo9d8JewlN2JIUYGtD4geuRb6pE= -github.com/multiversx/mx-chain-communication-go v1.0.4-0.20230627075501-f62c0874a0a6/go.mod h1:7oTI77XfWmRWwVEbCq+pjH5CO3mJ6vEiHGMvQv6vF3Y= +github.com/multiversx/mx-chain-communication-go v1.0.4-0.20230711151847-0a3789df9544 h1:E5dIUhpD4BZrxNtMiJnEoTN0RQn8XUa/e2Ago/XQ9O8= +github.com/multiversx/mx-chain-communication-go v1.0.4-0.20230711151847-0a3789df9544/go.mod h1:7oTI77XfWmRWwVEbCq+pjH5CO3mJ6vEiHGMvQv6vF3Y= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= 
github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.5/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= From 33bfd7097539e811659a6e8c1b555531354a56d0 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Thu, 13 Jul 2023 16:50:53 +0300 Subject: [PATCH 34/38] updated mx-chain-communication-go with NetworkType --- cmd/seednode/main.go | 1 + factory/network/networkComponents.go | 6 ++++-- go.mod | 2 +- go.sum | 4 ++-- p2p/constants.go | 9 +++++++++ 5 files changed, 17 insertions(+), 5 deletions(-) diff --git a/cmd/seednode/main.go b/cmd/seednode/main.go index c76756357d5..f2f354830be 100644 --- a/cmd/seednode/main.go +++ b/cmd/seednode/main.go @@ -268,6 +268,7 @@ func createNode( P2pPrivateKey: p2pKey, P2pSingleSigner: p2pSingleSigner, P2pKeyGenerator: p2pKeyGen, + NetworkType: p2p.MainNetwork, Logger: logger.GetOrCreate("seed/p2p"), } diff --git a/factory/network/networkComponents.go b/factory/network/networkComponents.go index 713cd182b5e..1ba0ba7c15f 100644 --- a/factory/network/networkComponents.go +++ b/factory/network/networkComponents.go @@ -236,6 +236,7 @@ func (ncf *networkComponentsFactory) createNetworkHolder( p2pConfig p2pConfig.P2PConfig, logger p2p.Logger, peersRatingHandler p2p.PeersRatingHandler, + networkType p2p.NetworkType, ) (networkComponentsHolder, error) { peersHolder, err := p2pFactory.NewPeersHolder(ncf.preferredPeersSlices) @@ -254,6 +255,7 @@ func (ncf *networkComponentsFactory) createNetworkHolder( P2pPrivateKey: ncf.cryptoComponents.P2pPrivateKey(), P2pSingleSigner: ncf.cryptoComponents.P2pSingleSigner(), P2pKeyGenerator: ncf.cryptoComponents.P2pKeyGen(), + NetworkType: networkType, Logger: logger, } networkMessenger, err := p2pFactory.NewNetworkMessenger(argsMessenger) @@ -269,7 +271,7 @@ func (ncf *networkComponentsFactory) createNetworkHolder( func (ncf *networkComponentsFactory) createMainNetworkHolder(peersRatingHandler 
p2p.PeersRatingHandler) (networkComponentsHolder, error) { loggerInstance := logger.GetOrCreate("main/p2p") - return ncf.createNetworkHolder(ncf.mainP2PConfig, loggerInstance, peersRatingHandler) + return ncf.createNetworkHolder(ncf.mainP2PConfig, loggerInstance, peersRatingHandler, p2p.MainNetwork) } func (ncf *networkComponentsFactory) createFullArchiveNetworkHolder(peersRatingHandler p2p.PeersRatingHandler) (networkComponentsHolder, error) { @@ -282,7 +284,7 @@ func (ncf *networkComponentsFactory) createFullArchiveNetworkHolder(peersRatingH loggerInstance := logger.GetOrCreate("full-archive/p2p") - return ncf.createNetworkHolder(ncf.fullArchiveP2PConfig, loggerInstance, peersRatingHandler) + return ncf.createNetworkHolder(ncf.fullArchiveP2PConfig, loggerInstance, peersRatingHandler, p2p.FullArchiveNetwork) } func (ncf *networkComponentsFactory) createPeersRatingComponents() (p2p.PeersRatingHandler, p2p.PeersRatingMonitor, error) { diff --git a/go.mod b/go.mod index b887ce7b3bc..a843e142b99 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-communication-go v1.0.4-0.20230711151847-0a3789df9544 + github.com/multiversx/mx-chain-communication-go v1.0.4-0.20230713134740-1099861bb66e github.com/multiversx/mx-chain-core-go v1.2.8 github.com/multiversx/mx-chain-crypto-go v1.2.7 github.com/multiversx/mx-chain-es-indexer-go v1.4.5 diff --git a/go.sum b/go.sum index 8212e8f2555..73442251d64 100644 --- a/go.sum +++ b/go.sum @@ -622,8 +622,8 @@ github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXS github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= github.com/multiversx/mx-chain-communication-go v1.0.3/go.mod h1:7oTI77XfWmRWwVEbCq+pjH5CO3mJ6vEiHGMvQv6vF3Y= 
-github.com/multiversx/mx-chain-communication-go v1.0.4-0.20230711151847-0a3789df9544 h1:E5dIUhpD4BZrxNtMiJnEoTN0RQn8XUa/e2Ago/XQ9O8= -github.com/multiversx/mx-chain-communication-go v1.0.4-0.20230711151847-0a3789df9544/go.mod h1:7oTI77XfWmRWwVEbCq+pjH5CO3mJ6vEiHGMvQv6vF3Y= +github.com/multiversx/mx-chain-communication-go v1.0.4-0.20230713134740-1099861bb66e h1:mmp7vNzX0U6ehJrOfZp3dICdCCv6nLBImKjQy1Gnq4s= +github.com/multiversx/mx-chain-communication-go v1.0.4-0.20230713134740-1099861bb66e/go.mod h1:7oTI77XfWmRWwVEbCq+pjH5CO3mJ6vEiHGMvQv6vF3Y= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.5/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= diff --git a/p2p/constants.go b/p2p/constants.go index 4f0807484b7..620339577dc 100644 --- a/p2p/constants.go +++ b/p2p/constants.go @@ -13,6 +13,15 @@ const NormalOperation = p2p.NormalOperation // FullArchiveMode defines the node operation as a full archive mode const FullArchiveMode = p2p.FullArchiveMode +// NetworkType defines the type of the network a messenger is running on +type NetworkType = p2p.NetworkType + +// MainNetwork defines the main network +const MainNetwork NetworkType = "main" + +// FullArchiveNetwork defines the full archive network +const FullArchiveNetwork NetworkType = "full archive" + // ListsSharder is the variant that uses lists const ListsSharder = p2p.ListsSharder From 8186ad17a585a8f60b77110b8ae12d189beca285 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Thu, 13 Jul 2023 17:44:10 +0300 Subject: [PATCH 35/38] updated mx-chain-communication-go --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index a843e142b99..c8f9c67cf78 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 
github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-communication-go v1.0.4-0.20230713134740-1099861bb66e + github.com/multiversx/mx-chain-communication-go v1.0.4-0.20230713144229-bd674292b50f github.com/multiversx/mx-chain-core-go v1.2.8 github.com/multiversx/mx-chain-crypto-go v1.2.7 github.com/multiversx/mx-chain-es-indexer-go v1.4.5 diff --git a/go.sum b/go.sum index 73442251d64..3708f648b10 100644 --- a/go.sum +++ b/go.sum @@ -622,8 +622,8 @@ github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXS github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= github.com/multiversx/mx-chain-communication-go v1.0.3/go.mod h1:7oTI77XfWmRWwVEbCq+pjH5CO3mJ6vEiHGMvQv6vF3Y= -github.com/multiversx/mx-chain-communication-go v1.0.4-0.20230713134740-1099861bb66e h1:mmp7vNzX0U6ehJrOfZp3dICdCCv6nLBImKjQy1Gnq4s= -github.com/multiversx/mx-chain-communication-go v1.0.4-0.20230713134740-1099861bb66e/go.mod h1:7oTI77XfWmRWwVEbCq+pjH5CO3mJ6vEiHGMvQv6vF3Y= +github.com/multiversx/mx-chain-communication-go v1.0.4-0.20230713144229-bd674292b50f h1:5sNmVZtwRm5PkgENI/y5oAv2KfFLfT24NuH0KpYfdX4= +github.com/multiversx/mx-chain-communication-go v1.0.4-0.20230713144229-bd674292b50f/go.mod h1:7oTI77XfWmRWwVEbCq+pjH5CO3mJ6vEiHGMvQv6vF3Y= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.5/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= From 8324b1f74136a12dae1a4f5646c10730dce08219 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Tue, 18 Jul 2023 17:35:10 +0300 Subject: [PATCH 36/38] update mx chain communication go --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 
insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index c8f9c67cf78..824ce80c35a 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-communication-go v1.0.4-0.20230713144229-bd674292b50f + github.com/multiversx/mx-chain-communication-go v1.0.4-0.20230718143252-e941f469ac89 github.com/multiversx/mx-chain-core-go v1.2.8 github.com/multiversx/mx-chain-crypto-go v1.2.7 github.com/multiversx/mx-chain-es-indexer-go v1.4.5 diff --git a/go.sum b/go.sum index 3708f648b10..9f98f41bafc 100644 --- a/go.sum +++ b/go.sum @@ -622,8 +622,8 @@ github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXS github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= github.com/multiversx/mx-chain-communication-go v1.0.3/go.mod h1:7oTI77XfWmRWwVEbCq+pjH5CO3mJ6vEiHGMvQv6vF3Y= -github.com/multiversx/mx-chain-communication-go v1.0.4-0.20230713144229-bd674292b50f h1:5sNmVZtwRm5PkgENI/y5oAv2KfFLfT24NuH0KpYfdX4= -github.com/multiversx/mx-chain-communication-go v1.0.4-0.20230713144229-bd674292b50f/go.mod h1:7oTI77XfWmRWwVEbCq+pjH5CO3mJ6vEiHGMvQv6vF3Y= +github.com/multiversx/mx-chain-communication-go v1.0.4-0.20230718143252-e941f469ac89 h1:+5uUKAtJ5C1eV82VEnMlFUlf725fAInGxrTVIbp13i8= +github.com/multiversx/mx-chain-communication-go v1.0.4-0.20230718143252-e941f469ac89/go.mod h1:7oTI77XfWmRWwVEbCq+pjH5CO3mJ6vEiHGMvQv6vF3Y= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.5/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= From 2f2d8fc820fee62dee2b905aead788b3db0c3699 Mon Sep 17 00:00:00 2001 
From: Sorin Stanculeanu Date: Thu, 20 Jul 2023 14:22:58 +0300 Subject: [PATCH 37/38] fix after merge --- api/groups/nodeGroup.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/groups/nodeGroup.go b/api/groups/nodeGroup.go index 4d7905a5c57..021ad389ed7 100644 --- a/api/groups/nodeGroup.go +++ b/api/groups/nodeGroup.go @@ -40,7 +40,7 @@ type nodeFacadeHandler interface { GetQueryHandler(name string) (debug.QueryHandler, error) GetEpochStartDataAPI(epoch uint32) (*common.EpochStartDataAPI, error) GetPeerInfo(pid string) ([]core.QueryP2PPeerInfo, error) - GetConnectedPeersRatingsOnMainNetwork() string + GetConnectedPeersRatingsOnMainNetwork() (string, error) GetManagedKeysCount() int GetManagedKeys() []string GetEligibleManagedKeys() ([]string, error) From b2d782243e2f008241146a0e1493618f7692b9fe Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Fri, 21 Jul 2023 10:35:03 +0300 Subject: [PATCH 38/38] updated to tag --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 1cf6c8907e5..a5c2ed8cd7b 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-communication-go v1.0.4-0.20230718143252-e941f469ac89 + github.com/multiversx/mx-chain-communication-go v1.0.4 github.com/multiversx/mx-chain-core-go v1.2.9 github.com/multiversx/mx-chain-crypto-go v1.2.7 github.com/multiversx/mx-chain-es-indexer-go v1.4.6 diff --git a/go.sum b/go.sum index 2fbe465b34a..513acfb4321 100644 --- a/go.sum +++ b/go.sum @@ -622,8 +622,8 @@ github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXS github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= github.com/multiversx/mx-chain-communication-go 
v1.0.3/go.mod h1:7oTI77XfWmRWwVEbCq+pjH5CO3mJ6vEiHGMvQv6vF3Y= -github.com/multiversx/mx-chain-communication-go v1.0.4-0.20230718143252-e941f469ac89 h1:+5uUKAtJ5C1eV82VEnMlFUlf725fAInGxrTVIbp13i8= -github.com/multiversx/mx-chain-communication-go v1.0.4-0.20230718143252-e941f469ac89/go.mod h1:7oTI77XfWmRWwVEbCq+pjH5CO3mJ6vEiHGMvQv6vF3Y= +github.com/multiversx/mx-chain-communication-go v1.0.4 h1:77DJZp1J8R9YsX61GVXVi7WNLVi4m0Z34gwgCEi6urc= +github.com/multiversx/mx-chain-communication-go v1.0.4/go.mod h1:7oTI77XfWmRWwVEbCq+pjH5CO3mJ6vEiHGMvQv6vF3Y= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.5/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ=