From bbff28a563e8d900765bef28f57cf8b7e672d5cc Mon Sep 17 00:00:00 2001 From: jules01 Date: Tue, 28 Mar 2023 18:24:42 +0300 Subject: [PATCH 01/12] - added unit tests for the node runner --- node/nodeRunner_test.go | 178 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 178 insertions(+) create mode 100644 node/nodeRunner_test.go diff --git a/node/nodeRunner_test.go b/node/nodeRunner_test.go new file mode 100644 index 00000000000..deeebe3e8f3 --- /dev/null +++ b/node/nodeRunner_test.go @@ -0,0 +1,178 @@ +//go:build !race +// +build !race + +package node + +import ( + "io/ioutil" + "os/exec" + "path" + "strings" + "syscall" + "testing" + "time" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + logger "github.com/multiversx/mx-chain-logger-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func createConfigs(tb testing.TB) *config.Configs { + tempDir := tb.TempDir() + + originalConfigsPath := "../cmd/node/config" + newConfigsPath := path.Join(tempDir, "config") + + cmd := exec.Command("cp", "-r", originalConfigsPath, newConfigsPath) + err := cmd.Run() + require.Nil(tb, err) + + newGenesisSmartContractsFilename := path.Join(newConfigsPath, "genesisSmartContracts.json") + correctTestPathInGenesisSmartContracts(tb, tempDir, newGenesisSmartContractsFilename) + + apiConfig, err := common.LoadApiConfig(path.Join(newConfigsPath, "api.toml")) + require.Nil(tb, err) + + generalConfig, err := common.LoadMainConfig(path.Join(newConfigsPath, "config.toml")) + require.Nil(tb, err) + + ratingsConfig, err := common.LoadRatingsConfig(path.Join(newConfigsPath, "ratings.toml")) + require.Nil(tb, err) + + economicsConfig, err := common.LoadEconomicsConfig(path.Join(newConfigsPath, "economics.toml")) + require.Nil(tb, err) + + prefsConfig, err := common.LoadPreferencesConfig(path.Join(newConfigsPath, "prefs.toml")) + require.Nil(tb, err) + + p2pConfig, err := common.LoadP2PConfig(path.Join(newConfigsPath, "p2p.toml")) + require.Nil(tb, err) + + externalConfig, err := common.LoadExternalConfig(path.Join(newConfigsPath, "external.toml")) + require.Nil(tb, err) + + systemSCConfig, err := common.LoadSystemSmartContractsConfig(path.Join(newConfigsPath, "systemSmartContractsConfig.toml")) + require.Nil(tb, err) + + epochConfig, err := common.LoadEpochConfig(path.Join(newConfigsPath, "enableEpochs.toml")) + require.Nil(tb, err) + + roundConfig, err := common.LoadRoundConfig(path.Join(newConfigsPath, "enableRounds.toml")) + require.Nil(tb, err) + + // make the node pass the network wait constraints + p2pConfig.Node.MinNumPeersToWaitForOnBootstrap = 0 + p2pConfig.Node.ThresholdMinConnectedPeers = 0 + + return &config.Configs{ + GeneralConfig: generalConfig, + ApiRoutesConfig: apiConfig, + EconomicsConfig: economicsConfig, + SystemSCConfig: systemSCConfig, + RatingsConfig: ratingsConfig, + PreferencesConfig: prefsConfig, + ExternalConfig: externalConfig, + P2pConfig: p2pConfig, + FlagsConfig: &config.ContextFlagsConfig{ + WorkingDir: tempDir, + NoKeyProvided: true, + Version: "test version", + }, + ImportDbConfig: &config.ImportDbConfig{}, + ConfigurationPathsHolder: &config.ConfigurationPathsHolder{ + GasScheduleDirectoryName: path.Join(newConfigsPath, "gasSchedules"), + Nodes: path.Join(newConfigsPath, "nodesSetup.json"), + Genesis: path.Join(newConfigsPath, "genesis.json"), + SmartContracts: newGenesisSmartContractsFilename, + ValidatorKey: "validatorKey.pem", + }, + EpochConfig: epochConfig, + RoundConfig: roundConfig, + } +} + 
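// [Editor's note] createConfigs above clones the real config folder by shelling
// out to `cp -r`, which quietly restricts this test suite to Unix-like hosts.
// A minimal sketch of a portable, pure-Go alternative follows; copyDirPortable
// is a hypothetical helper (not part of this patch) and assumes the "os",
// "io/ioutil", "path/filepath" and "strings" packages are imported.
func copyDirPortable(src string, dst string) error {
	// walk the source tree and mirror every directory and file under dst,
	// preserving the original permission bits
	return filepath.Walk(src, func(p string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		target := filepath.Join(dst, strings.TrimPrefix(p, src))
		if info.IsDir() {
			return os.MkdirAll(target, info.Mode().Perm())
		}
		data, errRead := ioutil.ReadFile(p)
		if errRead != nil {
			return errRead
		}
		return ioutil.WriteFile(target, data, info.Mode().Perm())
	})
}
// With such a helper, the exec.Command("cp", "-r", ...) call in createConfigs
// could become err := copyDirPortable(originalConfigsPath, newConfigsPath),
// removing the dependency on an external binary.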
+func correctTestPathInGenesisSmartContracts(tb testing.TB, tempDir string, newGenesisSmartContractsFilename string) { + input, err := ioutil.ReadFile(newGenesisSmartContractsFilename) + require.Nil(tb, err) + + lines := strings.Split(string(input), "\n") + for i, line := range lines { + if strings.Contains(line, "./config") { + lines[i] = strings.Replace(line, "./config", path.Join(tempDir, "config"), 1) + } + } + output := strings.Join(lines, "\n") + err = ioutil.WriteFile(newGenesisSmartContractsFilename, []byte(output), 0644) + require.Nil(tb, err) +} + +func TestNewNodeRunner(t *testing.T) { + t.Parallel() + + t.Run("nil configs should error", func(t *testing.T) { + t.Parallel() + + expectedErrorString := "nil configs provided" + runner, err := NewNodeRunner(nil) + assert.Nil(t, runner) + assert.Equal(t, expectedErrorString, err.Error()) + }) + t.Run("with valid configs should work", func(t *testing.T) { + t.Parallel() + + configs := createConfigs(t) + runner, err := NewNodeRunner(configs) + assert.NotNil(t, runner) + assert.Nil(t, err) + }) +} + +type applicationRunningTrigger struct { + chanClose chan struct{} +} + +func newApplicationRunningTrigger() *applicationRunningTrigger { + return &applicationRunningTrigger{ + chanClose: make(chan struct{}), + } +} + +func (trigger *applicationRunningTrigger) Write(p []byte) (n int, err error) { + if strings.Contains(string(p), "application is now running") { + log.Info("got signal, trying to gracefully close the node") + close(trigger.chanClose) + } + + return 0, nil +} + +func TestNodeRunner_StartAndCloseNode(t *testing.T) { + t.Parallel() + + configs := createConfigs(t) + runner, _ := NewNodeRunner(configs) + + trigger := newApplicationRunningTrigger() + err := logger.AddLogObserver(trigger, &logger.PlainFormatter{}) + require.Nil(t, err) + + // start a go routine that will send the SIGINT message after 1 minute + go func() { + timeout := time.Minute * 5 + select { + case <-trigger.chanClose: + case <-time.After(timeout): + require.Fail(t, "timeout waiting for application to start") + } + time.Sleep(time.Second) + + log.Info("sending SIGINT to self") + errKill := syscall.Kill(syscall.Getpid(), syscall.SIGINT) + assert.Nil(t, errKill) + }() + + err = runner.Start() + assert.Nil(t, err) +} From 786af3b3bc7e52921bfa879e624985044cb14316 Mon Sep 17 00:00:00 2001 From: jules01 Date: Tue, 28 Mar 2023 19:52:33 +0300 Subject: [PATCH 02/12] - added unit tests for the node.go --- node/node_test.go | 205 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 203 insertions(+), 2 deletions(-) diff --git a/node/node_test.go b/node/node_test.go index c1d1b47a4a4..22e884cecb2 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -41,6 +41,7 @@ import ( nodeMockFactory "github.com/multiversx/mx-chain-go/node/mock/factory" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/bootstrapMocks" dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" @@ -53,7 +54,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" - "github.com/multiversx/mx-chain-go/testscommon/storage" + mockStorage "github.com/multiversx/mx-chain-go/testscommon/storage" trieMock 
"github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/testscommon/txsSenderMock" "github.com/multiversx/mx-chain-go/vm/systemSmartContracts" @@ -4142,6 +4143,206 @@ func TestNode_GetHeartbeats(t *testing.T) { assert.True(t, sameMessages(providedMessages, receivedMessages)) } +func TestNode_Getters(t *testing.T) { + t.Parallel() + + coreComponents := getDefaultCoreComponents() + statusCoreComponents := &factoryTests.StatusCoreComponentsStub{ + AppStatusHandlerField: &statusHandlerMock.AppStatusHandlerStub{}, + } + cryptoComponents := getDefaultCryptoComponents() + stateComponents := getDefaultStateComponents() + bootstrapComponents := getDefaultBootstrapComponents() + dataComponents := getDefaultDataComponents() + heartbeatComponents := &factoryMock.HeartbeatV2ComponentsStub{} + networkComponents := getDefaultNetworkComponents() + processComponents := getDefaultProcessComponents() + consensusGroupSize := 10 + + n, err := node.NewNode( + node.WithCoreComponents(coreComponents), + node.WithStatusCoreComponents(statusCoreComponents), + node.WithCryptoComponents(cryptoComponents), + node.WithStateComponents(stateComponents), + node.WithBootstrapComponents(bootstrapComponents), + node.WithDataComponents(dataComponents), + node.WithHeartbeatV2Components(heartbeatComponents), + node.WithNetworkComponents(networkComponents), + node.WithProcessComponents(processComponents), + node.WithConsensusGroupSize(consensusGroupSize), + node.WithImportMode(true), + ) + require.Nil(t, err) + + //pointer testing + assert.True(t, n.GetCoreComponents() == coreComponents) + assert.True(t, n.GetStatusCoreComponents() == statusCoreComponents) + assert.True(t, n.GetCryptoComponents() == cryptoComponents) + assert.True(t, n.GetStateComponents() == stateComponents) + assert.True(t, n.GetBootstrapComponents() == bootstrapComponents) + assert.True(t, n.GetDataComponents() == dataComponents) + assert.True(t, n.GetHeartbeatV2Components() == heartbeatComponents) + assert.True(t, n.GetNetworkComponents() == networkComponents) + assert.True(t, n.GetProcessComponents() == processComponents) + assert.Equal(t, consensusGroupSize, n.GetConsensusGroupSize()) + assert.True(t, n.IsInImportMode()) +} + +func TestNode_GetEpochStartDataAPI(t *testing.T) { + t.Parallel() + + prevHash := []byte("prevHash") + rootHash := []byte("rootHash") + accumulatedFees := big.NewInt(100) + developerFees := big.NewInt(200) + + dataComponents := getDefaultDataComponents() + blockchain := dataComponents.BlockChain.(*testscommon.ChainHandlerStub) + timestamp := uint64(778899) + shardID := uint32(2) + blockchain.GetGenesisHeaderCalled = func() data.HeaderHandler { + return &block.Header{ + TimeStamp: timestamp, + ShardID: shardID, + PrevHash: prevHash, + RootHash: rootHash, + AccumulatedFees: accumulatedFees, + DeveloperFees: developerFees, + } + } + + bootstrapComponents := getDefaultBootstrapComponents() + shardCoordinator := bootstrapComponents.ShardCoordinator().(*mock.ShardCoordinatorMock) + + coreComponents := getDefaultCoreComponents() + + n, _ := node.NewNode( + node.WithCoreComponents(coreComponents), + node.WithDataComponents(dataComponents), + node.WithBootstrapComponents(bootstrapComponents), + ) + epoch := uint32(37) + nonce := uint64(112233) + round := uint64(445566) + + t.Run("genesis block should work", func(t *testing.T) { + result, err := n.GetEpochStartDataAPI(0) + assert.Nil(t, err) + expectedResult := &common.EpochStartDataAPI{ + Nonce: 0, + Round: 0, + Timestamp: int64(timestamp), + Epoch: 0, + Shard: 
shardID, + PrevBlockHash: hex.EncodeToString(prevHash), + StateRootHash: hex.EncodeToString(rootHash), + ScheduledRootHash: "", + AccumulatedFees: accumulatedFees.String(), + DeveloperFees: developerFees.String(), + } + assert.Equal(t, expectedResult, result) + }) + t.Run("should work for metachain", func(t *testing.T) { + shardCoordinator.SelfShardId = core.MetachainShardId + + returnedHeader := &block.MetaBlock{ + Nonce: nonce, + Epoch: epoch, + Round: round, + TimeStamp: timestamp, + PrevHash: prevHash, + RootHash: rootHash, + AccumulatedFees: accumulatedFees, + DeveloperFees: developerFees, + } + + headerBytes, err := coreComponents.IntMarsh.Marshal(returnedHeader) + require.Nil(t, err) + + unit := &mockStorage.StorerStub{ + GetFromEpochCalled: func(key []byte, epoch uint32) ([]byte, error) { + expectedIdentifier := core.EpochStartIdentifier(epoch) + require.Equal(t, expectedIdentifier, string(key)) + + return headerBytes, nil + }, + } + + storageService := dataComponents.StorageService().(*mockStorage.ChainStorerStub) + storageService.GetStorerCalled = func(unitType dataRetriever.UnitType) (storage.Storer, error) { + require.Equal(t, dataRetriever.MetaBlockUnit, unitType) + return unit, nil + } + + result, err := n.GetEpochStartDataAPI(epoch) + assert.Nil(t, err) + + expectedResult := &common.EpochStartDataAPI{ + Nonce: nonce, + Round: round, + Timestamp: int64(timestamp), + Epoch: epoch, + Shard: core.MetachainShardId, + PrevBlockHash: hex.EncodeToString(prevHash), + StateRootHash: hex.EncodeToString(rootHash), + ScheduledRootHash: "", + AccumulatedFees: accumulatedFees.String(), + DeveloperFees: developerFees.String(), + } + assert.Equal(t, expectedResult, result) + }) + t.Run("should work for shard chain", func(t *testing.T) { + shardCoordinator.SelfShardId = 0 + + returnedHeader := &block.Header{ + Nonce: nonce, + Epoch: epoch, + Round: round, + ShardID: shardID, + TimeStamp: timestamp, + PrevHash: prevHash, + RootHash: rootHash, + AccumulatedFees: accumulatedFees, + DeveloperFees: developerFees, + } + + headerBytes, err := coreComponents.IntMarsh.Marshal(returnedHeader) + require.Nil(t, err) + + unit := &mockStorage.StorerStub{ + GetFromEpochCalled: func(key []byte, epoch uint32) ([]byte, error) { + expectedIdentifier := core.EpochStartIdentifier(epoch) + require.Equal(t, expectedIdentifier, string(key)) + + return headerBytes, nil + }, + } + + storageService := dataComponents.StorageService().(*mockStorage.ChainStorerStub) + storageService.GetStorerCalled = func(unitType dataRetriever.UnitType) (storage.Storer, error) { + require.Equal(t, dataRetriever.BlockHeaderUnit, unitType) + return unit, nil + } + + result, err := n.GetEpochStartDataAPI(epoch) + assert.Nil(t, err) + + expectedResult := &common.EpochStartDataAPI{ + Nonce: nonce, + Round: round, + Timestamp: int64(timestamp), + Epoch: epoch, + Shard: shardID, + PrevBlockHash: hex.EncodeToString(prevHash), + StateRootHash: hex.EncodeToString(rootHash), + ScheduledRootHash: "", + AccumulatedFees: accumulatedFees.String(), + DeveloperFees: developerFees.String(), + } + assert.Equal(t, expectedResult, result) + }) +} + func createMockHeartbeatV2Components(providedMessages []heartbeatData.PubKeyHeartbeat) *factoryMock.HeartbeatV2ComponentsStub { heartbeatV2Components := &factoryMock.HeartbeatV2ComponentsStub{} heartbeatV2Components.MonitorField = &integrationTestsMock.HeartbeatMonitorStub{ @@ -4284,7 +4485,7 @@ func getDefaultDataComponents() *nodeMockFactory.DataComponentsMock { return &nodeMockFactory.DataComponentsMock{ 
BlockChain: chainHandler, - Store: &storage.ChainStorerStub{}, + Store: &mockStorage.ChainStorerStub{}, DataPool: &dataRetrieverMock.PoolsHolderMock{}, MbProvider: &mock.MiniBlocksProviderStub{}, } From bae8fe113ff8818f69cc96ad1a6d9b83cabcfb7c Mon Sep 17 00:00:00 2001 From: jules01 Date: Tue, 28 Mar 2023 20:55:17 +0300 Subject: [PATCH 03/12] - added tests for metrics.go --- node/metrics/metrics_test.go | 141 +++++++++++++++++++++++++++++++++++ 1 file changed, 141 insertions(+) diff --git a/node/metrics/metrics_test.go b/node/metrics/metrics_test.go index cabb8674c14..6d6bd316a34 100644 --- a/node/metrics/metrics_test.go +++ b/node/metrics/metrics_test.go @@ -5,9 +5,12 @@ import ( "strconv" "testing" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -329,3 +332,141 @@ func TestInitRatingsMetrics(t *testing.T) { assert.Equal(t, v, keys[k]) } } + +func TestInitMetrics(t *testing.T) { + t.Parallel() + + appStatusHandler := &statusHandler.AppStatusHandlerStub{} + pubkeyString := "pub key" + nodeType := core.NodeTypeValidator + shardCoordinator := &testscommon.ShardsCoordinatorMock{ + NoShards: 3, + SelfIDCalled: func() uint32 { + return 0 + }, + } + nodesSetup := &testscommon.NodesSetupStub{ + GetShardConsensusGroupSizeCalled: func() uint32 { + return 63 + }, + GetMetaConsensusGroupSizeCalled: func() uint32 { + return 400 + }, + GetRoundDurationCalled: func() uint64 { + return 6000 + }, + MinNumberOfMetaNodesCalled: func() uint32 { + return 401 + }, + MinNumberOfShardNodesCalled: func() uint32 { + return 402 + }, + InitialNodesInfoCalled: func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) { + validators := map[uint32][]nodesCoordinator.GenesisNodeInfoHandler{ + 0: { + &shardingMocks.NodeInfoMock{}, + &shardingMocks.NodeInfoMock{}, + }, + 1: { + &shardingMocks.NodeInfoMock{}, + }, + } + + return validators, make(map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) + }, + GetStartTimeCalled: func() int64 { + return 111111 + }, + } + version := "version" + economicsConfigs := &config.EconomicsConfig{ + RewardsSettings: config.RewardsSettings{ + RewardsConfigByEpoch: []config.EpochRewardSettings{ + { + LeaderPercentage: 2, + }, + }, + }, + GlobalSettings: config.GlobalSettings{ + Denomination: 4, + }, + } + roundsPerEpoch := int64(200) + minTransactionVersion := uint32(1) + + t.Run("nil app status handler should error", func(t *testing.T) { + t.Parallel() + + err := InitMetrics(nil, pubkeyString, nodeType, shardCoordinator, nodesSetup, version, economicsConfigs, roundsPerEpoch, minTransactionVersion) + assert.Equal(t, ErrNilAppStatusHandler, err) + }) + t.Run("nil shard coordinator should error", func(t *testing.T) { + t.Parallel() + + expectedErrorString := "nil shard coordinator when initializing metrics" + err := InitMetrics(appStatusHandler, pubkeyString, nodeType, nil, nodesSetup, version, economicsConfigs, roundsPerEpoch, minTransactionVersion) + assert.Equal(t, expectedErrorString, err.Error()) + }) + t.Run("nil nodes configs should error", func(t *testing.T) { + t.Parallel() + + expectedErrorString := "nil nodes config when 
initializing metrics" + err := InitMetrics(appStatusHandler, pubkeyString, nodeType, shardCoordinator, nil, version, economicsConfigs, roundsPerEpoch, minTransactionVersion) + assert.Equal(t, expectedErrorString, err.Error()) + }) + t.Run("nil economics configs should error", func(t *testing.T) { + t.Parallel() + + expectedErrorString := "nil economics config when initializing metrics" + err := InitMetrics(appStatusHandler, pubkeyString, nodeType, shardCoordinator, nodesSetup, version, nil, roundsPerEpoch, minTransactionVersion) + assert.Equal(t, expectedErrorString, err.Error()) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + keys := make(map[string]interface{}) + localStatusHandler := &statusHandler.AppStatusHandlerStub{ + SetUInt64ValueHandler: func(key string, value uint64) { + keys[key] = value + }, + SetStringValueHandler: func(key string, value string) { + keys[key] = value + }, + } + + err := InitMetrics(localStatusHandler, pubkeyString, nodeType, shardCoordinator, nodesSetup, version, economicsConfigs, roundsPerEpoch, minTransactionVersion) + assert.Nil(t, err) + + expectedValues := map[string]interface{}{ + common.MetricPublicKeyBlockSign: pubkeyString, + common.MetricShardId: uint64(shardCoordinator.SelfId()), + common.MetricNumShardsWithoutMetachain: uint64(shardCoordinator.NoShards), + common.MetricNodeType: string(nodeType), + common.MetricRoundTime: uint64(6), + common.MetricAppVersion: version, + common.MetricRoundsPerEpoch: uint64(roundsPerEpoch), + common.MetricCrossCheckBlockHeight: "0", + common.MetricCrossCheckBlockHeight + "_0": uint64(0), + common.MetricCrossCheckBlockHeight + "_1": uint64(0), + common.MetricCrossCheckBlockHeight + "_2": uint64(0), + common.MetricCrossCheckBlockHeightMeta: uint64(0), + common.MetricIsSyncing: uint64(1), + common.MetricLeaderPercentage: fmt.Sprintf("%f", 2.0), + common.MetricDenomination: uint64(4), + common.MetricShardConsensusGroupSize: uint64(63), + common.MetricMetaConsensusGroupSize: uint64(400), + common.MetricNumNodesPerShard: uint64(402), + common.MetricNumMetachainNodes: uint64(401), + common.MetricStartTime: uint64(111111), + common.MetricRoundDuration: uint64(6000), + common.MetricMinTransactionVersion: uint64(1), + common.MetricNumValidators: uint64(2), + common.MetricConsensusGroupSize: uint64(63), + } + + assert.Equal(t, len(expectedValues), len(keys)) + for k, v := range expectedValues { + assert.Equal(t, v, keys[k], fmt.Sprintf("for key %s", k)) + } + }) +} From 25259326ecdc91594391c767ccc24b3135fd9ed2 Mon Sep 17 00:00:00 2001 From: jules01 Date: Wed, 29 Mar 2023 15:59:41 +0300 Subject: [PATCH 04/12] - added more tests, refactored existing code --- node/metrics/metrics.go | 14 ++ node/metrics/metrics_test.go | 44 ++++ node/mock/applicationRunningTrigger.go | 34 +++ node/nodeRunner.go | 63 ++--- node/nodeRunner_test.go | 303 +++++++++++++++++++++++-- 5 files changed, 406 insertions(+), 52 deletions(-) create mode 100644 node/mock/applicationRunningTrigger.go diff --git a/node/metrics/metrics.go b/node/metrics/metrics.go index 2071f37cf04..df9ea699f06 100644 --- a/node/metrics/metrics.go +++ b/node/metrics/metrics.go @@ -2,6 +2,7 @@ package metrics import ( "fmt" + "runtime/debug" "sort" "strconv" @@ -10,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/sharding" + logger "github.com/multiversx/mx-chain-logger-go" ) const millisecondsInSecond = 1000 @@ -18,6 +20,8 @@ const initInt = int64(0) const initString = 
"" const initZeroString = "0" +var log = logger.GetOrCreate("node/metrics") + // InitBaseMetrics will initialize base, default metrics to 0 values func InitBaseMetrics(appStatusHandler core.AppStatusHandler) error { if check.IfNil(appStatusHandler) { @@ -271,10 +275,20 @@ func InitMetrics( // SaveUint64Metric will save an uint64 metric in status handler func SaveUint64Metric(ash core.AppStatusHandler, key string, value uint64) { + if check.IfNil(ash) { + log.Error("programming error: nil AppStatusHandler in SaveUint64Metric", "stack", string(debug.Stack())) + return + } + ash.SetUInt64Value(key, value) } // SaveStringMetric will save a string metric in status handler func SaveStringMetric(ash core.AppStatusHandler, key, value string) { + if check.IfNil(ash) { + log.Error("programming error: nil AppStatusHandler in SaveStringMetric", "stack", string(debug.Stack())) + return + } + ash.SetStringValue(key, value) } diff --git a/node/metrics/metrics_test.go b/node/metrics/metrics_test.go index 6d6bd316a34..0e4e8ea0b48 100644 --- a/node/metrics/metrics_test.go +++ b/node/metrics/metrics_test.go @@ -470,3 +470,47 @@ func TestInitMetrics(t *testing.T) { } }) } + +func TestSaveStringMetric(t *testing.T) { + t.Parallel() + + t.Run("should not panic if appStatusHandler is nil", func(t *testing.T) { + assert.NotPanics(t, func() { + SaveStringMetric(nil, "key", "value") + }) + }) + t.Run("should work", func(t *testing.T) { + wasCalled := false + appStatusHandler := &statusHandler.AppStatusHandlerStub{ + SetStringValueHandler: func(key string, value string) { + wasCalled = true + assert.Equal(t, "key", key) + assert.Equal(t, "value", value) + }, + } + SaveStringMetric(appStatusHandler, "key", "value") + assert.True(t, wasCalled) + }) +} + +func TestSaveUint64Metric(t *testing.T) { + t.Parallel() + + t.Run("should not panic if appStatusHandler is nil", func(t *testing.T) { + assert.NotPanics(t, func() { + SaveUint64Metric(nil, "key", 1) + }) + }) + t.Run("should work", func(t *testing.T) { + wasCalled := false + appStatusHandler := &statusHandler.AppStatusHandlerStub{ + SetUInt64ValueHandler: func(key string, value uint64) { + wasCalled = true + assert.Equal(t, "key", key) + assert.Equal(t, uint64(1), value) + }, + } + SaveUint64Metric(appStatusHandler, "key", 1) + assert.True(t, wasCalled) + }) +} diff --git a/node/mock/applicationRunningTrigger.go b/node/mock/applicationRunningTrigger.go new file mode 100644 index 00000000000..1ab9b23ccf4 --- /dev/null +++ b/node/mock/applicationRunningTrigger.go @@ -0,0 +1,34 @@ +package mock + +import ( + "strings" + + logger "github.com/multiversx/mx-chain-logger-go" +) + +var log = logger.GetOrCreate("node/mock") + +type applicationRunningTrigger struct { + chanClose chan struct{} +} + +// NewApplicationRunningTrigger - +func NewApplicationRunningTrigger() *applicationRunningTrigger { + return &applicationRunningTrigger{ + chanClose: make(chan struct{}), + } +} + +func (trigger *applicationRunningTrigger) Write(p []byte) (n int, err error) { + if strings.Contains(string(p), "application is now running") { + log.Info("got signal, trying to gracefully close the node") + close(trigger.chanClose) + } + + return 0, nil +} + +// ChanClose - +func (trigger *applicationRunningTrigger) ChanClose() chan struct{} { + return trigger.chanClose +} diff --git a/node/nodeRunner.go b/node/nodeRunner.go index 7bbc5941cc1..d8e313d6394 100644 --- a/node/nodeRunner.go +++ b/node/nodeRunner.go @@ -66,6 +66,8 @@ import ( logger "github.com/multiversx/mx-chain-logger-go" ) +type 
nextOperationForNode int + const ( // TODO: remove this after better handling VM versions switching // delayBeforeScQueriesStart represents the delay before the sc query processor should start to allow external queries @@ -74,6 +76,9 @@ const ( maxTimeToClose = 10 * time.Second // SoftRestartMessage is the custom message used when the node does a soft restart operation SoftRestartMessage = "Shuffled out - soft restart" + + nextOperationShouldRestart nextOperationForNode = 1 + nextOperationShouldStop nextOperationForNode = 2 ) // nodeRunner holds the node runner configuration and controls running of a node @@ -509,7 +514,7 @@ func (nr *nodeRunner) executeOneComponentCreationCycle( allowExternalVMQueriesChan := make(chan struct{}) log.Debug("updating the API service after creating the node facade") - ef, err := nr.createApiFacade(currentNode, webServerHandler, gasScheduleNotifier, allowExternalVMQueriesChan) + facadeInstance, err := nr.createApiFacade(currentNode, webServerHandler, gasScheduleNotifier, allowExternalVMQueriesChan) if err != nil { return true, err } @@ -526,20 +531,17 @@ func (nr *nodeRunner) executeOneComponentCreationCycle( sigs := make(chan os.Signal, 1) signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) - err = waitForSignal( + nextOperation := waitForSignal( sigs, managedCoreComponents.ChanStopNodeProcess(), healthService, - ef, + facadeInstance, webServerHandler, currentNode, goRoutinesNumberStart, ) - if err != nil { - return true, nil - } - return false, nil + return nextOperation == nextOperationShouldStop, nil } func addSyncersToAccountsDB( @@ -923,11 +925,11 @@ func waitForSignal( sigs chan os.Signal, chanStopNodeProcess chan endProcess.ArgEndProcess, healthService closing.Closer, - ef closing.Closer, + facade closing.Closer, httpServer shared.UpgradeableHttpServerHandler, currentNode *Node, goRoutinesNumberStart int, -) error { +) nextOperationForNode { var sig endProcess.ArgEndProcess reshuffled := false wrongConfig := false @@ -949,7 +951,7 @@ func waitForSignal( chanCloseComponents := make(chan struct{}) go func() { - closeAllComponents(healthService, ef, httpServer, currentNode, chanCloseComponents) + closeAllComponents(healthService, facade, httpServer, currentNode, chanCloseComponents) }() select { @@ -960,14 +962,14 @@ func waitForSignal( "error", "closeAllComponents did not finish on time", "stack", goroutines.GetGoRoutines()) - return fmt.Errorf("did NOT close all components gracefully") + return nextOperationShouldStop } if wrongConfig { // hang the node's process because it cannot continue with the current configuration and a restart doesn't // change this behaviour for { - log.Error("wrong configuration. stopped processing", "description", wrongConfigDescription) + log.Error("wrong configuration. 
stopped the processing and left the node unclosed", "description", wrongConfigDescription) time.Sleep(1 * time.Minute) } } @@ -976,10 +978,10 @@ func waitForSignal( log.Info("=============================" + SoftRestartMessage + "==================================") core.DumpGoRoutinesToLog(goRoutinesNumberStart, log) - return nil + return nextOperationShouldRestart } - return fmt.Errorf("not reshuffled, closing") + return nextOperationShouldStop } func (nr *nodeRunner) logInformation( @@ -1080,18 +1082,19 @@ func (nr *nodeRunner) logSessionInformation( statsFolder, configurationPaths.GasScheduleDirectoryName, []string{ + configurationPaths.ApiRoutes, configurationPaths.MainConfig, configurationPaths.Economics, - configurationPaths.Ratings, - configurationPaths.Preferences, - configurationPaths.P2p, + configurationPaths.Epoch, + configurationPaths.RoundActivation, + configurationPaths.External, configurationPaths.Genesis, + configurationPaths.SmartContracts, configurationPaths.Nodes, - configurationPaths.ApiRoutes, - configurationPaths.External, + configurationPaths.P2p, + configurationPaths.Preferences, + configurationPaths.Ratings, configurationPaths.SystemSC, - configurationPaths.RoundActivation, - configurationPaths.Epoch, }) statsFile := filepath.Join(statsFolder, "session.info") @@ -1572,11 +1575,12 @@ func cleanupStorageIfNecessary(workingDir string, cleanupStorage bool) error { return os.RemoveAll(dbPath) } -func copyConfigToStatsFolder(statsFolder string, gasScheduleFolder string, configs []string) { +func copyConfigToStatsFolder(statsFolder string, gasScheduleDirectory string, configs []string) { err := os.MkdirAll(statsFolder, os.ModePerm) log.LogIfError(err) - err = copyDirectory(gasScheduleFolder, statsFolder) + newGasScheduleDirectory := path.Join(statsFolder, filepath.Base(gasScheduleDirectory)) + err = copyDirectory(gasScheduleDirectory, newGasScheduleDirectory) log.LogIfError(err) for _, configFile := range configs { @@ -1584,7 +1588,6 @@ func copyConfigToStatsFolder(statsFolder string, gasScheduleFolder string, confi } } -// TODO: add some unit tests func copyDirectory(source string, destination string) error { fileDescriptors, err := ioutil.ReadDir(source) if err != nil { @@ -1603,21 +1606,21 @@ func copyDirectory(source string, destination string) error { for _, fd := range fileDescriptors { srcFilePath := path.Join(source, fd.Name()) - dstFilePath := path.Join(destination, fd.Name()) if fd.IsDir() { + dstFilePath := path.Join(destination, filepath.Base(srcFilePath)) err = copyDirectory(srcFilePath, dstFilePath) log.LogIfError(err) } else { - copySingleFile(dstFilePath, srcFilePath) + copySingleFile(destination, srcFilePath) } } return nil } -func copySingleFile(folder string, configFile string) { - fileName := filepath.Base(configFile) +func copySingleFile(destinationDirectory string, sourceFile string) { + fileName := filepath.Base(sourceFile) - source, err := core.OpenFile(configFile) + source, err := core.OpenFile(sourceFile) if err != nil { return } @@ -1628,7 +1631,7 @@ func copySingleFile(folder string, configFile string) { } }() - destPath := filepath.Join(folder, fileName) + destPath := filepath.Join(destinationDirectory, fileName) destination, err := os.Create(destPath) if err != nil { return diff --git a/node/nodeRunner_test.go b/node/nodeRunner_test.go index deeebe3e8f3..95de455f940 100644 --- a/node/nodeRunner_test.go +++ b/node/nodeRunner_test.go @@ -5,6 +5,7 @@ package node import ( "io/ioutil" + "os" "os/exec" "path" "strings" @@ -12,8 +13,11 @@ import ( 
"testing" "time" + "github.com/multiversx/mx-chain-core-go/data/endProcess" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/node/mock" + "github.com/multiversx/mx-chain-go/testscommon/api" logger "github.com/multiversx/mx-chain-logger-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -129,32 +133,13 @@ func TestNewNodeRunner(t *testing.T) { }) } -type applicationRunningTrigger struct { - chanClose chan struct{} -} - -func newApplicationRunningTrigger() *applicationRunningTrigger { - return &applicationRunningTrigger{ - chanClose: make(chan struct{}), - } -} - -func (trigger *applicationRunningTrigger) Write(p []byte) (n int, err error) { - if strings.Contains(string(p), "application is now running") { - log.Info("got signal, trying to gracefully close the node") - close(trigger.chanClose) - } - - return 0, nil -} - -func TestNodeRunner_StartAndCloseNode(t *testing.T) { +func TestNodeRunner_StartAndCloseNodeWithShuffleOut(t *testing.T) { t.Parallel() configs := createConfigs(t) runner, _ := NewNodeRunner(configs) - trigger := newApplicationRunningTrigger() + trigger := mock.NewApplicationRunningTrigger() err := logger.AddLogObserver(trigger, &logger.PlainFormatter{}) require.Nil(t, err) @@ -162,7 +147,7 @@ func TestNodeRunner_StartAndCloseNode(t *testing.T) { go func() { timeout := time.Minute * 5 select { - case <-trigger.chanClose: + case <-trigger.ChanClose(): case <-time.After(timeout): require.Fail(t, "timeout waiting for application to start") } @@ -176,3 +161,277 @@ func TestNodeRunner_StartAndCloseNode(t *testing.T) { err = runner.Start() assert.Nil(t, err) } + +func TestCopyDirectory(t *testing.T) { + t.Parallel() + + file1Name := "file1.toml" + file1Contents := []byte("file1") + file2Name := "file2.toml" + file2Contents := []byte("file2") + file3Name := "file3.toml" + file3Contents := []byte("file3") + file4Name := "file4.toml" + file4Contents := []byte("file4") + + tempDir := t.TempDir() + + // generating dummy structure like + // file1 + // src + // +- file2 + // +- dir1 + // +- file3 + // +- dir2 + // +- file4 + + err := ioutil.WriteFile(path.Join(tempDir, file1Name), file1Contents, os.ModePerm) + require.Nil(t, err) + err = os.MkdirAll(path.Join(tempDir, "src", "dir1"), os.ModePerm) + require.Nil(t, err) + err = os.MkdirAll(path.Join(tempDir, "src", "dir2"), os.ModePerm) + require.Nil(t, err) + err = ioutil.WriteFile(path.Join(tempDir, "src", file2Name), file2Contents, os.ModePerm) + require.Nil(t, err) + err = ioutil.WriteFile(path.Join(tempDir, "src", "dir1", file3Name), file3Contents, os.ModePerm) + require.Nil(t, err) + err = ioutil.WriteFile(path.Join(tempDir, "src", "dir2", file4Name), file4Contents, os.ModePerm) + require.Nil(t, err) + + err = copyDirectory(path.Join(tempDir, "src"), path.Join(tempDir, "dst")) + require.Nil(t, err) + copySingleFile(path.Join(tempDir, "dst"), path.Join(tempDir, file1Name)) + + // after copy, check that the files are the same + buff, err := ioutil.ReadFile(path.Join(tempDir, "dst", file1Name)) + require.Nil(t, err) + assert.Equal(t, file1Contents, buff) + + buff, err = ioutil.ReadFile(path.Join(tempDir, "dst", file2Name)) + require.Nil(t, err) + assert.Equal(t, file2Contents, buff) + + buff, err = ioutil.ReadFile(path.Join(tempDir, "dst", "dir1", file3Name)) + require.Nil(t, err) + assert.Equal(t, file3Contents, buff) + + buff, err = ioutil.ReadFile(path.Join(tempDir, "dst", "dir2", file4Name)) + require.Nil(t, err) + assert.Equal(t, 
file4Contents, buff) +} + +func TestWaitForSignal(t *testing.T) { + t.Parallel() + + closedCalled := make(map[string]struct{}) + healthServiceClosableComponent := &mock.CloserStub{ + CloseCalled: func() error { + closedCalled["healthService"] = struct{}{} + return nil + }, + } + facadeClosableComponent := &mock.CloserStub{ + CloseCalled: func() error { + closedCalled["facade"] = struct{}{} + return nil + }, + } + httpClosableComponent := &api.UpgradeableHttpServerHandlerStub{ + CloseCalled: func() error { + closedCalled["http"] = struct{}{} + return nil + }, + } + internalNodeClosableComponent1 := &mock.CloserStub{ + CloseCalled: func() error { + closedCalled["node closable component 1"] = struct{}{} + return nil + }, + } + internalNodeClosableComponent2 := &mock.CloserStub{ + CloseCalled: func() error { + closedCalled["node closable component 2"] = struct{}{} + return nil + }, + } + n, _ := NewNode() + n.closableComponents = append(n.closableComponents, internalNodeClosableComponent1) + n.closableComponents = append(n.closableComponents, internalNodeClosableComponent2) + + // do not run these tests in parallel as they are using the same map + t.Run("should return nextOperationShouldStop if SIGINT is received", func(t *testing.T) { + closedCalled = make(map[string]struct{}) + stopChan := make(chan endProcess.ArgEndProcess) + sigs := make(chan os.Signal, 1) + + go func() { + time.Sleep(time.Millisecond * 100) // wait for the waitForSignal to start + sigs <- syscall.SIGINT + }() + + nextOperation := waitForSignal( + sigs, + stopChan, + healthServiceClosableComponent, + facadeClosableComponent, + httpClosableComponent, + n, + 1, + ) + + assert.Equal(t, nextOperationShouldStop, nextOperation) + checkCloseCalledMap(t, closedCalled) + }) + t.Run("should return nextOperationShouldRestart if shuffled out is received", func(t *testing.T) { + closedCalled = make(map[string]struct{}) + stopChan := make(chan endProcess.ArgEndProcess, 1) + sigs := make(chan os.Signal) + + go func() { + time.Sleep(time.Millisecond * 100) // wait for the waitForSignal to start + stopChan <- endProcess.ArgEndProcess{ + Reason: common.ShuffledOut, + Description: "test", + } + }() + + nextOperation := waitForSignal( + sigs, + stopChan, + healthServiceClosableComponent, + facadeClosableComponent, + httpClosableComponent, + n, + 1, + ) + + assert.Equal(t, nextOperationShouldRestart, nextOperation) + checkCloseCalledMap(t, closedCalled) + }) + t.Run("wrong configuration should not stop the node", func(t *testing.T) { + closedCalled = make(map[string]struct{}) + stopChan := make(chan endProcess.ArgEndProcess, 1) + sigs := make(chan os.Signal) + + go func() { + time.Sleep(time.Millisecond * 100) // wait for the waitForSignal to start + stopChan <- endProcess.ArgEndProcess{ + Reason: common.WrongConfiguration, + Description: "test", + } + }() + + functionFinished := make(chan struct{}) + go func() { + _ = waitForSignal( + sigs, + stopChan, + healthServiceClosableComponent, + facadeClosableComponent, + httpClosableComponent, + n, + 1, + ) + close(functionFinished) + }() + + select { + case <-functionFinished: + assert.Fail(t, "function should not have finished") + case <-time.After(maxTimeToClose + time.Second*2): + // ok, timeout reached, function did not finish + } + + checkCloseCalledMap(t, closedCalled) + }) + + delayedComponent := &mock.CloserStub{ + CloseCalled: func() error { + time.Sleep(time.Minute) + return nil + }, + } + n.closableComponents = append(n.closableComponents, delayedComponent) + + t.Run("force closing the 
node when SIGINT is received", func(t *testing.T) { + closedCalled = make(map[string]struct{}) + stopChan := make(chan endProcess.ArgEndProcess) + sigs := make(chan os.Signal, 1) + + go func() { + time.Sleep(time.Millisecond * 100) // wait for the waitForSignal to start + sigs <- syscall.SIGINT + }() + + nextOperation := waitForSignal( + sigs, + stopChan, + healthServiceClosableComponent, + facadeClosableComponent, + httpClosableComponent, + n, + 1, + ) + + // these exceptions appear because the delayedComponent prevented the call of the first 2 components + // as the closable components are called in revered order + exceptions := []string{"node closable component 1", "node closable component 2"} + assert.Equal(t, nextOperationShouldStop, nextOperation) + checkCloseCalledMap(t, closedCalled, exceptions...) + }) + t.Run("force closing the node when shuffle out is received", func(t *testing.T) { + closedCalled = make(map[string]struct{}) + stopChan := make(chan endProcess.ArgEndProcess, 1) + sigs := make(chan os.Signal) + + go func() { + time.Sleep(time.Millisecond * 100) // wait for the waitForSignal to start + stopChan <- endProcess.ArgEndProcess{ + Reason: common.ShuffledOut, + Description: "test", + } + }() + + nextOperation := waitForSignal( + sigs, + stopChan, + healthServiceClosableComponent, + facadeClosableComponent, + httpClosableComponent, + n, + 1, + ) + + // these exceptions appear because the delayedComponent prevented the call of the first 2 components + // as the closable components are called in revered order + exceptions := []string{"node closable component 1", "node closable component 2"} + // in this case, even if the node is shuffled out, it should stop as some components were not closed + assert.Equal(t, nextOperationShouldStop, nextOperation) + checkCloseCalledMap(t, closedCalled, exceptions...) 
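	// [Editor's note] Both "force closing" subtests rely on the maxTimeToClose
	// constant defined in nodeRunner.go (10 * time.Second): the delayedComponent
	// added above sleeps for a full minute, so closeAllComponents cannot finish,
	// waitForSignal falls into its time.After(maxTimeToClose) branch and returns
	// nextOperationShouldStop. This is why even the shuffle-out case ends with
	// nextOperationShouldStop here: a node that failed to close all of its
	// components gracefully is not considered safe to restart in place.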
+ }) +} + +func checkCloseCalledMap(tb testing.TB, closedCalled map[string]struct{}, exceptions ...string) { + allKeys := []string{"healthService", "facade", "http", "node closable component 1", "node closable component 2"} + numKeys := 0 + for _, key := range allKeys { + if contains(key, exceptions) { + continue + } + + numKeys++ + assert.Contains(tb, closedCalled, key) + } + + assert.Equal(tb, numKeys, len(closedCalled)) +} + +func contains(needle string, haystack []string) bool { + for _, element := range haystack { + if needle == element { + return true + } + } + + return false +} From abb70bc0004fdb9164e4397708cc0bb14d0044b9 Mon Sep 17 00:00:00 2001 From: jules01 Date: Wed, 29 Mar 2023 16:04:07 +0300 Subject: [PATCH 05/12] - added missing stub --- .../api/upgradeableHttpServerHandlerStub.go | 42 +++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 testscommon/api/upgradeableHttpServerHandlerStub.go diff --git a/testscommon/api/upgradeableHttpServerHandlerStub.go b/testscommon/api/upgradeableHttpServerHandlerStub.go new file mode 100644 index 00000000000..24eb753bb45 --- /dev/null +++ b/testscommon/api/upgradeableHttpServerHandlerStub.go @@ -0,0 +1,42 @@ +package api + +import "github.com/multiversx/mx-chain-go/api/shared" + +// UpgradeableHttpServerHandlerStub - +type UpgradeableHttpServerHandlerStub struct { + StartHttpServerCalled func() error + UpdateFacadeCalled func(facade shared.FacadeHandler) error + CloseCalled func() error +} + +// StartHttpServer - +func (stub *UpgradeableHttpServerHandlerStub) StartHttpServer() error { + if stub.StartHttpServerCalled != nil { + return stub.StartHttpServerCalled() + } + + return nil +} + +// UpdateFacade - +func (stub *UpgradeableHttpServerHandlerStub) UpdateFacade(facade shared.FacadeHandler) error { + if stub.UpdateFacadeCalled != nil { + return stub.UpdateFacadeCalled(facade) + } + + return nil +} + +// Close - +func (stub *UpgradeableHttpServerHandlerStub) Close() error { + if stub.CloseCalled != nil { + return stub.CloseCalled() + } + + return nil +} + +// IsInterfaceNil - +func (stub *UpgradeableHttpServerHandlerStub) IsInterfaceNil() bool { + return stub == nil +} From 4ba64190346a58572705e9300a56f6a0bac2f0be Mon Sep 17 00:00:00 2001 From: jules01 Date: Wed, 29 Mar 2023 16:19:36 +0300 Subject: [PATCH 06/12] - fixed test name --- node/nodeRunner_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/nodeRunner_test.go b/node/nodeRunner_test.go index 95de455f940..e7de941d13a 100644 --- a/node/nodeRunner_test.go +++ b/node/nodeRunner_test.go @@ -133,7 +133,7 @@ func TestNewNodeRunner(t *testing.T) { }) } -func TestNodeRunner_StartAndCloseNodeWithShuffleOut(t *testing.T) { +func TestNodeRunner_StartAndCloseNodeWith(t *testing.T) { t.Parallel() configs := createConfigs(t) From 12fe487080b92d8b9445907f45b2ae41645a0f68 Mon Sep 17 00:00:00 2001 From: jules01 Date: Thu, 30 Mar 2023 09:50:49 +0300 Subject: [PATCH 07/12] - fix after review - part 1 --- node/mock/applicationRunningTrigger.go | 1 + node/nodeRunner_test.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/node/mock/applicationRunningTrigger.go b/node/mock/applicationRunningTrigger.go index 1ab9b23ccf4..ade030ca801 100644 --- a/node/mock/applicationRunningTrigger.go +++ b/node/mock/applicationRunningTrigger.go @@ -19,6 +19,7 @@ func NewApplicationRunningTrigger() *applicationRunningTrigger { } } +// Write - func (trigger *applicationRunningTrigger) Write(p []byte) (n int, err error) { if strings.Contains(string(p), 
"application is now running") { log.Info("got signal, trying to gracefully close the node") diff --git a/node/nodeRunner_test.go b/node/nodeRunner_test.go index e7de941d13a..7349ed20381 100644 --- a/node/nodeRunner_test.go +++ b/node/nodeRunner_test.go @@ -181,7 +181,7 @@ func TestCopyDirectory(t *testing.T) { // src // +- file2 // +- dir1 - // +- file3 + // +- file3 // +- dir2 // +- file4 From 12d8046cf58e15ca2760eef49921ade7fafd00bf Mon Sep 17 00:00:00 2001 From: jules01 Date: Thu, 30 Mar 2023 11:42:33 +0300 Subject: [PATCH 08/12] - fix after review - part 2 --- node/nodeRunner_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/node/nodeRunner_test.go b/node/nodeRunner_test.go index 7349ed20381..8174e344581 100644 --- a/node/nodeRunner_test.go +++ b/node/nodeRunner_test.go @@ -133,7 +133,7 @@ func TestNewNodeRunner(t *testing.T) { }) } -func TestNodeRunner_StartAndCloseNodeWith(t *testing.T) { +func TestNodeRunner_StartAndCloseNodeUsingSIGINT(t *testing.T) { t.Parallel() configs := createConfigs(t) @@ -143,7 +143,7 @@ func TestNodeRunner_StartAndCloseNodeWith(t *testing.T) { err := logger.AddLogObserver(trigger, &logger.PlainFormatter{}) require.Nil(t, err) - // start a go routine that will send the SIGINT message after 1 minute + // start a go routine that will send the SIGINT message after 1 second after the node has started go func() { timeout := time.Minute * 5 select { From bf923c74f0ae637581af530d3df3f7ae9264a548 Mon Sep 17 00:00:00 2001 From: jules01 Date: Thu, 30 Mar 2023 15:05:22 +0300 Subject: [PATCH 09/12] - fixes after review - part 3 --- node/metrics/metrics_test.go | 169 +++++++++++++++++++++++++++-------- node/nodeRunner_test.go | 4 +- 2 files changed, 132 insertions(+), 41 deletions(-) diff --git a/node/metrics/metrics_test.go b/node/metrics/metrics_test.go index 0e4e8ea0b48..7e2e3d324d8 100644 --- a/node/metrics/metrics_test.go +++ b/node/metrics/metrics_test.go @@ -137,49 +137,59 @@ func TestInitConfigMetrics(t *testing.T) { ESDTTransferRoleEnableEpoch: 33, BuiltInFunctionOnMetaEnableEpoch: 34, WaitingListFixEnableEpoch: 35, + MaxNodesChangeEnableEpoch: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 1, + NodesToShufflePerShard: 2, + }, + }, }, } expectedValues := map[string]interface{}{ - "erd_smart_contract_deploy_enable_epoch": uint32(1), - "erd_built_in_functions_enable_epoch": uint32(2), - "erd_relayed_transactions_enable_epoch": uint32(3), - "erd_penalized_too_much_gas_enable_epoch": uint32(4), - "erd_switch_jail_waiting_enable_epoch": uint32(5), - "erd_switch_hysteresis_for_min_nodes_enable_epoch": uint32(6), - "erd_below_signed_threshold_enable_epoch": uint32(7), - "erd_transaction_signed_with_txhash_enable_epoch": uint32(8), - "erd_meta_protection_enable_epoch": uint32(9), - "erd_ahead_of_time_gas_usage_enable_epoch": uint32(10), - "erd_gas_price_modifier_enable_epoch": uint32(11), - "erd_repair_callback_enable_epoch": uint32(12), - "erd_block_gas_and_fee_recheck_enable_epoch": uint32(13), - "erd_staking_v2_enable_epoch": uint32(14), - "erd_stake_enable_epoch": uint32(15), - "erd_double_key_protection_enable_epoch": uint32(16), - "erd_esdt_enable_epoch": uint32(17), - "erd_governance_enable_epoch": uint32(18), - "erd_delegation_manager_enable_epoch": uint32(19), - "erd_delegation_smart_contract_enable_epoch": uint32(20), - "erd_correct_last_unjailed_enable_epoch": uint32(21), - "erd_balance_waiting_lists_enable_epoch": uint32(22), - "erd_return_data_to_last_transfer_enable_epoch": uint32(23), - 
"erd_sender_in_out_transfer_enable_epoch": uint32(24), - "erd_relayed_transactions_v2_enable_epoch": uint32(25), - "erd_unbond_tokens_v2_enable_epoch": uint32(26), - "erd_save_jailed_always_enable_epoch": uint32(27), - "erd_validator_to_delegation_enable_epoch": uint32(28), - "erd_redelegate_below_min_check_enable_epoch": uint32(29), - "erd_increment_scr_nonce_in_multi_transfer_enable_epoch": uint32(30), - "erd_esdt_multi_transfer_enable_epoch": uint32(31), - "erd_global_mint_burn_disable_epoch": uint32(32), - "erd_esdt_transfer_role_enable_epoch": uint32(33), - "erd_builtin_function_on_meta_enable_epoch": uint32(34), - "erd_waiting_list_fix_enable_epoch": uint32(35), - "erd_max_nodes_change_enable_epoch": nil, - "erd_total_supply": "12345", - "erd_hysteresis": "0.100000", - "erd_adaptivity": "true", + "erd_smart_contract_deploy_enable_epoch": uint32(1), + "erd_built_in_functions_enable_epoch": uint32(2), + "erd_relayed_transactions_enable_epoch": uint32(3), + "erd_penalized_too_much_gas_enable_epoch": uint32(4), + "erd_switch_jail_waiting_enable_epoch": uint32(5), + "erd_switch_hysteresis_for_min_nodes_enable_epoch": uint32(6), + "erd_below_signed_threshold_enable_epoch": uint32(7), + "erd_transaction_signed_with_txhash_enable_epoch": uint32(8), + "erd_meta_protection_enable_epoch": uint32(9), + "erd_ahead_of_time_gas_usage_enable_epoch": uint32(10), + "erd_gas_price_modifier_enable_epoch": uint32(11), + "erd_repair_callback_enable_epoch": uint32(12), + "erd_block_gas_and_fee_recheck_enable_epoch": uint32(13), + "erd_staking_v2_enable_epoch": uint32(14), + "erd_stake_enable_epoch": uint32(15), + "erd_double_key_protection_enable_epoch": uint32(16), + "erd_esdt_enable_epoch": uint32(17), + "erd_governance_enable_epoch": uint32(18), + "erd_delegation_manager_enable_epoch": uint32(19), + "erd_delegation_smart_contract_enable_epoch": uint32(20), + "erd_correct_last_unjailed_enable_epoch": uint32(21), + "erd_balance_waiting_lists_enable_epoch": uint32(22), + "erd_return_data_to_last_transfer_enable_epoch": uint32(23), + "erd_sender_in_out_transfer_enable_epoch": uint32(24), + "erd_relayed_transactions_v2_enable_epoch": uint32(25), + "erd_unbond_tokens_v2_enable_epoch": uint32(26), + "erd_save_jailed_always_enable_epoch": uint32(27), + "erd_validator_to_delegation_enable_epoch": uint32(28), + "erd_redelegate_below_min_check_enable_epoch": uint32(29), + "erd_increment_scr_nonce_in_multi_transfer_enable_epoch": uint32(30), + "erd_esdt_multi_transfer_enable_epoch": uint32(31), + "erd_global_mint_burn_disable_epoch": uint32(32), + "erd_esdt_transfer_role_enable_epoch": uint32(33), + "erd_builtin_function_on_meta_enable_epoch": uint32(34), + "erd_waiting_list_fix_enable_epoch": uint32(35), + "erd_max_nodes_change_enable_epoch": nil, + "erd_total_supply": "12345", + "erd_hysteresis": "0.100000", + "erd_adaptivity": "true", + "erd_max_nodes_change_enable_epoch0_epoch_enable": uint32(0), + "erd_max_nodes_change_enable_epoch0_max_num_nodes": uint32(1), + "erd_max_nodes_change_enable_epoch0_nodes_to_shuffle_per_shard": uint32(2), } economicsConfig := config.EconomicsConfig{ @@ -385,6 +395,9 @@ func TestInitMetrics(t *testing.T) { { LeaderPercentage: 2, }, + { + LeaderPercentage: 2, + }, }, }, GlobalSettings: config.GlobalSettings{ @@ -469,6 +482,84 @@ func TestInitMetrics(t *testing.T) { assert.Equal(t, v, keys[k], fmt.Sprintf("for key %s", k)) } }) + t.Run("should work - metachain", func(t *testing.T) { + t.Parallel() + + keys := make(map[string]interface{}) + localStatusHandler := 
&statusHandler.AppStatusHandlerStub{
+			SetUInt64ValueHandler: func(key string, value uint64) {
+				keys[key] = value
+			},
+			SetStringValueHandler: func(key string, value string) {
+				keys[key] = value
+			},
+		}
+		localShardCoordinator := &testscommon.ShardsCoordinatorMock{
+			NoShards: 3,
+			SelfIDCalled: func() uint32 {
+				return common.MetachainShardId
+			},
+		}
+
+		err := InitMetrics(localStatusHandler, pubkeyString, nodeType, localShardCoordinator, nodesSetup, version, economicsConfigs, roundsPerEpoch, minTransactionVersion)
+		assert.Nil(t, err)
+
+		expectedValues := map[string]interface{}{
+			common.MetricPublicKeyBlockSign: pubkeyString,
+			common.MetricShardId: uint64(localShardCoordinator.SelfId()),
+			common.MetricNumShardsWithoutMetachain: uint64(localShardCoordinator.NoShards),
+			common.MetricNodeType: string(nodeType),
+			common.MetricRoundTime: uint64(6),
+			common.MetricAppVersion: version,
+			common.MetricRoundsPerEpoch: uint64(roundsPerEpoch),
+			common.MetricCrossCheckBlockHeight: "0",
+			common.MetricCrossCheckBlockHeight + "_0": uint64(0),
+			common.MetricCrossCheckBlockHeight + "_1": uint64(0),
+			common.MetricCrossCheckBlockHeight + "_2": uint64(0),
+			common.MetricCrossCheckBlockHeightMeta: uint64(0),
+			common.MetricIsSyncing: uint64(1),
+			common.MetricLeaderPercentage: fmt.Sprintf("%f", 2.0),
+			common.MetricDenomination: uint64(4),
+			common.MetricShardConsensusGroupSize: uint64(63),
+			common.MetricMetaConsensusGroupSize: uint64(400),
+			common.MetricNumNodesPerShard: uint64(402),
+			common.MetricNumMetachainNodes: uint64(401),
+			common.MetricStartTime: uint64(111111),
+			common.MetricRoundDuration: uint64(6000),
+			common.MetricMinTransactionVersion: uint64(1),
+			common.MetricNumValidators: uint64(0),
+			common.MetricConsensusGroupSize: uint64(400),
+		}
+
+		assert.Equal(t, len(expectedValues), len(keys))
+		for k, v := range expectedValues {
+			assert.Equal(t, v, keys[k], fmt.Sprintf("for key %s", k))
+		}
+	})
+	t.Run("should work - invalid shard id", func(t *testing.T) {
+		t.Parallel()
+
+		keys := make(map[string]interface{})
+		localStatusHandler := &statusHandler.AppStatusHandlerStub{
+			SetUInt64ValueHandler: func(key string, value uint64) {
+				keys[key] = value
+			},
+			SetStringValueHandler: func(key string, value string) {
+				keys[key] = value
+			},
+		}
+		localShardCoordinator := &testscommon.ShardsCoordinatorMock{
+			NoShards: 3,
+			SelfIDCalled: func() uint32 {
+				return 10
+			},
+		}
+
+		err := InitMetrics(localStatusHandler, pubkeyString, nodeType, localShardCoordinator, nodesSetup, version, economicsConfigs, roundsPerEpoch, minTransactionVersion)
+		assert.Nil(t, err)
+
+		assert.Equal(t, uint64(0), keys[common.MetricConsensusGroupSize])
+	})
 }
 
 func TestSaveStringMetric(t *testing.T) {
diff --git a/node/nodeRunner_test.go b/node/nodeRunner_test.go
index 8174e344581..a7dd9dac1a1 100644
--- a/node/nodeRunner_test.go
+++ b/node/nodeRunner_test.go
@@ -374,7 +374,7 @@ func TestWaitForSignal(t *testing.T) {
 	)
 
 	// these exceptions appear because the delayedComponent prevented the call of the first 2 components
-	// as the closable components are called in revered order
+	// as the closable components are called in reversed order
 	exceptions := []string{"node closable component 1", "node closable component 2"}
 	assert.Equal(t, nextOperationShouldStop, nextOperation)
 	checkCloseCalledMap(t, closedCalled, exceptions...)
@@ -403,7 +403,7 @@ func TestWaitForSignal(t *testing.T) {
 	)
 
 	// these exceptions appear because the delayedComponent prevented the call of the first 2 components
-	// as the closable components are called in revered order
+	// as the closable components are called in reversed order
 	exceptions := []string{"node closable component 1", "node closable component 2"}
 	// in this case, even if the node is shuffled out, it should stop as some components were not closed
 	assert.Equal(t, nextOperationShouldStop, nextOperation)
From 56bf14189c690e71ad05b869de2e7acad00175b2 Mon Sep 17 00:00:00 2001
From: jules01
Date: Fri, 31 Mar 2023 18:37:09 +0300
Subject: [PATCH 10/12] - refactored the IsInterfaceNil tests

---
 node/external/logs/logsFacade_test.go | 21 +++++++++++++---
 node/external/nodeApiResolver_test.go | 13 +++++++++-
 .../timemachine/fee/feeComputer_test.go | 17 ++++++++++++-
 node/node_test.go | 12 ++++++++-
 .../delegatedListProcessor_test.go | 25 +++++++++++--------
 .../directStakedListProcessor_test.go | 25 +++++++++++--------
 6 files changed, 87 insertions(+), 26 deletions(-)

diff --git a/node/external/logs/logsFacade_test.go b/node/external/logs/logsFacade_test.go
index f277ea811f6..61da4911aa8 100644
--- a/node/external/logs/logsFacade_test.go
+++ b/node/external/logs/logsFacade_test.go
@@ -23,7 +23,7 @@ func TestNewLogsFacade(t *testing.T) {
 		facade, err := NewLogsFacade(arguments)
 		require.ErrorIs(t, err, errCannotCreateLogsFacade)
 		require.ErrorContains(t, err, core.ErrNilStore.Error())
-		require.True(t, check.IfNil(facade))
+		require.Nil(t, facade)
 	})
 
 	t.Run("NilMarshaller", func(t *testing.T) {
@@ -36,7 +36,7 @@ func TestNewLogsFacade(t *testing.T) {
 		facade, err := NewLogsFacade(arguments)
 		require.ErrorIs(t, err, errCannotCreateLogsFacade)
 		require.ErrorContains(t, err, core.ErrNilMarshalizer.Error())
-		require.True(t, check.IfNil(facade))
+		require.Nil(t, facade)
 	})
 
 	t.Run("NilPubKeyConverter", func(t *testing.T) {
@@ -49,7 +49,7 @@ func TestNewLogsFacade(t *testing.T) {
 		facade, err := NewLogsFacade(arguments)
 		require.ErrorIs(t, err, errCannotCreateLogsFacade)
 		require.ErrorContains(t, err, core.ErrNilPubkeyConverter.Error())
-		require.True(t, check.IfNil(facade))
+		require.Nil(t, facade)
 	})
 }
 
@@ -144,3 +144,18 @@ func TestLogsFacade_IncludeLogsInTransactionsShouldWork(t *testing.T) {
 	require.Nil(t, transactions[2].Logs)
 	require.Equal(t, "fourth", transactions[3].Logs.Events[0].Identifier)
 }
+
+func TestLogsFacade_IsInterfaceNil(t *testing.T) {
+	t.Parallel()
+
+	var lf *logsFacade
+	require.True(t, check.IfNil(lf))
+
+	arguments := ArgsNewLogsFacade{
+		StorageService: genericMocks.NewChainStorerMock(7),
+		Marshaller: &marshal.GogoProtoMarshalizer{},
+		PubKeyConverter: testscommon.NewPubkeyConverterMock(32),
+	}
+	lf, _ = NewLogsFacade(arguments)
+	require.False(t, check.IfNil(lf))
+}
diff --git a/node/external/nodeApiResolver_test.go b/node/external/nodeApiResolver_test.go
index 0f4528ba2c7..2efe85f6db7 100644
--- a/node/external/nodeApiResolver_test.go
+++ b/node/external/nodeApiResolver_test.go
@@ -127,7 +127,7 @@ func TestNewNodeApiResolver_ShouldWork(t *testing.T) {
 	nar, err := external.NewNodeApiResolver(arg)
 
 	assert.Nil(t, err)
-	assert.False(t, check.IfNil(nar))
+	assert.NotNil(t, nar)
 }
 
 func TestNodeApiResolver_CloseShouldReturnNil(t *testing.T) {
@@ -676,3 +676,14 @@ func TestNodeApiResolver_GetGasConfigs(t *testing.T) {
 	_ = nar.GetGasConfigs()
 	require.True(t, wasCalled)
 }
+
+func TestNodeApiResolver_IsInterfaceNil(t *testing.T) {
+	t.Parallel()
+
+	nar, _ := external.NewNodeApiResolver(external.ArgNodeApiResolver{})
+	require.True(t, check.IfNil(nar))
+
+	arg := createMockArgs()
+	nar, _ = external.NewNodeApiResolver(arg)
+	require.False(t, check.IfNil(nar))
+}
diff --git a/node/external/timemachine/fee/feeComputer_test.go b/node/external/timemachine/fee/feeComputer_test.go
index 14e11af7792..889d0d3a4c9 100644
--- a/node/external/timemachine/fee/feeComputer_test.go
+++ b/node/external/timemachine/fee/feeComputer_test.go
@@ -24,7 +24,7 @@ func TestNewFeeComputer(t *testing.T) {
 		computer, err := NewFeeComputer(arguments)
 
 		require.Equal(t, process.ErrNilBuiltInFunctionsCostHandler, err)
-		require.True(t, check.IfNil(computer))
+		require.Nil(t, computer)
 	})
 
 	t.Run("AllArgumentsProvided", func(t *testing.T) {
@@ -216,3 +216,18 @@ func TestFeeComputer_InHighConcurrency(t *testing.T) {
 
 	wg.Wait()
 }
+
+func TestFullHistoryPruningStorer_IsInterfaceNil(t *testing.T) {
+	t.Parallel()
+
+	var fc *feeComputer
+	require.True(t, check.IfNil(fc))
+
+	arguments := ArgsNewFeeComputer{
+		BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{},
+		EconomicsConfig: testscommon.GetEconomicsConfig(),
+	}
+
+	fc, _ = NewFeeComputer(arguments)
+	require.False(t, check.IfNil(fc))
+}
diff --git a/node/node_test.go b/node/node_test.go
index 22e884cecb2..bbc8026ee5f 100644
--- a/node/node_test.go
+++ b/node/node_test.go
@@ -133,7 +133,7 @@ func TestNewNode(t *testing.T) {
 	n, err := node.NewNode()
 
 	assert.Nil(t, err)
-	assert.False(t, check.IfNil(n))
+	assert.NotNil(t, n)
 }
 
 func TestNewNode_NilOptionShouldError(t *testing.T) {
@@ -4504,3 +4504,13 @@ func getDefaultBootstrapComponents() *mainFactoryMocks.BootstrapComponentsStub {
 		HdrIntegrityVerifier: &mock.HeaderIntegrityVerifierStub{},
 	}
 }
+
+func TestNode_IsInterfaceNil(t *testing.T) {
+	t.Parallel()
+
+	var n *node.Node
+	require.True(t, check.IfNil(n))
+
+	n, _ = node.NewNode()
+	require.False(t, check.IfNil(n))
+}
diff --git a/node/trieIterators/delegatedListProcessor_test.go b/node/trieIterators/delegatedListProcessor_test.go
index 090f8ce68e1..81c9209257c 100644
--- a/node/trieIterators/delegatedListProcessor_test.go
+++ b/node/trieIterators/delegatedListProcessor_test.go
@@ -42,24 +42,19 @@ func TestNewDelegatedListProcessor(t *testing.T) {
 			},
 			exError: ErrNilAccountsAdapter,
 		},
-		{
-			name: "ShouldWork",
-			argsFunc: func() ArgTrieIteratorProcessor {
-				return createMockArgs()
-			},
-			exError: nil,
-		},
 	}
 
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			_, err := NewDelegatedListProcessor(tt.argsFunc())
+			dlp, err := NewDelegatedListProcessor(tt.argsFunc())
 			require.True(t, errors.Is(err, tt.exError))
+			require.Nil(t, dlp)
 		})
 	}
 
-	dlp, _ := NewDelegatedListProcessor(createMockArgs())
-	assert.False(t, check.IfNil(dlp))
+	dlp, err := NewDelegatedListProcessor(createMockArgs())
+	require.NotNil(t, dlp)
+	require.Nil(t, err)
 }
 
 func TestDelegatedListProc_GetDelegatorsListGetAllContractAddressesFailsShouldErr(t *testing.T) {
@@ -217,6 +212,16 @@ func TestDelegatedListProc_GetDelegatorsListShouldWork(t *testing.T) {
 	assert.Equal(t, []*api.Delegator{&expectedDelegator1, &expectedDelegator2}, delegatorsValues)
 }
 
+func TestDelegatedListProcessor_IsInterfaceNil(t *testing.T) {
+	t.Parallel()
+
+	var dlp *delegatedListProcessor
+	require.True(t, check.IfNil(dlp))
+
+	dlp, _ = NewDelegatedListProcessor(createMockArgs())
+	require.False(t, check.IfNil(dlp))
+}
+
 func createDelegationScAccount(address []byte, leaves [][]byte, rootHash []byte, timeSleep time.Duration) state.UserAccountHandler {
 	acc, _ := state.NewUserAccount(address)
 	acc.SetDataTrie(&trieMock.TrieStub{
diff --git a/node/trieIterators/directStakedListProcessor_test.go b/node/trieIterators/directStakedListProcessor_test.go
index 18b0bba952d..36b4297c8c3 100644
--- a/node/trieIterators/directStakedListProcessor_test.go
+++ b/node/trieIterators/directStakedListProcessor_test.go
@@ -41,24 +41,19 @@ func TestNewDirectStakedListProcessor(t *testing.T) {
 			},
 			exError: ErrNilAccountsAdapter,
 		},
-		{
-			name: "ShouldWork",
-			argsFunc: func() ArgTrieIteratorProcessor {
-				return createMockArgs()
-			},
-			exError: nil,
-		},
 	}
 
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			_, err := NewDirectStakedListProcessor(tt.argsFunc())
+			dslp, err := NewDirectStakedListProcessor(tt.argsFunc())
 			require.True(t, errors.Is(err, tt.exError))
+			require.Nil(t, dslp)
 		})
 	}
 
-	dslp, _ := NewDirectStakedListProcessor(createMockArgs())
-	assert.False(t, check.IfNil(dslp))
+	dslp, err := NewDirectStakedListProcessor(createMockArgs())
+	require.NotNil(t, dslp)
+	require.Nil(t, err)
 }
 
 func TestDirectStakedListProc_GetDelegatorsListContextShouldTimeout(t *testing.T) {
@@ -171,3 +166,13 @@ func createValidatorScAccount(address []byte, leaves [][]byte, rootHash []byte,
 
 	return acc
 }
+
+func TestDirectStakedListProcessor_IsInterfaceNil(t *testing.T) {
+	t.Parallel()
+
+	var dslp *directStakedListProcessor
+	require.True(t, check.IfNil(dslp))
+
+	dslp, _ = NewDirectStakedListProcessor(createMockArgs())
+	require.False(t, check.IfNil(dslp))
+}
From d4ab76fd0a8678060e725ae2dd2c9c69d6c0e42f Mon Sep 17 00:00:00 2001
From: jules01
Date: Fri, 31 Mar 2023 18:47:28 +0300
Subject: [PATCH 11/12] - fixed test name

---
 node/external/timemachine/fee/feeComputer_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/node/external/timemachine/fee/feeComputer_test.go b/node/external/timemachine/fee/feeComputer_test.go
index 889d0d3a4c9..33581dbc338 100644
--- a/node/external/timemachine/fee/feeComputer_test.go
+++ b/node/external/timemachine/fee/feeComputer_test.go
@@ -217,7 +217,7 @@ func TestFeeComputer_InHighConcurrency(t *testing.T) {
 	wg.Wait()
 }
 
-func TestFullHistoryPruningStorer_IsInterfaceNil(t *testing.T) {
+func TestFeeComputer_IsInterfaceNil(t *testing.T) {
 	t.Parallel()
 
 	var fc *feeComputer
From b9953f4242eaaf5b34b31127ee01cf117caaaea6 Mon Sep 17 00:00:00 2001
From: jules01
Date: Fri, 31 Mar 2023 20:43:54 +0300
Subject: [PATCH 12/12] refactor part 2 for the IsInterfaceNil tests

---
 node/external/logs/logsFacade_test.go | 5 ++---
 node/external/nodeApiResolver_test.go | 5 ++---
 node/external/timemachine/fee/feeComputer_test.go | 5 ++---
 node/node_test.go | 4 ++--
 node/trieIterators/delegatedListProcessor_test.go | 5 ++---
 node/trieIterators/directStakedListProcessor_test.go | 5 ++---
 6 files changed, 12 insertions(+), 17 deletions(-)

diff --git a/node/external/logs/logsFacade_test.go b/node/external/logs/logsFacade_test.go
index 61da4911aa8..21d11f99c59 100644
--- a/node/external/logs/logsFacade_test.go
+++ b/node/external/logs/logsFacade_test.go
@@ -4,7 +4,6 @@ import (
 	"testing"
 
 	"github.com/multiversx/mx-chain-core-go/core"
-	"github.com/multiversx/mx-chain-core-go/core/check"
 	"github.com/multiversx/mx-chain-core-go/data/transaction"
 	"github.com/multiversx/mx-chain-core-go/marshal"
 	"github.com/multiversx/mx-chain-go/testscommon"
@@ -149,7 +148,7 @@ func TestLogsFacade_IsInterfaceNil(t *testing.T) {
 	t.Parallel()
 
 	var lf *logsFacade
-	require.True(t, check.IfNil(lf))
+	require.True(t, lf.IsInterfaceNil())
 
 	arguments := ArgsNewLogsFacade{
 		StorageService: genericMocks.NewChainStorerMock(7),
@@ -157,5 +156,5 @@ func TestLogsFacade_IsInterfaceNil(t *testing.T) {
 		PubKeyConverter: testscommon.NewPubkeyConverterMock(32),
 	}
 	lf, _ = NewLogsFacade(arguments)
-	require.False(t, check.IfNil(lf))
+	require.False(t, lf.IsInterfaceNil())
 }
diff --git a/node/external/nodeApiResolver_test.go b/node/external/nodeApiResolver_test.go
index 2efe85f6db7..1132c7bbdcf 100644
--- a/node/external/nodeApiResolver_test.go
+++ b/node/external/nodeApiResolver_test.go
@@ -7,7 +7,6 @@ import (
 	"math/big"
 	"testing"
 
-	"github.com/multiversx/mx-chain-core-go/core/check"
 	"github.com/multiversx/mx-chain-core-go/data/api"
 	"github.com/multiversx/mx-chain-core-go/data/transaction"
 	"github.com/multiversx/mx-chain-go/common"
@@ -681,9 +680,9 @@ func TestNodeApiResolver_IsInterfaceNil(t *testing.T) {
 	t.Parallel()
 
 	nar, _ := external.NewNodeApiResolver(external.ArgNodeApiResolver{})
-	require.True(t, check.IfNil(nar))
+	require.True(t, nar.IsInterfaceNil())
 
 	arg := createMockArgs()
 	nar, _ = external.NewNodeApiResolver(arg)
-	require.False(t, check.IfNil(nar))
+	require.False(t, nar.IsInterfaceNil())
 }
diff --git a/node/external/timemachine/fee/feeComputer_test.go b/node/external/timemachine/fee/feeComputer_test.go
index 33581dbc338..fb91db19049 100644
--- a/node/external/timemachine/fee/feeComputer_test.go
+++ b/node/external/timemachine/fee/feeComputer_test.go
@@ -6,7 +6,6 @@ import (
 	"sync"
 	"testing"
 
-	"github.com/multiversx/mx-chain-core-go/core/check"
 	"github.com/multiversx/mx-chain-core-go/data/transaction"
 	"github.com/multiversx/mx-chain-go/config"
 	"github.com/multiversx/mx-chain-go/process"
@@ -221,7 +220,7 @@ func TestFeeComputer_IsInterfaceNil(t *testing.T) {
 	t.Parallel()
 
 	var fc *feeComputer
-	require.True(t, check.IfNil(fc))
+	require.True(t, fc.IsInterfaceNil())
 
 	arguments := ArgsNewFeeComputer{
 		BuiltInFunctionsCostHandler: &testscommon.BuiltInCostHandlerStub{},
@@ -229,5 +228,5 @@ func TestFeeComputer_IsInterfaceNil(t *testing.T) {
 	}
 
 	fc, _ = NewFeeComputer(arguments)
-	require.False(t, check.IfNil(fc))
+	require.False(t, fc.IsInterfaceNil())
 }
diff --git a/node/node_test.go b/node/node_test.go
index bbc8026ee5f..057bc262b17 100644
--- a/node/node_test.go
+++ b/node/node_test.go
@@ -4509,8 +4509,8 @@ func TestNode_IsInterfaceNil(t *testing.T) {
 	t.Parallel()
 
 	var n *node.Node
-	require.True(t, check.IfNil(n))
+	require.True(t, n.IsInterfaceNil())
 
 	n, _ = node.NewNode()
-	require.False(t, check.IfNil(n))
+	require.False(t, n.IsInterfaceNil())
 }
diff --git a/node/trieIterators/delegatedListProcessor_test.go b/node/trieIterators/delegatedListProcessor_test.go
index 81c9209257c..c51b926ce59 100644
--- a/node/trieIterators/delegatedListProcessor_test.go
+++ b/node/trieIterators/delegatedListProcessor_test.go
@@ -9,7 +9,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/multiversx/mx-chain-core-go/core/check"
 	"github.com/multiversx/mx-chain-core-go/core/keyValStorage"
 	"github.com/multiversx/mx-chain-core-go/data/api"
 	"github.com/multiversx/mx-chain-go/common"
@@ -216,10 +215,10 @@ func TestDelegatedListProcessor_IsInterfaceNil(t *testing.T) {
 	t.Parallel()
 
 	var dlp *delegatedListProcessor
-	require.True(t, check.IfNil(dlp))
+	require.True(t, dlp.IsInterfaceNil())
 
 	dlp, _ = NewDelegatedListProcessor(createMockArgs())
-	require.False(t, check.IfNil(dlp))
+	require.False(t, dlp.IsInterfaceNil())
 }
 
 func createDelegationScAccount(address []byte, leaves [][]byte, rootHash []byte, timeSleep time.Duration) state.UserAccountHandler {
diff --git a/node/trieIterators/directStakedListProcessor_test.go b/node/trieIterators/directStakedListProcessor_test.go
index 36b4297c8c3..a56311d9108 100644
--- a/node/trieIterators/directStakedListProcessor_test.go
+++ b/node/trieIterators/directStakedListProcessor_test.go
@@ -9,7 +9,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/multiversx/mx-chain-core-go/core/check"
 	"github.com/multiversx/mx-chain-core-go/core/keyValStorage"
 	"github.com/multiversx/mx-chain-core-go/data/api"
 	"github.com/multiversx/mx-chain-go/common"
@@ -171,8 +170,8 @@ func TestDirectStakedListProcessor_IsInterfaceNil(t *testing.T) {
 	t.Parallel()
 
 	var dslp *directStakedListProcessor
-	require.True(t, check.IfNil(dslp))
+	require.True(t, dslp.IsInterfaceNil())
 
 	dslp, _ = NewDirectStakedListProcessor(createMockArgs())
-	require.False(t, check.IfNil(dslp))
+	require.False(t, dslp.IsInterfaceNil())
 }
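
A note on the pattern the last three patches converge on. In Go, an interface value that wraps a typed nil pointer is itself non-nil: a plain "iface == nil" comparison reports false even though every method call on it runs with a nil receiver. The IsInterfaceNil convention makes that state detectable, and the check.IfNil helper removed from these tests essentially combines a nil comparison on the interface with a call to IsInterfaceNil. The sketch below is illustrative only; Closer and component are hypothetical names invented for this example, not types from the repository.

package main

import "fmt"

// Closer mimics the repository convention: interfaces expose an
// IsInterfaceNil method so callers can detect typed-nil values.
// Closer and component are hypothetical types for this sketch.
type Closer interface {
	Close() error
	IsInterfaceNil() bool
}

type component struct{}

func (c *component) Close() error { return nil }

// IsInterfaceNil returns true if there is no value under the interface.
// Calling it on a nil *component receiver is safe: the method body only
// compares the receiver against nil and never dereferences it.
func (c *component) IsInterfaceNil() bool {
	return c == nil
}

func main() {
	var c *component // typed nil pointer
	var iface Closer = c

	fmt.Println(iface == nil)           // false: the interface wraps (*component)(nil)
	fmt.Println(iface.IsInterfaceNil()) // true: the nil receiver is detected
}

This is also why the refactored tests can call methods such as dlp.IsInterfaceNil() directly on a nil *delegatedListProcessor: the call dispatches on the concrete pointer type, and the method merely compares its receiver against nil.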