From e135ee5478457a2b767d24de8bce5f02f08a0791 Mon Sep 17 00:00:00 2001
From: BeniaminDrasovean
Date: Thu, 9 Mar 2023 11:18:14 +0200
Subject: [PATCH 01/20] create a new errChan struct that wraps an err chan

---
 common/errChan/errChan.go                     |  62 ++++++++++
 common/errChan/errChan_test.go                |  81 +++++++++++++
 common/interface.go                           |  10 +-
 debug/process/stateExport.go                  |   5 +-
 epochStart/metachain/systemSCs.go             |   5 +-
 factory/processing/processComponents.go       |   5 +-
 .../stateTrieClose/stateTrieClose_test.go     |  21 ++--
 .../state/stateTrieSync/stateTrieSync_test.go |  13 +-
 integrationTests/testProcessorNode.go         |   5 +-
 node/node.go                                  |  17 +--
 node/node_test.go                             |  24 ++--
 node/trieIterators/delegatedListProcessor.go  |   5 +-
 .../delegatedListProcessor_test.go            |   2 +-
 .../directStakedListProcessor.go              |   5 +-
 .../directStakedListProcessor_test.go         |   2 +-
 node/trieIterators/stakeValuesProcessor.go    |   5 +-
 .../stakeValuesProcessor_test.go              |   4 +-
 process/block/baseProcess.go                  |   9 +-
 process/block/baseProcess_test.go             |  10 +-
 process/peer/process.go                       |   5 +-
 process/peer/process_test.go                  |  10 +-
 process/txsimulator/wrappedAccountsDB_test.go |   5 +-
 state/accountsDB.go                           |  29 ++---
 state/accountsDB_test.go                      |  76 ++++++++----
 state/peerAccountsDB.go                       |   5 +-
 state/syncer/userAccountsSyncer.go            |   5 +-
 trie/node_test.go                             |   5 +-
 trie/patriciaMerkleTrie.go                    |   8 +-
 trie/patriciaMerkleTrie_test.go               |  21 ++--
 trie/trieStorageManager.go                    |  10 +-
 trie/trieStorageManagerFactory_test.go        |   5 +-
 ...ieStorageManagerWithoutCheckpoints_test.go |   5 +-
 .../trieStorageManagerWithoutSnapshot_test.go |   3 +-
 trie/trieStorageManager_test.go               | 113 +++++++++++------
 update/genesis/common.go                      |   2 +-
 update/genesis/export.go                      |   7 +-
 update/genesis/export_test.go                 |   4 +-
 37 files changed, 419 insertions(+), 189 deletions(-)
 create mode 100644 common/errChan/errChan.go
 create mode 100644 common/errChan/errChan_test.go

diff --git a/common/errChan/errChan.go b/common/errChan/errChan.go
new file mode 100644
index 00000000000..a2f08d1b3fb
--- /dev/null
+++ b/common/errChan/errChan.go
@@ -0,0 +1,62 @@
+package errChan
+
+import "sync"
+
+type errChan struct {
+	ch         chan error
+	closed     bool
+	closeMutex sync.Mutex
+}
+
+// NewErrChan creates a new errChan
+func NewErrChan() *errChan {
+	return &errChan{
+		ch:     make(chan error, 1),
+		closed: false,
+	}
+}
+
+// WriteInChanNonBlocking will send the given error on the channel if the channel is not full
+func (ec *errChan) WriteInChanNonBlocking(err error) {
+	select {
+	case ec.ch <- err:
+	default:
+	}
+}
+
+// ReadFromChanNonBlocking will read from the channel, or return nil if no error was sent on the channel
+func (ec *errChan) ReadFromChanNonBlocking() error {
+	select {
+	case err := <-ec.ch:
+		return err
+	default:
+		return nil
+	}
+}
+
+// Close will close the channel
+func (ec *errChan) Close() {
+	ec.closeMutex.Lock()
+	defer ec.closeMutex.Unlock()
+
+	if ec.closed {
+		return
+	}
+
+	if ec.ch == nil {
+		return
+	}
+
+	close(ec.ch)
+	ec.closed = true
+}
+
+// Len returns the length of the channel
+func (ec *errChan) Len() int {
+	return len(ec.ch)
+}
+
+// IsInterfaceNil returns true if there is no value under the interface
+func (ec *errChan) IsInterfaceNil() bool {
+	return ec == nil
+}
diff --git a/common/errChan/errChan_test.go b/common/errChan/errChan_test.go
new file mode 100644
index 00000000000..b753a4ce224
--- /dev/null
+++ b/common/errChan/errChan_test.go
@@ -0,0 +1,81 @@
+package errChan
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/multiversx/mx-chain-core-go/core/check"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestNewErrChan(t *testing.T) {
+	t.Parallel()
+
+	ec := NewErrChan()
+	assert.False(t, check.IfNil(ec))
+	assert.Equal(t, 1, cap(ec.ch))
+}
+
+func TestErrChan_WriteInChanNonBlocking(t *testing.T) {
+	t.Parallel()
+
+	expectedErr := fmt.Errorf("err1")
+	ec := NewErrChan()
+	ec.WriteInChanNonBlocking(expectedErr)
+	ec.WriteInChanNonBlocking(fmt.Errorf("err2"))
+	ec.WriteInChanNonBlocking(fmt.Errorf("err3"))
+
+	assert.Equal(t, 1, len(ec.ch))
+	assert.Equal(t, expectedErr, <-ec.ch)
+	assert.Equal(t, 0, len(ec.ch))
+}
+
+func TestErrChan_ReadFromChanNonBlocking(t *testing.T) {
+	t.Parallel()
+
+	expectedErr := fmt.Errorf("err1")
+	ec := NewErrChan()
+	ec.ch <- expectedErr
+
+	assert.Equal(t, 1, len(ec.ch))
+	assert.Equal(t, expectedErr, ec.ReadFromChanNonBlocking())
+	assert.Equal(t, 0, len(ec.ch))
+	assert.Nil(t, ec.ReadFromChanNonBlocking())
+}
+
+func TestErrChan_Close(t *testing.T) {
+	t.Parallel()
+
+	t.Run("close an already closed channel", func(t *testing.T) {
+		t.Parallel()
+
+		ec := NewErrChan()
+		ec.Close()
+
+		assert.True(t, ec.closed)
+		ec.Close()
+	})
+
+	t.Run("close a nil channel", func(t *testing.T) {
+		t.Parallel()
+
+		ec := NewErrChan()
+		ec.ch = nil
+		ec.Close()
+
+		assert.False(t, ec.closed)
+	})
+}
+
+func TestErrChan_Len(t *testing.T) {
+	t.Parallel()
+
+	ec := NewErrChan()
+	assert.Equal(t, 0, ec.Len())
+
+	ec.ch <- fmt.Errorf("err1")
+	assert.Equal(t, 1, ec.Len())
+
+	ec.WriteInChanNonBlocking(fmt.Errorf("err2"))
+	assert.Equal(t, 1, ec.Len())
+}
diff --git a/common/interface.go b/common/interface.go
index a58b6aa94db..16f448179ec 100644
--- a/common/interface.go
+++ b/common/interface.go
@@ -13,7 +13,15 @@ import (
 // TrieIteratorChannels defines the channels that are being used when iterating the trie nodes
 type TrieIteratorChannels struct {
 	LeavesChan chan core.KeyValueHolder
-	ErrChan    chan error
+	ErrChan    BufferedErrChan
+}
+
+// BufferedErrChan is an interface that defines the methods for a buffered error channel
+type BufferedErrChan interface {
+	WriteInChanNonBlocking(err error)
+	ReadFromChanNonBlocking() error
+	Close()
+	IsInterfaceNil() bool
 }
 
 // Trie is an interface for Merkle Trees implementations
diff --git a/debug/process/stateExport.go b/debug/process/stateExport.go
index 9fbdd6ce1bc..b8cd8128255 100644
--- a/debug/process/stateExport.go
+++ b/debug/process/stateExport.go
@@ -9,6 +9,7 @@ import (
 
 	"github.com/multiversx/mx-chain-core-go/core"
 	"github.com/multiversx/mx-chain-go/common"
+	"github.com/multiversx/mx-chain-go/common/errChan"
 	"github.com/multiversx/mx-chain-go/state"
 )
 
@@ -66,7 +67,7 @@ func getCode(accountsDB state.AccountsAdapter, codeHash []byte) ([]byte, error)
 func getData(accountsDB state.AccountsAdapter, rootHash []byte, address []byte) ([]string, error) {
 	leavesChannels := &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 
 	err := accountsDB.GetAllLeaves(leavesChannels, context.Background(), rootHash)
@@ -89,7 +90,7 @@ func getData(accountsDB state.AccountsAdapter, rootHash []byte, address []byte)
 			hex.EncodeToString(valWithoutSuffix)))
 	}
 
-	err = <-leavesChannels.ErrChan
+	err = leavesChannels.ErrChan.ReadFromChanNonBlocking()
 	if err != nil {
 		return nil, fmt.Errorf("%w while trying to export data on hex root hash %s, address %s",
 			err, hex.EncodeToString(rootHash), hex.EncodeToString(address))
diff --git a/epochStart/metachain/systemSCs.go b/epochStart/metachain/systemSCs.go
index 642053ad7d1..faf909008fe 100644
--- a/epochStart/metachain/systemSCs.go
+++ b/epochStart/metachain/systemSCs.go
@@ -16,6 +16,7 @@ import (
 	"github.com/multiversx/mx-chain-core-go/data/block"
 	"github.com/multiversx/mx-chain-core-go/marshal"
 	"github.com/multiversx/mx-chain-go/common"
+	"github.com/multiversx/mx-chain-go/common/errChan"
 	vInfo "github.com/multiversx/mx-chain-go/common/validatorInfo"
 	"github.com/multiversx/mx-chain-go/config"
 	"github.com/multiversx/mx-chain-go/epochStart"
@@ -1102,7 +1103,7 @@ func (s *systemSCProcessor) getArgumentsForSetOwnerFunctionality(userValidatorAc
 
 	leavesChannels := &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	err = userValidatorAccount.DataTrie().GetAllLeavesOnChannel(leavesChannels, context.Background(), rootHash, keyBuilder.NewKeyBuilder())
 	if err != nil {
@@ -1125,7 +1126,7 @@ func (s *systemSCProcessor) getArgumentsForSetOwnerFunctionality(userValidatorAc
 		}
 	}
 
-	err = common.GetErrorFromChanNonBlocking(leavesChannels.ErrChan)
+	err = leavesChannels.ErrChan.ReadFromChanNonBlocking()
 	if err != nil {
 		return nil, err
 	}
diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go
index 49c62ae9a50..347a2645790 100644
--- a/factory/processing/processComponents.go
+++ b/factory/processing/processComponents.go
@@ -16,6 +16,7 @@ import (
 	"github.com/multiversx/mx-chain-core-go/data/outport"
 	nodeFactory "github.com/multiversx/mx-chain-go/cmd/node/factory"
 	"github.com/multiversx/mx-chain-go/common"
+	"github.com/multiversx/mx-chain-go/common/errChan"
 	"github.com/multiversx/mx-chain-go/config"
 	"github.com/multiversx/mx-chain-go/consensus"
 	"github.com/multiversx/mx-chain-go/dataRetriever"
@@ -874,7 +875,7 @@ func (pcf *processComponentsFactory) indexAndReturnGenesisAccounts() (map[string
 
 	leavesChannels := &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	err = pcf.state.AccountsAdapter().GetAllLeaves(leavesChannels, context.Background(), rootHash)
 	if err != nil {
@@ -901,7 +902,7 @@ func (pcf *processComponentsFactory) indexAndReturnGenesisAccounts() (map[string
 		}
 	}
 
-	err = common.GetErrorFromChanNonBlocking(leavesChannels.ErrChan)
+	err = leavesChannels.ErrChan.ReadFromChanNonBlocking()
 	if err != nil {
 		return nil, err
 	}
diff --git a/integrationTests/state/stateTrieClose/stateTrieClose_test.go b/integrationTests/state/stateTrieClose/stateTrieClose_test.go
index ab18ce244b6..cfa352c9b24 100644
--- a/integrationTests/state/stateTrieClose/stateTrieClose_test.go
+++ b/integrationTests/state/stateTrieClose/stateTrieClose_test.go
@@ -9,6 +9,7 @@ import (
 
 	"github.com/multiversx/mx-chain-core-go/core"
 	"github.com/multiversx/mx-chain-go/common"
+	"github.com/multiversx/mx-chain-go/common/errChan"
 	"github.com/multiversx/mx-chain-go/config"
 	"github.com/multiversx/mx-chain-go/integrationTests"
 	"github.com/multiversx/mx-chain-go/testscommon"
@@ -36,25 +37,25 @@ func TestPatriciaMerkleTrie_Close(t *testing.T) {
 	rootHash, _ := tr.RootHash()
 	leavesChannel1 := &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	_ = tr.GetAllLeavesOnChannel(leavesChannel1, context.Background(), rootHash, keyBuilder.NewDisabledKeyBuilder())
 	time.Sleep(time.Second) // allow the go routine to start
 	idx, _ := gc.Snapshot()
 	diff := gc.DiffGoRoutines(idxInitial, idx)
 	assert.True(t, len(diff) <= 1) // can be 0 on a fast running host
-	err := common.GetErrorFromChanNonBlocking(leavesChannel1.ErrChan)
+	err := leavesChannel1.ErrChan.ReadFromChanNonBlocking()
 	assert.Nil(t, err)
 
 	leavesChannel1 = &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	_ = tr.GetAllLeavesOnChannel(leavesChannel1, context.Background(), rootHash, keyBuilder.NewDisabledKeyBuilder())
 	idx, _ = gc.Snapshot()
 	diff = gc.DiffGoRoutines(idxInitial, idx)
 	assert.True(t, len(diff) <= 2)
-	err = common.GetErrorFromChanNonBlocking(leavesChannel1.ErrChan)
+	err = leavesChannel1.ErrChan.ReadFromChanNonBlocking()
 	assert.Nil(t, err)
 
 	_ = tr.Update([]byte("god"), []byte("puppy"))
@@ -63,13 +64,13 @@ func TestPatriciaMerkleTrie_Close(t *testing.T) {
 	rootHash, _ = tr.RootHash()
 	leavesChannel1 = &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	_ = tr.GetAllLeavesOnChannel(leavesChannel1, context.Background(), rootHash, keyBuilder.NewDisabledKeyBuilder())
 	idx, _ = gc.Snapshot()
 	diff = gc.DiffGoRoutines(idxInitial, idx)
 	assert.Equal(t, 3, len(diff), fmt.Sprintf("%v", diff))
-	err = common.GetErrorFromChanNonBlocking(leavesChannel1.ErrChan)
+	err = leavesChannel1.ErrChan.ReadFromChanNonBlocking()
 	assert.Nil(t, err)
 
 	_ = tr.Update([]byte("eggod"), []byte("cat"))
@@ -78,14 +79,14 @@ func TestPatriciaMerkleTrie_Close(t *testing.T) {
 	rootHash, _ = tr.RootHash()
 	leavesChannel2 := &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	_ = tr.GetAllLeavesOnChannel(leavesChannel2, context.Background(), rootHash, keyBuilder.NewDisabledKeyBuilder())
 	time.Sleep(time.Second) // allow the go routine to start
 	idx, _ = gc.Snapshot()
 	diff = gc.DiffGoRoutines(idxInitial, idx)
 	assert.True(t, len(diff) <= 4)
-	err = common.GetErrorFromChanNonBlocking(leavesChannel2.ErrChan)
+	err = leavesChannel2.ErrChan.ReadFromChanNonBlocking()
 	assert.Nil(t, err)
 
 	for range leavesChannel1.LeavesChan {
@@ -94,7 +95,7 @@ func TestPatriciaMerkleTrie_Close(t *testing.T) {
 	idx, _ = gc.Snapshot()
 	diff = gc.DiffGoRoutines(idxInitial, idx)
 	assert.True(t, len(diff) <= 3)
-	err = common.GetErrorFromChanNonBlocking(leavesChannel1.ErrChan)
+	err = leavesChannel1.ErrChan.ReadFromChanNonBlocking()
 	assert.Nil(t, err)
 
 	for range leavesChannel2.LeavesChan {
@@ -103,7 +104,7 @@ func TestPatriciaMerkleTrie_Close(t *testing.T) {
 	idx, _ = gc.Snapshot()
 	diff = gc.DiffGoRoutines(idxInitial, idx)
 	assert.True(t, len(diff) <= 2)
-	err = common.GetErrorFromChanNonBlocking(leavesChannel2.ErrChan)
+	err = leavesChannel2.ErrChan.ReadFromChanNonBlocking()
 	assert.Nil(t, err)
 
 	err = tr.Close()
diff --git a/integrationTests/state/stateTrieSync/stateTrieSync_test.go b/integrationTests/state/stateTrieSync/stateTrieSync_test.go
index 0e7387825fd..175f3a3c460 100644
--- a/integrationTests/state/stateTrieSync/stateTrieSync_test.go
+++ b/integrationTests/state/stateTrieSync/stateTrieSync_test.go
@@ -3,6 +3,7 @@ package stateTrieSync
 import (
 	"context"
 	"fmt"
+	"github.com/multiversx/mx-chain-go/common/errChan"
 	"math/big"
 	"strconv"
 	"testing"
@@ -329,13 +330,13 @@ func testMultipleDataTriesSync(t *testing.T, numAccounts int, numDataTrieLeaves
 	rootHash, _ := accState.RootHash()
 	leavesChannel := &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	err = accState.GetAllLeaves(leavesChannel, context.Background(), rootHash)
 	for range leavesChannel.LeavesChan {
 	}
 	require.Nil(t, err)
-	err = common.GetErrorFromChanNonBlocking(leavesChannel.ErrChan)
+	err = leavesChannel.ErrChan.ReadFromChanNonBlocking()
 	require.Nil(t, err)
 
 	requesterTrie := nRequester.TrieContainer.Get([]byte(trieFactory.UserAccountTrie))
@@ -357,7 +358,7 @@ func testMultipleDataTriesSync(t *testing.T, numAccounts int, numDataTrieLeaves
 
 	leavesChannel = &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	err = nRequester.AccntState.GetAllLeaves(leavesChannel, context.Background(), rootHash)
 	assert.Nil(t, err)
@@ -365,7 +366,7 @@ func testMultipleDataTriesSync(t *testing.T, numAccounts int, numDataTrieLeaves
 	for range leavesChannel.LeavesChan {
 		numLeaves++
 	}
-	err = common.GetErrorFromChanNonBlocking(leavesChannel.ErrChan)
+	err = leavesChannel.ErrChan.ReadFromChanNonBlocking()
 	require.Nil(t, err)
 	assert.Equal(t, numAccounts, numLeaves)
 	checkAllDataTriesAreSynced(t, numDataTrieLeaves, requesterTrie, dataTrieRootHashes)
@@ -559,7 +560,7 @@ func addAccountsToState(t *testing.T, numAccounts int, numDataTrieLeaves int, ac
 func getNumLeaves(t *testing.T, tr common.Trie, rootHash []byte) int {
 	leavesChannel := &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	err := tr.GetAllLeavesOnChannel(leavesChannel, context.Background(), rootHash, keyBuilder.NewDisabledKeyBuilder())
 	require.Nil(t, err)
@@ -569,7 +570,7 @@ func getNumLeaves(t *testing.T, tr common.Trie, rootHash []byte) int {
 		numLeaves++
 	}
 
-	err = common.GetErrorFromChanNonBlocking(leavesChannel.ErrChan)
+	err = leavesChannel.ErrChan.ReadFromChanNonBlocking()
 	require.Nil(t, err)
 
 	return numLeaves
diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go
index 532b7a64673..8d3b1834f34 100644
--- a/integrationTests/testProcessorNode.go
+++ b/integrationTests/testProcessorNode.go
@@ -34,6 +34,7 @@ import (
 	nodeFactory "github.com/multiversx/mx-chain-go/cmd/node/factory"
 	"github.com/multiversx/mx-chain-go/common"
 	"github.com/multiversx/mx-chain-go/common/enablers"
+	"github.com/multiversx/mx-chain-go/common/errChan"
 	"github.com/multiversx/mx-chain-go/common/forking"
 	"github.com/multiversx/mx-chain-go/config"
 	"github.com/multiversx/mx-chain-go/consensus"
@@ -3148,7 +3149,7 @@ func GetTokenIdentifier(nodes []*TestProcessorNode, ticker []byte) []byte {
 		rootHash, _ := userAcc.DataTrie().RootHash()
 		chLeaves := &common.TrieIteratorChannels{
 			LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-			ErrChan:    make(chan error, 1),
+			ErrChan:    errChan.NewErrChan(),
 		}
 		_ = userAcc.DataTrie().GetAllLeavesOnChannel(chLeaves, context.Background(), rootHash, keyBuilder.NewKeyBuilder())
 		for leaf := range chLeaves.LeavesChan {
@@ -3159,7 +3160,7 @@ func GetTokenIdentifier(nodes []*TestProcessorNode, ticker []byte) []byte {
 			return leaf.Key()
 		}
 
-		err := common.GetErrorFromChanNonBlocking(chLeaves.ErrChan)
+		err := chLeaves.ErrChan.ReadFromChanNonBlocking()
 		if err != nil {
 			log.Error("error getting all leaves from channel", "err", err)
 		}
diff --git a/node/node.go b/node/node.go
index ce56b3a02e4..3af65f4d36b 100644
--- a/node/node.go
+++ b/node/node.go
@@ -22,6 +22,7 @@ import (
 	"github.com/multiversx/mx-chain-core-go/data/transaction"
 	disabledSig "github.com/multiversx/mx-chain-crypto-go/signing/disabled/singlesig"
 	"github.com/multiversx/mx-chain-go/common"
+	"github.com/multiversx/mx-chain-go/common/errChan"
 	"github.com/multiversx/mx-chain-go/dataRetriever"
 	"github.com/multiversx/mx-chain-go/debug"
 	"github.com/multiversx/mx-chain-go/facade"
@@ -215,7 +216,7 @@ func (n *Node) GetAllIssuedESDTs(tokenType string, ctx context.Context) ([]strin
 
 	chLeaves := &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	err = userAccount.DataTrie().GetAllLeavesOnChannel(chLeaves, ctx, rootHash, keyBuilder.NewKeyBuilder())
 	if err != nil {
@@ -243,7 +244,7 @@ func (n *Node) GetAllIssuedESDTs(tokenType string, ctx context.Context) ([]strin
 		}
 	}
 
-	err = common.GetErrorFromChanNonBlocking(chLeaves.ErrChan)
+	err = chLeaves.ErrChan.ReadFromChanNonBlocking()
 	if err != nil {
 		return nil, err
 	}
@@ -291,7 +292,7 @@ func (n *Node) GetKeyValuePairs(address string, options api.AccountQueryOptions,
 
 	chLeaves := &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	err = userAccount.DataTrie().GetAllLeavesOnChannel(chLeaves, ctx, rootHash, keyBuilder.NewKeyBuilder())
 	if err != nil {
@@ -310,7 +311,7 @@ func (n *Node) GetKeyValuePairs(address string, options api.AccountQueryOptions,
 		mapToReturn[hex.EncodeToString(leaf.Key())] = hex.EncodeToString(value)
 	}
 
-	err = common.GetErrorFromChanNonBlocking(chLeaves.ErrChan)
+	err = chLeaves.ErrChan.ReadFromChanNonBlocking()
 	if err != nil {
 		return nil, api.BlockInfo{}, err
 	}
@@ -399,7 +400,7 @@ func (n *Node) getTokensIDsWithFilter(
 
 	chLeaves := &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	err = userAccount.DataTrie().GetAllLeavesOnChannel(chLeaves, ctx, rootHash, keyBuilder.NewKeyBuilder())
 	if err != nil {
@@ -422,7 +423,7 @@ func (n *Node) getTokensIDsWithFilter(
 		}
 	}
 
-	err = common.GetErrorFromChanNonBlocking(chLeaves.ErrChan)
+	err = chLeaves.ErrChan.ReadFromChanNonBlocking()
 	if err != nil {
 		return nil, api.BlockInfo{}, err
 	}
@@ -535,7 +536,7 @@ func (n *Node) GetAllESDTTokens(address string, options api.AccountQueryOptions,
 
 	chLeaves := &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	err = userAccount.DataTrie().GetAllLeavesOnChannel(chLeaves, ctx, rootHash, keyBuilder.NewKeyBuilder())
 	if err != nil {
@@ -573,7 +574,7 @@ func (n *Node) GetAllESDTTokens(address string, options api.AccountQueryOptions,
 		allESDTs[tokenName] = esdtToken
 	}
 
-	err = common.GetErrorFromChanNonBlocking(chLeaves.ErrChan)
+	err = chLeaves.ErrChan.ReadFromChanNonBlocking()
 	if err != nil {
 		return nil, api.BlockInfo{}, err
 	}
diff --git a/node/node_test.go b/node/node_test.go
index 59d0c18cfc9..8915cdaac92 100644
--- a/node/node_test.go
+++ b/node/node_test.go
@@ -310,7 +310,7 @@ func TestNode_GetKeyValuePairs(t *testing.T) {
 				trieLeaf2 := keyValStorage.NewKeyValStorage(k2, append(v2, suffix...))
 				leavesChannels.LeavesChan <- trieLeaf2
 				close(leavesChannels.LeavesChan)
-				close(leavesChannels.ErrChan)
+				leavesChannels.ErrChan.Close()
 			}()
 
 			return nil
@@ -369,7 +369,7 @@ func TestNode_GetKeyValuePairs_GetAllLeavesShouldFail(t *testing.T) {
 		&trieMock.TrieStub{
 			GetAllLeavesOnChannelCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte, _ common.KeyBuilder) error {
 				go func() {
-					leavesChannels.ErrChan <- expectedErr
+					leavesChannels.ErrChan.WriteInChanNonBlocking(expectedErr)
 					close(leavesChannels.LeavesChan)
 				}()
 
@@ -424,7 +424,7 @@ func TestNode_GetKeyValuePairsContextShouldTimeout(t *testing.T) {
 				go func() {
 					time.Sleep(time.Second)
 					close(leavesChannels.LeavesChan)
-					close(leavesChannels.ErrChan)
+					leavesChannels.ErrChan.Close()
 				}()
 
 				return nil
@@ -627,7 +627,7 @@ func TestNode_GetAllESDTTokens(t *testing.T) {
 				trieLeaf := keyValStorage.NewKeyValStorage(esdtKey, nil)
 				leavesChannels.LeavesChan <- trieLeaf
 				close(leavesChannels.LeavesChan)
-				close(leavesChannels.ErrChan)
+				leavesChannels.ErrChan.Close()
 			}()
 
 			return nil
@@ -683,7 +683,7 @@ func TestNode_GetAllESDTTokens_GetAllLeavesShouldFail(t *testing.T) {
 		&trieMock.TrieStub{
 			GetAllLeavesOnChannelCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte, _ common.KeyBuilder) error {
 				go func() {
-					leavesChannels.ErrChan <- expectedErr
+					leavesChannels.ErrChan.WriteInChanNonBlocking(expectedErr)
 					close(leavesChannels.LeavesChan)
 				}()
 
@@ -740,7 +740,7 @@ func TestNode_GetAllESDTTokensContextShouldTimeout(t *testing.T) {
 				go func() {
 					time.Sleep(time.Second)
 					close(leavesChannels.LeavesChan)
-					close(leavesChannels.ErrChan)
+					leavesChannels.ErrChan.Close()
 				}()
 
 				return nil
@@ -834,7 +834,7 @@ func TestNode_GetAllESDTTokensShouldReturnEsdtAndFormattedNft(t *testing.T) {
 				leavesChannels.LeavesChan <- trieLeaf
 				wg.Done()
 				close(leavesChannels.LeavesChan)
-				close(leavesChannels.ErrChan)
+				leavesChannels.ErrChan.Close()
 			}()
 
 			wg.Wait()
@@ -920,7 +920,7 @@ func TestNode_GetAllIssuedESDTs(t *testing.T) {
 				trieLeaf = keyValStorage.NewKeyValStorage(nftToken, append(nftMarshalledData, nftSuffix...))
 				leavesChannels.LeavesChan <- trieLeaf
 				close(leavesChannels.LeavesChan)
-				close(leavesChannels.ErrChan)
+				leavesChannels.ErrChan.Close()
 			}()
 
 			return nil
@@ -1006,7 +1006,7 @@ func TestNode_GetESDTsWithRole(t *testing.T) {
 				trieLeaf := keyValStorage.NewKeyValStorage(esdtToken, append(marshalledData, esdtSuffix...))
 				leavesChannels.LeavesChan <- trieLeaf
 				close(leavesChannels.LeavesChan)
-				close(leavesChannels.ErrChan)
+				leavesChannels.ErrChan.Close()
 			}()
 
 			return nil
@@ -1086,7 +1086,7 @@ func TestNode_GetESDTsRoles(t *testing.T) {
 				trieLeaf := keyValStorage.NewKeyValStorage(esdtToken, append(marshalledData, esdtSuffix...))
 				leavesChannels.LeavesChan <- trieLeaf
 				close(leavesChannels.LeavesChan)
-				close(leavesChannels.ErrChan)
+				leavesChannels.ErrChan.Close()
 			}()
 
 			return nil
@@ -1151,7 +1151,7 @@ func TestNode_GetNFTTokenIDsRegisteredByAddress(t *testing.T) {
 				trieLeaf := keyValStorage.NewKeyValStorage(esdtToken, append(marshalledData, esdtSuffix...))
 				leavesChannels.LeavesChan <- trieLeaf
 				close(leavesChannels.LeavesChan)
-				close(leavesChannels.ErrChan)
+				leavesChannels.ErrChan.Close()
 			}()
 
 			return nil
@@ -1208,7 +1208,7 @@ func TestNode_GetNFTTokenIDsRegisteredByAddressContextShouldTimeout(t *testing.T
 				go func() {
 					time.Sleep(time.Second)
 					close(leavesChannels.LeavesChan)
-					close(leavesChannels.ErrChan)
+					leavesChannels.ErrChan.Close()
 				}()
 
 				return nil
diff --git a/node/trieIterators/delegatedListProcessor.go b/node/trieIterators/delegatedListProcessor.go
index 5db7ecb4116..c77d54a0880 100644
--- a/node/trieIterators/delegatedListProcessor.go
+++ b/node/trieIterators/delegatedListProcessor.go
@@ -11,6 +11,7 @@ import (
 	"github.com/multiversx/mx-chain-core-go/core"
 	"github.com/multiversx/mx-chain-core-go/data/api"
 	"github.com/multiversx/mx-chain-go/common"
+	"github.com/multiversx/mx-chain-go/common/errChan"
 	"github.com/multiversx/mx-chain-go/epochStart"
 	"github.com/multiversx/mx-chain-go/process"
 	"github.com/multiversx/mx-chain-go/trie/keyBuilder"
@@ -129,7 +130,7 @@ func (dlp *delegatedListProcessor) getDelegatorsList(delegationSC []byte, ctx co
 
 	chLeaves := &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	err = delegatorAccount.DataTrie().GetAllLeavesOnChannel(chLeaves, ctx, rootHash, keyBuilder.NewKeyBuilder())
 	if err != nil {
@@ -146,7 +147,7 @@ func (dlp *delegatedListProcessor) getDelegatorsList(delegationSC []byte, ctx co
 		delegators = append(delegators, leafKey)
 	}
 
-	err = common.GetErrorFromChanNonBlocking(chLeaves.ErrChan)
+	err = chLeaves.ErrChan.ReadFromChanNonBlocking()
 	if err != nil {
 		return nil, err
 	}
diff --git a/node/trieIterators/delegatedListProcessor_test.go b/node/trieIterators/delegatedListProcessor_test.go
index 090f8ce68e1..c669b43924e 100644
--- a/node/trieIterators/delegatedListProcessor_test.go
+++ b/node/trieIterators/delegatedListProcessor_test.go
@@ -232,7 +232,7 @@ func createDelegationScAccount(address []byte, leaves [][]byte, rootHash []byte,
 				}
 
 				close(leavesChannels.LeavesChan)
-				close(leavesChannels.ErrChan)
+				leavesChannels.ErrChan.Close()
 			}()
 
 			return nil
diff --git a/node/trieIterators/directStakedListProcessor.go b/node/trieIterators/directStakedListProcessor.go
index 0ff046919b4..3e4ca62daeb 100644
--- a/node/trieIterators/directStakedListProcessor.go
+++ b/node/trieIterators/directStakedListProcessor.go
@@ -7,6 +7,7 @@ import (
 	"github.com/multiversx/mx-chain-core-go/core"
 	"github.com/multiversx/mx-chain-core-go/data/api"
 	"github.com/multiversx/mx-chain-go/common"
+	"github.com/multiversx/mx-chain-go/common/errChan"
 	"github.com/multiversx/mx-chain-go/state"
 	"github.com/multiversx/mx-chain-go/trie/keyBuilder"
 	"github.com/multiversx/mx-chain-go/vm"
@@ -56,7 +57,7 @@ func (dslp *directStakedListProcessor) getAllStakedAccounts(validatorAccount sta
 
 	chLeaves := &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	err = validatorAccount.DataTrie().GetAllLeavesOnChannel(chLeaves, ctx, rootHash, keyBuilder.NewKeyBuilder())
 	if err != nil {
@@ -90,7 +91,7 @@ func (dslp *directStakedListProcessor) getAllStakedAccounts(validatorAccount sta
 		stakedAccounts = append(stakedAccounts, val)
 	}
 
-	err = common.GetErrorFromChanNonBlocking(chLeaves.ErrChan)
+	err = chLeaves.ErrChan.ReadFromChanNonBlocking()
 	if err != nil {
 		return nil, err
 	}
diff --git a/node/trieIterators/directStakedListProcessor_test.go b/node/trieIterators/directStakedListProcessor_test.go
index 18b0bba952d..29e19f82542 100644
--- a/node/trieIterators/directStakedListProcessor_test.go
+++ b/node/trieIterators/directStakedListProcessor_test.go
@@ -162,7 +162,7 @@ func createValidatorScAccount(address []byte, leaves [][]byte, rootHash []byte,
 				}
 
 				close(leavesChannels.LeavesChan)
-				close(leavesChannels.ErrChan)
+				leavesChannels.ErrChan.Close()
 			}()
 
 			return nil
diff --git a/node/trieIterators/stakeValuesProcessor.go b/node/trieIterators/stakeValuesProcessor.go
index c77169203d3..b0f01baff76 100644
--- a/node/trieIterators/stakeValuesProcessor.go
+++ b/node/trieIterators/stakeValuesProcessor.go
@@ -10,6 +10,7 @@ import (
 	"github.com/multiversx/mx-chain-core-go/core/check"
 	"github.com/multiversx/mx-chain-core-go/data/api"
 	"github.com/multiversx/mx-chain-go/common"
+	"github.com/multiversx/mx-chain-go/common/errChan"
 	"github.com/multiversx/mx-chain-go/process"
 	"github.com/multiversx/mx-chain-go/state"
 	"github.com/multiversx/mx-chain-go/trie/keyBuilder"
@@ -98,7 +99,7 @@ func (svp *stakedValuesProcessor) computeBaseStakedAndTopUp(ctx context.Context)
 	// TODO investigate if a call to GetAllLeavesKeysOnChannel (without values) might increase performance
 	chLeaves := &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	err = validatorAccount.DataTrie().GetAllLeavesOnChannel(chLeaves, ctx, rootHash, keyBuilder.NewKeyBuilder())
 	if err != nil {
@@ -123,7 +124,7 @@ func (svp *stakedValuesProcessor) computeBaseStakedAndTopUp(ctx context.Context)
 		totalTopUp = totalTopUp.Add(totalTopUp, info.topUpValue)
 	}
 
-	err = common.GetErrorFromChanNonBlocking(chLeaves.ErrChan)
+	err = chLeaves.ErrChan.ReadFromChanNonBlocking()
 	if err != nil {
 		return nil, nil, err
 	}
diff --git a/node/trieIterators/stakeValuesProcessor_test.go b/node/trieIterators/stakeValuesProcessor_test.go
index 166b4fc37f0..6a81e0ddd76 100644
--- a/node/trieIterators/stakeValuesProcessor_test.go
+++ b/node/trieIterators/stakeValuesProcessor_test.go
@@ -195,7 +195,7 @@ func TestTotalStakedValueProcessor_GetTotalStakedValue_ContextShouldTimeout(t *t
 			GetAllLeavesOnChannelCalled: func(leavesChannels *common.TrieIteratorChannels, _ context.Context, _ []byte, _ common.KeyBuilder) error {
 				time.Sleep(time.Second)
 				close(leavesChannels.LeavesChan)
-				close(leavesChannels.ErrChan)
+				leavesChannels.ErrChan.Close()
 				return nil
 			},
 			RootCalled: func() ([]byte, error) {
@@ -297,7 +297,7 @@ func TestTotalStakedValueProcessor_GetTotalStakedValue(t *testing.T) {
 				channels.LeavesChan <- leaf6
 
 				close(channels.LeavesChan)
-				close(channels.ErrChan)
+				channels.ErrChan.Close()
 			}()
 
 			return nil
diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go
index a9c47516a55..b327d045b63 100644
--- a/process/block/baseProcess.go
+++ b/process/block/baseProcess.go
@@ -21,6 +21,7 @@ import (
 	"github.com/multiversx/mx-chain-core-go/marshal"
 	nodeFactory "github.com/multiversx/mx-chain-go/cmd/node/factory"
 	"github.com/multiversx/mx-chain-go/common"
+	"github.com/multiversx/mx-chain-go/common/errChan"
 	"github.com/multiversx/mx-chain-go/common/holders"
 	"github.com/multiversx/mx-chain-go/common/logging"
 	"github.com/multiversx/mx-chain-go/config"
@@ -1733,7 +1734,7 @@ func (bp *baseProcessor) commitTrieEpochRootHashIfNeeded(metaBlock *block.MetaBl
 
 	iteratorChannels := &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	err = userAccountsDb.GetAllLeaves(iteratorChannels, context.Background(), rootHash)
 	if err != nil {
@@ -1762,7 +1763,7 @@ func (bp *baseProcessor) commitTrieEpochRootHashIfNeeded(metaBlock *block.MetaBl
 			if len(rh) != 0 {
 				dataTrie := &common.TrieIteratorChannels{
 					LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-					ErrChan:    make(chan error, 1),
+					ErrChan:    errChan.NewErrChan(),
 				}
 				errDataTrieGet := userAccountsDb.GetAllLeaves(dataTrie, context.Background(), rh)
 				if errDataTrieGet != nil {
@@ -1774,7 +1775,7 @@ func (bp *baseProcessor) commitTrieEpochRootHashIfNeeded(metaBlock *block.MetaBl
 					currentSize += len(lf.Value())
 				}
 
-				err = common.GetErrorFromChanNonBlocking(dataTrie.ErrChan)
+				err = dataTrie.ErrChan.ReadFromChanNonBlocking()
 				if err != nil {
 					return err
 				}
@@ -1790,7 +1791,7 @@ func (bp *baseProcessor) commitTrieEpochRootHashIfNeeded(metaBlock *block.MetaBl
 		balanceSum.Add(balanceSum, userAccount.GetBalance())
 	}
 
-	err = common.GetErrorFromChanNonBlocking(iteratorChannels.ErrChan)
+	err = iteratorChannels.ErrChan.ReadFromChanNonBlocking()
 	if err != nil {
 		return err
 	}
diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go
index a8525909b4f..ba89195248f 100644
--- a/process/block/baseProcess_test.go
+++ b/process/block/baseProcess_test.go
@@ -1892,7 +1892,7 @@ func TestBaseProcessor_commitTrieEpochRootHashIfNeededShouldWork(t *testing.T) {
 			},
 			GetAllLeavesCalled: func(channels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte) error {
 				close(channels.LeavesChan)
-				close(channels.ErrChan)
+				channels.ErrChan.Close()
 				return nil
 			},
 		},
@@ -1936,7 +1936,7 @@ func TestBaseProcessor_commitTrieEpochRootHashIfNeeded_GetAllLeaves(t *testing.T
 			},
 			GetAllLeavesCalled: func(channels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte) error {
 				close(channels.LeavesChan)
-				close(channels.ErrChan)
+				channels.ErrChan.Close()
 				return expectedErr
 			},
 		},
@@ -1973,7 +1973,7 @@ func TestBaseProcessor_commitTrieEpochRootHashIfNeeded_GetAllLeaves(t *testing.T
 				return rootHash, nil
 			},
 			GetAllLeavesCalled: func(channels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte) error {
-				channels.ErrChan <- expectedErr
+				channels.ErrChan.WriteInChanNonBlocking(expectedErr)
 				close(channels.LeavesChan)
 				return nil
 			},
@@ -2033,14 +2033,14 @@ func TestBaseProcessor_commitTrieEpochRootHashIfNeededShouldUseDataTrieIfNeededW
 				if bytes.Equal(rootHash, rh) {
 					calledWithUserAccountRootHash = true
 					close(channels.LeavesChan)
-					close(channels.ErrChan)
+					channels.ErrChan.Close()
 					return nil
 				}
 
 				go func() {
 					channels.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("address"), []byte("bytes"))
 					close(channels.LeavesChan)
-					close(channels.ErrChan)
+					channels.ErrChan.Close()
 				}()
 
 				return nil
diff --git a/process/peer/process.go b/process/peer/process.go
index d5ed5d06b2e..76f264917f9 100644
--- a/process/peer/process.go
+++ b/process/peer/process.go
@@ -14,6 +14,7 @@ import (
 	"github.com/multiversx/mx-chain-core-go/data/block"
 	"github.com/multiversx/mx-chain-core-go/marshal"
 	"github.com/multiversx/mx-chain-go/common"
+	"github.com/multiversx/mx-chain-go/common/errChan"
 	"github.com/multiversx/mx-chain-go/common/validatorInfo"
 	"github.com/multiversx/mx-chain-go/dataRetriever"
 	"github.com/multiversx/mx-chain-go/process"
@@ -447,7 +448,7 @@ func (vs *validatorStatistics) getValidatorDataFromLeaves(
 		validators[currentShardId] = append(validators[currentShardId], validatorInfoData)
 	}
 
-	err := common.GetErrorFromChanNonBlocking(leavesChannels.ErrChan)
+	err := leavesChannels.ErrChan.ReadFromChanNonBlocking()
 	if err != nil {
 		return nil, err
 	}
@@ -562,7 +563,7 @@ func (vs *validatorStatistics) GetValidatorInfoForRootHash(rootHash []byte) (map
 
 	leavesChannels := &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	err := vs.peerAdapter.GetAllLeaves(leavesChannels, context.Background(), rootHash)
 	if err != nil {
diff --git a/process/peer/process_test.go b/process/peer/process_test.go
index ee1bab03e7f..5f84e61d749 100644
--- a/process/peer/process_test.go
+++ b/process/peer/process_test.go
@@ -1969,7 +1969,7 @@ func TestValidatorStatistics_ResetValidatorStatisticsAtNewEpoch(t *testing.T) {
 		go func() {
 			ch.LeavesChan <- keyValStorage.NewKeyValStorage(addrBytes0, marshalizedPa0)
 			close(ch.LeavesChan)
-			close(ch.ErrChan)
+			ch.ErrChan.Close()
 		}()
 
 		return nil
@@ -2032,7 +2032,7 @@ func TestValidatorStatistics_Process(t *testing.T) {
 			ch.LeavesChan <- keyValStorage.NewKeyValStorage(addrBytes0, marshalizedPa0)
 			ch.LeavesChan <- keyValStorage.NewKeyValStorage(addrBytesMeta, marshalizedPaMeta)
 			close(ch.LeavesChan)
-			close(ch.ErrChan)
+			ch.ErrChan.Close()
 		}()
 
 		return nil
@@ -2078,7 +2078,7 @@ func TestValidatorStatistics_GetValidatorInfoForRootHash(t *testing.T) {
 	peerAdapter.GetAllLeavesCalled = func(ch *common.TrieIteratorChannels, ctx context.Context, rootHash []byte) error {
 		if bytes.Equal(rootHash, hash) {
 			go func() {
-				ch.ErrChan <- expectedErr
+				ch.ErrChan.WriteInChanNonBlocking(expectedErr)
 				close(ch.LeavesChan)
 			}()
 
@@ -2108,7 +2108,7 @@ func TestValidatorStatistics_GetValidatorInfoForRootHash(t *testing.T) {
 			ch.LeavesChan <- keyValStorage.NewKeyValStorage(addrBytes0, marshalizedPa0)
 			ch.LeavesChan <- keyValStorage.NewKeyValStorage(addrBytesMeta, marshalizedPaMeta)
 			close(ch.LeavesChan)
-			close(ch.ErrChan)
+			ch.ErrChan.Close()
 		}()
 
 		return nil
@@ -2555,7 +2555,7 @@ func updateArgumentsWithNeeded(arguments peer.ArgValidatorStatisticsProcessor) {
 			ch.LeavesChan <- keyValStorage.NewKeyValStorage(addrBytes0, marshalizedPa0)
 			ch.LeavesChan <- keyValStorage.NewKeyValStorage(addrBytesMeta, marshalizedPaMeta)
 			close(ch.LeavesChan)
-			close(ch.ErrChan)
+			ch.ErrChan.Close()
 		}()
 
 		return nil
diff --git a/process/txsimulator/wrappedAccountsDB_test.go b/process/txsimulator/wrappedAccountsDB_test.go
index 1bf48e18531..016a6f6a0f1 100644
--- a/process/txsimulator/wrappedAccountsDB_test.go
+++ b/process/txsimulator/wrappedAccountsDB_test.go
@@ -7,6 +7,7 @@ import (
 	"github.com/multiversx/mx-chain-core-go/core"
 	"github.com/multiversx/mx-chain-core-go/core/check"
 	"github.com/multiversx/mx-chain-go/common"
+	"github.com/multiversx/mx-chain-go/common/errChan"
 	"github.com/multiversx/mx-chain-go/state"
 	stateMock "github.com/multiversx/mx-chain-go/testscommon/state"
 	vmcommon "github.com/multiversx/mx-chain-vm-common-go"
@@ -150,11 +151,11 @@ func TestReadOnlyAccountsDB_ReadOperationsShouldWork(t *testing.T) {
 
 	allLeaves := &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	err = roAccDb.GetAllLeaves(allLeaves, context.Background(), nil)
 	require.NoError(t, err)
 
-	err = common.GetErrorFromChanNonBlocking(allLeaves.ErrChan)
+	err = allLeaves.ErrChan.ReadFromChanNonBlocking()
 	require.NoError(t, err)
 }
diff --git a/state/accountsDB.go b/state/accountsDB.go
index 534e94d703c..e767ac459fe 100644
--- a/state/accountsDB.go
+++ b/state/accountsDB.go
@@ -16,6 +16,7 @@ import (
 	"github.com/multiversx/mx-chain-core-go/hashing"
 	"github.com/multiversx/mx-chain-core-go/marshal"
 	"github.com/multiversx/mx-chain-go/common"
+	"github.com/multiversx/mx-chain-go/common/errChan"
 	"github.com/multiversx/mx-chain-go/common/holders"
 	"github.com/multiversx/mx-chain-go/errors"
 	"github.com/multiversx/mx-chain-go/trie/keyBuilder"
@@ -1036,7 +1037,7 @@ func (adb *AccountsDB) recreateTrie(options common.RootHashHolder) error {
 func (adb *AccountsDB) RecreateAllTries(rootHash []byte) (map[string]common.Trie, error) {
 	leavesChannels := &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder, leavesChannelSize),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	mainTrie := adb.getMainTrie()
 	err := mainTrie.GetAllLeavesOnChannel(leavesChannels, context.Background(), rootHash, keyBuilder.NewDisabledKeyBuilder())
@@ -1067,7 +1068,7 @@ func (adb *AccountsDB) RecreateAllTries(rootHash []byte) (map[string]common.Trie
 		}
 	}
 
-	err = common.GetErrorFromChanNonBlocking(leavesChannels.ErrChan)
+	err = leavesChannels.ErrChan.ReadFromChanNonBlocking()
 	if err != nil {
 		return nil, err
 	}
@@ -1142,7 +1143,7 @@ func (adb *AccountsDB) SnapshotState(rootHash []byte) {
 	missingNodesChannel := make(chan []byte, missingNodesChannelSize)
 	iteratorChannels := &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder, leavesChannelSize),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 
 	stats := newSnapshotStatistics(1, 1)
@@ -1253,7 +1254,7 @@ func (adb *AccountsDB) processSnapshotCompletion(
 	stats *snapshotStatistics,
 	trieStorageManager common.StorageManager,
 	missingNodesCh chan []byte,
-	errChan chan error,
+	errChan common.BufferedErrChan,
 	rootHash []byte,
 	metrics *accountMetrics,
 	epoch uint32,
@@ -1263,15 +1264,15 @@ func (adb *AccountsDB) processSnapshotCompletion(
 	defer func() {
 		adb.isSnapshotInProgress.Reset()
 		adb.updateMetricsOnSnapshotCompletion(metrics, stats)
-		close(errChan)
+		errChan.Close()
 	}()
 
-	containsErrorDuringSnapshot := emptyErrChanReturningHadContained(errChan)
-	shouldNotMarkActive := trieStorageManager.IsClosed() || containsErrorDuringSnapshot
+	errorDuringSnapshot := errChan.ReadFromChanNonBlocking()
+	shouldNotMarkActive := trieStorageManager.IsClosed() || errorDuringSnapshot != nil
 	if shouldNotMarkActive {
 		log.Debug("will not set activeDB in epoch as the snapshot might be incomplete",
 			"epoch", epoch, "trie storage manager closed", trieStorageManager.IsClosed(),
-			"errors during snapshot found", containsErrorDuringSnapshot)
+			"errors during snapshot found", errorDuringSnapshot)
 		return
 	}
 
@@ -1283,7 +1284,7 @@ func (adb *AccountsDB) processSnapshotCompletion(
 	handleLoggingWhenError("error while putting active DB value into main storer", errPut)
 }
 
-func (adb *AccountsDB) syncMissingNodes(missingNodesChan chan []byte, errChan chan error, stats *snapshotStatistics, syncer AccountsDBSyncer) {
+func (adb *AccountsDB) syncMissingNodes(missingNodesChan chan []byte, errChan common.BufferedErrChan, stats *snapshotStatistics, syncer AccountsDBSyncer) {
 	defer stats.SyncFinished()
 
 	if check.IfNil(syncer) {
@@ -1291,7 +1292,7 @@ func (adb *AccountsDB) syncMissingNodes(missingNodesChan chan []byte, errChan ch
 		for missingNode := range missingNodesChan {
 			log.Warn("could not sync node", "hash", missingNode)
 		}
-		errChan <- ErrNilTrieSyncer
+		errChan.WriteInChanNonBlocking(ErrNilTrieSyncer)
 		return
 	}
 
@@ -1302,7 +1303,7 @@ func (adb *AccountsDB) syncMissingNodes(missingNodesChan chan []byte, errChan ch
 				"missing node hash", missingNode,
 				"error", err,
 			)
-			errChan <- err
+			errChan.WriteInChanNonBlocking(err)
 		}
 	}
 }
@@ -1370,7 +1371,7 @@ func (adb *AccountsDB) setStateCheckpoint(rootHash []byte) {
 
 	iteratorChannels := &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder, leavesChannelSize),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	missingNodesChannel := make(chan []byte, missingNodesChannelSize)
 	stats := newSnapshotStatistics(1, 1)
@@ -1436,7 +1437,7 @@ func (adb *AccountsDB) GetStatsForRootHash(rootHash []byte) (common.TriesStatist
 
 	iteratorChannels := &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder, leavesChannelSize),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	err := mainTrie.GetAllLeavesOnChannel(iteratorChannels, context.Background(), rootHash, keyBuilder.NewDisabledKeyBuilder())
 	if err != nil {
@@ -1459,7 +1460,7 @@ func (adb *AccountsDB) GetStatsForRootHash(rootHash []byte) (common.TriesStatist
 		collectStats(tr, stats, account.RootHash, address)
 	}
 
-	err = common.GetErrorFromChanNonBlocking(iteratorChannels.ErrChan)
+	err = iteratorChannels.ErrChan.ReadFromChanNonBlocking()
 	if err != nil {
 		return nil, err
 	}
diff --git a/state/accountsDB_test.go b/state/accountsDB_test.go
index f0ddcf55616..3cc0f40a149 100644
--- a/state/accountsDB_test.go
+++ b/state/accountsDB_test.go
@@ -18,6 +18,7 @@ import (
 	"github.com/multiversx/mx-chain-core-go/core/keyValStorage"
 	"github.com/multiversx/mx-chain-core-go/marshal"
 	"github.com/multiversx/mx-chain-go/common"
+	"github.com/multiversx/mx-chain-go/common/errChan"
 	"github.com/multiversx/mx-chain-go/common/holders"
 	"github.com/multiversx/mx-chain-go/config"
 	"github.com/multiversx/mx-chain-go/process/mock"
@@ -1027,7 +1028,7 @@ func TestAccountsDB_SnapshotStateWithErrorsShouldNotMarkActiveDB(t *testing.T) {
 			return true
 		},
 		TakeSnapshotCalled: func(_ string, _ []byte, _ []byte, iteratorChannels *common.TrieIteratorChannels, _ chan []byte, stats common.SnapshotStatisticsHandler, _ uint32) {
-			iteratorChannels.ErrChan <- expectedErr
+			iteratorChannels.ErrChan.WriteInChanNonBlocking(expectedErr)
 			close(iteratorChannels.LeavesChan)
 			stats.SnapshotFinished()
 		},
@@ -1428,7 +1429,7 @@ func TestAccountsDB_GetAllLeaves(t *testing.T) {
 		GetAllLeavesOnChannelCalled: func(channels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte, builder common.KeyBuilder) error {
 			getAllLeavesCalled = true
 			close(channels.LeavesChan)
-			close(channels.ErrChan)
+			channels.ErrChan.Close()
 
 			return nil
 		},
@@ -1441,13 +1442,13 @@ func TestAccountsDB_GetAllLeaves(t *testing.T) {
 
 	leavesChannel := &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	err := adb.GetAllLeaves(leavesChannel, context.Background(), []byte("root hash"))
 	assert.Nil(t, err)
 	assert.True(t, getAllLeavesCalled)
 
-	err = common.GetErrorFromChanNonBlocking(leavesChannel.ErrChan)
+	err = leavesChannel.ErrChan.ReadFromChanNonBlocking()
 	assert.Nil(t, err)
 }
@@ -2324,10 +2325,10 @@ func TestAccountsDB_RecreateAllTries(t *testing.T) {
 			GetAllLeavesOnChannelCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte, keyBuilder common.KeyBuilder) error {
 				go func() {
 					leavesChannels.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("key"), []byte("val"))
-					leavesChannels.ErrChan <- expectedErr
+					leavesChannels.ErrChan.WriteInChanNonBlocking(expectedErr)
 
 					close(leavesChannels.LeavesChan)
-					close(leavesChannels.ErrChan)
+					leavesChannels.ErrChan.Close()
 				}()
 
 				return nil
@@ -2355,7 +2356,7 @@ func TestAccountsDB_RecreateAllTries(t *testing.T) {
 					leavesChannels.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("key"), []byte("val"))
 
 					close(leavesChannels.LeavesChan)
-					close(leavesChannels.ErrChan)
+					leavesChannels.ErrChan.Close()
 				}()
 
 				return nil
@@ -2733,17 +2734,17 @@ func TestEmptyErrChanReturningHadContained(t *testing.T) {
 		t.Run("unbuffered chan", func(t *testing.T) {
 			t.Parallel()
 
-			errChan := make(chan error)
-			assert.False(t, state.EmptyErrChanReturningHadContained(errChan))
-			assert.Equal(t, 0, len(errChan))
+			errChannel := make(chan error)
+			assert.False(t, state.EmptyErrChanReturningHadContained(errChannel))
+			assert.Equal(t, 0, len(errChannel))
 		})
 
 		t.Run("buffered chan", func(t *testing.T) {
 			t.Parallel()
 
 			for i := 1; i < 10; i++ {
-				errChan := make(chan error, i)
-				assert.False(t, state.EmptyErrChanReturningHadContained(errChan))
-				assert.Equal(t, 0, len(errChan))
+				errChannel := make(chan error, i)
+				assert.False(t, state.EmptyErrChanReturningHadContained(errChannel))
+				assert.Equal(t, 0, len(errChannel))
 			}
 		})
 	})
@@ -2753,27 +2754,27 @@ func TestEmptyErrChanReturningHadContained(t *testing.T) {
 		t.Run("unbuffered chan", func(t *testing.T) {
 			t.Parallel()
 
-			errChan := make(chan error)
+			errChannel := make(chan error)
 			go func() {
-				errChan <- errors.New("test")
+				errChannel <- errors.New("test")
 			}()
 
 			time.Sleep(time.Second) // allow the go routine to start
 
-			assert.True(t, state.EmptyErrChanReturningHadContained(errChan))
-			assert.Equal(t, 0, len(errChan))
+			assert.True(t, state.EmptyErrChanReturningHadContained(errChannel))
+			assert.Equal(t, 0, len(errChannel))
 		})
 
 		t.Run("buffered chan", func(t *testing.T) {
 			t.Parallel()
 
 			for i := 1; i < 10; i++ {
-				errChan := make(chan error, i)
+				errChannel := make(chan error, i)
 				for j := 0; j < i; j++ {
-					errChan <- errors.New("test")
+					errChannel <- errors.New("test")
 				}
 
-				assert.True(t, state.EmptyErrChanReturningHadContained(errChan))
-				assert.Equal(t, 0, len(errChan))
+				assert.True(t, state.EmptyErrChanReturningHadContained(errChannel))
+				assert.Equal(t, 0, len(errChannel))
 			}
 		})
 	})
@@ -2900,6 +2901,39 @@ func TestAccountsDB_SyncMissingSnapshotNodes(t *testing.T) {
 
 		assert.True(t, isMissingNodeCalled)
 	})
+
+	t.Run("should not deadlock if sync err after another err", func(t *testing.T) {
+		t.Parallel()
+
+		missingNodeError := errors.New("missing trie node")
+		isMissingNodeCalled := false
+
+		memDbMock := testscommon.NewMemDbMock()
+		memDbMock.PutCalled = func(key, val []byte) error {
+			return fmt.Errorf("put error")
+		}
+		memDbMock.GetCalled = func(key []byte) ([]byte, error) {
+			if bytes.Equal(key, []byte(common.ActiveDBKey)) {
+				return []byte(common.ActiveDBVal), nil
+			}
+
+			isMissingNodeCalled = true
+			return nil, missingNodeError
+		}
+
+		tr, adb := getDefaultTrieAndAccountsDbWithCustomDB(&testscommon.SnapshotPruningStorerMock{MemDbMock: memDbMock})
+		prepareTrie(tr, 3)
+
+		rootHash, _ := tr.RootHash()
+
+		adb.SnapshotState(rootHash)
+
+		for tr.GetStorageManager().IsPruningBlocked() {
+			time.Sleep(time.Millisecond * 100)
+		}
+
+		assert.True(t, isMissingNodeCalled)
+	})
 }
 
 func prepareTrie(tr common.Trie, numKeys int) {
diff --git a/state/peerAccountsDB.go b/state/peerAccountsDB.go
index ed1f080069e..1e597d884af 100644
--- a/state/peerAccountsDB.go
+++ b/state/peerAccountsDB.go
@@ -2,6 +2,7 @@ package state
 
 import (
 	"github.com/multiversx/mx-chain-go/common"
+	"github.com/multiversx/mx-chain-go/common/errChan"
 )
 
 // PeerAccountsDB will save and synchronize data from peer processor, plus will synchronize with nodesCoordinator
@@ -56,7 +57,7 @@ func (adb *PeerAccountsDB) SnapshotState(rootHash []byte) {
 	missingNodesChannel := make(chan []byte, missingNodesChannelSize)
 	iteratorChannels := &common.TrieIteratorChannels{
 		LeavesChan: nil,
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	stats := newSnapshotStatistics(0, 1)
 	stats.NewSnapshotStarted()
@@ -92,7 +93,7 @@ func (adb *PeerAccountsDB) SetStateCheckpoint(rootHash []byte) {
 	stats.NewSnapshotStarted()
 	iteratorChannels := &common.TrieIteratorChannels{
 		LeavesChan: nil,
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	trieStorageManager.SetCheckpoint(rootHash, rootHash, iteratorChannels, missingNodesChannel, stats)
diff --git a/state/syncer/userAccountsSyncer.go b/state/syncer/userAccountsSyncer.go
index ca2e1142266..f508341b749 100644
--- a/state/syncer/userAccountsSyncer.go
+++ b/state/syncer/userAccountsSyncer.go
@@ -12,6 +12,7 @@ import (
 	"github.com/multiversx/mx-chain-core-go/core/check"
 	"github.com/multiversx/mx-chain-core-go/data"
 	"github.com/multiversx/mx-chain-go/common"
+	"github.com/multiversx/mx-chain-go/common/errChan"
 	"github.com/multiversx/mx-chain-go/epochStart"
 	"github.com/multiversx/mx-chain-go/process/factory"
 	"github.com/multiversx/mx-chain-go/state"
@@ -213,7 +214,7 @@ func (u *userAccountsSyncer) syncAccountDataTries(
 
 	leavesChannels := &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	err = mainTrie.GetAllLeavesOnChannel(leavesChannels, context.Background(), mainRootHash, keyBuilder.NewDisabledKeyBuilder())
 	if err != nil {
@@ -265,7 +266,7 @@ func (u *userAccountsSyncer) syncAccountDataTries(
 
 	wg.Wait()
 
-	err = common.GetErrorFromChanNonBlocking(leavesChannels.ErrChan)
+	err = leavesChannels.ErrChan.ReadFromChanNonBlocking()
 	if err != nil {
 		return err
 	}
diff --git a/trie/node_test.go b/trie/node_test.go
index dbc30aa4174..95c439aa91a 100644
--- a/trie/node_test.go
+++ b/trie/node_test.go
@@ -8,6 +8,7 @@ import (
 	"github.com/multiversx/mx-chain-core-go/core"
 	"github.com/multiversx/mx-chain-core-go/core/atomic"
 	"github.com/multiversx/mx-chain-go/common"
+	"github.com/multiversx/mx-chain-go/common/errChan"
 	dataMock "github.com/multiversx/mx-chain-go/dataRetriever/mock"
 	"github.com/multiversx/mx-chain-go/testscommon"
 	"github.com/multiversx/mx-chain-go/trie/keyBuilder"
@@ -518,7 +519,7 @@ func TestPatriciaMerkleTrie_GetAllLeavesCollapsedTrie(t *testing.T) {
 
 	leavesChannel := &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	err := tr.GetAllLeavesOnChannel(leavesChannel, context.Background(), tr.root.getHash(), keyBuilder.NewKeyBuilder())
 	assert.Nil(t, err)
@@ -528,7 +529,7 @@ func TestPatriciaMerkleTrie_GetAllLeavesCollapsedTrie(t *testing.T) {
 		leaves[string(l.Key())] = l.Value()
 	}
 
-	err = common.GetErrorFromChanNonBlocking(leavesChannel.ErrChan)
+	err = leavesChannel.ErrChan.ReadFromChanNonBlocking()
 	assert.Nil(t, err)
 	assert.Equal(t, 3, len(leaves))
diff --git a/trie/patriciaMerkleTrie.go b/trie/patriciaMerkleTrie.go
index e6d22323566..352772ab219 100644
--- a/trie/patriciaMerkleTrie.go
+++ b/trie/patriciaMerkleTrie.go
@@ -457,14 +457,14 @@ func (tr *patriciaMerkleTrie) GetAllLeavesOnChannel(
 	if err != nil {
 		tr.mutOperation.RUnlock()
 		close(leavesChannels.LeavesChan)
-		close(leavesChannels.ErrChan)
+		leavesChannels.ErrChan.Close()
 		return err
 	}
 
 	if check.IfNil(newTrie) || newTrie.root == nil {
 		tr.mutOperation.RUnlock()
 		close(leavesChannels.LeavesChan)
-		close(leavesChannels.ErrChan)
+		leavesChannels.ErrChan.Close()
 		return nil
 	}
 
@@ -481,7 +481,7 @@ func (tr *patriciaMerkleTrie) GetAllLeavesOnChannel(
 			ctx,
 		)
 		if err != nil {
-			writeInChanNonBlocking(leavesChannels.ErrChan, err)
+			leavesChannels.ErrChan.WriteInChanNonBlocking(err)
 			log.Error("could not get all trie leaves: ", "error", err)
 		}
 
@@ -490,7 +490,7 @@ func (tr *patriciaMerkleTrie) GetAllLeavesOnChannel(
 		tr.mutOperation.Unlock()
 
 		close(leavesChannels.LeavesChan)
-		close(leavesChannels.ErrChan)
+		leavesChannels.ErrChan.Close()
 	}()
 
 	return nil
diff --git a/trie/patriciaMerkleTrie_test.go b/trie/patriciaMerkleTrie_test.go
index 45b9066e490..ab0518247fc 100644
--- a/trie/patriciaMerkleTrie_test.go
+++ b/trie/patriciaMerkleTrie_test.go
@@ -15,6 +15,7 @@ import (
 	"github.com/multiversx/mx-chain-core-go/hashing/keccak"
 	"github.com/multiversx/mx-chain-core-go/marshal"
 	"github.com/multiversx/mx-chain-go/common"
+	"github.com/multiversx/mx-chain-go/common/errChan"
 	"github.com/multiversx/mx-chain-go/common/holders"
 	"github.com/multiversx/mx-chain-go/config"
 	"github.com/multiversx/mx-chain-go/testscommon"
@@ -475,7 +476,7 @@ func TestPatriciaMerkleTrie_GetSerializedNodesGetFromCheckpoint(t *testing.T) {
 	storageManager.AddDirtyCheckpointHashes(rootHash, dirtyHashes)
 	iteratorChannels := &common.TrieIteratorChannels{
 		LeavesChan: nil,
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	storageManager.SetCheckpoint(rootHash, make([]byte, 0), iteratorChannels, nil, &trieMock.MockStatistics{})
 	trie.WaitForOperationToComplete(storageManager)
@@ -562,7 +563,7 @@ func TestPatriciaMerkleTrie_GetAllLeavesOnChannel(t *testing.T) {
 
 		iteratorChannels := &common.TrieIteratorChannels{
 			LeavesChan: nil,
-			ErrChan:    make(chan error, 1),
+			ErrChan:    errChan.NewErrChan(),
 		}
 		err := tr.GetAllLeavesOnChannel(iteratorChannels, context.Background(), []byte{}, keyBuilder.NewDisabledKeyBuilder())
 		assert.Equal(t, trie.ErrNilTrieIteratorLeavesChannel, err)
@@ -588,7 +589,7 @@ func TestPatriciaMerkleTrie_GetAllLeavesOnChannel(t *testing.T) {
 
 		leavesChannel := &common.TrieIteratorChannels{
 			LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-			ErrChan:    make(chan error, 1),
+			ErrChan:    errChan.NewErrChan(),
 		}
 		err := tr.GetAllLeavesOnChannel(leavesChannel, context.Background(), []byte{}, keyBuilder.NewDisabledKeyBuilder())
 		assert.Nil(t, err)
@@ -597,7 +598,7 @@ func TestPatriciaMerkleTrie_GetAllLeavesOnChannel(t *testing.T) {
 		_, ok := <-leavesChannel.LeavesChan
 		assert.False(t, ok)
 
-		err = common.GetErrorFromChanNonBlocking(leavesChannel.ErrChan)
+		err = leavesChannel.ErrChan.ReadFromChanNonBlocking()
 		assert.Nil(t, err)
 	})
@@ -610,7 +611,7 @@ func TestPatriciaMerkleTrie_GetAllLeavesOnChannel(t *testing.T) {
 
 		leavesChannel := &common.TrieIteratorChannels{
 			LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-			ErrChan:    make(chan error, 1),
+			ErrChan:    errChan.NewErrChan(),
 		}
 
 		expectedErr := errors.New("expected error")
@@ -630,7 +631,7 @@ func TestPatriciaMerkleTrie_GetAllLeavesOnChannel(t *testing.T) {
 		for leaf := range leavesChannel.LeavesChan {
 			recovered[string(leaf.Key())] = leaf.Value()
 		}
-		err = common.GetErrorFromChanNonBlocking(leavesChannel.ErrChan)
+		err = leavesChannel.ErrChan.ReadFromChanNonBlocking()
 		assert.Equal(t, expectedErr, err)
 		assert.Equal(t, 0, len(recovered))
 	})
@@ -646,7 +647,7 @@ func TestPatriciaMerkleTrie_GetAllLeavesOnChannel(t *testing.T) {
 
 		leavesChannel := &common.TrieIteratorChannels{
 			LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-			ErrChan:    make(chan error, 1),
+			ErrChan:    errChan.NewErrChan(),
 		}
 
 		expectedErr := errors.New("expected error")
@@ -672,7 +673,7 @@ func TestPatriciaMerkleTrie_GetAllLeavesOnChannel(t *testing.T) {
 		for leaf := range leavesChannel.LeavesChan {
 			recovered[string(leaf.Key())] = leaf.Value()
 		}
-		err = common.GetErrorFromChanNonBlocking(leavesChannel.ErrChan)
+		err = leavesChannel.ErrChan.ReadFromChanNonBlocking()
 		assert.Equal(t, expectedErr, err)
 
 		expectedLeaves := map[string][]byte{
@@ -695,7 +696,7 @@ func TestPatriciaMerkleTrie_GetAllLeavesOnChannel(t *testing.T) {
 
 		leavesChannel := &common.TrieIteratorChannels{
 			LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
-			ErrChan:    make(chan error, 1),
+			ErrChan:    errChan.NewErrChan(),
 		}
 		err := tr.GetAllLeavesOnChannel(leavesChannel, context.Background(), rootHash, keyBuilder.NewKeyBuilder())
 		assert.Nil(t, err)
@@ -705,7 +706,7 @@ func TestPatriciaMerkleTrie_GetAllLeavesOnChannel(t *testing.T) {
 		for leaf := range leavesChannel.LeavesChan {
 			recovered[string(leaf.Key())] = leaf.Value()
 		}
-		err = common.GetErrorFromChanNonBlocking(leavesChannel.ErrChan)
+		err = leavesChannel.ErrChan.ReadFromChanNonBlocking()
 		assert.Nil(t, err)
 		assert.Equal(t, leaves, recovered)
 	})
diff --git a/trie/trieStorageManager.go b/trie/trieStorageManager.go
index dc50faff711..78b647030e3 100644
--- a/trie/trieStorageManager.go
+++ b/trie/trieStorageManager.go
@@ -438,7 +438,7 @@ func (tsm *trieStorageManager) takeSnapshot(snapshotEntry *snapshotsQueueEntry,
 
 	stsm, err := newSnapshotTrieStorageManager(tsm, snapshotEntry.epoch)
 	if err != nil {
-		writeInChanNonBlocking(snapshotEntry.iteratorChannels.ErrChan, err)
+		snapshotEntry.iteratorChannels.ErrChan.WriteInChanNonBlocking(err)
 		log.Error("takeSnapshot: trie storage manager: newSnapshotTrieStorageManager",
 			"rootHash", snapshotEntry.rootHash,
 			"main trie rootHash", snapshotEntry.mainTrieRootHash,
@@ -448,7 +448,7 @@ func (tsm *trieStorageManager) takeSnapshot(snapshotEntry *snapshotsQueueEntry,
 
 	newRoot, err := newSnapshotNode(stsm, msh, hsh, snapshotEntry.rootHash, snapshotEntry.missingNodesChan)
 	if err != nil {
-		writeInChanNonBlocking(snapshotEntry.iteratorChannels.ErrChan, err)
+		snapshotEntry.iteratorChannels.ErrChan.WriteInChanNonBlocking(err)
 		treatSnapshotError(err,
 			"trie storage manager: newSnapshotNode takeSnapshot",
 			snapshotEntry.rootHash,
@@ -460,7 +460,7 @@ func (tsm *trieStorageManager) takeSnapshot(snapshotEntry *snapshotsQueueEntry,
 	stats := statistics.NewTrieStatistics()
 	err = newRoot.commitSnapshot(stsm, snapshotEntry.iteratorChannels.LeavesChan, snapshotEntry.missingNodesChan, ctx, stats, tsm.idleProvider, rootDepthLevel)
 	if err != nil {
-		writeInChanNonBlocking(snapshotEntry.iteratorChannels.ErrChan, err)
+		snapshotEntry.iteratorChannels.ErrChan.WriteInChanNonBlocking(err)
 		treatSnapshotError(err,
 			"trie storage manager: takeSnapshot commit",
 			snapshotEntry.rootHash,
@@ -490,7 +490,7 @@ func (tsm *trieStorageManager) takeCheckpoint(checkpointEntry *snapshotsQueueEnt
 
 	newRoot, err := newSnapshotNode(tsm, msh, hsh, checkpointEntry.rootHash, checkpointEntry.missingNodesChan)
 	if err != nil {
-		writeInChanNonBlocking(checkpointEntry.iteratorChannels.ErrChan, err)
+		checkpointEntry.iteratorChannels.ErrChan.WriteInChanNonBlocking(err)
 		treatSnapshotError(err,
 			"trie storage manager: newSnapshotNode takeCheckpoint",
 			checkpointEntry.rootHash,
@@ -502,7 +502,7 @@ func (tsm *trieStorageManager) takeCheckpoint(checkpointEntry *snapshotsQueueEnt
 	stats := statistics.NewTrieStatistics()
 	err = newRoot.commitCheckpoint(tsm, tsm.checkpointsStorer, tsm.checkpointHashesHolder, checkpointEntry.iteratorChannels.LeavesChan, ctx, stats, tsm.idleProvider, rootDepthLevel)
 	if err != nil {
-		writeInChanNonBlocking(checkpointEntry.iteratorChannels.ErrChan, err)
+		checkpointEntry.iteratorChannels.ErrChan.WriteInChanNonBlocking(err)
 		treatSnapshotError(err,
 			"trie storage manager: takeCheckpoint commit",
 			checkpointEntry.rootHash,
diff --git a/trie/trieStorageManagerFactory_test.go b/trie/trieStorageManagerFactory_test.go
index d5a28801d9c..d79fa2c77fa 100644
--- a/trie/trieStorageManagerFactory_test.go
+++ b/trie/trieStorageManagerFactory_test.go
@@ -6,6 +6,7 @@ import (
 
 	"github.com/multiversx/mx-chain-core-go/core"
 	"github.com/multiversx/mx-chain-go/common"
+	"github.com/multiversx/mx-chain-go/common/errChan"
 	"github.com/multiversx/mx-chain-go/testscommon"
 	trieMock "github.com/multiversx/mx-chain-go/testscommon/trie"
 	"github.com/multiversx/mx-chain-go/trie"
@@ -134,7 +135,7 @@ func TestTrieStorageManager_SerialFuncShadowingCallsExpectedImpl(t *testing.T) {
 
 	iteratorChannels := &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	tsm.SetCheckpoint(nil, nil, iteratorChannels, nil, &trieMock.MockStatistics{})
 
@@ -167,7 +168,7 @@ func testTsmWithoutSnapshot(
 
 	iteratorChannels := &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	tsm.TakeSnapshot("", nil, nil, iteratorChannels, nil, &trieMock.MockStatistics{}, 10)
diff --git a/trie/trieStorageManagerWithoutCheckpoints_test.go b/trie/trieStorageManagerWithoutCheckpoints_test.go
index 0f3cf254a77..2f76292d574 100644
--- a/trie/trieStorageManagerWithoutCheckpoints_test.go
+++ b/trie/trieStorageManagerWithoutCheckpoints_test.go
@@ -5,6 +5,7 @@ import (
 
 	"github.com/multiversx/mx-chain-core-go/core"
 	"github.com/multiversx/mx-chain-go/common"
+	"github.com/multiversx/mx-chain-go/common/errChan"
 	trieMock "github.com/multiversx/mx-chain-go/testscommon/trie"
 	"github.com/multiversx/mx-chain-go/trie"
 	"github.com/stretchr/testify/assert"
@@ -27,14 +28,14 @@ func TestTrieStorageManagerWithoutCheckpoints_SetCheckpoint(t *testing.T) {
 
 	iteratorChannels := &common.TrieIteratorChannels{
 		LeavesChan: nil,
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	ts.SetCheckpoint([]byte("rootHash"), make([]byte, 0), iteratorChannels, nil, &trieMock.MockStatistics{})
 	assert.Equal(t, uint32(0), ts.PruningBlockingOperations())
 
 	iteratorChannels = &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	ts.SetCheckpoint([]byte("rootHash"), make([]byte, 0), iteratorChannels, nil, &trieMock.MockStatistics{})
 	assert.Equal(t, uint32(0), ts.PruningBlockingOperations())
diff --git a/trie/trieStorageManagerWithoutSnapshot_test.go b/trie/trieStorageManagerWithoutSnapshot_test.go
index 4077c71978a..d3ba6073770 100644
--- a/trie/trieStorageManagerWithoutSnapshot_test.go
+++ b/trie/trieStorageManagerWithoutSnapshot_test.go
@@ -6,6 +6,7 @@ import (
 	"github.com/multiversx/mx-chain-core-go/core"
 	"github.com/multiversx/mx-chain-core-go/core/check"
 	"github.com/multiversx/mx-chain-go/common"
+	"github.com/multiversx/mx-chain-go/common/errChan"
 	trieMock "github.com/multiversx/mx-chain-go/testscommon/trie"
 	"github.com/multiversx/mx-chain-go/trie"
 	"github.com/stretchr/testify/assert"
@@ -79,7 +80,7 @@ func TestTrieStorageManagerWithoutSnapshot_TakeSnapshot(t *testing.T) {
 
 	iteratorChannels := &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	ts.TakeSnapshot("", nil, nil, iteratorChannels, nil, &trieMock.MockStatistics{}, 10)
diff --git a/trie/trieStorageManager_test.go b/trie/trieStorageManager_test.go
index f634024514d..9dc02201ab4 100644
--- a/trie/trieStorageManager_test.go
+++ b/trie/trieStorageManager_test.go
@@ -9,6 +9,7 @@ import (
 
 	"github.com/multiversx/mx-chain-core-go/core"
 	"github.com/multiversx/mx-chain-go/common"
+	"github.com/multiversx/mx-chain-go/common/errChan"
 	"github.com/multiversx/mx-chain-go/config"
 	"github.com/multiversx/mx-chain-go/errors"
 	"github.com/multiversx/mx-chain-go/testscommon"
@@ -36,6 +37,12 @@ func getNewTrieStorageManagerArgs() trie.NewTrieStorageManagerArgs {
 	}
 }
 
+// ErrChanWithLen extends the BufferedErrChan interface with a Len method
+type ErrChanWithLen interface {
+	common.BufferedErrChan
+	Len() int
+}
+
 func TestNewTrieStorageManager(t *testing.T) {
 	t.Parallel()
 
@@ -91,7 +98,7 @@ func TestTrieCheckpoint(t *testing.T) {
 	trieStorage.AddDirtyCheckpointHashes(rootHash, dirtyHashes)
 	iteratorChannels := &common.TrieIteratorChannels{
 		LeavesChan: nil,
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	trieStorage.SetCheckpoint(rootHash, []byte{}, iteratorChannels, nil, &trieMock.MockStatistics{})
 	trie.WaitForOperationToComplete(trieStorage)
@@ -99,7 +106,10 @@ func TestTrieCheckpoint(t *testing.T) {
 	val, err = trieStorage.GetFromCheckpoint(rootHash)
 	assert.Nil(t, err)
 	assert.NotNil(t, val)
-	assert.Equal(t, 0, len(iteratorChannels.ErrChan))
+
+	ch, ok := iteratorChannels.ErrChan.(ErrChanWithLen)
+	assert.True(t, ok)
+	assert.Equal(t, 0, ch.Len())
 }
 
 func TestTrieStorageManager_SetCheckpointNilErrorChan(t *testing.T) {
@@ -131,13 +141,15 @@ func TestTrieStorageManager_SetCheckpointClosedDb(t *testing.T) {
 	rootHash := []byte("rootHash")
 	iteratorChannels := &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	ts.SetCheckpoint(rootHash, rootHash, iteratorChannels, nil, &trieMock.MockStatistics{})
 
 	_, ok := <-iteratorChannels.LeavesChan
 	assert.False(t, ok)
-	assert.Equal(t, 0, len(iteratorChannels.ErrChan))
+	ch, ok := iteratorChannels.ErrChan.(ErrChanWithLen)
+	assert.True(t, ok)
+	assert.Equal(t, 0, ch.Len())
 }
 
 func TestTrieStorageManager_SetCheckpointEmptyTrieRootHash(t *testing.T) {
@@ -149,13 +161,15 @@ func TestTrieStorageManager_SetCheckpointEmptyTrieRootHash(t *testing.T) {
 	rootHash := make([]byte, 32)
 	iteratorChannels := &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	ts.SetCheckpoint(rootHash, rootHash, iteratorChannels, nil, &trieMock.MockStatistics{})
 
 	_, ok := <-iteratorChannels.LeavesChan
 	assert.False(t, ok)
-	assert.Equal(t, 0, len(iteratorChannels.ErrChan))
+	ch, ok := iteratorChannels.ErrChan.(ErrChanWithLen)
+	assert.True(t, ok)
+	assert.Equal(t, 0, ch.Len())
 }
 
 func TestTrieCheckpoint_DoesNotSaveToCheckpointStorageIfNotDirty(t *testing.T) {
@@ -170,7 +184,7 @@ func TestTrieCheckpoint_DoesNotSaveToCheckpointStorageIfNotDirty(t *testing.T) {
 
 	iteratorChannels := &common.TrieIteratorChannels{
 		LeavesChan: nil,
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChan(),
 	}
 	trieStorage.SetCheckpoint(rootHash, []byte{}, iteratorChannels, nil,
&trieMock.MockStatistics{}) trie.WaitForOperationToComplete(trieStorage) @@ -178,7 +192,9 @@ func TestTrieCheckpoint_DoesNotSaveToCheckpointStorageIfNotDirty(t *testing.T) { val, err = trieStorage.GetFromCheckpoint(rootHash) assert.NotNil(t, err) assert.Nil(t, val) - assert.Equal(t, 0, len(iteratorChannels.ErrChan)) + ch, ok := iteratorChannels.ErrChan.(ErrChanWithLen) + assert.True(t, ok) + assert.Equal(t, 0, ch.Len()) } func TestTrieStorageManager_IsPruningEnabled(t *testing.T) { @@ -344,13 +360,15 @@ func TestTrieStorageManager_TakeSnapshotClosedDb(t *testing.T) { rootHash := []byte("rootHash") iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChan(), } ts.TakeSnapshot("", rootHash, rootHash, iteratorChannels, nil, &trieMock.MockStatistics{}, 0) _, ok := <-iteratorChannels.LeavesChan assert.False(t, ok) - assert.Equal(t, 0, len(iteratorChannels.ErrChan)) + ch, ok := iteratorChannels.ErrChan.(ErrChanWithLen) + assert.True(t, ok) + assert.Equal(t, 0, ch.Len()) } func TestTrieStorageManager_TakeSnapshotEmptyTrieRootHash(t *testing.T) { @@ -362,13 +380,15 @@ func TestTrieStorageManager_TakeSnapshotEmptyTrieRootHash(t *testing.T) { rootHash := make([]byte, 32) iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChan(), } ts.TakeSnapshot("", rootHash, rootHash, iteratorChannels, nil, &trieMock.MockStatistics{}, 0) _, ok := <-iteratorChannels.LeavesChan assert.False(t, ok) - assert.Equal(t, 0, len(iteratorChannels.ErrChan)) + ch, ok := iteratorChannels.ErrChan.(ErrChanWithLen) + assert.True(t, ok) + assert.Equal(t, 0, ch.Len()) } func TestTrieStorageManager_TakeSnapshotWithGetNodeFromDBError(t *testing.T) { @@ -381,15 +401,17 @@ func TestTrieStorageManager_TakeSnapshotWithGetNodeFromDBError(t *testing.T) { rootHash := []byte("rootHash") iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChan(), } missingNodesChan := make(chan []byte, 2) ts.TakeSnapshot("", rootHash, rootHash, iteratorChannels, missingNodesChan, &trieMock.MockStatistics{}, 0) _, ok := <-iteratorChannels.LeavesChan assert.False(t, ok) - require.Equal(t, 1, len(iteratorChannels.ErrChan)) - errRecovered := <-iteratorChannels.ErrChan + ch, ok := iteratorChannels.ErrChan.(ErrChanWithLen) + assert.True(t, ok) + assert.Equal(t, 1, ch.Len()) + errRecovered := iteratorChannels.ErrChan.ReadFromChanNonBlocking() assert.True(t, strings.Contains(errRecovered.Error(), common.GetNodeFromDBErrorString)) } @@ -430,20 +452,20 @@ func TestWriteInChanNonBlocking(t *testing.T) { t.Run("unbuffered, reader has been set up, should add", func(t *testing.T) { t.Parallel() - errChan := make(chan error) + errChannel := make(chan error) var recovered error wg := sync.WaitGroup{} wg.Add(1) // set up the consumer that will be blocked until writing is done go func() { - recovered = <-errChan + recovered = <-errChannel wg.Done() }() time.Sleep(time.Second) // allow the go routine to start - trie.WriteInChanNonBlocking(errChan, err1) + trie.WriteInChanNonBlocking(errChannel, err1) wg.Wait() assert.Equal(t, err1, recovered) @@ -453,8 +475,8 @@ func TestWriteInChanNonBlocking(t *testing.T) { chanFinish := make(chan struct{}) go func() { - errChan := make(chan error) - trie.WriteInChanNonBlocking(errChan, err1) + errChannel := make(chan error) + 
trie.WriteInChanNonBlocking(errChannel, err1) close(chanFinish) }() @@ -468,53 +490,54 @@ func TestWriteInChanNonBlocking(t *testing.T) { t.Run("buffered (one element), empty chan should add", func(t *testing.T) { t.Parallel() - errChan := make(chan error, 1) - trie.WriteInChanNonBlocking(errChan, err1) - require.Equal(t, 1, len(errChan)) - recovered := <-errChan + errChannel := errChan.NewErrChan() + errChannel.WriteInChanNonBlocking(err1) + + require.Equal(t, 1, errChannel.Len()) + recovered := errChannel.ReadFromChanNonBlocking() assert.Equal(t, err1, recovered) }) t.Run("buffered (1 element), full chan should not add, but should finish", func(t *testing.T) { t.Parallel() - errChan := make(chan error, 1) - trie.WriteInChanNonBlocking(errChan, err1) - trie.WriteInChanNonBlocking(errChan, err2) + errChannel := errChan.NewErrChan() + errChannel.WriteInChanNonBlocking(err1) + errChannel.WriteInChanNonBlocking(err2) - require.Equal(t, 1, len(errChan)) - recovered := <-errChan + require.Equal(t, 1, errChannel.Len()) + recovered := errChannel.ReadFromChanNonBlocking() assert.Equal(t, err1, recovered) }) t.Run("buffered (two elements), empty chan should add", func(t *testing.T) { t.Parallel() - errChan := make(chan error, 2) - trie.WriteInChanNonBlocking(errChan, err1) - require.Equal(t, 1, len(errChan)) - recovered := <-errChan + errChannel := make(chan error, 2) + trie.WriteInChanNonBlocking(errChannel, err1) + require.Equal(t, 1, len(errChannel)) + recovered := <-errChannel assert.Equal(t, err1, recovered) - trie.WriteInChanNonBlocking(errChan, err1) - trie.WriteInChanNonBlocking(errChan, err2) - require.Equal(t, 2, len(errChan)) + trie.WriteInChanNonBlocking(errChannel, err1) + trie.WriteInChanNonBlocking(errChannel, err2) + require.Equal(t, 2, len(errChannel)) - recovered = <-errChan + recovered = <-errChannel assert.Equal(t, err1, recovered) - recovered = <-errChan + recovered = <-errChannel assert.Equal(t, err2, recovered) }) t.Run("buffered (2 elements), full chan should not add, but should finish", func(t *testing.T) { t.Parallel() - errChan := make(chan error, 2) - trie.WriteInChanNonBlocking(errChan, err1) - trie.WriteInChanNonBlocking(errChan, err2) - trie.WriteInChanNonBlocking(errChan, err3) + errChannel := make(chan error, 2) + trie.WriteInChanNonBlocking(errChannel, err1) + trie.WriteInChanNonBlocking(errChannel, err2) + trie.WriteInChanNonBlocking(errChannel, err3) - require.Equal(t, 2, len(errChan)) - recovered := <-errChan + require.Equal(t, 2, len(errChannel)) + recovered := <-errChannel assert.Equal(t, err1, recovered) - recovered = <-errChan + recovered = <-errChannel assert.Equal(t, err2, recovered) }) } diff --git a/update/genesis/common.go b/update/genesis/common.go index 8c62a78ef61..023fe6d7c8d 100644 --- a/update/genesis/common.go +++ b/update/genesis/common.go @@ -34,7 +34,7 @@ func getValidatorDataFromLeaves( validators[currentShardId] = append(validators[currentShardId], validatorInfoData) } - err := common.GetErrorFromChanNonBlocking(leavesChannels.ErrChan) + err := leavesChannels.ErrChan.ReadFromChanNonBlocking() if err != nil { return nil, err } diff --git a/update/genesis/export.go b/update/genesis/export.go index f885c9cf55c..ccf2b3cdb62 100644 --- a/update/genesis/export.go +++ b/update/genesis/export.go @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" 
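	// errChan provides the buffered (single-slot), close-once error channel
	// implementation backing the TrieIteratorChannels built below.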
"github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/state" @@ -295,7 +296,7 @@ func (se *stateExport) exportTrie(key string, trie common.Trie) error { leavesChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: make(chan error, 1), + ErrChan: errChan.NewErrChan(), } err = trie.GetAllLeavesOnChannel(leavesChannels, context.Background(), rootHash, keyBuilder.NewKeyBuilder()) if err != nil { @@ -357,7 +358,7 @@ func (se *stateExport) exportDataTries( } } - err := common.GetErrorFromChanNonBlocking(leavesChannels.ErrChan) + err := leavesChannels.ErrChan.ReadFromChanNonBlocking() if err != nil { return err } @@ -379,7 +380,7 @@ func (se *stateExport) exportAccountLeaves( } } - err := common.GetErrorFromChanNonBlocking(leavesChannels.ErrChan) + err := leavesChannels.ErrChan.ReadFromChanNonBlocking() if err != nil { return err } diff --git a/update/genesis/export_test.go b/update/genesis/export_test.go index 44800205606..08be4eee55c 100644 --- a/update/genesis/export_test.go +++ b/update/genesis/export_test.go @@ -294,7 +294,7 @@ func TestStateExport_ExportTrieShouldExportNodesSetupJson(t *testing.T) { go func() { channels.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("test"), pacB) - channels.ErrChan <- expectedErr + channels.ErrChan.WriteInChanNonBlocking(expectedErr) close(channels.LeavesChan) }() @@ -344,7 +344,7 @@ func TestStateExport_ExportTrieShouldExportNodesSetupJson(t *testing.T) { go func() { channels.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("test"), pacB) close(channels.LeavesChan) - close(channels.ErrChan) + channels.ErrChan.Close() }() return nil From 14cff8180adc94a595de82af3f6df5cfe426e391 Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Thu, 9 Mar 2023 11:34:12 +0200 Subject: [PATCH 02/20] sort imports --- integrationTests/state/stateTrieSync/stateTrieSync_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integrationTests/state/stateTrieSync/stateTrieSync_test.go b/integrationTests/state/stateTrieSync/stateTrieSync_test.go index 175f3a3c460..2704aa1fd4c 100644 --- a/integrationTests/state/stateTrieSync/stateTrieSync_test.go +++ b/integrationTests/state/stateTrieSync/stateTrieSync_test.go @@ -3,7 +3,6 @@ package stateTrieSync import ( "context" "fmt" - "github.com/multiversx/mx-chain-go/common/errChan" "math/big" "strconv" "testing" @@ -12,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/throttler" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" "github.com/multiversx/mx-chain-go/epochStart/notifier" "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/process/factory" From c240bea8cbee643528f4b1c16376a72ae741586b Mon Sep 17 00:00:00 2001 From: jules01 Date: Fri, 10 Mar 2023 17:16:11 +0200 Subject: [PATCH 03/20] - fixed dependabot issues on external libs --- go.mod | 24 +++++++++++++----------- go.sum | 44 +++++++++++++++++++++++++++++++------------- 2 files changed, 44 insertions(+), 24 deletions(-) diff --git a/go.mod b/go.mod index 9b47348a875..1cc95cc3102 100644 --- a/go.mod +++ b/go.mod @@ -28,7 +28,7 @@ require ( github.com/shirou/gopsutil v3.21.11+incompatible github.com/stretchr/testify v1.8.1 github.com/urfave/cli v1.22.10 - golang.org/x/crypto v0.5.0 + golang.org/x/crypto v0.7.0 
gopkg.in/go-playground/validator.v8 v8.18.2 ) @@ -72,17 +72,18 @@ require ( github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/herumi/bls-go-binary v1.0.0 // indirect github.com/huin/goupnp v1.0.3 // indirect - github.com/ipfs/go-cid v0.2.0 // indirect + github.com/ipfs/go-cid v0.3.2 // indirect github.com/ipfs/go-datastore v0.5.1 // indirect github.com/ipfs/go-ipfs-util v0.0.2 // indirect github.com/ipfs/go-ipns v0.2.0 // indirect github.com/ipfs/go-log v1.0.5 // indirect github.com/ipfs/go-log/v2 v2.5.1 // indirect - github.com/ipld/go-ipld-prime v0.9.0 // indirect + github.com/ipld/go-ipld-prime v0.19.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect github.com/jbenet/goprocess v0.1.4 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/jtolds/gls v4.20.0+incompatible // indirect github.com/klauspost/compress v1.15.1 // indirect github.com/klauspost/cpuid/v2 v2.1.0 // indirect github.com/koron/go-ssdp v0.0.3 // indirect @@ -127,7 +128,7 @@ require ( github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect github.com/multiformats/go-multibase v0.1.1 // indirect - github.com/multiformats/go-multicodec v0.5.0 // indirect + github.com/multiformats/go-multicodec v0.6.0 // indirect github.com/multiformats/go-multihash v0.2.1 // indirect github.com/multiformats/go-multistream v0.3.3 // indirect github.com/multiformats/go-varint v0.0.6 // indirect @@ -141,7 +142,7 @@ require ( github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pelletier/go-toml/v2 v2.0.6 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1 // indirect + github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e // indirect github.com/prometheus/client_golang v1.12.1 // indirect github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/common v0.37.0 // indirect @@ -149,6 +150,7 @@ require ( github.com/raulk/go-watchdog v1.3.0 // indirect github.com/russross/blackfriday/v2 v2.0.1 // indirect github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect + github.com/smartystreets/assertions v1.13.0 // indirect github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect @@ -167,12 +169,12 @@ require ( go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.22.0 // indirect golang.org/x/arch v0.0.0-20210923205945-b76863e36670 // indirect - golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect - golang.org/x/net v0.7.0 // indirect - golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect - golang.org/x/sys v0.5.0 // indirect - golang.org/x/text v0.7.0 // indirect - golang.org/x/tools v0.1.12 // indirect + golang.org/x/mod v0.8.0 // indirect + golang.org/x/net v0.8.0 // indirect + golang.org/x/sync v0.1.0 // indirect + golang.org/x/sys v0.6.0 // indirect + golang.org/x/text v0.8.0 // indirect + golang.org/x/tools v0.6.0 // indirect google.golang.org/protobuf v1.28.1 // indirect gopkg.in/go-playground/assert.v1 v1.2.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect diff --git a/go.sum b/go.sum index 184ca5a99e7..bc8b955046a 100644 --- a/go.sum +++ b/go.sum @@ -167,6 +167,8 @@ github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwU 
github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= @@ -274,8 +276,9 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -336,8 +339,9 @@ github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUP github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= -github.com/ipfs/go-cid v0.2.0 h1:01JTiihFq9en9Vz0lc0VDWvZe/uBonGpzo4THP0vcQ0= github.com/ipfs/go-cid v0.2.0/go.mod h1:P+HXFDF4CVhaVayiEb4wkAy7zBHxBwsJyt0Y5U6MLro= +github.com/ipfs/go-cid v0.3.2 h1:OGgOd+JCFM+y1DjWPmVH+2/4POtpDzwcr7VgnB7mZXc= +github.com/ipfs/go-cid v0.3.2/go.mod h1:gQ8pKqT/sUxGY+tIwy1RPpAojYu7jAyCp5Tz1svoupw= github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= @@ -370,8 +374,9 @@ github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Ax github.com/ipfs/go-log/v2 v2.5.0/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= -github.com/ipld/go-ipld-prime v0.9.0 h1:N2OjJMb+fhyFPwPnVvJcWU/NsumP8etal+d2v3G4eww= github.com/ipld/go-ipld-prime v0.9.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= +github.com/ipld/go-ipld-prime v0.19.0 h1:5axC7rJmPc17Emw6TelxGwnzALk0PdupZ2oj2roDj04= +github.com/ipld/go-ipld-prime v0.19.0/go.mod h1:Q9j3BaVXwaA3o5JUDNvptDDr/x8+F7FG6XJ8WI3ILg4= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= 
github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= @@ -588,8 +593,9 @@ github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPw github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI= github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8= github.com/multiformats/go-multicodec v0.4.1/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= -github.com/multiformats/go-multicodec v0.5.0 h1:EgU6cBe/D7WRwQb1KmnBvU7lrcFGMggZVTPtOW9dDHs= github.com/multiformats/go-multicodec v0.5.0/go.mod h1:DiY2HFaEp5EhEXb/iYzVAunmyX/aSFMxq2KMKfWEues= +github.com/multiformats/go-multicodec v0.6.0 h1:KhH2kSuCARyuJraYMFxrNO3DqIaYhOdS039kbhgVwpE= +github.com/multiformats/go-multicodec v0.6.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= @@ -681,8 +687,9 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1 h1:CskT+S6Ay54OwxBGB0R3Rsx4Muto6UnEYTyKJbyRIAI= github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e h1:ZOcivgkkFRnjfoTcGsDq3UQYiBmekwLA+qg0OjyB/ls= +github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -753,8 +760,9 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.13.0 h1:Dx1kYM01xsSqKPno3aqLnrwac2LetPvN23diwyr69Qs= +github.com/smartystreets/assertions v1.13.0/go.mod h1:wDmR7qL282YbGsPy6H/yAsesrxfxaaSlJazyFLYVFx8= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= @@ -816,6 +824,7 @@ github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60Nt github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= 
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= github.com/wangjia184/sortedset v0.0.0-20160527075905-f5d03557ba30/go.mod h1:YkocrP2K2tcw938x9gCOmT5G5eCD6jsTz0SZuyAqwIE= +github.com/warpfork/go-testmark v0.10.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a h1:G++j5e0OC488te356JvdhaM8YS6nMsjLAYF7JxCv07w= github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= @@ -895,8 +904,9 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= +golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -932,8 +942,9 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -990,8 +1001,10 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220812174116-3211cb980234/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1014,8 +1027,9 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1093,14 +1107,16 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1111,8 +1127,9 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 
+golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1170,8 +1187,9 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= -golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= From ece55424a44cb53bbbbf60a1f6980f0302efd717 Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Mon, 13 Mar 2023 16:31:05 +0200 Subject: [PATCH 04/20] use normal PersisterTracker if processing mode is import db --- common/configParser.go | 9 ++++ epochStart/bootstrap/metaStorageHandler.go | 2 + .../bootstrap/metaStorageHandler_test.go | 15 +++--- epochStart/bootstrap/process.go | 6 +++ epochStart/bootstrap/shardStorageHandler.go | 2 + .../bootstrap/shardStorageHandler_test.go | 49 ++++++++++--------- factory/bootstrap/bootstrapComponents.go | 1 + factory/data/dataComponents.go | 5 ++ factory/processing/processComponents.go | 1 + .../startInEpoch/startInEpoch_test.go | 2 + node/nodeRunner.go | 7 +-- storage/factory/pruningStorerFactory.go | 6 ++- testscommon/components/components.go | 1 + 13 files changed, 70 insertions(+), 36 deletions(-) diff --git a/common/configParser.go b/common/configParser.go index 77a46161a71..bc814990528 100644 --- a/common/configParser.go +++ b/common/configParser.go @@ -163,3 +163,12 @@ func GetSkBytesFromP2pKey(p2pKeyFilename string) ([]byte, error) { return skBytes, nil } + +// GetNodeProcessingMode returns the node processing mode based on the provided config +func GetNodeProcessingMode(importDbConfig *config.ImportDbConfig) NodeProcessingMode { + if importDbConfig.IsImportDBMode { + return ImportDb + } + + return Normal +} diff --git a/epochStart/bootstrap/metaStorageHandler.go b/epochStart/bootstrap/metaStorageHandler.go index 53c77c23fd0..9f9df9e9880 100644 --- a/epochStart/bootstrap/metaStorageHandler.go +++ b/epochStart/bootstrap/metaStorageHandler.go @@ -36,6 +36,7 @@ func NewMetaStorageHandler( currentEpoch uint32, uint64Converter typeConverters.Uint64ByteSliceConverter, nodeTypeProvider NodeTypeProviderHandler, + nodeProcessingMode common.NodeProcessingMode, ) (*metaStorageHandler, error) { epochStartNotifier := &disabled.EpochStartNotifier{} storageFactory, err := factory.NewStorageServiceFactory( @@ -49,6 +50,7 @@ func NewMetaStorageHandler( 
CurrentEpoch: currentEpoch, StorageType: factory.BootstrapStorageService, CreateTrieEpochRootHashStorer: false, + NodeProcessingMode: nodeProcessingMode, }, ) if err != nil { diff --git a/epochStart/bootstrap/metaStorageHandler_test.go b/epochStart/bootstrap/metaStorageHandler_test.go index 732c617304f..ac5a3ed13b0 100644 --- a/epochStart/bootstrap/metaStorageHandler_test.go +++ b/epochStart/bootstrap/metaStorageHandler_test.go @@ -10,6 +10,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart/mock" @@ -33,7 +34,7 @@ func TestNewMetaStorageHandler_InvalidConfigErr(t *testing.T) { uit64Cvt := &mock.Uint64ByteSliceConverterMock{} nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - mtStrHandler, err := NewMetaStorageHandler(gCfg, prefsConfig, coordinator, pathManager, marshalizer, hasher, 1, uit64Cvt, nodeTypeProvider) + mtStrHandler, err := NewMetaStorageHandler(gCfg, prefsConfig, coordinator, pathManager, marshalizer, hasher, 1, uit64Cvt, nodeTypeProvider, common.Normal) assert.True(t, check.IfNil(mtStrHandler)) assert.NotNil(t, err) } @@ -51,7 +52,7 @@ func TestNewMetaStorageHandler_CreateForMetaErr(t *testing.T) { hasher := &hashingMocks.HasherMock{} uit64Cvt := &mock.Uint64ByteSliceConverterMock{} nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - mtStrHandler, err := NewMetaStorageHandler(gCfg, prefsConfig, coordinator, pathManager, marshalizer, hasher, 1, uit64Cvt, nodeTypeProvider) + mtStrHandler, err := NewMetaStorageHandler(gCfg, prefsConfig, coordinator, pathManager, marshalizer, hasher, 1, uit64Cvt, nodeTypeProvider, common.Normal) assert.False(t, check.IfNil(mtStrHandler)) assert.Nil(t, err) } @@ -70,7 +71,7 @@ func TestMetaStorageHandler_saveLastHeader(t *testing.T) { uit64Cvt := &mock.Uint64ByteSliceConverterMock{} nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - mtStrHandler, _ := NewMetaStorageHandler(gCfg, prefsConfig, coordinator, pathManager, marshalizer, hasher, 1, uit64Cvt, nodeTypeProvider) + mtStrHandler, _ := NewMetaStorageHandler(gCfg, prefsConfig, coordinator, pathManager, marshalizer, hasher, 1, uit64Cvt, nodeTypeProvider, common.Normal) header := &block.MetaBlock{Nonce: 0} @@ -98,7 +99,7 @@ func TestMetaStorageHandler_saveLastCrossNotarizedHeaders(t *testing.T) { uit64Cvt := &mock.Uint64ByteSliceConverterMock{} nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - mtStrHandler, _ := NewMetaStorageHandler(gCfg, prefsConfig, coordinator, pathManager, marshalizer, hasher, 1, uit64Cvt, nodeTypeProvider) + mtStrHandler, _ := NewMetaStorageHandler(gCfg, prefsConfig, coordinator, pathManager, marshalizer, hasher, 1, uit64Cvt, nodeTypeProvider, common.Normal) hdr1 := &block.Header{Nonce: 1} hdr2 := &block.Header{Nonce: 2} @@ -132,7 +133,7 @@ func TestMetaStorageHandler_saveTriggerRegistry(t *testing.T) { uit64Cvt := &mock.Uint64ByteSliceConverterMock{} nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - mtStrHandler, _ := NewMetaStorageHandler(gCfg, prefsConfig, coordinator, pathManager, marshalizer, hasher, 1, uit64Cvt, nodeTypeProvider) + mtStrHandler, _ := NewMetaStorageHandler(gCfg, prefsConfig, coordinator, pathManager, marshalizer, hasher, 1, uit64Cvt, nodeTypeProvider, common.Normal) components := 
&ComponentsNeededForBootstrap{ EpochStartMetaBlock: &block.MetaBlock{Nonce: 3}, @@ -157,7 +158,7 @@ func TestMetaStorageHandler_saveDataToStorage(t *testing.T) { uit64Cvt := &mock.Uint64ByteSliceConverterMock{} nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - mtStrHandler, _ := NewMetaStorageHandler(gCfg, prefsConfig, coordinator, pathManager, marshalizer, hasher, 1, uit64Cvt, nodeTypeProvider) + mtStrHandler, _ := NewMetaStorageHandler(gCfg, prefsConfig, coordinator, pathManager, marshalizer, hasher, 1, uit64Cvt, nodeTypeProvider, common.Normal) components := &ComponentsNeededForBootstrap{ EpochStartMetaBlock: &block.MetaBlock{Nonce: 3}, @@ -199,7 +200,7 @@ func testMetaWithMissingStorer(missingUnit dataRetriever.UnitType, atCallNumber uit64Cvt := &mock.Uint64ByteSliceConverterMock{} nodeTypeProvider := &nodeTypeProviderMock.NodeTypeProviderStub{} - mtStrHandler, _ := NewMetaStorageHandler(gCfg, prefsConfig, coordinator, pathManager, marshalizer, hasher, 1, uit64Cvt, nodeTypeProvider) + mtStrHandler, _ := NewMetaStorageHandler(gCfg, prefsConfig, coordinator, pathManager, marshalizer, hasher, 1, uit64Cvt, nodeTypeProvider, common.Normal) counter := 0 mtStrHandler.storageService = &storageStubs.ChainStorerStub{ GetStorerCalled: func(unitType dataRetriever.UnitType) (storage.Storer, error) { diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index 7ae86c77f98..1d7ea39dbae 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -114,6 +114,7 @@ type epochStartBootstrap struct { checkNodesOnDisk bool bootstrapHeartbeatSender update.Closer trieSyncStatisticsProvider common.SizeSyncStatisticsHandler + nodeProcessingMode common.NodeProcessingMode // created components requestHandler process.RequestHandler @@ -177,6 +178,7 @@ type ArgsEpochStartBootstrap struct { DataSyncerCreator types.ScheduledDataSyncerCreator ScheduledSCRsStorer storage.Storer TrieSyncStatisticsProvider common.SizeSyncStatisticsHandler + NodeProcessingMode common.NodeProcessingMode } type dataToSync struct { @@ -223,6 +225,7 @@ func NewEpochStartBootstrap(args ArgsEpochStartBootstrap) (*epochStartBootstrap, storerScheduledSCRs: args.ScheduledSCRsStorer, shardCoordinator: args.GenesisShardCoordinator, trieSyncStatisticsProvider: args.TrieSyncStatisticsProvider, + nodeProcessingMode: args.NodeProcessingMode, } whiteListCache, err := storageunit.NewCache(storageFactory.GetCacherFromConfig(epochStartProvider.generalConfig.WhiteListPool)) @@ -756,6 +759,7 @@ func (e *epochStartBootstrap) requestAndProcessForMeta(peerMiniBlocks []*block.M e.epochStartMeta.GetEpoch(), e.coreComponentsHolder.Uint64ByteSliceConverter(), e.coreComponentsHolder.NodeTypeProvider(), + e.nodeProcessingMode, ) if err != nil { return err @@ -922,6 +926,7 @@ func (e *epochStartBootstrap) requestAndProcessForShard(peerMiniBlocks []*block. 
e.baseData.lastEpoch, e.coreComponentsHolder.Uint64ByteSliceConverter(), e.coreComponentsHolder.NodeTypeProvider(), + e.nodeProcessingMode, ) if err != nil { return err @@ -1103,6 +1108,7 @@ func (e *epochStartBootstrap) createStorageService( CurrentEpoch: startEpoch, StorageType: storageFactory.BootstrapStorageService, CreateTrieEpochRootHashStorer: createTrieEpochRootHashStorer, + NodeProcessingMode: e.nodeProcessingMode, }) if err != nil { return nil, err diff --git a/epochStart/bootstrap/shardStorageHandler.go b/epochStart/bootstrap/shardStorageHandler.go index 79d2993d204..9bb1040bbed 100644 --- a/epochStart/bootstrap/shardStorageHandler.go +++ b/epochStart/bootstrap/shardStorageHandler.go @@ -40,6 +40,7 @@ func NewShardStorageHandler( currentEpoch uint32, uint64Converter typeConverters.Uint64ByteSliceConverter, nodeTypeProvider core.NodeTypeProviderHandler, + nodeProcessingMode common.NodeProcessingMode, ) (*shardStorageHandler, error) { epochStartNotifier := &disabled.EpochStartNotifier{} storageFactory, err := factory.NewStorageServiceFactory( @@ -53,6 +54,7 @@ func NewShardStorageHandler( CurrentEpoch: currentEpoch, StorageType: factory.BootstrapStorageService, CreateTrieEpochRootHashStorer: false, + NodeProcessingMode: nodeProcessingMode, }, ) if err != nil { diff --git a/epochStart/bootstrap/shardStorageHandler_test.go b/epochStart/bootstrap/shardStorageHandler_test.go index 61adf0d4921..922c4c308b9 100644 --- a/epochStart/bootstrap/shardStorageHandler_test.go +++ b/epochStart/bootstrap/shardStorageHandler_test.go @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/typeConverters" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart" @@ -39,7 +40,7 @@ func TestNewShardStorageHandler_ShouldWork(t *testing.T) { }() args := createDefaultShardStorageArgs() - shardStorage, err := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider) + shardStorage, err := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, args.nodeProcessingMode) assert.False(t, check.IfNil(shardStorage)) assert.Nil(t, err) @@ -51,7 +52,7 @@ func TestShardStorageHandler_SaveDataToStorageShardDataNotFound(t *testing.T) { }() args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider) + shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, args.nodeProcessingMode) components := &ComponentsNeededForBootstrap{ EpochStartMetaBlock: &block.MetaBlock{Epoch: 1}, @@ -69,7 +70,7 @@ func TestShardStorageHandler_SaveDataToStorageMissingHeader(t *testing.T) { }() args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, 
args.nodeTypeProvider) + shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, args.nodeProcessingMode) components := &ComponentsNeededForBootstrap{ EpochStartMetaBlock: &block.MetaBlock{ @@ -110,7 +111,7 @@ func testShardWithMissingStorer(missingUnit dataRetriever.UnitType, atCallNumber counter := 0 args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider) + shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, args.nodeProcessingMode) shardStorage.storageService = &storageStubs.ChainStorerStub{ GetStorerCalled: func(unitType dataRetriever.UnitType) (storage.Storer, error) { counter++ @@ -152,7 +153,7 @@ func TestShardStorageHandler_SaveDataToStorage(t *testing.T) { }() args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider) + shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, args.nodeProcessingMode) hash1 := []byte("hash1") hdr1 := block.MetaBlock{ @@ -251,7 +252,7 @@ func TestShardStorageHandler_getCrossProcessedMiniBlockHeadersDestMe(t *testing. mbs := append(intraMbs, crossMbs...) 
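	// mbs mixes intra-shard and cross-shard miniblock headers; the method under
	// test should return only the cross-shard ones whose destination is this shard.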
args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider) + shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, args.nodeProcessingMode) shardHeader := &block.Header{ Nonce: 100, MiniBlockHeaders: mbs, @@ -271,7 +272,7 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledErrorG t.Parallel() args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider) + shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, args.nodeProcessingMode) meta := &block.MetaBlock{ Nonce: 100, EpochStart: block.EpochStart{}, @@ -289,7 +290,7 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledNoSche t.Parallel() args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider) + shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, args.nodeProcessingMode) scenario := createPendingAndProcessedMiniBlocksScenario() processedMiniBlocks, pendingMiniBlocks, err := shardStorage.getProcessedAndPendingMiniBlocksWithScheduled(scenario.metaBlock, scenario.headers, scenario.shardHeader, false) @@ -304,7 +305,7 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduledWrongH t.Parallel() args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider) + shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, args.nodeProcessingMode) scenario := createPendingAndProcessedMiniBlocksScenario() wrongShardHeader := &block.MetaBlock{} @@ -326,7 +327,7 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithScheduled(t *te t.Parallel() args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider) + shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, args.nodeProcessingMode) scenario := createPendingAndProcessedMiniBlocksScenario() processedMiniBlocks, pendingMiniBlocks, err := shardStorage.getProcessedAndPendingMiniBlocksWithScheduled(scenario.metaBlock, scenario.headers, 
scenario.shardHeader, true) @@ -494,7 +495,7 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksErrorGettingEpochSt }() args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider) + shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, args.nodeProcessingMode) meta := &block.MetaBlock{ Nonce: 100, EpochStart: block.EpochStart{}, @@ -517,7 +518,7 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksMissingHeader(t *te lastFinishedMetaBlock := "last finished meta block" args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider) + shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, args.nodeProcessingMode) meta := &block.MetaBlock{ Nonce: 100, EpochStart: block.EpochStart{ @@ -543,7 +544,7 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWrongHeader(t *test lastFinishedMetaBlockHash := "last finished meta block" firstPendingMeta := "first pending meta" args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider) + shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, args.nodeProcessingMode) lastFinishedHeaders := createDefaultEpochStartShardData([]byte(lastFinishedMetaBlockHash), []byte("headerHash")) lastFinishedHeaders[0].FirstPendingMetaBlock = []byte(firstPendingMeta) meta := &block.MetaBlock{ @@ -574,7 +575,7 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksNilMetaBlock(t *tes lastFinishedMetaBlockHash := "last finished meta block" firstPendingMeta := "first pending meta" args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider) + shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, args.nodeProcessingMode) lastFinishedHeaders := createDefaultEpochStartShardData([]byte(lastFinishedMetaBlockHash), []byte("headerHash")) lastFinishedHeaders[0].FirstPendingMetaBlock = []byte(firstPendingMeta) meta := &block.MetaBlock{ @@ -607,7 +608,7 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksNoProcessedNoPendin lastFinishedMetaBlockHash := "last finished meta block" firstPendingMeta := "first pending meta" args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, 
args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider) + shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, args.nodeProcessingMode) lastFinishedHeaders := createDefaultEpochStartShardData([]byte(lastFinishedMetaBlockHash), []byte("headerHash")) lastFinishedHeaders[0].FirstPendingMetaBlock = []byte(firstPendingMeta) lastFinishedHeaders[0].PendingMiniBlockHeaders = nil @@ -636,7 +637,7 @@ func TestShardStorageHandler_getProcessedAndPendingMiniBlocksWithProcessedAndPen t.Parallel() args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider) + shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, args.nodeProcessingMode) scenario := createPendingAndProcessedMiniBlocksScenario() processedMiniBlocks, pendingMiniBlocks, firstPendingMetaBlockHash, err := shardStorage.getProcessedAndPendingMiniBlocks(scenario.metaBlock, scenario.headers) @@ -654,7 +655,7 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduledGetSha }() args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider) + shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, args.nodeProcessingMode) headers := map[string]data.HeaderHandler{} meta := &block.MetaBlock{ @@ -675,7 +676,7 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduledMissin }() args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider) + shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, args.nodeProcessingMode) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -704,7 +705,7 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduledWrongT }() args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider) + shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, args.nodeProcessingMode) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -740,7 +741,7 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduledErrorW args.marshalizer = &testscommon.MarshalizerStub{MarshalCalled: 
func(obj interface{}) ([]byte, error) { return nil, expectedErr }} - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider) + shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, args.nodeProcessingMode) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -771,7 +772,7 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithoutScheduled(t *te }() args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider) + shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, args.nodeProcessingMode) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -807,7 +808,7 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithScheduledErrorUpda }() args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider) + shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, args.nodeProcessingMode) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" @@ -837,7 +838,7 @@ func TestShardStorageHandler_saveLastCrossNotarizedHeadersWithScheduled(t *testi }() args := createDefaultShardStorageArgs() - shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider) + shardStorage, _ := NewShardStorageHandler(args.generalConfig, args.prefsConfig, args.shardCoordinator, args.pathManagerHandler, args.marshalizer, args.hasher, 1, args.uint64Converter, args.nodeTypeProvider, args.nodeProcessingMode) shard0HeaderHash := "shard0 header hash" lastFinishedMetaBlock := "last finished meta block" prevMetaHash := "prev metaHlock hash" @@ -1059,6 +1060,7 @@ type shardStorageArgs struct { currentEpoch uint32 uint64Converter typeConverters.Uint64ByteSliceConverter nodeTypeProvider core.NodeTypeProviderHandler + nodeProcessingMode common.NodeProcessingMode } func createDefaultShardStorageArgs() shardStorageArgs { @@ -1072,6 +1074,7 @@ func createDefaultShardStorageArgs() shardStorageArgs { currentEpoch: 0, uint64Converter: &mock.Uint64ByteSliceConverterMock{}, nodeTypeProvider: &nodeTypeProviderMock.NodeTypeProviderStub{}, + nodeProcessingMode: common.Normal, } } diff --git a/factory/bootstrap/bootstrapComponents.go b/factory/bootstrap/bootstrapComponents.go index e0f6ae2b110..4d0e3ab0687 100644 --- a/factory/bootstrap/bootstrapComponents.go +++ b/factory/bootstrap/bootstrapComponents.go @@ -201,6 +201,7 @@ func (bcf *bootstrapComponentsFactory) Create() (*bootstrapComponents, error) { 
DataSyncerCreator: dataSyncerFactory, ScheduledSCRsStorer: nil, // will be updated after sync from network TrieSyncStatisticsProvider: tss, + NodeProcessingMode: common.GetNodeProcessingMode(&bcf.importDbConfig), } var epochStartBootstrapper factory.EpochStartBootstrapper diff --git a/factory/data/dataComponents.go b/factory/data/dataComponents.go index 70348f9ff81..d2032acd193 100644 --- a/factory/data/dataComponents.go +++ b/factory/data/dataComponents.go @@ -6,6 +6,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" @@ -28,6 +29,7 @@ type DataComponentsFactoryArgs struct { EpochStartNotifier factory.EpochStartNotifier CurrentEpoch uint32 CreateTrieEpochRootHashStorer bool + NodeProcessingMode common.NodeProcessingMode } type dataComponentsFactory struct { @@ -39,6 +41,7 @@ type dataComponentsFactory struct { statusCore factory.StatusCoreComponentsHolder currentEpoch uint32 createTrieEpochRootHashStorer bool + nodeProcessingMode common.NodeProcessingMode } // dataComponents struct holds the data components @@ -84,6 +87,7 @@ func NewDataComponentsFactory(args DataComponentsFactoryArgs) (*dataComponentsFa epochStartNotifier: args.EpochStartNotifier, currentEpoch: args.CurrentEpoch, createTrieEpochRootHashStorer: args.CreateTrieEpochRootHashStorer, + nodeProcessingMode: args.NodeProcessingMode, }, nil } @@ -172,6 +176,7 @@ func (dcf *dataComponentsFactory) createDataStoreFromConfig() (dataRetriever.Sto CurrentEpoch: dcf.currentEpoch, StorageType: storageFactory.ProcessStorageService, CreateTrieEpochRootHashStorer: dcf.createTrieEpochRootHashStorer, + NodeProcessingMode: dcf.nodeProcessingMode, }) if err != nil { return nil, err diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index 49c62ae9a50..60860011ef4 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -1454,6 +1454,7 @@ func (pcf *processComponentsFactory) newStorageResolver() (dataRetriever.Resolve CurrentEpoch: pcf.bootstrapComponents.EpochBootstrapParams().Epoch(), StorageType: storageFactory.ProcessStorageService, CreateTrieEpochRootHashStorer: false, + NodeProcessingMode: common.GetNodeProcessingMode(&pcf.importDBConfig), }, ) if err != nil { diff --git a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go index 9a54cbaded2..c7c5fd45c40 100644 --- a/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go +++ b/integrationTests/multiShard/endOfEpoch/startInEpoch/startInEpoch_test.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/endProcess" "github.com/multiversx/mx-chain-core-go/data/typeConverters/uint64ByteSlice" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart/bootstrap" @@ -285,6 +286,7 @@ func testNodeStartsInEpoch(t *testing.T, shardID uint32, expectedHighestRound ui CurrentEpoch: 0, StorageType: factory.ProcessStorageService, CreateTrieEpochRootHashStorer: false, + NodeProcessingMode: common.Normal, }, ) 
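The NodeProcessingMode plumbing above relies on a common.GetNodeProcessingMode helper that is called in several factories but whose definition is not part of this excerpt. Judging from its call sites and from the inline branch it replaces in nodeRunner.go below, it plausibly reduces to the following sketch; the file placement and exact signature are assumptions.

package common

import "github.com/multiversx/mx-chain-go/config"

// GetNodeProcessingMode maps the import-db flag onto a processing mode,
// mirroring the branch removed from CreateManagedStateComponents
func GetNodeProcessingMode(importDbConfig *config.ImportDbConfig) NodeProcessingMode {
	if importDbConfig.IsImportDBMode {
		return ImportDb
	}
	return Normal
}

Centralizing this mapping keeps the import-db special case in one place instead of letting each component factory re-derive it; the mode then gates behavior such as the trie persister tracker in pruningStorerFactory.go below, which is installed only for ProcessStorageService storers running in common.Normal mode.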
assert.NoError(t, err) diff --git a/node/nodeRunner.go b/node/nodeRunner.go index 7bbc5941cc1..afccea3d0b0 100644 --- a/node/nodeRunner.go +++ b/node/nodeRunner.go @@ -1259,6 +1259,7 @@ func (nr *nodeRunner) CreateManagedDataComponents( EpochStartNotifier: coreComponents.EpochStartNotifierWithConfirm(), CurrentEpoch: storerEpoch, CreateTrieEpochRootHashStorer: configs.ImportDbConfig.ImportDbSaveTrieEpochRootHash, + NodeProcessingMode: common.GetNodeProcessingMode(nr.configs.ImportDbConfig), } dataComponentsFactory, err := dataComp.NewDataComponentsFactory(dataArgs) @@ -1295,17 +1296,13 @@ func (nr *nodeRunner) CreateManagedStateComponents( dataComponents mainFactory.DataComponentsHandler, statusCoreComponents mainFactory.StatusCoreComponentsHolder, ) (mainFactory.StateComponentsHandler, error) { - processingMode := common.Normal - if nr.configs.ImportDbConfig.IsImportDBMode { - processingMode = common.ImportDb - } stateArgs := stateComp.StateComponentsFactoryArgs{ Config: *nr.configs.GeneralConfig, ShardCoordinator: bootstrapComponents.ShardCoordinator(), Core: coreComponents, StatusCore: statusCoreComponents, StorageService: dataComponents.StorageService(), - ProcessingMode: processingMode, + ProcessingMode: common.GetNodeProcessingMode(nr.configs.ImportDbConfig), ShouldSerializeSnapshots: nr.configs.FlagsConfig.SerializeSnapshots, ChainHandler: dataComponents.Blockchain(), } diff --git a/storage/factory/pruningStorerFactory.go b/storage/factory/pruningStorerFactory.go index 34293aaf9f3..899cf46f969 100644 --- a/storage/factory/pruningStorerFactory.go +++ b/storage/factory/pruningStorerFactory.go @@ -6,6 +6,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart" @@ -48,6 +49,7 @@ type StorageServiceFactory struct { createTrieEpochRootHashStorer bool currentEpoch uint32 storageType StorageServiceType + nodeProcessingMode common.NodeProcessingMode } // StorageServiceFactoryArgs holds the arguments needed for creating a new storage service factory @@ -61,6 +63,7 @@ type StorageServiceFactoryArgs struct { StorageType StorageServiceType CurrentEpoch uint32 CreateTrieEpochRootHashStorer bool + NodeProcessingMode common.NodeProcessingMode } // NewStorageServiceFactory will return a new instance of StorageServiceFactory @@ -91,6 +94,7 @@ func NewStorageServiceFactory(args StorageServiceFactoryArgs) (*StorageServiceFa createTrieEpochRootHashStorer: args.CreateTrieEpochRootHashStorer, oldDataCleanerProvider: oldDataCleanProvider, storageType: args.StorageType, + nodeProcessingMode: args.NodeProcessingMode, }, nil } @@ -364,7 +368,7 @@ func (psf *StorageServiceFactory) createTriePruningStorer( customDatabaseRemover storage.CustomDatabaseRemoverHandler, ) (storage.Storer, error) { accountsUnitArgs := psf.createPruningStorerArgs(storageConfig, customDatabaseRemover) - if psf.storageType == ProcessStorageService { + if psf.storageType == ProcessStorageService && psf.nodeProcessingMode == common.Normal { accountsUnitArgs.PersistersTracker = pruning.NewTriePersisterTracker(accountsUnitArgs.EpochsData) } diff --git a/testscommon/components/components.go b/testscommon/components/components.go index 1afe538b5b6..7f38e669f7b 100644 --- a/testscommon/components/components.go +++ b/testscommon/components/components.go @@ -218,6 +218,7 @@ func GetDataArgs(coreComponents 
factory.CoreComponentsHolder, shardCoordinator s EpochStartNotifier: &mock.EpochStartNotifierStub{}, CurrentEpoch: 0, CreateTrieEpochRootHashStorer: false, + NodeProcessingMode: common.Normal, } } From b61fd477c2a647bf979e744514c95c2c40928074 Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Wed, 15 Mar 2023 10:06:41 +0200 Subject: [PATCH 05/20] fix after review --- common/errChan/errChan.go | 27 ++++--- common/errChan/errChan_test.go | 81 ++++++++++++++++--- debug/process/stateExport.go | 2 +- epochStart/metachain/systemSCs.go | 2 +- factory/processing/processComponents.go | 2 +- .../stateTrieClose/stateTrieClose_test.go | 8 +- .../state/stateTrieSync/stateTrieSync_test.go | 6 +- integrationTests/testProcessorNode.go | 2 +- node/node.go | 8 +- node/trieIterators/delegatedListProcessor.go | 2 +- .../directStakedListProcessor.go | 2 +- node/trieIterators/stakeValuesProcessor.go | 2 +- process/block/baseProcess.go | 4 +- process/peer/process.go | 2 +- process/txsimulator/wrappedAccountsDB_test.go | 2 +- state/accountsDB.go | 8 +- state/accountsDB_test.go | 2 +- state/peerAccountsDB.go | 4 +- state/syncer/userAccountsSyncer.go | 2 +- trie/export_test.go | 5 +- trie/node_test.go | 2 +- trie/patriciaMerkleTrie_test.go | 12 +-- trie/trieStorageManager.go | 7 -- trie/trieStorageManagerFactory_test.go | 4 +- ...ieStorageManagerWithoutCheckpoints_test.go | 4 +- .../trieStorageManagerWithoutSnapshot_test.go | 2 +- trie/trieStorageManager_test.go | 36 ++++----- update/genesis/export.go | 2 +- 28 files changed, 150 insertions(+), 92 deletions(-) diff --git a/common/errChan/errChan.go b/common/errChan/errChan.go index a2f08d1b3fb..47cf29e320b 100644 --- a/common/errChan/errChan.go +++ b/common/errChan/errChan.go @@ -2,22 +2,29 @@ package errChan import "sync" -type errChan struct { +type errChanWrapper struct { ch chan error closed bool - closeMutex sync.Mutex + closeMutex sync.RWMutex } -// NewErrChan creates a new errChan -func NewErrChan() *errChan { - return &errChan{ +// NewErrChanWrapper creates a new errChanWrapper +func NewErrChanWrapper() *errChanWrapper { + return &errChanWrapper{ ch: make(chan error, 1), closed: false, } } // WriteInChanNonBlocking will send the given error on the channel if the chan is not blocked -func (ec *errChan) WriteInChanNonBlocking(err error) { +func (ec *errChanWrapper) WriteInChanNonBlocking(err error) { + ec.closeMutex.RLock() + defer ec.closeMutex.RUnlock() + + if ec.closed { + return + } + select { case ec.ch <- err: default: @@ -25,7 +32,7 @@ func (ec *errChan) WriteInChanNonBlocking(err error) { } // ReadFromChanNonBlocking will read from the channel, or return nil if no error was sent on the channel -func (ec *errChan) ReadFromChanNonBlocking() error { +func (ec *errChanWrapper) ReadFromChanNonBlocking() error { select { case err := <-ec.ch: return err @@ -35,7 +42,7 @@ func (ec *errChan) ReadFromChanNonBlocking() error { } // Close will close the channel -func (ec *errChan) Close() { +func (ec *errChanWrapper) Close() { ec.closeMutex.Lock() defer ec.closeMutex.Unlock() @@ -52,11 +59,11 @@ func (ec *errChan) Close() { } // Len returns the length of the channel -func (ec *errChan) Len() int { +func (ec *errChanWrapper) Len() int { return len(ec.ch) } // IsInterfaceNil returns true if there is no value under the interface -func (ec *errChan) IsInterfaceNil() bool { +func (ec *errChanWrapper) IsInterfaceNil() bool { return ec == nil } diff --git a/common/errChan/errChan_test.go b/common/errChan/errChan_test.go index b753a4ce224..3d88f358015 100644 --- 
a/common/errChan/errChan_test.go +++ b/common/errChan/errChan_test.go @@ -2,6 +2,7 @@ package errChan import ( "fmt" + "sync" "testing" "github.com/multiversx/mx-chain-core-go/core/check" @@ -11,7 +12,7 @@ import ( func TestNewErrChan(t *testing.T) { t.Parallel() - ec := NewErrChan() + ec := NewErrChanWrapper() assert.False(t, check.IfNil(ec)) assert.Equal(t, 1, cap(ec.ch)) } @@ -19,22 +20,44 @@ func TestNewErrChan(t *testing.T) { func TestErrChan_WriteInChanNonBlocking(t *testing.T) { t.Parallel() - expectedErr := fmt.Errorf("err1") - ec := NewErrChan() - ec.WriteInChanNonBlocking(expectedErr) - ec.WriteInChanNonBlocking(fmt.Errorf("err2")) - ec.WriteInChanNonBlocking(fmt.Errorf("err3")) + t.Run("write in a nil channel", func(t *testing.T) { + t.Parallel() - assert.Equal(t, 1, len(ec.ch)) - assert.Equal(t, expectedErr, <-ec.ch) - assert.Equal(t, 0, len(ec.ch)) + ec := NewErrChanWrapper() + ec.ch = nil + ec.WriteInChanNonBlocking(fmt.Errorf("err1")) + + assert.Equal(t, 0, len(ec.ch)) + }) + + t.Run("write in a closed channel", func(t *testing.T) { + t.Parallel() + + ec := NewErrChanWrapper() + ec.Close() + ec.WriteInChanNonBlocking(fmt.Errorf("err1")) + + assert.Equal(t, 0, len(ec.ch)) + }) + + t.Run("should work", func(t *testing.T) { + expectedErr := fmt.Errorf("err1") + ec := NewErrChanWrapper() + ec.WriteInChanNonBlocking(expectedErr) + ec.WriteInChanNonBlocking(fmt.Errorf("err2")) + ec.WriteInChanNonBlocking(fmt.Errorf("err3")) + + assert.Equal(t, 1, len(ec.ch)) + assert.Equal(t, expectedErr, <-ec.ch) + assert.Equal(t, 0, len(ec.ch)) + }) } func TestErrChan_ReadFromChanNonBlocking(t *testing.T) { t.Parallel() expectedErr := fmt.Errorf("err1") - ec := NewErrChan() + ec := NewErrChanWrapper() ec.ch <- expectedErr assert.Equal(t, 1, len(ec.ch)) @@ -49,7 +72,7 @@ func TestErrChan_Close(t *testing.T) { t.Run("close an already closed channel", func(t *testing.T) { t.Parallel() - ec := NewErrChan() + ec := NewErrChanWrapper() ec.Close() assert.True(t, ec.closed) @@ -59,7 +82,7 @@ func TestErrChan_Close(t *testing.T) { t.Run("close a nil channel", func(t *testing.T) { t.Parallel() - ec := NewErrChan() + ec := NewErrChanWrapper() ec.ch = nil ec.Close() @@ -70,7 +93,7 @@ func TestErrChan_Close(t *testing.T) { func TestErrChan_Len(t *testing.T) { t.Parallel() - ec := NewErrChan() + ec := NewErrChanWrapper() assert.Equal(t, 0, ec.Len()) ec.ch <- fmt.Errorf("err1") @@ -79,3 +102,35 @@ func TestErrChan_Len(t *testing.T) { ec.WriteInChanNonBlocking(fmt.Errorf("err2")) assert.Equal(t, 1, ec.Len()) } + +func TestErrChan_ConcurrentOperations(t *testing.T) { + t.Parallel() + + ec := NewErrChanWrapper() + numOperations := 1000 + numMethods := 2 + wg := sync.WaitGroup{} + wg.Add(numOperations) + for i := 0; i < numOperations; i++ { + go func(idx int) { + + if idx == numOperations-100 { + ec.Close() + } + + operation := idx % numMethods + switch operation { + case 0: + ec.WriteInChanNonBlocking(fmt.Errorf("err")) + case 1: + _ = ec.ReadFromChanNonBlocking() + default: + assert.Fail(t, "invalid numMethods") + } + + wg.Done() + }(i) + } + + wg.Wait() +} diff --git a/debug/process/stateExport.go b/debug/process/stateExport.go index b8cd8128255..831aaebfc0e 100644 --- a/debug/process/stateExport.go +++ b/debug/process/stateExport.go @@ -67,7 +67,7 @@ func getCode(accountsDB state.AccountsAdapter, codeHash []byte) ([]byte, error) func getData(accountsDB state.AccountsAdapter, rootHash []byte, address []byte) ([]string, error) { leavesChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan 
core.KeyValueHolder), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } err := accountsDB.GetAllLeaves(leavesChannels, context.Background(), rootHash) diff --git a/epochStart/metachain/systemSCs.go b/epochStart/metachain/systemSCs.go index faf909008fe..645f54ce3ea 100644 --- a/epochStart/metachain/systemSCs.go +++ b/epochStart/metachain/systemSCs.go @@ -1103,7 +1103,7 @@ func (s *systemSCProcessor) getArgumentsForSetOwnerFunctionality(userValidatorAc leavesChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } err = userValidatorAccount.DataTrie().GetAllLeavesOnChannel(leavesChannels, context.Background(), rootHash, keyBuilder.NewKeyBuilder()) if err != nil { diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index 347a2645790..312132b9210 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -875,7 +875,7 @@ func (pcf *processComponentsFactory) indexAndReturnGenesisAccounts() (map[string leavesChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } err = pcf.state.AccountsAdapter().GetAllLeaves(leavesChannels, context.Background(), rootHash) if err != nil { diff --git a/integrationTests/state/stateTrieClose/stateTrieClose_test.go b/integrationTests/state/stateTrieClose/stateTrieClose_test.go index cfa352c9b24..985f49c660a 100644 --- a/integrationTests/state/stateTrieClose/stateTrieClose_test.go +++ b/integrationTests/state/stateTrieClose/stateTrieClose_test.go @@ -37,7 +37,7 @@ func TestPatriciaMerkleTrie_Close(t *testing.T) { rootHash, _ := tr.RootHash() leavesChannel1 := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } _ = tr.GetAllLeavesOnChannel(leavesChannel1, context.Background(), rootHash, keyBuilder.NewDisabledKeyBuilder()) time.Sleep(time.Second) // allow the go routine to start @@ -49,7 +49,7 @@ func TestPatriciaMerkleTrie_Close(t *testing.T) { leavesChannel1 = &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } _ = tr.GetAllLeavesOnChannel(leavesChannel1, context.Background(), rootHash, keyBuilder.NewDisabledKeyBuilder()) idx, _ = gc.Snapshot() @@ -64,7 +64,7 @@ func TestPatriciaMerkleTrie_Close(t *testing.T) { rootHash, _ = tr.RootHash() leavesChannel1 = &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } _ = tr.GetAllLeavesOnChannel(leavesChannel1, context.Background(), rootHash, keyBuilder.NewDisabledKeyBuilder()) idx, _ = gc.Snapshot() @@ -79,7 +79,7 @@ func TestPatriciaMerkleTrie_Close(t *testing.T) { rootHash, _ = tr.RootHash() leavesChannel2 := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } _ = tr.GetAllLeavesOnChannel(leavesChannel2, context.Background(), rootHash, keyBuilder.NewDisabledKeyBuilder()) 
time.Sleep(time.Second) // allow the go routine to start diff --git a/integrationTests/state/stateTrieSync/stateTrieSync_test.go b/integrationTests/state/stateTrieSync/stateTrieSync_test.go index 2704aa1fd4c..7b2e28e5866 100644 --- a/integrationTests/state/stateTrieSync/stateTrieSync_test.go +++ b/integrationTests/state/stateTrieSync/stateTrieSync_test.go @@ -330,7 +330,7 @@ func testMultipleDataTriesSync(t *testing.T, numAccounts int, numDataTrieLeaves rootHash, _ := accState.RootHash() leavesChannel := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } err = accState.GetAllLeaves(leavesChannel, context.Background(), rootHash) for range leavesChannel.LeavesChan { @@ -358,7 +358,7 @@ func testMultipleDataTriesSync(t *testing.T, numAccounts int, numDataTrieLeaves leavesChannel = &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } err = nRequester.AccntState.GetAllLeaves(leavesChannel, context.Background(), rootHash) assert.Nil(t, err) @@ -560,7 +560,7 @@ func addAccountsToState(t *testing.T, numAccounts int, numDataTrieLeaves int, ac func getNumLeaves(t *testing.T, tr common.Trie, rootHash []byte) int { leavesChannel := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } err := tr.GetAllLeavesOnChannel(leavesChannel, context.Background(), rootHash, keyBuilder.NewDisabledKeyBuilder()) require.Nil(t, err) diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 8d3b1834f34..995d1659d8a 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -3149,7 +3149,7 @@ func GetTokenIdentifier(nodes []*TestProcessorNode, ticker []byte) []byte { rootHash, _ := userAcc.DataTrie().RootHash() chLeaves := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } _ = userAcc.DataTrie().GetAllLeavesOnChannel(chLeaves, context.Background(), rootHash, keyBuilder.NewKeyBuilder()) for leaf := range chLeaves.LeavesChan { diff --git a/node/node.go b/node/node.go index 3af65f4d36b..4ee5485e8f6 100644 --- a/node/node.go +++ b/node/node.go @@ -216,7 +216,7 @@ func (n *Node) GetAllIssuedESDTs(tokenType string, ctx context.Context) ([]strin chLeaves := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } err = userAccount.DataTrie().GetAllLeavesOnChannel(chLeaves, ctx, rootHash, keyBuilder.NewKeyBuilder()) if err != nil { @@ -292,7 +292,7 @@ func (n *Node) GetKeyValuePairs(address string, options api.AccountQueryOptions, chLeaves := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } err = userAccount.DataTrie().GetAllLeavesOnChannel(chLeaves, ctx, rootHash, keyBuilder.NewKeyBuilder()) if err != nil { @@ -400,7 +400,7 @@ func (n *Node) getTokensIDsWithFilter( chLeaves := &common.TrieIteratorChannels{ LeavesChan: make(chan 
core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } err = userAccount.DataTrie().GetAllLeavesOnChannel(chLeaves, ctx, rootHash, keyBuilder.NewKeyBuilder()) if err != nil { @@ -536,7 +536,7 @@ func (n *Node) GetAllESDTTokens(address string, options api.AccountQueryOptions, chLeaves := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } err = userAccount.DataTrie().GetAllLeavesOnChannel(chLeaves, ctx, rootHash, keyBuilder.NewKeyBuilder()) if err != nil { diff --git a/node/trieIterators/delegatedListProcessor.go b/node/trieIterators/delegatedListProcessor.go index c77d54a0880..acf6c763128 100644 --- a/node/trieIterators/delegatedListProcessor.go +++ b/node/trieIterators/delegatedListProcessor.go @@ -130,7 +130,7 @@ func (dlp *delegatedListProcessor) getDelegatorsList(delegationSC []byte, ctx co chLeaves := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } err = delegatorAccount.DataTrie().GetAllLeavesOnChannel(chLeaves, ctx, rootHash, keyBuilder.NewKeyBuilder()) if err != nil { diff --git a/node/trieIterators/directStakedListProcessor.go b/node/trieIterators/directStakedListProcessor.go index 3e4ca62daeb..884607e7d7f 100644 --- a/node/trieIterators/directStakedListProcessor.go +++ b/node/trieIterators/directStakedListProcessor.go @@ -57,7 +57,7 @@ func (dslp *directStakedListProcessor) getAllStakedAccounts(validatorAccount sta chLeaves := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } err = validatorAccount.DataTrie().GetAllLeavesOnChannel(chLeaves, ctx, rootHash, keyBuilder.NewKeyBuilder()) if err != nil { diff --git a/node/trieIterators/stakeValuesProcessor.go b/node/trieIterators/stakeValuesProcessor.go index b0f01baff76..17109690b98 100644 --- a/node/trieIterators/stakeValuesProcessor.go +++ b/node/trieIterators/stakeValuesProcessor.go @@ -99,7 +99,7 @@ func (svp *stakedValuesProcessor) computeBaseStakedAndTopUp(ctx context.Context) // TODO investigate if a call to GetAllLeavesKeysOnChannel (without values) might increase performance chLeaves := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } err = validatorAccount.DataTrie().GetAllLeavesOnChannel(chLeaves, ctx, rootHash, keyBuilder.NewKeyBuilder()) if err != nil { diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index b327d045b63..02ca6f7652e 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -1734,7 +1734,7 @@ func (bp *baseProcessor) commitTrieEpochRootHashIfNeeded(metaBlock *block.MetaBl iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } err = userAccountsDb.GetAllLeaves(iteratorChannels, context.Background(), rootHash) if err != nil { @@ -1763,7 +1763,7 @@ func (bp *baseProcessor) commitTrieEpochRootHashIfNeeded(metaBlock *block.MetaBl if len(rh) != 0 { dataTrie := 
&common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } errDataTrieGet := userAccountsDb.GetAllLeaves(dataTrie, context.Background(), rh) if errDataTrieGet != nil { diff --git a/process/peer/process.go b/process/peer/process.go index 76f264917f9..3eac66835a8 100644 --- a/process/peer/process.go +++ b/process/peer/process.go @@ -563,7 +563,7 @@ func (vs *validatorStatistics) GetValidatorInfoForRootHash(rootHash []byte) (map leavesChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } err := vs.peerAdapter.GetAllLeaves(leavesChannels, context.Background(), rootHash) if err != nil { diff --git a/process/txsimulator/wrappedAccountsDB_test.go b/process/txsimulator/wrappedAccountsDB_test.go index 016a6f6a0f1..e83fe6a0d58 100644 --- a/process/txsimulator/wrappedAccountsDB_test.go +++ b/process/txsimulator/wrappedAccountsDB_test.go @@ -151,7 +151,7 @@ func TestReadOnlyAccountsDB_ReadOperationsShouldWork(t *testing.T) { allLeaves := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } err = roAccDb.GetAllLeaves(allLeaves, context.Background(), nil) require.NoError(t, err) diff --git a/state/accountsDB.go b/state/accountsDB.go index e767ac459fe..5dfa0dead7d 100644 --- a/state/accountsDB.go +++ b/state/accountsDB.go @@ -1037,7 +1037,7 @@ func (adb *AccountsDB) recreateTrie(options common.RootHashHolder) error { func (adb *AccountsDB) RecreateAllTries(rootHash []byte) (map[string]common.Trie, error) { leavesChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, leavesChannelSize), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } mainTrie := adb.getMainTrie() err := mainTrie.GetAllLeavesOnChannel(leavesChannels, context.Background(), rootHash, keyBuilder.NewDisabledKeyBuilder()) @@ -1143,7 +1143,7 @@ func (adb *AccountsDB) SnapshotState(rootHash []byte) { missingNodesChannel := make(chan []byte, missingNodesChannelSize) iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, leavesChannelSize), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } stats := newSnapshotStatistics(1, 1) @@ -1371,7 +1371,7 @@ func (adb *AccountsDB) setStateCheckpoint(rootHash []byte) { iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, leavesChannelSize), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } missingNodesChannel := make(chan []byte, missingNodesChannelSize) stats := newSnapshotStatistics(1, 1) @@ -1437,7 +1437,7 @@ func (adb *AccountsDB) GetStatsForRootHash(rootHash []byte) (common.TriesStatist iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, leavesChannelSize), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } err := mainTrie.GetAllLeavesOnChannel(iteratorChannels, context.Background(), rootHash, keyBuilder.NewDisabledKeyBuilder()) if err != nil { diff --git a/state/accountsDB_test.go b/state/accountsDB_test.go index 3cc0f40a149..c5864dc0e98 100644 --- a/state/accountsDB_test.go +++ b/state/accountsDB_test.go @@ -1442,7 +1442,7 @@ func TestAccountsDB_GetAllLeaves(t *testing.T) { leavesChannel := 
&common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } err := adb.GetAllLeaves(leavesChannel, context.Background(), []byte("root hash")) assert.Nil(t, err) diff --git a/state/peerAccountsDB.go b/state/peerAccountsDB.go index 1e597d884af..171ab6e3d06 100644 --- a/state/peerAccountsDB.go +++ b/state/peerAccountsDB.go @@ -57,7 +57,7 @@ func (adb *PeerAccountsDB) SnapshotState(rootHash []byte) { missingNodesChannel := make(chan []byte, missingNodesChannelSize) iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: nil, - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } stats := newSnapshotStatistics(0, 1) stats.NewSnapshotStarted() @@ -93,7 +93,7 @@ func (adb *PeerAccountsDB) SetStateCheckpoint(rootHash []byte) { stats.NewSnapshotStarted() iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: nil, - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } trieStorageManager.SetCheckpoint(rootHash, rootHash, iteratorChannels, missingNodesChannel, stats) diff --git a/state/syncer/userAccountsSyncer.go b/state/syncer/userAccountsSyncer.go index f508341b749..2e4f7f1f156 100644 --- a/state/syncer/userAccountsSyncer.go +++ b/state/syncer/userAccountsSyncer.go @@ -214,7 +214,7 @@ func (u *userAccountsSyncer) syncAccountDataTries( leavesChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } err = mainTrie.GetAllLeavesOnChannel(leavesChannels, context.Background(), mainRootHash, keyBuilder.NewDisabledKeyBuilder()) if err != nil { diff --git a/trie/export_test.go b/trie/export_test.go index 66c17dd56c3..83fa38f3c8f 100644 --- a/trie/export_test.go +++ b/trie/export_test.go @@ -75,7 +75,10 @@ func GetDirtyHashes(tr common.Trie) common.ModifiedHashes { // WriteInChanNonBlocking - func WriteInChanNonBlocking(errChan chan error, err error) { - writeInChanNonBlocking(errChan, err) + select { + case errChan <- err: + default: + } } type StorageManagerExtensionStub struct { diff --git a/trie/node_test.go b/trie/node_test.go index a993d15d4be..bca8482a618 100644 --- a/trie/node_test.go +++ b/trie/node_test.go @@ -520,7 +520,7 @@ func TestPatriciaMerkleTrie_GetAllLeavesCollapsedTrie(t *testing.T) { leavesChannel := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } err := tr.GetAllLeavesOnChannel(leavesChannel, context.Background(), tr.root.getHash(), keyBuilder.NewKeyBuilder()) assert.Nil(t, err) diff --git a/trie/patriciaMerkleTrie_test.go b/trie/patriciaMerkleTrie_test.go index ab0518247fc..9ec956e96cf 100644 --- a/trie/patriciaMerkleTrie_test.go +++ b/trie/patriciaMerkleTrie_test.go @@ -476,7 +476,7 @@ func TestPatriciaMerkleTrie_GetSerializedNodesGetFromCheckpoint(t *testing.T) { storageManager.AddDirtyCheckpointHashes(rootHash, dirtyHashes) iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: nil, - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } storageManager.SetCheckpoint(rootHash, make([]byte, 0), iteratorChannels, nil, &trieMock.MockStatistics{}) trie.WaitForOperationToComplete(storageManager) @@ -563,7 +563,7 @@ func TestPatriciaMerkleTrie_GetAllLeavesOnChannel(t *testing.T) { iteratorChannels := 
&common.TrieIteratorChannels{ LeavesChan: nil, - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } err := tr.GetAllLeavesOnChannel(iteratorChannels, context.Background(), []byte{}, keyBuilder.NewDisabledKeyBuilder()) assert.Equal(t, trie.ErrNilTrieIteratorLeavesChannel, err) @@ -589,7 +589,7 @@ func TestPatriciaMerkleTrie_GetAllLeavesOnChannel(t *testing.T) { leavesChannel := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } err := tr.GetAllLeavesOnChannel(leavesChannel, context.Background(), []byte{}, keyBuilder.NewDisabledKeyBuilder()) assert.Nil(t, err) @@ -611,7 +611,7 @@ func TestPatriciaMerkleTrie_GetAllLeavesOnChannel(t *testing.T) { leavesChannel := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } expectedErr := errors.New("expected error") @@ -647,7 +647,7 @@ func TestPatriciaMerkleTrie_GetAllLeavesOnChannel(t *testing.T) { leavesChannel := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } expectedErr := errors.New("expected error") @@ -696,7 +696,7 @@ func TestPatriciaMerkleTrie_GetAllLeavesOnChannel(t *testing.T) { leavesChannel := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } err := tr.GetAllLeavesOnChannel(leavesChannel, context.Background(), rootHash, keyBuilder.NewKeyBuilder()) assert.Nil(t, err) diff --git a/trie/trieStorageManager.go b/trie/trieStorageManager.go index 78b647030e3..c5304e45428 100644 --- a/trie/trieStorageManager.go +++ b/trie/trieStorageManager.go @@ -473,13 +473,6 @@ func (tsm *trieStorageManager) takeSnapshot(snapshotEntry *snapshotsQueueEntry, snapshotEntry.stats.AddTrieStats(stats.GetTrieStats()) } -func writeInChanNonBlocking(errChan chan error, err error) { - select { - case errChan <- err: - default: - } -} - func (tsm *trieStorageManager) takeCheckpoint(checkpointEntry *snapshotsQueueEntry, msh marshal.Marshalizer, hsh hashing.Hasher, ctx context.Context, goRoutinesThrottler core.Throttler) { defer func() { tsm.finishOperation(checkpointEntry, "trie checkpoint finished") diff --git a/trie/trieStorageManagerFactory_test.go b/trie/trieStorageManagerFactory_test.go index d79fa2c77fa..8045a06d707 100644 --- a/trie/trieStorageManagerFactory_test.go +++ b/trie/trieStorageManagerFactory_test.go @@ -135,7 +135,7 @@ func TestTrieStorageManager_SerialFuncShadowingCallsExpectedImpl(t *testing.T) { iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } tsm.SetCheckpoint(nil, nil, iteratorChannels, nil, &trieMock.MockStatistics{}) @@ -168,7 +168,7 @@ func testTsmWithoutSnapshot( iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } tsm.TakeSnapshot("", nil, nil, iteratorChannels, nil, &trieMock.MockStatistics{}, 10) diff --git a/trie/trieStorageManagerWithoutCheckpoints_test.go b/trie/trieStorageManagerWithoutCheckpoints_test.go index 2f76292d574..891a14a392e 
100644 --- a/trie/trieStorageManagerWithoutCheckpoints_test.go +++ b/trie/trieStorageManagerWithoutCheckpoints_test.go @@ -28,14 +28,14 @@ func TestTrieStorageManagerWithoutCheckpoints_SetCheckpoint(t *testing.T) { iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: nil, - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } ts.SetCheckpoint([]byte("rootHash"), make([]byte, 0), iteratorChannels, nil, &trieMock.MockStatistics{}) assert.Equal(t, uint32(0), ts.PruningBlockingOperations()) iteratorChannels = &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } ts.SetCheckpoint([]byte("rootHash"), make([]byte, 0), iteratorChannels, nil, &trieMock.MockStatistics{}) assert.Equal(t, uint32(0), ts.PruningBlockingOperations()) diff --git a/trie/trieStorageManagerWithoutSnapshot_test.go b/trie/trieStorageManagerWithoutSnapshot_test.go index d3ba6073770..309e328433f 100644 --- a/trie/trieStorageManagerWithoutSnapshot_test.go +++ b/trie/trieStorageManagerWithoutSnapshot_test.go @@ -80,7 +80,7 @@ func TestTrieStorageManagerWithoutSnapshot_TakeSnapshot(t *testing.T) { iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } ts.TakeSnapshot("", nil, nil, iteratorChannels, nil, &trieMock.MockStatistics{}, 10) diff --git a/trie/trieStorageManager_test.go b/trie/trieStorageManager_test.go index 9dc02201ab4..a0b5a88ce63 100644 --- a/trie/trieStorageManager_test.go +++ b/trie/trieStorageManager_test.go @@ -37,8 +37,8 @@ func getNewTrieStorageManagerArgs() trie.NewTrieStorageManagerArgs { } } -// ErrChanWithLen extends the BufferedErrChan interface with a Len method -type ErrChanWithLen interface { +// errChanWithLen extends the BufferedErrChan interface with a Len method +type errChanWithLen interface { common.BufferedErrChan Len() int } @@ -98,7 +98,7 @@ func TestTrieCheckpoint(t *testing.T) { trieStorage.AddDirtyCheckpointHashes(rootHash, dirtyHashes) iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: nil, - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } trieStorage.SetCheckpoint(rootHash, []byte{}, iteratorChannels, nil, &trieMock.MockStatistics{}) trie.WaitForOperationToComplete(trieStorage) @@ -107,7 +107,7 @@ func TestTrieCheckpoint(t *testing.T) { assert.Nil(t, err) assert.NotNil(t, val) - ch, ok := iteratorChannels.ErrChan.(ErrChanWithLen) + ch, ok := iteratorChannels.ErrChan.(errChanWithLen) assert.True(t, ok) assert.Equal(t, 0, ch.Len()) } @@ -141,13 +141,13 @@ func TestTrieStorageManager_SetCheckpointClosedDb(t *testing.T) { rootHash := []byte("rootHash") iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } ts.SetCheckpoint(rootHash, rootHash, iteratorChannels, nil, &trieMock.MockStatistics{}) _, ok := <-iteratorChannels.LeavesChan assert.False(t, ok) - ch, ok := iteratorChannels.ErrChan.(ErrChanWithLen) + ch, ok := iteratorChannels.ErrChan.(errChanWithLen) assert.True(t, ok) assert.Equal(t, 0, ch.Len()) } @@ -161,13 +161,13 @@ func TestTrieStorageManager_SetCheckpointEmptyTrieRootHash(t *testing.T) { rootHash := make([]byte, 32) iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } 
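The migrated call sites all share one consumption contract: the iterator goroutine closes LeavesChan when the walk finishes and reports failures through the buffered ErrChan, so a consumer drains the leaves first and polls the error channel only afterwards. A minimal sketch of that contract, with tr and rootHash as placeholders and import paths assumed to follow the repository layout:

import (
	"context"

	"github.com/multiversx/mx-chain-core-go/core"
	"github.com/multiversx/mx-chain-go/common"
	"github.com/multiversx/mx-chain-go/common/errChan"
	"github.com/multiversx/mx-chain-go/trie/keyBuilder"
)

func iterateAllLeaves(tr common.Trie, rootHash []byte) error {
	iteratorChannels := &common.TrieIteratorChannels{
		LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity),
		ErrChan:    errChan.NewErrChanWrapper(),
	}

	// the producer spawns a goroutine and closes LeavesChan when iteration ends
	err := tr.GetAllLeavesOnChannel(iteratorChannels, context.Background(), rootHash, keyBuilder.NewKeyBuilder())
	if err != nil {
		return err
	}

	for leaf := range iteratorChannels.LeavesChan {
		_ = leaf // process the core.KeyValueHolder
	}

	// nil when no error was written while the leaves were streamed
	return iteratorChannels.ErrChan.ReadFromChanNonBlocking()
}

The same contract explains why PeerAccountsDB can pass LeavesChan: nil when only the side effects of the walk matter: the producer is expected to tolerate a missing leaves channel while still reporting errors through the wrapper.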
ts.SetCheckpoint(rootHash, rootHash, iteratorChannels, nil, &trieMock.MockStatistics{}) _, ok := <-iteratorChannels.LeavesChan assert.False(t, ok) - ch, ok := iteratorChannels.ErrChan.(ErrChanWithLen) + ch, ok := iteratorChannels.ErrChan.(errChanWithLen) assert.True(t, ok) assert.Equal(t, 0, ch.Len()) } @@ -184,7 +184,7 @@ func TestTrieCheckpoint_DoesNotSaveToCheckpointStorageIfNotDirty(t *testing.T) { iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: nil, - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } trieStorage.SetCheckpoint(rootHash, []byte{}, iteratorChannels, nil, &trieMock.MockStatistics{}) trie.WaitForOperationToComplete(trieStorage) @@ -192,7 +192,7 @@ func TestTrieCheckpoint_DoesNotSaveToCheckpointStorageIfNotDirty(t *testing.T) { val, err = trieStorage.GetFromCheckpoint(rootHash) assert.NotNil(t, err) assert.Nil(t, val) - ch, ok := iteratorChannels.ErrChan.(ErrChanWithLen) + ch, ok := iteratorChannels.ErrChan.(errChanWithLen) assert.True(t, ok) assert.Equal(t, 0, ch.Len()) } @@ -360,13 +360,13 @@ func TestTrieStorageManager_TakeSnapshotClosedDb(t *testing.T) { rootHash := []byte("rootHash") iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } ts.TakeSnapshot("", rootHash, rootHash, iteratorChannels, nil, &trieMock.MockStatistics{}, 0) _, ok := <-iteratorChannels.LeavesChan assert.False(t, ok) - ch, ok := iteratorChannels.ErrChan.(ErrChanWithLen) + ch, ok := iteratorChannels.ErrChan.(errChanWithLen) assert.True(t, ok) assert.Equal(t, 0, ch.Len()) } @@ -380,13 +380,13 @@ func TestTrieStorageManager_TakeSnapshotEmptyTrieRootHash(t *testing.T) { rootHash := make([]byte, 32) iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } ts.TakeSnapshot("", rootHash, rootHash, iteratorChannels, nil, &trieMock.MockStatistics{}, 0) _, ok := <-iteratorChannels.LeavesChan assert.False(t, ok) - ch, ok := iteratorChannels.ErrChan.(ErrChanWithLen) + ch, ok := iteratorChannels.ErrChan.(errChanWithLen) assert.True(t, ok) assert.Equal(t, 0, ch.Len()) } @@ -401,14 +401,14 @@ func TestTrieStorageManager_TakeSnapshotWithGetNodeFromDBError(t *testing.T) { rootHash := []byte("rootHash") iteratorChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } missingNodesChan := make(chan []byte, 2) ts.TakeSnapshot("", rootHash, rootHash, iteratorChannels, missingNodesChan, &trieMock.MockStatistics{}, 0) _, ok := <-iteratorChannels.LeavesChan assert.False(t, ok) - ch, ok := iteratorChannels.ErrChan.(ErrChanWithLen) + ch, ok := iteratorChannels.ErrChan.(errChanWithLen) assert.True(t, ok) assert.Equal(t, 1, ch.Len()) errRecovered := iteratorChannels.ErrChan.ReadFromChanNonBlocking() @@ -490,7 +490,7 @@ func TestWriteInChanNonBlocking(t *testing.T) { t.Run("buffered (one element), empty chan should add", func(t *testing.T) { t.Parallel() - errChannel := errChan.NewErrChan() + errChannel := errChan.NewErrChanWrapper() errChannel.WriteInChanNonBlocking(err1) require.Equal(t, 1, errChannel.Len()) @@ -500,7 +500,7 @@ func TestWriteInChanNonBlocking(t *testing.T) { t.Run("buffered (1 element), full chan should not add, but should finish", func(t *testing.T) { t.Parallel() - errChannel := errChan.NewErrChan() + errChannel := errChan.NewErrChanWrapper() 
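These rewritten tests lean on the wrapper's concurrency guarantee: WriteInChanNonBlocking takes the read lock and re-checks the closed flag before selecting on the channel, while Close takes the write lock, so a late writer becomes a silent no-op instead of a panic on a closed channel. Condensed into a standalone snippet, the property exercised by TestErrChan_ConcurrentOperations in patch 05 looks roughly like this:

import (
	"errors"
	"sync"

	"github.com/multiversx/mx-chain-go/common/errChan"
)

func demoConcurrentClose() {
	ec := errChan.NewErrChanWrapper()

	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func(idx int) {
			defer wg.Done()
			if idx == 50 {
				ec.Close() // write lock: excludes all concurrent writers
				return
			}
			// read lock plus closed check: a no-op once Close has run
			ec.WriteInChanNonBlocking(errors.New("err"))
		}(i)
	}
	wg.Wait()
}

Letting many writers hold the read lock at once is safe because sends on a buffered channel are already synchronized; the mutex only has to make Close mutually exclusive with the writers.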
errChannel.WriteInChanNonBlocking(err1) errChannel.WriteInChanNonBlocking(err2) diff --git a/update/genesis/export.go b/update/genesis/export.go index ccf2b3cdb62..149f29ef6c1 100644 --- a/update/genesis/export.go +++ b/update/genesis/export.go @@ -296,7 +296,7 @@ func (se *stateExport) exportTrie(key string, trie common.Trie) error { leavesChannels := &common.TrieIteratorChannels{ LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), - ErrChan: errChan.NewErrChan(), + ErrChan: errChan.NewErrChanWrapper(), } err = trie.GetAllLeavesOnChannel(leavesChannels, context.Background(), rootHash, keyBuilder.NewKeyBuilder()) if err != nil { From 35fa2808a348952006a85b6680b24167624a047b Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Thu, 16 Mar 2023 13:12:13 +0200 Subject: [PATCH 06/20] remove unused code --- common/channels.go | 10 -------- common/channels_test.go | 57 ----------------------------------------- 2 files changed, 67 deletions(-) diff --git a/common/channels.go b/common/channels.go index 3d00dcde162..177ac89f5c5 100644 --- a/common/channels.go +++ b/common/channels.go @@ -7,13 +7,3 @@ func GetClosedUnbufferedChannel() chan struct{} { return ch } - -// GetErrorFromChanNonBlocking will get the error from channel -func GetErrorFromChanNonBlocking(errChan chan error) error { - select { - case err := <-errChan: - return err - default: - return nil - } -} diff --git a/common/channels_test.go b/common/channels_test.go index a5fad97d1a4..4e2828e2d6a 100644 --- a/common/channels_test.go +++ b/common/channels_test.go @@ -1,11 +1,8 @@ package common import ( - "errors" "testing" - "time" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -39,57 +36,3 @@ func didTriggerHappen(ch chan struct{}) bool { return false } } - -func TestErrFromChannel(t *testing.T) { - t.Parallel() - - t.Run("empty channel, should return nil", func(t *testing.T) { - t.Parallel() - - t.Run("unbuffered chan", func(t *testing.T) { - t.Parallel() - - errChan := make(chan error) - assert.Nil(t, GetErrorFromChanNonBlocking(errChan)) - }) - - t.Run("buffered chan", func(t *testing.T) { - t.Parallel() - - errChan := make(chan error, 1) - assert.Nil(t, GetErrorFromChanNonBlocking(errChan)) - }) - }) - - t.Run("non empty channel, should return error", func(t *testing.T) { - t.Parallel() - - t.Run("unbuffered chan", func(t *testing.T) { - t.Parallel() - - expectedErr := errors.New("expected error") - errChan := make(chan error) - go func() { - errChan <- expectedErr - }() - - time.Sleep(time.Second) // allow the go routine to start - - assert.Equal(t, expectedErr, GetErrorFromChanNonBlocking(errChan)) - }) - - t.Run("buffered chan", func(t *testing.T) { - t.Parallel() - - for i := 1; i < 10; i++ { - errChan := make(chan error, i) - expectedErr := errors.New("expected error") - for j := 0; j < i; j++ { - errChan <- expectedErr - } - - assert.Equal(t, expectedErr, GetErrorFromChanNonBlocking(errChan)) - } - }) - }) -} From 94c2a365964c580a59c4451ee40f7c16eda83e38 Mon Sep 17 00:00:00 2001 From: jules01 Date: Tue, 21 Mar 2023 10:44:41 +0200 Subject: [PATCH 07/20] - compressed configs --- cmd/node/config/enableEpochs.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/node/config/enableEpochs.toml b/cmd/node/config/enableEpochs.toml index 7bd4ffbcd41..7a2bf067ddd 100644 --- a/cmd/node/config/enableEpochs.toml +++ b/cmd/node/config/enableEpochs.toml @@ -240,7 +240,7 @@ AlwaysSaveTokenMetaDataEnableEpoch = 1 # RuntimeCodeSizeFixEnableEpoch 
represents the epoch when the code size fix in the VM is enabled - RuntimeCodeSizeFixEnableEpoch = 2 + RuntimeCodeSizeFixEnableEpoch = 1 # BLSMultiSignerEnableEpoch represents the activation epoch for different types of BLS multi-signers BLSMultiSignerEnableEpoch = [ From 49840fca3dd586662a81a40a415742bdea5d7342 Mon Sep 17 00:00:00 2001 From: jules01 Date: Thu, 23 Mar 2023 16:02:21 +0200 Subject: [PATCH 08/20] - added semi-integration test --- integrationTests/vm/txsFee/asyncESDT_test.go | 75 ++++++++++++++++++ .../vm/txsFee/testdata/third/third.wasm | Bin 0 -> 8199 bytes 2 files changed, 75 insertions(+) create mode 100644 integrationTests/vm/txsFee/testdata/third/third.wasm diff --git a/integrationTests/vm/txsFee/asyncESDT_test.go b/integrationTests/vm/txsFee/asyncESDT_test.go index 28415adc9e5..63d566cb1c6 100644 --- a/integrationTests/vm/txsFee/asyncESDT_test.go +++ b/integrationTests/vm/txsFee/asyncESDT_test.go @@ -6,15 +6,18 @@ package txsFee import ( + "context" "encoding/hex" "math/big" "testing" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/state" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/require" ) @@ -474,3 +477,75 @@ func TestSendNFTToContractWith0FunctionNonPayable(t *testing.T) { _, err = testContext.Accounts.Commit() require.Nil(t, err) } + +func TestAsyncESDTCallForThirdContractShouldWork(t *testing.T) { + testContext, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) + require.Nil(t, err) + defer testContext.Close() + + function1 := []byte("add_queued_call") + function2 := []byte("forward_queued_calls") + + egldBalance := big.NewInt(100000000) + ownerAddr := []byte("owner-78901234567890123456789000") + _, _ = vm.CreateAccount(testContext.Accounts, ownerAddr, 0, egldBalance) + + // create an address with ESDT token + sndAddr := []byte("sender-8901234567890123456789000") + + esdtBalance := big.NewInt(100000000) + token := []byte("miiutoken") + utils.CreateAccountWithESDTBalance(t, testContext.Accounts, sndAddr, egldBalance, token, 0, esdtBalance) + + // deploy contract + gasPrice := uint64(10) + ownerAccount, _ := testContext.Accounts.LoadAccount(ownerAddr) + deployGasLimit := uint64(50000) + scAddress := utils.DoDeploySecond(t, testContext, "./testdata/third/third.wasm", ownerAccount, gasPrice, deployGasLimit, nil, big.NewInt(0)) + + testContext.TxFeeHandler.CreateBlockStarted(getZeroGasAndFees()) + utils.CleanAccumulatedIntermediateTransactions(t, testContext) + + // execute first call + gasLimit := uint64(500000) + tx := utils.CreateESDTTransferTx(0, sndAddr, scAddress, token, big.NewInt(5000), gasPrice, gasLimit) + tx.Data = []byte(string(tx.Data) + "@" + hex.EncodeToString(function1) + "@01@" + hex.EncodeToString(scAddress) + "@" + hex.EncodeToString(function2)) + + retCode, err := testContext.TxProcessor.ProcessTransaction(tx) + require.Equal(t, vmcommon.UserError, retCode) + require.Nil(t, err) + + // execute second call + tx = utils.CreateESDTTransferTx(1, sndAddr, scAddress, token, big.NewInt(5000), gasPrice, gasLimit) + tx.Data = []byte(string(tx.Data) + "@" + hex.EncodeToString(function2)) + + retCode, err = testContext.TxProcessor.ProcessTransaction(tx) + require.Equal(t, vmcommon.Ok, retCode) + 
require.Nil(t, err) + + _, err = testContext.Accounts.Commit() + require.Nil(t, err) + + // try to recreate the data trie + scAccount, err := testContext.Accounts.LoadAccount(scAddress) + require.Nil(t, err) + userScAccount := scAccount.(state.UserAccountHandler) + roothash := userScAccount.GetRootHash() + log.Info("recreating data trie", "roothash", roothash) + + leaves := &common.TrieIteratorChannels{ + LeavesChan: make(chan core.KeyValueHolder, 1), + ErrChan: make(chan error, 1), + } + err = testContext.Accounts.GetAllLeaves(leaves, context.Background(), roothash) + require.Nil(t, err) + + for _ = range leaves.LeavesChan { + // do nothing, just iterate + } + + select { + case err = <-leaves.ErrChan: + require.Nil(t, err) + } +} diff --git a/integrationTests/vm/txsFee/testdata/third/third.wasm b/integrationTests/vm/txsFee/testdata/third/third.wasm new file mode 100644 index 0000000000000000000000000000000000000000..ee7f953b0281b5c5ae490ec67ddd5e29ac1b4559 GIT binary patch literal 8199 [8199 bytes of base85-encoded wasm binary payload omitted]
Date: Thu, 23 Mar 2023 17:29:05 +0200 Subject: [PATCH 09/20] - added concurrency protection --- integrationTests/vm/txsFee/asyncESDT_test.go | 11 +++++-- state/accountsDB.go | 30 +++++++++++-------- state/accountsDB_test.go | 10 +++---- state/dataTriesHolder.go | 10 +++++++ state/export_test.go | 6 ++-- .../storagePruningManager.go | 3 +- trie/node.go | 3 +- trie/patriciaMerkleTrie.go | 2 ++ 8 files changed, 51 insertions(+), 24 deletions(-) diff --git a/integrationTests/vm/txsFee/asyncESDT_test.go b/integrationTests/vm/txsFee/asyncESDT_test.go index 63d566cb1c6..7739e6f0bf3 100644 --- a/integrationTests/vm/txsFee/asyncESDT_test.go +++ b/integrationTests/vm/txsFee/asyncESDT_test.go @@ -494,6 +494,7 @@ func TestAsyncESDTCallForThirdContractShouldWork(t *testing.T) { sndAddr := []byte("sender-8901234567890123456789000") esdtBalance := big.NewInt(100000000) + esdtTransferValue := big.NewInt(5000) token := []byte("miiutoken") utils.CreateAccountWithESDTBalance(t, testContext.Accounts, sndAddr, egldBalance, token, 0, esdtBalance) @@ -508,13 +509,16 @@ func TestAsyncESDTCallForThirdContractShouldWork(t *testing.T) { // execute first call gasLimit := uint64(500000) - tx := utils.CreateESDTTransferTx(0,
sndAddr, scAddress, token, big.NewInt(5000), gasPrice, gasLimit) + tx := utils.CreateESDTTransferTx(0, sndAddr, scAddress, token, esdtTransferValue, gasPrice, gasLimit) tx.Data = []byte(string(tx.Data) + "@" + hex.EncodeToString(function1) + "@01@" + hex.EncodeToString(scAddress) + "@" + hex.EncodeToString(function2)) retCode, err := testContext.TxProcessor.ProcessTransaction(tx) require.Equal(t, vmcommon.UserError, retCode) require.Nil(t, err) + utils.CheckESDTBalance(t, testContext, sndAddr, token, esdtBalance) + utils.CheckESDTBalance(t, testContext, scAddress, token, big.NewInt(0)) + // execute second call tx = utils.CreateESDTTransferTx(1, sndAddr, scAddress, token, big.NewInt(5000), gasPrice, gasLimit) tx.Data = []byte(string(tx.Data) + "@" + hex.EncodeToString(function2)) @@ -526,6 +530,9 @@ func TestAsyncESDTCallForThirdContractShouldWork(t *testing.T) { _, err = testContext.Accounts.Commit() require.Nil(t, err) + utils.CheckESDTBalance(t, testContext, sndAddr, token, big.NewInt(0).Sub(esdtBalance, esdtTransferValue)) + utils.CheckESDTBalance(t, testContext, scAddress, token, esdtTransferValue) + // try to recreate the data trie scAccount, err := testContext.Accounts.LoadAccount(scAddress) require.Nil(t, err) @@ -540,7 +547,7 @@ func TestAsyncESDTCallForThirdContractShouldWork(t *testing.T) { err = testContext.Accounts.GetAllLeaves(leaves, context.Background(), roothash) require.Nil(t, err) - for _ = range leaves.LeavesChan { + for range leaves.LeavesChan { // do nothing, just iterate } diff --git a/state/accountsDB.go b/state/accountsDB.go index 7ff1617e04b..7a9e5fa8049 100644 --- a/state/accountsDB.go +++ b/state/accountsDB.go @@ -509,12 +509,11 @@ func saveCodeEntry(codeHash []byte, entry *CodeEntry, trie Updater, marshalizer return nil } -// LoadDataTrie retrieves and saves the SC data inside accountHandler object. +// loadDataTrieConcurrentSafe retrieves and saves the SC data inside accountHandler object. 
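// Illustration only, not part of this patch: loadDataTrieConcurrentSafe pairs a
// single mutex with a cache-first lookup, so concurrent LoadAccount calls on the
// same address share one data trie instead of racing to recreate it. A minimal
// self-contained sketch of that pattern follows; lazyTrieLoader, Trie and
// newLazyTrieLoader are hypothetical names, not the real AccountsDB types
// (assumes import "sync"):
//
//	type Trie interface{}
//
//	type lazyTrieLoader struct {
//		mut   sync.Mutex
//		cache map[string]Trie // plays the role of the dataTries holder
//	}
//
//	func newLazyTrieLoader() *lazyTrieLoader {
//		return &lazyTrieLoader{cache: make(map[string]Trie)}
//	}
//
//	// load serializes callers: consult the cache first, then treat an empty
//	// root hash as "no data trie yet", and only then recreate from storage.
//	func (l *lazyTrieLoader) load(address, rootHash []byte, recreate func([]byte) (Trie, error)) (Trie, error) {
//		l.mut.Lock()
//		defer l.mut.Unlock()
//
//		if tr, ok := l.cache[string(address)]; ok {
//			return tr, nil // cache hit: reuse the already-loaded trie
//		}
//		if len(rootHash) == 0 {
//			return nil, nil
//		}
//
//		tr, err := recreate(rootHash)
//		if err != nil {
//			return nil, err
//		}
//		l.cache[string(address)] = tr
//
//		return tr, nil
//	}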
// Errors if something went wrong -func (adb *AccountsDB) loadDataTrie(accountHandler baseAccountHandler, mainTrie common.Trie) error { - if len(accountHandler.GetRootHash()) == 0 { - return nil - } +func (adb *AccountsDB) loadDataTrieConcurrentSafe(accountHandler baseAccountHandler, mainTrie common.Trie) error { + adb.mutOp.Lock() + defer adb.mutOp.Unlock() dataTrie := adb.dataTries.Get(accountHandler.AddressBytes()) if dataTrie != nil { @@ -522,6 +521,10 @@ func (adb *AccountsDB) loadDataTrie(accountHandler baseAccountHandler, mainTrie return nil } + if len(accountHandler.GetRootHash()) == 0 { + return nil + } + dataTrie, err := mainTrie.Recreate(accountHandler.GetRootHash()) if err != nil { return fmt.Errorf("trie was not found for hash, rootHash = %s, err = %w", hex.EncodeToString(accountHandler.GetRootHash()), err) @@ -702,7 +705,7 @@ func (adb *AccountsDB) LoadAccount(address []byte) (vmcommon.AccountHandler, err baseAcc, ok := acnt.(baseAccountHandler) if ok { - err = adb.loadDataTrie(baseAcc, mainTrie) + err = adb.loadDataTrieConcurrentSafe(baseAcc, mainTrie) if err != nil { return nil, err } @@ -754,7 +757,7 @@ func (adb *AccountsDB) GetExistingAccount(address []byte) (vmcommon.AccountHandl baseAcc, ok := acnt.(baseAccountHandler) if ok { - err = adb.loadDataTrie(baseAcc, mainTrie) + err = adb.loadDataTrieConcurrentSafe(baseAcc, mainTrie) if err != nil { return nil, err } @@ -784,7 +787,7 @@ func (adb *AccountsDB) GetAccountFromBytes(address []byte, accountBytes []byte) return acnt, nil } - err = adb.loadDataTrie(baseAcc, adb.getMainTrie()) + err = adb.loadDataTrieConcurrentSafe(baseAcc, adb.getMainTrie()) if err != nil { return nil, err } @@ -1099,7 +1102,10 @@ func (adb *AccountsDB) journalize(entry JournalEntry) { } adb.entries = append(adb.entries, entry) - log.Trace("accountsDB.Journalize", "new length", len(adb.entries)) + log.Trace("accountsDB.Journalize", + "new length", len(adb.entries), + "entry type", fmt.Sprintf("%T", entry), + ) if len(adb.entries) == 1 { adb.stackDebug = debug.Stack() @@ -1146,12 +1152,12 @@ func (adb *AccountsDB) SnapshotState(rootHash []byte) { } stats := newSnapshotStatistics(1, 1) - accountMetrics := &accountMetrics{ + accountMetricsInstance := &accountMetrics{ snapshotInProgressKey: common.MetricAccountsSnapshotInProgress, lastSnapshotDurationKey: common.MetricLastAccountsSnapshotDurationSec, snapshotMessage: userTrieSnapshotMsg, } - adb.updateMetricsOnSnapshotStart(accountMetrics) + adb.updateMetricsOnSnapshotStart(accountMetricsInstance) go func() { stats.NewSnapshotStarted() @@ -1164,7 +1170,7 @@ func (adb *AccountsDB) SnapshotState(rootHash []byte) { go adb.syncMissingNodes(missingNodesChannel, stats, adb.trieSyncer) - go adb.processSnapshotCompletion(stats, trieStorageManager, missingNodesChannel, iteratorChannels.ErrChan, rootHash, accountMetrics, epoch) + go adb.processSnapshotCompletion(stats, trieStorageManager, missingNodesChannel, iteratorChannels.ErrChan, rootHash, accountMetricsInstance, epoch) adb.waitForCompletionIfAppropriate(stats) } diff --git a/state/accountsDB_test.go b/state/accountsDB_test.go index fb0179db1c3..28f8dbed474 100644 --- a/state/accountsDB_test.go +++ b/state/accountsDB_test.go @@ -698,7 +698,7 @@ func TestAccountsDB_LoadDataNilRootShouldRetNil(t *testing.T) { _, account, adb := generateAddressAccountAccountsDB(tr) // since root is nil, result should be nil and data trie should be nil - err := adb.LoadDataTrie(account) + err := adb.LoadDataTrieConcurrentSafe(account) assert.Nil(t, err) assert.Nil(t, 
account.DataTrie()) } @@ -715,7 +715,7 @@ func TestAccountsDB_LoadDataBadLengthShouldErr(t *testing.T) { account.SetRootHash([]byte("12345")) // should return error - err := adb.LoadDataTrie(account) + err := adb.LoadDataTrieConcurrentSafe(account) assert.NotNil(t, err) } @@ -733,7 +733,7 @@ func TestAccountsDB_LoadDataMalfunctionTrieShouldErr(t *testing.T) { adb := generateAccountDBFromTrie(mockTrie) // should return error - err := adb.LoadDataTrie(account) + err := adb.LoadDataTrieConcurrentSafe(account) assert.NotNil(t, err) } @@ -751,7 +751,7 @@ func TestAccountsDB_LoadDataNotFoundRootShouldReturnErr(t *testing.T) { account.SetRootHash(rootHash) // should return error - err := adb.LoadDataTrie(account) + err := adb.LoadDataTrieConcurrentSafe(account) assert.NotNil(t, err) fmt.Println(err.Error()) } @@ -795,7 +795,7 @@ func TestAccountsDB_LoadDataWithSomeValuesShouldWork(t *testing.T) { account.SetRootHash(rootHash) // should not return error - err := adb.LoadDataTrie(account) + err := adb.LoadDataTrieConcurrentSafe(account) assert.Nil(t, err) // verify data diff --git a/state/dataTriesHolder.go b/state/dataTriesHolder.go index 4cf51bd9a3d..8333b875fce 100644 --- a/state/dataTriesHolder.go +++ b/state/dataTriesHolder.go @@ -4,6 +4,7 @@ import ( "sync" "github.com/multiversx/mx-chain-go/common" + logger "github.com/multiversx/mx-chain-logger-go" ) type dataTriesHolder struct { @@ -20,6 +21,8 @@ func NewDataTriesHolder() *dataTriesHolder { // Put adds a trie pointer to the tries map func (dth *dataTriesHolder) Put(key []byte, tr common.Trie) { + log.Trace("put trie in data tries holder", "key", key) + dth.mutex.Lock() dth.tries[string(key)] = tr dth.mutex.Unlock() @@ -67,6 +70,13 @@ func (dth *dataTriesHolder) GetAllTries() map[string]common.Trie { // Reset clears the tries map func (dth *dataTriesHolder) Reset() { dth.mutex.Lock() + + if log.GetLevel() == logger.LogTrace { + for key := range dth.tries { + log.Trace("reset data tries holder", "key", key) + } + } + dth.tries = make(map[string]common.Trie) dth.mutex.Unlock() } diff --git a/state/export_test.go b/state/export_test.go index d7b956dd0ec..3ff10d977b2 100644 --- a/state/export_test.go +++ b/state/export_test.go @@ -22,9 +22,9 @@ func (adb *AccountsDB) LoadCode(accountHandler baseAccountHandler) error { return adb.loadCode(accountHandler) } -// LoadDataTrie - -func (adb *AccountsDB) LoadDataTrie(accountHandler baseAccountHandler) error { - return adb.loadDataTrie(accountHandler, adb.getMainTrie()) +// LoadDataTrieConcurrentSafe - +func (adb *AccountsDB) LoadDataTrieConcurrentSafe(accountHandler baseAccountHandler) error { + return adb.loadDataTrieConcurrentSafe(accountHandler, adb.getMainTrie()) } // GetAccount - diff --git a/state/storagePruningManager/storagePruningManager.go b/state/storagePruningManager/storagePruningManager.go index 73d9af30847..2dcf7bad076 100644 --- a/state/storagePruningManager/storagePruningManager.go +++ b/state/storagePruningManager/storagePruningManager.go @@ -3,6 +3,7 @@ package storagePruningManager import ( "bytes" "encoding/hex" + "fmt" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" @@ -81,7 +82,7 @@ func (spm *storagePruningManager) markForEviction( return err } - logMapWithTrace("MarkForEviction "+string(identifier), "hash", hashes) + logMapWithTrace(fmt.Sprintf("MarkForEviction %d", identifier), "hash", hashes) return nil } diff --git a/trie/node.go b/trie/node.go index 2fc924ef457..cd62c4df2ec 100644 --- a/trie/node.go +++ b/trie/node.go @@ 
-5,6 +5,7 @@ import ( "context" "encoding/hex" "fmt" + "runtime/debug" "time" "github.com/multiversx/mx-chain-core-go/hashing" @@ -119,7 +120,7 @@ func computeAndSetNodeHash(n node) ([]byte, error) { func getNodeFromDBAndDecode(n []byte, db common.DBWriteCacher, marshalizer marshal.Marshalizer, hasher hashing.Hasher) (node, error) { encChild, err := db.Get(n) if err != nil { - log.Trace(common.GetNodeFromDBErrorString, "error", err, "key", n) + log.Trace(common.GetNodeFromDBErrorString, "error", err, "key", n, "stack trace", string(debug.Stack())) return nil, fmt.Errorf(common.GetNodeFromDBErrorString+" %w for key %v", err, hex.EncodeToString(n)) } diff --git a/trie/patriciaMerkleTrie.go b/trie/patriciaMerkleTrie.go index e6d22323566..59b6d988f59 100644 --- a/trie/patriciaMerkleTrie.go +++ b/trie/patriciaMerkleTrie.go @@ -216,9 +216,11 @@ func (tr *patriciaMerkleTrie) Commit() error { defer tr.mutOperation.Unlock() if tr.root == nil { + log.Trace("trying to commit empty trie") return nil } if !tr.root.isDirty() { + log.Trace("trying to commit clean trie", "root", tr.root.getHash()) return nil } err := tr.root.setRootHash() From d9a1bad9710af955ec8abc0588aaccefc34580f3 Mon Sep 17 00:00:00 2001 From: jules01 Date: Thu, 23 Mar 2023 17:33:40 +0200 Subject: [PATCH 10/20] - linter fix --- integrationTests/vm/txsFee/asyncESDT_test.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/integrationTests/vm/txsFee/asyncESDT_test.go b/integrationTests/vm/txsFee/asyncESDT_test.go index 7739e6f0bf3..a4318ad54f0 100644 --- a/integrationTests/vm/txsFee/asyncESDT_test.go +++ b/integrationTests/vm/txsFee/asyncESDT_test.go @@ -551,8 +551,6 @@ func TestAsyncESDTCallForThirdContractShouldWork(t *testing.T) { // do nothing, just iterate } - select { - case err = <-leaves.ErrChan: - require.Nil(t, err) - } + err = <-leaves.ErrChan + require.Nil(t, err) } From bcf043b5e5bcaf3de3554c35ad4fcdc6a1f551b1 Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Thu, 23 Mar 2023 17:37:00 +0200 Subject: [PATCH 11/20] add unit tests --- state/accountsDB_test.go | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/state/accountsDB_test.go b/state/accountsDB_test.go index fb0179db1c3..8f623178686 100644 --- a/state/accountsDB_test.go +++ b/state/accountsDB_test.go @@ -2812,6 +2812,34 @@ func TestAccountsDb_Concurrent(t *testing.T) { testAccountMethodsConcurrency(t, adb, accountsAddresses, rootHash) } +func TestAccountsDB_SaveKeyValAfterAccountIsReverted(t *testing.T) { + t.Parallel() + + _, adb := getDefaultTrieAndAccountsDb() + addr := generateRandomByteArray(32) + + acc, _ := adb.LoadAccount(addr) + _ = adb.SaveAccount(acc) + + acc, _ = adb.LoadAccount(addr) + acc.(state.UserAccountHandler).IncreaseNonce(1) + _ = acc.(state.UserAccountHandler).SaveKeyValue([]byte("key"), []byte("value")) + _ = adb.SaveAccount(acc) + + err := adb.RevertToSnapshot(1) + require.Nil(t, err) + + acc, _ = adb.LoadAccount(addr) + _ = acc.(state.UserAccountHandler).SaveKeyValue([]byte("key"), []byte("value")) + _ = adb.SaveAccount(acc) + + _, err = adb.Commit() + + acc, err = adb.LoadAccount(addr) + require.Nil(t, err) + require.NotNil(t, acc) +} + func testAccountMethodsConcurrency( t *testing.T, adb state.AccountsAdapter, From 16822d117d05669f779febb2eed5d9c176d8af4a Mon Sep 17 00:00:00 2001 From: jules01 Date: Thu, 23 Mar 2023 17:54:11 +0200 Subject: [PATCH 12/20] - linter fix --- state/accountsDB_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/state/accountsDB_test.go 
b/state/accountsDB_test.go index e7c3a0b1438..644e358abb9 100644 --- a/state/accountsDB_test.go +++ b/state/accountsDB_test.go @@ -2834,6 +2834,7 @@ func TestAccountsDB_SaveKeyValAfterAccountIsReverted(t *testing.T) { _ = adb.SaveAccount(acc) _, err = adb.Commit() + require.Nil(t, err) acc, err = adb.LoadAccount(addr) require.Nil(t, err) From 2ad690e2e0fc0520f3986a3b6086f1b0da6a3696 Mon Sep 17 00:00:00 2001 From: jules01 Date: Thu, 23 Mar 2023 18:14:46 +0200 Subject: [PATCH 13/20] - changed log level --- trie/node.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/trie/node.go b/trie/node.go index cd62c4df2ec..e248733f6d4 100644 --- a/trie/node.go +++ b/trie/node.go @@ -120,7 +120,7 @@ func computeAndSetNodeHash(n node) ([]byte, error) { func getNodeFromDBAndDecode(n []byte, db common.DBWriteCacher, marshalizer marshal.Marshalizer, hasher hashing.Hasher) (node, error) { encChild, err := db.Get(n) if err != nil { - log.Trace(common.GetNodeFromDBErrorString, "error", err, "key", n, "stack trace", string(debug.Stack())) + log.Warn(common.GetNodeFromDBErrorString, "error", err, "key", n, "stack trace", string(debug.Stack())) return nil, fmt.Errorf(common.GetNodeFromDBErrorString+" %w for key %v", err, hex.EncodeToString(n)) } From ee6bb31e2e06b0b25d44f2cc942efe255c5c4490 Mon Sep 17 00:00:00 2001 From: jules01 Date: Fri, 24 Mar 2023 11:10:07 +0200 Subject: [PATCH 14/20] - fixed log print on getNodeFromDB error --- trie/node.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/trie/node.go b/trie/node.go index e248733f6d4..fd229363a24 100644 --- a/trie/node.go +++ b/trie/node.go @@ -13,6 +13,7 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/trie/keyBuilder" + logger "github.com/multiversx/mx-chain-logger-go" ) const ( @@ -120,7 +121,12 @@ func computeAndSetNodeHash(n node) ([]byte, error) { func getNodeFromDBAndDecode(n []byte, db common.DBWriteCacher, marshalizer marshal.Marshalizer, hasher hashing.Hasher) (node, error) { encChild, err := db.Get(n) if err != nil { - log.Warn(common.GetNodeFromDBErrorString, "error", err, "key", n, "stack trace", string(debug.Stack())) + logLevel := logger.LogWarning + if errors.IsClosingError(err) { + logLevel = logger.LogTrace + } + + log.Log(logLevel, common.GetNodeFromDBErrorString, "error", err, "key", n, "stack trace", string(debug.Stack())) return nil, fmt.Errorf(common.GetNodeFromDBErrorString+" %w for key %v", err, hex.EncodeToString(n)) } From bf672100f8c3469f9071c27de0b5d6cec20989ab Mon Sep 17 00:00:00 2001 From: jules01 Date: Fri, 24 Mar 2023 11:34:50 +0200 Subject: [PATCH 15/20] - refactored log print + added unit tests --- trie/node.go | 18 ++++++++++---- trie/node_test.go | 62 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 75 insertions(+), 5 deletions(-) diff --git a/trie/node.go b/trie/node.go index fd229363a24..e75c29d2ff3 100644 --- a/trie/node.go +++ b/trie/node.go @@ -121,18 +121,26 @@ func computeAndSetNodeHash(n node) ([]byte, error) { func getNodeFromDBAndDecode(n []byte, db common.DBWriteCacher, marshalizer marshal.Marshalizer, hasher hashing.Hasher) (node, error) { encChild, err := db.Get(n) if err != nil { - logLevel := logger.LogWarning - if errors.IsClosingError(err) { - logLevel = logger.LogTrace - } + treatLogError(log, err, n) - log.Log(logLevel, common.GetNodeFromDBErrorString, "error", err, "key", n, "stack trace", string(debug.Stack())) return nil, 
fmt.Errorf(common.GetNodeFromDBErrorString+" %w for key %v", err, hex.EncodeToString(n)) } return decodeNode(encChild, marshalizer, hasher) } +func treatLogError(logInstance logger.Logger, err error, key []byte) { + logLevel := logger.LogTrace + extraInfo := make([]interface{}, 0, 6) + extraInfo = append(extraInfo, "error", err, "key", key) + if !errors.IsClosingError(err) { + logLevel = logger.LogWarning + extraInfo = append(extraInfo, "stack trace", string(debug.Stack())) + } + + logInstance.Log(logLevel, common.GetNodeFromDBErrorString, extraInfo...) +} + func resolveIfCollapsed(n node, pos byte, db common.DBWriteCacher) error { err := n.isEmptyOrNil() if err != nil { diff --git a/trie/node_test.go b/trie/node_test.go index dbc30aa4174..3eb3e9ffb51 100644 --- a/trie/node_test.go +++ b/trie/node_test.go @@ -2,6 +2,7 @@ package trie import ( "context" + "errors" "testing" "time" @@ -9,9 +10,13 @@ import ( "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-go/common" dataMock "github.com/multiversx/mx-chain-go/dataRetriever/mock" + mxErrors "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/trie/keyBuilder" + logger "github.com/multiversx/mx-chain-logger-go" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestNode_hashChildrenAndNodeBranchNode(t *testing.T) { @@ -623,6 +628,63 @@ func TestShouldStopIfContextDoneBlockingIfBusy(t *testing.T) { }) } +func TestTreatLogError(t *testing.T) { + t.Parallel() + + t.Run("error is not of type of closing error", func(t *testing.T) { + t.Parallel() + + key := []byte("key") + err := errors.New("trie was not found") + wasCalled := false + logInstance := &testscommon.LoggerStub{ + LogCalled: func(logLevel logger.LogLevel, message string, args ...interface{}) { + wasCalled = true + require.Equal(t, logger.LogWarning, logLevel) + require.Equal(t, common.GetNodeFromDBErrorString, message) + require.Equal(t, 6, len(args)) + expectedFirst5Args := []interface{}{"error", err, "key", key, "stack trace"} + require.Equal(t, expectedFirst5Args, args[:5]) + }, + } + + treatLogError(logInstance, err, key) + assert.True(t, wasCalled) + treatLogError(log, err, key) //display only + }) + t.Run("error is of type of closing error", func(t *testing.T) { + t.Parallel() + + key := []byte("key") + numCalled := 0 + var err error + + logInstance := &testscommon.LoggerStub{ + LogCalled: func(logLevel logger.LogLevel, message string, args ...interface{}) { + numCalled++ + require.Equal(t, logger.LogTrace, logLevel) + require.Equal(t, common.GetNodeFromDBErrorString, message) + require.Equal(t, 4, len(args)) + expectedFirst5Args := []interface{}{"error", err, "key", key} + require.Equal(t, expectedFirst5Args, args) + }, + } + + t.Run("db is closed", func(t *testing.T) { + crtCounter := numCalled + err = storage.ErrDBIsClosed + treatLogError(logInstance, err, key) + assert.Equal(t, crtCounter+1, numCalled) + }) + t.Run("context closing", func(t *testing.T) { + crtCounter := numCalled + err = mxErrors.ErrContextClosing + treatLogError(logInstance, err, key) + assert.Equal(t, crtCounter+1, numCalled) + }) + }) +} + func Benchmark_ShouldStopIfContextDoneBlockingIfBusy(b *testing.B) { ctx := context.Background() b.ResetTimer() From cfaaacabaff24a6ddde2285e63d0f9e1ef278cfd Mon Sep 17 00:00:00 2001 From: jules01 Date: Fri, 24 Mar 2023 17:44:39 +0200 Subject: [PATCH 16/20] - refactored print function 
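The refactor replaces the per-error branching with a single gate on the logger's
level, so the comparatively expensive debug.Stack() capture only happens when
trace logging is enabled. A rough self-contained sketch of the same gating,
where miniLogger and logGetNodeError are hypothetical stand-ins for the
project's logger interface and treatLogError (illustration only, not the actual
implementation):

package trielog

import (
	"fmt"
	"runtime/debug"
)

type logLevel int

const (
	logTrace logLevel = iota
	logDebug
)

// miniLogger is a hypothetical minimal logger, not the real logger.Logger.
type miniLogger struct{ level logLevel }

func (l *miniLogger) GetLevel() logLevel { return l.level }

func (l *miniLogger) Trace(message string, args ...interface{}) {
	fmt.Println(append([]interface{}{message}, args...)...)
}

// logGetNodeError bails out before building any log arguments, so the
// debug.Stack() capture is skipped entirely unless trace is enabled.
func logGetNodeError(l *miniLogger, err error, key []byte) {
	if l.GetLevel() != logTrace {
		return
	}

	l.Trace("getNodeFromDB error", "error", err, "key", key, "stack trace", string(debug.Stack()))
}

The design point is ordering: the level check happens before any argument is
built, which is what keeps the stack capture off the hot path.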
--- trie/node.go | 10 +++------ trie/node_test.go | 53 +++++++++++++++++------------------------------ 2 files changed, 22 insertions(+), 41 deletions(-) diff --git a/trie/node.go b/trie/node.go index e75c29d2ff3..c69d0a9d484 100644 --- a/trie/node.go +++ b/trie/node.go @@ -130,15 +130,11 @@ func getNodeFromDBAndDecode(n []byte, db common.DBWriteCacher, marshalizer marsh } func treatLogError(logInstance logger.Logger, err error, key []byte) { - logLevel := logger.LogTrace - extraInfo := make([]interface{}, 0, 6) - extraInfo = append(extraInfo, "error", err, "key", key) - if !errors.IsClosingError(err) { - logLevel = logger.LogWarning - extraInfo = append(extraInfo, "stack trace", string(debug.Stack())) + if logInstance.GetLevel() != logger.LogTrace { + return } - logInstance.Log(logLevel, common.GetNodeFromDBErrorString, extraInfo...) + logInstance.Trace(common.GetNodeFromDBErrorString, "error", err, "key", key, "stack trace", string(debug.Stack())) } func resolveIfCollapsed(n node, pos byte, db common.DBWriteCacher) error { diff --git a/trie/node_test.go b/trie/node_test.go index 3eb3e9ffb51..35b5de45d35 100644 --- a/trie/node_test.go +++ b/trie/node_test.go @@ -10,8 +10,6 @@ import ( "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-go/common" dataMock "github.com/multiversx/mx-chain-go/dataRetriever/mock" - mxErrors "github.com/multiversx/mx-chain-go/errors" - "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/trie/keyBuilder" logger "github.com/multiversx/mx-chain-logger-go" @@ -631,57 +629,44 @@ func TestShouldStopIfContextDoneBlockingIfBusy(t *testing.T) { func TestTreatLogError(t *testing.T) { t.Parallel() - t.Run("error is not of type of closing error", func(t *testing.T) { + t.Run("logger instance is not in Trace mode, should not call", func(t *testing.T) { t.Parallel() key := []byte("key") err := errors.New("trie was not found") - wasCalled := false logInstance := &testscommon.LoggerStub{ - LogCalled: func(logLevel logger.LogLevel, message string, args ...interface{}) { - wasCalled = true - require.Equal(t, logger.LogWarning, logLevel) - require.Equal(t, common.GetNodeFromDBErrorString, message) - require.Equal(t, 6, len(args)) - expectedFirst5Args := []interface{}{"error", err, "key", key, "stack trace"} - require.Equal(t, expectedFirst5Args, args[:5]) + GetLevelCalled: func() logger.LogLevel { + return logger.LogDebug + }, + TraceCalled: func(message string, args ...interface{}) { + assert.Fail(t, "should have not called Log") }, } treatLogError(logInstance, err, key) - assert.True(t, wasCalled) treatLogError(log, err, key) //display only }) - t.Run("error is of type of closing error", func(t *testing.T) { + t.Run("logger instance is in Trace mode, should call", func(t *testing.T) { t.Parallel() key := []byte("key") - numCalled := 0 - var err error - + wasCalled := false + err := errors.New("error") logInstance := &testscommon.LoggerStub{ - LogCalled: func(logLevel logger.LogLevel, message string, args ...interface{}) { - numCalled++ - require.Equal(t, logger.LogTrace, logLevel) + GetLevelCalled: func() logger.LogLevel { + return logger.LogTrace + }, + TraceCalled: func(message string, args ...interface{}) { + wasCalled = true require.Equal(t, common.GetNodeFromDBErrorString, message) - require.Equal(t, 4, len(args)) - expectedFirst5Args := []interface{}{"error", err, "key", key} - require.Equal(t, expectedFirst5Args, args) + require.Equal(t, 6, len(args)) + 
expectedFirst5Args := []interface{}{"error", err, "key", key, "stack trace"} + require.Equal(t, expectedFirst5Args, args[:5]) }, } - t.Run("db is closed", func(t *testing.T) { - crtCounter := numCalled - err = storage.ErrDBIsClosed - treatLogError(logInstance, err, key) - assert.Equal(t, crtCounter+1, numCalled) - }) - t.Run("context closing", func(t *testing.T) { - crtCounter := numCalled - err = mxErrors.ErrContextClosing - treatLogError(logInstance, err, key) - assert.Equal(t, crtCounter+1, numCalled) - }) + treatLogError(logInstance, err, key) + assert.True(t, wasCalled) }) } From 1ab258885cc1162524ac3b2663f383d0f6e1511f Mon Sep 17 00:00:00 2001 From: jules01 Date: Tue, 28 Mar 2023 12:30:23 +0300 Subject: [PATCH 17/20] - fix rewards broadcast by not filtering out the metachain shard. The metachain nodes will be able to broadcast them without the exception. - fixed a print in miniblock tracker --- .../interceptedRewardTransaction.go | 5 -- .../interceptedRewardTransaction_test.go | 83 +++++++++++++++++++ process/track/miniBlockTrack.go | 2 +- 3 files changed, 84 insertions(+), 6 deletions(-) diff --git a/process/rewardTransaction/interceptedRewardTransaction.go b/process/rewardTransaction/interceptedRewardTransaction.go index 73e19fed81d..e96a3cf0eca 100644 --- a/process/rewardTransaction/interceptedRewardTransaction.go +++ b/process/rewardTransaction/interceptedRewardTransaction.go @@ -82,11 +82,6 @@ func (inRTx *InterceptedRewardTransaction) processFields(rewardTxBuff []byte) er inRTx.rcvShard = inRTx.coordinator.ComputeId(inRTx.rTx.RcvAddr) inRTx.sndShard = core.MetachainShardId - if inRTx.coordinator.SelfId() == core.MetachainShardId { - inRTx.isForCurrentShard = false - return nil - } - isForCurrentShardRecv := inRTx.rcvShard == inRTx.coordinator.SelfId() isForCurrentShardSender := inRTx.sndShard == inRTx.coordinator.SelfId() inRTx.isForCurrentShard = isForCurrentShardRecv || isForCurrentShardSender diff --git a/process/rewardTransaction/interceptedRewardTransaction_test.go b/process/rewardTransaction/interceptedRewardTransaction_test.go index 1fe5c3d1412..7a6698a1e40 100644 --- a/process/rewardTransaction/interceptedRewardTransaction_test.go +++ b/process/rewardTransaction/interceptedRewardTransaction_test.go @@ -1,6 +1,7 @@ package rewardTransaction_test import ( + "bytes" "fmt" "math/big" "testing" @@ -317,6 +318,88 @@ func TestNewInterceptedRewardTransaction_CheckValidityShouldWork(t *testing.T) { assert.Nil(t, err) } +func TestNewInterceptedRewardTransaction_IsForCurrentShard(t *testing.T) { + t.Parallel() + + receiverAddress := []byte("receiver address") + testShardID := uint32(2) + value := big.NewInt(100) + rewTx := rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: value, + RcvAddr: receiverAddress, + } + + mockShardCoordinator := &mock.ShardCoordinatorStub{} + marshalizer := &mock.MarshalizerMock{} + txBuff, _ := marshalizer.Marshal(&rewTx) + t.Run("same shard ID with the receiver should return true", func(t *testing.T) { + mockShardCoordinator.ComputeIdCalled = func(address []byte) uint32 { + if bytes.Equal(address, receiverAddress) { + return testShardID + } + + return 0 + } + mockShardCoordinator.SelfIdCalled = func() uint32 { + return testShardID + } + + irt, err := rewardTransaction.NewInterceptedRewardTransaction( + txBuff, + marshalizer, + &hashingMocks.HasherMock{}, + createMockPubkeyConverter(), + mockShardCoordinator) + assert.Nil(t, err) + + assert.True(t, irt.IsForCurrentShard()) + }) + t.Run("metachain should return true", func(t *testing.T) { + 
mockShardCoordinator.ComputeIdCalled = func(address []byte) uint32 { + if bytes.Equal(address, receiverAddress) { + return testShardID + } + + return 0 + } + mockShardCoordinator.SelfIdCalled = func() uint32 { + return core.MetachainShardId + } + + irt, err := rewardTransaction.NewInterceptedRewardTransaction( + txBuff, + marshalizer, + &hashingMocks.HasherMock{}, + createMockPubkeyConverter(), + mockShardCoordinator) + assert.Nil(t, err) + assert.True(t, irt.IsForCurrentShard()) + }) + t.Run("different shard should return true", func(t *testing.T) { + mockShardCoordinator.ComputeIdCalled = func(address []byte) uint32 { + if bytes.Equal(address, receiverAddress) { + return testShardID + } + + return 0 + } + mockShardCoordinator.SelfIdCalled = func() uint32 { + return testShardID + 1 // different with the receiver but not metachain + } + + irt, err := rewardTransaction.NewInterceptedRewardTransaction( + txBuff, + marshalizer, + &hashingMocks.HasherMock{}, + createMockPubkeyConverter(), + mockShardCoordinator) + assert.Nil(t, err) + assert.False(t, irt.IsForCurrentShard()) + }) +} + func TestInterceptedRewardTransaction_Type(t *testing.T) { t.Parallel() diff --git a/process/track/miniBlockTrack.go b/process/track/miniBlockTrack.go index 538dbdf0740..900846f67ff 100644 --- a/process/track/miniBlockTrack.go +++ b/process/track/miniBlockTrack.go @@ -73,7 +73,7 @@ func (mbt *miniBlockTrack) receivedMiniBlock(key []byte, value interface{}) { return } - log.Trace("miniBlockTrack.receivedMiniBlock", + log.Debug("received miniblock from network in block tracker", "hash", key, "sender", miniBlock.SenderShardID, "receiver", miniBlock.ReceiverShardID, From 00d3cfdb41693b4c18e7e71c452db9688560319b Mon Sep 17 00:00:00 2001 From: jules01 Date: Tue, 28 Mar 2023 12:42:40 +0300 Subject: [PATCH 18/20] - fixed test name --- process/rewardTransaction/interceptedRewardTransaction_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/process/rewardTransaction/interceptedRewardTransaction_test.go b/process/rewardTransaction/interceptedRewardTransaction_test.go index 7a6698a1e40..eba6830841a 100644 --- a/process/rewardTransaction/interceptedRewardTransaction_test.go +++ b/process/rewardTransaction/interceptedRewardTransaction_test.go @@ -377,7 +377,7 @@ func TestNewInterceptedRewardTransaction_IsForCurrentShard(t *testing.T) { assert.Nil(t, err) assert.True(t, irt.IsForCurrentShard()) }) - t.Run("different shard should return true", func(t *testing.T) { + t.Run("different shard should return false", func(t *testing.T) { mockShardCoordinator.ComputeIdCalled = func(address []byte) uint32 { if bytes.Equal(address, receiverAddress) { return testShardID From 6102c135f64e8dc5341f4b8a22498daadebb650c Mon Sep 17 00:00:00 2001 From: jules01 Date: Tue, 28 Mar 2023 15:53:56 +0300 Subject: [PATCH 19/20] - removed t.Skip from tests --- factory/api/apiResolverFactory_test.go | 4 -- .../bootstrapComponentsHandler_test.go | 18 ----- factory/bootstrap/bootstrapComponents_test.go | 24 ------- .../consensusComponentsHandler_test.go | 9 --- factory/consensus/consensusComponents_test.go | 60 ---------------- factory/core/coreComponentsHandler_test.go | 9 --- factory/core/coreComponents_test.go | 39 ----------- .../crypto/cryptoComponentsHandler_test.go | 15 ---- factory/crypto/cryptoComponents_test.go | 69 ------------------- factory/crypto/multiSignerContainer_test.go | 9 --- factory/data/dataComponentsHandler_test.go | 12 ---- factory/data/dataComponents_test.go | 24 ------- 
.../network/networkComponentsHandler_test.go | 9 --- factory/network/networkComponents_test.go | 18 ----- .../processing/blockProcessorCreator_test.go | 6 -- .../processComponentsHandler_test.go | 9 --- factory/processing/processComponents_test.go | 9 --- factory/state/stateComponentsHandler_test.go | 15 ---- factory/state/stateComponents_test.go | 15 ---- .../status/statusComponentsHandler_test.go | 12 ---- factory/status/statusComponents_test.go | 27 -------- .../statusCoreComponentsHandler_test.go | 9 --- .../statusCore/statusCoreComponents_test.go | 12 ---- genesis/process/genesisBlockCreator_test.go | 25 ++----- 24 files changed, 5 insertions(+), 453 deletions(-) diff --git a/factory/api/apiResolverFactory_test.go b/factory/api/apiResolverFactory_test.go index 7462ae0496e..ac0157c1ba7 100644 --- a/factory/api/apiResolverFactory_test.go +++ b/factory/api/apiResolverFactory_test.go @@ -15,10 +15,6 @@ import ( ) func TestCreateApiResolver(t *testing.T) { - if testing.Short() { - t.Skip("this is not a short test") - } - shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) coreComponents := componentsMock.GetCoreComponents() cryptoComponents := componentsMock.GetCryptoComponents(coreComponents) diff --git a/factory/bootstrap/bootstrapComponentsHandler_test.go b/factory/bootstrap/bootstrapComponentsHandler_test.go index c4934611449..edea9b7ecde 100644 --- a/factory/bootstrap/bootstrapComponentsHandler_test.go +++ b/factory/bootstrap/bootstrapComponentsHandler_test.go @@ -13,9 +13,6 @@ import ( // ------------ Test ManagedBootstrapComponents -------------------- func TestNewManagedBootstrapComponents(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetBootStrapFactoryArgs() bcf, _ := bootstrap.NewBootstrapComponentsFactory(args) @@ -27,9 +24,6 @@ func TestNewManagedBootstrapComponents(t *testing.T) { func TestNewBootstrapComponentsFactory_NilFactory(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } mbc, err := bootstrap.NewManagedBootstrapComponents(nil) @@ -39,9 +33,6 @@ func TestNewBootstrapComponentsFactory_NilFactory(t *testing.T) { func TestManagedBootstrapComponents_CheckSubcomponentsNoCreate(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetBootStrapFactoryArgs() bcf, _ := bootstrap.NewBootstrapComponentsFactory(args) @@ -53,9 +44,6 @@ func TestManagedBootstrapComponents_CheckSubcomponentsNoCreate(t *testing.T) { func TestManagedBootstrapComponents_Create(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetBootStrapFactoryArgs() bcf, _ := bootstrap.NewBootstrapComponentsFactory(args) @@ -70,9 +58,6 @@ func TestManagedBootstrapComponents_Create(t *testing.T) { func TestManagedBootstrapComponents_CreateNilInternalMarshalizer(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetBootStrapFactoryArgs() coreComponents := componentsMock.GetDefaultCoreComponents() @@ -87,9 +72,6 @@ func TestManagedBootstrapComponents_CreateNilInternalMarshalizer(t *testing.T) { func TestManagedBootstrapComponents_Close(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetBootStrapFactoryArgs() diff --git a/factory/bootstrap/bootstrapComponents_test.go b/factory/bootstrap/bootstrapComponents_test.go index dcbb5a0c8c4..fa4dee8ef82 
100644 --- a/factory/bootstrap/bootstrapComponents_test.go +++ b/factory/bootstrap/bootstrapComponents_test.go @@ -13,9 +13,6 @@ import ( // ------------ Test BootstrapComponentsFactory -------------------- func TestNewBootstrapComponentsFactory_OkValuesShouldWork(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetBootStrapFactoryArgs() @@ -27,9 +24,6 @@ func TestNewBootstrapComponentsFactory_OkValuesShouldWork(t *testing.T) { func TestNewBootstrapComponentsFactory_NilCoreComponents(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetBootStrapFactoryArgs() args.CoreComponents = nil @@ -42,9 +36,6 @@ func TestNewBootstrapComponentsFactory_NilCoreComponents(t *testing.T) { func TestNewBootstrapComponentsFactory_NilCryptoComponents(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetBootStrapFactoryArgs() args.CryptoComponents = nil @@ -57,9 +48,6 @@ func TestNewBootstrapComponentsFactory_NilCryptoComponents(t *testing.T) { func TestNewBootstrapComponentsFactory_NilNetworkComponents(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetBootStrapFactoryArgs() args.NetworkComponents = nil @@ -72,9 +60,6 @@ func TestNewBootstrapComponentsFactory_NilNetworkComponents(t *testing.T) { func TestNewBootstrapComponentsFactory_NilWorkingDir(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetBootStrapFactoryArgs() args.WorkingDir = "" @@ -87,9 +72,6 @@ func TestNewBootstrapComponentsFactory_NilWorkingDir(t *testing.T) { func TestBootstrapComponentsFactory_CreateShouldWork(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetBootStrapFactoryArgs() @@ -103,9 +85,6 @@ func TestBootstrapComponentsFactory_CreateShouldWork(t *testing.T) { func TestBootstrapComponentsFactory_CreateBootstrapDataProviderCreationFail(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetBootStrapFactoryArgs() coreComponents := componentsMock.GetDefaultCoreComponents() @@ -122,9 +101,6 @@ func TestBootstrapComponentsFactory_CreateBootstrapDataProviderCreationFail(t *t func TestBootstrapComponentsFactory_CreateEpochStartBootstrapCreationFail(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetBootStrapFactoryArgs() coreComponents := componentsMock.GetDefaultCoreComponents() diff --git a/factory/consensus/consensusComponentsHandler_test.go b/factory/consensus/consensusComponentsHandler_test.go index 20b0e5b4e3a..87d28ce6960 100644 --- a/factory/consensus/consensusComponentsHandler_test.go +++ b/factory/consensus/consensusComponentsHandler_test.go @@ -14,9 +14,6 @@ import ( // ------------ Test ManagedConsensusComponentsFactory -------------------- func TestManagedConsensusComponents_CreateWithInvalidArgsShouldErr(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) args := componentsMock.GetConsensusArgs(shardCoordinator) @@ -36,9 +33,6 @@ func TestManagedConsensusComponents_CreateWithInvalidArgsShouldErr(t *testing.T) func TestManagedConsensusComponents_CreateShouldWork(t *testing.T) { t.Parallel() - if 
testing.Short() { - t.Skip("this is not a short test") - } shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) args := componentsMock.GetConsensusArgs(shardCoordinator) @@ -62,9 +56,6 @@ func TestManagedConsensusComponents_CreateShouldWork(t *testing.T) { func TestManagedConsensusComponents_Close(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) consensusArgs := componentsMock.GetConsensusArgs(shardCoordinator) diff --git a/factory/consensus/consensusComponents_test.go b/factory/consensus/consensusComponents_test.go index 0fb97ab3d4f..1fb5291510e 100644 --- a/factory/consensus/consensusComponents_test.go +++ b/factory/consensus/consensusComponents_test.go @@ -24,9 +24,6 @@ import ( // ------------ Test ConsensusComponentsFactory -------------------- func TestNewConsensusComponentsFactory_OkValuesShouldWork(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) args := componentsMock.GetConsensusArgs(shardCoordinator) @@ -39,9 +36,6 @@ func TestNewConsensusComponentsFactory_OkValuesShouldWork(t *testing.T) { func TestNewConsensusComponentsFactory_NilCoreComponents(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) args := componentsMock.GetConsensusArgs(shardCoordinator) @@ -55,9 +49,6 @@ func TestNewConsensusComponentsFactory_NilCoreComponents(t *testing.T) { func TestNewConsensusComponentsFactory_NilDataComponents(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) args := componentsMock.GetConsensusArgs(shardCoordinator) @@ -71,9 +62,6 @@ func TestNewConsensusComponentsFactory_NilDataComponents(t *testing.T) { func TestNewConsensusComponentsFactory_NilCryptoComponents(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) args := componentsMock.GetConsensusArgs(shardCoordinator) @@ -87,9 +75,6 @@ func TestNewConsensusComponentsFactory_NilCryptoComponents(t *testing.T) { func TestNewConsensusComponentsFactory_NilNetworkComponents(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) args := componentsMock.GetConsensusArgs(shardCoordinator) @@ -103,9 +88,6 @@ func TestNewConsensusComponentsFactory_NilNetworkComponents(t *testing.T) { func TestNewConsensusComponentsFactory_NilProcessComponents(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) args := componentsMock.GetConsensusArgs(shardCoordinator) @@ -119,9 +101,6 @@ func TestNewConsensusComponentsFactory_NilProcessComponents(t *testing.T) { func TestNewConsensusComponentsFactory_NilStateComponents(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) args := componentsMock.GetConsensusArgs(shardCoordinator) @@ -136,9 +115,6 @@ func TestNewConsensusComponentsFactory_NilStateComponents(t *testing.T) { // ------------ Test Old Use Cases -------------------- func TestConsensusComponentsFactory_CreateGenesisBlockNotInitializedShouldErr(t *testing.T) { t.Parallel() 
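// For reference, illustration only: the guard deleted throughout this patch is
// Go's standard opt-out for `go test -short`, so with it gone these factory and
// component tests run in short mode as well. The idiom in isolation (test name
// hypothetical):
//
//	func TestLongRunningScenario(t *testing.T) {
//		if testing.Short() { // true only when `go test -short` was passed
//			t.Skip("this is not a short test")
//		}
//		// ... expensive component wiring and assertions ...
//	}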
- if testing.Short() { - t.Skip("this is not a short test") - } shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) consensusArgs := componentsMock.GetConsensusArgs(shardCoordinator) @@ -163,9 +139,6 @@ func TestConsensusComponentsFactory_CreateGenesisBlockNotInitializedShouldErr(t func TestConsensusComponentsFactory_CreateForShard(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) args := componentsMock.GetConsensusArgs(shardCoordinator) @@ -192,9 +165,6 @@ func (wp *wrappedProcessComponents) ShardCoordinator() sharding.Coordinator { func TestConsensusComponentsFactory_CreateForMeta(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) args := componentsMock.GetConsensusArgs(shardCoordinator) @@ -212,9 +182,6 @@ func TestConsensusComponentsFactory_CreateForMeta(t *testing.T) { func TestConsensusComponentsFactory_CreateNilShardCoordinator(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) consensusArgs := componentsMock.GetConsensusArgs(shardCoordinator) @@ -230,9 +197,6 @@ func TestConsensusComponentsFactory_CreateNilShardCoordinator(t *testing.T) { func TestConsensusComponentsFactory_CreateConsensusTopicCreateTopicError(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } localError := errors.New("error") shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) @@ -260,9 +224,6 @@ func TestConsensusComponentsFactory_CreateConsensusTopicCreateTopicError(t *test func TestConsensusComponentsFactory_CreateConsensusTopicNilMessageProcessor(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) args := componentsMock.GetConsensusArgs(shardCoordinator) @@ -279,9 +240,6 @@ func TestConsensusComponentsFactory_CreateConsensusTopicNilMessageProcessor(t *t func TestConsensusComponentsFactory_CreateNilSyncTimer(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) args := componentsMock.GetConsensusArgs(shardCoordinator) @@ -297,9 +255,6 @@ func TestConsensusComponentsFactory_CreateNilSyncTimer(t *testing.T) { func TestStartConsensus_ShardBootstrapperNilAccounts(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) args := componentsMock.GetConsensusArgs(shardCoordinator) @@ -315,9 +270,6 @@ func TestStartConsensus_ShardBootstrapperNilAccounts(t *testing.T) { func TestStartConsensus_ShardBootstrapperNilPoolHolder(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) shardCoordinator.CurrentShard = 0 @@ -336,9 +288,6 @@ func TestStartConsensus_ShardBootstrapperNilPoolHolder(t *testing.T) { func TestStartConsensus_MetaBootstrapperNilPoolHolder(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) shardCoordinator.CurrentShard = core.MetachainShardId @@ -365,9 +314,6 @@ func TestStartConsensus_MetaBootstrapperNilPoolHolder(t *testing.T) { func 
TestStartConsensus_MetaBootstrapperWrongNumberShards(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) args := componentsMock.GetConsensusArgs(shardCoordinator) @@ -384,9 +330,6 @@ func TestStartConsensus_MetaBootstrapperWrongNumberShards(t *testing.T) { func TestStartConsensus_ShardBootstrapperPubKeyToByteArrayError(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } localErr := errors.New("err") shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) @@ -406,9 +349,6 @@ func TestStartConsensus_ShardBootstrapperPubKeyToByteArrayError(t *testing.T) { func TestStartConsensus_ShardBootstrapperInvalidConsensusType(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) args := componentsMock.GetConsensusArgs(shardCoordinator) diff --git a/factory/core/coreComponentsHandler_test.go b/factory/core/coreComponentsHandler_test.go index c271963a8ea..7c5e3f3ac51 100644 --- a/factory/core/coreComponentsHandler_test.go +++ b/factory/core/coreComponentsHandler_test.go @@ -12,9 +12,6 @@ import ( // ------------ Test ManagedCoreComponents -------------------- func TestManagedCoreComponents_CreateWithInvalidArgsShouldErr(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreArgs := componentsMock.GetCoreArgs() coreArgs.Config.Marshalizer = config.MarshalizerConfig{ @@ -31,9 +28,6 @@ func TestManagedCoreComponents_CreateWithInvalidArgsShouldErr(t *testing.T) { func TestManagedCoreComponents_CreateShouldWork(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreArgs := componentsMock.GetCoreArgs() coreComponentsFactory, _ := coreComp.NewCoreComponentsFactory(coreArgs) @@ -75,9 +69,6 @@ func TestManagedCoreComponents_CreateShouldWork(t *testing.T) { func TestManagedCoreComponents_Close(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } coreArgs := componentsMock.GetCoreArgs() coreComponentsFactory, _ := coreComp.NewCoreComponentsFactory(coreArgs) diff --git a/factory/core/coreComponents_test.go b/factory/core/coreComponents_test.go index 6c8981e0812..12b2833d19d 100644 --- a/factory/core/coreComponents_test.go +++ b/factory/core/coreComponents_test.go @@ -14,9 +14,6 @@ import ( func TestNewCoreComponentsFactory_OkValuesShouldWork(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetCoreArgs() ccf, _ := coreComp.NewCoreComponentsFactory(args) @@ -26,9 +23,6 @@ func TestNewCoreComponentsFactory_OkValuesShouldWork(t *testing.T) { func TestCoreComponentsFactory_CreateCoreComponentsNoHasherConfigShouldErr(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetCoreArgs() args.Config = config.Config{ @@ -46,9 +40,6 @@ func TestCoreComponentsFactory_CreateCoreComponentsNoHasherConfigShouldErr(t *te func TestCoreComponentsFactory_CreateCoreComponentsInvalidHasherConfigShouldErr(t *testing.T) { t.Parallel() - if testing.Short() { - t.Skip("this is not a short test") - } args := componentsMock.GetCoreArgs() args.Config = config.Config{ @@ -69,9 +60,6 @@ func TestCoreComponentsFactory_CreateCoreComponentsInvalidHasherConfigShouldErr( func TestCoreComponentsFactory_CreateCoreComponentsNoInternalMarshallerConfigShouldErr(t *testing.T) 
{
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	args := componentsMock.GetCoreArgs()
 	args.Config = config.Config{
@@ -88,9 +76,6 @@ func TestCoreComponentsFactory_CreateCoreComponentsNoInternalMarshallerConfigSho
 func TestCoreComponentsFactory_CreateCoreComponentsInvalidInternalMarshallerConfigShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	args := componentsMock.GetCoreArgs()
 	args.Config = config.Config{
@@ -111,9 +96,6 @@ func TestCoreComponentsFactory_CreateCoreComponentsInvalidInternalMarshallerConf
 func TestCoreComponentsFactory_CreateCoreComponentsNoVmMarshallerConfigShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	args := componentsMock.GetCoreArgs()
 	args.Config = config.Config{
@@ -134,9 +116,6 @@ func TestCoreComponentsFactory_CreateCoreComponentsNoVmMarshallerConfigShouldErr
 func TestCoreComponentsFactory_CreateCoreComponentsInvalidVmMarshallerConfigShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	args := componentsMock.GetCoreArgs()
 	args.Config = config.Config{
@@ -160,9 +139,6 @@ func TestCoreComponentsFactory_CreateCoreComponentsInvalidVmMarshallerConfigShou
 func TestCoreComponentsFactory_CreateCoreComponentsNoTxSignMarshallerConfigShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	args := componentsMock.GetCoreArgs()
 	args.Config = config.Config{
@@ -186,9 +162,6 @@ func TestCoreComponentsFactory_CreateCoreComponentsNoTxSignMarshallerConfigShoul
 func TestCoreComponentsFactory_CreateCoreComponentsInvalidTxSignMarshallerConfigShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	args := componentsMock.GetCoreArgs()
 	args.Config = config.Config{
@@ -215,9 +188,6 @@ func TestCoreComponentsFactory_CreateCoreComponentsInvalidTxSignMarshallerConfig
 func TestCoreComponentsFactory_CreateCoreComponentsInvalidValPubKeyConverterShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	args := componentsMock.GetCoreArgs()
 	args.Config.ValidatorPubkeyConverter.Type = "invalid"
@@ -230,9 +200,6 @@ func TestCoreComponentsFactory_CreateCoreComponentsInvalidValPubKeyConverterShou
 func TestCoreComponentsFactory_CreateCoreComponentsInvalidAddrPubKeyConverterShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	args := componentsMock.GetCoreArgs()
 	args.Config.AddressPubkeyConverter.Type = "invalid"
@@ -245,9 +212,6 @@ func TestCoreComponentsFactory_CreateCoreComponentsInvalidAddrPubKeyConverterSho
 func TestCoreComponentsFactory_CreateCoreComponentsShouldWork(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	args := componentsMock.GetCoreArgs()
 	ccf, _ := coreComp.NewCoreComponentsFactory(args)
@@ -260,9 +224,6 @@ func TestCoreComponentsFactory_CreateCoreComponentsShouldWork(t *testing.T) {
 // ------------ Test CoreComponents --------------------
 func TestCoreComponents_CloseShouldWork(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	args := componentsMock.GetCoreArgs()
 	ccf, _ := coreComp.NewCoreComponentsFactory(args)
diff --git a/factory/crypto/cryptoComponentsHandler_test.go b/factory/crypto/cryptoComponentsHandler_test.go
index 218f84c43f2..f16e97be957 100644
--- a/factory/crypto/cryptoComponentsHandler_test.go
+++ b/factory/crypto/cryptoComponentsHandler_test.go
@@ -13,9 +13,6 @@ import (
 // ------------ Test ManagedCryptoComponents --------------------
 func TestManagedCryptoComponents_CreateWithInvalidArgsShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	args := componentsMock.GetCryptoArgs(coreComponents)
@@ -30,9 +27,6 @@ func TestManagedCryptoComponents_CreateWithInvalidArgsShouldErr(t *testing.T) {
 func TestManagedCryptoComponents_CreateShouldWork(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	args := componentsMock.GetCryptoArgs(coreComponents)
@@ -61,9 +55,6 @@ func TestManagedCryptoComponents_CreateShouldWork(t *testing.T) {
 func TestManagedCryptoComponents_CheckSubcomponents(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	managedCryptoComponents := getManagedCryptoComponents(t)
@@ -73,9 +64,6 @@ func TestManagedCryptoComponents_CheckSubcomponents(t *testing.T) {
 func TestManagedCryptoComponents_Close(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	managedCryptoComponents := getManagedCryptoComponents(t)
@@ -101,9 +89,6 @@ func getManagedCryptoComponents(t *testing.T) factory.CryptoComponentsHandler {
 func TestManagedCryptoComponents_Clone(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	args := componentsMock.GetCryptoArgs(coreComponents)
diff --git a/factory/crypto/cryptoComponents_test.go b/factory/crypto/cryptoComponents_test.go
index 1f0496b129f..da1ecf53a5d 100644
--- a/factory/crypto/cryptoComponents_test.go
+++ b/factory/crypto/cryptoComponents_test.go
@@ -17,9 +17,6 @@ import (
 func TestNewCryptoComponentsFactory_NiCoreComponentsHandlerShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	args := componentsMock.GetCryptoArgs(nil)
 	ccf, err := cryptoComp.NewCryptoComponentsFactory(args)
@@ -29,9 +26,6 @@ func TestNewCryptoComponentsFactory_NiCoreComponentsHandlerShouldErr(t *testing.
 func TestNewCryptoComponentsFactory_NilPemFileShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	args := componentsMock.GetCryptoArgs(coreComponents)
@@ -43,9 +37,6 @@ func TestNewCryptoComponentsFactory_NilPemFileShouldErr(t *testing.T) {
 func TestCryptoComponentsFactory_CreateCryptoParamsNilKeyLoaderShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	args := componentsMock.GetCryptoArgs(coreComponents)
@@ -58,9 +49,6 @@ func TestCryptoComponentsFactory_CreateCryptoParamsNilKeyLoaderShouldErr(t *test
 func TestNewCryptoComponentsFactory_OkValsShouldWork(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	args := componentsMock.GetCryptoArgs(coreComponents)
@@ -71,9 +59,6 @@ func TestNewCryptoComponentsFactory_OkValsShouldWork(t *testing.T) {
 func TestNewCryptoComponentsFactory_DisabledSigShouldWork(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	args := componentsMock.GetCryptoArgs(coreComponents)
@@ -85,9 +70,6 @@ func TestNewCryptoComponentsFactory_DisabledSigShouldWork(t *testing.T) {
 func TestNewCryptoComponentsFactory_CreateInvalidConsensusTypeShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	args := componentsMock.GetCryptoArgs(coreComponents)
@@ -101,9 +83,6 @@ func TestNewCryptoComponentsFactory_CreateInvalidConsensusTypeShouldErr(t *testi
 func TestCryptoComponentsFactory_CreateShouldErrDueToMissingConfig(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	args := componentsMock.GetCryptoArgs(coreComponents)
@@ -124,9 +103,6 @@ func TestCryptoComponentsFactory_CreateShouldErrDueToMissingConfig(t *testing.T)
 func TestCryptoComponentsFactory_CreateInvalidMultiSigHasherShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	args := componentsMock.GetCryptoArgs(coreComponents)
@@ -141,9 +117,6 @@ func TestCryptoComponentsFactory_CreateInvalidMultiSigHasherShouldErr(t *testing
 func TestCryptoComponentsFactory_CreateOK(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	args := componentsMock.GetCryptoArgs(coreComponents)
@@ -156,9 +129,6 @@ func TestCryptoComponentsFactory_CreateOK(t *testing.T) {
 func TestCryptoComponentsFactory_CreateWithDisabledSig(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	args := componentsMock.GetCryptoArgs(coreComponents)
@@ -172,9 +142,6 @@ func TestCryptoComponentsFactory_CreateWithDisabledSig(t *testing.T) {
 func TestCryptoComponentsFactory_CreateWithAutoGenerateKey(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	args := componentsMock.GetCryptoArgs(coreComponents)
@@ -188,9 +155,6 @@ func TestCryptoComponentsFactory_CreateWithAutoGenerateKey(t *testing.T) {
 func TestCryptoComponentsFactory_CreateSingleSignerInvalidConsensusTypeShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	args := componentsMock.GetCryptoArgs(coreComponents)
@@ -206,9 +170,6 @@ func TestCryptoComponentsFactory_CreateSingleSignerInvalidConsensusTypeShouldErr
 func TestCryptoComponentsFactory_CreateSingleSignerOK(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	args := componentsMock.GetCryptoArgs(coreComponents)
@@ -223,9 +184,6 @@ func TestCryptoComponentsFactory_CreateSingleSignerOK(t *testing.T) {
 func TestCryptoComponentsFactory_CreateMultiSignerInvalidConsensusTypeShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	args := componentsMock.GetCryptoArgs(coreComponents)
@@ -241,9 +199,6 @@ func TestCryptoComponentsFactory_CreateMultiSignerInvalidConsensusTypeShouldErr(
 func TestCryptoComponentsFactory_CreateMultiSignerOK(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	args := componentsMock.GetCryptoArgs(coreComponents)
@@ -261,9 +216,6 @@ func TestCryptoComponentsFactory_CreateMultiSignerOK(t *testing.T) {
 func TestCryptoComponentsFactory_GetSuiteInvalidConsensusTypeShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	args := componentsMock.GetCryptoArgs(coreComponents)
@@ -279,9 +231,6 @@ func TestCryptoComponentsFactory_GetSuiteInvalidConsensusTypeShouldErr(t *testin
 func TestCryptoComponentsFactory_GetSuiteOK(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	args := componentsMock.GetCryptoArgs(coreComponents)
@@ -297,9 +246,6 @@ func TestCryptoComponentsFactory_GetSuiteOK(t *testing.T) {
 func TestCryptoComponentsFactory_CreateCryptoParamsInvalidPrivateKeyByteArrayShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	args := componentsMock.GetCryptoArgs(coreComponents)
@@ -316,9 +262,6 @@ func TestCryptoComponentsFactory_CreateCryptoParamsInvalidPrivateKeyByteArraySho
 func TestCryptoComponentsFactory_CreateCryptoParamsLoadKeysFailShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	expectedError := errors.New("expected error")
@@ -337,9 +280,6 @@ func TestCryptoComponentsFactory_CreateCryptoParamsLoadKeysFailShouldErr(t *test
 func TestCryptoComponentsFactory_CreateCryptoParamsOK(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	args := componentsMock.GetCryptoArgs(coreComponents)
@@ -355,9 +295,6 @@ func TestCryptoComponentsFactory_CreateCryptoParamsOK(t *testing.T) {
 func TestCryptoComponentsFactory_GetSkPkInvalidSkBytesShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	setSk := []byte("zxwY")
 	setPk := []byte(componentsMock.DummyPk)
@@ -374,9 +311,6 @@ func TestCryptoComponentsFactory_GetSkPkInvalidSkBytesShouldErr(t *testing.T) {
 func TestCryptoComponentsFactory_GetSkPkInvalidPkBytesShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	setSk := []byte(componentsMock.DummySk)
 	setPk := "0"
@@ -394,9 +328,6 @@ func TestCryptoComponentsFactory_GetSkPkInvalidPkBytesShouldErr(t *testing.T) {
 func TestCryptoComponentsFactory_GetSkPkOK(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	args := componentsMock.GetCryptoArgs(coreComponents)
diff --git a/factory/crypto/multiSignerContainer_test.go b/factory/crypto/multiSignerContainer_test.go
index 82ed4b0d1a7..6a392b46d72 100644
--- a/factory/crypto/multiSignerContainer_test.go
+++ b/factory/crypto/multiSignerContainer_test.go
@@ -269,9 +269,6 @@ func TestContainer_sortMultiSignerConfig(t *testing.T) {
 func Test_getMultiSigHasherFromConfigInvalidHasherShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	args := createDefaultMultiSignerArgs()
 	args.ConsensusType = ""
@@ -284,9 +281,6 @@ func Test_getMultiSigHasherFromConfigInvalidHasherShouldErr(t *testing.T) {
 func Test_getMultiSigHasherFromConfigMismatchConsensusTypeMultiSigHasher(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	args := createDefaultMultiSignerArgs()
 	args.MultiSigHasherType = "sha256"
@@ -298,9 +292,6 @@ func Test_getMultiSigHasherFromConfigMismatchConsensusTypeMultiSigHasher(t *test
 func Test_getMultiSigHasherFromConfigOK(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	args := createDefaultMultiSignerArgs()
 	args.ConsensusType = "bls"
diff --git a/factory/data/dataComponentsHandler_test.go b/factory/data/dataComponentsHandler_test.go
index f59f99e6948..85c6e84f55a 100644
--- a/factory/data/dataComponentsHandler_test.go
+++ b/factory/data/dataComponentsHandler_test.go
@@ -13,9 +13,6 @@ import (
 // ------------ Test ManagedDataComponents --------------------
 func TestManagedDataComponents_CreateWithInvalidArgsShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
@@ -31,9 +28,6 @@ func TestManagedDataComponents_CreateWithInvalidArgsShouldErr(t *testing.T) {
 func TestManagedDataComponents_CreateShouldWork(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
@@ -54,9 +48,6 @@ func TestManagedDataComponents_CreateShouldWork(t *testing.T) {
 func TestManagedDataComponents_Close(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
@@ -73,9 +64,6 @@ func TestManagedDataComponents_Close(t *testing.T) {
 func TestManagedDataComponents_Clone(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
diff --git a/factory/data/dataComponents_test.go b/factory/data/dataComponents_test.go
index a5de1d1d442..2714951ef7c 100644
--- a/factory/data/dataComponents_test.go
+++ b/factory/data/dataComponents_test.go
@@ -14,9 +14,6 @@ import (
 func TestNewDataComponentsFactory_NilShardCoordinatorShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
 	coreComponents := componentsMock.GetCoreComponents()
@@ -30,9 +27,6 @@ func TestNewDataComponentsFactory_NilShardCoordinatorShouldErr(t *testing.T) {
 func TestNewDataComponentsFactory_NilCoreComponentsShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
 	args := componentsMock.GetDataArgs(nil, shardCoordinator)
@@ -45,9 +39,6 @@ func TestNewDataComponentsFactory_NilCoreComponentsShouldErr(t *testing.T) {
 func TestNewDataComponentsFactory_NilEpochStartNotifierShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
 	coreComponents := componentsMock.GetCoreComponents()
@@ -61,9 +52,6 @@ func TestNewDataComponentsFactory_NilEpochStartNotifierShouldErr(t *testing.T) {
 func TestNewDataComponentsFactory_OkValsShouldWork(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
 	coreComponents := componentsMock.GetCoreComponents()
@@ -75,9 +63,6 @@ func TestNewDataComponentsFactory_OkValsShouldWork(t *testing.T) {
 func TestDataComponentsFactory_CreateShouldErrDueBadConfig(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
 	coreComponents := componentsMock.GetCoreComponents()
@@ -93,9 +78,6 @@ func TestDataComponentsFactory_CreateShouldErrDueBadConfig(t *testing.T) {
 func TestDataComponentsFactory_CreateForShardShouldWork(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
@@ -110,9 +92,6 @@ func TestDataComponentsFactory_CreateForShardShouldWork(t *testing.T) {
 func TestDataComponentsFactory_CreateForMetaShouldWork(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
@@ -129,9 +108,6 @@ func TestDataComponentsFactory_CreateForMetaShouldWork(t *testing.T) {
 // ------------ Test DataComponents --------------------
 func TestManagedDataComponents_CloseShouldWork(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
diff --git a/factory/network/networkComponentsHandler_test.go b/factory/network/networkComponentsHandler_test.go
index e40ca1dcee8..03da0ee915a 100644
--- a/factory/network/networkComponentsHandler_test.go
+++ b/factory/network/networkComponentsHandler_test.go
@@ -12,9 +12,6 @@ import (
 // ------------ Test ManagedNetworkComponents --------------------
 func TestManagedNetworkComponents_CreateWithInvalidArgsShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	networkArgs := componentsMock.GetNetworkFactoryArgs()
 	networkArgs.P2pConfig.Node.Port = "invalid"
@@ -28,9 +25,6 @@ func TestManagedNetworkComponents_CreateWithInvalidArgsShouldErr(t *testing.T) {
 func TestManagedNetworkComponents_CreateShouldWork(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	networkArgs := componentsMock.GetNetworkFactoryArgs()
 	networkComponentsFactory, _ := networkComp.NewNetworkComponentsFactory(networkArgs)
@@ -58,9 +52,6 @@ func TestManagedNetworkComponents_CreateShouldWork(t *testing.T) {
 func TestManagedNetworkComponents_CheckSubcomponents(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	networkArgs := componentsMock.GetNetworkFactoryArgs()
 	networkComponentsFactory, _ := networkComp.NewNetworkComponentsFactory(networkArgs)
diff --git a/factory/network/networkComponents_test.go b/factory/network/networkComponents_test.go
index 205d3ed5249..0fdd190aa72 100644
--- a/factory/network/networkComponents_test.go
+++ b/factory/network/networkComponents_test.go
@@ -15,9 +15,6 @@ import (
 func TestNewNetworkComponentsFactory_NilStatusHandlerShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	args := componentsMock.GetNetworkFactoryArgs()
 	args.StatusHandler = nil
@@ -28,9 +25,6 @@ func TestNewNetworkComponentsFactory_NilStatusHandlerShouldErr(t *testing.T) {
 func TestNewNetworkComponentsFactory_NilMarshalizerShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	args := componentsMock.GetNetworkFactoryArgs()
 	args.Marshalizer = nil
@@ -41,9 +35,6 @@ func TestNewNetworkComponentsFactory_NilMarshalizerShouldErr(t *testing.T) {
 func TestNewNetworkComponentsFactory_OkValsShouldWork(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	args := componentsMock.GetNetworkFactoryArgs()
 	ncf, err := networkComp.NewNetworkComponentsFactory(args)
@@ -53,9 +44,6 @@ func TestNewNetworkComponentsFactory_OkValsShouldWork(t *testing.T) {
 func TestNetworkComponentsFactory_CreateShouldErrDueToBadConfig(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	args := componentsMock.GetNetworkFactoryArgs()
 	args.MainConfig = config.Config{}
@@ -70,9 +58,6 @@ func TestNetworkComponentsFactory_CreateShouldErrDueToBadConfig(t *testing.T) {
 func TestNetworkComponentsFactory_CreateShouldWork(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	args := componentsMock.GetNetworkFactoryArgs()
 	ncf, _ := networkComp.NewNetworkComponentsFactory(args)
@@ -86,9 +71,6 @@ func TestNetworkComponentsFactory_CreateShouldWork(t *testing.T) {
 // ------------ Test NetworkComponents --------------------
 func TestNetworkComponents_CloseShouldWork(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	args := componentsMock.GetNetworkFactoryArgs()
 	ncf, _ := networkComp.NewNetworkComponentsFactory(args)
diff --git a/factory/processing/blockProcessorCreator_test.go b/factory/processing/blockProcessorCreator_test.go
index 5d85dd26931..2842b92221f 100644
--- a/factory/processing/blockProcessorCreator_test.go
+++ b/factory/processing/blockProcessorCreator_test.go
@@ -31,9 +31,6 @@ import (
 func Test_newBlockProcessorCreatorForShard(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
 	pcf, err := processComp.NewProcessComponentsFactory(componentsMock.GetProcessComponentsFactoryArgs(shardCoordinator))
@@ -68,9 +65,6 @@ func Test_newBlockProcessorCreatorForShard(t *testing.T) {
 func Test_newBlockProcessorCreatorForMeta(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	shardC := mock.NewMultiShardsCoordinatorMock(1)
diff --git a/factory/processing/processComponentsHandler_test.go b/factory/processing/processComponentsHandler_test.go
index 62965992dcb..b8c00301261 100644
--- a/factory/processing/processComponentsHandler_test.go
+++ b/factory/processing/processComponentsHandler_test.go
@@ -14,9 +14,6 @@ import (
 // ------------ Test TestManagedProcessComponents --------------------
 func TestManagedProcessComponents_CreateWithInvalidArgsShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
 	processArgs := componentsMock.GetProcessComponentsFactoryArgs(shardCoordinator)
@@ -31,9 +28,6 @@ func TestManagedProcessComponents_CreateWithInvalidArgsShouldErr(t *testing.T) {
 func TestManagedProcessComponents_CreateShouldWork(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(1)
@@ -147,9 +141,6 @@ func TestManagedProcessComponents_CreateShouldWork(t *testing.T) {
 func TestManagedProcessComponents_Close(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
 	processArgs := componentsMock.GetProcessComponentsFactoryArgs(shardCoordinator)
diff --git a/factory/processing/processComponents_test.go b/factory/processing/processComponents_test.go
index d4b3d99c030..ebae3a2c893 100644
--- a/factory/processing/processComponents_test.go
+++ b/factory/processing/processComponents_test.go
@@ -24,9 +24,6 @@ import (
 // ------------ Test TestProcessComponents --------------------
 func TestProcessComponents_CloseShouldWork(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
 	processArgs := componentsMock.GetProcessComponentsFactoryArgs(shardCoordinator)
@@ -42,9 +39,6 @@ func TestProcessComponents_CloseShouldWork(t *testing.T) {
 func TestProcessComponentsFactory_CreateWithInvalidTxAccumulatorTimeExpectError(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
 	processArgs := componentsMock.GetProcessComponentsFactoryArgs(shardCoordinator)
@@ -60,9 +54,6 @@ func TestProcessComponentsFactory_CreateWithInvalidTxAccumulatorTimeExpectError(
 func TestProcessComponents_IndexGenesisBlocks(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(1)
 	processArgs := componentsMock.GetProcessComponentsFactoryArgs(shardCoordinator)
diff --git a/factory/state/stateComponentsHandler_test.go b/factory/state/stateComponentsHandler_test.go
index d055e80efad..91932d43391 100644
--- a/factory/state/stateComponentsHandler_test.go
+++ b/factory/state/stateComponentsHandler_test.go
@@ -15,9 +15,6 @@ import (
 // ------------ Test ManagedStateComponents --------------------
 func TestManagedStateComponents_CreateWithInvalidArgsShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
@@ -33,9 +30,6 @@ func TestManagedStateComponents_CreateWithInvalidArgsShouldErr(t *testing.T) {
 func TestManagedStateComponents_CreateShouldWork(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
@@ -58,9 +52,6 @@ func TestManagedStateComponents_CreateShouldWork(t *testing.T) {
 func TestManagedStateComponents_Close(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
@@ -77,9 +68,6 @@ func TestManagedStateComponents_Close(t *testing.T) {
 func TestManagedStateComponents_CheckSubcomponents(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
@@ -95,9 +83,6 @@ func TestManagedStateComponents_CheckSubcomponents(t *testing.T) {
 func TestManagedStateComponents_Setters(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
diff --git a/factory/state/stateComponents_test.go b/factory/state/stateComponents_test.go
index c45259758b7..3dc6ef7aee1 100644
--- a/factory/state/stateComponents_test.go
+++ b/factory/state/stateComponents_test.go
@@ -12,9 +12,6 @@ import (
 func TestNewStateComponentsFactory_NilShardCoordinatorShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
@@ -28,9 +25,6 @@ func TestNewStateComponentsFactory_NilShardCoordinatorShouldErr(t *testing.T) {
 func TestNewStateComponentsFactory_NilCoreComponents(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
@@ -44,9 +38,6 @@ func TestNewStateComponentsFactory_NilCoreComponents(t *testing.T) {
 func TestNewStateComponentsFactory_ShouldWork(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
@@ -59,9 +50,6 @@ func TestNewStateComponentsFactory_ShouldWork(t *testing.T) {
 func TestStateComponentsFactory_CreateShouldWork(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
@@ -77,9 +65,6 @@ func TestStateComponentsFactory_CreateShouldWork(t *testing.T) {
 // ------------ Test StateComponents --------------------
 func TestStateComponents_CloseShouldWork(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	coreComponents := componentsMock.GetCoreComponents()
 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
diff --git a/factory/status/statusComponentsHandler_test.go b/factory/status/statusComponentsHandler_test.go
index 702d911842a..f021cbb4284 100644
--- a/factory/status/statusComponentsHandler_test.go
+++ b/factory/status/statusComponentsHandler_test.go
@@ -14,9 +14,6 @@ import (
 // ------------ Test ManagedStatusComponents --------------------
 func TestManagedStatusComponents_CreateWithInvalidArgsShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
 	statusArgs, _ := componentsMock.GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator)
@@ -36,9 +33,6 @@ func TestManagedStatusComponents_CreateWithInvalidArgsShouldErr(t *testing.T) {
 func TestManagedStatusComponents_CreateShouldWork(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
 	statusArgs, _ := componentsMock.GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator)
@@ -56,9 +50,6 @@ func TestManagedStatusComponents_CreateShouldWork(t *testing.T) {
 func TestManagedStatusComponents_Close(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
 	statusArgs, _ := componentsMock.GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator)
@@ -73,9 +64,6 @@ func TestManagedStatusComponents_Close(t *testing.T) {
 func TestManagedStatusComponents_CheckSubcomponents(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
 	statusArgs, _ := componentsMock.GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator)
diff --git a/factory/status/statusComponents_test.go b/factory/status/statusComponents_test.go
index 7a1f0ee83ad..3a52d8bf06e 100644
--- a/factory/status/statusComponents_test.go
+++ b/factory/status/statusComponents_test.go
@@ -18,9 +18,6 @@ import (
 func TestNewStatusComponentsFactory_NilCoreComponentsShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
 	args, _ := componentsMock.GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator)
@@ -32,9 +29,6 @@ func TestNewStatusComponentsFactory_NilCoreComponentsShouldErr(t *testing.T) {
 func TestNewStatusComponentsFactory_NilNodesCoordinatorShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
 	args, _ := componentsMock.GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator)
@@ -46,9 +40,6 @@ func TestNewStatusComponentsFactory_NilNodesCoordinatorShouldErr(t *testing.T) {
 func TestNewStatusComponentsFactory_NilEpochStartNotifierShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
 	args, _ := componentsMock.GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator)
@@ -60,9 +51,6 @@ func TestNewStatusComponentsFactory_NilEpochStartNotifierShouldErr(t *testing.T)
 func TestNewStatusComponentsFactory_NilNetworkComponentsShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
 	args, _ := componentsMock.GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator)
@@ -74,9 +62,6 @@ func TestNewStatusComponentsFactory_NilNetworkComponentsShouldErr(t *testing.T)
 func TestNewStatusComponentsFactory_NilShardCoordinatorShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
 	args, _ := componentsMock.GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator)
@@ -88,9 +73,6 @@ func TestNewStatusComponentsFactory_NilShardCoordinatorShouldErr(t *testing.T) {
 func TestNewStatusComponents_InvalidRoundDurationShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
 	coreArgs := componentsMock.GetCoreArgs()
@@ -130,9 +112,6 @@ func TestNewStatusComponents_InvalidRoundDurationShouldErr(t *testing.T) {
 func TestNewStatusComponentsFactory_ShouldWork(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
 	args, _ := componentsMock.GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator)
@@ -143,9 +122,6 @@ func TestNewStatusComponentsFactory_ShouldWork(t *testing.T) {
 func TestStatusComponentsFactory_Create(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
 	args, _ := componentsMock.GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator)
@@ -160,9 +136,6 @@ func TestStatusComponentsFactory_Create(t *testing.T) {
 // ------------ Test StatusComponents --------------------
 func TestStatusComponents_CloseShouldWork(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	shardCoordinator := mock.NewMultiShardsCoordinatorMock(2)
 	statusArgs, _ := componentsMock.GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator)
diff --git a/factory/statusCore/statusCoreComponentsHandler_test.go b/factory/statusCore/statusCoreComponentsHandler_test.go
index d6cd676833d..ea12dbcb993 100644
--- a/factory/statusCore/statusCoreComponentsHandler_test.go
+++ b/factory/statusCore/statusCoreComponentsHandler_test.go
@@ -10,9 +10,6 @@ import (
 func TestManagedStatusCoreComponents_CreateWithInvalidArgsShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	args := componentsMock.GetStatusCoreArgs(componentsMock.GetDefaultCoreComponents())
 	args.Config.ResourceStats.RefreshIntervalInSec = 0
@@ -29,9 +26,6 @@ func TestManagedStatusCoreComponents_CreateWithInvalidArgsShouldErr(t *testing.T
 func TestManagedStatusCoreComponents_CreateShouldWork(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	args := componentsMock.GetStatusCoreArgs(componentsMock.GetCoreComponents())
 	statusCoreComponentsFactory, err := statusCore.NewStatusCoreComponentsFactory(args)
@@ -51,9 +45,6 @@ func TestManagedStatusCoreComponents_CreateShouldWork(t *testing.T) {
 func TestManagedCoreComponents_Close(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	args := componentsMock.GetStatusCoreArgs(componentsMock.GetCoreComponents())
 	statusCoreComponentsFactory, err := statusCore.NewStatusCoreComponentsFactory(args)
diff --git a/factory/statusCore/statusCoreComponents_test.go b/factory/statusCore/statusCoreComponents_test.go
index 66c5e6c07ea..f54706fe8f0 100644
--- a/factory/statusCore/statusCoreComponents_test.go
+++ b/factory/statusCore/statusCoreComponents_test.go
@@ -18,9 +18,6 @@ import (
 func TestNewStatusCoreComponentsFactory(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	t.Run("nil core components should error", func(t *testing.T) {
 		t.Parallel()
@@ -96,9 +93,6 @@ func TestNewStatusCoreComponentsFactory(t *testing.T) {
 func TestStatusCoreComponentsFactory_InvalidValueShouldErr(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	args := componentsMock.GetStatusCoreArgs(componentsMock.GetCoreComponents())
 	args.Config = config.Config{
@@ -116,9 +110,6 @@ func TestStatusCoreComponentsFactory_InvalidValueShouldErr(t *testing.T) {
 func TestStatusCoreComponentsFactory_CreateStatusCoreComponentsShouldWork(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	args := componentsMock.GetStatusCoreArgs(componentsMock.GetCoreComponents())
 	sccf, err := statusCore.NewStatusCoreComponentsFactory(args)
@@ -132,9 +123,6 @@ func TestStatusCoreComponentsFactory_CreateStatusCoreComponentsShouldWork(t *tes
 // ------------ Test CoreComponents --------------------
 func TestStatusCoreComponents_CloseShouldWork(t *testing.T) {
 	t.Parallel()
-	if testing.Short() {
-		t.Skip("this is not a short test")
-	}

 	args := componentsMock.GetStatusCoreArgs(componentsMock.GetCoreComponents())
 	sccf, err := statusCore.NewStatusCoreComponentsFactory(args)
diff --git a/genesis/process/genesisBlockCreator_test.go b/genesis/process/genesisBlockCreator_test.go
index 067412c9aad..e324446f8b3 100644
--- a/genesis/process/genesisBlockCreator_test.go
+++ b/genesis/process/genesisBlockCreator_test.go
@@ -1,3 +1,8 @@
+//go:build !race
+// +build !race
+
+// TODO reinstate test after Wasm VM pointer fix
+
 package process
 
 import (
@@ -205,11 +210,6 @@ func createMockArgument(
 }
 
 func TestGenesisBlockCreator_CreateGenesisBlockAfterHardForkShouldCreateSCResultingAddresses(t *testing.T) {
-	// TODO reinstate test after Wasm VM pointer fix
-	if testing.Short() {
-		t.Skip("cannot run with -race -short; requires Wasm VM fix")
-	}
-
 	scAddressBytes, _ := hex.DecodeString("00000000000000000500761b8c4a25d3979359223208b412285f635e71300102")
 	initialNodesSetup := &mock.InitialNodesHandlerStub{
 		InitialNodesInfoCalled: func() (map[uint32][]nodesCoordinator.GenesisNodeInfoHandler, map[uint32][]nodesCoordinator.GenesisNodeInfoHandler) {
@@ -270,11 +270,6 @@ func TestGenesisBlockCreator_CreateGenesisBlockAfterHardForkShouldCreateSCResult
 }
 
 func TestGenesisBlockCreator_CreateGenesisBlocksJustDelegationShouldWorkAndDNS(t *testing.T) {
-	// TODO reinstate test after Wasm VM pointer fix
-	if testing.Short() {
-		t.Skip("cannot run with -race -short; requires Wasm VM fix")
-	}
-
 	scAddressBytes, _ := hex.DecodeString("00000000000000000500761b8c4a25d3979359223208b412285f635e71300102")
 	stakedAddr, _ := hex.DecodeString("b00102030405060708090001020304050607080900010203040506070809000b")
 	initialNodesSetup := &mock.InitialNodesHandlerStub{
@@ -319,11 +314,6 @@ func TestGenesisBlockCreator_CreateGenesisBlocksJustDelegationShouldWorkAndDNS(t
 }
 
 func TestGenesisBlockCreator_CreateGenesisBlocksStakingAndDelegationShouldWorkAndDNS(t *testing.T) {
-	// TODO reinstate test after Wasm VM pointer fix
-	if testing.Short() {
-		t.Skip("cannot run with -race -short; requires Wasm VM fix")
-	}
-
 	scAddressBytes, _ := hex.DecodeString("00000000000000000500761b8c4a25d3979359223208b412285f635e71300102")
 	stakedAddr, _ := hex.DecodeString("b00102030405060708090001020304050607080900010203040506070809000b")
 	stakedAddr2, _ := hex.DecodeString("d00102030405060708090001020304050607080900010203040506070809000d")
@@ -399,11 +389,6 @@ func TestGenesisBlockCreator_CreateGenesisBlocksStakingAndDelegationShouldWorkAn
 }
 
 func TestGenesisBlockCreator_GetIndexingDataShouldWork(t *testing.T) {
-	// TODO reinstate test after Wasm VM pointer fix
-	if testing.Short() {
-		t.Skip("cannot run with -race -short; requires Wasm VM fix")
-	}
-
 	scAddressBytes, _ := hex.DecodeString("00000000000000000500761b8c4a25d3979359223208b412285f635e71300102")
 	stakedAddr, _ := hex.DecodeString("b00102030405060708090001020304050607080900010203040506070809000b")
 	stakedAddr2, _ := hex.DecodeString("d00102030405060708090001020304050607080900010203040506070809000d")

From 5acd29c451456b58428d6cac9ce20401a3f06187 Mon Sep 17 00:00:00 2001
From: BeniaminDrasovean
Date: Mon, 3 Apr 2023 12:45:29 +0300
Subject: [PATCH 20/20] fix integration test after merge

---
 integrationTests/vm/txsFee/asyncESDT_test.go | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/integrationTests/vm/txsFee/asyncESDT_test.go b/integrationTests/vm/txsFee/asyncESDT_test.go
index a4318ad54f0..1f802023506 100644
--- a/integrationTests/vm/txsFee/asyncESDT_test.go
+++ b/integrationTests/vm/txsFee/asyncESDT_test.go
@@ -13,6 +13,7 @@ import (
 	"github.com/multiversx/mx-chain-core-go/core"
 	"github.com/multiversx/mx-chain-go/common"
+	"github.com/multiversx/mx-chain-go/common/errChan"
 	"github.com/multiversx/mx-chain-go/config"
 	"github.com/multiversx/mx-chain-go/integrationTests/vm"
 	"github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils"
@@ -542,7 +543,7 @@ func TestAsyncESDTCallForThirdContractShouldWork(t *testing.T) {
 	leaves := &common.TrieIteratorChannels{
 		LeavesChan: make(chan core.KeyValueHolder, 1),
-		ErrChan:    make(chan error, 1),
+		ErrChan:    errChan.NewErrChanWrapper(),
 	}
 	err = testContext.Accounts.GetAllLeaves(leaves, context.Background(), roothash)
 	require.Nil(t, err)
@@ -551,6 +552,6 @@ func TestAsyncESDTCallForThirdContractShouldWork(t *testing.T) {
 		// do nothing, just iterate
 	}
 
-	err = <-leaves.ErrChan
+	err = leaves.ErrChan.ReadFromChanNonBlocking()
 	require.Nil(t, err)
 }
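Note on the ErrChan migration in the final hunk: the old code blocked on `err = <-leaves.ErrChan`, which is only safe if some producer is guaranteed to send on the channel; the wrapper's ReadFromChanNonBlocking returns nil immediately when nothing was written, so the consumer can always check for an error after draining the leaves channel. Below is a minimal standalone sketch of that consumer pattern; the inlined errChanWrapper type and the iterateLeaves producer are illustrative stand-ins for the common/errChan package and the GetAllLeaves call above, not the repository types.

package main

import (
	"errors"
	"fmt"
)

// errChanWrapper mirrors the non-blocking error channel used in the patch;
// it is an illustrative stand-in for common/errChan, not the real type.
type errChanWrapper struct {
	ch chan error
}

func newErrChanWrapper() *errChanWrapper {
	return &errChanWrapper{ch: make(chan error, 1)}
}

// WriteInChanNonBlocking drops the error if the one-slot buffer is already full.
func (ec *errChanWrapper) WriteInChanNonBlocking(err error) {
	select {
	case ec.ch <- err:
	default:
	}
}

// ReadFromChanNonBlocking returns nil immediately when no error was recorded,
// so the consumer never blocks the way a bare `<-ch` receive would.
func (ec *errChanWrapper) ReadFromChanNonBlocking() error {
	select {
	case err := <-ec.ch:
		return err
	default:
		return nil
	}
}

// iterateLeaves is a hypothetical producer standing in for a trie iteration:
// it streams values and records at most one failure on the error channel.
func iterateLeaves(leavesChan chan<- string, ec *errChanWrapper) {
	defer close(leavesChan)
	for i := 0; i < 3; i++ {
		leavesChan <- fmt.Sprintf("leaf-%d", i)
	}
	ec.WriteInChanNonBlocking(errors.New("simulated trie error"))
}

func main() {
	leavesChan := make(chan string, 1)
	ec := newErrChanWrapper()

	go iterateLeaves(leavesChan, ec)

	for leaf := range leavesChan {
		_ = leaf // drain the channel, as the test above does
	}

	// Non-blocking read: returns the recorded error, or nil if none was sent.
	if err := ec.ReadFromChanNonBlocking(); err != nil {
		fmt.Println("iteration failed:", err)
	}
}

The one-slot buffer plus the non-blocking write means at most one error is retained and later errors are dropped; that is the trade-off the wrapper makes so that trie iteration can never block on error reporting.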
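Separately, the genesisBlockCreator_test.go hunks earlier in this series take a different approach from the deleted testing.Short() guards: they add a file-level //go:build !race constraint. When the race detector is enabled, the go command defines the race build tag, so the entire file is excluded at compile time rather than skipped at run time. A compilable sketch of the same pattern, in a hypothetical file not present in the repository:

//go:build !race
// +build !race

package process

import "testing"

// TestOnlyWithoutRace is a hypothetical example: under `go test -race` the
// race build tag is set, this file fails the !race constraint, and the test
// is never compiled, so no per-test runtime skip guard is needed.
func TestOnlyWithoutRace(t *testing.T) {
	t.Log("compiled only when the race detector is off")
}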