From 024bac00e8a0026759da63164371547ee89ff06c Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Thu, 13 Oct 2022 09:00:39 +0300 Subject: [PATCH 001/221] propagate getNodeFromDb err to baseSync --- errors/missingTrieNodeError.go | 24 +++++++++++++++++++ process/block/preprocess/basePreProcess.go | 9 +++++-- .../block/preprocess/rewardTxPreProcessor.go | 12 +++++++--- .../block/preprocess/smartContractResults.go | 12 +++++++--- process/block/preprocess/transactions.go | 24 +++++++++++++++---- process/block/preprocess/transactionsV2.go | 11 +++++++-- .../block/preprocess/transactionsV2_test.go | 12 ++++++---- process/rewardTransaction/process.go | 16 ++++++++++--- process/smartContract/process.go | 23 +++++++++++++++++- process/sync/metablock.go | 3 ++- process/sync/shardblock.go | 24 +------------------ process/transaction/shardProcess.go | 5 ++++ state/accountsDB.go | 2 +- state/errors.go | 17 ------------- trie/patriciaMerkleTrie.go | 5 ---- 15 files changed, 130 insertions(+), 69 deletions(-) create mode 100644 errors/missingTrieNodeError.go diff --git a/errors/missingTrieNodeError.go b/errors/missingTrieNodeError.go new file mode 100644 index 00000000000..029cc45069f --- /dev/null +++ b/errors/missingTrieNodeError.go @@ -0,0 +1,24 @@ +package errors + +import ( + "strings" + + "github.com/ElrondNetwork/elrond-go/common" +) + +// IsGetNodeFromDBError returns true if the provided error is of type getNodeFromDB +func IsGetNodeFromDBError(err error) bool { + if err == nil { + return false + } + + if IsClosingError(err) { + return false + } + + if strings.Contains(err.Error(), common.GetNodeFromDBErrorString) { + return true + } + + return false +} diff --git a/process/block/preprocess/basePreProcess.go b/process/block/preprocess/basePreProcess.go index 8ded3963eec..c6867c97aef 100644 --- a/process/block/preprocess/basePreProcess.go +++ b/process/block/preprocess/basePreProcess.go @@ -14,6 +14,7 @@ import ( "github.com/ElrondNetwork/elrond-go-core/marshal" 
"github.com/ElrondNetwork/elrond-go/common" "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/errors" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/state" "github.com/ElrondNetwork/elrond-go/storage" @@ -397,17 +398,21 @@ func (bpp *basePreProcess) requestMissingTxsForShard( return requestedTxs } -func (bpp *basePreProcess) saveAccountBalanceForAddress(address []byte) { +func (bpp *basePreProcess) saveAccountBalanceForAddress(address []byte) error { if bpp.balanceComputation.IsAddressSet(address) { - return + return nil } balance, err := bpp.getBalanceForAddress(address) if err != nil { + if errors.IsGetNodeFromDBError(err) { + return err + } balance = big.NewInt(0) } bpp.balanceComputation.SetBalanceToAddress(address, balance) + return nil } func (bpp *basePreProcess) getBalanceForAddress(address []byte) (*big.Int, error) { diff --git a/process/block/preprocess/rewardTxPreProcessor.go b/process/block/preprocess/rewardTxPreProcessor.go index aafb42dab67..69442eea1ac 100644 --- a/process/block/preprocess/rewardTxPreProcessor.go +++ b/process/block/preprocess/rewardTxPreProcessor.go @@ -265,9 +265,12 @@ func (rtp *rewardTxPreprocessor) ProcessBlockTransactions( return process.ErrWrongTypeAssertion } - rtp.saveAccountBalanceForAddress(rTx.GetRcvAddr()) + err = rtp.saveAccountBalanceForAddress(rTx.GetRcvAddr()) + if err != nil { + return err + } - err := rtp.rewardsProcessor.ProcessRewardTransaction(rTx) + err = rtp.rewardsProcessor.ProcessRewardTransaction(rTx) if err != nil { return err } @@ -491,7 +494,10 @@ func (rtp *rewardTxPreprocessor) ProcessMiniBlock( break } - rtp.saveAccountBalanceForAddress(miniBlockRewardTxs[txIndex].GetRcvAddr()) + err = rtp.saveAccountBalanceForAddress(miniBlockRewardTxs[txIndex].GetRcvAddr()) + if err != nil { + break + } snapshot := rtp.handleProcessTransactionInit(preProcessorExecutionInfoHandler, miniBlockTxHashes[txIndex]) err = 
rtp.rewardsProcessor.ProcessRewardTransaction(miniBlockRewardTxs[txIndex]) diff --git a/process/block/preprocess/smartContractResults.go b/process/block/preprocess/smartContractResults.go index 36c2d52d447..0f04a0a1d20 100644 --- a/process/block/preprocess/smartContractResults.go +++ b/process/block/preprocess/smartContractResults.go @@ -329,9 +329,12 @@ func (scr *smartContractResults) ProcessBlockTransactions( scr.gasHandler.SetGasProvided(gasProvidedByTxInSelfShard, txHash) } - scr.saveAccountBalanceForAddress(currScr.GetRcvAddr()) + err = scr.saveAccountBalanceForAddress(currScr.GetRcvAddr()) + if err != nil { + return err + } - _, err := scr.scrProcessor.ProcessSmartContractResult(currScr) + _, err = scr.scrProcessor.ProcessSmartContractResult(currScr) if err != nil { return err } @@ -604,7 +607,10 @@ func (scr *smartContractResults) ProcessMiniBlock( } } - scr.saveAccountBalanceForAddress(miniBlockScrs[txIndex].GetRcvAddr()) + err = scr.saveAccountBalanceForAddress(miniBlockScrs[txIndex].GetRcvAddr()) + if err != nil { + break + } snapshot := scr.handleProcessTransactionInit(preProcessorExecutionInfoHandler, miniBlockTxHashes[txIndex]) _, err = scr.scrProcessor.ProcessSmartContractResult(miniBlockScrs[txIndex]) diff --git a/process/block/preprocess/transactions.go b/process/block/preprocess/transactions.go index 22ec9ee3374..c8717cdd57c 100644 --- a/process/block/preprocess/transactions.go +++ b/process/block/preprocess/transactions.go @@ -18,6 +18,7 @@ import ( logger "github.com/ElrondNetwork/elrond-go-logger" "github.com/ElrondNetwork/elrond-go/common" "github.com/ElrondNetwork/elrond-go/dataRetriever" + elrondErr "github.com/ElrondNetwork/elrond-go/errors" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/state" @@ -574,7 +575,10 @@ func (txs *transactions) processTxsToMe( txs.gasHandler.SetGasProvided(gasProvidedByTxInSelfShard, txHash) } - 
txs.saveAccountBalanceForAddress(tx.GetRcvAddr()) + err = txs.saveAccountBalanceForAddress(tx.GetRcvAddr()) + if err != nil { + return err + } if scheduledMode { txs.scheduledTxsExecutionHandler.AddScheduledTx(txHash, tx) @@ -710,7 +714,7 @@ func (txs *transactions) createAndProcessScheduledMiniBlocksFromMeAsValidator( txs.sortTransactionsBySenderAndNonce(scheduledTxsFromMe, randomness) - scheduledMiniBlocks := txs.createScheduledMiniBlocks( + scheduledMiniBlocks, err := txs.createScheduledMiniBlocks( haveTime, haveAdditionalTime, isShardStuck, @@ -718,6 +722,9 @@ func (txs *transactions) createAndProcessScheduledMiniBlocksFromMeAsValidator( scheduledTxsFromMe, mapSCTxs, ) + if err != nil { + return nil, err + } if !haveTime() && !haveAdditionalTime() { return nil, process.ErrTimeIsOut @@ -1113,7 +1120,7 @@ func (txs *transactions) createAndProcessScheduledMiniBlocksFromMeAsProposer( } startTime := time.Now() - scheduledMiniBlocks := txs.createScheduledMiniBlocks( + scheduledMiniBlocks, err := txs.createScheduledMiniBlocks( haveTime, haveAdditionalTime, txs.blockTracker.IsShardStuck, @@ -1125,6 +1132,9 @@ func (txs *transactions) createAndProcessScheduledMiniBlocksFromMeAsProposer( log.Debug("elapsed time to createScheduledMiniBlocks", "time [s]", elapsedTime, ) + if err != nil { + return nil, err + } return scheduledMiniBlocks, nil } @@ -1185,6 +1195,9 @@ func (txs *transactions) createAndProcessMiniBlocksFromMeV1( err = txs.processMiniBlockBuilderTx(mbBuilder, wtx, tx) if err != nil { + if elrondErr.IsGetNodeFromDBError(err) { + return nil, nil, err + } continue } @@ -1537,7 +1550,10 @@ func (txs *transactions) ProcessMiniBlock( } } - txs.saveAccountBalanceForAddress(miniBlockTxs[txIndex].GetRcvAddr()) + err = txs.saveAccountBalanceForAddress(miniBlockTxs[txIndex].GetRcvAddr()) + if err != nil { + break + } if !scheduledMode { err = txs.processInNormalMode( diff --git a/process/block/preprocess/transactionsV2.go b/process/block/preprocess/transactionsV2.go index 
f746d81189d..12876f6a5d7 100644 --- a/process/block/preprocess/transactionsV2.go +++ b/process/block/preprocess/transactionsV2.go @@ -9,6 +9,7 @@ import ( "github.com/ElrondNetwork/elrond-go-core/data/block" "github.com/ElrondNetwork/elrond-go-core/data/transaction" "github.com/ElrondNetwork/elrond-go/common" + elrondErr "github.com/ElrondNetwork/elrond-go/errors" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/storage/txcache" ) @@ -70,6 +71,9 @@ func (txs *transactions) createAndProcessMiniBlocksFromMeV2( receiverShardID, mbInfo) if err != nil { + if elrondErr.IsGetNodeFromDBError(err) { + return nil, nil, nil, err + } if shouldAddToRemaining { remainingTxs = append(remainingTxs, sortedTxs[index]) } @@ -269,7 +273,7 @@ func (txs *transactions) createScheduledMiniBlocks( isMaxBlockSizeReached func(int, int) bool, sortedTxs []*txcache.WrappedTransaction, mapSCTxs map[string]struct{}, -) block.MiniBlockSlice { +) (block.MiniBlockSlice, error) { log.Debug("createScheduledMiniBlocks has been started") mbInfo := txs.initCreateScheduledMiniBlocks() @@ -312,6 +316,9 @@ func (txs *transactions) createScheduledMiniBlocks( receiverShardID, mbInfo) if err != nil { + if elrondErr.IsGetNodeFromDBError(err) { + return nil, err + } continue } @@ -330,7 +337,7 @@ func (txs *transactions) createScheduledMiniBlocks( log.Debug("createScheduledMiniBlocks has been finished") - return miniBlocks + return miniBlocks, nil } func (txs *transactions) verifyTransaction( diff --git a/process/block/preprocess/transactionsV2_test.go b/process/block/preprocess/transactionsV2_test.go index 38ea54be266..59dd6320fa1 100644 --- a/process/block/preprocess/transactionsV2_test.go +++ b/process/block/preprocess/transactionsV2_test.go @@ -561,7 +561,8 @@ func TestTransactions_CreateScheduledMiniBlocksShouldWork(t *testing.T) { tx := &txcache.WrappedTransaction{} sortedTxs = append(sortedTxs, tx) - mbs := preprocessor.createScheduledMiniBlocks(haveTimeMethod, 
haveAdditionalTimeMethod, isShardStuckMethod, isMaxBlockSizeReachedMethod, sortedTxs, mapSCTxs) + mbs, err := preprocessor.createScheduledMiniBlocks(haveTimeMethod, haveAdditionalTimeMethod, isShardStuckMethod, isMaxBlockSizeReachedMethod, sortedTxs, mapSCTxs) + assert.Nil(t, err) assert.Equal(t, 0, len(mbs)) // should not create scheduled mini blocks when max block size is reached @@ -576,7 +577,8 @@ func TestTransactions_CreateScheduledMiniBlocksShouldWork(t *testing.T) { } sortedTxs = append(sortedTxs, tx) - mbs = preprocessor.createScheduledMiniBlocks(haveTimeMethod, haveAdditionalTimeMethod, isShardStuckMethod, isMaxBlockSizeReachedMethod, sortedTxs, mapSCTxs) + mbs, err = preprocessor.createScheduledMiniBlocks(haveTimeMethod, haveAdditionalTimeMethod, isShardStuckMethod, isMaxBlockSizeReachedMethod, sortedTxs, mapSCTxs) + assert.Nil(t, err) assert.Equal(t, 0, len(mbs)) // should not create scheduled mini blocks when verifyTransaction returns error @@ -592,7 +594,8 @@ func TestTransactions_CreateScheduledMiniBlocksShouldWork(t *testing.T) { } sortedTxs = append(sortedTxs, tx) - mbs = preprocessor.createScheduledMiniBlocks(haveTimeMethod, haveAdditionalTimeMethod, isShardStuckMethod, isMaxBlockSizeReachedMethod, sortedTxs, mapSCTxs) + mbs, err = preprocessor.createScheduledMiniBlocks(haveTimeMethod, haveAdditionalTimeMethod, isShardStuckMethod, isMaxBlockSizeReachedMethod, sortedTxs, mapSCTxs) + assert.Nil(t, err) assert.Equal(t, 0, len(mbs)) // should create two scheduled mini blocks @@ -623,7 +626,8 @@ func TestTransactions_CreateScheduledMiniBlocksShouldWork(t *testing.T) { sortedTxs = append(sortedTxs, tx3) mapSCTxs["hash1"] = struct{}{} - mbs = preprocessor.createScheduledMiniBlocks(haveTimeMethod, haveAdditionalTimeMethod, isShardStuckMethod, isMaxBlockSizeReachedMethod, sortedTxs, mapSCTxs) + mbs, err = preprocessor.createScheduledMiniBlocks(haveTimeMethod, haveAdditionalTimeMethod, isShardStuckMethod, isMaxBlockSizeReachedMethod, sortedTxs, mapSCTxs) + 
assert.Nil(t, err) assert.Equal(t, 2, len(mbs)) } diff --git a/process/rewardTransaction/process.go b/process/rewardTransaction/process.go index 801b18b2e13..8a62b4bd0d7 100644 --- a/process/rewardTransaction/process.go +++ b/process/rewardTransaction/process.go @@ -6,6 +6,7 @@ import ( "github.com/ElrondNetwork/elrond-go-core/core" "github.com/ElrondNetwork/elrond-go-core/core/check" "github.com/ElrondNetwork/elrond-go-core/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/errors" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/state" @@ -96,7 +97,10 @@ func (rtp *rewardTxProcessor) ProcessRewardTransaction(rTx *rewardTx.RewardTx) e return err } - rtp.saveAccumulatedRewards(rTx, accHandler) + err = rtp.saveAccumulatedRewards(rTx, accHandler) + if err != nil { + return err + } return rtp.accounts.SaveAccount(accHandler) } @@ -104,9 +108,9 @@ func (rtp *rewardTxProcessor) ProcessRewardTransaction(rTx *rewardTx.RewardTx) e func (rtp *rewardTxProcessor) saveAccumulatedRewards( rtx *rewardTx.RewardTx, userAccount state.UserAccountHandler, -) { +) error { if !core.IsSmartContractAddress(rtx.RcvAddr) { - return + return nil } existingReward := big.NewInt(0) @@ -116,8 +120,14 @@ func (rtp *rewardTxProcessor) saveAccumulatedRewards( existingReward.SetBytes(val) } + if errors.IsGetNodeFromDBError(err) { + return err + } + existingReward.Add(existingReward, rtx.Value) _ = userAccount.SaveKeyValue([]byte(fullRewardKey), existingReward.Bytes()) + + return nil } // IsInterfaceNil returns true if there is no value under the interface diff --git a/process/smartContract/process.go b/process/smartContract/process.go index 7599493bbcf..39b4f8b5163 100644 --- a/process/smartContract/process.go +++ b/process/smartContract/process.go @@ -19,6 +19,7 @@ import ( "github.com/ElrondNetwork/elrond-go-core/marshal" logger "github.com/ElrondNetwork/elrond-go-logger" "github.com/ElrondNetwork/elrond-go/common" + 
"github.com/ElrondNetwork/elrond-go/errors" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/state" @@ -342,6 +343,9 @@ func (sc *scProcessor) doExecuteSmartContractTransaction( var results []data.TransactionHandler results, err = sc.processVMOutput(vmOutput, txHash, tx, vmInput.CallType, vmInput.GasProvided) if err != nil { + if errors.IsGetNodeFromDBError(err) { + return vmcommon.UserError, err + } log.Trace("process vm output returned with problem ", "err", err.Error()) return vmcommon.ExecutionFailed, sc.ProcessIfError(acntSnd, txHash, tx, err.Error(), []byte(vmOutput.ReturnMessage), snapshot, vmInput.GasLocked) } @@ -377,6 +381,9 @@ func (sc *scProcessor) executeSmartContractCall( vmOutput, err = vmExec.RunSmartContractCall(vmInput) sc.arwenChangeLocker.RUnlock() if err != nil { + if errors.IsGetNodeFromDBError(err) { + return userErrorVmOutput, err + } log.Debug("run smart contract call error", "error", err.Error()) return userErrorVmOutput, sc.ProcessIfError(acntSnd, txHash, tx, err.Error(), []byte(""), snapshot, vmInput.GasLocked) } @@ -937,6 +944,9 @@ func (sc *scProcessor) doExecuteBuiltInFunction( tmpCreatedAsyncCallback := false tmpCreatedAsyncCallback, newSCRTxs, err = sc.processSCOutputAccounts(newVMOutput, vmInput.CallType, outPutAccounts, tx, txHash) if err != nil { + if errors.IsGetNodeFromDBError(err) { + return vmcommon.ExecutionFailed, err + } return vmcommon.ExecutionFailed, sc.ProcessIfError(acntSnd, txHash, tx, err.Error(), []byte(err.Error()), snapshot, vmInput.GasLocked) } createdAsyncCallback = createdAsyncCallback || tmpCreatedAsyncCallback @@ -1036,6 +1046,10 @@ func (sc *scProcessor) resolveBuiltInFunctions( GasRemaining: 0, } + if errors.IsGetNodeFromDBError(err) { + return vmOutput, err + } + return vmOutput, nil } @@ -1304,7 +1318,8 @@ func (sc *scProcessor) ProcessIfError( return sc.processIfErrorWithAddedLogs(acntSnd, txHash, tx, returnCode, 
returnMessage, snapshot, gasLocked, nil) } -func (sc *scProcessor) processIfErrorWithAddedLogs(acntSnd state.UserAccountHandler, +func (sc *scProcessor) processIfErrorWithAddedLogs( + acntSnd state.UserAccountHandler, txHash []byte, tx data.TransactionHandler, returnCode string, @@ -1664,6 +1679,9 @@ func (sc *scProcessor) doDeploySmartContract( sc.arwenChangeLocker.RUnlock() if err != nil { log.Debug("VM error", "error", err.Error()) + if errors.IsGetNodeFromDBError(err) { + return vmcommon.UserError, err + } return vmcommon.UserError, sc.ProcessIfError(acntSnd, txHash, tx, err.Error(), []byte(""), snapshot, vmInput.GasLocked) } @@ -1686,6 +1704,9 @@ func (sc *scProcessor) doDeploySmartContract( results, err := sc.processVMOutput(vmOutput, txHash, tx, vmInput.CallType, vmInput.GasProvided) if err != nil { log.Trace("Processing error", "error", err.Error()) + if errors.IsGetNodeFromDBError(err) { + return vmcommon.ExecutionFailed, err + } return vmcommon.ExecutionFailed, sc.ProcessIfError(acntSnd, txHash, tx, err.Error(), []byte(vmOutput.ReturnMessage), snapshot, vmInput.GasLocked) } diff --git a/process/sync/metablock.go b/process/sync/metablock.go index 6d978f67176..6bcf3a7c699 100644 --- a/process/sync/metablock.go +++ b/process/sync/metablock.go @@ -8,6 +8,7 @@ import ( "github.com/ElrondNetwork/elrond-go-core/data" "github.com/ElrondNetwork/elrond-go-core/data/block" "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/errors" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/state" "github.com/ElrondNetwork/elrond-go/storage" @@ -178,7 +179,7 @@ func (boot *MetaBootstrap) setLastEpochStartRound() { // in the blockchain, and all this mechanism will be reiterated for the next block. 
func (boot *MetaBootstrap) SyncBlock(ctx context.Context) error { err := boot.syncBlock() - if isErrGetNodeFromDB(err) { + if errors.IsGetNodeFromDBError(err) { errSync := boot.syncAccountsDBs() boot.handleTrieSyncError(errSync, ctx) } diff --git a/process/sync/shardblock.go b/process/sync/shardblock.go index f75cd4974a1..02a75b9e513 100644 --- a/process/sync/shardblock.go +++ b/process/sync/shardblock.go @@ -3,13 +3,11 @@ package sync import ( "context" "math" - "strings" "github.com/ElrondNetwork/elrond-go-core/core" "github.com/ElrondNetwork/elrond-go-core/core/check" "github.com/ElrondNetwork/elrond-go-core/data" "github.com/ElrondNetwork/elrond-go-core/data/block" - "github.com/ElrondNetwork/elrond-go/common" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/errors" "github.com/ElrondNetwork/elrond-go/process" @@ -145,7 +143,7 @@ func (boot *ShardBootstrap) StartSyncingBlocks() { // in the blockchain, and all this mechanism will be reiterated for the next block. 
func (boot *ShardBootstrap) SyncBlock(ctx context.Context) error { err := boot.syncBlock() - if isErrGetNodeFromDB(err) { + if errors.IsGetNodeFromDBError(err) { errSync := boot.syncUserAccountsState() boot.handleTrieSyncError(errSync, ctx) } @@ -153,26 +151,6 @@ func (boot *ShardBootstrap) SyncBlock(ctx context.Context) error { return err } -func isErrGetNodeFromDB(err error) bool { - if err == nil { - return false - } - - if strings.Contains(err.Error(), storage.ErrDBIsClosed.Error()) { - return false - } - - if strings.Contains(err.Error(), errors.ErrContextClosing.Error()) { - return false - } - - if strings.Contains(err.Error(), common.GetNodeFromDBErrorString) { - return true - } - - return false -} - // Close closes the synchronization loop func (boot *ShardBootstrap) Close() error { if check.IfNil(boot.baseBootstrap) { diff --git a/process/transaction/shardProcess.go b/process/transaction/shardProcess.go index 5e48b0d8f39..45028360bfd 100644 --- a/process/transaction/shardProcess.go +++ b/process/transaction/shardProcess.go @@ -17,6 +17,7 @@ import ( "github.com/ElrondNetwork/elrond-go-core/marshal" "github.com/ElrondNetwork/elrond-go-logger" "github.com/ElrondNetwork/elrond-go/common" + elrondErr "github.com/ElrondNetwork/elrond-go/errors" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/state" @@ -217,6 +218,10 @@ func (txProc *txProcessor) executeAfterFailedMoveBalanceTransaction( tx *transaction.Transaction, txError error, ) error { + if elrondErr.IsGetNodeFromDBError(txError) { + return txError + } + acntSnd, err := txProc.getAccountFromAddress(tx.SndAddr) if err != nil { return err diff --git a/state/accountsDB.go b/state/accountsDB.go index 5a4382b97eb..303a41a90d3 100644 --- a/state/accountsDB.go +++ b/state/accountsDB.go @@ -500,7 +500,7 @@ func (adb *AccountsDB) loadDataTrie(accountHandler baseAccountHandler, mainTrie dataTrie, err := 
mainTrie.Recreate(accountHandler.GetRootHash()) if err != nil { - return NewErrMissingTrie(accountHandler.GetRootHash()) + return fmt.Errorf("%w for rootHash %v", err, accountHandler.GetRootHash()) } accountHandler.SetDataTrie(dataTrie) diff --git a/state/errors.go b/state/errors.go index 89cc3da65e0..b05641975d0 100644 --- a/state/errors.go +++ b/state/errors.go @@ -8,23 +8,6 @@ import ( "github.com/ElrondNetwork/elrond-go/common" ) -// ErrMissingTrie is an error-compatible struct holding the root hash of the trie that is missing -type ErrMissingTrie struct { - rootHash []byte -} - -// ------- ErrMissingTrie - -// NewErrMissingTrie returns a new instantiated struct -func NewErrMissingTrie(rootHash []byte) *ErrMissingTrie { - return &ErrMissingTrie{rootHash: rootHash} -} - -// Error returns the error as string -func (e *ErrMissingTrie) Error() string { - return "trie was not found for hash " + hex.EncodeToString(e.rootHash) -} - // ErrAccountNotFoundAtBlock is an error-compatible struct holding the block info at which an account was not found type ErrAccountNotFoundAtBlock struct { BlockInfo common.BlockInfo diff --git a/trie/patriciaMerkleTrie.go b/trie/patriciaMerkleTrie.go index 8b2157f2728..906e6412a28 100644 --- a/trie/patriciaMerkleTrie.go +++ b/trie/patriciaMerkleTrie.go @@ -281,11 +281,6 @@ func (tr *patriciaMerkleTrie) recreate(root []byte, tsm common.StorageManager) ( ) } - _, err := tsm.Get(root) - if err != nil { - return nil, err - } - newTr, _, err := tr.recreateFromDb(root, tsm) if err != nil { if errors.IsClosingError(err) { From b7c39e4ffbd49fea2e355f6a23a935e5f028e8c9 Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Mon, 17 Oct 2022 15:48:22 +0300 Subject: [PATCH 002/221] add unit tests --- .../preprocess/rewardTxPreProcessor_test.go | 56 ++++++++++++++++ .../preprocess/smartContractResults_test.go | 64 +++++++++++++++++++ .../block/preprocess/transactionsV2_test.go | 52 +++++++++++++++ process/block/preprocess/transactions_test.go | 32 
++++++++++ process/rewardTransaction/process_test.go | 38 +++++++++++ 5 files changed, 242 insertions(+) diff --git a/process/block/preprocess/rewardTxPreProcessor_test.go b/process/block/preprocess/rewardTxPreProcessor_test.go index f24c3ed6955..35c09c7347b 100644 --- a/process/block/preprocess/rewardTxPreProcessor_test.go +++ b/process/block/preprocess/rewardTxPreProcessor_test.go @@ -1,6 +1,7 @@ package preprocess import ( + "fmt" "testing" "time" @@ -8,6 +9,7 @@ import ( "github.com/ElrondNetwork/elrond-go-core/data" "github.com/ElrondNetwork/elrond-go-core/data/block" "github.com/ElrondNetwork/elrond-go-core/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/common" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/mock" @@ -16,6 +18,7 @@ import ( "github.com/ElrondNetwork/elrond-go/testscommon/hashingMocks" stateMock "github.com/ElrondNetwork/elrond-go/testscommon/state" storageStubs "github.com/ElrondNetwork/elrond-go/testscommon/storage" + vmcommon "github.com/ElrondNetwork/elrond-vm-common" "github.com/stretchr/testify/assert" ) @@ -678,6 +681,59 @@ func TestRewardTxPreprocessor_ProcessBlockTransactions(t *testing.T) { assert.Nil(t, err) } +func TestRewardTxPreprocessor_ProcessBlockTransactionsMissingTrieNode(t *testing.T) { + t.Parallel() + + missingNodeErr := fmt.Errorf(common.GetNodeFromDBErrorString) + txHash := testTxHash + tdp := initDataPool() + rtp, _ := NewRewardTxPreprocessor( + tdp.RewardTransactions(), + &storageStubs.ChainStorerStub{}, + &hashingMocks.HasherMock{}, + &mock.MarshalizerMock{}, + &testscommon.RewardTxProcessorMock{}, + mock.NewMultiShardsCoordinatorMock(3), + &stateMock.AccountsStub{ + GetExistingAccountCalled: func(_ []byte) (vmcommon.AccountHandler, error) { + return nil, missingNodeErr + }, + }, + func(shardID uint32, txHashes [][]byte) {}, + &testscommon.GasHandlerStub{}, + createMockPubkeyConverter(), + 
&testscommon.BlockSizeComputationStub{}, + &testscommon.BalanceComputationStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, + ) + + txHashes := [][]byte{[]byte(txHash)} + txs := []data.TransactionHandler{&rewardTx.RewardTx{}} + rtp.AddTxs(txHashes, txs) + + mb1 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 1, + SenderShardID: 0, + Type: block.RewardsBlock, + } + mb2 := block.MiniBlock{ + TxHashes: txHashes, + ReceiverShardID: 0, + SenderShardID: 1, + Type: block.RewardsBlock, + } + + mbHash1, _ := core.CalculateHash(rtp.marshalizer, rtp.hasher, &mb1) + mbHash2, _ := core.CalculateHash(rtp.marshalizer, rtp.hasher, &mb2) + + var blockBody block.Body + blockBody.MiniBlocks = append(blockBody.MiniBlocks, &mb1, &mb2) + + err := rtp.ProcessBlockTransactions(&block.Header{MiniBlockHeaders: []block.MiniBlockHeader{{TxCount: 1, Hash: mbHash1}, {TxCount: 1, Hash: mbHash2}}}, &blockBody, haveTimeTrue) + assert.Equal(t, missingNodeErr, err) +} + func TestRewardTxPreprocessor_IsDataPreparedShouldErr(t *testing.T) { t.Parallel() diff --git a/process/block/preprocess/smartContractResults_test.go b/process/block/preprocess/smartContractResults_test.go index 3d2d8178056..5fcad42a95f 100644 --- a/process/block/preprocess/smartContractResults_test.go +++ b/process/block/preprocess/smartContractResults_test.go @@ -2,6 +2,7 @@ package preprocess import ( "encoding/json" + "fmt" "reflect" "testing" "time" @@ -11,6 +12,7 @@ import ( "github.com/ElrondNetwork/elrond-go-core/data" "github.com/ElrondNetwork/elrond-go-core/data/block" "github.com/ElrondNetwork/elrond-go-core/data/smartContractResult" + "github.com/ElrondNetwork/elrond-go/common" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/mock" @@ -1101,6 +1103,68 @@ func TestScrsPreprocessor_ProcessBlockTransactionsShouldWork(t *testing.T) { assert.Nil(t, err) } +func 
TestScrsPreprocessor_ProcessBlockTransactionsMissingTrieNode(t *testing.T) { + t.Parallel() + + missingNodeErr := fmt.Errorf(common.GetNodeFromDBErrorString) + tdp := initDataPool() + requestTransaction := func(shardID uint32, txHashes [][]byte) {} + scrPreproc, _ := NewSmartContractResultPreprocessor( + tdp.UnsignedTransactions(), + &storageStubs.ChainStorerStub{}, + &hashingMocks.HasherMock{}, + &mock.MarshalizerMock{}, + &testscommon.TxProcessorMock{ + ProcessSmartContractResultCalled: func(_ *smartContractResult.SmartContractResult) (vmcommon.ReturnCode, error) { + return 0, nil + }, + }, + mock.NewMultiShardsCoordinatorMock(3), + &stateMock.AccountsStub{ + GetExistingAccountCalled: func(_ []byte) (vmcommon.AccountHandler, error) { + return nil, missingNodeErr + }, + }, + requestTransaction, + &testscommon.GasHandlerStub{}, + feeHandlerMock(), + createMockPubkeyConverter(), + &testscommon.BlockSizeComputationStub{}, + &testscommon.BalanceComputationStub{}, + &testscommon.EnableEpochsHandlerStub{}, + &testscommon.ProcessedMiniBlocksTrackerStub{}, + ) + + body := &block.Body{} + + txHash := []byte("txHash") + txHashes := make([][]byte, 0) + txHashes = append(txHashes, txHash) + + miniblock := block.MiniBlock{ + ReceiverShardID: 0, + SenderShardID: 1, + TxHashes: txHashes, + Type: block.SmartContractResultBlock, + } + + miniblockHash, _ := core.CalculateHash(scrPreproc.marshalizer, scrPreproc.hasher, &miniblock) + + body.MiniBlocks = append(body.MiniBlocks, &miniblock) + + scrPreproc.AddScrHashToRequestedList([]byte("txHash")) + txshardInfo := txShardInfo{0, 0} + scr := smartContractResult.SmartContractResult{ + Nonce: 1, + Data: []byte("tx"), + } + + scrPreproc.scrForBlock.txHashAndInfo["txHash"] = &txInfo{&scr, &txshardInfo} + + err := scrPreproc.ProcessBlockTransactions(&block.Header{MiniBlockHeaders: []block.MiniBlockHeader{{TxCount: 1, Hash: miniblockHash}}}, body, haveTimeTrue) + assert.Equal(t, missingNodeErr, err) +} + func 
TestScrsPreprocessor_ProcessBlockTransactionsShouldErrMaxGasLimitPerBlockInSelfShardIsReached(t *testing.T) { t.Parallel() diff --git a/process/block/preprocess/transactionsV2_test.go b/process/block/preprocess/transactionsV2_test.go index 59dd6320fa1..1fa8a23d33e 100644 --- a/process/block/preprocess/transactionsV2_test.go +++ b/process/block/preprocess/transactionsV2_test.go @@ -2,6 +2,7 @@ package preprocess import ( "bytes" + "fmt" "math/big" "testing" @@ -10,6 +11,7 @@ import ( "github.com/ElrondNetwork/elrond-go-core/data/block" "github.com/ElrondNetwork/elrond-go-core/data/smartContractResult" "github.com/ElrondNetwork/elrond-go-core/data/transaction" + "github.com/ElrondNetwork/elrond-go/common" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/ElrondNetwork/elrond-go/storage/txcache" @@ -752,6 +754,56 @@ func TestTransactions_CreateAndProcessMiniBlocksFromMeV2ShouldWork(t *testing.T) assert.Equal(t, 2, len(mapSCTxs)) } +func TestTransactions_CreateAndProcessMiniBlocksFromMeV2MissingTrieNode(t *testing.T) { + t.Parallel() + + missingNodeErr := fmt.Errorf(common.GetNodeFromDBErrorString) + preprocessor := createTransactionPreprocessor() + preprocessor.txProcessor = &testscommon.TxProcessorMock{ + ProcessTransactionCalled: func(transaction *transaction.Transaction) (vmcommon.ReturnCode, error) { + return vmcommon.ExecutionFailed, missingNodeErr + }, + } + + haveTimeMethodReturn := true + isShardStuckMethodReturn := false + isMaxBlockSizeReachedMethodReturn := false + sortedTxs := make([]*txcache.WrappedTransaction, 0) + mapSCTxs := make(map[string]struct{}) + tx1 := &txcache.WrappedTransaction{ + ReceiverShardID: 0, + Tx: &transaction.Transaction{Nonce: 1}, + TxHash: []byte("hash1"), + } + tx2 := &txcache.WrappedTransaction{ + ReceiverShardID: 1, + Tx: &transaction.Transaction{Nonce: 2, RcvAddr: []byte("smart contract address")}, + TxHash: []byte("hash2"), + } + tx3 := &txcache.WrappedTransaction{ + 
ReceiverShardID: 2, + Tx: &transaction.Transaction{Nonce: 3, RcvAddr: []byte("smart contract address")}, + TxHash: []byte("hash3"), + } + sortedTxs = append(sortedTxs, tx1) + sortedTxs = append(sortedTxs, tx2) + sortedTxs = append(sortedTxs, tx3) + mapSCTxs["hash1"] = struct{}{} + + haveTimeMethod := func() bool { + return haveTimeMethodReturn + } + isShardStuckMethod := func(uint32) bool { + return isShardStuckMethodReturn + } + isMaxBlockSizeReachedMethod := func(int, int) bool { + return isMaxBlockSizeReachedMethodReturn + } + + _, _, _, err := preprocessor.createAndProcessMiniBlocksFromMeV2(haveTimeMethod, isShardStuckMethod, isMaxBlockSizeReachedMethod, sortedTxs) + assert.Equal(t, missingNodeErr, err) +} + func TestTransactions_ProcessTransactionShouldWork(t *testing.T) { t.Parallel() diff --git a/process/block/preprocess/transactions_test.go b/process/block/preprocess/transactions_test.go index d684332eaaa..d74bf26646d 100644 --- a/process/block/preprocess/transactions_test.go +++ b/process/block/preprocess/transactions_test.go @@ -1149,6 +1149,38 @@ func TestTransactionPreprocessor_ProcessTxsToMeShouldUseCorrectSenderAndReceiver assert.Equal(t, uint32(0), receiverShardID) } +func TestTransactionPreprocessor_ProcessTxsToMeMissingTrieNode(t *testing.T) { + t.Parallel() + + missingNodeErr := fmt.Errorf(common.GetNodeFromDBErrorString) + + args := createDefaultTransactionsProcessorArgs() + args.Accounts = &stateMock.AccountsStub{ + GetExistingAccountCalled: func(_ []byte) (vmcommon.AccountHandler, error) { + return nil, missingNodeErr + }, + } + preprocessor, _ := NewTransactionPreprocessor(args) + + tx := transaction.Transaction{SndAddr: []byte("2"), RcvAddr: []byte("0")} + txHash, _ := core.CalculateHash(preprocessor.marshalizer, preprocessor.hasher, tx) + miniBlock := &block.MiniBlock{ + TxHashes: [][]byte{txHash}, + SenderShardID: 1, + ReceiverShardID: 0, + Type: block.TxBlock, + } + miniBlockHash, _ := core.CalculateHash(preprocessor.marshalizer, 
preprocessor.hasher, miniBlock) + body := block.Body{ + MiniBlocks: []*block.MiniBlock{miniBlock}, + } + + preprocessor.AddTxForCurrentBlock(txHash, &tx, 1, 0) + + err := preprocessor.ProcessTxsToMe(&block.Header{MiniBlockHeaders: []block.MiniBlockHeader{{Hash: miniBlockHash, TxCount: 1}}}, &body, haveTimeTrue) + assert.Equal(t, missingNodeErr, err) +} + func TestTransactionsPreprocessor_ProcessMiniBlockShouldWork(t *testing.T) { t.Parallel() diff --git a/process/rewardTransaction/process_test.go b/process/rewardTransaction/process_test.go index feebdf4efca..001bd84a1b5 100644 --- a/process/rewardTransaction/process_test.go +++ b/process/rewardTransaction/process_test.go @@ -2,16 +2,19 @@ package rewardTransaction_test import ( "errors" + "fmt" "math/big" "testing" "github.com/ElrondNetwork/elrond-go-core/core" "github.com/ElrondNetwork/elrond-go-core/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/common" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" "github.com/ElrondNetwork/elrond-go/state" stateMock "github.com/ElrondNetwork/elrond-go/testscommon/state" + "github.com/ElrondNetwork/elrond-go/testscommon/trie" vmcommon "github.com/ElrondNetwork/elrond-vm-common" "github.com/stretchr/testify/assert" ) @@ -212,6 +215,41 @@ func TestRewardTxProcessor_ProcessRewardTransactionShouldWork(t *testing.T) { assert.True(t, saveAccountWasCalled) } +func TestRewardTxProcessor_ProcessRewardTransactionMissingTrieNode(t *testing.T) { + t.Parallel() + + missingNodeErr := fmt.Errorf(common.GetNodeFromDBErrorString) + accountsDb := &stateMock.AccountsStub{ + LoadAccountCalled: func(address []byte) (vmcommon.AccountHandler, error) { + acc, _ := state.NewUserAccount(address) + acc.SetDataTrie(&trie.TrieStub{ + GetCalled: func(key []byte) ([]byte, error) { + return nil, missingNodeErr + }, + }, + ) + + return acc, nil + }, + } + + rtp, _ := 
rewardTransaction.NewRewardTxProcessor( + accountsDb, + createMockPubkeyConverter(), + mock.NewMultiShardsCoordinatorMock(3), + ) + + rwdTx := rewardTx.RewardTx{ + Round: 0, + Epoch: 0, + Value: big.NewInt(100), + RcvAddr: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6}, + } + + err := rtp.ProcessRewardTransaction(&rwdTx) + assert.Equal(t, missingNodeErr, err) +} + func TestRewardTxProcessor_ProcessRewardTransactionToASmartContractShouldWork(t *testing.T) { t.Parallel() From cfb725ca82be70bdd96214b3da0a205a2ee1a1cc Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Tue, 18 Oct 2022 11:00:51 +0300 Subject: [PATCH 003/221] fix after review --- process/smartContract/process.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/process/smartContract/process.go b/process/smartContract/process.go index 39b4f8b5163..def73407321 100644 --- a/process/smartContract/process.go +++ b/process/smartContract/process.go @@ -344,7 +344,7 @@ func (sc *scProcessor) doExecuteSmartContractTransaction( results, err = sc.processVMOutput(vmOutput, txHash, tx, vmInput.CallType, vmInput.GasProvided) if err != nil { if errors.IsGetNodeFromDBError(err) { - return vmcommon.UserError, err + return vmcommon.ExecutionFailed, err } log.Trace("process vm output returned with problem ", "err", err.Error()) return vmcommon.ExecutionFailed, sc.ProcessIfError(acntSnd, txHash, tx, err.Error(), []byte(vmOutput.ReturnMessage), snapshot, vmInput.GasLocked) @@ -382,7 +382,7 @@ func (sc *scProcessor) executeSmartContractCall( sc.arwenChangeLocker.RUnlock() if err != nil { if errors.IsGetNodeFromDBError(err) { - return userErrorVmOutput, err + return nil, err } log.Debug("run smart contract call error", "error", err.Error()) return userErrorVmOutput, sc.ProcessIfError(acntSnd, txHash, tx, err.Error(), []byte(""), snapshot, vmInput.GasLocked) @@ -1047,7 +1047,7 @@ func (sc *scProcessor) resolveBuiltInFunctions( } if errors.IsGetNodeFromDBError(err) { - return vmOutput, err + 
return nil, err } return vmOutput, nil @@ -1680,7 +1680,7 @@ func (sc *scProcessor) doDeploySmartContract( if err != nil { log.Debug("VM error", "error", err.Error()) if errors.IsGetNodeFromDBError(err) { - return vmcommon.UserError, err + return vmcommon.ExecutionFailed, err } return vmcommon.UserError, sc.ProcessIfError(acntSnd, txHash, tx, err.Error(), []byte(""), snapshot, vmInput.GasLocked) } From 61440528d3532e7efadd6320b85d5b008dde8aad Mon Sep 17 00:00:00 2001 From: ssd04 Date: Mon, 21 Nov 2022 12:02:36 +0200 Subject: [PATCH 004/221] conflicts fix after merge --- state/accountsDB.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/state/accountsDB.go b/state/accountsDB.go index 21b0b51e2bd..9029439ca96 100644 --- a/state/accountsDB.go +++ b/state/accountsDB.go @@ -519,11 +519,7 @@ func (adb *AccountsDB) loadDataTrie(accountHandler baseAccountHandler, mainTrie dataTrie, err := mainTrie.Recreate(accountHandler.GetRootHash()) if err != nil { -<<<<<<< HEAD - return fmt.Errorf("%w for rootHash %v", err, accountHandler.GetRootHash()) -======= return fmt.Errorf("trie was not found for hash, rootHash = %s, err = %w", hex.EncodeToString(accountHandler.GetRootHash()), err) ->>>>>>> rc/v1.4.0 } accountHandler.SetDataTrie(dataTrie) From 0927cae81f6e90faaf24074e3a4601904c3c92dc Mon Sep 17 00:00:00 2001 From: ssd04 Date: Mon, 21 Nov 2022 12:13:04 +0200 Subject: [PATCH 005/221] conflicts fix after merge - fix unit test --- process/rewardTransaction/process_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/process/rewardTransaction/process_test.go b/process/rewardTransaction/process_test.go index 308301dbab1..427daf567b4 100644 --- a/process/rewardTransaction/process_test.go +++ b/process/rewardTransaction/process_test.go @@ -223,8 +223,8 @@ func TestRewardTxProcessor_ProcessRewardTransactionMissingTrieNode(t *testing.T) LoadAccountCalled: func(address []byte) (vmcommon.AccountHandler, error) { acc, _ := state.NewUserAccount(address) 
acc.SetDataTrie(&trie.TrieStub{ - GetCalled: func(key []byte) ([]byte, error) { - return nil, missingNodeErr + GetCalled: func(key []byte) ([]byte, uint32, error) { + return nil, 0, missingNodeErr }, }, ) From 088cc588f6042361fbb5ddda7ca0ceb66b90603c Mon Sep 17 00:00:00 2001 From: ssd04 Date: Mon, 21 Nov 2022 22:42:46 +0200 Subject: [PATCH 006/221] add custom error for trie get node --- process/sync/baseSync.go | 9 ++------- process/sync/metablock.go | 12 +++++++++--- process/sync/metablock_test.go | 3 ++- process/sync/shardblock.go | 8 +++++++- process/sync/shardblock_test.go | 3 ++- trie/errors.go | 28 ++++++++++++++++++++++++++++ trie/node.go | 4 +--- 7 files changed, 51 insertions(+), 16 deletions(-) diff --git a/process/sync/baseSync.go b/process/sync/baseSync.go index 2bb613d2d8c..1b07c400124 100644 --- a/process/sync/baseSync.go +++ b/process/sync/baseSync.go @@ -693,14 +693,9 @@ func (boot *baseBootstrap) handleTrieSyncError(err error, ctx context.Context) { } } -func (boot *baseBootstrap) syncUserAccountsState() error { - rootHash, err := boot.accounts.RootHash() - if err != nil { - return err - } - +func (boot *baseBootstrap) syncUserAccountsState(key []byte) error { log.Warn("base sync: started syncUserAccountsState") - return boot.accountsDBSyncer.SyncAccounts(rootHash) + return boot.accountsDBSyncer.SyncAccounts(key) } func (boot *baseBootstrap) cleanNoncesSyncedWithErrorsBehindFinal() { diff --git a/process/sync/metablock.go b/process/sync/metablock.go index 95d06f22563..c881dbc5c19 100644 --- a/process/sync/metablock.go +++ b/process/sync/metablock.go @@ -12,6 +12,7 @@ import ( "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/state" "github.com/ElrondNetwork/elrond-go/storage" + "github.com/ElrondNetwork/elrond-go/trie" ) // MetaBootstrap implements the bootstrap mechanism @@ -180,14 +181,19 @@ func (boot *MetaBootstrap) setLastEpochStartRound() { func (boot *MetaBootstrap) SyncBlock(ctx context.Context) error { err := 
boot.syncBlock() if errors.IsGetNodeFromDBError(err) { - errSync := boot.syncAccountsDBs() + getNodeErr, ok := err.(*trie.GetErr) + if !ok { + return err + } + + errSync := boot.syncAccountsDBs(getNodeErr.GetKey()) boot.handleTrieSyncError(errSync, ctx) } return err } -func (boot *MetaBootstrap) syncAccountsDBs() error { +func (boot *MetaBootstrap) syncAccountsDBs(key []byte) error { var err error err = boot.syncValidatorAccountsState() @@ -195,7 +201,7 @@ func (boot *MetaBootstrap) syncAccountsDBs() error { return err } - err = boot.syncUserAccountsState() + err = boot.syncUserAccountsState(key) if err != nil { return err } diff --git a/process/sync/metablock_test.go b/process/sync/metablock_test.go index 43c5b71a0a7..f3b2c1bc361 100644 --- a/process/sync/metablock_test.go +++ b/process/sync/metablock_test.go @@ -30,6 +30,7 @@ import ( stateMock "github.com/ElrondNetwork/elrond-go/testscommon/state" statusHandlerMock "github.com/ElrondNetwork/elrond-go/testscommon/statusHandler" storageStubs "github.com/ElrondNetwork/elrond-go/testscommon/storage" + "github.com/ElrondNetwork/elrond-go/trie" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -1621,7 +1622,7 @@ func TestMetaBootstrap_SyncBlockErrGetNodeDBShouldSyncAccounts(t *testing.T) { } args.ChainHandler = blkc - errGetNodeFromDB := errors.New(common.GetNodeFromDBErrorString) + errGetNodeFromDB := trie.NewGetErr([]byte("key")) blockProcessor := createMetaBlockProcessor(args.ChainHandler) blockProcessor.ProcessBlockCalled = func(header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { return errGetNodeFromDB diff --git a/process/sync/shardblock.go b/process/sync/shardblock.go index 785a33044ec..a975769d96f 100644 --- a/process/sync/shardblock.go +++ b/process/sync/shardblock.go @@ -12,6 +12,7 @@ import ( "github.com/ElrondNetwork/elrond-go/errors" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/storage" + 
"github.com/ElrondNetwork/elrond-go/trie" ) // ShardBootstrap implements the bootstrap mechanism @@ -144,7 +145,12 @@ func (boot *ShardBootstrap) StartSyncingBlocks() { func (boot *ShardBootstrap) SyncBlock(ctx context.Context) error { err := boot.syncBlock() if errors.IsGetNodeFromDBError(err) { - errSync := boot.syncUserAccountsState() + getNodeErr, ok := err.(*trie.GetErr) + if !ok { + return err + } + + errSync := boot.syncUserAccountsState(getNodeErr.GetKey()) boot.handleTrieSyncError(errSync, ctx) } diff --git a/process/sync/shardblock_test.go b/process/sync/shardblock_test.go index 58b1fbd13de..782577af3ab 100644 --- a/process/sync/shardblock_test.go +++ b/process/sync/shardblock_test.go @@ -34,6 +34,7 @@ import ( stateMock "github.com/ElrondNetwork/elrond-go/testscommon/state" statusHandlerMock "github.com/ElrondNetwork/elrond-go/testscommon/statusHandler" storageStubs "github.com/ElrondNetwork/elrond-go/testscommon/storage" + "github.com/ElrondNetwork/elrond-go/trie" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -2061,7 +2062,7 @@ func TestShardBootstrap_SyncBlockGetNodeDBErrorShouldSync(t *testing.T) { } args.ChainHandler = blkc - errGetNodeFromDB := errors.New(common.GetNodeFromDBErrorString) + errGetNodeFromDB := trie.NewGetErr([]byte("key")) blockProcessor := createBlockProcessor(args.ChainHandler) blockProcessor.ProcessBlockCalled = func(header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { return errGetNodeFromDB diff --git a/trie/errors.go b/trie/errors.go index a225a84c00c..03142a6821c 100644 --- a/trie/errors.go +++ b/trie/errors.go @@ -1,9 +1,37 @@ package trie import ( + "encoding/hex" "errors" + "fmt" + + "github.com/ElrondNetwork/elrond-go/common" ) +// GetErr defines a custom error for trie get node +type GetErr struct { + key []byte +} + +// NewGetErr will create a new instance of GetErr +func NewGetErr(key []byte) *GetErr { + return &GetErr{key: key} +} + +// Error returns 
the error as string +func (e *GetErr) Error() string { + return fmt.Sprintf( + "%s for key %v", + common.GetNodeFromDBErrorString, + hex.EncodeToString(e.key), + ) +} + +// GetKey will return the key that generated the error +func (e *GetErr) GetKey() []byte { + return e.key +} + // ErrInvalidNode is raised when we reach an invalid node var ErrInvalidNode = errors.New("invalid node") diff --git a/trie/node.go b/trie/node.go index ac03e15d422..5fe87fccaf7 100644 --- a/trie/node.go +++ b/trie/node.go @@ -3,8 +3,6 @@ package trie import ( "context" - "encoding/hex" - "fmt" "time" "github.com/ElrondNetwork/elrond-go-core/hashing" @@ -120,7 +118,7 @@ func getNodeFromDBAndDecode(n []byte, db common.DBWriteCacher, marshalizer marsh encChild, err := db.Get(n) if err != nil { log.Trace(common.GetNodeFromDBErrorString, "error", err, "key", n) - return nil, fmt.Errorf(common.GetNodeFromDBErrorString+" %w for key %v", err, hex.EncodeToString(n)) + return nil, NewGetErr(n) } return decodeNode(encChild, marshalizer, hasher) From e0dbd57c87a0daa49f4e52d56998497063b37eeb Mon Sep 17 00:00:00 2001 From: ssd04 Date: Mon, 21 Nov 2022 22:58:24 +0200 Subject: [PATCH 007/221] move get node from db custom error to common errors --- errors/missingTrieNodeError.go | 26 ++++++++++++++++++++++++++ process/sync/metablock.go | 3 +-- process/sync/metablock_test.go | 4 ++-- process/sync/shardblock.go | 3 +-- process/sync/shardblock_test.go | 4 ++-- trie/errors.go | 28 ---------------------------- trie/node.go | 2 +- 7 files changed, 33 insertions(+), 37 deletions(-) diff --git a/errors/missingTrieNodeError.go b/errors/missingTrieNodeError.go index 029cc45069f..3cf5961029f 100644 --- a/errors/missingTrieNodeError.go +++ b/errors/missingTrieNodeError.go @@ -1,6 +1,8 @@ package errors import ( + "encoding/hex" + "fmt" "strings" "github.com/ElrondNetwork/elrond-go/common" @@ -22,3 +24,27 @@ func IsGetNodeFromDBError(err error) bool { return false } + +// GetNodeFromDBErr defines a custom error for 
trie get node +type GetNodeFromDBErr struct { + key []byte +} + +// NewGetNodeFromDBErr will create a new instance of GetNodeFromDBErr +func NewGetNodeFromDBErr(key []byte) *GetNodeFromDBErr { + return &GetNodeFromDBErr{key: key} +} + +// Error returns the error as string +func (e *GetNodeFromDBErr) Error() string { + return fmt.Sprintf( + "%s for key %v", + common.GetNodeFromDBErrorString, + hex.EncodeToString(e.key), + ) +} + +// GetKey will return the key that generated the error +func (e *GetNodeFromDBErr) GetKey() []byte { + return e.key +} diff --git a/process/sync/metablock.go b/process/sync/metablock.go index c881dbc5c19..5021ba9b428 100644 --- a/process/sync/metablock.go +++ b/process/sync/metablock.go @@ -12,7 +12,6 @@ import ( "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/state" "github.com/ElrondNetwork/elrond-go/storage" - "github.com/ElrondNetwork/elrond-go/trie" ) // MetaBootstrap implements the bootstrap mechanism @@ -181,7 +180,7 @@ func (boot *MetaBootstrap) setLastEpochStartRound() { func (boot *MetaBootstrap) SyncBlock(ctx context.Context) error { err := boot.syncBlock() if errors.IsGetNodeFromDBError(err) { - getNodeErr, ok := err.(*trie.GetErr) + getNodeErr, ok := err.(*errors.GetNodeFromDBErr) if !ok { return err } diff --git a/process/sync/metablock_test.go b/process/sync/metablock_test.go index f3b2c1bc361..efc230dedf9 100644 --- a/process/sync/metablock_test.go +++ b/process/sync/metablock_test.go @@ -19,6 +19,7 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus/round" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/dataRetriever/blockchain" + commonErrors "github.com/ElrondNetwork/elrond-go/errors" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/ElrondNetwork/elrond-go/process/sync" @@ -30,7 +31,6 @@ import ( stateMock "github.com/ElrondNetwork/elrond-go/testscommon/state" statusHandlerMock 
"github.com/ElrondNetwork/elrond-go/testscommon/statusHandler" storageStubs "github.com/ElrondNetwork/elrond-go/testscommon/storage" - "github.com/ElrondNetwork/elrond-go/trie" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -1622,7 +1622,7 @@ func TestMetaBootstrap_SyncBlockErrGetNodeDBShouldSyncAccounts(t *testing.T) { } args.ChainHandler = blkc - errGetNodeFromDB := trie.NewGetErr([]byte("key")) + errGetNodeFromDB := commonErrors.NewGetNodeFromDBErr([]byte("key")) blockProcessor := createMetaBlockProcessor(args.ChainHandler) blockProcessor.ProcessBlockCalled = func(header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { return errGetNodeFromDB diff --git a/process/sync/shardblock.go b/process/sync/shardblock.go index a975769d96f..204718a6b9c 100644 --- a/process/sync/shardblock.go +++ b/process/sync/shardblock.go @@ -12,7 +12,6 @@ import ( "github.com/ElrondNetwork/elrond-go/errors" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/storage" - "github.com/ElrondNetwork/elrond-go/trie" ) // ShardBootstrap implements the bootstrap mechanism @@ -145,7 +144,7 @@ func (boot *ShardBootstrap) StartSyncingBlocks() { func (boot *ShardBootstrap) SyncBlock(ctx context.Context) error { err := boot.syncBlock() if errors.IsGetNodeFromDBError(err) { - getNodeErr, ok := err.(*trie.GetErr) + getNodeErr, ok := err.(*errors.GetNodeFromDBErr) if !ok { return err } diff --git a/process/sync/shardblock_test.go b/process/sync/shardblock_test.go index 782577af3ab..26d4b469f2b 100644 --- a/process/sync/shardblock_test.go +++ b/process/sync/shardblock_test.go @@ -20,6 +20,7 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus/round" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/dataRetriever/blockchain" + commonErrors "github.com/ElrondNetwork/elrond-go/errors" "github.com/ElrondNetwork/elrond-go/process" 
"github.com/ElrondNetwork/elrond-go/process/mock" "github.com/ElrondNetwork/elrond-go/process/sync" @@ -34,7 +35,6 @@ import ( stateMock "github.com/ElrondNetwork/elrond-go/testscommon/state" statusHandlerMock "github.com/ElrondNetwork/elrond-go/testscommon/statusHandler" storageStubs "github.com/ElrondNetwork/elrond-go/testscommon/storage" - "github.com/ElrondNetwork/elrond-go/trie" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -2062,7 +2062,7 @@ func TestShardBootstrap_SyncBlockGetNodeDBErrorShouldSync(t *testing.T) { } args.ChainHandler = blkc - errGetNodeFromDB := trie.NewGetErr([]byte("key")) + errGetNodeFromDB := commonErrors.NewGetNodeFromDBErr([]byte("key")) blockProcessor := createBlockProcessor(args.ChainHandler) blockProcessor.ProcessBlockCalled = func(header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { return errGetNodeFromDB diff --git a/trie/errors.go b/trie/errors.go index 03142a6821c..a225a84c00c 100644 --- a/trie/errors.go +++ b/trie/errors.go @@ -1,37 +1,9 @@ package trie import ( - "encoding/hex" "errors" - "fmt" - - "github.com/ElrondNetwork/elrond-go/common" ) -// GetErr defines a custom error for trie get node -type GetErr struct { - key []byte -} - -// NewGetErr will create a new instance of GetErr -func NewGetErr(key []byte) *GetErr { - return &GetErr{key: key} -} - -// Error returns the error as string -func (e *GetErr) Error() string { - return fmt.Sprintf( - "%s for key %v", - common.GetNodeFromDBErrorString, - hex.EncodeToString(e.key), - ) -} - -// GetKey will return the key that generated the error -func (e *GetErr) GetKey() []byte { - return e.key -} - // ErrInvalidNode is raised when we reach an invalid node var ErrInvalidNode = errors.New("invalid node") diff --git a/trie/node.go b/trie/node.go index 5fe87fccaf7..45cff10ef97 100644 --- a/trie/node.go +++ b/trie/node.go @@ -118,7 +118,7 @@ func getNodeFromDBAndDecode(n []byte, db common.DBWriteCacher, marshalizer 
marsh encChild, err := db.Get(n) if err != nil { log.Trace(common.GetNodeFromDBErrorString, "error", err, "key", n) - return nil, NewGetErr(n) + return nil, errors.NewGetNodeFromDBErr(n) } return decodeNode(encChild, marshalizer, hasher) From 004264ca9183d88e879dc962fa48f05d4c80bc32 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Mon, 21 Nov 2022 23:23:31 +0200 Subject: [PATCH 008/221] fix unit test on context closing --- errors/missingTrieNodeError.go | 10 ++++++---- trie/node.go | 2 +- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/errors/missingTrieNodeError.go b/errors/missingTrieNodeError.go index 3cf5961029f..2de83156eac 100644 --- a/errors/missingTrieNodeError.go +++ b/errors/missingTrieNodeError.go @@ -27,19 +27,21 @@ func IsGetNodeFromDBError(err error) bool { // GetNodeFromDBErr defines a custom error for trie get node type GetNodeFromDBErr struct { - key []byte + getErr error + key []byte } // NewGetNodeFromDBErr will create a new instance of GetNodeFromDBErr -func NewGetNodeFromDBErr(key []byte) *GetNodeFromDBErr { - return &GetNodeFromDBErr{key: key} +func NewGetNodeFromDBErr(key []byte, err error) *GetNodeFromDBErr { + return &GetNodeFromDBErr{getErr: err, key: key} } // Error returns the error as string func (e *GetNodeFromDBErr) Error() string { return fmt.Sprintf( - "%s for key %v", + "%s: %s for key %v", common.GetNodeFromDBErrorString, + e.getErr.Error(), hex.EncodeToString(e.key), ) } diff --git a/trie/node.go b/trie/node.go index 45cff10ef97..f6ee33bfe2e 100644 --- a/trie/node.go +++ b/trie/node.go @@ -118,7 +118,7 @@ func getNodeFromDBAndDecode(n []byte, db common.DBWriteCacher, marshalizer marsh encChild, err := db.Get(n) if err != nil { log.Trace(common.GetNodeFromDBErrorString, "error", err, "key", n) - return nil, errors.NewGetNodeFromDBErr(n) + return nil, errors.NewGetNodeFromDBErr(n, err) } return decodeNode(encChild, marshalizer, hasher) From 0d6e8d6ad507b55da198b4d17a2facfb04fa0595 Mon Sep 17 00:00:00 2001 From: ssd04 Date: 
Mon, 21 Nov 2022 23:25:10 +0200 Subject: [PATCH 009/221] fix unit tests in process sync --- process/sync/metablock_test.go | 2 +- process/sync/shardblock_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/process/sync/metablock_test.go b/process/sync/metablock_test.go index efc230dedf9..07055519d90 100644 --- a/process/sync/metablock_test.go +++ b/process/sync/metablock_test.go @@ -1622,7 +1622,7 @@ func TestMetaBootstrap_SyncBlockErrGetNodeDBShouldSyncAccounts(t *testing.T) { } args.ChainHandler = blkc - errGetNodeFromDB := commonErrors.NewGetNodeFromDBErr([]byte("key")) + errGetNodeFromDB := commonErrors.NewGetNodeFromDBErr([]byte("key"), errors.New("get error")) blockProcessor := createMetaBlockProcessor(args.ChainHandler) blockProcessor.ProcessBlockCalled = func(header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { return errGetNodeFromDB diff --git a/process/sync/shardblock_test.go b/process/sync/shardblock_test.go index 26d4b469f2b..396a7805ff2 100644 --- a/process/sync/shardblock_test.go +++ b/process/sync/shardblock_test.go @@ -2062,7 +2062,7 @@ func TestShardBootstrap_SyncBlockGetNodeDBErrorShouldSync(t *testing.T) { } args.ChainHandler = blkc - errGetNodeFromDB := commonErrors.NewGetNodeFromDBErr([]byte("key")) + errGetNodeFromDB := commonErrors.NewGetNodeFromDBErr([]byte("key"), errors.New("get error")) blockProcessor := createBlockProcessor(args.ChainHandler) blockProcessor.ProcessBlockCalled = func(header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { return errGetNodeFromDB From 0141588292623a51a38e27d1d6ef9cd21231a4c1 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Wed, 23 Nov 2022 10:00:29 +0200 Subject: [PATCH 010/221] add custom error cast check in error check function --- errors/missingTrieNodeError.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/errors/missingTrieNodeError.go b/errors/missingTrieNodeError.go index 
2de83156eac..b379c349baa 100644 --- a/errors/missingTrieNodeError.go +++ b/errors/missingTrieNodeError.go @@ -3,7 +3,6 @@ package errors import ( "encoding/hex" "fmt" - "strings" "github.com/ElrondNetwork/elrond-go/common" ) @@ -18,11 +17,12 @@ func IsGetNodeFromDBError(err error) bool { return false } - if strings.Contains(err.Error(), common.GetNodeFromDBErrorString) { - return true + _, ok := err.(*GetNodeFromDBErr) + if !ok { + return false } - return false + return true } // GetNodeFromDBErr defines a custom error for trie get node From 8b2d6e4db684f0a266cbdc6661bf484da4e8343d Mon Sep 17 00:00:00 2001 From: ssd04 Date: Wed, 23 Nov 2022 10:47:37 +0200 Subject: [PATCH 011/221] use new custom error in process unit tests --- process/block/preprocess/rewardTxPreProcessor_test.go | 6 +++--- process/block/preprocess/smartContractResults_test.go | 6 +++--- process/block/preprocess/transactionsV2_test.go | 6 +++--- process/block/preprocess/transactions_test.go | 3 ++- process/rewardTransaction/process_test.go | 5 ++--- 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/process/block/preprocess/rewardTxPreProcessor_test.go b/process/block/preprocess/rewardTxPreProcessor_test.go index 35c09c7347b..9629b956ac4 100644 --- a/process/block/preprocess/rewardTxPreProcessor_test.go +++ b/process/block/preprocess/rewardTxPreProcessor_test.go @@ -1,7 +1,7 @@ package preprocess import ( - "fmt" + "errors" "testing" "time" @@ -9,8 +9,8 @@ import ( "github.com/ElrondNetwork/elrond-go-core/data" "github.com/ElrondNetwork/elrond-go-core/data/block" "github.com/ElrondNetwork/elrond-go-core/data/rewardTx" - "github.com/ElrondNetwork/elrond-go/common" "github.com/ElrondNetwork/elrond-go/dataRetriever" + commonErrors "github.com/ElrondNetwork/elrond-go/errors" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/ElrondNetwork/elrond-go/storage" @@ -684,7 +684,7 @@ func TestRewardTxPreprocessor_ProcessBlockTransactions(t 
*testing.T) { func TestRewardTxPreprocessor_ProcessBlockTransactionsMissingTrieNode(t *testing.T) { t.Parallel() - missingNodeErr := fmt.Errorf(common.GetNodeFromDBErrorString) + missingNodeErr := commonErrors.NewGetNodeFromDBErr([]byte("key"), errors.New("get error")) txHash := testTxHash tdp := initDataPool() rtp, _ := NewRewardTxPreprocessor( diff --git a/process/block/preprocess/smartContractResults_test.go b/process/block/preprocess/smartContractResults_test.go index 5fcad42a95f..66287ae04fa 100644 --- a/process/block/preprocess/smartContractResults_test.go +++ b/process/block/preprocess/smartContractResults_test.go @@ -2,7 +2,7 @@ package preprocess import ( "encoding/json" - "fmt" + "errors" "reflect" "testing" "time" @@ -12,8 +12,8 @@ import ( "github.com/ElrondNetwork/elrond-go-core/data" "github.com/ElrondNetwork/elrond-go-core/data/block" "github.com/ElrondNetwork/elrond-go-core/data/smartContractResult" - "github.com/ElrondNetwork/elrond-go/common" "github.com/ElrondNetwork/elrond-go/dataRetriever" + commonErrors "github.com/ElrondNetwork/elrond-go/errors" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/ElrondNetwork/elrond-go/storage" @@ -1106,7 +1106,7 @@ func TestScrsPreprocessor_ProcessBlockTransactionsShouldWork(t *testing.T) { func TestScrsPreprocessor_ProcessBlockTransactionsMissingTrieNode(t *testing.T) { t.Parallel() - missingNodeErr := fmt.Errorf(common.GetNodeFromDBErrorString) + missingNodeErr := commonErrors.NewGetNodeFromDBErr([]byte("key"), errors.New("get error")) tdp := initDataPool() requestTransaction := func(shardID uint32, txHashes [][]byte) {} scrPreproc, _ := NewSmartContractResultPreprocessor( diff --git a/process/block/preprocess/transactionsV2_test.go b/process/block/preprocess/transactionsV2_test.go index 1fa8a23d33e..5823d5b33ce 100644 --- a/process/block/preprocess/transactionsV2_test.go +++ b/process/block/preprocess/transactionsV2_test.go @@ -2,7 +2,7 @@ package 
preprocess import ( "bytes" - "fmt" + "errors" "math/big" "testing" @@ -11,7 +11,7 @@ import ( "github.com/ElrondNetwork/elrond-go-core/data/block" "github.com/ElrondNetwork/elrond-go-core/data/smartContractResult" "github.com/ElrondNetwork/elrond-go-core/data/transaction" - "github.com/ElrondNetwork/elrond-go/common" + commonErrors "github.com/ElrondNetwork/elrond-go/errors" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/ElrondNetwork/elrond-go/storage/txcache" @@ -757,7 +757,7 @@ func TestTransactions_CreateAndProcessMiniBlocksFromMeV2ShouldWork(t *testing.T) func TestTransactions_CreateAndProcessMiniBlocksFromMeV2MissingTrieNode(t *testing.T) { t.Parallel() - missingNodeErr := fmt.Errorf(common.GetNodeFromDBErrorString) + missingNodeErr := commonErrors.NewGetNodeFromDBErr([]byte("key"), errors.New("get error")) preprocessor := createTransactionPreprocessor() preprocessor.txProcessor = &testscommon.TxProcessorMock{ ProcessTransactionCalled: func(transaction *transaction.Transaction) (vmcommon.ReturnCode, error) { diff --git a/process/block/preprocess/transactions_test.go b/process/block/preprocess/transactions_test.go index d74bf26646d..2e20593c25b 100644 --- a/process/block/preprocess/transactions_test.go +++ b/process/block/preprocess/transactions_test.go @@ -24,6 +24,7 @@ import ( "github.com/ElrondNetwork/elrond-go-core/marshal" "github.com/ElrondNetwork/elrond-go/common" "github.com/ElrondNetwork/elrond-go/dataRetriever" + commonErrors "github.com/ElrondNetwork/elrond-go/errors" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/ElrondNetwork/elrond-go/sharding" @@ -1152,7 +1153,7 @@ func TestTransactionPreprocessor_ProcessTxsToMeShouldUseCorrectSenderAndReceiver func TestTransactionPreprocessor_ProcessTxsToMeMissingTrieNode(t *testing.T) { t.Parallel() - missingNodeErr := fmt.Errorf(common.GetNodeFromDBErrorString) + missingNodeErr := 
commonErrors.NewGetNodeFromDBErr([]byte("key"), errors.New("get error")) args := createDefaultTransactionsProcessorArgs() args.Accounts = &stateMock.AccountsStub{ diff --git a/process/rewardTransaction/process_test.go b/process/rewardTransaction/process_test.go index 427daf567b4..e560d5f6f47 100644 --- a/process/rewardTransaction/process_test.go +++ b/process/rewardTransaction/process_test.go @@ -2,13 +2,12 @@ package rewardTransaction_test import ( "errors" - "fmt" "math/big" "testing" "github.com/ElrondNetwork/elrond-go-core/core" "github.com/ElrondNetwork/elrond-go-core/data/rewardTx" - "github.com/ElrondNetwork/elrond-go/common" + commonErrors "github.com/ElrondNetwork/elrond-go/errors" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" @@ -218,7 +217,7 @@ func TestRewardTxProcessor_ProcessRewardTransactionShouldWork(t *testing.T) { func TestRewardTxProcessor_ProcessRewardTransactionMissingTrieNode(t *testing.T) { t.Parallel() - missingNodeErr := fmt.Errorf(common.GetNodeFromDBErrorString) + missingNodeErr := commonErrors.NewGetNodeFromDBErr([]byte("key"), errors.New("get error")) accountsDb := &stateMock.AccountsStub{ LoadAccountCalled: func(address []byte) (vmcommon.AccountHandler, error) { acc, _ := state.NewUserAccount(address) From 8d6ea224fdc8d8ed0290239e058620831e8ef171 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Wed, 23 Nov 2022 11:00:47 +0200 Subject: [PATCH 012/221] fix linter issue --- errors/missingTrieNodeError.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/errors/missingTrieNodeError.go b/errors/missingTrieNodeError.go index b379c349baa..4e0774b0c54 100644 --- a/errors/missingTrieNodeError.go +++ b/errors/missingTrieNodeError.go @@ -18,11 +18,7 @@ func IsGetNodeFromDBError(err error) bool { } _, ok := err.(*GetNodeFromDBErr) - if !ok { - return false - } - - return true + return ok } // GetNodeFromDBErr defines a 
custom error for trie get node From 3bcb4a334134418e28c26b792b4894a836cdce57 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Wed, 23 Nov 2022 12:30:46 +0200 Subject: [PATCH 013/221] revert get node from db error additional check --- errors/missingTrieNodeError.go | 8 ++++++-- process/block/preprocess/rewardTxPreProcessor_test.go | 6 +++--- process/block/preprocess/smartContractResults_test.go | 6 +++--- process/block/preprocess/transactionsV2_test.go | 6 +++--- process/block/preprocess/transactions_test.go | 3 +-- process/rewardTransaction/process_test.go | 5 +++-- 6 files changed, 19 insertions(+), 15 deletions(-) diff --git a/errors/missingTrieNodeError.go b/errors/missingTrieNodeError.go index 4e0774b0c54..2de83156eac 100644 --- a/errors/missingTrieNodeError.go +++ b/errors/missingTrieNodeError.go @@ -3,6 +3,7 @@ package errors import ( "encoding/hex" "fmt" + "strings" "github.com/ElrondNetwork/elrond-go/common" ) @@ -17,8 +18,11 @@ func IsGetNodeFromDBError(err error) bool { return false } - _, ok := err.(*GetNodeFromDBErr) - return ok + if strings.Contains(err.Error(), common.GetNodeFromDBErrorString) { + return true + } + + return false } // GetNodeFromDBErr defines a custom error for trie get node diff --git a/process/block/preprocess/rewardTxPreProcessor_test.go b/process/block/preprocess/rewardTxPreProcessor_test.go index 9629b956ac4..35c09c7347b 100644 --- a/process/block/preprocess/rewardTxPreProcessor_test.go +++ b/process/block/preprocess/rewardTxPreProcessor_test.go @@ -1,7 +1,7 @@ package preprocess import ( - "errors" + "fmt" "testing" "time" @@ -9,8 +9,8 @@ import ( "github.com/ElrondNetwork/elrond-go-core/data" "github.com/ElrondNetwork/elrond-go-core/data/block" "github.com/ElrondNetwork/elrond-go-core/data/rewardTx" + "github.com/ElrondNetwork/elrond-go/common" "github.com/ElrondNetwork/elrond-go/dataRetriever" - commonErrors "github.com/ElrondNetwork/elrond-go/errors" "github.com/ElrondNetwork/elrond-go/process" 
"github.com/ElrondNetwork/elrond-go/process/mock" "github.com/ElrondNetwork/elrond-go/storage" @@ -684,7 +684,7 @@ func TestRewardTxPreprocessor_ProcessBlockTransactions(t *testing.T) { func TestRewardTxPreprocessor_ProcessBlockTransactionsMissingTrieNode(t *testing.T) { t.Parallel() - missingNodeErr := commonErrors.NewGetNodeFromDBErr([]byte("key"), errors.New("get error")) + missingNodeErr := fmt.Errorf(common.GetNodeFromDBErrorString) txHash := testTxHash tdp := initDataPool() rtp, _ := NewRewardTxPreprocessor( diff --git a/process/block/preprocess/smartContractResults_test.go b/process/block/preprocess/smartContractResults_test.go index 66287ae04fa..5fcad42a95f 100644 --- a/process/block/preprocess/smartContractResults_test.go +++ b/process/block/preprocess/smartContractResults_test.go @@ -2,7 +2,7 @@ package preprocess import ( "encoding/json" - "errors" + "fmt" "reflect" "testing" "time" @@ -12,8 +12,8 @@ import ( "github.com/ElrondNetwork/elrond-go-core/data" "github.com/ElrondNetwork/elrond-go-core/data/block" "github.com/ElrondNetwork/elrond-go-core/data/smartContractResult" + "github.com/ElrondNetwork/elrond-go/common" "github.com/ElrondNetwork/elrond-go/dataRetriever" - commonErrors "github.com/ElrondNetwork/elrond-go/errors" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/ElrondNetwork/elrond-go/storage" @@ -1106,7 +1106,7 @@ func TestScrsPreprocessor_ProcessBlockTransactionsShouldWork(t *testing.T) { func TestScrsPreprocessor_ProcessBlockTransactionsMissingTrieNode(t *testing.T) { t.Parallel() - missingNodeErr := commonErrors.NewGetNodeFromDBErr([]byte("key"), errors.New("get error")) + missingNodeErr := fmt.Errorf(common.GetNodeFromDBErrorString) tdp := initDataPool() requestTransaction := func(shardID uint32, txHashes [][]byte) {} scrPreproc, _ := NewSmartContractResultPreprocessor( diff --git a/process/block/preprocess/transactionsV2_test.go b/process/block/preprocess/transactionsV2_test.go 
index 5823d5b33ce..1fa8a23d33e 100644 --- a/process/block/preprocess/transactionsV2_test.go +++ b/process/block/preprocess/transactionsV2_test.go @@ -2,7 +2,7 @@ package preprocess import ( "bytes" - "errors" + "fmt" "math/big" "testing" @@ -11,7 +11,7 @@ import ( "github.com/ElrondNetwork/elrond-go-core/data/block" "github.com/ElrondNetwork/elrond-go-core/data/smartContractResult" "github.com/ElrondNetwork/elrond-go-core/data/transaction" - commonErrors "github.com/ElrondNetwork/elrond-go/errors" + "github.com/ElrondNetwork/elrond-go/common" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/ElrondNetwork/elrond-go/storage/txcache" @@ -757,7 +757,7 @@ func TestTransactions_CreateAndProcessMiniBlocksFromMeV2ShouldWork(t *testing.T) func TestTransactions_CreateAndProcessMiniBlocksFromMeV2MissingTrieNode(t *testing.T) { t.Parallel() - missingNodeErr := commonErrors.NewGetNodeFromDBErr([]byte("key"), errors.New("get error")) + missingNodeErr := fmt.Errorf(common.GetNodeFromDBErrorString) preprocessor := createTransactionPreprocessor() preprocessor.txProcessor = &testscommon.TxProcessorMock{ ProcessTransactionCalled: func(transaction *transaction.Transaction) (vmcommon.ReturnCode, error) { diff --git a/process/block/preprocess/transactions_test.go b/process/block/preprocess/transactions_test.go index 2e20593c25b..d74bf26646d 100644 --- a/process/block/preprocess/transactions_test.go +++ b/process/block/preprocess/transactions_test.go @@ -24,7 +24,6 @@ import ( "github.com/ElrondNetwork/elrond-go-core/marshal" "github.com/ElrondNetwork/elrond-go/common" "github.com/ElrondNetwork/elrond-go/dataRetriever" - commonErrors "github.com/ElrondNetwork/elrond-go/errors" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/ElrondNetwork/elrond-go/sharding" @@ -1153,7 +1152,7 @@ func TestTransactionPreprocessor_ProcessTxsToMeShouldUseCorrectSenderAndReceiver func 
TestTransactionPreprocessor_ProcessTxsToMeMissingTrieNode(t *testing.T) { t.Parallel() - missingNodeErr := commonErrors.NewGetNodeFromDBErr([]byte("key"), errors.New("get error")) + missingNodeErr := fmt.Errorf(common.GetNodeFromDBErrorString) args := createDefaultTransactionsProcessorArgs() args.Accounts = &stateMock.AccountsStub{ diff --git a/process/rewardTransaction/process_test.go b/process/rewardTransaction/process_test.go index e560d5f6f47..427daf567b4 100644 --- a/process/rewardTransaction/process_test.go +++ b/process/rewardTransaction/process_test.go @@ -2,12 +2,13 @@ package rewardTransaction_test import ( "errors" + "fmt" "math/big" "testing" "github.com/ElrondNetwork/elrond-go-core/core" "github.com/ElrondNetwork/elrond-go-core/data/rewardTx" - commonErrors "github.com/ElrondNetwork/elrond-go/errors" + "github.com/ElrondNetwork/elrond-go/common" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/mock" "github.com/ElrondNetwork/elrond-go/process/rewardTransaction" @@ -217,7 +218,7 @@ func TestRewardTxProcessor_ProcessRewardTransactionShouldWork(t *testing.T) { func TestRewardTxProcessor_ProcessRewardTransactionMissingTrieNode(t *testing.T) { t.Parallel() - missingNodeErr := commonErrors.NewGetNodeFromDBErr([]byte("key"), errors.New("get error")) + missingNodeErr := fmt.Errorf(common.GetNodeFromDBErrorString) accountsDb := &stateMock.AccountsStub{ LoadAccountCalled: func(address []byte) (vmcommon.AccountHandler, error) { acc, _ := state.NewUserAccount(address) From 4462ceda6d573ac38858b9d3132b9c1e45f6fe40 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Wed, 23 Nov 2022 15:06:50 +0200 Subject: [PATCH 014/221] use identifier to determine storage type --- common/constants.go | 3 +++ errors/missingTrieNodeError.go | 18 ++++++++++++++---- process/sync/metablock.go | 34 +++++++++++++--------------------- trie/node.go | 10 ++++++++-- 4 files changed, 38 insertions(+), 27 deletions(-) diff --git a/common/constants.go 
b/common/constants.go index 4aa4033eaa0..bf2f53854c8 100644 --- a/common/constants.go +++ b/common/constants.go @@ -834,3 +834,6 @@ const MetricTrieSyncNumReceivedBytes = "erd_trie_sync_num_bytes_received" // MetricTrieSyncNumProcessedNodes is the metric that outputs the number of trie nodes processed for accounts during trie sync const MetricTrieSyncNumProcessedNodes = "erd_trie_sync_num_nodes_processed" + +const AccountsTrieIdentifier = "AccountsTrie" +const PeerAccountsTrieIdentifier = "PeerAccountsTrie" diff --git a/errors/missingTrieNodeError.go b/errors/missingTrieNodeError.go index 2de83156eac..cd9782df5a3 100644 --- a/errors/missingTrieNodeError.go +++ b/errors/missingTrieNodeError.go @@ -27,13 +27,18 @@ func IsGetNodeFromDBError(err error) bool { // GetNodeFromDBErr defines a custom error for trie get node type GetNodeFromDBErr struct { - getErr error - key []byte + getErr error + key []byte + identifier string } // NewGetNodeFromDBErr will create a new instance of GetNodeFromDBErr -func NewGetNodeFromDBErr(key []byte, err error) *GetNodeFromDBErr { - return &GetNodeFromDBErr{getErr: err, key: key} +func NewGetNodeFromDBErr(key []byte, err error, id string) *GetNodeFromDBErr { + return &GetNodeFromDBErr{ + getErr: err, + key: key, + identifier: id, + } } // Error returns the error as string @@ -50,3 +55,8 @@ func (e *GetNodeFromDBErr) Error() string { func (e *GetNodeFromDBErr) GetKey() []byte { return e.key } + +// GetKey will return the key that generated the error +func (e *GetNodeFromDBErr) GetIdentifier() string { + return e.identifier +} diff --git a/process/sync/metablock.go b/process/sync/metablock.go index 5021ba9b428..f673671ef8b 100644 --- a/process/sync/metablock.go +++ b/process/sync/metablock.go @@ -2,11 +2,13 @@ package sync import ( "context" + "fmt" "github.com/ElrondNetwork/elrond-go-core/core" "github.com/ElrondNetwork/elrond-go-core/core/check" "github.com/ElrondNetwork/elrond-go-core/data" 
"github.com/ElrondNetwork/elrond-go-core/data/block" + "github.com/ElrondNetwork/elrond-go/common" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/errors" "github.com/ElrondNetwork/elrond-go/process" @@ -185,37 +187,27 @@ func (boot *MetaBootstrap) SyncBlock(ctx context.Context) error { return err } - errSync := boot.syncAccountsDBs(getNodeErr.GetKey()) + errSync := boot.syncAccountsDBs(getNodeErr.GetKey(), getNodeErr.GetIdentifier()) boot.handleTrieSyncError(errSync, ctx) } return err } -func (boot *MetaBootstrap) syncAccountsDBs(key []byte) error { - var err error - - err = boot.syncValidatorAccountsState() - if err != nil { - return err - } - - err = boot.syncUserAccountsState(key) - if err != nil { - return err +func (boot *MetaBootstrap) syncAccountsDBs(key []byte, id string) error { + switch id { + case common.AccountsTrieIdentifier: + return boot.syncUserAccountsState(key) + case common.PeerAccountsTrieIdentifier: + return boot.syncValidatorAccountsState(key) + default: + return fmt.Errorf("invalid trie identifier, id %s", id) } - - return nil } -func (boot *MetaBootstrap) syncValidatorAccountsState() error { - rootHash, err := boot.validatorAccountsDB.RootHash() - if err != nil { - return err - } - +func (boot *MetaBootstrap) syncValidatorAccountsState(key []byte) error { log.Warn("base sync: started syncValidatorAccountsState") - return boot.validatorStatisticsDBSyncer.SyncAccounts(rootHash) + return boot.validatorStatisticsDBSyncer.SyncAccounts(key) } // Close closes the synchronization loop diff --git a/trie/node.go b/trie/node.go index f6ee33bfe2e..53665f7c0c6 100644 --- a/trie/node.go +++ b/trie/node.go @@ -9,6 +9,7 @@ import ( "github.com/ElrondNetwork/elrond-go-core/marshal" "github.com/ElrondNetwork/elrond-go/common" "github.com/ElrondNetwork/elrond-go/errors" + "github.com/ElrondNetwork/elrond-go/storage/pruning" "github.com/ElrondNetwork/elrond-go/trie/keyBuilder" ) @@ -117,8 +118,13 @@ func 
computeAndSetNodeHash(n node) ([]byte, error) { func getNodeFromDBAndDecode(n []byte, db common.DBWriteCacher, marshalizer marshal.Marshalizer, hasher hashing.Hasher) (node, error) { encChild, err := db.Get(n) if err != nil { - log.Trace(common.GetNodeFromDBErrorString, "error", err, "key", n) - return nil, errors.NewGetNodeFromDBErr(n, err) + dbWithID, ok := db.(*pruning.TriePruningStorerWithID) + if !ok { + return nil, errors.NewGetNodeFromDBErr(n, err, "") + } + + log.Debug(common.GetNodeFromDBErrorString, "error", err, "key", n) + return nil, errors.NewGetNodeFromDBErr(n, err, dbWithID.GetIdentifier()) } return decodeNode(encChild, marshalizer, hasher) From 6c5d0730d75be58d69403e189206fc028203e537 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Wed, 23 Nov 2022 16:29:01 +0200 Subject: [PATCH 015/221] add custom interface for get identifier --- process/sync/export_test.go | 5 +++ process/sync/metablock_test.go | 54 +++++++++++++++++++++++----- process/sync/shardblock_test.go | 2 +- storage/pruning/triePruningStorer.go | 5 +++ trie/interface.go | 5 +++ trie/node.go | 3 +- 6 files changed, 62 insertions(+), 12 deletions(-) diff --git a/process/sync/export_test.go b/process/sync/export_test.go index 7f243a2fe54..53b7242e5f4 100644 --- a/process/sync/export_test.go +++ b/process/sync/export_test.go @@ -175,6 +175,11 @@ func (boot *MetaBootstrap) GetNotarizedInfo( } } +// SyncAccountsDBs - +func (boot *MetaBootstrap) SyncAccountsDBs(key []byte, id string) error { + return boot.syncAccountsDBs(key, id) +} + // ProcessReceivedHeader - func (boot *baseBootstrap) ProcessReceivedHeader(headerHandler data.HeaderHandler, headerHash []byte) { boot.processReceivedHeader(headerHandler, headerHash) diff --git a/process/sync/metablock_test.go b/process/sync/metablock_test.go index 07055519d90..3bb64c69a7a 100644 --- a/process/sync/metablock_test.go +++ b/process/sync/metablock_test.go @@ -1622,7 +1622,7 @@ func TestMetaBootstrap_SyncBlockErrGetNodeDBShouldSyncAccounts(t *testing.T) { 
} args.ChainHandler = blkc - errGetNodeFromDB := commonErrors.NewGetNodeFromDBErr([]byte("key"), errors.New("get error")) + errGetNodeFromDB := commonErrors.NewGetNodeFromDBErr([]byte("key"), errors.New("get error"), common.AccountsTrieIdentifier) blockProcessor := createMetaBlockProcessor(args.ChainHandler) blockProcessor.ProcessBlockCalled = func(header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { return errGetNodeFromDB @@ -1681,13 +1681,8 @@ func TestMetaBootstrap_SyncBlockErrGetNodeDBShouldSyncAccounts(t *testing.T) { SyncAccountsCalled: func(rootHash []byte) error { accountsSyncCalled = true return nil - }} - validatorSyncCalled := false - args.ValidatorStatisticsDBSyncer = &mock.AccountsDBSyncerStub{ - SyncAccountsCalled: func(rootHash []byte) error { - validatorSyncCalled = true - return nil - }} + }, + } args.Accounts = &stateMock.AccountsStub{RootHashCalled: func() ([]byte, error) { return []byte("roothash"), nil }} @@ -1700,5 +1695,46 @@ func TestMetaBootstrap_SyncBlockErrGetNodeDBShouldSyncAccounts(t *testing.T) { assert.Equal(t, errGetNodeFromDB, err) assert.True(t, accountsSyncCalled) - assert.True(t, validatorSyncCalled) +} + +func TestMetaBootstrap_SyncAccountsDBs(t *testing.T) { + t.Parallel() + + t.Run("sync user accounts state", func(t *testing.T) { + t.Parallel() + + args := CreateMetaBootstrapMockArguments() + accountsSyncCalled := false + args.AccountsDBSyncer = &mock.AccountsDBSyncerStub{ + SyncAccountsCalled: func(rootHash []byte) error { + accountsSyncCalled = true + return nil + }, + } + + bs, _ := sync.NewMetaBootstrap(args) + + err := bs.SyncAccountsDBs([]byte("key"), common.AccountsTrieIdentifier) + require.Nil(t, err) + require.True(t, accountsSyncCalled) + }) + + t.Run("sync validator accounts state", func(t *testing.T) { + t.Parallel() + + args := CreateMetaBootstrapMockArguments() + accountsSyncCalled := false + args.ValidatorStatisticsDBSyncer = &mock.AccountsDBSyncerStub{ + SyncAccountsCalled: 
func(rootHash []byte) error { + accountsSyncCalled = true + return nil + }, + } + + bs, _ := sync.NewMetaBootstrap(args) + + err := bs.SyncAccountsDBs([]byte("key"), common.PeerAccountsTrieIdentifier) + require.Nil(t, err) + require.True(t, accountsSyncCalled) + }) } diff --git a/process/sync/shardblock_test.go b/process/sync/shardblock_test.go index 396a7805ff2..000134eeb5b 100644 --- a/process/sync/shardblock_test.go +++ b/process/sync/shardblock_test.go @@ -2062,7 +2062,7 @@ func TestShardBootstrap_SyncBlockGetNodeDBErrorShouldSync(t *testing.T) { } args.ChainHandler = blkc - errGetNodeFromDB := commonErrors.NewGetNodeFromDBErr([]byte("key"), errors.New("get error")) + errGetNodeFromDB := commonErrors.NewGetNodeFromDBErr([]byte("key"), errors.New("get error"), "") blockProcessor := createBlockProcessor(args.ChainHandler) blockProcessor.ProcessBlockCalled = func(header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { return errGetNodeFromDB diff --git a/storage/pruning/triePruningStorer.go b/storage/pruning/triePruningStorer.go index bda72d5f23f..adfcbe3c195 100644 --- a/storage/pruning/triePruningStorer.go +++ b/storage/pruning/triePruningStorer.go @@ -164,6 +164,11 @@ func (ps *triePruningStorer) GetLatestStorageEpoch() (uint32, error) { return ps.activePersisters[currentEpochIndex].epoch, nil } +// GetIdentifier returns the identifier for storer +func (ps *triePruningStorer) GetIdentifier() string { + return ps.identifier +} + // IsInterfaceNil returns true if there is no value under the interface func (ps *triePruningStorer) IsInterfaceNil() bool { return ps == nil diff --git a/trie/interface.go b/trie/interface.go index a18d73947cb..0c52b345674 100644 --- a/trie/interface.go +++ b/trie/interface.go @@ -128,3 +128,8 @@ type storageManagerExtension interface { type StorageMarker interface { MarkStorerAsSyncedAndActive(storer common.StorageManager) } + +type dbWriteCacherWithIdentifier interface { + common.DBWriteCacher + 
GetIdentifier() string +} diff --git a/trie/node.go b/trie/node.go index 53665f7c0c6..c7b35b297d8 100644 --- a/trie/node.go +++ b/trie/node.go @@ -9,7 +9,6 @@ import ( "github.com/ElrondNetwork/elrond-go-core/marshal" "github.com/ElrondNetwork/elrond-go/common" "github.com/ElrondNetwork/elrond-go/errors" - "github.com/ElrondNetwork/elrond-go/storage/pruning" "github.com/ElrondNetwork/elrond-go/trie/keyBuilder" ) @@ -118,7 +117,7 @@ func computeAndSetNodeHash(n node) ([]byte, error) { func getNodeFromDBAndDecode(n []byte, db common.DBWriteCacher, marshalizer marshal.Marshalizer, hasher hashing.Hasher) (node, error) { encChild, err := db.Get(n) if err != nil { - dbWithID, ok := db.(*pruning.TriePruningStorerWithID) + dbWithID, ok := db.(dbWriteCacherWithIdentifier) if !ok { return nil, errors.NewGetNodeFromDBErr(n, err, "") } From d7e1b6bc381324c5b7ec2d23797844483a315818 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Wed, 23 Nov 2022 16:33:29 +0200 Subject: [PATCH 016/221] fix comments and log messages --- common/constants.go | 3 +++ process/sync/metablock.go | 2 +- trie/node.go | 3 ++- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/common/constants.go b/common/constants.go index bf2f53854c8..3736867b83f 100644 --- a/common/constants.go +++ b/common/constants.go @@ -835,5 +835,8 @@ const MetricTrieSyncNumReceivedBytes = "erd_trie_sync_num_bytes_received" // MetricTrieSyncNumProcessedNodes is the metric that outputs the number of trie nodes processed for accounts during trie sync const MetricTrieSyncNumProcessedNodes = "erd_trie_sync_num_nodes_processed" +// AccountsTrieIdentifier defines the identifier for accounts trie storer const AccountsTrieIdentifier = "AccountsTrie" + +// PeerAccountsTrieIdentifier defines the identifier for peer accounts storer const PeerAccountsTrieIdentifier = "PeerAccountsTrie" diff --git a/process/sync/metablock.go b/process/sync/metablock.go index f673671ef8b..6fb6f195e10 100644 --- a/process/sync/metablock.go +++ 
b/process/sync/metablock.go @@ -201,7 +201,7 @@ func (boot *MetaBootstrap) syncAccountsDBs(key []byte, id string) error { case common.PeerAccountsTrieIdentifier: return boot.syncValidatorAccountsState(key) default: - return fmt.Errorf("invalid trie identifier, id %s", id) + return fmt.Errorf("invalid trie identifier, id: %s", id) } } diff --git a/trie/node.go b/trie/node.go index c7b35b297d8..724ad650d8a 100644 --- a/trie/node.go +++ b/trie/node.go @@ -119,10 +119,11 @@ func getNodeFromDBAndDecode(n []byte, db common.DBWriteCacher, marshalizer marsh if err != nil { dbWithID, ok := db.(dbWriteCacherWithIdentifier) if !ok { + log.Trace(common.GetNodeFromDBErrorString, "error", err, "key", n) return nil, errors.NewGetNodeFromDBErr(n, err, "") } - log.Debug(common.GetNodeFromDBErrorString, "error", err, "key", n) + log.Trace(common.GetNodeFromDBErrorString, "error", err, "key", n) return nil, errors.NewGetNodeFromDBErr(n, err, dbWithID.GetIdentifier()) } From ac87c63fe245a3684bfbf5dfe72bdbd303016c2a Mon Sep 17 00:00:00 2001 From: ssd04 Date: Thu, 24 Nov 2022 09:52:17 +0200 Subject: [PATCH 017/221] fixes after review: comments fixes + renamings --- errors/missingTrieNodeError.go | 16 ++++++++-------- storage/pruning/pruningStorer.go | 5 +++++ storage/pruning/triePruningStorer.go | 5 ----- trie/interface.go | 1 - trie/node.go | 2 +- 5 files changed, 14 insertions(+), 15 deletions(-) diff --git a/errors/missingTrieNodeError.go b/errors/missingTrieNodeError.go index cd9782df5a3..a4a386f049e 100644 --- a/errors/missingTrieNodeError.go +++ b/errors/missingTrieNodeError.go @@ -27,17 +27,17 @@ func IsGetNodeFromDBError(err error) bool { // GetNodeFromDBErr defines a custom error for trie get node type GetNodeFromDBErr struct { - getErr error - key []byte - identifier string + getErr error + key []byte + dbIdentifier string } // NewGetNodeFromDBErr will create a new instance of GetNodeFromDBErr func NewGetNodeFromDBErr(key []byte, err error, id string) *GetNodeFromDBErr { 
return &GetNodeFromDBErr{ - getErr: err, - key: key, - identifier: id, + getErr: err, + key: key, + dbIdentifier: id, } } @@ -56,7 +56,7 @@ func (e *GetNodeFromDBErr) GetKey() []byte { return e.key } -// GetKey will return the key that generated the error +// GetIdentifier will return the db corresponding to the db func (e *GetNodeFromDBErr) GetIdentifier() string { - return e.identifier + return e.dbIdentifier } diff --git a/storage/pruning/pruningStorer.go b/storage/pruning/pruningStorer.go index 69c5946cb1b..dc3c5e7bf10 100644 --- a/storage/pruning/pruningStorer.go +++ b/storage/pruning/pruningStorer.go @@ -1033,6 +1033,11 @@ func (ps *PruningStorer) RangeKeys(_ func(key []byte, val []byte) bool) { debug.PrintStack() } +// GetIdentifier returns the identifier for storer +func (ps *PruningStorer) GetIdentifier() string { + return ps.identifier +} + // IsInterfaceNil returns true if there is no value under the interface func (ps *PruningStorer) IsInterfaceNil() bool { return ps == nil diff --git a/storage/pruning/triePruningStorer.go b/storage/pruning/triePruningStorer.go index adfcbe3c195..bda72d5f23f 100644 --- a/storage/pruning/triePruningStorer.go +++ b/storage/pruning/triePruningStorer.go @@ -164,11 +164,6 @@ func (ps *triePruningStorer) GetLatestStorageEpoch() (uint32, error) { return ps.activePersisters[currentEpochIndex].epoch, nil } -// GetIdentifier returns the identifier for storer -func (ps *triePruningStorer) GetIdentifier() string { - return ps.identifier -} - // IsInterfaceNil returns true if there is no value under the interface func (ps *triePruningStorer) IsInterfaceNil() bool { return ps == nil diff --git a/trie/interface.go b/trie/interface.go index 0c52b345674..99940af882e 100644 --- a/trie/interface.go +++ b/trie/interface.go @@ -130,6 +130,5 @@ type StorageMarker interface { } type dbWriteCacherWithIdentifier interface { - common.DBWriteCacher GetIdentifier() string } diff --git a/trie/node.go b/trie/node.go index 724ad650d8a..55735146775 
100644 --- a/trie/node.go +++ b/trie/node.go @@ -119,7 +119,7 @@ func getNodeFromDBAndDecode(n []byte, db common.DBWriteCacher, marshalizer marsh if err != nil { dbWithID, ok := db.(dbWriteCacherWithIdentifier) if !ok { - log.Trace(common.GetNodeFromDBErrorString, "error", err, "key", n) + log.Warn("wrong type assertion on", common.GetNodeFromDBErrorString, "error", err, "key", n) return nil, errors.NewGetNodeFromDBErr(n, err, "") } From 113c8339fb2c9b4935f2b00b85f107058ce46cdf Mon Sep 17 00:00:00 2001 From: ssd04 Date: Wed, 14 Dec 2022 12:21:23 +0200 Subject: [PATCH 018/221] error from db renaming --- errors/missingTrieNodeError.go | 18 +++++++++--------- process/sync/metablock.go | 3 ++- process/sync/metablock_test.go | 2 +- process/sync/shardblock.go | 2 +- process/sync/shardblock_test.go | 2 +- 5 files changed, 14 insertions(+), 13 deletions(-) diff --git a/errors/missingTrieNodeError.go b/errors/missingTrieNodeError.go index a4a386f049e..2e59df14156 100644 --- a/errors/missingTrieNodeError.go +++ b/errors/missingTrieNodeError.go @@ -25,16 +25,16 @@ func IsGetNodeFromDBError(err error) bool { return false } -// GetNodeFromDBErr defines a custom error for trie get node -type GetNodeFromDBErr struct { +// GetNodeFromDBErrWithKey defines a custom error for trie get node +type GetNodeFromDBErrWithKey struct { getErr error key []byte dbIdentifier string } -// NewGetNodeFromDBErr will create a new instance of GetNodeFromDBErr -func NewGetNodeFromDBErr(key []byte, err error, id string) *GetNodeFromDBErr { - return &GetNodeFromDBErr{ +// NewGetNodeFromDBErrWithKey will create a new instance of GetNodeFromDBErr +func NewGetNodeFromDBErrWithKey(key []byte, err error, id string) *GetNodeFromDBErrWithKey { + return &GetNodeFromDBErrWithKey{ getErr: err, key: key, dbIdentifier: id, @@ -42,7 +42,7 @@ func NewGetNodeFromDBErr(key []byte, err error, id string) *GetNodeFromDBErr { } // Error returns the error as string -func (e *GetNodeFromDBErr) Error() string { +func (e 
*GetNodeFromDBErrWithKey) Error() string { return fmt.Sprintf( "%s: %s for key %v", common.GetNodeFromDBErrorString, @@ -52,11 +52,11 @@ func (e *GetNodeFromDBErr) Error() string { } // GetKey will return the key that generated the error -func (e *GetNodeFromDBErr) GetKey() []byte { +func (e *GetNodeFromDBErrWithKey) GetKey() []byte { return e.key } -// GetIdentifier will return the db corresponding to the db -func (e *GetNodeFromDBErr) GetIdentifier() string { +// GetIdentifier will return the db identifier corresponding to the db +func (e *GetNodeFromDBErrWithKey) GetIdentifier() string { return e.dbIdentifier } diff --git a/process/sync/metablock.go b/process/sync/metablock.go index 6fb6f195e10..4d46c8fd1be 100644 --- a/process/sync/metablock.go +++ b/process/sync/metablock.go @@ -182,7 +182,7 @@ func (boot *MetaBootstrap) setLastEpochStartRound() { func (boot *MetaBootstrap) SyncBlock(ctx context.Context) error { err := boot.syncBlock() if errors.IsGetNodeFromDBError(err) { - getNodeErr, ok := err.(*errors.GetNodeFromDBErr) + getNodeErr, ok := err.(*errors.GetNodeFromDBErrWithKey) if !ok { return err } @@ -195,6 +195,7 @@ func (boot *MetaBootstrap) SyncBlock(ctx context.Context) error { } func (boot *MetaBootstrap) syncAccountsDBs(key []byte, id string) error { + // TODO: refactor this in order to avoid treatment based on identifier switch id { case common.AccountsTrieIdentifier: return boot.syncUserAccountsState(key) diff --git a/process/sync/metablock_test.go b/process/sync/metablock_test.go index 3bb64c69a7a..c997b55a602 100644 --- a/process/sync/metablock_test.go +++ b/process/sync/metablock_test.go @@ -1622,7 +1622,7 @@ func TestMetaBootstrap_SyncBlockErrGetNodeDBShouldSyncAccounts(t *testing.T) { } args.ChainHandler = blkc - errGetNodeFromDB := commonErrors.NewGetNodeFromDBErr([]byte("key"), errors.New("get error"), common.AccountsTrieIdentifier) + errGetNodeFromDB := commonErrors.NewGetNodeFromDBErrWithKey([]byte("key"), errors.New("get error"), 
common.AccountsTrieIdentifier) blockProcessor := createMetaBlockProcessor(args.ChainHandler) blockProcessor.ProcessBlockCalled = func(header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { return errGetNodeFromDB diff --git a/process/sync/shardblock.go b/process/sync/shardblock.go index 204718a6b9c..d39572b221a 100644 --- a/process/sync/shardblock.go +++ b/process/sync/shardblock.go @@ -144,7 +144,7 @@ func (boot *ShardBootstrap) StartSyncingBlocks() { func (boot *ShardBootstrap) SyncBlock(ctx context.Context) error { err := boot.syncBlock() if errors.IsGetNodeFromDBError(err) { - getNodeErr, ok := err.(*errors.GetNodeFromDBErr) + getNodeErr, ok := err.(*errors.GetNodeFromDBErrWithKey) if !ok { return err } diff --git a/process/sync/shardblock_test.go b/process/sync/shardblock_test.go index 000134eeb5b..8cb5f66d8ae 100644 --- a/process/sync/shardblock_test.go +++ b/process/sync/shardblock_test.go @@ -2062,7 +2062,7 @@ func TestShardBootstrap_SyncBlockGetNodeDBErrorShouldSync(t *testing.T) { } args.ChainHandler = blkc - errGetNodeFromDB := commonErrors.NewGetNodeFromDBErr([]byte("key"), errors.New("get error"), "") + errGetNodeFromDB := commonErrors.NewGetNodeFromDBErrWithKey([]byte("key"), errors.New("get error"), "") blockProcessor := createBlockProcessor(args.ChainHandler) blockProcessor.ProcessBlockCalled = func(header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { return errGetNodeFromDB From 482aecbc61d13e0bc30b1e05ff8406d4c635c972 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Thu, 15 Dec 2022 17:12:21 +0200 Subject: [PATCH 019/221] renamed error with key in trie node --- trie/node.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/trie/node.go b/trie/node.go index 55735146775..f56a733e8ae 100644 --- a/trie/node.go +++ b/trie/node.go @@ -120,11 +120,11 @@ func getNodeFromDBAndDecode(n []byte, db common.DBWriteCacher, marshalizer marsh dbWithID, ok := 
db.(dbWriteCacherWithIdentifier) if !ok { log.Warn("wrong type assertion on", common.GetNodeFromDBErrorString, "error", err, "key", n) - return nil, errors.NewGetNodeFromDBErr(n, err, "") + return nil, errors.NewGetNodeFromDBErrWithKey(n, err, "") } log.Trace(common.GetNodeFromDBErrorString, "error", err, "key", n) - return nil, errors.NewGetNodeFromDBErr(n, err, dbWithID.GetIdentifier()) + return nil, errors.NewGetNodeFromDBErrWithKey(n, err, dbWithID.GetIdentifier()) } return decodeNode(encChild, marshalizer, hasher) From 737c93ed4ffd0eff8c0f596a5e54260a19b1fcf0 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Mon, 19 Dec 2022 16:18:22 +0200 Subject: [PATCH 020/221] add interface for db error with key --- process/sync/interface.go | 6 ++++++ process/sync/metablock.go | 2 +- process/sync/shardblock.go | 2 +- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/process/sync/interface.go b/process/sync/interface.go index 7536f132e53..d9b2df014d0 100644 --- a/process/sync/interface.go +++ b/process/sync/interface.go @@ -29,3 +29,9 @@ type syncStarter interface { type forkDetector interface { computeFinalCheckpoint() } + +// getKeyHandler defines the behaviour of a component that can provide a trie node key and identifier +type getKeyHandler interface { + GetKey() []byte + GetIdentifier() string +} diff --git a/process/sync/metablock.go b/process/sync/metablock.go index 4d46c8fd1be..3bce4846d76 100644 --- a/process/sync/metablock.go +++ b/process/sync/metablock.go @@ -182,7 +182,7 @@ func (boot *MetaBootstrap) setLastEpochStartRound() { func (boot *MetaBootstrap) SyncBlock(ctx context.Context) error { err := boot.syncBlock() if errors.IsGetNodeFromDBError(err) { - getNodeErr, ok := err.(*errors.GetNodeFromDBErrWithKey) + getNodeErr, ok := err.(getKeyHandler) if !ok { return err } diff --git a/process/sync/shardblock.go b/process/sync/shardblock.go index d39572b221a..ddc9f524752 100644 --- a/process/sync/shardblock.go +++ b/process/sync/shardblock.go @@ -144,7 
+144,7 @@ func (boot *ShardBootstrap) StartSyncingBlocks() { func (boot *ShardBootstrap) SyncBlock(ctx context.Context) error { err := boot.syncBlock() if errors.IsGetNodeFromDBError(err) { - getNodeErr, ok := err.(*errors.GetNodeFromDBErrWithKey) + getNodeErr, ok := err.(getKeyHandler) if !ok { return err } From 5f852a346dcec93c385aa2f6f10deee9464d62fb Mon Sep 17 00:00:00 2001 From: ssd04 Date: Mon, 19 Dec 2022 18:10:48 +0200 Subject: [PATCH 021/221] added todo for db identifier constants refactoring --- trie/factory/trieFactoryArgs.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/trie/factory/trieFactoryArgs.go b/trie/factory/trieFactoryArgs.go index 602feb8754b..06e6a011c67 100644 --- a/trie/factory/trieFactoryArgs.go +++ b/trie/factory/trieFactoryArgs.go @@ -7,6 +7,8 @@ import ( "github.com/ElrondNetwork/elrond-go/storage" ) +// TODO: refactor to align these constants with db filepath identifier + // UserAccountTrie represents the use account identifier const UserAccountTrie = "userAccount" From 349d22adad7ed5893eccb0658262a46529d0511f Mon Sep 17 00:00:00 2001 From: ssd04 Date: Thu, 22 Dec 2022 14:05:02 +0200 Subject: [PATCH 022/221] refactor to use storer ids for dataRetriever --- common/constants.go | 6 ---- .../metaResolversContainerFactory.go | 7 ++--- .../metaResolversContainerFactory_test.go | 5 ++-- .../shardResolversContainerFactory.go | 3 +- .../shardResolversContainerFactory_test.go | 5 ++-- epochStart/bootstrap/process.go | 4 +-- factory/consensus/consensusComponents.go | 6 ++-- factory/state/stateComponents.go | 4 +-- genesis/process/genesisBlockCreator.go | 3 +- genesis/process/genesisBlockCreator_test.go | 7 ++--- .../state/stateTrie/stateTrie_test.go | 8 ++--- .../state/stateTrieSync/stateTrieSync_test.go | 24 +++++++-------- integrationTests/testProcessorNode.go | 23 +++++++-------- node/nodeRunner.go | 5 ++-- process/sync/baseSync.go | 29 +++++++++++++++++++ process/sync/interface.go | 4 +++ process/sync/metablock.go | 11 +++++-- 
process/sync/shardblock.go | 5 ++++ testscommon/components/components.go | 10 +++---- testscommon/components/default.go | 8 ++--- trie/factory/trieCreator.go | 8 ++--- trie/factory/trieFactoryArgs.go | 8 ----- update/genesis/import.go | 8 ++--- update/genesis/import_test.go | 10 +++---- 24 files changed, 116 insertions(+), 95 deletions(-) diff --git a/common/constants.go b/common/constants.go index 3736867b83f..4aa4033eaa0 100644 --- a/common/constants.go +++ b/common/constants.go @@ -834,9 +834,3 @@ const MetricTrieSyncNumReceivedBytes = "erd_trie_sync_num_bytes_received" // MetricTrieSyncNumProcessedNodes is the metric that outputs the number of trie nodes processed for accounts during trie sync const MetricTrieSyncNumProcessedNodes = "erd_trie_sync_num_nodes_processed" - -// AccountsTrieIdentifier defines the identifier for accounts trie storer -const AccountsTrieIdentifier = "AccountsTrie" - -// PeerAccountsTrieIdentifier defines the identifier for peer accounts storer -const PeerAccountsTrieIdentifier = "PeerAccountsTrie" diff --git a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go index e08ac70fb93..5928e7b6a8c 100644 --- a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go +++ b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go @@ -9,7 +9,6 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/containers" "github.com/ElrondNetwork/elrond-go/dataRetriever/resolvers" - triesFactory "github.com/ElrondNetwork/elrond-go/trie/factory" "github.com/ElrondNetwork/elrond-go-core/marshal" "github.com/ElrondNetwork/elrond-go/process/factory" @@ -151,7 +150,7 @@ func (mrcf *metaResolversContainerFactory) AddShardTrieNodeResolvers(container d identifierTrieNodes := factory.AccountTrieNodesTopic + shardC.CommunicationIdentifier(idx) resolver, err := 
mrcf.createTrieNodesResolver( identifierTrieNodes, - triesFactory.UserAccountTrie, + dataRetriever.UserAccountsUnit.String(), mrcf.numCrossShardPeers, mrcf.numTotalPeers-mrcf.numCrossShardPeers, idx, @@ -310,7 +309,7 @@ func (mrcf *metaResolversContainerFactory) generateTrieNodesResolvers() error { identifierTrieNodes := factory.AccountTrieNodesTopic + core.CommunicationIdentifierBetweenShards(core.MetachainShardId, core.MetachainShardId) resolver, err := mrcf.createTrieNodesResolver( identifierTrieNodes, - triesFactory.UserAccountTrie, + dataRetriever.UserAccountsUnit.String(), 0, mrcf.numTotalPeers, core.MetachainShardId, @@ -325,7 +324,7 @@ func (mrcf *metaResolversContainerFactory) generateTrieNodesResolvers() error { identifierTrieNodes = factory.ValidatorTrieNodesTopic + core.CommunicationIdentifierBetweenShards(core.MetachainShardId, core.MetachainShardId) resolver, err = mrcf.createTrieNodesResolver( identifierTrieNodes, - triesFactory.PeerAccountTrie, + dataRetriever.PeerAccountsUnit.String(), 0, mrcf.numTotalPeers, core.MetachainShardId, diff --git a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go index 017d107f291..13f862349bc 100644 --- a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go +++ b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go @@ -20,7 +20,6 @@ import ( "github.com/ElrondNetwork/elrond-go/testscommon/p2pmocks" storageStubs "github.com/ElrondNetwork/elrond-go/testscommon/storage" trieMock "github.com/ElrondNetwork/elrond-go/testscommon/trie" - triesFactory "github.com/ElrondNetwork/elrond-go/trie/factory" "github.com/stretchr/testify/assert" ) @@ -84,8 +83,8 @@ func createStoreForMeta() dataRetriever.StorageService { func createTriesHolderForMeta() common.TriesHolder { triesHolder := state.NewDataTriesHolder() - triesHolder.Put([]byte(triesFactory.UserAccountTrie), 
&trieMock.TrieStub{}) - triesHolder.Put([]byte(triesFactory.PeerAccountTrie), &trieMock.TrieStub{}) + triesHolder.Put([]byte(dataRetriever.UserAccountsUnit.String()), &trieMock.TrieStub{}) + triesHolder.Put([]byte(dataRetriever.PeerAccountsUnit.String()), &trieMock.TrieStub{}) return triesHolder } diff --git a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go index 01e3310a1f4..3136147797e 100644 --- a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go +++ b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go @@ -10,7 +10,6 @@ import ( "github.com/ElrondNetwork/elrond-go/dataRetriever/factory/containers" "github.com/ElrondNetwork/elrond-go/dataRetriever/resolvers" "github.com/ElrondNetwork/elrond-go/process/factory" - triesFactory "github.com/ElrondNetwork/elrond-go/trie/factory" ) var _ dataRetriever.ResolversContainerFactory = (*shardResolversContainerFactory)(nil) @@ -241,7 +240,7 @@ func (srcf *shardResolversContainerFactory) generateTrieNodesResolvers() error { identifierTrieNodes := factory.AccountTrieNodesTopic + shardC.CommunicationIdentifier(core.MetachainShardId) resolver, err := srcf.createTrieNodesResolver( identifierTrieNodes, - triesFactory.UserAccountTrie, + dataRetriever.UserAccountsUnit.String(), 0, srcf.numTotalPeers, core.MetachainShardId, diff --git a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go index 6b731d36d0b..5a793430bd2 100644 --- a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go +++ b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go @@ -19,7 +19,6 @@ import ( "github.com/ElrondNetwork/elrond-go/testscommon/p2pmocks" storageStubs "github.com/ElrondNetwork/elrond-go/testscommon/storage" trieMock 
"github.com/ElrondNetwork/elrond-go/testscommon/trie" - triesFactory "github.com/ElrondNetwork/elrond-go/trie/factory" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -90,8 +89,8 @@ func createStoreForShard() dataRetriever.StorageService { func createTriesHolderForShard() common.TriesHolder { triesHolder := state.NewDataTriesHolder() - triesHolder.Put([]byte(triesFactory.UserAccountTrie), &trieMock.TrieStub{}) - triesHolder.Put([]byte(triesFactory.PeerAccountTrie), &trieMock.TrieStub{}) + triesHolder.Put([]byte(dataRetriever.UserAccountsUnit.String()), &trieMock.TrieStub{}) + triesHolder.Put([]byte(dataRetriever.PeerAccountsUnit.String()), &trieMock.TrieStub{}) return triesHolder } diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index bcdd9638ffb..0dcee5c4134 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -1046,7 +1046,7 @@ func (e *epochStartBootstrap) syncUserAccountsState(rootHash []byte) error { } e.mutTrieStorageManagers.RLock() - trieStorageManager := e.trieStorageManagers[factory.UserAccountTrie] + trieStorageManager := e.trieStorageManagers[dataRetriever.UserAccountsUnit.String()] e.mutTrieStorageManagers.RUnlock() argsUserAccountsSyncer := syncer.ArgsNewUserAccountsSyncer{ @@ -1115,7 +1115,7 @@ func (e *epochStartBootstrap) createStorageService( func (e *epochStartBootstrap) syncValidatorAccountsState(rootHash []byte) error { e.mutTrieStorageManagers.RLock() - peerTrieStorageManager := e.trieStorageManagers[factory.PeerAccountTrie] + peerTrieStorageManager := e.trieStorageManagers[dataRetriever.PeerAccountsUnit.String()] e.mutTrieStorageManagers.RUnlock() argsValidatorAccountsSyncer := syncer.ArgsNewValidatorAccountsSyncer{ diff --git a/factory/consensus/consensusComponents.go b/factory/consensus/consensusComponents.go index ef2c824d08f..ae69175bb29 100644 --- a/factory/consensus/consensusComponents.go +++ b/factory/consensus/consensusComponents.go @@ 
-17,6 +17,7 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus/signing" "github.com/ElrondNetwork/elrond-go/consensus/spos" "github.com/ElrondNetwork/elrond-go/consensus/spos/sposFactory" + "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/errors" "github.com/ElrondNetwork/elrond-go/factory" "github.com/ElrondNetwork/elrond-go/process" @@ -24,7 +25,6 @@ import ( "github.com/ElrondNetwork/elrond-go/process/sync/storageBootstrap" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/state/syncer" - trieFactory "github.com/ElrondNetwork/elrond-go/trie/factory" "github.com/ElrondNetwork/elrond-go/trie/statistics" "github.com/ElrondNetwork/elrond-go/trie/storageMarker" "github.com/ElrondNetwork/elrond-go/update" @@ -516,7 +516,7 @@ func (ccf *consensusComponentsFactory) createArgsBaseAccountsSyncer(trieStorageM } func (ccf *consensusComponentsFactory) createValidatorAccountsSyncer() (process.AccountsDBSyncer, error) { - trieStorageManager, ok := ccf.stateComponents.TrieStorageManagers()[trieFactory.PeerAccountTrie] + trieStorageManager, ok := ccf.stateComponents.TrieStorageManagers()[dataRetriever.PeerAccountsUnit.String()] if !ok { return nil, errors.ErrNilTrieStorageManager } @@ -528,7 +528,7 @@ func (ccf *consensusComponentsFactory) createValidatorAccountsSyncer() (process. 
} func (ccf *consensusComponentsFactory) createUserAccountsSyncer() (process.AccountsDBSyncer, error) { - trieStorageManager, ok := ccf.stateComponents.TrieStorageManagers()[trieFactory.UserAccountTrie] + trieStorageManager, ok := ccf.stateComponents.TrieStorageManagers()[dataRetriever.UserAccountsUnit.String()] if !ok { return nil, errors.ErrNilTrieStorageManager } diff --git a/factory/state/stateComponents.go b/factory/state/stateComponents.go index 5f17b621ee4..169fccf8f89 100644 --- a/factory/state/stateComponents.go +++ b/factory/state/stateComponents.go @@ -128,7 +128,7 @@ func (scf *stateComponentsFactory) Create() (*stateComponents, error) { func (scf *stateComponentsFactory) createAccountsAdapters(triesContainer common.TriesHolder) (state.AccountsAdapter, state.AccountsAdapter, state.AccountsRepository, error) { accountFactory := factoryState.NewAccountCreator() - merkleTrie := triesContainer.Get([]byte(trieFactory.UserAccountTrie)) + merkleTrie := triesContainer.Get([]byte(dataRetriever.UserAccountsUnit.String())) storagePruning, err := scf.newStoragePruningManager() if err != nil { return nil, nil, nil, err @@ -192,7 +192,7 @@ func (scf *stateComponentsFactory) createAccountsAdapters(triesContainer common. 
func (scf *stateComponentsFactory) createPeerAdapter(triesContainer common.TriesHolder) (state.AccountsAdapter, error) { accountFactory := factoryState.NewPeerAccountCreator() - merkleTrie := triesContainer.Get([]byte(trieFactory.PeerAccountTrie)) + merkleTrie := triesContainer.Get([]byte(dataRetriever.PeerAccountsUnit.String())) storagePruning, err := scf.newStoragePruningManager() if err != nil { return nil, err diff --git a/genesis/process/genesisBlockCreator.go b/genesis/process/genesisBlockCreator.go index 629177b235b..1a8c945b1d4 100644 --- a/genesis/process/genesisBlockCreator.go +++ b/genesis/process/genesisBlockCreator.go @@ -27,7 +27,6 @@ import ( "github.com/ElrondNetwork/elrond-go/storage" "github.com/ElrondNetwork/elrond-go/storage/factory" "github.com/ElrondNetwork/elrond-go/storage/storageunit" - triesFactory "github.com/ElrondNetwork/elrond-go/trie/factory" "github.com/ElrondNetwork/elrond-go/update" hardfork "github.com/ElrondNetwork/elrond-go/update/genesis" hardForkProcess "github.com/ElrondNetwork/elrond-go/update/process" @@ -481,7 +480,7 @@ func (gbc *genesisBlockCreator) getNewArgForShard(shardID uint32) (ArgsGenesisBl newArgument.Core.InternalMarshalizer(), newArgument.Core.Hasher(), factoryState.NewAccountCreator(), - gbc.arg.TrieStorageManagers[triesFactory.UserAccountTrie], + gbc.arg.TrieStorageManagers[dataRetriever.UserAccountsUnit.String()], ) if err != nil { return ArgsGenesisBlockCreator{}, fmt.Errorf("'%w' while generating an in-memory accounts adapter for shard %d", diff --git a/genesis/process/genesisBlockCreator_test.go b/genesis/process/genesisBlockCreator_test.go index b5df301bfcb..bbdf1031bde 100644 --- a/genesis/process/genesisBlockCreator_test.go +++ b/genesis/process/genesisBlockCreator_test.go @@ -28,7 +28,6 @@ import ( stateMock "github.com/ElrondNetwork/elrond-go/testscommon/state" storageCommon "github.com/ElrondNetwork/elrond-go/testscommon/storage" "github.com/ElrondNetwork/elrond-go/trie" - 
"github.com/ElrondNetwork/elrond-go/trie/factory" "github.com/ElrondNetwork/elrond-go/update" updateMock "github.com/ElrondNetwork/elrond-go/update/mock" "github.com/ElrondNetwork/elrond-go/vm/systemSmartContracts/defaults" @@ -52,8 +51,8 @@ func createMockArgument( storageManager, _ := trie.CreateTrieStorageManager(storageManagerArgs, options) trieStorageManagers := make(map[string]common.StorageManager) - trieStorageManagers[factory.UserAccountTrie] = storageManager - trieStorageManagers[factory.PeerAccountTrie] = storageManager + trieStorageManagers[dataRetriever.UserAccountsUnit.String()] = storageManager + trieStorageManagers[dataRetriever.PeerAccountsUnit.String()] = storageManager arg := ArgsGenesisBlockCreator{ GenesisTime: 0, @@ -146,7 +145,7 @@ func createMockArgument( &mock.MarshalizerMock{}, &hashingMocks.HasherMock{}, factoryState.NewAccountCreator(), - trieStorageManagers[factory.UserAccountTrie], + trieStorageManagers[dataRetriever.UserAccountsUnit.String()], ) require.Nil(t, err) diff --git a/integrationTests/state/stateTrie/stateTrie_test.go b/integrationTests/state/stateTrie/stateTrie_test.go index 11a0c8fa88a..3c7ec355a1f 100644 --- a/integrationTests/state/stateTrie/stateTrie_test.go +++ b/integrationTests/state/stateTrie/stateTrie_test.go @@ -25,6 +25,7 @@ import ( crypto "github.com/ElrondNetwork/elrond-go-crypto" "github.com/ElrondNetwork/elrond-go/common" "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/ElrondNetwork/elrond-go/integrationTests/mock" "github.com/ElrondNetwork/elrond-go/sharding" @@ -39,7 +40,6 @@ import ( "github.com/ElrondNetwork/elrond-go/testscommon/statusHandler" trieMock "github.com/ElrondNetwork/elrond-go/testscommon/trie" "github.com/ElrondNetwork/elrond-go/trie" - trieFactory "github.com/ElrondNetwork/elrond-go/trie/factory" vmcommon "github.com/ElrondNetwork/elrond-vm-common" 
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -1574,7 +1574,7 @@ func TestStatePruningIsBuffered(t *testing.T) { round, nonce = integrationTests.ProposeAndSyncOneBlock(t, nodes, idxProposers, round, nonce) rootHash := shardNode.BlockChain.GetCurrentBlockHeader().GetRootHash() - stateTrie := shardNode.TrieContainer.Get([]byte(trieFactory.UserAccountTrie)) + stateTrie := shardNode.TrieContainer.Get([]byte(dataRetriever.UserAccountsUnit.String())) delayRounds := 10 for i := 0; i < delayRounds; i++ { @@ -1764,7 +1764,7 @@ func testNodeStateCheckpointSnapshotAndPruning( prunedRootHashes [][]byte, ) { - stateTrie := node.TrieContainer.Get([]byte(trieFactory.UserAccountTrie)) + stateTrie := node.TrieContainer.Get([]byte(dataRetriever.UserAccountsUnit.String())) assert.Equal(t, 6, len(checkpointsRootHashes)) for i := range checkpointsRootHashes { tr, err := stateTrie.Recreate(checkpointsRootHashes[i]) @@ -1945,7 +1945,7 @@ func checkCodeConsistency( ) { for code := range codeMap { codeHash := integrationTests.TestHasher.Compute(code) - tr := shardNode.TrieContainer.Get([]byte(trieFactory.UserAccountTrie)) + tr := shardNode.TrieContainer.Get([]byte(dataRetriever.UserAccountsUnit.String())) if codeMap[code] != 0 { val, _, err := tr.Get(codeHash) diff --git a/integrationTests/state/stateTrieSync/stateTrieSync_test.go b/integrationTests/state/stateTrieSync/stateTrieSync_test.go index 06ecbd48a89..fbdf285e769 100644 --- a/integrationTests/state/stateTrieSync/stateTrieSync_test.go +++ b/integrationTests/state/stateTrieSync/stateTrieSync_test.go @@ -12,6 +12,7 @@ import ( "github.com/ElrondNetwork/elrond-go-core/core/throttler" logger "github.com/ElrondNetwork/elrond-go-logger" "github.com/ElrondNetwork/elrond-go/common" + "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/epochStart/notifier" "github.com/ElrondNetwork/elrond-go/integrationTests" "github.com/ElrondNetwork/elrond-go/process/factory" @@ -21,7 +22,6 
@@ import ( "github.com/ElrondNetwork/elrond-go/testscommon" testStorage "github.com/ElrondNetwork/elrond-go/testscommon/state" "github.com/ElrondNetwork/elrond-go/trie" - trieFactory "github.com/ElrondNetwork/elrond-go/trie/factory" "github.com/ElrondNetwork/elrond-go/trie/keyBuilder" "github.com/ElrondNetwork/elrond-go/trie/statistics" "github.com/ElrondNetwork/elrond-go/trie/storageMarker" @@ -94,7 +94,7 @@ func testNodeRequestInterceptTrieNodesWithMessenger(t *testing.T, version int) { time.Sleep(integrationTests.SyncDelay) - resolverTrie := nResolver.TrieContainer.Get([]byte(trieFactory.UserAccountTrie)) + resolverTrie := nResolver.TrieContainer.Get([]byte(dataRetriever.UserAccountsUnit.String())) // we have tested even with the 1000000 value and found out that it worked in a reasonable amount of time ~3.5 minutes numTrieLeaves := 10000 for i := 0; i < numTrieLeaves; i++ { @@ -116,7 +116,7 @@ func testNodeRequestInterceptTrieNodesWithMessenger(t *testing.T, version int) { numLeaves := getNumLeaves(t, resolverTrie, rootHash) assert.Equal(t, numTrieLeaves, numLeaves) - requesterTrie := nRequester.TrieContainer.Get([]byte(trieFactory.UserAccountTrie)) + requesterTrie := nRequester.TrieContainer.Get([]byte(dataRetriever.UserAccountsUnit.String())) nilRootHash, _ := requesterTrie.RootHash() tss := statistics.NewTrieSyncStatistics() @@ -224,7 +224,7 @@ func testNodeRequestInterceptTrieNodesWithMessengerNotSyncingShouldErr(t *testin time.Sleep(integrationTests.SyncDelay) - resolverTrie := nResolver.TrieContainer.Get([]byte(trieFactory.UserAccountTrie)) + resolverTrie := nResolver.TrieContainer.Get([]byte(dataRetriever.UserAccountsUnit.String())) // we have tested even with the 1000000 value and found out that it worked in a reasonable amount of time ~3.5 minutes numTrieLeaves := 10000 for i := 0; i < numTrieLeaves; i++ { @@ -246,7 +246,7 @@ func testNodeRequestInterceptTrieNodesWithMessengerNotSyncingShouldErr(t *testin numLeaves := getNumLeaves(t, resolverTrie, 
rootHash) assert.Equal(t, numTrieLeaves, numLeaves) - requesterTrie := nRequester.TrieContainer.Get([]byte(trieFactory.UserAccountTrie)) + requesterTrie := nRequester.TrieContainer.Get([]byte(dataRetriever.UserAccountsUnit.String())) tss := statistics.NewTrieSyncStatistics() arg := trie.ArgTrieSyncer{ @@ -356,7 +356,7 @@ func testMultipleDataTriesSync(t *testing.T, numAccounts int, numDataTrieLeaves err = common.GetErrorFromChanNonBlocking(leavesChannel.ErrChan) require.Nil(t, err) - requesterTrie := nRequester.TrieContainer.Get([]byte(trieFactory.UserAccountTrie)) + requesterTrie := nRequester.TrieContainer.Get([]byte(dataRetriever.UserAccountsUnit.String())) nilRootHash, _ := requesterTrie.RootHash() syncerArgs := getUserAccountSyncerArgs(nRequester, version) @@ -483,14 +483,14 @@ func testSyncMissingSnapshotNodes(t *testing.T, version int) { time.Sleep(integrationTests.StepDelay) } - resolverTrie := nResolver.TrieContainer.Get([]byte(trieFactory.UserAccountTrie)) + resolverTrie := nResolver.TrieContainer.Get([]byte(dataRetriever.UserAccountsUnit.String())) accState := nResolver.AccntState dataTrieRootHashes := addAccountsToState(t, numAccounts, numDataTrieLeaves, accState, valSize) rootHash, _ := accState.RootHash() numLeaves := getNumLeaves(t, resolverTrie, rootHash) require.Equal(t, numAccounts+numSystemAccounts, numLeaves) - requesterTrie := nRequester.TrieContainer.Get([]byte(trieFactory.UserAccountTrie)) + requesterTrie := nRequester.TrieContainer.Get([]byte(dataRetriever.UserAccountsUnit.String())) nilRootHash, _ := requesterTrie.RootHash() copyPartialState(t, nResolver, nRequester, dataTrieRootHashes) @@ -504,7 +504,7 @@ func testSyncMissingSnapshotNodes(t *testing.T, version int) { err = nRequester.AccntState.StartSnapshotIfNeeded() assert.Nil(t, err) - tsm := nRequester.TrieStorageManagers[trieFactory.UserAccountTrie] + tsm := nRequester.TrieStorageManagers[dataRetriever.UserAccountsUnit.String()] _ = tsm.PutInEpoch([]byte(common.ActiveDBKey), 
[]byte(common.ActiveDBVal), 0) nRequester.AccntState.SnapshotState(rootHash) for tsm.IsPruningBlocked() { @@ -522,12 +522,12 @@ func testSyncMissingSnapshotNodes(t *testing.T, version int) { } func copyPartialState(t *testing.T, sourceNode, destinationNode *integrationTests.TestProcessorNode, dataTriesRootHashes [][]byte) { - resolverTrie := sourceNode.TrieContainer.Get([]byte(trieFactory.UserAccountTrie)) + resolverTrie := sourceNode.TrieContainer.Get([]byte(dataRetriever.UserAccountsUnit.String())) hashes, _ := resolverTrie.GetAllHashes() assert.NotEqual(t, 0, len(hashes)) hashes = append(hashes, getDataTriesHashes(t, resolverTrie, dataTriesRootHashes)...) - destStorage := destinationNode.TrieContainer.Get([]byte(trieFactory.UserAccountTrie)).GetStorageManager() + destStorage := destinationNode.TrieContainer.Get([]byte(dataRetriever.UserAccountsUnit.String())).GetStorageManager() for i, hash := range hashes { if i%1000 == 0 { @@ -599,7 +599,7 @@ func getUserAccountSyncerArgs(node *integrationTests.TestProcessorNode, version ArgsNewBaseAccountsSyncer: syncer.ArgsNewBaseAccountsSyncer{ Hasher: integrationTests.TestHasher, Marshalizer: integrationTests.TestMarshalizer, - TrieStorageManager: node.TrieStorageManagers[trieFactory.UserAccountTrie], + TrieStorageManager: node.TrieStorageManagers[dataRetriever.UserAccountsUnit.String()], RequestHandler: node.RequestHandler, Timeout: common.TimeoutGettingTrieNodes, Cacher: node.DataPool.TrieNodes(), diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index fc622efc15f..61fafaa609b 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -110,7 +110,6 @@ import ( statusHandlerMock "github.com/ElrondNetwork/elrond-go/testscommon/statusHandler" storageStubs "github.com/ElrondNetwork/elrond-go/testscommon/storage" trieMock "github.com/ElrondNetwork/elrond-go/testscommon/trie" - trieFactory "github.com/ElrondNetwork/elrond-go/trie/factory" 
"github.com/ElrondNetwork/elrond-go/trie/keyBuilder" "github.com/ElrondNetwork/elrond-go/update" "github.com/ElrondNetwork/elrond-go/update/trigger" @@ -525,15 +524,15 @@ func (tpn *TestProcessorNode) initAccountDBsWithPruningStorer() { tpn.TrieContainer = state.NewDataTriesHolder() var stateTrie common.Trie tpn.AccntState, stateTrie = CreateAccountsDB(UserAccount, trieStorageManager) - tpn.TrieContainer.Put([]byte(trieFactory.UserAccountTrie), stateTrie) + tpn.TrieContainer.Put([]byte(dataRetriever.UserAccountsUnit.String()), stateTrie) var peerTrie common.Trie tpn.PeerState, peerTrie = CreateAccountsDB(ValidatorAccount, trieStorageManager) - tpn.TrieContainer.Put([]byte(trieFactory.PeerAccountTrie), peerTrie) + tpn.TrieContainer.Put([]byte(dataRetriever.PeerAccountsUnit.String()), peerTrie) tpn.TrieStorageManagers = make(map[string]common.StorageManager) - tpn.TrieStorageManagers[trieFactory.UserAccountTrie] = trieStorageManager - tpn.TrieStorageManagers[trieFactory.PeerAccountTrie] = trieStorageManager + tpn.TrieStorageManagers[dataRetriever.UserAccountsUnit.String()] = trieStorageManager + tpn.TrieStorageManagers[dataRetriever.PeerAccountsUnit.String()] = trieStorageManager } func (tpn *TestProcessorNode) initAccountDBs(store storage.Storer) { @@ -541,15 +540,15 @@ func (tpn *TestProcessorNode) initAccountDBs(store storage.Storer) { tpn.TrieContainer = state.NewDataTriesHolder() var stateTrie common.Trie tpn.AccntState, stateTrie = CreateAccountsDB(UserAccount, trieStorageManager) - tpn.TrieContainer.Put([]byte(trieFactory.UserAccountTrie), stateTrie) + tpn.TrieContainer.Put([]byte(dataRetriever.UserAccountsUnit.String()), stateTrie) var peerTrie common.Trie tpn.PeerState, peerTrie = CreateAccountsDB(ValidatorAccount, trieStorageManager) - tpn.TrieContainer.Put([]byte(trieFactory.PeerAccountTrie), peerTrie) + tpn.TrieContainer.Put([]byte(dataRetriever.PeerAccountsUnit.String()), peerTrie) tpn.TrieStorageManagers = make(map[string]common.StorageManager) - 
tpn.TrieStorageManagers[trieFactory.UserAccountTrie] = trieStorageManager - tpn.TrieStorageManagers[trieFactory.PeerAccountTrie] = trieStorageManager + tpn.TrieStorageManagers[dataRetriever.UserAccountsUnit.String()] = trieStorageManager + tpn.TrieStorageManagers[dataRetriever.PeerAccountsUnit.String()] = trieStorageManager } func (tpn *TestProcessorNode) initValidatorStatistics() { @@ -3064,9 +3063,9 @@ func GetDefaultStateComponents() *testscommon.StateComponentsMock { AccountsRepo: &stateMock.AccountsRepositoryStub{}, Tries: &trieMock.TriesHolderStub{}, StorageManagers: map[string]common.StorageManager{ - "0": &testscommon.StorageManagerStub{}, - trieFactory.UserAccountTrie: &testscommon.StorageManagerStub{}, - trieFactory.PeerAccountTrie: &testscommon.StorageManagerStub{}, + "0": &testscommon.StorageManagerStub{}, + dataRetriever.UserAccountsUnit.String(): &testscommon.StorageManagerStub{}, + dataRetriever.PeerAccountsUnit.String(): &testscommon.StorageManagerStub{}, }, } } diff --git a/node/nodeRunner.go b/node/nodeRunner.go index a2729ddab5b..9f331f0ee68 100644 --- a/node/nodeRunner.go +++ b/node/nodeRunner.go @@ -59,7 +59,6 @@ import ( "github.com/ElrondNetwork/elrond-go/storage/cache" storageFactory "github.com/ElrondNetwork/elrond-go/storage/factory" "github.com/ElrondNetwork/elrond-go/storage/storageunit" - trieFactory "github.com/ElrondNetwork/elrond-go/trie/factory" trieStatistics "github.com/ElrondNetwork/elrond-go/trie/statistics" "github.com/ElrondNetwork/elrond-go/trie/storageMarker" "github.com/ElrondNetwork/elrond-go/update/trigger" @@ -600,7 +599,7 @@ func getUserAccountSyncer( processComponents mainFactory.ProcessComponentsHolder, ) (process.AccountsDBSyncer, error) { maxTrieLevelInMemory := config.StateTriesConfig.MaxStateTrieLevelInMemory - userTrie := stateComponents.TriesContainer().Get([]byte(trieFactory.UserAccountTrie)) + userTrie := stateComponents.TriesContainer().Get([]byte(dataRetriever.UserAccountsUnit.String())) storageManager := 
userTrie.GetStorageManager() thr, err := throttler.NewNumGoRoutinesThrottler(int32(config.TrieSync.NumConcurrentTrieSyncers)) @@ -633,7 +632,7 @@ func getValidatorAccountSyncer( processComponents mainFactory.ProcessComponentsHolder, ) (process.AccountsDBSyncer, error) { maxTrieLevelInMemory := config.StateTriesConfig.MaxPeerTrieLevelInMemory - peerTrie := stateComponents.TriesContainer().Get([]byte(trieFactory.PeerAccountTrie)) + peerTrie := stateComponents.TriesContainer().Get([]byte(dataRetriever.PeerAccountsUnit.String())) storageManager := peerTrie.GetStorageManager() args := syncer.ArgsNewValidatorAccountsSyncer{ diff --git a/process/sync/baseSync.go b/process/sync/baseSync.go index 1b07c400124..fd56a4d141e 100644 --- a/process/sync/baseSync.go +++ b/process/sync/baseSync.go @@ -21,6 +21,7 @@ import ( "github.com/ElrondNetwork/elrond-go/consensus" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/dblookupext" + "github.com/ElrondNetwork/elrond-go/errors" "github.com/ElrondNetwork/elrond-go/outport" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/sync/storageBootstrap/metricsLoader" @@ -123,6 +124,9 @@ type baseBootstrap struct { isInImportMode bool scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler processWaitTime time.Duration + + userAccountsStorerIdentifier string + peerAccountsStorerIdentifier string } // setRequestedHeaderNonce method sets the header nonce requested by the sync mechanism @@ -1123,6 +1127,31 @@ func (boot *baseBootstrap) waitForMiniBlocks() error { } } +func (boot *baseBootstrap) setAccountsStorerIdentifiers() error { + userStorer, err := boot.store.GetStorer(dataRetriever.UserAccountsUnit) + if err != nil { + return err + } + dbWithID, ok := userStorer.(dbStorerWithIdentifier) + if !ok { + return errors.ErrWrongTypeAssertion + } + boot.userAccountsStorerIdentifier = dbWithID.GetIdentifier() + + peerStorer, err := 
boot.store.GetStorer(dataRetriever.PeerAccountsUnit) + if err != nil { + return err + } + dbPeerWithID, ok := peerStorer.(dbStorerWithIdentifier) + if !ok { + return errors.ErrWrongTypeAssertion + } + + boot.peerAccountsStorerIdentifier = dbPeerWithID.GetIdentifier() + + return nil +} + func (boot *baseBootstrap) init() { boot.forkInfo = process.NewForkInfo() diff --git a/process/sync/interface.go b/process/sync/interface.go index d9b2df014d0..f2f717a56f9 100644 --- a/process/sync/interface.go +++ b/process/sync/interface.go @@ -35,3 +35,7 @@ type getKeyHandler interface { GetKey() []byte GetIdentifier() string } + +type dbStorerWithIdentifier interface { + GetIdentifier() string +} diff --git a/process/sync/metablock.go b/process/sync/metablock.go index 3bce4846d76..915ef619445 100644 --- a/process/sync/metablock.go +++ b/process/sync/metablock.go @@ -8,7 +8,6 @@ import ( "github.com/ElrondNetwork/elrond-go-core/core/check" "github.com/ElrondNetwork/elrond-go-core/data" "github.com/ElrondNetwork/elrond-go-core/data/block" - "github.com/ElrondNetwork/elrond-go/common" "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/errors" "github.com/ElrondNetwork/elrond-go/process" @@ -108,6 +107,11 @@ func NewMetaBootstrap(arguments ArgMetaBootstrapper) (*MetaBootstrap, error) { return nil, err } + err = base.setAccountsStorerIdentifiers() + if err != nil { + return nil, err + } + base.init() return &boot, nil @@ -195,11 +199,12 @@ func (boot *MetaBootstrap) SyncBlock(ctx context.Context) error { } func (boot *MetaBootstrap) syncAccountsDBs(key []byte, id string) error { + // TODO: refactor this in order to avoid treatment based on identifier switch id { - case common.AccountsTrieIdentifier: + case boot.userAccountsStorerIdentifier: return boot.syncUserAccountsState(key) - case common.PeerAccountsTrieIdentifier: + case boot.peerAccountsStorerIdentifier: return boot.syncValidatorAccountsState(key) default: return fmt.Errorf("invalid trie 
identifier, id: %s", id) diff --git a/process/sync/shardblock.go b/process/sync/shardblock.go index ddc9f524752..60fabc3e556 100644 --- a/process/sync/shardblock.go +++ b/process/sync/shardblock.go @@ -92,6 +92,11 @@ func NewShardBootstrap(arguments ArgShardBootstrapper) (*ShardBootstrap, error) return nil, err } + err = base.setAccountsStorerIdentifiers() + if err != nil { + return nil, err + } + base.init() return &boot, nil diff --git a/testscommon/components/components.go b/testscommon/components/components.go index e2333ebd84d..541cdd40981 100644 --- a/testscommon/components/components.go +++ b/testscommon/components/components.go @@ -13,6 +13,7 @@ import ( commonFactory "github.com/ElrondNetwork/elrond-go/common/factory" "github.com/ElrondNetwork/elrond-go/config" "github.com/ElrondNetwork/elrond-go/consensus/spos" + "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/epochStart/bootstrap/disabled" "github.com/ElrondNetwork/elrond-go/factory" bootstrapComp "github.com/ElrondNetwork/elrond-go/factory/bootstrap" @@ -40,7 +41,6 @@ import ( "github.com/ElrondNetwork/elrond-go/testscommon/shardingMocks" statusHandlerMock "github.com/ElrondNetwork/elrond-go/testscommon/statusHandler" "github.com/ElrondNetwork/elrond-go/trie" - trieFactory "github.com/ElrondNetwork/elrond-go/trie/factory" "github.com/ElrondNetwork/elrond-go/trie/hashesHolder" arwenConfig "github.com/ElrondNetwork/wasm-vm-v1_4/config" "github.com/stretchr/testify/require" @@ -334,14 +334,14 @@ func GetStateFactoryArgs(coreComponents factory.CoreComponentsHolder, shardCoord storageManagerPeer, _ := trie.NewTrieStorageManagerWithoutPruning(tsm) trieStorageManagers := make(map[string]common.StorageManager) - trieStorageManagers[trieFactory.UserAccountTrie] = storageManagerUser - trieStorageManagers[trieFactory.PeerAccountTrie] = storageManagerPeer + trieStorageManagers[dataRetriever.UserAccountsUnit.String()] = storageManagerUser + 
trieStorageManagers[dataRetriever.PeerAccountsUnit.String()] = storageManagerPeer triesHolder := state.NewDataTriesHolder() trieUsers, _ := trie.NewTrie(storageManagerUser, coreComponents.InternalMarshalizer(), coreComponents.Hasher(), 5) triePeers, _ := trie.NewTrie(storageManagerPeer, coreComponents.InternalMarshalizer(), coreComponents.Hasher(), 5) - triesHolder.Put([]byte(trieFactory.UserAccountTrie), trieUsers) - triesHolder.Put([]byte(trieFactory.PeerAccountTrie), triePeers) + triesHolder.Put([]byte(dataRetriever.UserAccountsUnit.String()), trieUsers) + triesHolder.Put([]byte(dataRetriever.PeerAccountsUnit.String()), triePeers) stateComponentsFactoryArgs := stateComp.StateComponentsFactoryArgs{ Config: GetGeneralConfig(), diff --git a/testscommon/components/default.go b/testscommon/components/default.go index 18ffb4e509b..3cf22223c77 100644 --- a/testscommon/components/default.go +++ b/testscommon/components/default.go @@ -5,6 +5,7 @@ import ( crypto "github.com/ElrondNetwork/elrond-go-crypto" "github.com/ElrondNetwork/elrond-go/common" + "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/factory/mock" "github.com/ElrondNetwork/elrond-go/sharding" "github.com/ElrondNetwork/elrond-go/testscommon" @@ -17,7 +18,6 @@ import ( stateMock "github.com/ElrondNetwork/elrond-go/testscommon/state" "github.com/ElrondNetwork/elrond-go/testscommon/storage" trieMock "github.com/ElrondNetwork/elrond-go/testscommon/trie" - trieFactory "github.com/ElrondNetwork/elrond-go/trie/factory" ) // GetDefaultCoreComponents - @@ -85,9 +85,9 @@ func GetDefaultStateComponents() *testscommon.StateComponentsMock { Accounts: &stateMock.AccountsStub{}, Tries: &trieMock.TriesHolderStub{}, StorageManagers: map[string]common.StorageManager{ - "0": &testscommon.StorageManagerStub{}, - trieFactory.UserAccountTrie: &testscommon.StorageManagerStub{}, - trieFactory.PeerAccountTrie: &testscommon.StorageManagerStub{}, + "0": &testscommon.StorageManagerStub{}, + 
dataRetriever.UserAccountsUnit.String(): &testscommon.StorageManagerStub{}, + dataRetriever.PeerAccountsUnit.String(): &testscommon.StorageManagerStub{}, }, } } diff --git a/trie/factory/trieCreator.go b/trie/factory/trieCreator.go index c80ff2d09ee..bdd8f9c637e 100644 --- a/trie/factory/trieCreator.go +++ b/trie/factory/trieCreator.go @@ -148,8 +148,8 @@ func CreateTriesComponentsForShardId( trieContainer := state.NewDataTriesHolder() trieStorageManagers := make(map[string]common.StorageManager) - trieContainer.Put([]byte(UserAccountTrie), userAccountTrie) - trieStorageManagers[UserAccountTrie] = userStorageManager + trieContainer.Put([]byte(dataRetriever.UserAccountsUnit.String()), userAccountTrie) + trieStorageManagers[dataRetriever.UserAccountsUnit.String()] = userStorageManager mainStorer, err = storageService.GetStorer(dataRetriever.PeerAccountsUnit) if err != nil { @@ -175,8 +175,8 @@ func CreateTriesComponentsForShardId( return nil, nil, err } - trieContainer.Put([]byte(PeerAccountTrie), peerAccountsTrie) - trieStorageManagers[PeerAccountTrie] = peerStorageManager + trieContainer.Put([]byte(dataRetriever.PeerAccountsUnit.String()), peerAccountsTrie) + trieStorageManagers[dataRetriever.PeerAccountsUnit.String()] = peerStorageManager return trieContainer, trieStorageManagers, nil } diff --git a/trie/factory/trieFactoryArgs.go b/trie/factory/trieFactoryArgs.go index 06e6a011c67..71639870cfd 100644 --- a/trie/factory/trieFactoryArgs.go +++ b/trie/factory/trieFactoryArgs.go @@ -7,14 +7,6 @@ import ( "github.com/ElrondNetwork/elrond-go/storage" ) -// TODO: refactor to align these constants with db filepath identifier - -// UserAccountTrie represents the use account identifier -const UserAccountTrie = "userAccount" - -// PeerAccountTrie represents the peer account identifier -const PeerAccountTrie = "peerAccount" - // TrieFactoryArgs holds the arguments for creating a trie factory type TrieFactoryArgs struct { Marshalizer marshal.Marshalizer diff --git 
a/update/genesis/import.go b/update/genesis/import.go index ab74f41070d..2c15f298a67 100644 --- a/update/genesis/import.go +++ b/update/genesis/import.go @@ -16,11 +16,11 @@ import ( "github.com/ElrondNetwork/elrond-go/common" commonDisabled "github.com/ElrondNetwork/elrond-go/common/disabled" "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/state" "github.com/ElrondNetwork/elrond-go/state/factory" "github.com/ElrondNetwork/elrond-go/state/storagePruningManager/disabled" "github.com/ElrondNetwork/elrond-go/trie" - triesFactory "github.com/ElrondNetwork/elrond-go/trie/factory" "github.com/ElrondNetwork/elrond-go/update" ) @@ -286,9 +286,9 @@ func (si *stateImport) getTrie(shardID uint32, accType Type) (common.Trie, error return trieForShard, nil } - trieStorageManager := si.trieStorageManagers[triesFactory.UserAccountTrie] + trieStorageManager := si.trieStorageManagers[dataRetriever.PeerAccountsUnit.String()] if accType == ValidatorAccount { - trieStorageManager = si.trieStorageManagers[triesFactory.PeerAccountTrie] + trieStorageManager = si.trieStorageManagers[dataRetriever.PeerAccountsUnit.String()] } trieForShard, err := trie.NewTrie(trieStorageManager, si.marshalizer, si.hasher, maxTrieLevelInMemory) @@ -323,7 +323,7 @@ func (si *stateImport) importDataTrie(identifier string, shID uint32, keys [][]b return fmt.Errorf("%w wanted a roothash", update.ErrWrongTypeAssertion) } - dataTrie, err := trie.NewTrie(si.trieStorageManagers[triesFactory.UserAccountTrie], si.marshalizer, si.hasher, maxTrieLevelInMemory) + dataTrie, err := trie.NewTrie(si.trieStorageManagers[dataRetriever.UserAccountsUnit.String()], si.marshalizer, si.hasher, maxTrieLevelInMemory) if err != nil { return err } diff --git a/update/genesis/import_test.go b/update/genesis/import_test.go index 7273b2f058d..ae7ca7d0468 100644 --- a/update/genesis/import_test.go +++ b/update/genesis/import_test.go @@ -10,9 +10,9 @@ 
import ( "github.com/ElrondNetwork/elrond-go-core/data/block" "github.com/ElrondNetwork/elrond-go/common" "github.com/ElrondNetwork/elrond-go/config" + "github.com/ElrondNetwork/elrond-go/dataRetriever" "github.com/ElrondNetwork/elrond-go/testscommon" "github.com/ElrondNetwork/elrond-go/testscommon/hashingMocks" - "github.com/ElrondNetwork/elrond-go/trie/factory" "github.com/ElrondNetwork/elrond-go/update" "github.com/ElrondNetwork/elrond-go/update/mock" "github.com/stretchr/testify/assert" @@ -23,7 +23,7 @@ import ( func TestNewStateImport(t *testing.T) { trieStorageManagers := make(map[string]common.StorageManager) - trieStorageManagers[factory.UserAccountTrie] = &testscommon.StorageManagerStub{} + trieStorageManagers[dataRetriever.UserAccountsUnit.String()] = &testscommon.StorageManagerStub{} tests := []struct { name string args ArgsNewStateImport @@ -82,8 +82,8 @@ func TestImportAll(t *testing.T) { t.Parallel() trieStorageManagers := make(map[string]common.StorageManager) - trieStorageManagers[factory.UserAccountTrie] = &testscommon.StorageManagerStub{} - trieStorageManagers[factory.PeerAccountTrie] = &testscommon.StorageManagerStub{} + trieStorageManagers[dataRetriever.UserAccountsUnit.String()] = &testscommon.StorageManagerStub{} + trieStorageManagers[dataRetriever.PeerAccountsUnit.String()] = &testscommon.StorageManagerStub{} args := ArgsNewStateImport{ HardforkStorer: &mock.HardforkStorerStub{}, @@ -105,7 +105,7 @@ func TestStateImport_ImportUnFinishedMetaBlocksShouldWork(t *testing.T) { t.Parallel() trieStorageManagers := make(map[string]common.StorageManager) - trieStorageManagers[factory.UserAccountTrie] = &testscommon.StorageManagerStub{} + trieStorageManagers[dataRetriever.UserAccountsUnit.String()] = &testscommon.StorageManagerStub{} hasher := &hashingMocks.HasherMock{} marshahlizer := &mock.MarshalizerMock{} From 3548ad0ffc3c26c4e8e13c89b5c8b609302fa6ad Mon Sep 17 00:00:00 2001 From: ssd04 Date: Fri, 23 Dec 2022 01:15:03 +0200 Subject: [PATCH 
023/221] fix and update unit tests for db identifier set --- process/sync/metablock.go | 9 ++-- process/sync/metablock_test.go | 82 +++++++++++++++++++++++++++++-- process/sync/shardblock.go | 5 -- process/sync/shardblock_test.go | 5 +- testscommon/storage/storerStub.go | 9 ++++ 5 files changed, 94 insertions(+), 16 deletions(-) diff --git a/process/sync/metablock.go b/process/sync/metablock.go index 915ef619445..e2e4906b89a 100644 --- a/process/sync/metablock.go +++ b/process/sync/metablock.go @@ -107,11 +107,6 @@ func NewMetaBootstrap(arguments ArgMetaBootstrapper) (*MetaBootstrap, error) { return nil, err } - err = base.setAccountsStorerIdentifiers() - if err != nil { - return nil, err - } - base.init() return &boot, nil @@ -199,6 +194,10 @@ func (boot *MetaBootstrap) SyncBlock(ctx context.Context) error { } func (boot *MetaBootstrap) syncAccountsDBs(key []byte, id string) error { + err := boot.setAccountsStorerIdentifiers() + if err != nil { + return err + } // TODO: refactor this in order to avoid treatment based on identifier switch id { diff --git a/process/sync/metablock_test.go b/process/sync/metablock_test.go index c997b55a602..8b6922c0fba 100644 --- a/process/sync/metablock_test.go +++ b/process/sync/metablock_test.go @@ -59,6 +59,8 @@ func createMetaStore() dataRetriever.StorageService { store.AddStorer(dataRetriever.MetaBlockUnit, generateTestUnit()) store.AddStorer(dataRetriever.ShardHdrNonceHashDataUnit, generateTestUnit()) store.AddStorer(dataRetriever.MetaHdrNonceHashDataUnit, generateTestUnit()) + store.AddStorer(dataRetriever.UserAccountsUnit, generateTestUnit()) + store.AddStorer(dataRetriever.PeerAccountsUnit, generateTestUnit()) return store } @@ -1127,7 +1129,8 @@ func TestMetaBootstrap_ReceivedHeadersFoundInPoolShouldAddToForkDetector(t *test args.ShardCoordinator = shardCoordinator args.RoundHandler = initRoundHandler() - bs, _ := sync.NewMetaBootstrap(args) + bs, err := sync.NewMetaBootstrap(args) + require.Nil(t, err) 
bs.ReceivedHeaders(addedHdr, addedHash) time.Sleep(500 * time.Millisecond) @@ -1178,7 +1181,8 @@ func TestMetaBootstrap_ReceivedHeadersNotFoundInPoolShouldNotAddToForkDetector(t args.ChainHandler, _ = blockchain.NewBlockChain(&statusHandlerMock.AppStatusHandlerStub{}) args.RoundHandler = initRoundHandler() - bs, _ := sync.NewMetaBootstrap(args) + bs, err := sync.NewMetaBootstrap(args) + require.Nil(t, err) bs.ReceivedHeaders(addedHdr, addedHash) time.Sleep(500 * time.Millisecond) @@ -1622,7 +1626,7 @@ func TestMetaBootstrap_SyncBlockErrGetNodeDBShouldSyncAccounts(t *testing.T) { } args.ChainHandler = blkc - errGetNodeFromDB := commonErrors.NewGetNodeFromDBErrWithKey([]byte("key"), errors.New("get error"), common.AccountsTrieIdentifier) + errGetNodeFromDB := commonErrors.NewGetNodeFromDBErrWithKey([]byte("key"), errors.New("get error"), "userAccountsUnit") blockProcessor := createMetaBlockProcessor(args.ChainHandler) blockProcessor.ProcessBlockCalled = func(header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { return errGetNodeFromDB @@ -1690,6 +1694,32 @@ func TestMetaBootstrap_SyncBlockErrGetNodeDBShouldSyncAccounts(t *testing.T) { return []byte("roothash"), nil }} + args.Store = &storageStubs.ChainStorerStub{ + GetStorerCalled: func(unitType dataRetriever.UnitType) (storage.Storer, error) { + var dbIdentifier string + switch unitType { + case dataRetriever.UserAccountsUnit: + dbIdentifier = "userAccountsUnit" + case dataRetriever.PeerAccountsUnit: + dbIdentifier = "peerAccountsUnit" + default: + dbIdentifier = "" + } + + return &storageStubs.StorerStub{ + GetCalled: func(key []byte) ([]byte, error) { + return nil, process.ErrMissingHeader + }, + RemoveCalled: func(key []byte) error { + return nil + }, + GetIdentifierCalled: func() string { + return dbIdentifier + }, + }, nil + }, + } + bs, _ := sync.NewMetaBootstrap(args) err := bs.SyncBlock(context.Background()) @@ -1712,9 +1742,30 @@ func TestMetaBootstrap_SyncAccountsDBs(t 
*testing.T) { }, } + dbIdentifier := "userAccountsTrie" + args.Store = &storageStubs.ChainStorerStub{ + GetStorerCalled: func(unitType dataRetriever.UnitType) (storage.Storer, error) { + if unitType != dataRetriever.UserAccountsUnit { + return &storageStubs.StorerStub{}, nil + } + + return &storageStubs.StorerStub{ + GetCalled: func(key []byte) ([]byte, error) { + return nil, process.ErrMissingHeader + }, + RemoveCalled: func(key []byte) error { + return nil + }, + GetIdentifierCalled: func() string { + return dbIdentifier + }, + }, nil + }, + } + bs, _ := sync.NewMetaBootstrap(args) - err := bs.SyncAccountsDBs([]byte("key"), common.AccountsTrieIdentifier) + err := bs.SyncAccountsDBs([]byte("key"), dbIdentifier) require.Nil(t, err) require.True(t, accountsSyncCalled) }) @@ -1731,9 +1782,30 @@ func TestMetaBootstrap_SyncAccountsDBs(t *testing.T) { }, } + dbIdentifier := "peerAccountsTrie" + args.Store = &storageStubs.ChainStorerStub{ + GetStorerCalled: func(unitType dataRetriever.UnitType) (storage.Storer, error) { + if unitType != dataRetriever.PeerAccountsUnit { + return &storageStubs.StorerStub{}, nil + } + + return &storageStubs.StorerStub{ + GetCalled: func(key []byte) ([]byte, error) { + return nil, process.ErrMissingHeader + }, + RemoveCalled: func(key []byte) error { + return nil + }, + GetIdentifierCalled: func() string { + return dbIdentifier + }, + }, nil + }, + } + bs, _ := sync.NewMetaBootstrap(args) - err := bs.SyncAccountsDBs([]byte("key"), common.PeerAccountsTrieIdentifier) + err := bs.SyncAccountsDBs([]byte("key"), dbIdentifier) require.Nil(t, err) require.True(t, accountsSyncCalled) }) diff --git a/process/sync/shardblock.go b/process/sync/shardblock.go index 60fabc3e556..ddc9f524752 100644 --- a/process/sync/shardblock.go +++ b/process/sync/shardblock.go @@ -92,11 +92,6 @@ func NewShardBootstrap(arguments ArgShardBootstrapper) (*ShardBootstrap, error) return nil, err } - err = base.setAccountsStorerIdentifiers() - if err != nil { - return nil, err 
- } - base.init() return &boot, nil diff --git a/process/sync/shardblock_test.go b/process/sync/shardblock_test.go index 8cb5f66d8ae..c4e9c23d0bf 100644 --- a/process/sync/shardblock_test.go +++ b/process/sync/shardblock_test.go @@ -107,6 +107,8 @@ func createFullStore() dataRetriever.StorageService { store.AddStorer(dataRetriever.ShardHdrNonceHashDataUnit, generateTestUnit()) store.AddStorer(dataRetriever.ReceiptsUnit, generateTestUnit()) store.AddStorer(dataRetriever.ScheduledSCRsUnit, generateTestUnit()) + store.AddStorer(dataRetriever.UserAccountsUnit, generateTestUnit()) + store.AddStorer(dataRetriever.PeerAccountsUnit, generateTestUnit()) return store } @@ -1739,7 +1741,8 @@ func TestBootstrap_GetTxBodyHavingHashNotFoundInCacherOrStorageShouldRetEmptySli args.Store = createFullStore() args.Store.AddStorer(dataRetriever.TransactionUnit, txBlockUnit) - bs, _ := sync.NewShardBootstrap(args) + bs, err := sync.NewShardBootstrap(args) + require.Nil(t, err) gotMbsAndHashes, _ := bs.GetMiniBlocks(requestedHash) assert.Equal(t, 0, len(gotMbsAndHashes)) diff --git a/testscommon/storage/storerStub.go b/testscommon/storage/storerStub.go index a9fe5647880..a4e08a90095 100644 --- a/testscommon/storage/storerStub.go +++ b/testscommon/storage/storerStub.go @@ -19,6 +19,7 @@ type StorerStub struct { GetBulkFromEpochCalled func(keys [][]byte, epoch uint32) ([]storage.KeyValuePair, error) GetOldestEpochCalled func() (uint32, error) RangeKeysCalled func(handler func(key []byte, val []byte) bool) + GetIdentifierCalled func() string CloseCalled func() error } @@ -124,6 +125,14 @@ func (ss *StorerStub) RangeKeys(handler func(key []byte, val []byte) bool) { } } +// GetIdentifier - +func (ss *StorerStub) GetIdentifier() string { + if ss.GetIdentifierCalled != nil { + return ss.GetIdentifierCalled() + } + return "" +} + // Close - func (ss *StorerStub) Close() error { if ss.CloseCalled != nil { From bec60f1ba0fa86fcfa55862081558b8e4d38fbce Mon Sep 17 00:00:00 2001 From: ssd04 Date: 
Fri, 23 Dec 2022 11:03:02 +0200 Subject: [PATCH 024/221] fix factory processing unit tests --- factory/processing/blockProcessorCreator_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/factory/processing/blockProcessorCreator_test.go b/factory/processing/blockProcessorCreator_test.go index 165078792d6..8aaa99f4efe 100644 --- a/factory/processing/blockProcessorCreator_test.go +++ b/factory/processing/blockProcessorCreator_test.go @@ -8,6 +8,7 @@ import ( "github.com/ElrondNetwork/elrond-go-core/hashing" "github.com/ElrondNetwork/elrond-go-core/marshal" "github.com/ElrondNetwork/elrond-go/common" + "github.com/ElrondNetwork/elrond-go/dataRetriever" dataComp "github.com/ElrondNetwork/elrond-go/factory/data" "github.com/ElrondNetwork/elrond-go/factory/mock" processComp "github.com/ElrondNetwork/elrond-go/factory/processing" @@ -24,7 +25,6 @@ import ( storageManager "github.com/ElrondNetwork/elrond-go/testscommon/storage" trieMock "github.com/ElrondNetwork/elrond-go/testscommon/trie" "github.com/ElrondNetwork/elrond-go/trie" - trieFactory "github.com/ElrondNetwork/elrond-go/trie/factory" vmcommon "github.com/ElrondNetwork/elrond-vm-common" "github.com/stretchr/testify/require" ) @@ -104,14 +104,14 @@ func Test_newBlockProcessorCreatorForMeta(t *testing.T) { storageManagerPeer, _ := trie.CreateTrieStorageManager(storageManagerArgs, options) trieStorageManagers := make(map[string]common.StorageManager) - trieStorageManagers[trieFactory.UserAccountTrie] = storageManagerUser - trieStorageManagers[trieFactory.PeerAccountTrie] = storageManagerPeer + trieStorageManagers[dataRetriever.UserAccountsUnit.String()] = storageManagerUser + trieStorageManagers[dataRetriever.PeerAccountsUnit.String()] = storageManagerPeer accounts, err := createAccountAdapter( &mock.MarshalizerMock{}, &hashingMocks.HasherMock{}, factoryState.NewAccountCreator(), - trieStorageManagers[trieFactory.UserAccountTrie], + 
trieStorageManagers[dataRetriever.UserAccountsUnit.String()], ) require.Nil(t, err) From ff5674dbd37f67b7902692520ba29392d5cb8e48 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Fri, 23 Dec 2022 15:10:12 +0200 Subject: [PATCH 025/221] refactor the set of accounts db identifier --- process/sync/baseSync.go | 27 ++++++--------------------- process/sync/metablock.go | 11 ++++++++--- 2 files changed, 14 insertions(+), 24 deletions(-) diff --git a/process/sync/baseSync.go b/process/sync/baseSync.go index fd56a4d141e..7c204ad0891 100644 --- a/process/sync/baseSync.go +++ b/process/sync/baseSync.go @@ -124,9 +124,6 @@ type baseBootstrap struct { isInImportMode bool scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler processWaitTime time.Duration - - userAccountsStorerIdentifier string - peerAccountsStorerIdentifier string } // setRequestedHeaderNonce method sets the header nonce requested by the sync mechanism @@ -1127,29 +1124,17 @@ func (boot *baseBootstrap) waitForMiniBlocks() error { } } -func (boot *baseBootstrap) setAccountsStorerIdentifiers() error { - userStorer, err := boot.store.GetStorer(dataRetriever.UserAccountsUnit) - if err != nil { - return err - } - dbWithID, ok := userStorer.(dbStorerWithIdentifier) - if !ok { - return errors.ErrWrongTypeAssertion - } - boot.userAccountsStorerIdentifier = dbWithID.GetIdentifier() - - peerStorer, err := boot.store.GetStorer(dataRetriever.PeerAccountsUnit) +func (boot *baseBootstrap) getStorerIdentifier(unitType dataRetriever.UnitType) (string, error) { + storer, err := boot.store.GetStorer(unitType) if err != nil { - return err + return "", err } - dbPeerWithID, ok := peerStorer.(dbStorerWithIdentifier) + dbWithID, ok := storer.(dbStorerWithIdentifier) if !ok { - return errors.ErrWrongTypeAssertion + return "", errors.ErrWrongTypeAssertion } - boot.peerAccountsStorerIdentifier = dbPeerWithID.GetIdentifier() - - return nil + return dbWithID.GetIdentifier(), nil } func (boot *baseBootstrap) init() { diff --git 
a/process/sync/metablock.go b/process/sync/metablock.go index e2e4906b89a..a610b59f0e2 100644 --- a/process/sync/metablock.go +++ b/process/sync/metablock.go @@ -194,16 +194,21 @@ func (boot *MetaBootstrap) SyncBlock(ctx context.Context) error { } func (boot *MetaBootstrap) syncAccountsDBs(key []byte, id string) error { - err := boot.setAccountsStorerIdentifiers() + userAccountsStorerIdentifier, err := boot.getStorerIdentifier(dataRetriever.UserAccountsUnit) + if err != nil { + return err + } + + peerAccountsStorerIdentifier, err := boot.getStorerIdentifier(dataRetriever.PeerAccountsUnit) if err != nil { return err } // TODO: refactor this in order to avoid treatment based on identifier switch id { - case boot.userAccountsStorerIdentifier: + case userAccountsStorerIdentifier: return boot.syncUserAccountsState(key) - case boot.peerAccountsStorerIdentifier: + case peerAccountsStorerIdentifier: return boot.syncValidatorAccountsState(key) default: return fmt.Errorf("invalid trie identifier, id: %s", id) From 0a17029aafdfc9bfdf971a94cc641e3a44cb2419 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Mon, 9 Jan 2023 11:00:37 +0200 Subject: [PATCH 026/221] changed log warn to trace in get node from db --- trie/node.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/trie/node.go b/trie/node.go index f56a733e8ae..b399fab5ca0 100644 --- a/trie/node.go +++ b/trie/node.go @@ -119,7 +119,7 @@ func getNodeFromDBAndDecode(n []byte, db common.DBWriteCacher, marshalizer marsh if err != nil { dbWithID, ok := db.(dbWriteCacherWithIdentifier) if !ok { - log.Warn("wrong type assertion on", common.GetNodeFromDBErrorString, "error", err, "key", n) + log.Trace("wrong type assertion on", common.GetNodeFromDBErrorString, "error", err, "key", n) return nil, errors.NewGetNodeFromDBErrWithKey(n, err, "") } From 73275b12cd93e8e0ca7845fc249228e039016ebb Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Thu, 12 Jan 2023 10:27:45 +0200 Subject: [PATCH 027/221] return err on 
CreateAndProcessMiniBlocks if getNodeFromDB err --- process/block/preprocess/transactions.go | 4 ++++ trie/node.go | 3 ++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/process/block/preprocess/transactions.go b/process/block/preprocess/transactions.go index 35dd0900a43..1093033bc01 100644 --- a/process/block/preprocess/transactions.go +++ b/process/block/preprocess/transactions.go @@ -1083,6 +1083,10 @@ func (txs *transactions) CreateAndProcessMiniBlocks(haveTime func() bool, random ) if err != nil { + if elrondErr.IsGetNodeFromDBError(err) { + return nil, err + } + log.Debug("createAndProcessMiniBlocksFromMe", "error", err.Error()) return make(block.MiniBlockSlice, 0), nil } diff --git a/trie/node.go b/trie/node.go index b399fab5ca0..76afbee0223 100644 --- a/trie/node.go +++ b/trie/node.go @@ -3,6 +3,7 @@ package trie import ( "context" + "fmt" "time" "github.com/ElrondNetwork/elrond-go-core/hashing" @@ -119,7 +120,7 @@ func getNodeFromDBAndDecode(n []byte, db common.DBWriteCacher, marshalizer marsh if err != nil { dbWithID, ok := db.(dbWriteCacherWithIdentifier) if !ok { - log.Trace("wrong type assertion on", common.GetNodeFromDBErrorString, "error", err, "key", n) + log.Trace(common.GetNodeFromDBErrorString, "error", err, "key", n, "db type", fmt.Sprintf("%T", db)) return nil, errors.NewGetNodeFromDBErrWithKey(n, err, "") } From a2deeffc2903991993e20652cdb9dccf0c66bd6c Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Thu, 12 Jan 2023 10:33:18 +0200 Subject: [PATCH 028/221] small refactor --- process/block/preprocess/transactions.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/process/block/preprocess/transactions.go b/process/block/preprocess/transactions.go index 1093033bc01..e9b72498f67 100644 --- a/process/block/preprocess/transactions.go +++ b/process/block/preprocess/transactions.go @@ -1083,11 +1083,12 @@ func (txs *transactions) CreateAndProcessMiniBlocks(haveTime func() bool, random ) if err != nil { + 
log.Debug("createAndProcessMiniBlocksFromMe", "error", err.Error()) + if elrondErr.IsGetNodeFromDBError(err) { return nil, err } - log.Debug("createAndProcessMiniBlocksFromMe", "error", err.Error()) return make(block.MiniBlockSlice, 0), nil } From 65e517f6ae5a992d25791c92afa10cb31d119140 Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Fri, 3 Feb 2023 12:13:48 +0200 Subject: [PATCH 029/221] return error if key not found --- process/block/shardblock.go | 5 +++++ process/coordinator/process.go | 6 ++++++ 2 files changed, 11 insertions(+) diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 01cb1fb8ae3..fa1e61e9932 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -15,6 +15,7 @@ import ( logger "github.com/ElrondNetwork/elrond-go-logger" "github.com/ElrondNetwork/elrond-go/common" "github.com/ElrondNetwork/elrond-go/dataRetriever" + "github.com/ElrondNetwork/elrond-go/errors" processOutport "github.com/ElrondNetwork/elrond-go/outport/process" "github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block/bootstrapStorage" @@ -2021,6 +2022,10 @@ func (sp *shardProcessor) createMiniBlocks(haveTime func() bool, randomness []by log.Debug("elapsed time to create mbs to me", "time", elapsedTime) if err != nil { log.Debug("createAndProcessCrossMiniBlocksDstMe", "error", err.Error()) + + if errors.IsGetNodeFromDBError(err) { + return nil, nil, err + } } if createAndProcessMBsDestMeInfo != nil { processedMiniBlocksDestMeInfo = createAndProcessMBsDestMeInfo.allProcessedMiniBlocksInfo diff --git a/process/coordinator/process.go b/process/coordinator/process.go index ff532881e3b..f3b7643fbc9 100644 --- a/process/coordinator/process.go +++ b/process/coordinator/process.go @@ -17,6 +17,7 @@ import ( "github.com/ElrondNetwork/elrond-go-core/marshal" logger "github.com/ElrondNetwork/elrond-go-logger" "github.com/ElrondNetwork/elrond-go/common" + "github.com/ElrondNetwork/elrond-go/errors" 
"github.com/ElrondNetwork/elrond-go/process" "github.com/ElrondNetwork/elrond-go/process/block/preprocess" "github.com/ElrondNetwork/elrond-go/process/block/processedMb" @@ -719,6 +720,11 @@ func (tc *transactionCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe "total gas penalized", tc.gasHandler.TotalGasPenalized(), "error", errProc, ) + + if errors.IsGetNodeFromDBError(errProc) { + return nil, 0, false, err + } + continue } From 773d9a8ccd00988e776873c08c57623da71f1a78 Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Fri, 3 Feb 2023 13:21:50 +0200 Subject: [PATCH 030/221] fix after merge --- errors/missingTrieNodeError.go | 2 +- process/block/preprocess/rewardTxPreProcessor_test.go | 2 +- process/block/preprocess/transactions.go | 2 +- process/block/preprocess/transactionsV2.go | 4 ++-- process/rewardTransaction/process.go | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/errors/missingTrieNodeError.go b/errors/missingTrieNodeError.go index 029cc45069f..b1583bc3e4d 100644 --- a/errors/missingTrieNodeError.go +++ b/errors/missingTrieNodeError.go @@ -3,7 +3,7 @@ package errors import ( "strings" - "github.com/ElrondNetwork/elrond-go/common" + "github.com/multiversx/mx-chain-go/common" ) // IsGetNodeFromDBError returns true if the provided error is of type getNodeFromDB diff --git a/process/block/preprocess/rewardTxPreProcessor_test.go b/process/block/preprocess/rewardTxPreProcessor_test.go index 33a547a86ea..9871ba22081 100644 --- a/process/block/preprocess/rewardTxPreProcessor_test.go +++ b/process/block/preprocess/rewardTxPreProcessor_test.go @@ -9,7 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/rewardTx" - "github.com/multiversx/mx-chain-core-go/common" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" 
"github.com/multiversx/mx-chain-go/process/mock" diff --git a/process/block/preprocess/transactions.go b/process/block/preprocess/transactions.go index 56542dadd6a..2cd03f289ea 100644 --- a/process/block/preprocess/transactions.go +++ b/process/block/preprocess/transactions.go @@ -1201,7 +1201,7 @@ func (txs *transactions) createAndProcessMiniBlocksFromMeV1( err = txs.processMiniBlockBuilderTx(mbBuilder, wtx, tx) if err != nil { - if elrondErr.IsGetNodeFromDBError(err) { + if chainErr.IsGetNodeFromDBError(err) { return nil, nil, err } continue diff --git a/process/block/preprocess/transactionsV2.go b/process/block/preprocess/transactionsV2.go index 9890ab0272f..430aa373028 100644 --- a/process/block/preprocess/transactionsV2.go +++ b/process/block/preprocess/transactionsV2.go @@ -71,7 +71,7 @@ func (txs *transactions) createAndProcessMiniBlocksFromMeV2( receiverShardID, mbInfo) if err != nil { - if elrondErr.IsGetNodeFromDBError(err) { + if chainErr.IsGetNodeFromDBError(err) { return nil, nil, nil, err } if shouldAddToRemaining { @@ -316,7 +316,7 @@ func (txs *transactions) createScheduledMiniBlocks( receiverShardID, mbInfo) if err != nil { - if elrondErr.IsGetNodeFromDBError(err) { + if chainErr.IsGetNodeFromDBError(err) { return nil, err } continue diff --git a/process/rewardTransaction/process.go b/process/rewardTransaction/process.go index 35b0f8ad22a..fed488bb606 100644 --- a/process/rewardTransaction/process.go +++ b/process/rewardTransaction/process.go @@ -6,7 +6,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data/rewardTx" - "github.com/multiversx/mx-chain-core-go/errors" + "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" From a683564b237deafbb1b46d0951a2aee8636a2cd2 Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Fri, 3 Feb 
2023 14:26:12 +0200 Subject: [PATCH 031/221] fix after merge --- .../resolverscontainer/metaResolversContainerFactory.go | 1 - .../metaResolversContainerFactory_test.go | 1 - .../resolverscontainer/shardResolversContainerFactory.go | 1 - .../shardResolversContainerFactory_test.go | 1 - genesis/process/genesisBlockCreator.go | 1 - genesis/process/genesisBlockCreator_test.go | 1 - integrationTests/state/stateTrie/stateTrie_test.go | 9 ++++----- .../state/stateTrieSync/stateTrieSync_test.go | 2 +- integrationTests/testProcessorNode.go | 1 - node/nodeRunner.go | 1 - process/block/preprocess/transactions.go | 2 +- process/block/shardblock.go | 1 + process/coordinator/process.go | 1 + process/sync/metablock_test.go | 1 + process/sync/shardblock_test.go | 1 + testscommon/components/components.go | 1 - testscommon/components/default.go | 1 + update/genesis/import.go | 2 +- update/genesis/import_test.go | 2 +- 19 files changed, 13 insertions(+), 18 deletions(-) diff --git a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go index 788a71e0cdd..889481e9fde 100644 --- a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go +++ b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory.go @@ -8,7 +8,6 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/factory/containers" "github.com/multiversx/mx-chain-go/dataRetriever/resolvers" - triesFactory "github.com/multiversx/mx-chain-go/trie/factory" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/process/factory" diff --git a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go index 15d8154110d..7d95585277c 100644 --- a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go +++ 
b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go @@ -18,7 +18,6 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" - triesFactory "github.com/multiversx/mx-chain-go/trie/factory" "github.com/stretchr/testify/assert" ) diff --git a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go index 9bb58357816..7a4fb1a282a 100644 --- a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go +++ b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory.go @@ -9,7 +9,6 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever/factory/containers" "github.com/multiversx/mx-chain-go/dataRetriever/resolvers" "github.com/multiversx/mx-chain-go/process/factory" - triesFactory "github.com/multiversx/mx-chain-go/trie/factory" ) var _ dataRetriever.ResolversContainerFactory = (*shardResolversContainerFactory)(nil) diff --git a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go index c62d1e04eab..b33cdbfa64a 100644 --- a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go +++ b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go @@ -18,7 +18,6 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" - triesFactory "github.com/multiversx/mx-chain-go/trie/factory" "github.com/stretchr/testify/assert" ) diff --git a/genesis/process/genesisBlockCreator.go b/genesis/process/genesisBlockCreator.go index fd6a5b273e7..247cb6ae49a 100644 --- 
a/genesis/process/genesisBlockCreator.go +++ b/genesis/process/genesisBlockCreator.go @@ -28,7 +28,6 @@ import ( "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/factory" "github.com/multiversx/mx-chain-go/storage/storageunit" - triesFactory "github.com/multiversx/mx-chain-go/trie/factory" "github.com/multiversx/mx-chain-go/update" hardfork "github.com/multiversx/mx-chain-go/update/genesis" hardForkProcess "github.com/multiversx/mx-chain-go/update/process" diff --git a/genesis/process/genesisBlockCreator_test.go b/genesis/process/genesisBlockCreator_test.go index ea1a50b0218..30bfe94d609 100644 --- a/genesis/process/genesisBlockCreator_test.go +++ b/genesis/process/genesisBlockCreator_test.go @@ -28,7 +28,6 @@ import ( stateMock "github.com/multiversx/mx-chain-go/testscommon/state" storageCommon "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/trie" - "github.com/multiversx/mx-chain-go/trie/factory" "github.com/multiversx/mx-chain-go/update" updateMock "github.com/multiversx/mx-chain-go/update/mock" "github.com/multiversx/mx-chain-go/vm/systemSmartContracts/defaults" diff --git a/integrationTests/state/stateTrie/stateTrie_test.go b/integrationTests/state/stateTrie/stateTrie_test.go index 97d7939252d..f71c42d8a85 100644 --- a/integrationTests/state/stateTrie/stateTrie_test.go +++ b/integrationTests/state/stateTrie/stateTrie_test.go @@ -5,13 +5,13 @@ import ( "encoding/base64" "encoding/binary" "encoding/hex" - "errors" "fmt" "math" "math/big" "math/rand" "runtime" "strconv" + "strings" "sync" "sync/atomic" "testing" @@ -39,7 +39,6 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/statusHandler" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" - trieFactory "github.com/multiversx/mx-chain-go/trie/factory" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/assert" 
"github.com/stretchr/testify/require" @@ -1368,7 +1367,7 @@ func TestRollbackBlockAndCheckThatPruningIsCancelledOnAccountsTrie(t *testing.T) if !bytes.Equal(rootHash, rootHashOfRollbackedBlock) { time.Sleep(time.Second * 3) err = shardNode.AccntState.RecreateTrie(rootHashOfRollbackedBlock) - require.True(t, errors.Is(err, trie.ErrKeyNotFound)) + require.True(t, strings.Contains(err.Error(), trie.ErrKeyNotFound.Error())) } nonces := []*uint64{new(uint64), new(uint64)} @@ -1529,7 +1528,7 @@ func TestTriePruningWhenBlockIsFinal(t *testing.T) { require.Equal(t, uint64(17), nodes[1].BlockChain.GetCurrentBlockHeader().GetNonce()) err := shardNode.AccntState.RecreateTrie(rootHashOfFirstBlock) - require.True(t, errors.Is(err, trie.ErrKeyNotFound)) + require.True(t, strings.Contains(err.Error(), trie.ErrKeyNotFound.Error())) } func TestStatePruningIsNotBuffered(t *testing.T) { @@ -1674,7 +1673,7 @@ func checkTrieCanBeRecreated(tb testing.TB, node *integrationTests.TestProcessor return } - stateTrie := node.TrieContainer.Get([]byte(trieFactory.UserAccountTrie)) + stateTrie := node.TrieContainer.Get([]byte(dataRetriever.UserAccountsUnit.String())) roothash := node.BlockChain.GetCurrentBlockRootHash() tr, err := stateTrie.Recreate(roothash) require.Nil(tb, err) diff --git a/integrationTests/state/stateTrieSync/stateTrieSync_test.go b/integrationTests/state/stateTrieSync/stateTrieSync_test.go index 656cc1b8d14..5a46fc9d9a3 100644 --- a/integrationTests/state/stateTrieSync/stateTrieSync_test.go +++ b/integrationTests/state/stateTrieSync/stateTrieSync_test.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/throttler" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart/notifier" "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/process/factory" @@ -20,7 +21,6 @@ import ( 
"github.com/multiversx/mx-chain-go/testscommon" testStorage "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/trie" - trieFactory "github.com/multiversx/mx-chain-go/trie/factory" "github.com/multiversx/mx-chain-go/trie/keyBuilder" "github.com/multiversx/mx-chain-go/trie/statistics" "github.com/multiversx/mx-chain-go/trie/storageMarker" diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 54dd3eb2146..e154cbfb7a0 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -112,7 +112,6 @@ import ( statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" - trieFactory "github.com/multiversx/mx-chain-go/trie/factory" "github.com/multiversx/mx-chain-go/trie/keyBuilder" "github.com/multiversx/mx-chain-go/update" "github.com/multiversx/mx-chain-go/update/trigger" diff --git a/node/nodeRunner.go b/node/nodeRunner.go index e6ed4036870..1ea5b5cbd5e 100644 --- a/node/nodeRunner.go +++ b/node/nodeRunner.go @@ -59,7 +59,6 @@ import ( "github.com/multiversx/mx-chain-go/storage/cache" storageFactory "github.com/multiversx/mx-chain-go/storage/factory" "github.com/multiversx/mx-chain-go/storage/storageunit" - trieFactory "github.com/multiversx/mx-chain-go/trie/factory" trieStatistics "github.com/multiversx/mx-chain-go/trie/statistics" "github.com/multiversx/mx-chain-go/trie/storageMarker" "github.com/multiversx/mx-chain-go/update/trigger" diff --git a/process/block/preprocess/transactions.go b/process/block/preprocess/transactions.go index 6610af0d68d..3d1b86024d6 100644 --- a/process/block/preprocess/transactions.go +++ b/process/block/preprocess/transactions.go @@ -1091,7 +1091,7 @@ func (txs *transactions) CreateAndProcessMiniBlocks(haveTime func() bool, random if err != nil { 
log.Debug("createAndProcessMiniBlocksFromMe", "error", err.Error()) - if elrondErr.IsGetNodeFromDBError(err) { + if chainErr.IsGetNodeFromDBError(err) { return nil, err } diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 2b94a99091a..f007d6d746e 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -13,6 +13,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/headerVersionData" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/errors" processOutport "github.com/multiversx/mx-chain-go/outport/process" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" diff --git a/process/coordinator/process.go b/process/coordinator/process.go index 0ebfeb2790e..5a1a685a478 100644 --- a/process/coordinator/process.go +++ b/process/coordinator/process.go @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/preprocess" "github.com/multiversx/mx-chain-go/process/block/processedMb" diff --git a/process/sync/metablock_test.go b/process/sync/metablock_test.go index eba20a78c78..019076d66b9 100644 --- a/process/sync/metablock_test.go +++ b/process/sync/metablock_test.go @@ -19,6 +19,7 @@ import ( "github.com/multiversx/mx-chain-go/consensus/round" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" + commonErrors "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/process/sync" diff --git a/process/sync/shardblock_test.go b/process/sync/shardblock_test.go index 
53e20ed75db..7c3f2ab1d9c 100644 --- a/process/sync/shardblock_test.go +++ b/process/sync/shardblock_test.go @@ -20,6 +20,7 @@ import ( "github.com/multiversx/mx-chain-go/consensus/round" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" + commonErrors "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/process/sync" diff --git a/testscommon/components/components.go b/testscommon/components/components.go index cc20dbc6b74..e5ab892f0b9 100644 --- a/testscommon/components/components.go +++ b/testscommon/components/components.go @@ -40,7 +40,6 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" "github.com/multiversx/mx-chain-go/trie" - trieFactory "github.com/multiversx/mx-chain-go/trie/factory" "github.com/multiversx/mx-chain-go/trie/hashesHolder" logger "github.com/multiversx/mx-chain-logger-go" wasmConfig "github.com/multiversx/mx-chain-vm-v1_4-go/config" diff --git a/testscommon/components/default.go b/testscommon/components/default.go index 7212144c234..7ebcd67d507 100644 --- a/testscommon/components/default.go +++ b/testscommon/components/default.go @@ -6,6 +6,7 @@ import ( crypto "github.com/multiversx/mx-chain-crypto-go" "github.com/multiversx/mx-chain-go/common" consensusMocks "github.com/multiversx/mx-chain-go/consensus/mock" + "github.com/multiversx/mx-chain-go/dataRetriever" dataRetrieverMock "github.com/multiversx/mx-chain-go/dataRetriever/mock" "github.com/multiversx/mx-chain-go/factory/mock" "github.com/multiversx/mx-chain-go/sharding" diff --git a/update/genesis/import.go b/update/genesis/import.go index f482dac0674..ce9f3c33f53 100644 --- a/update/genesis/import.go +++ b/update/genesis/import.go @@ -5,6 +5,7 @@ import ( "encoding/hex" "encoding/json" "fmt" + 
"github.com/multiversx/mx-chain-go/dataRetriever" "strings" "github.com/multiversx/mx-chain-core-go/core" @@ -20,7 +21,6 @@ import ( "github.com/multiversx/mx-chain-go/state/factory" "github.com/multiversx/mx-chain-go/state/storagePruningManager/disabled" "github.com/multiversx/mx-chain-go/trie" - triesFactory "github.com/multiversx/mx-chain-go/trie/factory" "github.com/multiversx/mx-chain-go/update" ) diff --git a/update/genesis/import_test.go b/update/genesis/import_test.go index 9cc938129a5..01a8b50e82e 100644 --- a/update/genesis/import_test.go +++ b/update/genesis/import_test.go @@ -10,9 +10,9 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" - "github.com/multiversx/mx-chain-go/trie/factory" "github.com/multiversx/mx-chain-go/update" "github.com/multiversx/mx-chain-go/update/mock" "github.com/stretchr/testify/assert" From 36ddeb41cbecb03b18a5f9c0c20de625428da294 Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Mon, 13 Feb 2023 11:45:50 +0200 Subject: [PATCH 032/221] remove error wrapping from trie Get() --- trie/patriciaMerkleTrie.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/trie/patriciaMerkleTrie.go b/trie/patriciaMerkleTrie.go index 1fb8c3abce7..75bbe70f9bc 100644 --- a/trie/patriciaMerkleTrie.go +++ b/trie/patriciaMerkleTrie.go @@ -88,7 +88,7 @@ func (tr *patriciaMerkleTrie) Get(key []byte) ([]byte, uint32, error) { val, depth, err := tr.root.tryGet(hexKey, rootDepthLevel, tr.trieStorage) if err != nil { - err = fmt.Errorf("trie get error: %w, for key %v", err, hex.EncodeToString(key)) + log.Error("trie get error", "error", err.Error(), "key", hex.EncodeToString(key)) return nil, depth, err } From 7123970e7e72aac8596249accfb4e09d06ffbbaa Mon Sep 17 00:00:00 2001 From: 
BeniaminDrasovean Date: Tue, 14 Feb 2023 17:04:41 +0200 Subject: [PATCH 033/221] small logs refactor --- trie/node.go | 2 +- trie/patriciaMerkleTrie.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/trie/node.go b/trie/node.go index e7678e0ab25..004bea97c26 100644 --- a/trie/node.go +++ b/trie/node.go @@ -120,7 +120,7 @@ func getNodeFromDBAndDecode(n []byte, db common.DBWriteCacher, marshalizer marsh if err != nil { dbWithID, ok := db.(dbWriteCacherWithIdentifier) if !ok { - log.Trace(common.GetNodeFromDBErrorString, "error", err, "key", n, "db type", fmt.Sprintf("%T", db)) + log.Warn(common.GetNodeFromDBErrorString, "error", err, "key", n, "db type", fmt.Sprintf("%T", db)) return nil, errors.NewGetNodeFromDBErrWithKey(n, err, "") } diff --git a/trie/patriciaMerkleTrie.go b/trie/patriciaMerkleTrie.go index 75bbe70f9bc..8d08857b2aa 100644 --- a/trie/patriciaMerkleTrie.go +++ b/trie/patriciaMerkleTrie.go @@ -88,7 +88,7 @@ func (tr *patriciaMerkleTrie) Get(key []byte) ([]byte, uint32, error) { val, depth, err := tr.root.tryGet(hexKey, rootDepthLevel, tr.trieStorage) if err != nil { - log.Error("trie get error", "error", err.Error(), "key", hex.EncodeToString(key)) + log.Error("trie get error", "error", err.Error(), "key", key) return nil, depth, err } From 642a9908feeb506d316992afba63e99f11233c92 Mon Sep 17 00:00:00 2001 From: robertsasu Date: Wed, 1 Mar 2023 14:38:39 +0200 Subject: [PATCH 034/221] integrated userNames to rc-1.6 --- cmd/node/config/config.toml | 3 +++ cmd/node/config/enableEpochs.toml | 3 +++ common/enablers/enableEpochsHandler.go | 1 + common/enablers/enableEpochsHandler_test.go | 4 ++++ common/enablers/epochFlags.go | 7 +++++++ common/interface.go | 1 + config/config.go | 1 + config/epochConfig.go | 1 + config/tomlConfig_test.go | 4 ++++ factory/api/apiResolverFactory.go | 21 +++++++++++++++++++ factory/processing/blockProcessorCreator.go | 14 +++++++++++++ genesis/process/shardGenesisBlockCreator.go | 1 + go.mod | 2 +- go.sum 
| 3 ++- integrationTests/testProcessorNode.go | 3 +++ .../testProcessorNodeWithTestWebServer.go | 1 + integrationTests/vm/testInitializer.go | 6 ++++++ integrationTests/vm/wasm/utils.go | 1 + .../smartContract/builtInFunctions/factory.go | 4 +++- .../builtInFunctions/factory_test.go | 1 + sharding/mock/enableEpochsHandlerMock.go | 5 +++++ testscommon/components/configs.go | 5 +++++ testscommon/enableEpochsHandlerStub.go | 9 ++++++++ testscommon/generalConfig.go | 5 +++++ 24 files changed, 103 insertions(+), 3 deletions(-) diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index 21245843e1a..9f16b275451 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -707,6 +707,9 @@ "erd1najnxxweyw6plhg8efql330nttrj6l5cf87wqsuym85s9ha0hmdqnqgenp", #shard 2 ] MaxNumAddressesInTransferRole = 100 + DNSV2Addresses =[ + "erd1he8wwxn4az3j82p7wwqsdk794dm7hcrwny6f8dfegkfla34udx7qrf7xje", #shard 0 + ] [Hardfork] EnableTrigger = true diff --git a/cmd/node/config/enableEpochs.toml b/cmd/node/config/enableEpochs.toml index 7bd4ffbcd41..6a853936b61 100644 --- a/cmd/node/config/enableEpochs.toml +++ b/cmd/node/config/enableEpochs.toml @@ -242,6 +242,9 @@ # RuntimeCodeSizeFixEnableEpoch represents the epoch when the code size fix in the VM is enabled RuntimeCodeSizeFixEnableEpoch = 2 + # ChangeUserNameEnableEpoch represents the epoch when changing username is enabled + ChangeUserNameEnableEpoch = 2 + # BLSMultiSignerEnableEpoch represents the activation epoch for different types of BLS multi-signers BLSMultiSignerEnableEpoch = [ { EnableEpoch = 0, Type = "no-KOSK"}, diff --git a/common/enablers/enableEpochsHandler.go b/common/enablers/enableEpochsHandler.go index b9017bdcd4e..c33b1f8a173 100644 --- a/common/enablers/enableEpochsHandler.go +++ b/common/enablers/enableEpochsHandler.go @@ -118,6 +118,7 @@ func (handler *enableEpochsHandler) EpochConfirmed(epoch uint32, _ uint64) { handler.setFlagValue(epoch >= 
handler.enableEpochsConfig.MaxBlockchainHookCountersEnableEpoch, handler.maxBlockchainHookCountersFlag, "maxBlockchainHookCountersFlag") handler.setFlagValue(epoch >= handler.enableEpochsConfig.WipeSingleNFTLiquidityDecreaseEnableEpoch, handler.wipeSingleNFTLiquidityDecreaseFlag, "wipeSingleNFTLiquidityDecreaseFlag") handler.setFlagValue(epoch >= handler.enableEpochsConfig.AlwaysSaveTokenMetaDataEnableEpoch, handler.alwaysSaveTokenMetaDataFlag, "alwaysSaveTokenMetaDataFlag") + handler.setFlagValue(epoch >= handler.enableEpochsConfig.ChangeUserNameEnableEpoch, handler.changeUsernameFlag, "runtimeCodeSizeFixFlag") } func (handler *enableEpochsHandler) setFlagValue(value bool, flag *atomic.Flag, flagName string) { diff --git a/common/enablers/enableEpochsHandler_test.go b/common/enablers/enableEpochsHandler_test.go index 661d684f010..1e349b1d93c 100644 --- a/common/enablers/enableEpochsHandler_test.go +++ b/common/enablers/enableEpochsHandler_test.go @@ -91,6 +91,7 @@ func createEnableEpochsConfig() config.EnableEpochs { WipeSingleNFTLiquidityDecreaseEnableEpoch: 75, AlwaysSaveTokenMetaDataEnableEpoch: 76, RuntimeCodeSizeFixEnableEpoch: 77, + ChangeUserNameEnableEpoch: 78, } } @@ -213,6 +214,7 @@ func TestNewEnableEpochsHandler_EpochConfirmed(t *testing.T) { assert.True(t, handler.IsMaxBlockchainHookCountersFlagEnabled()) assert.True(t, handler.IsAlwaysSaveTokenMetaDataEnabled()) assert.True(t, handler.IsRuntimeCodeSizeFixEnabled()) + assert.False(t, handler.IsChangeUsernameEnabled()) }) t.Run("flags with == condition should be set, along with all >=", func(t *testing.T) { t.Parallel() @@ -313,6 +315,7 @@ func TestNewEnableEpochsHandler_EpochConfirmed(t *testing.T) { assert.True(t, handler.IsWipeSingleNFTLiquidityDecreaseEnabled()) assert.True(t, handler.IsAlwaysSaveTokenMetaDataEnabled()) assert.True(t, handler.IsRuntimeCodeSizeFixEnabled()) + assert.True(t, handler.IsChangeUsernameEnabled()) }) t.Run("flags with < should be set", func(t *testing.T) { t.Parallel() @@ 
-408,5 +411,6 @@ func TestNewEnableEpochsHandler_EpochConfirmed(t *testing.T) { assert.False(t, handler.IsWipeSingleNFTLiquidityDecreaseEnabled()) assert.False(t, handler.IsAlwaysSaveTokenMetaDataEnabled()) assert.False(t, handler.IsRuntimeCodeSizeFixEnabled()) + assert.False(t, handler.IsChangeUsernameEnabled()) }) } diff --git a/common/enablers/epochFlags.go b/common/enablers/epochFlags.go index fe11469f4bb..ef4609ba95c 100644 --- a/common/enablers/epochFlags.go +++ b/common/enablers/epochFlags.go @@ -90,6 +90,7 @@ type epochFlagsHolder struct { maxBlockchainHookCountersFlag *atomic.Flag wipeSingleNFTLiquidityDecreaseFlag *atomic.Flag alwaysSaveTokenMetaDataFlag *atomic.Flag + changeUsernameFlag *atomic.Flag } func newEpochFlagsHolder() *epochFlagsHolder { @@ -179,6 +180,7 @@ func newEpochFlagsHolder() *epochFlagsHolder { maxBlockchainHookCountersFlag: &atomic.Flag{}, wipeSingleNFTLiquidityDecreaseFlag: &atomic.Flag{}, alwaysSaveTokenMetaDataFlag: &atomic.Flag{}, + changeUsernameFlag: &atomic.Flag{}, } } @@ -659,3 +661,8 @@ func (holder *epochFlagsHolder) IsWipeSingleNFTLiquidityDecreaseEnabled() bool { func (holder *epochFlagsHolder) IsAlwaysSaveTokenMetaDataEnabled() bool { return holder.alwaysSaveTokenMetaDataFlag.IsSet() } + +// IsChangeUsernameEnabled returns true if changeUsernameFlag is enabled +func (holder *epochFlagsHolder) IsChangeUsernameEnabled() bool { + return holder.changeUsernameFlag.IsSet() +} diff --git a/common/interface.go b/common/interface.go index a58b6aa94db..d23a9556133 100644 --- a/common/interface.go +++ b/common/interface.go @@ -327,6 +327,7 @@ type EnableEpochsHandler interface { IsMaxBlockchainHookCountersFlagEnabled() bool IsWipeSingleNFTLiquidityDecreaseEnabled() bool IsAlwaysSaveTokenMetaDataEnabled() bool + IsChangeUsernameEnabled() bool IsInterfaceNil() bool } diff --git a/config/config.go b/config/config.go index 1ba40cd315c..a9279f3e757 100644 --- a/config/config.go +++ b/config/config.go @@ -417,6 +417,7 @@ type 
VirtualMachineGasConfig struct { type BuiltInFunctionsConfig struct { AutomaticCrawlerAddresses []string MaxNumAddressesInTransferRole uint32 + DNSV2Addresses []string } // HardforkConfig holds the configuration for the hardfork trigger diff --git a/config/epochConfig.go b/config/epochConfig.go index e729f362d91..4c8a6790068 100644 --- a/config/epochConfig.go +++ b/config/epochConfig.go @@ -93,6 +93,7 @@ type EnableEpochs struct { MaxBlockchainHookCountersEnableEpoch uint32 WipeSingleNFTLiquidityDecreaseEnableEpoch uint32 AlwaysSaveTokenMetaDataEnableEpoch uint32 + ChangeUserNameEnableEpoch uint32 BLSMultiSignerEnableEpoch []MultiSignerConfig } diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go index a3cc8c949dc..9a1f88d84cc 100644 --- a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -684,6 +684,9 @@ func TestEnableEpochConfig(t *testing.T) { # RuntimeMemStoreLimitEnableEpoch represents the epoch when the condition for Runtime MemStore is enabled RuntimeMemStoreLimitEnableEpoch = 63 + # ChangeUsernameEnableEpoch represents the epoch when change username is enabled + ChangeUsernameEnableEpoch = 64 + # MaxNodesChangeEnableEpoch holds configuration for changing the maximum number of nodes and the enabling epoch MaxNodesChangeEnableEpoch = [ { EpochEnable = 44, MaxNumNodes = 2169, NodesToShufflePerShard = 80 }, @@ -779,6 +782,7 @@ func TestEnableEpochConfig(t *testing.T) { AlwaysSaveTokenMetaDataEnableEpoch: 61, RuntimeCodeSizeFixEnableEpoch: 62, RuntimeMemStoreLimitEnableEpoch: 63, + ChangeUserNameEnableEpoch: 64, BLSMultiSignerEnableEpoch: []MultiSignerConfig{ { EnableEpoch: 0, diff --git a/factory/api/apiResolverFactory.go b/factory/api/apiResolverFactory.go index 3c815aecb3e..0e9f64ce258 100644 --- a/factory/api/apiResolverFactory.go +++ b/factory/api/apiResolverFactory.go @@ -123,6 +123,12 @@ func CreateApiResolver(args *ApiResolverArgs) (facade.ApiResolver, error) { return nil, errDecode } + dnsV2AddressesStrings := 
args.Configs.GeneralConfig.BuiltInFunctions.DNSV2Addresses + convertedDNSV2Addresses, errDecode := factory.DecodeAddresses(pkConverter, dnsV2AddressesStrings) + if errDecode != nil { + return nil, errDecode + } + builtInFuncFactory, err := createBuiltinFuncs( args.GasScheduleNotifier, args.CoreComponents.InternalMarshalizer(), @@ -132,6 +138,7 @@ func CreateApiResolver(args *ApiResolverArgs) (facade.ApiResolver, error) { args.CoreComponents.EnableEpochsHandler(), convertedAddresses, args.Configs.GeneralConfig.BuiltInFunctions.MaxNumAddressesInTransferRole, + convertedDNSV2Addresses, ) if err != nil { return nil, err @@ -332,6 +339,12 @@ func createScQueryElement( return nil, errDecode } + dnsV2AddressesStrings := args.generalConfig.BuiltInFunctions.DNSV2Addresses + convertedDNSV2Addresses, errDecode := factory.DecodeAddresses(pkConverter, dnsV2AddressesStrings) + if errDecode != nil { + return nil, errDecode + } + builtInFuncFactory, err := createBuiltinFuncs( args.gasScheduleNotifier, args.coreComponents.InternalMarshalizer(), @@ -341,6 +354,7 @@ func createScQueryElement( args.coreComponents.EnableEpochsHandler(), convertedAddresses, args.generalConfig.BuiltInFunctions.MaxNumAddressesInTransferRole, + convertedDNSV2Addresses, ) if err != nil { return nil, err @@ -474,10 +488,17 @@ func createBuiltinFuncs( enableEpochsHandler vmcommon.EnableEpochsHandler, automaticCrawlerAddresses [][]byte, maxNumAddressesInTransferRole uint32, + dnsV2Addresses [][]byte, ) (vmcommon.BuiltInFunctionFactory, error) { + mapDNSV2Addresses := make(map[string]struct{}) + for _, address := range dnsV2Addresses { + mapDNSV2Addresses[string(address)] = struct{}{} + } + argsBuiltIn := builtInFunctions.ArgsCreateBuiltInFunctionContainer{ GasSchedule: gasScheduleNotifier, MapDNSAddresses: make(map[string]struct{}), + MapDNSV2Addresses: mapDNSV2Addresses, Marshalizer: marshalizer, Accounts: accnts, ShardCoordinator: shardCoordinator, diff --git a/factory/processing/blockProcessorCreator.go 
b/factory/processing/blockProcessorCreator.go index 4f73a39db02..034d6121444 100644 --- a/factory/processing/blockProcessorCreator.go +++ b/factory/processing/blockProcessorCreator.go @@ -1257,9 +1257,23 @@ func (pcf *processComponentsFactory) createBuiltInFunctionContainer( return nil, err } + convertedDNSV2Addresses, err := mainFactory.DecodeAddresses( + pcf.coreData.AddressPubKeyConverter(), + pcf.config.BuiltInFunctions.DNSV2Addresses, + ) + if err != nil { + return nil, err + } + + mapDNSV2Addresses := make(map[string]struct{}) + for _, address := range convertedDNSV2Addresses { + mapDNSV2Addresses[string(address)] = struct{}{} + } + argsBuiltIn := builtInFunctions.ArgsCreateBuiltInFunctionContainer{ GasSchedule: pcf.gasSchedule, MapDNSAddresses: mapDNSAddresses, + MapDNSV2Addresses: mapDNSV2Addresses, Marshalizer: pcf.coreData.InternalMarshalizer(), Accounts: accounts, ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), diff --git a/genesis/process/shardGenesisBlockCreator.go b/genesis/process/shardGenesisBlockCreator.go index aa140fe629a..0b0f820f73f 100644 --- a/genesis/process/shardGenesisBlockCreator.go +++ b/genesis/process/shardGenesisBlockCreator.go @@ -390,6 +390,7 @@ func createProcessorsForShardGenesisBlock(arg ArgsGenesisBlockCreator, enableEpo argsBuiltIn := builtInFunctions.ArgsCreateBuiltInFunctionContainer{ GasSchedule: arg.GasSchedule, MapDNSAddresses: make(map[string]struct{}), + MapDNSV2Addresses: make(map[string]struct{}), EnableUserNameChange: false, Marshalizer: arg.Core.InternalMarshalizer(), Accounts: arg.Accounts, diff --git a/go.mod b/go.mod index 264132c7f04..64b64c4b42a 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/multiversx/mx-chain-logger-go v1.0.11 github.com/multiversx/mx-chain-p2p-go v1.0.11 github.com/multiversx/mx-chain-storage-go v1.0.7 - github.com/multiversx/mx-chain-vm-common-go v1.3.37 + github.com/multiversx/mx-chain-vm-common-go v1.3.38-0.20230301122431-1c50bb69ded0 
github.com/multiversx/mx-chain-vm-v1_2-go v1.2.50 github.com/multiversx/mx-chain-vm-v1_3-go v1.3.51 github.com/multiversx/mx-chain-vm-v1_4-go v1.4.77 diff --git a/go.sum b/go.sum index 98d6665f0b0..718847e67a4 100644 --- a/go.sum +++ b/go.sum @@ -624,8 +624,9 @@ github.com/multiversx/mx-chain-storage-go v1.0.7 h1:UqLo/OLTD3IHiE/TB/SEdNRV1GG2 github.com/multiversx/mx-chain-storage-go v1.0.7/go.mod h1:gtKoV32Cg2Uy8deHzF8Ud0qAl0zv92FvWgPSYIP0Zmg= github.com/multiversx/mx-chain-vm-common-go v1.3.34/go.mod h1:sZ2COLCxvf2GxAAJHGmGqWybObLtFuk2tZUyGqnMXE8= github.com/multiversx/mx-chain-vm-common-go v1.3.36/go.mod h1:sZ2COLCxvf2GxAAJHGmGqWybObLtFuk2tZUyGqnMXE8= -github.com/multiversx/mx-chain-vm-common-go v1.3.37 h1:KeK6JCjeNUOHC5Z12/CTQIa8Z1at0dnnL9hY1LNrHS8= github.com/multiversx/mx-chain-vm-common-go v1.3.37/go.mod h1:sZ2COLCxvf2GxAAJHGmGqWybObLtFuk2tZUyGqnMXE8= +github.com/multiversx/mx-chain-vm-common-go v1.3.38-0.20230301122431-1c50bb69ded0 h1:T7MwPQgORhAOZqHbKh2Z274eqc599BSCJQOq4Ovy8nA= +github.com/multiversx/mx-chain-vm-common-go v1.3.38-0.20230301122431-1c50bb69ded0/go.mod h1:sZ2COLCxvf2GxAAJHGmGqWybObLtFuk2tZUyGqnMXE8= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.50 h1:ScUq7/wq78vthMTQ6v5Ux1DvSMQMHxQ2Sl7aPP26q1w= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.50/go.mod h1:e3uYdgoKzs3puaznbmSjDcRisJc5Do4tpg7VqyYwoek= github.com/multiversx/mx-chain-vm-v1_3-go v1.3.51 h1:axtp5/mpA+xYJ1cu4KtAGETV4t6v6/tNfQh0HCclBYY= diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index acc387540dd..d73d2e9bb01 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -739,6 +739,7 @@ func (tpn *TestProcessorNode) createFullSCQueryService(gasMap map[string]map[str argsBuiltIn := builtInFunctions.ArgsCreateBuiltInFunctionContainer{ GasSchedule: gasSchedule, MapDNSAddresses: make(map[string]struct{}), + MapDNSV2Addresses: make(map[string]struct{}), Marshalizer: TestMarshalizer, Accounts: tpn.AccntState, 
ShardCoordinator: tpn.ShardCoordinator, @@ -1405,6 +1406,7 @@ func (tpn *TestProcessorNode) initInnerProcessors(gasMap map[string]map[string]u argsBuiltIn := builtInFunctions.ArgsCreateBuiltInFunctionContainer{ GasSchedule: gasSchedule, MapDNSAddresses: mapDNSAddresses, + MapDNSV2Addresses: mapDNSAddresses, Marshalizer: TestMarshalizer, Accounts: tpn.AccntState, ShardCoordinator: tpn.ShardCoordinator, @@ -1623,6 +1625,7 @@ func (tpn *TestProcessorNode) initMetaInnerProcessors(gasMap map[string]map[stri argsBuiltIn := builtInFunctions.ArgsCreateBuiltInFunctionContainer{ GasSchedule: gasSchedule, MapDNSAddresses: make(map[string]struct{}), + MapDNSV2Addresses: make(map[string]struct{}), Marshalizer: TestMarshalizer, Accounts: tpn.AccntState, ShardCoordinator: tpn.ShardCoordinator, diff --git a/integrationTests/testProcessorNodeWithTestWebServer.go b/integrationTests/testProcessorNodeWithTestWebServer.go index 814064aead5..e2efa708ec4 100644 --- a/integrationTests/testProcessorNodeWithTestWebServer.go +++ b/integrationTests/testProcessorNodeWithTestWebServer.go @@ -142,6 +142,7 @@ func createFacadeComponents(tpn *TestProcessorNode) (nodeFacade.ApiResolver, nod argsBuiltIn := builtInFunctions.ArgsCreateBuiltInFunctionContainer{ GasSchedule: gasScheduleNotifier, MapDNSAddresses: make(map[string]struct{}), + MapDNSV2Addresses: make(map[string]struct{}), Marshalizer: TestMarshalizer, Accounts: tpn.AccntState, ShardCoordinator: tpn.ShardCoordinator, diff --git a/integrationTests/vm/testInitializer.go b/integrationTests/vm/testInitializer.go index 1acb1994d02..cf2baba8230 100644 --- a/integrationTests/vm/testInitializer.go +++ b/integrationTests/vm/testInitializer.go @@ -509,6 +509,9 @@ func CreateVMAndBlockchainHookAndDataPool( MapDNSAddresses: map[string]struct{}{ string(dnsAddr): {}, }, + MapDNSV2Addresses: map[string]struct{}{ + string(dnsAddr): {}, + }, Marshalizer: integrationtests.TestMarshalizer, Accounts: accnts, ShardCoordinator: shardCoordinator, @@ -594,6 +597,9 
@@ func CreateVMAndBlockchainHookMeta( MapDNSAddresses: map[string]struct{}{ string(dnsAddr): {}, }, + MapDNSV2Addresses: map[string]struct{}{ + string(dnsAddr): {}, + }, Marshalizer: integrationtests.TestMarshalizer, Accounts: accnts, ShardCoordinator: shardCoordinator, diff --git a/integrationTests/vm/wasm/utils.go b/integrationTests/vm/wasm/utils.go index a5b9fab8f80..6424a9c813a 100644 --- a/integrationTests/vm/wasm/utils.go +++ b/integrationTests/vm/wasm/utils.go @@ -241,6 +241,7 @@ func (context *TestContext) initVMAndBlockchainHook() { argsBuiltIn := builtInFunctions.ArgsCreateBuiltInFunctionContainer{ GasSchedule: gasSchedule, MapDNSAddresses: DNSAddresses, + MapDNSV2Addresses: DNSAddresses, Marshalizer: marshalizer, Accounts: context.Accounts, ShardCoordinator: oneShardCoordinator, diff --git a/process/smartContract/builtInFunctions/factory.go b/process/smartContract/builtInFunctions/factory.go index 8e241e0bec9..a6d94ee3a8b 100644 --- a/process/smartContract/builtInFunctions/factory.go +++ b/process/smartContract/builtInFunctions/factory.go @@ -20,6 +20,7 @@ var log = logger.GetOrCreate("process/smartcontract/builtInFunctions") type ArgsCreateBuiltInFunctionContainer struct { GasSchedule core.GasScheduleNotifier MapDNSAddresses map[string]struct{} + MapDNSV2Addresses map[string]struct{} EnableUserNameChange bool Marshalizer marshal.Marshalizer Accounts state.AccountsAdapter @@ -41,7 +42,7 @@ func CreateBuiltInFunctionsFactory(args ArgsCreateBuiltInFunctionContainer) (vmc if check.IfNil(args.Accounts) { return nil, process.ErrNilAccountsAdapter } - if args.MapDNSAddresses == nil { + if args.MapDNSAddresses == nil || args.MapDNSV2Addresses == nil { return nil, process.ErrNilDnsAddresses } if check.IfNil(args.ShardCoordinator) { @@ -74,6 +75,7 @@ func CreateBuiltInFunctionsFactory(args ArgsCreateBuiltInFunctionContainer) (vmc modifiedArgs := vmcommonBuiltInFunctions.ArgsCreateBuiltInFunctionContainer{ GasMap: args.GasSchedule.LatestGasSchedule(), 
MapDNSAddresses: args.MapDNSAddresses, + MapDNSV2Addresses: args.MapDNSV2Addresses, EnableUserNameChange: args.EnableUserNameChange, Marshalizer: args.Marshalizer, Accounts: vmcommonAccounts, diff --git a/process/smartContract/builtInFunctions/factory_test.go b/process/smartContract/builtInFunctions/factory_test.go index 8f9979ac698..3a772492f28 100644 --- a/process/smartContract/builtInFunctions/factory_test.go +++ b/process/smartContract/builtInFunctions/factory_test.go @@ -26,6 +26,7 @@ func createMockArguments() ArgsCreateBuiltInFunctionContainer { args := ArgsCreateBuiltInFunctionContainer{ GasSchedule: gasScheduleNotifier, MapDNSAddresses: make(map[string]struct{}), + MapDNSV2Addresses: make(map[string]struct{}), EnableUserNameChange: false, Marshalizer: &mock.MarshalizerMock{}, Accounts: &stateMock.AccountsStub{}, diff --git a/sharding/mock/enableEpochsHandlerMock.go b/sharding/mock/enableEpochsHandlerMock.go index 6173c091e32..3654cbcdb20 100644 --- a/sharding/mock/enableEpochsHandlerMock.go +++ b/sharding/mock/enableEpochsHandlerMock.go @@ -566,6 +566,11 @@ func (mock *EnableEpochsHandlerMock) IsAlwaysSaveTokenMetaDataEnabled() bool { return false } +// IsChangeUsernameEnabled - +func (mock *EnableEpochsHandlerMock) IsChangeUsernameEnabled() bool { + return false +} + // IsInterfaceNil returns true if there is no value under the interface func (mock *EnableEpochsHandlerMock) IsInterfaceNil() bool { return mock == nil diff --git a/testscommon/components/configs.go b/testscommon/components/configs.go index 26640cdbdce..ecd397d01c2 100644 --- a/testscommon/components/configs.go +++ b/testscommon/components/configs.go @@ -141,6 +141,11 @@ func GetGeneralConfig() config.Config { "erd1fpkcgel4gcmh8zqqdt043yfcn5tyx8373kg6q2qmkxzu4dqamc0swts65c", //shard 1 "erd1najnxxweyw6plhg8efql330nttrj6l5cf87wqsuym85s9ha0hmdqnqgenp", //shard 2 }, + DNSV2Addresses: []string{ + "erd1he8wwxn4az3j82p7wwqsdk794dm7hcrwny6f8dfegkfla34udx7qrf7xje", //shard 0 + 
"erd1fpkcgel4gcmh8zqqdt043yfcn5tyx8373kg6q2qmkxzu4dqamc0swts65c", //shard 1 + "erd1najnxxweyw6plhg8efql330nttrj6l5cf87wqsuym85s9ha0hmdqnqgenp", //shard 2 + }, MaxNumAddressesInTransferRole: 100, }, EpochStartConfig: GetEpochStartConfig(), diff --git a/testscommon/enableEpochsHandlerStub.go b/testscommon/enableEpochsHandlerStub.go index 092131f8ebc..d999151838a 100644 --- a/testscommon/enableEpochsHandlerStub.go +++ b/testscommon/enableEpochsHandlerStub.go @@ -117,6 +117,7 @@ type EnableEpochsHandlerStub struct { IsMaxBlockchainHookCountersFlagEnabledField bool IsWipeSingleNFTLiquidityDecreaseEnabledField bool IsAlwaysSaveTokenMetaDataEnabledField bool + IsChangeUsernameEnabledField bool } // ResetPenalizedTooMuchGasFlag - @@ -1014,6 +1015,14 @@ func (stub *EnableEpochsHandlerStub) IsAlwaysSaveTokenMetaDataEnabled() bool { return stub.IsAlwaysSaveTokenMetaDataEnabledField } +// IsChangeUsernameEnabled - +func (stub *EnableEpochsHandlerStub) IsChangeUsernameEnabled() bool { + stub.RLock() + defer stub.RUnlock() + + return stub.IsChangeUsernameEnabledField +} + // IsInterfaceNil - func (stub *EnableEpochsHandlerStub) IsInterfaceNil() bool { return stub == nil diff --git a/testscommon/generalConfig.go b/testscommon/generalConfig.go index acd0255b587..d32ee23a9e6 100644 --- a/testscommon/generalConfig.go +++ b/testscommon/generalConfig.go @@ -426,6 +426,11 @@ func GetGeneralConfig() config.Config { "erd1najnxxweyw6plhg8efql330nttrj6l5cf87wqsuym85s9ha0hmdqnqgenp", //shard 2 }, MaxNumAddressesInTransferRole: 100, + DNSV2Addresses: []string{ + "erd1he8wwxn4az3j82p7wwqsdk794dm7hcrwny6f8dfegkfla34udx7qrf7xje", //shard 0 + "erd1fpkcgel4gcmh8zqqdt043yfcn5tyx8373kg6q2qmkxzu4dqamc0swts65c", //shard 1 + "erd1najnxxweyw6plhg8efql330nttrj6l5cf87wqsuym85s9ha0hmdqnqgenp", //shard 2 + }, }, } } From 4c0c5787ceac2d5dc939660b0ea6b7f29a1b92d6 Mon Sep 17 00:00:00 2001 From: jules01 Date: Thu, 2 Mar 2023 10:30:51 +0200 Subject: [PATCH 035/221] - added semi-integration test for relayed dns 
scenario --- integrationTests/vm/txsFee/asyncCall_test.go | 52 +++--- integrationTests/vm/txsFee/dns_test.go | 176 +++++++++++++++++- integrationTests/vm/txsFee/relayedDns_test.go | 4 +- integrationTests/vm/txsFee/utils/utils.go | 11 +- 4 files changed, 210 insertions(+), 33 deletions(-) diff --git a/integrationTests/vm/txsFee/asyncCall_test.go b/integrationTests/vm/txsFee/asyncCall_test.go index 20c635f0a94..e652587390d 100644 --- a/integrationTests/vm/txsFee/asyncCall_test.go +++ b/integrationTests/vm/txsFee/asyncCall_test.go @@ -175,12 +175,12 @@ func TestAsyncCallsOnInitFunctionOnUpgrade(t *testing.T) { } func testAsyncCallsOnInitFunctionOnUpgrade(t *testing.T, enableEpochs config.EnableEpochs, expectedGasLimit uint64, gasScheduleNotifier core.GasScheduleNotifier) { - shardCoordinatorForShard0, _ := sharding.NewMultiShardCoordinator(3, 1) + shardCoordinatorForShard1, _ := sharding.NewMultiShardCoordinator(3, 1) shardCoordinatorForShardMeta, _ := sharding.NewMultiShardCoordinator(3, core.MetachainShardId) - testContextShard0, err := vm.CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGas( + testContextShard1, err := vm.CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGas( enableEpochs, - shardCoordinatorForShard0, + shardCoordinatorForShard1, integrationtests.CreateMemUnit(), gasScheduleNotifier, ) @@ -196,7 +196,7 @@ func testAsyncCallsOnInitFunctionOnUpgrade(t *testing.T, enableEpochs config.Ena // step 1. 
deploy the first contract scAddress, owner := utils.DoDeployWithCustomParams( t, - testContextShard0, + testContextShard1, "./testdata/first/first.wasm", big.NewInt(100000000000), 2000, @@ -205,20 +205,20 @@ func testAsyncCallsOnInitFunctionOnUpgrade(t *testing.T, enableEpochs config.Ena assert.Equal(t, 32, len(owner)) assert.Equal(t, 32, len(scAddress)) - intermediates := testContextShard0.GetIntermediateTransactions(t) + intermediates := testContextShard1.GetIntermediateTransactions(t) assert.Equal(t, 1, len(intermediates)) - testContextShard0.CleanIntermediateTransactions(t) + testContextShard1.CleanIntermediateTransactions(t) // step 2. call a dummy function on the first version of the contract tx := utils.CreateSmartContractCall(1, owner, scAddress, 10, 2000, "callMe", nil) - code, err := testContextShard0.TxProcessor.ProcessTransaction(tx) + code, err := testContextShard1.TxProcessor.ProcessTransaction(tx) require.Nil(t, err) require.Equal(t, vmcommon.Ok, code) - intermediates = testContextShard0.GetIntermediateTransactions(t) + intermediates = testContextShard1.GetIntermediateTransactions(t) assert.Equal(t, 1, len(intermediates)) - testContextShard0.CleanIntermediateTransactions(t) + testContextShard1.CleanIntermediateTransactions(t) // step 3. 
upgrade to the second contract @@ -232,13 +232,13 @@ func testAsyncCallsOnInitFunctionOnUpgrade(t *testing.T, enableEpochs config.Ena hex.EncodeToString([]byte("dummyArg")), }, "@") tx = utils.CreateSmartContractCall(2, owner, scAddress, 10, 10000000, txData, nil) - code, err = testContextShard0.TxProcessor.ProcessTransaction(tx) + code, err = testContextShard1.TxProcessor.ProcessTransaction(tx) assert.Nil(t, err) assert.Equal(t, vmcommon.Ok, code) - intermediates = testContextShard0.GetIntermediateTransactions(t) + intermediates = testContextShard1.GetIntermediateTransactions(t) assert.Equal(t, 1, len(intermediates)) - testContextShard0.CleanIntermediateTransactions(t) + testContextShard1.CleanIntermediateTransactions(t) // step 4. execute scr on metachain, should fail @@ -253,7 +253,7 @@ func testAsyncCallsOnInitFunctionOnUpgrade(t *testing.T, enableEpochs config.Ena // step 5. execute generated metachain scr on the contract scr = intermediates[0].(*smartContractResult.SmartContractResult) - code, err = testContextShard0.ScProcessor.ProcessSmartContractResult(scr) + code, err = testContextShard1.ScProcessor.ProcessSmartContractResult(scr) assert.Nil(t, err) assert.Equal(t, vmcommon.Ok, code) @@ -297,12 +297,12 @@ func TestAsyncCallsOnInitFunctionOnDeploy(t *testing.T) { } func testAsyncCallsOnInitFunctionOnDeploy(t *testing.T, enableEpochs config.EnableEpochs, expectedGasLimit uint64, gasScheduleNotifier core.GasScheduleNotifier) { - shardCoordinatorForShard0, _ := sharding.NewMultiShardCoordinator(3, 1) + shardCoordinatorForShard1, _ := sharding.NewMultiShardCoordinator(3, 1) shardCoordinatorForShardMeta, _ := sharding.NewMultiShardCoordinator(3, core.MetachainShardId) - testContextShard0, err := vm.CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGas( + testContextShard1, err := vm.CreatePreparedTxProcessorWithVMsWithShardCoordinatorDBAndGas( enableEpochs, - shardCoordinatorForShard0, + shardCoordinatorForShard1, integrationtests.CreateMemUnit(), 
gasScheduleNotifier, ) @@ -318,7 +318,7 @@ func testAsyncCallsOnInitFunctionOnDeploy(t *testing.T, enableEpochs config.Enab // step 1. deploy the first contract scAddressFirst, firstOwner := utils.DoDeployWithCustomParams( t, - testContextShard0, + testContextShard1, "./testdata/first/first.wasm", big.NewInt(100000000000), 2000, @@ -327,26 +327,26 @@ func testAsyncCallsOnInitFunctionOnDeploy(t *testing.T, enableEpochs config.Enab assert.Equal(t, 32, len(firstOwner)) assert.Equal(t, 32, len(scAddressFirst)) - intermediates := testContextShard0.GetIntermediateTransactions(t) + intermediates := testContextShard1.GetIntermediateTransactions(t) assert.Equal(t, 1, len(intermediates)) - testContextShard0.CleanIntermediateTransactions(t) + testContextShard1.CleanIntermediateTransactions(t) // step 2. call a dummy function on the first contract tx := utils.CreateSmartContractCall(1, firstOwner, scAddressFirst, 10, 2000, "callMe", nil) - code, err := testContextShard0.TxProcessor.ProcessTransaction(tx) + code, err := testContextShard1.TxProcessor.ProcessTransaction(tx) require.Nil(t, err) require.Equal(t, vmcommon.Ok, code) - intermediates = testContextShard0.GetIntermediateTransactions(t) + intermediates = testContextShard1.GetIntermediateTransactions(t) assert.Equal(t, 1, len(intermediates)) - testContextShard0.CleanIntermediateTransactions(t) + testContextShard1.CleanIntermediateTransactions(t) // step 3. 
deploy the second contract that does an async on init function scAddressSecond, secondOwner := utils.DoDeployWithCustomParams( t, - testContextShard0, + testContextShard1, "./testdata/asyncOnInit/asyncOnInit.wasm", big.NewInt(100000000000), 10000000, @@ -359,9 +359,9 @@ func testAsyncCallsOnInitFunctionOnDeploy(t *testing.T, enableEpochs config.Enab assert.Equal(t, 32, len(secondOwner)) assert.Equal(t, 32, len(scAddressSecond)) - intermediates = testContextShard0.GetIntermediateTransactions(t) + intermediates = testContextShard1.GetIntermediateTransactions(t) assert.Equal(t, 1, len(intermediates)) - testContextShard0.CleanIntermediateTransactions(t) + testContextShard1.CleanIntermediateTransactions(t) // step 4. execute scr on metachain, should fail @@ -376,7 +376,7 @@ func testAsyncCallsOnInitFunctionOnDeploy(t *testing.T, enableEpochs config.Enab // step 5. execute generated metachain scr on the contract scr = intermediates[0].(*smartContractResult.SmartContractResult) - code, err = testContextShard0.ScProcessor.ProcessSmartContractResult(scr) + code, err = testContextShard1.ScProcessor.ProcessSmartContractResult(scr) assert.Nil(t, err) assert.Equal(t, vmcommon.Ok, code) diff --git a/integrationTests/vm/txsFee/dns_test.go b/integrationTests/vm/txsFee/dns_test.go index 3d8edc6c63a..2ea55f5611c 100644 --- a/integrationTests/vm/txsFee/dns_test.go +++ b/integrationTests/vm/txsFee/dns_test.go @@ -11,11 +11,15 @@ import ( "math/big" "testing" + "github.com/davecgh/go-spew/spew" + "github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" "github.com/multiversx/mx-chain-go/state" + logger "github.com/multiversx/mx-chain-logger-go" vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -38,7 +42,7 @@ func 
TestDeployDNSContract_TestRegisterAndResolveAndSendTxWithSndAndRcvUserName( _, _ = vm.CreateAccount(testContext.Accounts, sndAddr, 0, senderBalance) _, _ = vm.CreateAccount(testContext.Accounts, rcvAddr, 0, senderBalance) - userName := utils.GenerateUserNameForMyDNSContract() + userName := utils.GenerateUserNameForDefaultDNSContract() txData := []byte("register@" + hex.EncodeToString(userName)) // create username for sender tx := vm.CreateTransaction(0, big.NewInt(0), sndAddr, scAddress, gasPrice, gasLimit, txData) @@ -56,7 +60,7 @@ func TestDeployDNSContract_TestRegisterAndResolveAndSendTxWithSndAndRcvUserName( utils.CleanAccumulatedIntermediateTransactions(t, testContext) // create username for receiver - rcvUserName := utils.GenerateUserNameForMyDNSContract() + rcvUserName := utils.GenerateUserNameForDefaultDNSContract() txData = []byte("register@" + hex.EncodeToString(rcvUserName)) tx = vm.CreateTransaction(0, big.NewInt(0), rcvAddr, scAddress, gasPrice, gasLimit, txData) retCode, err = testContext.TxProcessor.ProcessTransaction(tx) @@ -101,3 +105,171 @@ func TestDeployDNSContract_TestRegisterAndResolveAndSendTxWithSndAndRcvUserName( require.Equal(t, vmcommon.Ok, retCode) require.Nil(t, err) } + +func getNonce(testContext *vm.VMTestContext, address []byte) uint64 { + accnt, _ := testContext.Accounts.LoadAccount(address) + return accnt.GetNonce() +} + +// relayer address is in shard 2, creates a transaction on the behalf of the user from shard 2, that will call the DNS contract +// from shard 1 that will try to set the username but fails. 
+func TestDeployDNSContract_TestGasWhenSaveUsernameFailsCrossShard(t *testing.T) { + testContextForDNSContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) + require.Nil(t, err) + defer testContextForDNSContract.Close() + + // TODO remove this + logger.SetLogLevel("process:TRACE,vm:TRACE") + + testContextForRelayerAndUser, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(2, config.EnableEpochs{}) + require.Nil(t, err) + defer testContextForRelayerAndUser.Close() + + scAddress, _ := utils.DoDeployDNS(t, testContextForDNSContract, "../../multiShard/smartContract/dns/dns.wasm") + fmt.Println(scAddress) + utils.CleanAccumulatedIntermediateTransactions(t, testContextForDNSContract) + require.Equal(t, uint32(1), testContextForDNSContract.ShardCoordinator.ComputeId(scAddress)) + + relayerAddress := []byte("relayer-901234567890123456789112") + require.Equal(t, uint32(2), testContextForRelayerAndUser.ShardCoordinator.ComputeId(relayerAddress)) + userAddress := []byte("user-678901234567890123456789112") + require.Equal(t, uint32(2), testContextForRelayerAndUser.ShardCoordinator.ComputeId(userAddress)) + + initialBalance := big.NewInt(10000000) + _, _ = vm.CreateAccount(testContextForRelayerAndUser.Accounts, relayerAddress, 0, initialBalance) + + firstUsername := utils.GenerateUserNameForDNSContract(scAddress) + args := argsProcessRegister{ + relayerAddress: relayerAddress, + userAddress: userAddress, + scAddress: scAddress, + testContextForRelayerAndUser: testContextForRelayerAndUser, + testContextForDNSContract: testContextForDNSContract, + username: firstUsername, + } + scrs, retCode, err := processRegisterThroughRelayedTxs(t, args) + require.Nil(t, err) + require.Equal(t, vmcommon.Ok, retCode) + assert.Equal(t, 3, len(scrs)) + + // check username + acc, _ := testContextForRelayerAndUser.Accounts.GetExistingAccount(userAddress) + account, _ := acc.(state.UserAccountHandler) + require.Equal(t, firstUsername, account.GetUserName()) + + 
// TODO refactor + for _, scr := range scrs { + log.Info("SCR: " + spew.Sdump(scr)) + } + + secondUsername := utils.GenerateUserNameForDNSContract(scAddress) + args.username = secondUsername + + scrs, retCode, err = processRegisterThroughRelayedTxs(t, args) + require.Nil(t, err) + require.Equal(t, vmcommon.UserError, retCode) + + // TODO refactor + for _, scr := range scrs { + log.Info("SCR: " + spew.Sdump(scr)) + } +} + +type argsProcessRegister struct { + relayerAddress []byte + userAddress []byte + scAddress []byte + testContextForRelayerAndUser *vm.VMTestContext + testContextForDNSContract *vm.VMTestContext + username []byte +} + +func processRegisterThroughRelayedTxs(tb testing.TB, args argsProcessRegister) ([]*smartContractResult.SmartContractResult, vmcommon.ReturnCode, error) { + overallReturnCode := vmcommon.Ok + scrs := make([]*smartContractResult.SmartContractResult, 0) + + // generate de user transaction + gasPrice := uint64(10) + userTxData := []byte("register@" + hex.EncodeToString(args.username)) + userTxGasLimit := uint64(200000) + userTx := vm.CreateTransaction( + getNonce(args.testContextForRelayerAndUser, args.userAddress), + big.NewInt(0), + args.userAddress, + args.scAddress, + gasPrice, + userTxGasLimit, + userTxData, + ) + + // generate the relayed transaction + relayedTxData := utils.PrepareRelayerTxData(userTx) // v1 will suffice + relayedTxGasLimit := userTxGasLimit + 1 + uint64(len(relayedTxData)) + relayedTx := vm.CreateTransaction( + getNonce(args.testContextForRelayerAndUser, args.relayerAddress), + big.NewInt(0), + args.relayerAddress, + args.userAddress, + gasPrice, + relayedTxGasLimit, + relayedTxData, + ) + // start executing relayed transaction + retCode, err := args.testContextForRelayerAndUser.TxProcessor.ProcessTransaction(relayedTx) + if retCode != vmcommon.Ok { + overallReturnCode = retCode + } + if err != nil { + return scrs, overallReturnCode, err + } + + // record the SCR and clean all intermediate results + 
intermediateTxs := args.testContextForRelayerAndUser.GetIntermediateTransactions(tb) + args.testContextForRelayerAndUser.CleanIntermediateTransactions(tb) + require.Equal(tb, 1, len(intermediateTxs)) + scrRegister := intermediateTxs[0].(*smartContractResult.SmartContractResult) + scrs = append(scrs, scrRegister) + + // execute the scr on the shard that contains the dns contract + retCode, err = args.testContextForDNSContract.ScProcessor.ProcessSmartContractResult(scrRegister) + if retCode != vmcommon.Ok { + overallReturnCode = retCode + } + if err != nil { + return scrs, overallReturnCode, err + } + + // record the SCR and clean all intermediate results + intermediateTxs = args.testContextForDNSContract.GetIntermediateTransactions(tb) + args.testContextForDNSContract.CleanIntermediateTransactions(tb) + require.Equal(tb, 1, len(intermediateTxs)) + scrSCProcess := intermediateTxs[0].(*smartContractResult.SmartContractResult) + scrs = append(scrs, scrSCProcess) + + // execute the scr on the initial shard that contains the user address + retCode, err = args.testContextForRelayerAndUser.ScProcessor.ProcessSmartContractResult(scrSCProcess) + if retCode != vmcommon.Ok { + overallReturnCode = retCode + } + if err != nil { + return scrs, overallReturnCode, err + } + + // record the SCR and clean all intermediate results + intermediateTxs = args.testContextForRelayerAndUser.GetIntermediateTransactions(tb) + args.testContextForRelayerAndUser.CleanIntermediateTransactions(tb) + require.Equal(tb, 1, len(intermediateTxs)) + scrFinished := intermediateTxs[0].(*smartContractResult.SmartContractResult) + scrs = append(scrs, scrFinished) + + // commit & cleanup + _, err = args.testContextForRelayerAndUser.Accounts.Commit() + require.Nil(tb, err) + args.testContextForRelayerAndUser.CleanIntermediateTransactions(tb) + + _, err = args.testContextForDNSContract.Accounts.Commit() + require.Nil(tb, err) + args.testContextForDNSContract.CleanIntermediateTransactions(tb) + + return scrs, 
overallReturnCode, nil +} diff --git a/integrationTests/vm/txsFee/relayedDns_test.go b/integrationTests/vm/txsFee/relayedDns_test.go index 31867df3330..774e21fae80 100644 --- a/integrationTests/vm/txsFee/relayedDns_test.go +++ b/integrationTests/vm/txsFee/relayedDns_test.go @@ -35,7 +35,7 @@ func TestRelayedTxDnsTransaction_ShouldWork(t *testing.T) { _, _ = vm.CreateAccount(testContext.Accounts, rcvAddr, 0, big.NewInt(0)) _, _ = vm.CreateAccount(testContext.Accounts, relayerAddr, 0, big.NewInt(100000000)) - sndAddrUserName := utils.GenerateUserNameForMyDNSContract() + sndAddrUserName := utils.GenerateUserNameForDefaultDNSContract() txData := []byte("register@" + hex.EncodeToString(sndAddrUserName)) // create user name for sender innerTx := vm.CreateTransaction(0, big.NewInt(0), sndAddr, scAddress, gasPrice, gasLimit, txData) @@ -57,7 +57,7 @@ func TestRelayedTxDnsTransaction_ShouldWork(t *testing.T) { dnsUserNameAddr := ret.ReturnData[0] require.Equal(t, sndAddr, dnsUserNameAddr) - rcvAddrUserName := utils.GenerateUserNameForMyDNSContract() + rcvAddrUserName := utils.GenerateUserNameForDefaultDNSContract() txData = []byte("register@" + hex.EncodeToString(rcvAddrUserName)) // create user name for receiver innerTx = vm.CreateTransaction(0, big.NewInt(0), rcvAddr, scAddress, gasPrice, gasLimit, txData) diff --git a/integrationTests/vm/txsFee/utils/utils.go b/integrationTests/vm/txsFee/utils/utils.go index 1d9e114d8c2..6d1d9c79a28 100644 --- a/integrationTests/vm/txsFee/utils/utils.go +++ b/integrationTests/vm/txsFee/utils/utils.go @@ -364,10 +364,15 @@ func randStringBytes(n int) string { return string(b) } -// GenerateUserNameForMyDNSContract - -func GenerateUserNameForMyDNSContract() []byte { +// GenerateUserNameForDefaultDNSContract - +func GenerateUserNameForDefaultDNSContract() []byte { + return GenerateUserNameForDNSContract([]byte{49}) +} + +// GenerateUserNameForDNSContract - +func GenerateUserNameForDNSContract(contractAddress []byte) []byte { testHasher := 
keccak.NewKeccak() - contractLastByte := byte(49) + contractLastByte := contractAddress[len(contractAddress)-1] for { userName := randStringBytes(10) From 0ab5918e02a5604acd7e646be6099235ed164d9d Mon Sep 17 00:00:00 2001 From: robertsasu Date: Thu, 2 Mar 2023 10:58:13 +0200 Subject: [PATCH 036/221] fixing tests --- config/tomlConfig_test.go | 4 ++-- process/smartContract/builtInFunctions/factory_test.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go index 9a1f88d84cc..d1851fc52ca 100644 --- a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -684,8 +684,8 @@ func TestEnableEpochConfig(t *testing.T) { # RuntimeMemStoreLimitEnableEpoch represents the epoch when the condition for Runtime MemStore is enabled RuntimeMemStoreLimitEnableEpoch = 63 - # ChangeUsernameEnableEpoch represents the epoch when change username is enabled - ChangeUsernameEnableEpoch = 64 + # ChangeUserNameEnableEpoch represents the epoch when change username is enabled + ChangeUserNameEnableEpoch = 64 # MaxNodesChangeEnableEpoch holds configuration for changing the maximum number of nodes and the enabling epoch MaxNodesChangeEnableEpoch = [ diff --git a/process/smartContract/builtInFunctions/factory_test.go b/process/smartContract/builtInFunctions/factory_test.go index 3a772492f28..1ebc421447e 100644 --- a/process/smartContract/builtInFunctions/factory_test.go +++ b/process/smartContract/builtInFunctions/factory_test.go @@ -161,7 +161,7 @@ func TestCreateBuiltInFunctionContainer(t *testing.T) { args := createMockArguments() builtInFuncFactory, err := CreateBuiltInFunctionsFactory(args) assert.Nil(t, err) - assert.Equal(t, len(builtInFuncFactory.BuiltInFunctionContainer().Keys()), 31) + assert.Equal(t, len(builtInFuncFactory.BuiltInFunctionContainer().Keys()), 32) err = builtInFuncFactory.SetPayableHandler(&testscommon.BlockChainHookStub{}) assert.Nil(t, err) From 730cdeda794e85da3051c8e1d848b069a3aa48a0 Mon Sep 
17 00:00:00 2001 From: robertsasu Date: Thu, 2 Mar 2023 14:16:43 +0200 Subject: [PATCH 037/221] fix after review --- cmd/node/config/enableEpochs.toml | 4 ++-- common/enablers/enableEpochsHandler.go | 2 +- common/enablers/enableEpochsHandler_test.go | 2 +- config/epochConfig.go | 2 +- config/tomlConfig_test.go | 6 +++--- factory/api/apiResolverFactory.go | 2 +- factory/processing/blockProcessorCreator.go | 2 +- 7 files changed, 10 insertions(+), 10 deletions(-) diff --git a/cmd/node/config/enableEpochs.toml b/cmd/node/config/enableEpochs.toml index 6a853936b61..d0d841ef910 100644 --- a/cmd/node/config/enableEpochs.toml +++ b/cmd/node/config/enableEpochs.toml @@ -242,8 +242,8 @@ # RuntimeCodeSizeFixEnableEpoch represents the epoch when the code size fix in the VM is enabled RuntimeCodeSizeFixEnableEpoch = 2 - # ChangeUserNameEnableEpoch represents the epoch when changing username is enabled - ChangeUserNameEnableEpoch = 2 + # ChangeUsernameEnableEpoch represents the epoch when changing username is enabled + ChangeUsernameEnableEpoch = 2 # BLSMultiSignerEnableEpoch represents the activation epoch for different types of BLS multi-signers BLSMultiSignerEnableEpoch = [ diff --git a/common/enablers/enableEpochsHandler.go b/common/enablers/enableEpochsHandler.go index c33b1f8a173..4ff3534b035 100644 --- a/common/enablers/enableEpochsHandler.go +++ b/common/enablers/enableEpochsHandler.go @@ -118,7 +118,7 @@ func (handler *enableEpochsHandler) EpochConfirmed(epoch uint32, _ uint64) { handler.setFlagValue(epoch >= handler.enableEpochsConfig.MaxBlockchainHookCountersEnableEpoch, handler.maxBlockchainHookCountersFlag, "maxBlockchainHookCountersFlag") handler.setFlagValue(epoch >= handler.enableEpochsConfig.WipeSingleNFTLiquidityDecreaseEnableEpoch, handler.wipeSingleNFTLiquidityDecreaseFlag, "wipeSingleNFTLiquidityDecreaseFlag") handler.setFlagValue(epoch >= handler.enableEpochsConfig.AlwaysSaveTokenMetaDataEnableEpoch, handler.alwaysSaveTokenMetaDataFlag, 
"alwaysSaveTokenMetaDataFlag") - handler.setFlagValue(epoch >= handler.enableEpochsConfig.ChangeUserNameEnableEpoch, handler.changeUsernameFlag, "runtimeCodeSizeFixFlag") + handler.setFlagValue(epoch >= handler.enableEpochsConfig.ChangeUsernameEnableEpoch, handler.changeUsernameFlag, "changeUsername") } func (handler *enableEpochsHandler) setFlagValue(value bool, flag *atomic.Flag, flagName string) { diff --git a/common/enablers/enableEpochsHandler_test.go b/common/enablers/enableEpochsHandler_test.go index 1e349b1d93c..c0f691ea5ca 100644 --- a/common/enablers/enableEpochsHandler_test.go +++ b/common/enablers/enableEpochsHandler_test.go @@ -91,7 +91,7 @@ func createEnableEpochsConfig() config.EnableEpochs { WipeSingleNFTLiquidityDecreaseEnableEpoch: 75, AlwaysSaveTokenMetaDataEnableEpoch: 76, RuntimeCodeSizeFixEnableEpoch: 77, - ChangeUserNameEnableEpoch: 78, + ChangeUsernameEnableEpoch: 78, } } diff --git a/config/epochConfig.go b/config/epochConfig.go index 4c8a6790068..a4efa5ac3dc 100644 --- a/config/epochConfig.go +++ b/config/epochConfig.go @@ -93,7 +93,7 @@ type EnableEpochs struct { MaxBlockchainHookCountersEnableEpoch uint32 WipeSingleNFTLiquidityDecreaseEnableEpoch uint32 AlwaysSaveTokenMetaDataEnableEpoch uint32 - ChangeUserNameEnableEpoch uint32 + ChangeUsernameEnableEpoch uint32 BLSMultiSignerEnableEpoch []MultiSignerConfig } diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go index d1851fc52ca..d334a19f8e6 100644 --- a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -684,8 +684,8 @@ func TestEnableEpochConfig(t *testing.T) { # RuntimeMemStoreLimitEnableEpoch represents the epoch when the condition for Runtime MemStore is enabled RuntimeMemStoreLimitEnableEpoch = 63 - # ChangeUserNameEnableEpoch represents the epoch when change username is enabled - ChangeUserNameEnableEpoch = 64 + # ChangeUsernameEnableEpoch represents the epoch when change username is enabled + ChangeUsernameEnableEpoch = 64 # MaxNodesChangeEnableEpoch 
holds configuration for changing the maximum number of nodes and the enabling epoch MaxNodesChangeEnableEpoch = [ @@ -782,7 +782,7 @@ func TestEnableEpochConfig(t *testing.T) { AlwaysSaveTokenMetaDataEnableEpoch: 61, RuntimeCodeSizeFixEnableEpoch: 62, RuntimeMemStoreLimitEnableEpoch: 63, - ChangeUserNameEnableEpoch: 64, + ChangeUsernameEnableEpoch: 64, BLSMultiSignerEnableEpoch: []MultiSignerConfig{ { EnableEpoch: 0, diff --git a/factory/api/apiResolverFactory.go b/factory/api/apiResolverFactory.go index 0e9f64ce258..7a0a4ff6961 100644 --- a/factory/api/apiResolverFactory.go +++ b/factory/api/apiResolverFactory.go @@ -490,7 +490,7 @@ func createBuiltinFuncs( maxNumAddressesInTransferRole uint32, dnsV2Addresses [][]byte, ) (vmcommon.BuiltInFunctionFactory, error) { - mapDNSV2Addresses := make(map[string]struct{}) + mapDNSV2Addresses := make(map[string]struct{}, len(dnsV2Addresses)) for _, address := range dnsV2Addresses { mapDNSV2Addresses[string(address)] = struct{}{} } diff --git a/factory/processing/blockProcessorCreator.go b/factory/processing/blockProcessorCreator.go index 034d6121444..4b788f41ece 100644 --- a/factory/processing/blockProcessorCreator.go +++ b/factory/processing/blockProcessorCreator.go @@ -1265,7 +1265,7 @@ func (pcf *processComponentsFactory) createBuiltInFunctionContainer( return nil, err } - mapDNSV2Addresses := make(map[string]struct{}) + mapDNSV2Addresses := make(map[string]struct{}, len(convertedDNSV2Addresses)) for _, address := range convertedDNSV2Addresses { mapDNSV2Addresses[string(address)] = struct{}{} } From a99d8d7ba51066aa3c5dc7e6febe003377f51d02 Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Mon, 6 Mar 2023 16:03:45 +0200 Subject: [PATCH 038/221] remove error wrapping when loading data trie --- state/accountsDB.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/state/accountsDB.go b/state/accountsDB.go index 7ff1617e04b..f1514d58b05 100644 --- a/state/accountsDB.go +++ b/state/accountsDB.go @@ -524,7 
+524,8 @@ func (adb *AccountsDB) loadDataTrie(accountHandler baseAccountHandler, mainTrie dataTrie, err := mainTrie.Recreate(accountHandler.GetRootHash()) if err != nil { - return fmt.Errorf("trie was not found for hash, rootHash = %s, err = %w", hex.EncodeToString(accountHandler.GetRootHash()), err) + log.Error("trie was not found for hash", "rootHash", accountHandler.GetRootHash(), "err", err) + return err } accountHandler.SetDataTrie(dataTrie) From 022102364b3c1d3b0ce4f8083ae8472205a5a577 Mon Sep 17 00:00:00 2001 From: jules01 Date: Tue, 7 Mar 2023 11:40:07 +0200 Subject: [PATCH 039/221] - semi-integration tests work --- integrationTests/vm/txsFee/dns_test.go | 88 +++++++++++++++++++++----- 1 file changed, 73 insertions(+), 15 deletions(-) diff --git a/integrationTests/vm/txsFee/dns_test.go b/integrationTests/vm/txsFee/dns_test.go index 2ea55f5611c..0580da51aba 100644 --- a/integrationTests/vm/txsFee/dns_test.go +++ b/integrationTests/vm/txsFee/dns_test.go @@ -17,7 +17,6 @@ import ( "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" "github.com/multiversx/mx-chain-go/state" - logger "github.com/multiversx/mx-chain-logger-go" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -111,6 +110,13 @@ func getNonce(testContext *vm.VMTestContext, address []byte) uint64 { return accnt.GetNonce() } +func getBalance(testContext *vm.VMTestContext, address []byte) *big.Int { + accnt, _ := testContext.Accounts.LoadAccount(address) + userAccnt, _ := accnt.(state.UserAccountHandler) + + return userAccnt.GetBalance() +} + // relayer address is in shard 2, creates a transaction on the behalf of the user from shard 2, that will call the DNS contract // from shard 1 that will try to set the username but fails. 
func TestDeployDNSContract_TestGasWhenSaveUsernameFailsCrossShard(t *testing.T) { @@ -119,7 +125,7 @@ func TestDeployDNSContract_TestGasWhenSaveUsernameFailsCrossShard(t *testing.T) defer testContextForDNSContract.Close() // TODO remove this - logger.SetLogLevel("process:TRACE,vm:TRACE") + // logger.SetLogLevel("process:TRACE,vm:TRACE") testContextForRelayerAndUser, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(2, config.EnableEpochs{}) require.Nil(t, err) @@ -146,21 +152,18 @@ func TestDeployDNSContract_TestGasWhenSaveUsernameFailsCrossShard(t *testing.T) testContextForRelayerAndUser: testContextForRelayerAndUser, testContextForDNSContract: testContextForDNSContract, username: firstUsername, + gasPrice: 10, } scrs, retCode, err := processRegisterThroughRelayedTxs(t, args) require.Nil(t, err) require.Equal(t, vmcommon.Ok, retCode) - assert.Equal(t, 3, len(scrs)) + assert.Equal(t, 4, len(scrs)) // check username acc, _ := testContextForRelayerAndUser.Accounts.GetExistingAccount(userAddress) account, _ := acc.(state.UserAccountHandler) require.Equal(t, firstUsername, account.GetUserName()) - - // TODO refactor - for _, scr := range scrs { - log.Info("SCR: " + spew.Sdump(scr)) - } + checkBalances(t, args, initialBalance) secondUsername := utils.GenerateUserNameForDNSContract(scAddress) args.username = secondUsername @@ -169,6 +172,11 @@ func TestDeployDNSContract_TestGasWhenSaveUsernameFailsCrossShard(t *testing.T) require.Nil(t, err) require.Equal(t, vmcommon.UserError, retCode) + // check username hasn't changed + acc, _ = testContextForRelayerAndUser.Accounts.GetExistingAccount(userAddress) + account, _ = acc.(state.UserAccountHandler) + require.Equal(t, firstUsername, account.GetUserName()) + // TODO refactor for _, scr := range scrs { log.Info("SCR: " + spew.Sdump(scr)) @@ -182,14 +190,14 @@ type argsProcessRegister struct { testContextForRelayerAndUser *vm.VMTestContext testContextForDNSContract *vm.VMTestContext username []byte + gasPrice uint64 } func 
processRegisterThroughRelayedTxs(tb testing.TB, args argsProcessRegister) ([]*smartContractResult.SmartContractResult, vmcommon.ReturnCode, error) { overallReturnCode := vmcommon.Ok scrs := make([]*smartContractResult.SmartContractResult, 0) - // generate de user transaction - gasPrice := uint64(10) + // generate the user transaction userTxData := []byte("register@" + hex.EncodeToString(args.username)) userTxGasLimit := uint64(200000) userTx := vm.CreateTransaction( @@ -197,7 +205,7 @@ func processRegisterThroughRelayedTxs(tb testing.TB, args argsProcessRegister) ( big.NewInt(0), args.userAddress, args.scAddress, - gasPrice, + args.gasPrice, userTxGasLimit, userTxData, ) @@ -210,7 +218,7 @@ func processRegisterThroughRelayedTxs(tb testing.TB, args argsProcessRegister) ( big.NewInt(0), args.relayerAddress, args.userAddress, - gasPrice, + args.gasPrice, relayedTxGasLimit, relayedTxData, ) @@ -223,6 +231,9 @@ func processRegisterThroughRelayedTxs(tb testing.TB, args argsProcessRegister) ( return scrs, overallReturnCode, err } + log.Warn("relayer", "balance", getBalance(args.testContextForRelayerAndUser, args.relayerAddress).String()) + log.Warn("relayer", "tx", args.gasPrice*relayedTxGasLimit) + // record the SCR and clean all intermediate results intermediateTxs := args.testContextForRelayerAndUser.GetIntermediateTransactions(tb) args.testContextForRelayerAndUser.CleanIntermediateTransactions(tb) @@ -230,6 +241,8 @@ func processRegisterThroughRelayedTxs(tb testing.TB, args argsProcessRegister) ( scrRegister := intermediateTxs[0].(*smartContractResult.SmartContractResult) scrs = append(scrs, scrRegister) + log.Warn("scrRegister", "tx", args.gasPrice*scrRegister.GasLimit) + // execute the scr on the shard that contains the dns contract retCode, err = args.testContextForDNSContract.ScProcessor.ProcessSmartContractResult(scrRegister) if retCode != vmcommon.Ok { @@ -246,7 +259,9 @@ func processRegisterThroughRelayedTxs(tb testing.TB, args argsProcessRegister) ( 
scrSCProcess := intermediateTxs[0].(*smartContractResult.SmartContractResult) scrs = append(scrs, scrSCProcess) - // execute the scr on the initial shard that contains the user address + log.Warn("scrSCProcess", "tx", args.gasPrice*scrSCProcess.GasLimit) + + // execute the scr on the initial shard that contains the user address (builtin function call) retCode, err = args.testContextForRelayerAndUser.ScProcessor.ProcessSmartContractResult(scrSCProcess) if retCode != vmcommon.Ok { overallReturnCode = retCode @@ -259,8 +274,40 @@ func processRegisterThroughRelayedTxs(tb testing.TB, args argsProcessRegister) ( intermediateTxs = args.testContextForRelayerAndUser.GetIntermediateTransactions(tb) args.testContextForRelayerAndUser.CleanIntermediateTransactions(tb) require.Equal(tb, 1, len(intermediateTxs)) - scrFinished := intermediateTxs[0].(*smartContractResult.SmartContractResult) - scrs = append(scrs, scrFinished) + scrFinishedBuiltinCall := intermediateTxs[0].(*smartContractResult.SmartContractResult) + scrs = append(scrs, scrFinishedBuiltinCall) + + // execute the finished scr on the shard that contains the dns contract + retCode, err = args.testContextForDNSContract.ScProcessor.ProcessSmartContractResult(scrFinishedBuiltinCall) + if retCode != vmcommon.Ok { + overallReturnCode = retCode + } + if err != nil { + return scrs, overallReturnCode, err + } + + // record the SCR and clean all intermediate results + intermediateTxs = args.testContextForDNSContract.GetIntermediateTransactions(tb) + args.testContextForDNSContract.CleanIntermediateTransactions(tb) + require.Equal(tb, 1, len(intermediateTxs)) + scrSCFinished := intermediateTxs[0].(*smartContractResult.SmartContractResult) + scrs = append(scrs, scrSCFinished) + + // execute the scr on the initial shard that contains the user address (refund) + retCode, err = args.testContextForRelayerAndUser.ScProcessor.ProcessSmartContractResult(scrSCFinished) + if retCode != vmcommon.Ok { + overallReturnCode = retCode + } + if 
err != nil { + return scrs, overallReturnCode, err + } + + // record the SCR and clean all intermediate results + intermediateTxs = args.testContextForRelayerAndUser.GetIntermediateTransactions(tb) + args.testContextForRelayerAndUser.CleanIntermediateTransactions(tb) + require.Equal(tb, 0, len(intermediateTxs)) + + log.Warn("relayer", "balance", getBalance(args.testContextForRelayerAndUser, args.relayerAddress).String()) // commit & cleanup _, err = args.testContextForRelayerAndUser.Accounts.Commit() @@ -273,3 +320,14 @@ func processRegisterThroughRelayedTxs(tb testing.TB, args argsProcessRegister) ( return scrs, overallReturnCode, nil } + +func checkBalances(tb testing.TB, args argsProcessRegister, initialBalance *big.Int) { + entireBalance := big.NewInt(0) + entireBalance.Add(entireBalance, getBalance(args.testContextForRelayerAndUser, args.relayerAddress)) + entireBalance.Add(entireBalance, getBalance(args.testContextForRelayerAndUser, args.userAddress)) + entireBalance.Add(entireBalance, getBalance(args.testContextForDNSContract, args.scAddress)) + entireBalance.Add(entireBalance, args.testContextForRelayerAndUser.TxFeeHandler.GetAccumulatedFees()) + entireBalance.Add(entireBalance, args.testContextForDNSContract.TxFeeHandler.GetAccumulatedFees()) + + assert.Equal(tb, initialBalance, entireBalance) +} From 4e2985dd6fbd8e023f0d8719db15f0cad5bed8ad Mon Sep 17 00:00:00 2001 From: jules01 Date: Tue, 7 Mar 2023 11:55:38 +0200 Subject: [PATCH 040/221] - refactored test --- integrationTests/vm/txsFee/dns_test.go | 39 ++++++++------------------ 1 file changed, 12 insertions(+), 27 deletions(-) diff --git a/integrationTests/vm/txsFee/dns_test.go b/integrationTests/vm/txsFee/dns_test.go index 0580da51aba..2e23bdca51a 100644 --- a/integrationTests/vm/txsFee/dns_test.go +++ b/integrationTests/vm/txsFee/dns_test.go @@ -176,6 +176,7 @@ func TestDeployDNSContract_TestGasWhenSaveUsernameFailsCrossShard(t *testing.T) acc, _ = 
testContextForRelayerAndUser.Accounts.GetExistingAccount(userAddress) account, _ = acc.(state.UserAccountHandler) require.Equal(t, firstUsername, account.GetUserName()) + checkBalances(t, args, initialBalance) // TODO refactor for _, scr := range scrs { @@ -194,7 +195,6 @@ type argsProcessRegister struct { } func processRegisterThroughRelayedTxs(tb testing.TB, args argsProcessRegister) ([]*smartContractResult.SmartContractResult, vmcommon.ReturnCode, error) { - overallReturnCode := vmcommon.Ok scrs := make([]*smartContractResult.SmartContractResult, 0) // generate the user transaction @@ -224,11 +224,8 @@ func processRegisterThroughRelayedTxs(tb testing.TB, args argsProcessRegister) ( ) // start executing relayed transaction retCode, err := args.testContextForRelayerAndUser.TxProcessor.ProcessTransaction(relayedTx) - if retCode != vmcommon.Ok { - overallReturnCode = retCode - } - if err != nil { - return scrs, overallReturnCode, err + if retCode != vmcommon.Ok || err != nil { + return scrs, retCode, err } log.Warn("relayer", "balance", getBalance(args.testContextForRelayerAndUser, args.relayerAddress).String()) @@ -245,11 +242,8 @@ func processRegisterThroughRelayedTxs(tb testing.TB, args argsProcessRegister) ( // execute the scr on the shard that contains the dns contract retCode, err = args.testContextForDNSContract.ScProcessor.ProcessSmartContractResult(scrRegister) - if retCode != vmcommon.Ok { - overallReturnCode = retCode - } - if err != nil { - return scrs, overallReturnCode, err + if retCode != vmcommon.Ok || err != nil { + return scrs, retCode, err } // record the SCR and clean all intermediate results @@ -263,11 +257,8 @@ func processRegisterThroughRelayedTxs(tb testing.TB, args argsProcessRegister) ( // execute the scr on the initial shard that contains the user address (builtin function call) retCode, err = args.testContextForRelayerAndUser.ScProcessor.ProcessSmartContractResult(scrSCProcess) - if retCode != vmcommon.Ok { - overallReturnCode = retCode - 
} - if err != nil { - return scrs, overallReturnCode, err + if retCode != vmcommon.Ok || err != nil { + return scrs, retCode, err } // record the SCR and clean all intermediate results @@ -279,11 +270,8 @@ func processRegisterThroughRelayedTxs(tb testing.TB, args argsProcessRegister) ( // execute the finished scr on the shard that contains the dns contract retCode, err = args.testContextForDNSContract.ScProcessor.ProcessSmartContractResult(scrFinishedBuiltinCall) - if retCode != vmcommon.Ok { - overallReturnCode = retCode - } - if err != nil { - return scrs, overallReturnCode, err + if retCode != vmcommon.Ok || err != nil { + return scrs, retCode, err } // record the SCR and clean all intermediate results @@ -295,11 +283,8 @@ func processRegisterThroughRelayedTxs(tb testing.TB, args argsProcessRegister) ( // execute the scr on the initial shard that contains the user address (refund) retCode, err = args.testContextForRelayerAndUser.ScProcessor.ProcessSmartContractResult(scrSCFinished) - if retCode != vmcommon.Ok { - overallReturnCode = retCode - } - if err != nil { - return scrs, overallReturnCode, err + if retCode != vmcommon.Ok || err != nil { + return scrs, retCode, err } // record the SCR and clean all intermediate results @@ -318,7 +303,7 @@ func processRegisterThroughRelayedTxs(tb testing.TB, args argsProcessRegister) ( require.Nil(tb, err) args.testContextForDNSContract.CleanIntermediateTransactions(tb) - return scrs, overallReturnCode, nil + return scrs, vmcommon.Ok, nil } func checkBalances(tb testing.TB, args argsProcessRegister, initialBalance *big.Int) { From 433fc615d6ef831f3e72cdac3fec288d13dcdfd0 Mon Sep 17 00:00:00 2001 From: jules01 Date: Tue, 7 Mar 2023 13:06:14 +0200 Subject: [PATCH 041/221] - refactored scr processing --- integrationTests/vm/txsFee/dns_test.go | 151 +++++++++++++------------ 1 file changed, 81 insertions(+), 70 deletions(-) diff --git a/integrationTests/vm/txsFee/dns_test.go b/integrationTests/vm/txsFee/dns_test.go index 
2e23bdca51a..90aa65fcd7e 100644 --- a/integrationTests/vm/txsFee/dns_test.go +++ b/integrationTests/vm/txsFee/dns_test.go @@ -10,13 +10,15 @@ import ( "fmt" "math/big" "testing" + "unicode/utf8" - "github.com/davecgh/go-spew/spew" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" "github.com/multiversx/mx-chain-go/state" + logger "github.com/multiversx/mx-chain-logger-go" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -177,11 +179,6 @@ func TestDeployDNSContract_TestGasWhenSaveUsernameFailsCrossShard(t *testing.T) account, _ = acc.(state.UserAccountHandler) require.Equal(t, firstUsername, account.GetUserName()) checkBalances(t, args, initialBalance) - - // TODO refactor - for _, scr := range scrs { - log.Info("SCR: " + spew.Sdump(scr)) - } } type argsProcessRegister struct { @@ -224,95 +221,109 @@ func processRegisterThroughRelayedTxs(tb testing.TB, args argsProcessRegister) ( ) // start executing relayed transaction retCode, err := args.testContextForRelayerAndUser.TxProcessor.ProcessTransaction(relayedTx) - if retCode != vmcommon.Ok || err != nil { + if err != nil { return scrs, retCode, err } - log.Warn("relayer", "balance", getBalance(args.testContextForRelayerAndUser, args.relayerAddress).String()) - log.Warn("relayer", "tx", args.gasPrice*relayedTxGasLimit) - - // record the SCR and clean all intermediate results intermediateTxs := args.testContextForRelayerAndUser.GetIntermediateTransactions(tb) args.testContextForRelayerAndUser.CleanIntermediateTransactions(tb) - require.Equal(tb, 1, len(intermediateTxs)) - scrRegister := intermediateTxs[0].(*smartContractResult.SmartContractResult) - scrs = append(scrs, scrRegister) + if 
len(intermediateTxs) == 0 { + return scrs, retCode, err // execution finished + } + testContexts := []*vm.VMTestContext{args.testContextForRelayerAndUser, args.testContextForDNSContract} - log.Warn("scrRegister", "tx", args.gasPrice*scrRegister.GasLimit) + globalReturnCode := vmcommon.Ok - // execute the scr on the shard that contains the dns contract - retCode, err = args.testContextForDNSContract.ScProcessor.ProcessSmartContractResult(scrRegister) - if retCode != vmcommon.Ok || err != nil { - return scrs, retCode, err - } + for { + scr := intermediateTxs[0].(*smartContractResult.SmartContractResult) + scrs = append(scrs, scr) - // record the SCR and clean all intermediate results - intermediateTxs = args.testContextForDNSContract.GetIntermediateTransactions(tb) - args.testContextForDNSContract.CleanIntermediateTransactions(tb) - require.Equal(tb, 1, len(intermediateTxs)) - scrSCProcess := intermediateTxs[0].(*smartContractResult.SmartContractResult) - scrs = append(scrs, scrSCProcess) + context := chooseVMTestContexts(scr, testContexts) + require.NotNil(tb, context) - log.Warn("scrSCProcess", "tx", args.gasPrice*scrSCProcess.GasLimit) + // execute the smart contract result + log.Info("executing scr", "on shard", context.ShardCoordinator.SelfId(), "scr", scrToString(scr)) - // execute the scr on the initial shard that contains the user address (builtin function call) - retCode, err = args.testContextForRelayerAndUser.ScProcessor.ProcessSmartContractResult(scrSCProcess) - if retCode != vmcommon.Ok || err != nil { - return scrs, retCode, err - } + retCode, err = context.ScProcessor.ProcessSmartContractResult(scr) + if err != nil { + return scrs, retCode, err + } + if retCode != vmcommon.Ok { + globalReturnCode = retCode + } - // record the SCR and clean all intermediate results - intermediateTxs = args.testContextForRelayerAndUser.GetIntermediateTransactions(tb) - args.testContextForRelayerAndUser.CleanIntermediateTransactions(tb) - require.Equal(tb, 1, 
len(intermediateTxs)) - scrFinishedBuiltinCall := intermediateTxs[0].(*smartContractResult.SmartContractResult) - scrs = append(scrs, scrFinishedBuiltinCall) + intermediateTxs = context.GetIntermediateTransactions(tb) + context.CleanIntermediateTransactions(tb) + if len(intermediateTxs) == 0 { + return scrs, globalReturnCode, err // execution finished + } + } +} - // execute the finished scr on the shard that contains the dns contract - retCode, err = args.testContextForDNSContract.ScProcessor.ProcessSmartContractResult(scrFinishedBuiltinCall) - if retCode != vmcommon.Ok || err != nil { - return scrs, retCode, err +func chooseVMTestContexts(scr *smartContractResult.SmartContractResult, contexts []*vm.VMTestContext) *vm.VMTestContext { + for _, context := range contexts { + if context.ShardCoordinator.ComputeId(scr.RcvAddr) == context.ShardCoordinator.SelfId() { + return context + } } - // record the SCR and clean all intermediate results - intermediateTxs = args.testContextForDNSContract.GetIntermediateTransactions(tb) - args.testContextForDNSContract.CleanIntermediateTransactions(tb) - require.Equal(tb, 1, len(intermediateTxs)) - scrSCFinished := intermediateTxs[0].(*smartContractResult.SmartContractResult) - scrs = append(scrs, scrSCFinished) + return nil +} - // execute the scr on the initial shard that contains the user address (refund) - retCode, err = args.testContextForRelayerAndUser.ScProcessor.ProcessSmartContractResult(scrSCFinished) - if retCode != vmcommon.Ok || err != nil { - return scrs, retCode, err +func scrToString(scr *smartContractResult.SmartContractResult) string { + data := string(scr.Data) + if !isASCII(data) { + data = hex.EncodeToString(scr.Data) } - // record the SCR and clean all intermediate results - intermediateTxs = args.testContextForRelayerAndUser.GetIntermediateTransactions(tb) - args.testContextForRelayerAndUser.CleanIntermediateTransactions(tb) - require.Equal(tb, 0, len(intermediateTxs)) + return fmt.Sprintf("nonce: %d, value: 
%s, rcvAddr: %s, sender: %s, gasLimit: %d, gasPrice: %d, data: %s", + scr.Nonce, scr.Value.String(), + integrationTests.TestAddressPubkeyConverter.Encode(scr.RcvAddr), + integrationTests.TestAddressPubkeyConverter.Encode(scr.SndAddr), + scr.GasLimit, scr.GasPrice, data, + ) +} - log.Warn("relayer", "balance", getBalance(args.testContextForRelayerAndUser, args.relayerAddress).String()) +func isASCII(data string) bool { + for i := 0; i < len(data); i++ { + if data[i] >= utf8.RuneSelf { + return false + } - // commit & cleanup - _, err = args.testContextForRelayerAndUser.Accounts.Commit() - require.Nil(tb, err) - args.testContextForRelayerAndUser.CleanIntermediateTransactions(tb) + if data[i] >= logger.ASCIISpace { + continue + } - _, err = args.testContextForDNSContract.Accounts.Commit() - require.Nil(tb, err) - args.testContextForDNSContract.CleanIntermediateTransactions(tb) + if data[i] == logger.ASCIITab || data[i] == logger.ASCIILineFeed || data[i] == logger.ASCIINewLine { + continue + } - return scrs, vmcommon.Ok, nil + return false + } + + return true } func checkBalances(tb testing.TB, args argsProcessRegister, initialBalance *big.Int) { entireBalance := big.NewInt(0) - entireBalance.Add(entireBalance, getBalance(args.testContextForRelayerAndUser, args.relayerAddress)) - entireBalance.Add(entireBalance, getBalance(args.testContextForRelayerAndUser, args.userAddress)) - entireBalance.Add(entireBalance, getBalance(args.testContextForDNSContract, args.scAddress)) - entireBalance.Add(entireBalance, args.testContextForRelayerAndUser.TxFeeHandler.GetAccumulatedFees()) - entireBalance.Add(entireBalance, args.testContextForDNSContract.TxFeeHandler.GetAccumulatedFees()) + relayerBalance := getBalance(args.testContextForRelayerAndUser, args.relayerAddress) + userBalance := getBalance(args.testContextForRelayerAndUser, args.userAddress) + scBalance := getBalance(args.testContextForDNSContract, args.scAddress) + accumulatedFees := 
big.NewInt(0).Set(args.testContextForRelayerAndUser.TxFeeHandler.GetAccumulatedFees()) + accumulatedFees.Add(accumulatedFees, args.testContextForDNSContract.TxFeeHandler.GetAccumulatedFees()) + + entireBalance.Add(entireBalance, relayerBalance) + entireBalance.Add(entireBalance, userBalance) + entireBalance.Add(entireBalance, scBalance) + entireBalance.Add(entireBalance, accumulatedFees) + + log.Info("checkBalances", + "relayerBalance", relayerBalance.String(), + "userBalance", userBalance.String(), + "scBalance", scBalance.String(), + "accumulated fees", accumulatedFees.String(), + "total", entireBalance.String(), + ) assert.Equal(tb, initialBalance, entireBalance) } From 05152633187e92b7850344fbf2e895db8afa14e2 Mon Sep 17 00:00:00 2001 From: jules01 Date: Tue, 7 Mar 2023 20:45:13 +0200 Subject: [PATCH 042/221] - added prints --- integrationTests/vm/testInitializer.go | 4 +-- integrationTests/vm/txsFee/dns_test.go | 36 ++++++++++++++++++++++++-- 2 files changed, 35 insertions(+), 5 deletions(-) diff --git a/integrationTests/vm/testInitializer.go b/integrationTests/vm/testInitializer.go index 1acb1994d02..6dd8bd4c712 100644 --- a/integrationTests/vm/testInitializer.go +++ b/integrationTests/vm/testInitializer.go @@ -52,7 +52,6 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" - "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/integrationtests" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" @@ -544,7 +543,6 @@ func CreateVMAndBlockchainHookAndDataPool( Counter: counter, } - hasher := &hashingMocks.HasherMock{} maxGasLimitPerBlock := uint64(0xFFFFFFFFFFFFFFFF) blockChainHookImpl, _ := hooks.NewBlockChainHookImpl(args) argsNewVMFactory := shard.ArgVMContainerFactory{ @@ -557,7 
+555,7 @@ func CreateVMAndBlockchainHookAndDataPool( EnableEpochsHandler: enableEpochsHandler, WasmVMChangeLocker: wasmVMChangeLocker, ESDTTransferParser: esdtTransferParser, - Hasher: hasher, + Hasher: integrationtests.TestHasher, } vmFactory, err := shard.NewVMContainerFactory(argsNewVMFactory) if err != nil { diff --git a/integrationTests/vm/txsFee/dns_test.go b/integrationTests/vm/txsFee/dns_test.go index 90aa65fcd7e..3157be37ddb 100644 --- a/integrationTests/vm/txsFee/dns_test.go +++ b/integrationTests/vm/txsFee/dns_test.go @@ -12,7 +12,9 @@ import ( "testing" "unicode/utf8" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" + "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/integrationTests" "github.com/multiversx/mx-chain-go/integrationTests/vm" @@ -24,6 +26,8 @@ import ( "github.com/stretchr/testify/require" ) +const returnOkData = "@6f6b" + func TestDeployDNSContract_TestRegisterAndResolveAndSendTxWithSndAndRcvUserName(t *testing.T) { testContext, err := vm.CreatePreparedTxProcessorWithVMs(config.EnableEpochs{}) require.Nil(t, err) @@ -143,7 +147,7 @@ func TestDeployDNSContract_TestGasWhenSaveUsernameFailsCrossShard(t *testing.T) userAddress := []byte("user-678901234567890123456789112") require.Equal(t, uint32(2), testContextForRelayerAndUser.ShardCoordinator.ComputeId(userAddress)) - initialBalance := big.NewInt(10000000) + initialBalance := big.NewInt(10000000000) _, _ = vm.CreateAccount(testContextForRelayerAndUser.Accounts, relayerAddress, 0, initialBalance) firstUsername := utils.GenerateUserNameForDNSContract(scAddress) @@ -207,6 +211,8 @@ func processRegisterThroughRelayedTxs(tb testing.TB, args argsProcessRegister) ( userTxData, ) + log.Info("user tx", "tx", txToString(userTx)) + // generate the relayed transaction relayedTxData := utils.PrepareRelayerTxData(userTx) // v1 will suffice 
relayedTxGasLimit := userTxGasLimit + 1 + uint64(len(relayedTxData)) @@ -219,6 +225,9 @@ func processRegisterThroughRelayedTxs(tb testing.TB, args argsProcessRegister) ( relayedTxGasLimit, relayedTxData, ) + + log.Info("executing relayed tx", "tx", txToString(relayedTx)) + // start executing relayed transaction retCode, err := args.testContextForRelayerAndUser.TxProcessor.ProcessTransaction(relayedTx) if err != nil { @@ -251,6 +260,9 @@ func processRegisterThroughRelayedTxs(tb testing.TB, args argsProcessRegister) ( if retCode != vmcommon.Ok { globalReturnCode = retCode } + if string(scr.Data) == returnOkData { + return scrs, globalReturnCode, err // execution finished + } intermediateTxs = context.GetIntermediateTransactions(tb) context.CleanIntermediateTransactions(tb) @@ -276,7 +288,10 @@ func scrToString(scr *smartContractResult.SmartContractResult) string { data = hex.EncodeToString(scr.Data) } - return fmt.Sprintf("nonce: %d, value: %s, rcvAddr: %s, sender: %s, gasLimit: %d, gasPrice: %d, data: %s", + hash, _ := core.CalculateHash(integrationTests.TestMarshalizer, integrationTests.TestHasher, scr) + + return fmt.Sprintf("hash: %s, nonce: %d, value: %s, rcvAddr: %s, sender: %s, gasLimit: %d, gasPrice: %d, data: %s", + hex.EncodeToString(hash), scr.Nonce, scr.Value.String(), integrationTests.TestAddressPubkeyConverter.Encode(scr.RcvAddr), integrationTests.TestAddressPubkeyConverter.Encode(scr.SndAddr), @@ -284,6 +299,23 @@ func scrToString(scr *smartContractResult.SmartContractResult) string { ) } +func txToString(tx *transaction.Transaction) string { + data := string(tx.Data) + if !isASCII(data) { + data = hex.EncodeToString(tx.Data) + } + + hash, _ := core.CalculateHash(integrationTests.TestMarshalizer, integrationTests.TestHasher, tx) + + return fmt.Sprintf("hash: %s, nonce: %d, value: %s, rcvAddr: %s, sender: %s, gasLimit: %d, gasPrice: %d, data: %s", + hex.EncodeToString(hash), + tx.Nonce, tx.Value.String(), + 
integrationTests.TestAddressPubkeyConverter.Encode(tx.RcvAddr), + integrationTests.TestAddressPubkeyConverter.Encode(tx.SndAddr), + tx.GasLimit, tx.GasPrice, data, + ) +} + func isASCII(data string) bool { for i := 0; i < len(data); i++ { if data[i] >= utf8.RuneSelf { From cd63397b7f4d952438c17d17f7641db1393079c5 Mon Sep 17 00:00:00 2001 From: robertsasu Date: Wed, 8 Mar 2023 13:05:39 +0200 Subject: [PATCH 043/221] integrate fix --- go.mod | 2 +- go.sum | 4 ++-- integrationTests/vm/txsFee/dns_test.go | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index acef87c980e..3b591470260 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/multiversx/mx-chain-logger-go v1.0.11 github.com/multiversx/mx-chain-p2p-go v1.0.13 github.com/multiversx/mx-chain-storage-go v1.0.7 - github.com/multiversx/mx-chain-vm-common-go v1.3.38-0.20230301122431-1c50bb69ded0 + github.com/multiversx/mx-chain-vm-common-go v1.3.38-0.20230308095202-db189788becc github.com/multiversx/mx-chain-vm-v1_2-go v1.2.50 github.com/multiversx/mx-chain-vm-v1_3-go v1.3.51 github.com/multiversx/mx-chain-vm-v1_4-go v1.4.77 diff --git a/go.sum b/go.sum index 43bcaf93eec..1ca906f3bf9 100644 --- a/go.sum +++ b/go.sum @@ -625,8 +625,8 @@ github.com/multiversx/mx-chain-storage-go v1.0.7/go.mod h1:gtKoV32Cg2Uy8deHzF8Ud github.com/multiversx/mx-chain-vm-common-go v1.3.34/go.mod h1:sZ2COLCxvf2GxAAJHGmGqWybObLtFuk2tZUyGqnMXE8= github.com/multiversx/mx-chain-vm-common-go v1.3.36/go.mod h1:sZ2COLCxvf2GxAAJHGmGqWybObLtFuk2tZUyGqnMXE8= github.com/multiversx/mx-chain-vm-common-go v1.3.37/go.mod h1:sZ2COLCxvf2GxAAJHGmGqWybObLtFuk2tZUyGqnMXE8= -github.com/multiversx/mx-chain-vm-common-go v1.3.38-0.20230301122431-1c50bb69ded0 h1:T7MwPQgORhAOZqHbKh2Z274eqc599BSCJQOq4Ovy8nA= -github.com/multiversx/mx-chain-vm-common-go v1.3.38-0.20230301122431-1c50bb69ded0/go.mod h1:sZ2COLCxvf2GxAAJHGmGqWybObLtFuk2tZUyGqnMXE8= +github.com/multiversx/mx-chain-vm-common-go 
v1.3.38-0.20230308095202-db189788becc h1:Kiq9b9MST0XAriUu4//odg8OR4VjUl5XG3VKENOg+aE= +github.com/multiversx/mx-chain-vm-common-go v1.3.38-0.20230308095202-db189788becc/go.mod h1:sZ2COLCxvf2GxAAJHGmGqWybObLtFuk2tZUyGqnMXE8= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.50 h1:ScUq7/wq78vthMTQ6v5Ux1DvSMQMHxQ2Sl7aPP26q1w= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.50/go.mod h1:e3uYdgoKzs3puaznbmSjDcRisJc5Do4tpg7VqyYwoek= github.com/multiversx/mx-chain-vm-v1_3-go v1.3.51 h1:axtp5/mpA+xYJ1cu4KtAGETV4t6v6/tNfQh0HCclBYY= diff --git a/integrationTests/vm/txsFee/dns_test.go b/integrationTests/vm/txsFee/dns_test.go index 3157be37ddb..87f3a88d57a 100644 --- a/integrationTests/vm/txsFee/dns_test.go +++ b/integrationTests/vm/txsFee/dns_test.go @@ -176,12 +176,12 @@ func TestDeployDNSContract_TestGasWhenSaveUsernameFailsCrossShard(t *testing.T) scrs, retCode, err = processRegisterThroughRelayedTxs(t, args) require.Nil(t, err) - require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, vmcommon.Ok, retCode) - // check username hasn't changed + // check username has changed acc, _ = testContextForRelayerAndUser.Accounts.GetExistingAccount(userAddress) account, _ = acc.(state.UserAccountHandler) - require.Equal(t, firstUsername, account.GetUserName()) + require.Equal(t, secondUsername, account.GetUserName()) checkBalances(t, args, initialBalance) } From 10c26f2cddaa91e6ab2844747c39667e29e10479 Mon Sep 17 00:00:00 2001 From: robertsasu Date: Wed, 8 Mar 2023 13:16:26 +0200 Subject: [PATCH 044/221] fix after review --- factory/api/apiResolverFactory.go | 2 +- factory/processing/blockProcessorCreator.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/factory/api/apiResolverFactory.go b/factory/api/apiResolverFactory.go index 7a0a4ff6961..0e9f64ce258 100644 --- a/factory/api/apiResolverFactory.go +++ b/factory/api/apiResolverFactory.go @@ -490,7 +490,7 @@ func createBuiltinFuncs( maxNumAddressesInTransferRole uint32, dnsV2Addresses [][]byte, ) 
(vmcommon.BuiltInFunctionFactory, error) { - mapDNSV2Addresses := make(map[string]struct{}, len(dnsV2Addresses)) + mapDNSV2Addresses := make(map[string]struct{}) for _, address := range dnsV2Addresses { mapDNSV2Addresses[string(address)] = struct{}{} } diff --git a/factory/processing/blockProcessorCreator.go b/factory/processing/blockProcessorCreator.go index 4b788f41ece..034d6121444 100644 --- a/factory/processing/blockProcessorCreator.go +++ b/factory/processing/blockProcessorCreator.go @@ -1265,7 +1265,7 @@ func (pcf *processComponentsFactory) createBuiltInFunctionContainer( return nil, err } - mapDNSV2Addresses := make(map[string]struct{}, len(convertedDNSV2Addresses)) + mapDNSV2Addresses := make(map[string]struct{}) for _, address := range convertedDNSV2Addresses { mapDNSV2Addresses[string(address)] = struct{}{} } From 7f1da7bf43ef4da42f98d1e67756fc3217844b10 Mon Sep 17 00:00:00 2001 From: robertsasu Date: Wed, 8 Mar 2023 13:18:40 +0200 Subject: [PATCH 045/221] linter fix --- integrationTests/vm/txsFee/dns_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integrationTests/vm/txsFee/dns_test.go b/integrationTests/vm/txsFee/dns_test.go index 87f3a88d57a..ed53fd49b56 100644 --- a/integrationTests/vm/txsFee/dns_test.go +++ b/integrationTests/vm/txsFee/dns_test.go @@ -174,7 +174,7 @@ func TestDeployDNSContract_TestGasWhenSaveUsernameFailsCrossShard(t *testing.T) secondUsername := utils.GenerateUserNameForDNSContract(scAddress) args.username = secondUsername - scrs, retCode, err = processRegisterThroughRelayedTxs(t, args) + _, retCode, err = processRegisterThroughRelayedTxs(t, args) require.Nil(t, err) require.Equal(t, vmcommon.Ok, retCode) From c3f2d200609cfab4406e15e9c9b0ec76deaedf71 Mon Sep 17 00:00:00 2001 From: jules01 Date: Wed, 8 Mar 2023 13:37:21 +0200 Subject: [PATCH 046/221] - added backwards compatibility test --- integrationTests/vm/txsFee/dns_test.go | 71 ++++++++++++++++++++++++-- 1 file changed, 66 insertions(+), 5 deletions(-) 
diff --git a/integrationTests/vm/txsFee/dns_test.go b/integrationTests/vm/txsFee/dns_test.go index ed53fd49b56..2c629ffaeb1 100644 --- a/integrationTests/vm/txsFee/dns_test.go +++ b/integrationTests/vm/txsFee/dns_test.go @@ -124,14 +124,75 @@ func getBalance(testContext *vm.VMTestContext, address []byte) *big.Int { } // relayer address is in shard 2, creates a transaction on the behalf of the user from shard 2, that will call the DNS contract -// from shard 1 that will try to set the username but fails. -func TestDeployDNSContract_TestGasWhenSaveUsernameFailsCrossShard(t *testing.T) { - testContextForDNSContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) +// from shard 1. +func TestDeployDNSContract_TestGasWhenSaveUsernameFailsCrossShardBackwardsCompatibility(t *testing.T) { + enableEpochs := config.EnableEpochs{ + ChangeUsernameEnableEpoch: 1000, // flag disabled, backwards compatibility + } + + testContextForDNSContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, enableEpochs) require.Nil(t, err) defer testContextForDNSContract.Close() - // TODO remove this - // logger.SetLogLevel("process:TRACE,vm:TRACE") + testContextForRelayerAndUser, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(2, enableEpochs) + require.Nil(t, err) + defer testContextForRelayerAndUser.Close() + + scAddress, _ := utils.DoDeployDNS(t, testContextForDNSContract, "../../multiShard/smartContract/dns/dns.wasm") + fmt.Println(scAddress) + utils.CleanAccumulatedIntermediateTransactions(t, testContextForDNSContract) + require.Equal(t, uint32(1), testContextForDNSContract.ShardCoordinator.ComputeId(scAddress)) + + relayerAddress := []byte("relayer-901234567890123456789112") + require.Equal(t, uint32(2), testContextForRelayerAndUser.ShardCoordinator.ComputeId(relayerAddress)) + userAddress := []byte("user-678901234567890123456789112") + require.Equal(t, uint32(2), testContextForRelayerAndUser.ShardCoordinator.ComputeId(userAddress)) + + 
initialBalance := big.NewInt(10000000000) + _, _ = vm.CreateAccount(testContextForRelayerAndUser.Accounts, relayerAddress, 0, initialBalance) + + firstUsername := utils.GenerateUserNameForDNSContract(scAddress) + args := argsProcessRegister{ + relayerAddress: relayerAddress, + userAddress: userAddress, + scAddress: scAddress, + testContextForRelayerAndUser: testContextForRelayerAndUser, + testContextForDNSContract: testContextForDNSContract, + username: firstUsername, + gasPrice: 10, + } + scrs, retCode, err := processRegisterThroughRelayedTxs(t, args) + require.Nil(t, err) + require.Equal(t, vmcommon.Ok, retCode) + assert.Equal(t, 4, len(scrs)) + + expectedTotalBalance := big.NewInt(0).Set(initialBalance) + expectedTotalBalance.Sub(expectedTotalBalance, big.NewInt(10)) // due to a bug, some fees were burnt + + // check username + acc, _ := testContextForRelayerAndUser.Accounts.GetExistingAccount(userAddress) + account, _ := acc.(state.UserAccountHandler) + require.Equal(t, firstUsername, account.GetUserName()) + checkBalances(t, args, expectedTotalBalance) + + secondUsername := utils.GenerateUserNameForDNSContract(scAddress) + args.username = secondUsername + + _, retCode, err = processRegisterThroughRelayedTxs(t, args) + require.Nil(t, err) + require.Equal(t, vmcommon.UserError, retCode) + + // check username hasn't changed + acc, _ = testContextForRelayerAndUser.Accounts.GetExistingAccount(userAddress) + account, _ = acc.(state.UserAccountHandler) + require.Equal(t, firstUsername, account.GetUserName()) + checkBalances(t, args, expectedTotalBalance) +} + +func TestDeployDNSContract_TestGasWhenSaveUsernameAfterDNSv2IsActivated(t *testing.T) { + testContextForDNSContract, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) + require.Nil(t, err) + defer testContextForDNSContract.Close() testContextForRelayerAndUser, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(2, config.EnableEpochs{}) require.Nil(t, err) From 
b5357c99222f5781cefba6c78998c8fd97ad2eb4 Mon Sep 17 00:00:00 2001 From: robertsasu Date: Wed, 8 Mar 2023 21:22:40 +0200 Subject: [PATCH 047/221] added new tests for dns v2 --- cmd/node/config/config.toml | 2 +- integrationTests/vm/testInitializer.go | 31 ++++- .../wasm/testdata/manage-user-contract.wasm | Bin 0 -> 2524 bytes .../vm/wasm/wasmvm/wasmVM_test.go | 117 ++++++++++++++++++ 4 files changed, 147 insertions(+), 3 deletions(-) create mode 100644 integrationTests/vm/wasm/testdata/manage-user-contract.wasm diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index 1a6e71a875e..10bf1ea5224 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -708,7 +708,7 @@ ] MaxNumAddressesInTransferRole = 100 DNSV2Addresses =[ - "erd1he8wwxn4az3j82p7wwqsdk794dm7hcrwny6f8dfegkfla34udx7qrf7xje", #shard 0 + "erd1qqqqqqqqqqqqqpgqcy67yanvwpepqmerkq6m8pgav0tlvgwxjmdq4hukxw", ] [Hardfork] diff --git a/integrationTests/vm/testInitializer.go b/integrationTests/vm/testInitializer.go index cf2baba8230..596bafef9b6 100644 --- a/integrationTests/vm/testInitializer.go +++ b/integrationTests/vm/testInitializer.go @@ -69,6 +69,15 @@ import ( var dnsAddr = []byte{0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 137, 17, 46, 56, 127, 47, 62, 172, 4, 126, 190, 242, 221, 230, 209, 243, 105, 104, 242, 66, 49, 49} +// DNSV2Address defines the address for the new DNS contract +const DNSV2Address = "erd1qqqqqqqqqqqqqpgqcy67yanvwpepqmerkq6m8pgav0tlvgwxjmdq4hukxw" + +// DNSV2DeployerAddress defines the address of the deployer for the DNS v2 contracts +const DNSV2DeployerAddress = "erd1uzk2g5rhvg8prk9y50d0q7qsxg7tm7f320q0q4qlpmfu395wjmdqqy0n9q" + +// TestAddressPubkeyConverter represents an address public key converter +var TestAddressPubkeyConverter, _ = pubkeyConverter.NewBech32PubkeyConverter(32, log) + // TODO: Merge test utilities from this file with the ones from "wasmvm/utils.go" var globalEpochNotifier = forking.NewGenericEpochNotifier() @@ -504,13 +513,15 @@ func 
CreateVMAndBlockchainHookAndDataPool( gasSchedule = mock.NewGasScheduleNotifierMock(testGasSchedule) } + dnsV2Decoded, _ := TestAddressPubkeyConverter.Decode(DNSV2Address) + argsBuiltIn := builtInFunctions.ArgsCreateBuiltInFunctionContainer{ GasSchedule: gasSchedule, MapDNSAddresses: map[string]struct{}{ string(dnsAddr): {}, }, MapDNSV2Addresses: map[string]struct{}{ - string(dnsAddr): {}, + string(dnsV2Decoded): {}, }, Marshalizer: integrationtests.TestMarshalizer, Accounts: accnts, @@ -591,6 +602,7 @@ func CreateVMAndBlockchainHookMeta( gasSchedule = mock.NewGasScheduleNotifierMock(testGasSchedule) } + dnsV2Decoded, _ := TestAddressPubkeyConverter.Decode(DNSV2Address) enableEpochsHandler, _ := enablers.NewEnableEpochsHandler(enableEpochsConfig, globalEpochNotifier) argsBuiltIn := builtInFunctions.ArgsCreateBuiltInFunctionContainer{ GasSchedule: gasSchedule, @@ -598,7 +610,7 @@ func CreateVMAndBlockchainHookMeta( string(dnsAddr): {}, }, MapDNSV2Addresses: map[string]struct{}{ - string(dnsAddr): {}, + string(dnsV2Decoded): {}, }, Marshalizer: integrationtests.TestMarshalizer, Accounts: accnts, @@ -1338,6 +1350,21 @@ func TestAccount( return senderRecovShardAccount.GetBalance() } +// TestAccountUsername - +func TestAccountUsername( + t *testing.T, + accnts state.AccountsAdapter, + senderAddressBytes []byte, + username []byte, +) { + + senderRecovAccount, _ := accnts.GetExistingAccount(senderAddressBytes) + require.False(t, check.IfNil(senderRecovAccount)) + + senderRecovShardAccount := senderRecovAccount.(state.UserAccountHandler) + require.Equal(t, senderRecovShardAccount.GetUserName(), username) +} + // ComputeExpectedBalance - func ComputeExpectedBalance( existing *big.Int, diff --git a/integrationTests/vm/wasm/testdata/manage-user-contract.wasm b/integrationTests/vm/wasm/testdata/manage-user-contract.wasm new file mode 100644 index 0000000000000000000000000000000000000000..b1b34fceb4bb315f180c09d32c1dcc7b57592aea GIT binary patch literal 2524 zcma)8L2nyH6n-I)(g 
zA4WSwJ7P>bB*r^(JkFnN!bpw1A;1eBJfj^y@m`Ne{w}AL8yrWYPJ58-uczrSrGPEx z`fEoI9=OyV9JoPi?Id#}D&-5wAZfeS)|nlG7j&`Xw#)9hK|9-{kj>L=mu(#NZQ4HS zyFoT0$<}&5j{@h-wA1b&`DdShl#KSs$C~_Cc2Yi2VcT-q891|Vva)q+zteOy!TzT! zTiGxLV1Btg($sFJeP@s%5T47|sLJlA1q0D3*4(>VWY};IbL_gnZbv7B=FMb(pUNCy zs=4Qyy^Y}~$q5%qq|VQUDwIM<8B{7FD1$Hjg-Xy&>5>`?`}pw-ipFGr{55~S;CFem zT*P^&b=?h*wB)shA?DbzIyelye!Fxe0VEZ^;qmBO%xds&A?rskEMZ&F3%eo zO_Cv4NJsOAqFY$Eq-QW!*4)PS?@)@psC7*C4<44afTMlRW0jA8uke)T0AQ4*>&a#?D;log{DnlohQy68}>&=%cAvWrHV&?sHU zQr$0=P_8^i;)@2uM#WM?s#Q}l(gxi)+zeK+7dh*u#3oGY-8j&dd883bvJM3G3@eok z{rm79C3tZL25ABt0G_x?a}9<~s6(#uKUZ}a zD*)nv6ZY$0@{VwULGSLQgm(q|@jt;3FCjn3UXnF_Q~=pG&;)28n#V@%8_tbWNaKxw zpi$fDng>#>BmMg185VM801(WFo{&6f14#V762{a77t93mS5*=Bj=zEn*7;#FAc$l?Q9=m~ z?1BZuTC8wFz~}T`yr5n~J*y7mu-ga>&V>6m zGRP6q@q1X1kdqKKfEK9s170H&;TH|vUmwSDawNke0hS)6Q|{& zfhdoU_gfU@lC7|0WuXahgBypD5R}RiV7g$6(00$&HSRQQA0wS)e(s_TDBXzOGV2MI zJ%xC>h&t`FaHjhZk6}tZ65t9r2sB>^m0Kh1H^0y(2gA(Az-21(bLB6lGeXWIe<1PI ze#A|xuxTUdM>RElR0oCal2ork|=gm=*mB14k0G;kWyL&9hl92m9QAiyYK9NYspshC+V;(Rn_KrkUEfVwFsI3h+4sMF pir5Hyf8^2uKHM$0?=p8fBRWV1ou(N!n@4HtTKNSP1yAv1{s+9${%8OI literal 0 HcmV?d00001 diff --git a/integrationTests/vm/wasm/wasmvm/wasmVM_test.go b/integrationTests/vm/wasm/wasmvm/wasmVM_test.go index 7e97d8b69d7..65f8e0e259b 100644 --- a/integrationTests/vm/wasm/wasmvm/wasmVM_test.go +++ b/integrationTests/vm/wasm/wasmvm/wasmVM_test.go @@ -932,3 +932,120 @@ func TestCommunityContract_CrossShard_TxProcessor(t *testing.T) { utils.TestAccount(t, testContextParentSC.Accounts, parentAddress, 0, zero) utils.TestAccount(t, testContextFunderSC.Accounts, funderAddress, 0, transferEGLD) } + +func TestDeployDNSV2SetDeleteUserNames(t *testing.T) { + senderAddressBytes, _ := vm.TestAddressPubkeyConverter.Decode(vm.DNSV2DeployerAddress) + senderNonce := uint64(0) + senderBalance := big.NewInt(100000000) + gasPrice := uint64(1) + gasLimit := uint64(10000000) + + scCode := wasm.GetSCCode("../testdata/manage-user-contract.wasm") + + tx := vm.CreateTx( + 
senderAddressBytes, + vm.CreateEmptyAddress(), + senderNonce, + big.NewInt(0), + gasPrice, + gasLimit, + wasm.CreateDeployTxData(scCode), + ) + + testContext, err := vm.CreatePreparedTxProcessorAndAccountsWithVMs( + senderNonce, + senderAddressBytes, + senderBalance, + config.EnableEpochs{}, + ) + require.Nil(t, err) + defer testContext.Close() + + returnCode, err := testContext.TxProcessor.ProcessTransaction(tx) + require.Nil(t, err) + require.Equal(t, returnCode, vmcommon.Ok) + + _, err = testContext.Accounts.Commit() + require.Nil(t, err) + + expectedBalance := big.NewInt(90000000) + + vm.TestAccount( + t, + testContext.Accounts, + senderAddressBytes, + senderNonce+1, + expectedBalance) + + dnsV2Address, _ := vm.TestAddressPubkeyConverter.Decode(vm.DNSV2Address) + senderNonce++ + tx = vm.CreateTx( + senderAddressBytes, + dnsV2Address, + senderNonce, + big.NewInt(0), + gasPrice, + gasLimit, + "saveName@"+hex.EncodeToString(senderAddressBytes)+"@"+hex.EncodeToString([]byte("userName1")), + ) + + _ = logger.SetLogLevel("*:TRACE") + decoded, _ := hex.DecodeString("736574557365726e616d6540373537333635373234653631366436353331") + fmt.Println(string(decoded)) + argParser := parsers.NewCallArgsParser() + functionName, args, err := argParser.ParseData("736574557365726e616d6540373537333635373234653631366436353331") + log.Debug("some message", "lenArgs", args, "funcs", functionName) + + returnCode, err = testContext.TxProcessor.ProcessTransaction(tx) + require.Nil(t, err) + require.Equal(t, returnCode, vmcommon.Ok) + vm.TestAccountUsername(t, testContext.Accounts, senderAddressBytes, []byte("userName1")) + + senderNonce++ + tx = vm.CreateTx( + senderAddressBytes, + dnsV2Address, + senderNonce, + big.NewInt(0), + gasPrice, + gasLimit, + "removeName@"+hex.EncodeToString(senderAddressBytes), + ) + + returnCode, err = testContext.TxProcessor.ProcessTransaction(tx) + require.Nil(t, err) + require.Equal(t, returnCode, vmcommon.Ok) + vm.TestAccountUsername(t, 
testContext.Accounts, senderAddressBytes, nil) + + senderNonce++ + tx = vm.CreateTx( + senderAddressBytes, + dnsV2Address, + senderNonce, + big.NewInt(0), + gasPrice, + gasLimit, + "saveName@"+hex.EncodeToString(senderAddressBytes)+"@"+hex.EncodeToString([]byte("userName2")), + ) + + returnCode, err = testContext.TxProcessor.ProcessTransaction(tx) + require.Nil(t, err) + require.Equal(t, returnCode, vmcommon.Ok) + vm.TestAccountUsername(t, testContext.Accounts, senderAddressBytes, []byte("userName2")) + + senderNonce++ + tx = vm.CreateTx( + senderAddressBytes, + dnsV2Address, + senderNonce, + big.NewInt(0), + gasPrice, + gasLimit, + "saveName@"+hex.EncodeToString(senderAddressBytes)+"@"+hex.EncodeToString([]byte("userName3")), + ) + + returnCode, err = testContext.TxProcessor.ProcessTransaction(tx) + require.Nil(t, err) + require.Equal(t, returnCode, vmcommon.Ok) + vm.TestAccountUsername(t, testContext.Accounts, senderAddressBytes, []byte("userName3")) +} From 611d8891881238243c854dcd7acf63bb54e21cd5 Mon Sep 17 00:00:00 2001 From: robertsasu Date: Thu, 9 Mar 2023 11:52:52 +0200 Subject: [PATCH 048/221] added new tests for dns v2. 
go mod for new vm-common with updated name for DeleteUsername --- go.mod | 2 +- go.sum | 4 ++-- .../wasm/testdata/manage-user-contract.wasm | Bin 2524 -> 2586 bytes .../vm/wasm/wasmvm/wasmVM_test.go | 7 ------- 4 files changed, 3 insertions(+), 10 deletions(-) diff --git a/go.mod b/go.mod index acef87c980e..f4e2bba1e26 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/multiversx/mx-chain-logger-go v1.0.11 github.com/multiversx/mx-chain-p2p-go v1.0.13 github.com/multiversx/mx-chain-storage-go v1.0.7 - github.com/multiversx/mx-chain-vm-common-go v1.3.38-0.20230301122431-1c50bb69ded0 + github.com/multiversx/mx-chain-vm-common-go v1.3.38-0.20230309094547-02b05187de89 github.com/multiversx/mx-chain-vm-v1_2-go v1.2.50 github.com/multiversx/mx-chain-vm-v1_3-go v1.3.51 github.com/multiversx/mx-chain-vm-v1_4-go v1.4.77 diff --git a/go.sum b/go.sum index 43bcaf93eec..2f93c5fc0fc 100644 --- a/go.sum +++ b/go.sum @@ -625,8 +625,8 @@ github.com/multiversx/mx-chain-storage-go v1.0.7/go.mod h1:gtKoV32Cg2Uy8deHzF8Ud github.com/multiversx/mx-chain-vm-common-go v1.3.34/go.mod h1:sZ2COLCxvf2GxAAJHGmGqWybObLtFuk2tZUyGqnMXE8= github.com/multiversx/mx-chain-vm-common-go v1.3.36/go.mod h1:sZ2COLCxvf2GxAAJHGmGqWybObLtFuk2tZUyGqnMXE8= github.com/multiversx/mx-chain-vm-common-go v1.3.37/go.mod h1:sZ2COLCxvf2GxAAJHGmGqWybObLtFuk2tZUyGqnMXE8= -github.com/multiversx/mx-chain-vm-common-go v1.3.38-0.20230301122431-1c50bb69ded0 h1:T7MwPQgORhAOZqHbKh2Z274eqc599BSCJQOq4Ovy8nA= -github.com/multiversx/mx-chain-vm-common-go v1.3.38-0.20230301122431-1c50bb69ded0/go.mod h1:sZ2COLCxvf2GxAAJHGmGqWybObLtFuk2tZUyGqnMXE8= +github.com/multiversx/mx-chain-vm-common-go v1.3.38-0.20230309094547-02b05187de89 h1:RvWG9FNCxw6OE1Lo4qcruYW441YVq27gGGZddDScXic= +github.com/multiversx/mx-chain-vm-common-go v1.3.38-0.20230309094547-02b05187de89/go.mod h1:sZ2COLCxvf2GxAAJHGmGqWybObLtFuk2tZUyGqnMXE8= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.50 h1:ScUq7/wq78vthMTQ6v5Ux1DvSMQMHxQ2Sl7aPP26q1w= 
github.com/multiversx/mx-chain-vm-v1_2-go v1.2.50/go.mod h1:e3uYdgoKzs3puaznbmSjDcRisJc5Do4tpg7VqyYwoek= github.com/multiversx/mx-chain-vm-v1_3-go v1.3.51 h1:axtp5/mpA+xYJ1cu4KtAGETV4t6v6/tNfQh0HCclBYY= diff --git a/integrationTests/vm/wasm/testdata/manage-user-contract.wasm b/integrationTests/vm/wasm/testdata/manage-user-contract.wasm index b1b34fceb4bb315f180c09d32c1dcc7b57592aea..0d41f70927b4706c77d6a3c6e4aabce54e0347cb 100644 GIT binary patch delta 647 zcmX|;zi-n(6vy9tKG#khCD%F_D4{)v46Q&27MGz&bqZ@1)UlGSaEhA7sbr^>SYihD zD)Pp^f!L_TpTG=A49x6|ymKm+`+o2J{G9Hm|FL@tX^|lS;7cz7NC3+OOGv0JYsUv$ zNUX9B7=c9aY0^|KZ!bXKsblH+l~L%8)h<58Mm56yduVE9md?|Oej69>=HugZHiKq$jX|gD3ripoqU|_@ z)n|M$c-YK!c5$UgX{KSvEz(=N>yEi`n2yguRh!haZrc8*XY^ZOuj)G5?tv$J)zUdB z*|iZ%d8ZFUkWB@i4Kf7J1=)^4`rZd5QR4Cu#7EOU3``iMuRv}^bf_RcoXSo}@!$7f zA#O_sZd!FwOA-3SK+Wcut32c?S-gdpPrPVkPkxQ9x&yg#DV3qaJgW? zh`3yK!TW{e{Kvo=gQPFFLSURt-pcbBh%x1Pp9b8zCKK@=L*_A=%`qke8T2>S%k1br z!|_rB2Q{1wwq>gi%BsG(kB>GK#jR;9ByO{lQ#04VvoL(<)aljQ5XbM%w&KL0Ig!*CaMiH?`Hc8iGjM` zpo-pt_!=-W@Hv2uiGdvn32}B<-v57hzyEu>_o4H>eGXBcAOPSCCjKPGVCT_)>)CTjPNVrH3^h~F`1_#q@frOvv9H@TW%a?~zJ6Cb zE5x0UEPCtV0og~OpuCw2wuiMjNrL6k=v4gIHrlb=EXjWOk!>Yzwf?4u*JRd zKJE(B0p~rURp>i==g9N~l#txTmp+XID$|BYmHt(x9qzHa%S|6VQ~$J|bd3YqhJt1s kn2fvPI4+a2MpwVCuiad&&T^Hd@}U=MsaC40^q>0RAIv3oA^-pY diff --git a/integrationTests/vm/wasm/wasmvm/wasmVM_test.go b/integrationTests/vm/wasm/wasmvm/wasmVM_test.go index 65f8e0e259b..16df1e7a154 100644 --- a/integrationTests/vm/wasm/wasmvm/wasmVM_test.go +++ b/integrationTests/vm/wasm/wasmvm/wasmVM_test.go @@ -989,13 +989,6 @@ func TestDeployDNSV2SetDeleteUserNames(t *testing.T) { "saveName@"+hex.EncodeToString(senderAddressBytes)+"@"+hex.EncodeToString([]byte("userName1")), ) - _ = logger.SetLogLevel("*:TRACE") - decoded, _ := hex.DecodeString("736574557365726e616d6540373537333635373234653631366436353331") - fmt.Println(string(decoded)) - argParser := parsers.NewCallArgsParser() - functionName, args, err := 
argParser.ParseData("736574557365726e616d6540373537333635373234653631366436353331") - log.Debug("some message", "lenArgs", args, "funcs", functionName) - returnCode, err = testContext.TxProcessor.ProcessTransaction(tx) require.Nil(t, err) require.Equal(t, returnCode, vmcommon.Ok) From 0d2fdc73e1d40a60c56330db2bb5fe82f089d11d Mon Sep 17 00:00:00 2001 From: robertsasu Date: Thu, 9 Mar 2023 12:56:36 +0200 Subject: [PATCH 049/221] fix after merge --- integrationTests/vm/testInitializer.go | 1 + 1 file changed, 1 insertion(+) diff --git a/integrationTests/vm/testInitializer.go b/integrationTests/vm/testInitializer.go index 7369c54aab4..8c01682d8b8 100644 --- a/integrationTests/vm/testInitializer.go +++ b/integrationTests/vm/testInitializer.go @@ -521,6 +521,7 @@ func CreateVMAndBlockchainHookAndDataPool( }, MapDNSV2Addresses: map[string]struct{}{ string(dnsV2Decoded): {}, + string(dnsAddr): {}, }, Marshalizer: integrationtests.TestMarshalizer, Accounts: accnts, From 1af4cdd0785c39f82176e267361f67e270d8a675 Mon Sep 17 00:00:00 2001 From: MariusC Date: Tue, 14 Mar 2023 14:12:28 +0200 Subject: [PATCH 050/221] FEAT: Refactor outport interface + stubs --- go.mod | 2 +- go.sum | 4 +- outport/interface.go | 31 +++++++------- outport/mock/driverStub.go | 41 +++++++++---------- .../outport/outportDataProviderStub.go | 4 +- testscommon/outport/outportStub.go | 25 ++++++----- 6 files changed, 52 insertions(+), 55 deletions(-) diff --git a/go.mod b/go.mod index d3124d16a7c..f2407d1f868 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-core-go v1.1.35 + github.com/multiversx/mx-chain-core-go v1.1.35-0.20230309140731-222b64e21ca6 github.com/multiversx/mx-chain-crypto-go v1.2.5 github.com/multiversx/mx-chain-es-indexer-go v1.3.12 github.com/multiversx/mx-chain-logger-go v1.0.11 diff --git a/go.sum b/go.sum index 
fdd249cef76..c50b9921759 100644 --- a/go.sum +++ b/go.sum @@ -610,8 +610,8 @@ github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUY github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.31/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.1.35 h1:pzYXfYgCXiPXszZiS7POevE1W4ZaToVD65hbJMRiui4= -github.com/multiversx/mx-chain-core-go v1.1.35/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= +github.com/multiversx/mx-chain-core-go v1.1.35-0.20230309140731-222b64e21ca6 h1:+J4X/BmNQPz7+/bhQ/mSss3Gl70GpwabZObFDUK39SE= +github.com/multiversx/mx-chain-core-go v1.1.35-0.20230309140731-222b64e21ca6/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-crypto-go v1.2.5 h1:tuq3BUNMhKud5DQbZi9DiVAAHUXypizy8zPH0NpTGZk= github.com/multiversx/mx-chain-crypto-go v1.2.5/go.mod h1:teqhNyWEqfMPgNn8sgWXlgtJ1a36jGCnhs/tRpXW6r4= github.com/multiversx/mx-chain-es-indexer-go v1.3.12 h1:b7B8KMrCHM0Ghh4W0s1jXLI5MknEAOo7ZChFAwUUYpY= diff --git a/outport/interface.go b/outport/interface.go index 7efc264de51..5923ee47717 100644 --- a/outport/interface.go +++ b/outport/interface.go @@ -1,7 +1,6 @@ package outport import ( - "github.com/multiversx/mx-chain-core-go/data" outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-go/outport/process" ) @@ -9,13 +8,13 @@ import ( // Driver is an interface for saving node specific data to other storage. // This could be an elastic search index, a MySql database or any other external services. 
type Driver interface { - SaveBlock(args *outportcore.ArgsSaveBlockData) error - RevertIndexedBlock(header data.HeaderHandler, body data.BodyHandler) error - SaveRoundsInfo(roundsInfos []*outportcore.RoundInfo) error - SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][]byte, epoch uint32) error - SaveValidatorsRating(indexID string, infoRating []*outportcore.ValidatorRatingInfo) error - SaveAccounts(blockTimestamp uint64, acc map[string]*outportcore.AlteredAccount, shardID uint32) error - FinalizedBlock(headerHash []byte) error + SaveBlock(outportBlock *outportcore.OutportBlock) error + RevertIndexedBlock(blockData *outportcore.BlockData) error + SaveRoundsInfo(roundsInfos *outportcore.RoundsInfo) error + SaveValidatorsPubKeys(validatorsPubKeys *outportcore.ValidatorsPubKeys) error + SaveValidatorsRating(validatorsRating *outportcore.ValidatorsRating) error + SaveAccounts(accounts *outportcore.Accounts) error + FinalizedBlock(finalizedBlock *outportcore.FinalizedBlock) error Close() error IsInterfaceNil() bool } @@ -23,13 +22,13 @@ type Driver interface { // OutportHandler is interface that defines what a proxy implementation should be able to do // The node is able to talk only with this interface type OutportHandler interface { - SaveBlock(args *outportcore.ArgsSaveBlockData) - RevertIndexedBlock(header data.HeaderHandler, body data.BodyHandler) - SaveRoundsInfo(roundsInfos []*outportcore.RoundInfo) - SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][]byte, epoch uint32) - SaveValidatorsRating(indexID string, infoRating []*outportcore.ValidatorRatingInfo) - SaveAccounts(blockTimestamp uint64, acc map[string]*outportcore.AlteredAccount, shardID uint32) - FinalizedBlock(headerHash []byte) + SaveBlock(outportBlock *outportcore.OutportBlock) + RevertIndexedBlock(blockData *outportcore.BlockData) + SaveRoundsInfo(roundsInfos *outportcore.RoundsInfo) + SaveValidatorsPubKeys(validatorsPubKeys *outportcore.ValidatorsPubKeys) + 
SaveValidatorsRating(validatorsRating *outportcore.ValidatorsRating) + SaveAccounts(accounts *outportcore.Accounts) + FinalizedBlock(finalizedBlock *outportcore.FinalizedBlock) SubscribeDriver(driver Driver) error HasDrivers() bool Close() error @@ -38,6 +37,6 @@ type OutportHandler interface { // DataProviderOutport is an interface that defines what an implementation of data provider outport should be able to do type DataProviderOutport interface { - PrepareOutportSaveBlockData(arg process.ArgPrepareOutportSaveBlockData) (*outportcore.ArgsSaveBlockData, error) + PrepareOutportSaveBlockData(arg process.ArgPrepareOutportSaveBlockData) (*outportcore.OutportBlock, error) IsInterfaceNil() bool } diff --git a/outport/mock/driverStub.go b/outport/mock/driverStub.go index 7bf27d343fc..2136dfaa338 100644 --- a/outport/mock/driverStub.go +++ b/outport/mock/driverStub.go @@ -1,24 +1,23 @@ package mock import ( - "github.com/multiversx/mx-chain-core-go/data" outportcore "github.com/multiversx/mx-chain-core-go/data/outport" ) // DriverStub - type DriverStub struct { - SaveBlockCalled func(args *outportcore.ArgsSaveBlockData) error - RevertBlockCalled func(header data.HeaderHandler, body data.BodyHandler) error - SaveRoundsInfoCalled func(roundsInfos []*outportcore.RoundInfo) error - SaveValidatorsPubKeysCalled func(validatorsPubKeys map[uint32][][]byte, epoch uint32) error - SaveValidatorsRatingCalled func(indexID string, infoRating []*outportcore.ValidatorRatingInfo) error - SaveAccountsCalled func(timestamp uint64, acc map[string]*outportcore.AlteredAccount) error - FinalizedBlockCalled func(headerHash []byte) error + SaveBlockCalled func(outportBlock *outportcore.OutportBlock) error + RevertIndexedBlockCalled func(blockData *outportcore.BlockData) error + SaveRoundsInfoCalled func(roundsInfos *outportcore.RoundsInfo) error + SaveValidatorsPubKeysCalled func(validatorsPubKeys *outportcore.ValidatorsPubKeys) error + SaveValidatorsRatingCalled func(validatorsRating 
*outportcore.ValidatorsRating) error + SaveAccountsCalled func(accounts *outportcore.Accounts) error + FinalizedBlockCalled func(finalizedBlock *outportcore.FinalizedBlock) error CloseCalled func() error } // SaveBlock - -func (d *DriverStub) SaveBlock(args *outportcore.ArgsSaveBlockData) error { +func (d *DriverStub) SaveBlock(args *outportcore.OutportBlock) error { if d.SaveBlockCalled != nil { return d.SaveBlockCalled(args) } @@ -27,16 +26,16 @@ func (d *DriverStub) SaveBlock(args *outportcore.ArgsSaveBlockData) error { } // RevertIndexedBlock - -func (d *DriverStub) RevertIndexedBlock(header data.HeaderHandler, body data.BodyHandler) error { - if d.RevertBlockCalled != nil { - return d.RevertBlockCalled(header, body) +func (d *DriverStub) RevertIndexedBlock(blockData *outportcore.BlockData) error { + if d.RevertIndexedBlockCalled != nil { + return d.RevertIndexedBlockCalled(blockData) } return nil } // SaveRoundsInfo - -func (d *DriverStub) SaveRoundsInfo(roundsInfos []*outportcore.RoundInfo) error { +func (d *DriverStub) SaveRoundsInfo(roundsInfos *outportcore.RoundsInfo) error { if d.SaveRoundsInfoCalled != nil { return d.SaveRoundsInfoCalled(roundsInfos) } @@ -45,36 +44,36 @@ func (d *DriverStub) SaveRoundsInfo(roundsInfos []*outportcore.RoundInfo) error } // SaveValidatorsPubKeys - -func (d *DriverStub) SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][]byte, epoch uint32) error { +func (d *DriverStub) SaveValidatorsPubKeys(validatorsPubKeys *outportcore.ValidatorsPubKeys) error { if d.SaveValidatorsPubKeysCalled != nil { - return d.SaveValidatorsPubKeysCalled(validatorsPubKeys, epoch) + return d.SaveValidatorsPubKeysCalled(validatorsPubKeys) } return nil } // SaveValidatorsRating - -func (d *DriverStub) SaveValidatorsRating(indexID string, infoRating []*outportcore.ValidatorRatingInfo) error { +func (d *DriverStub) SaveValidatorsRating(validatorsRating *outportcore.ValidatorsRating) error { if d.SaveValidatorsRatingCalled != nil { - return 
d.SaveValidatorsRatingCalled(indexID, infoRating) + return d.SaveValidatorsRatingCalled(validatorsRating) } return nil } // SaveAccounts - -func (d *DriverStub) SaveAccounts(timestamp uint64, acc map[string]*outportcore.AlteredAccount, _ uint32) error { +func (d *DriverStub) SaveAccounts(accounts *outportcore.Accounts) error { if d.SaveAccountsCalled != nil { - return d.SaveAccountsCalled(timestamp, acc) + return d.SaveAccountsCalled(accounts) } return nil } // FinalizedBlock - -func (d *DriverStub) FinalizedBlock(headerHash []byte) error { +func (d *DriverStub) FinalizedBlock(finalizedBlock *outportcore.FinalizedBlock) error { if d.FinalizedBlockCalled != nil { - return d.FinalizedBlockCalled(headerHash) + return d.FinalizedBlockCalled(finalizedBlock) } return nil diff --git a/testscommon/outport/outportDataProviderStub.go b/testscommon/outport/outportDataProviderStub.go index b8cae58fd6e..6f8e13b4798 100644 --- a/testscommon/outport/outportDataProviderStub.go +++ b/testscommon/outport/outportDataProviderStub.go @@ -9,13 +9,13 @@ import ( type OutportDataProviderStub struct { PrepareOutportSaveBlockDataCalled func( arg process.ArgPrepareOutportSaveBlockData, - ) (*outportcore.ArgsSaveBlockData, error) + ) (*outportcore.OutportBlock, error) } // PrepareOutportSaveBlockData - func (a *OutportDataProviderStub) PrepareOutportSaveBlockData( arg process.ArgPrepareOutportSaveBlockData, -) (*outportcore.ArgsSaveBlockData, error) { +) (*outportcore.OutportBlock, error) { if a.PrepareOutportSaveBlockDataCalled != nil { return a.PrepareOutportSaveBlockDataCalled(arg) } diff --git a/testscommon/outport/outportStub.go b/testscommon/outport/outportStub.go index faa200ef05f..bd9ea033a73 100644 --- a/testscommon/outport/outportStub.go +++ b/testscommon/outport/outportStub.go @@ -1,37 +1,36 @@ package outport import ( - "github.com/multiversx/mx-chain-core-go/data" outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-go/outport" ) // 
OutportStub is a mock implementation fot the OutportHandler interface type OutportStub struct { - SaveBlockCalled func(args *outportcore.ArgsSaveBlockData) - SaveValidatorsRatingCalled func(index string, validatorsInfo []*outportcore.ValidatorRatingInfo) - SaveValidatorsPubKeysCalled func(shardPubKeys map[uint32][][]byte, epoch uint32) + SaveBlockCalled func(args *outportcore.OutportBlock) + SaveValidatorsRatingCalled func(validatorsRating *outportcore.ValidatorsRating) + SaveValidatorsPubKeysCalled func(validatorsPubKeys *outportcore.ValidatorsPubKeys) HasDriversCalled func() bool } // SaveBlock - -func (as *OutportStub) SaveBlock(args *outportcore.ArgsSaveBlockData) { +func (as *OutportStub) SaveBlock(args *outportcore.OutportBlock) { if as.SaveBlockCalled != nil { as.SaveBlockCalled(args) } } // SaveValidatorsRating - -func (as *OutportStub) SaveValidatorsRating(index string, validatorsInfo []*outportcore.ValidatorRatingInfo) { +func (as *OutportStub) SaveValidatorsRating(validatorsRating *outportcore.ValidatorsRating) { if as.SaveValidatorsRatingCalled != nil { - as.SaveValidatorsRatingCalled(index, validatorsInfo) + as.SaveValidatorsRatingCalled(validatorsRating) } } // SaveValidatorsPubKeys - -func (as *OutportStub) SaveValidatorsPubKeys(shardPubKeys map[uint32][][]byte, epoch uint32) { +func (as *OutportStub) SaveValidatorsPubKeys(validatorsPubKeys *outportcore.ValidatorsPubKeys) { if as.SaveValidatorsPubKeysCalled != nil { - as.SaveValidatorsPubKeysCalled(shardPubKeys, epoch) + as.SaveValidatorsPubKeysCalled(validatorsPubKeys) } } @@ -49,12 +48,12 @@ func (as *OutportStub) HasDrivers() bool { } // RevertIndexedBlock - -func (as *OutportStub) RevertIndexedBlock(_ data.HeaderHandler, _ data.BodyHandler) { +func (as *OutportStub) RevertIndexedBlock(_ *outportcore.BlockData) { } // SaveAccounts - -func (as *OutportStub) SaveAccounts(_ uint64, _ map[string]*outportcore.AlteredAccount, _ uint32) { +func (as *OutportStub) SaveAccounts(_ *outportcore.Accounts) { } 
@@ -64,7 +63,7 @@ func (as *OutportStub) Close() error { } // SaveRoundsInfo - -func (as *OutportStub) SaveRoundsInfo(_ []*outportcore.RoundInfo) { +func (as *OutportStub) SaveRoundsInfo(_ *outportcore.RoundsInfo) { } @@ -74,5 +73,5 @@ func (as *OutportStub) SubscribeDriver(_ outport.Driver) error { } // FinalizedBlock - -func (as *OutportStub) FinalizedBlock(_ []byte) { +func (as *OutportStub) FinalizedBlock(_ *outportcore.FinalizedBlock) { } From fd9b838c27b418061279a6ddd231ea3d22b948f2 Mon Sep 17 00:00:00 2001 From: MariusC Date: Tue, 14 Mar 2023 15:55:09 +0200 Subject: [PATCH 051/221] FEAT: Refactor outportDataProvider.go --- go.mod | 2 +- go.sum | 4 +- outport/outport.go | 49 +++-- outport/process/errors.go | 14 ++ outport/process/outportDataProvider.go | 287 ++++++++++++++++++++++--- process/block/metablock.go | 4 +- process/block/shardblock.go | 10 +- 7 files changed, 306 insertions(+), 64 deletions(-) diff --git a/go.mod b/go.mod index f2407d1f868..fd41be26a22 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-core-go v1.1.35-0.20230309140731-222b64e21ca6 + github.com/multiversx/mx-chain-core-go v1.1.35-0.20230314123908-e69a88031a4f github.com/multiversx/mx-chain-crypto-go v1.2.5 github.com/multiversx/mx-chain-es-indexer-go v1.3.12 github.com/multiversx/mx-chain-logger-go v1.0.11 diff --git a/go.sum b/go.sum index c50b9921759..b4faf6c6218 100644 --- a/go.sum +++ b/go.sum @@ -610,8 +610,8 @@ github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUY github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.31/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go 
v1.1.35-0.20230309140731-222b64e21ca6 h1:+J4X/BmNQPz7+/bhQ/mSss3Gl70GpwabZObFDUK39SE= -github.com/multiversx/mx-chain-core-go v1.1.35-0.20230309140731-222b64e21ca6/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= +github.com/multiversx/mx-chain-core-go v1.1.35-0.20230314123908-e69a88031a4f h1:0ZxG8wHSn9N2apygXzxmMHMIE4mVgn7CG3YuwKDhCm0= +github.com/multiversx/mx-chain-core-go v1.1.35-0.20230314123908-e69a88031a4f/go.mod h1:HOJUlm7m8L3KbvfbqWb9TeXYskTvHevaRPFXUPpFayQ= github.com/multiversx/mx-chain-crypto-go v1.2.5 h1:tuq3BUNMhKud5DQbZi9DiVAAHUXypizy8zPH0NpTGZk= github.com/multiversx/mx-chain-crypto-go v1.2.5/go.mod h1:teqhNyWEqfMPgNn8sgWXlgtJ1a36jGCnhs/tRpXW6r4= github.com/multiversx/mx-chain-es-indexer-go v1.3.12 h1:b7B8KMrCHM0Ghh4W0s1jXLI5MknEAOo7ZChFAwUUYpY= diff --git a/outport/outport.go b/outport/outport.go index b0366e49c44..1e7b4dd219f 100644 --- a/outport/outport.go +++ b/outport/outport.go @@ -7,7 +7,6 @@ import ( "time" "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-core-go/data" outportcore "github.com/multiversx/mx-chain-core-go/data/outport" logger "github.com/multiversx/mx-chain-logger-go" ) @@ -44,7 +43,7 @@ func NewOutport(retrialInterval time.Duration) (*outport, error) { } // SaveBlock will save block for every driver -func (o *outport) SaveBlock(args *outportcore.ArgsSaveBlockData) { +func (o *outport) SaveBlock(args *outportcore.OutportBlock) { o.mutex.RLock() defer o.mutex.RUnlock() @@ -77,7 +76,7 @@ func (o *outport) monitorCompletionOnDriver(function string, driver Driver) chan return ch } -func (o *outport) saveBlockBlocking(args *outportcore.ArgsSaveBlockData, driver Driver) { +func (o *outport) saveBlockBlocking(args *outportcore.OutportBlock, driver Driver) { ch := o.monitorCompletionOnDriver("saveBlockBlocking", driver) defer close(ch) @@ -108,21 +107,21 @@ func (o *outport) shouldTerminate() bool { } // RevertIndexedBlock will revert block for every driver -func (o *outport) 
RevertIndexedBlock(header data.HeaderHandler, body data.BodyHandler) { +func (o *outport) RevertIndexedBlock(blockData *outportcore.BlockData) { o.mutex.RLock() defer o.mutex.RUnlock() for _, driver := range o.drivers { - o.revertIndexedBlockBlocking(header, body, driver) + o.revertIndexedBlockBlocking(blockData, driver) } } -func (o *outport) revertIndexedBlockBlocking(header data.HeaderHandler, body data.BodyHandler, driver Driver) { +func (o *outport) revertIndexedBlockBlocking(blockData *outportcore.BlockData, driver Driver) { ch := o.monitorCompletionOnDriver("revertIndexedBlockBlocking", driver) defer close(ch) for { - err := driver.RevertIndexedBlock(header, body) + err := driver.RevertIndexedBlock(blockData) if err == nil { return } @@ -139,7 +138,7 @@ func (o *outport) revertIndexedBlockBlocking(header data.HeaderHandler, body dat } // SaveRoundsInfo will save rounds information for every driver -func (o *outport) SaveRoundsInfo(roundsInfo []*outportcore.RoundInfo) { +func (o *outport) SaveRoundsInfo(roundsInfo *outportcore.RoundsInfo) { o.mutex.RLock() defer o.mutex.RUnlock() @@ -148,7 +147,7 @@ func (o *outport) SaveRoundsInfo(roundsInfo []*outportcore.RoundInfo) { } } -func (o *outport) saveRoundsInfoBlocking(roundsInfo []*outportcore.RoundInfo, driver Driver) { +func (o *outport) saveRoundsInfoBlocking(roundsInfo *outportcore.RoundsInfo, driver Driver) { ch := o.monitorCompletionOnDriver("saveRoundsInfoBlocking", driver) defer close(ch) @@ -170,21 +169,21 @@ func (o *outport) saveRoundsInfoBlocking(roundsInfo []*outportcore.RoundInfo, dr } // SaveValidatorsPubKeys will save validators public keys for every driver -func (o *outport) SaveValidatorsPubKeys(validatorsPubKeys map[uint32][][]byte, epoch uint32) { +func (o *outport) SaveValidatorsPubKeys(validatorsPubKeys *outportcore.ValidatorsPubKeys) { o.mutex.RLock() defer o.mutex.RUnlock() for _, driver := range o.drivers { - o.saveValidatorsPubKeysBlocking(validatorsPubKeys, epoch, driver) + 
o.saveValidatorsPubKeysBlocking(validatorsPubKeys, driver) } } -func (o *outport) saveValidatorsPubKeysBlocking(validatorsPubKeys map[uint32][][]byte, epoch uint32, driver Driver) { +func (o *outport) saveValidatorsPubKeysBlocking(validatorsPubKeys *outportcore.ValidatorsPubKeys, driver Driver) { ch := o.monitorCompletionOnDriver("saveValidatorsPubKeysBlocking", driver) defer close(ch) for { - err := driver.SaveValidatorsPubKeys(validatorsPubKeys, epoch) + err := driver.SaveValidatorsPubKeys(validatorsPubKeys) if err == nil { return } @@ -201,21 +200,21 @@ func (o *outport) saveValidatorsPubKeysBlocking(validatorsPubKeys map[uint32][][ } // SaveValidatorsRating will save validators rating for every driver -func (o *outport) SaveValidatorsRating(indexID string, infoRating []*outportcore.ValidatorRatingInfo) { +func (o *outport) SaveValidatorsRating(validatorsRating *outportcore.ValidatorsRating) { o.mutex.RLock() defer o.mutex.RUnlock() for _, driver := range o.drivers { - o.saveValidatorsRatingBlocking(indexID, infoRating, driver) + o.saveValidatorsRatingBlocking(validatorsRating, driver) } } -func (o *outport) saveValidatorsRatingBlocking(indexID string, infoRating []*outportcore.ValidatorRatingInfo, driver Driver) { +func (o *outport) saveValidatorsRatingBlocking(validatorsRating *outportcore.ValidatorsRating, driver Driver) { ch := o.monitorCompletionOnDriver("saveValidatorsRatingBlocking", driver) defer close(ch) for { - err := driver.SaveValidatorsRating(indexID, infoRating) + err := driver.SaveValidatorsRating(validatorsRating) if err == nil { return } @@ -232,21 +231,21 @@ func (o *outport) saveValidatorsRatingBlocking(indexID string, infoRating []*out } // SaveAccounts will save accounts for every driver -func (o *outport) SaveAccounts(blockTimestamp uint64, acc map[string]*outportcore.AlteredAccount, shardID uint32) { +func (o *outport) SaveAccounts(accounts *outportcore.Accounts) { o.mutex.RLock() defer o.mutex.RUnlock() for _, driver := range o.drivers { 
- o.saveAccountsBlocking(blockTimestamp, acc, shardID, driver) + o.saveAccountsBlocking(accounts, driver) } } -func (o *outport) saveAccountsBlocking(blockTimestamp uint64, acc map[string]*outportcore.AlteredAccount, shardID uint32, driver Driver) { +func (o *outport) saveAccountsBlocking(accounts *outportcore.Accounts, driver Driver) { ch := o.monitorCompletionOnDriver("saveAccountsBlocking", driver) defer close(ch) for { - err := driver.SaveAccounts(blockTimestamp, acc, shardID) + err := driver.SaveAccounts(accounts) if err == nil { return } @@ -263,21 +262,21 @@ func (o *outport) saveAccountsBlocking(blockTimestamp uint64, acc map[string]*ou } // FinalizedBlock will call all the drivers that a block is finalized -func (o *outport) FinalizedBlock(headerHash []byte) { +func (o *outport) FinalizedBlock(finalizedBlock *outportcore.FinalizedBlock) { o.mutex.RLock() defer o.mutex.RUnlock() for _, driver := range o.drivers { - o.finalizedBlockBlocking(headerHash, driver) + o.finalizedBlockBlocking(finalizedBlock, driver) } } -func (o *outport) finalizedBlockBlocking(headerHash []byte, driver Driver) { +func (o *outport) finalizedBlockBlocking(finalizedBlock *outportcore.FinalizedBlock, driver Driver) { ch := o.monitorCompletionOnDriver("finalizedBlockBlocking", driver) defer close(ch) for { - err := driver.FinalizedBlock(headerHash) + err := driver.FinalizedBlock(finalizedBlock) if err == nil { return } diff --git a/outport/process/errors.go b/outport/process/errors.go index 42ccbdac3a8..4d9ba29005a 100644 --- a/outport/process/errors.go +++ b/outport/process/errors.go @@ -17,6 +17,20 @@ var errNilHeaderHandler = errors.New("nil header handler") // errNilBodyHandler signal that provided body handler is nil var errNilBodyHandler = errors.New("nil body handler") +var errCannotCastBlockBody = errors.New("cannot cast block body") + +var errInvalidHeaderType = errors.New("received invalid/unknown header type") + +var errCannotCastTransaction = errors.New("cannot cast 
transaction") + +var errCannotCastSCR = errors.New("cannot cast smart contract result") + +var errCannotCastReward = errors.New("cannot cast reward transaction") + +var errCannotCastReceipt = errors.New("cannot cast receipt transaction") + +var errCannotCastEvent = errors.New("cannot cast event") + // ErrNilHasher signals that a nil hasher has been provided var ErrNilHasher = errors.New("nil hasher provided") diff --git a/outport/process/outportDataProvider.go b/outport/process/outportDataProvider.go index 6c16ee99d07..1288b4b9a4a 100644 --- a/outport/process/outportDataProvider.go +++ b/outport/process/outportDataProvider.go @@ -1,6 +1,7 @@ package process import ( + "encoding/hex" "fmt" "math/big" @@ -9,6 +10,11 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" outportcore "github.com/multiversx/mx-chain-core-go/data/outport" + "github.com/multiversx/mx-chain-core-go/data/receipt" + "github.com/multiversx/mx-chain-core-go/data/rewardTx" + "github.com/multiversx/mx-chain-core-go/data/smartContractResult" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/outport/process/alteredaccounts/shared" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" @@ -26,6 +32,7 @@ type ArgOutportDataProvider struct { GasConsumedProvider GasConsumedProvider EconomicsData EconomicsDataHandler ExecutionOrderHandler ExecutionOrderHandler + Marshaller marshal.Marshalizer } // ArgPrepareOutportSaveBlockData holds the arguments needed for prepare outport save block data @@ -36,6 +43,8 @@ type ArgPrepareOutportSaveBlockData struct { PreviousHeader data.HeaderHandler RewardsTxs map[string]data.TransactionHandler NotarizedHeadersHashes []string + HighestFinalBlockNonce uint64 + HighestFinalBlockHash []byte } type outportDataProvider struct { @@ -49,6 +58,7 @@ type outportDataProvider struct { 
gasConsumedProvider GasConsumedProvider economicsData EconomicsDataHandler executionOrderHandler ExecutionOrderHandler + marshaller marshal.Marshalizer } // NewOutportDataProvider will create a new instance of outportDataProvider @@ -63,11 +73,12 @@ func NewOutportDataProvider(arg ArgOutportDataProvider) (*outportDataProvider, e gasConsumedProvider: arg.GasConsumedProvider, economicsData: arg.EconomicsData, executionOrderHandler: arg.ExecutionOrderHandler, + marshaller: arg.Marshaller, }, nil } // PrepareOutportSaveBlockData will prepare the provided data in a format that will be accepted by an outport driver -func (odp *outportDataProvider) PrepareOutportSaveBlockData(arg ArgPrepareOutportSaveBlockData) (*outportcore.ArgsSaveBlockData, error) { +func (odp *outportDataProvider) PrepareOutportSaveBlockData(arg ArgPrepareOutportSaveBlockData) (*outportcore.OutportBlock, error) { if check.IfNil(arg.Header) { return nil, errNilHeaderHandler } @@ -75,8 +86,12 @@ func (odp *outportDataProvider) PrepareOutportSaveBlockData(arg ArgPrepareOutpor return nil, errNilBodyHandler } - pool := odp.createPool(arg.RewardsTxs) - err := odp.transactionsFeeProcessor.PutFeeAndGasUsed(pool) + pool, err := odp.createPool(arg.RewardsTxs) + if err != nil { + return nil, err + } + + err = odp.transactionsFeeProcessor.PutFeeAndGasUsed(pool) if err != nil { return nil, fmt.Errorf("transactionsFeeProcessor.PutFeeAndGasUsed %w", err) } @@ -100,23 +115,38 @@ func (odp *outportDataProvider) PrepareOutportSaveBlockData(arg ArgPrepareOutpor if err != nil { return nil, err } + body, err := getBody(arg.Body) + if err != nil { + return nil, err + } - return &outportcore.ArgsSaveBlockData{ - HeaderHash: arg.HeaderHash, - Body: arg.Body, - Header: arg.Header, - SignersIndexes: signersIndexes, - HeaderGasConsumption: outportcore.HeaderGasConsumption{ + headerBytes, headerType, err := odp.getHeaderBytes(arg.Header) + if err != nil { + return nil, err + } + + return &outportcore.OutportBlock{ + BlockData: 
&outportcore.BlockData{ + HeaderBytes: headerBytes, + HeaderType: string(headerType), + HeaderHash: arg.HeaderHash, + Body: body, + }, + TransactionPool: pool, + HeaderGasConsumption: &outportcore.HeaderGasConsumption{ GasProvided: odp.gasConsumedProvider.TotalGasProvidedWithScheduled(), GasRefunded: odp.gasConsumedProvider.TotalGasRefunded(), GasPenalized: odp.gasConsumedProvider.TotalGasPenalized(), MaxGasPerBlock: odp.economicsData.MaxGasLimitPerBlock(odp.shardID), }, - NotarizedHeadersHashes: arg.NotarizedHeadersHashes, - TransactionsPool: pool, AlteredAccounts: alteredAccounts, + NotarizedHeadersHashes: arg.NotarizedHeadersHashes, NumberOfShards: odp.numOfShards, IsImportDB: odp.isImportDBMode, + SignersIndexes: signersIndexes, + + HighestFinalBlockNonce: arg.HighestFinalBlockNonce, + HighestFinalBlockHash: arg.HighestFinalBlockHash, }, nil } @@ -150,7 +180,42 @@ func (odp *outportDataProvider) getSignersIndexes(header data.HeaderHandler) ([] return signersIndexes, nil } -func (odp *outportDataProvider) createPool(rewardsTxs map[string]data.TransactionHandler) *outportcore.Pool { +func getBody(bodyHandler data.BodyHandler) (*block.Body, error) { + if check.IfNil(bodyHandler) { + return nil, errNilBodyHandler + } + + body, castOk := bodyHandler.(*block.Body) + if !castOk { + return nil, errCannotCastBlockBody + } + + return body, nil +} + +func (odp *outportDataProvider) getHeaderBytes(headerHandler data.HeaderHandler) ([]byte, core.HeaderType, error) { + var err error + var headerBytes []byte + var headerType core.HeaderType + + switch header := headerHandler.(type) { + case *block.MetaBlock: + headerType = core.MetaHeader + headerBytes, err = odp.marshaller.Marshal(header) + case *block.Header: + headerType = core.ShardHeaderV1 + headerBytes, err = odp.marshaller.Marshal(header) + case *block.HeaderV2: + headerType = core.ShardHeaderV2 + headerBytes, err = odp.marshaller.Marshal(header) + default: + return nil, "", errInvalidHeaderType + } + + return 
headerBytes, headerType, err +} + +func (odp *outportDataProvider) createPool(rewardsTxs map[string]data.TransactionHandler) (*outportcore.TransactionPool, error) { if odp.shardID == core.MetachainShardId { return odp.createPoolForMeta(rewardsTxs) } @@ -158,33 +223,193 @@ func (odp *outportDataProvider) createPool(rewardsTxs map[string]data.Transactio return odp.createPoolForShard() } -func (odp *outportDataProvider) createPoolForShard() *outportcore.Pool { - return &outportcore.Pool{ - Txs: WrapTxsMap(odp.txCoordinator.GetAllCurrentUsedTxs(block.TxBlock)), - Scrs: WrapTxsMap(odp.txCoordinator.GetAllCurrentUsedTxs(block.SmartContractResultBlock)), - Rewards: WrapTxsMap(odp.txCoordinator.GetAllCurrentUsedTxs(block.RewardsBlock)), - Invalid: WrapTxsMap(odp.txCoordinator.GetAllCurrentUsedTxs(block.InvalidBlock)), - Receipts: WrapTxsMap(odp.txCoordinator.GetAllCurrentUsedTxs(block.ReceiptBlock)), - Logs: odp.txCoordinator.GetAllCurrentLogs(), +func (odp *outportDataProvider) createPoolForShard() (*outportcore.TransactionPool, error) { + txs, err := getTxs(odp.txCoordinator.GetAllCurrentUsedTxs(block.TxBlock)) + if err != nil { + return nil, err + } + + scrs, err := getScrs(odp.txCoordinator.GetAllCurrentUsedTxs(block.SmartContractResultBlock)) + if err != nil { + return nil, err + } + + rewards, err := getRewards(odp.txCoordinator.GetAllCurrentUsedTxs(block.RewardsBlock)) + if err != nil { + return nil, err + } + + invalidTxs, err := getTxs(odp.txCoordinator.GetAllCurrentUsedTxs(block.InvalidBlock)) + if err != nil { + return nil, err } + + receipts, err := getReceipts(odp.txCoordinator.GetAllCurrentUsedTxs(block.ReceiptBlock)) + if err != nil { + return nil, err + } + + logs, err := getLogs(odp.txCoordinator.GetAllCurrentLogs()) + if err != nil { + return nil, err + } + + return &outportcore.TransactionPool{ + Transactions: txs, + SmartContractResults: scrs, + Rewards: rewards, + InvalidTxs: invalidTxs, + Receipts: receipts, + Logs: logs, + }, nil } -func (odp 
*outportDataProvider) createPoolForMeta(rewardsTxs map[string]data.TransactionHandler) *outportcore.Pool { - return &outportcore.Pool{ - Txs: WrapTxsMap(odp.txCoordinator.GetAllCurrentUsedTxs(block.TxBlock)), - Scrs: WrapTxsMap(odp.txCoordinator.GetAllCurrentUsedTxs(block.SmartContractResultBlock)), - Rewards: WrapTxsMap(rewardsTxs), - Logs: odp.txCoordinator.GetAllCurrentLogs(), +func (odp *outportDataProvider) createPoolForMeta(rewardsTxs map[string]data.TransactionHandler) (*outportcore.TransactionPool, error) { + txs, err := getTxs(odp.txCoordinator.GetAllCurrentUsedTxs(block.TxBlock)) + if err != nil { + return nil, err + } + + scrs, err := getScrs(odp.txCoordinator.GetAllCurrentUsedTxs(block.SmartContractResultBlock)) + if err != nil { + return nil, err + } + + rewards, err := getRewards(rewardsTxs) + if err != nil { + return nil, err + } + + logs, err := getLogs(odp.txCoordinator.GetAllCurrentLogs()) + if err != nil { + return nil, err + } + + return &outportcore.TransactionPool{ + Transactions: txs, + SmartContractResults: scrs, + Rewards: rewards, + Logs: logs, + }, nil +} + +func getTxs(txs map[string]data.TransactionHandler) (map[string]*outportcore.TxInfo, error) { + ret := make(map[string]*outportcore.TxInfo, len(txs)) + + for txHash, txHandler := range txs { + tx, castOk := txHandler.(*transaction.Transaction) + txHashHex := getHexEncodedHash(txHash) + if !castOk { + return nil, fmt.Errorf("%w, hash: %s", errCannotCastTransaction, txHashHex) + } + + ret[txHashHex] = &outportcore.TxInfo{ + Transaction: tx, + FeeInfo: newFeeInfo(), + } + } + + return ret, nil +} + +func getHexEncodedHash(txHash string) string { + txHashBytes := []byte(txHash) + return hex.EncodeToString(txHashBytes) +} + +func newFeeInfo() *outportcore.FeeInfo { + return &outportcore.FeeInfo{ + GasUsed: 0, + Fee: big.NewInt(0), + InitialPaidFee: big.NewInt(0), } } -func WrapTxsMap(txs map[string]data.TransactionHandler) map[string]data.TransactionHandlerWithGasUsedAndFee { - newMap := 
make(map[string]data.TransactionHandlerWithGasUsedAndFee, len(txs)) - for txHash, tx := range txs { - newMap[txHash] = outportcore.NewTransactionHandlerWithGasAndFee(tx, 0, big.NewInt(0)) +func getScrs(scrs map[string]data.TransactionHandler) (map[string]*outportcore.SCRInfo, error) { + ret := make(map[string]*outportcore.SCRInfo, len(scrs)) + + for scrHash, txHandler := range scrs { + scr, castOk := txHandler.(*smartContractResult.SmartContractResult) + scrHashHex := getHexEncodedHash(scrHash) + if !castOk { + return nil, fmt.Errorf("%w, hash: %s", errCannotCastSCR, scrHashHex) + } + + ret[scrHashHex] = &outportcore.SCRInfo{ + SmartContractResult: scr, + FeeInfo: newFeeInfo(), + } + } + + return ret, nil +} + +func getRewards(rewards map[string]data.TransactionHandler) (map[string]*outportcore.RewardInfo, error) { + ret := make(map[string]*outportcore.RewardInfo, len(rewards)) + + for hash, txHandler := range rewards { + reward, castOk := txHandler.(*rewardTx.RewardTx) + hexHex := getHexEncodedHash(hash) + if !castOk { + return nil, fmt.Errorf("%w, hash: %s", errCannotCastReward, hexHex) + } + + ret[hexHex] = &outportcore.RewardInfo{ + Reward: reward, + } + } + + return ret, nil +} + +func getReceipts(receipts map[string]data.TransactionHandler) (map[string]*receipt.Receipt, error) { + ret := make(map[string]*receipt.Receipt, len(receipts)) + + for hash, receiptHandler := range receipts { + tx, castOk := receiptHandler.(*receipt.Receipt) + hashHex := getHexEncodedHash(hash) + if !castOk { + return nil, fmt.Errorf("%w, hash: %s", errCannotCastReceipt, hashHex) + } + + ret[hashHex] = tx + } + + return ret, nil +} + +func getLogs(logs []*data.LogData) (map[string]*transaction.Log, error) { + ret := make(map[string]*transaction.Log, len(logs)) + + for _, logHandler := range logs { + eventHandlers := logHandler.GetLogEvents() + events, err := getEvents(eventHandlers) + txHashHex := getHexEncodedHash(logHandler.TxHash) + if err != nil { + return nil, fmt.Errorf("%w, 
hash: %s", err, txHashHex) + } + + ret[txHashHex] = &transaction.Log{ + Address: logHandler.GetAddress(), + Events: events, + } + } + return ret, nil +} + +func getEvents(eventHandlers []data.EventHandler) ([]*transaction.Event, error) { + events := make([]*transaction.Event, len(eventHandlers)) + + for idx, eventHandler := range eventHandlers { + event, castOk := eventHandler.(*transaction.Event) + if !castOk { + return nil, errCannotCastEvent + } + + events[idx] = event } - return newMap + return events, nil } // IsInterfaceNil returns true if there is no value under the interface diff --git a/process/block/metablock.go b/process/block/metablock.go index 17dd596bcf8..fcb1ba141ef 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -617,9 +617,11 @@ func (mp *metaProcessor) indexBlock( HeaderHash: headerHash, Header: metaBlock, Body: body, + PreviousHeader: lastMetaBlock, RewardsTxs: rewardsTxs, NotarizedHeadersHashes: notarizedHeadersHashes, - PreviousHeader: lastMetaBlock, + HighestFinalBlockNonce: mp.forkDetector.GetHighestFinalBlockNonce(), + HighestFinalBlockHash: mp.forkDetector.GetHighestFinalBlockHash(), }) if err != nil { log.Warn("metaProcessor.indexBlock cannot prepare argSaveBlock", "error", err.Error()) diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 42eed2856b5..8a90cc45295 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -596,10 +596,12 @@ func (sp *shardProcessor) indexBlockIfNeeded( log.Debug("preparing to index block", "hash", headerHash, "nonce", header.GetNonce(), "round", header.GetRound()) argSaveBlock, err := sp.outportDataProvider.PrepareOutportSaveBlockData(processOutport.ArgPrepareOutportSaveBlockData{ - HeaderHash: headerHash, - Header: header, - Body: body, - PreviousHeader: lastBlockHeader, + HeaderHash: headerHash, + Header: header, + Body: body, + PreviousHeader: lastBlockHeader, + HighestFinalBlockNonce: sp.forkDetector.GetHighestFinalBlockNonce(), + 
HighestFinalBlockHash: sp.forkDetector.GetHighestFinalBlockHash(), }) if err != nil { log.Warn("shardProcessor.indexBlockIfNeeded cannot prepare argSaveBlock", "error", err.Error()) From 70042f47337010cb63ed093add29a3dd639a86be Mon Sep 17 00:00:00 2001 From: MariusC Date: Tue, 14 Mar 2023 16:21:37 +0200 Subject: [PATCH 052/221] FIX: Outport tests --- common/dtos.go | 6 +- go.mod | 2 +- go.sum | 4 +- outport/mock/executionOrderHandlerStub.go | 2 +- outport/outport_test.go | 73 +++++++++---------- outport/process/interface.go | 7 +- .../transactionsFeeProcessor.go | 14 ++-- 7 files changed, 55 insertions(+), 53 deletions(-) diff --git a/common/dtos.go b/common/dtos.go index 312562fa6cb..e7876a9131b 100644 --- a/common/dtos.go +++ b/common/dtos.go @@ -1,6 +1,8 @@ package common -import "github.com/multiversx/mx-chain-core-go/data/outport" +import ( + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" +) // GetProofResponse is a struct that stores the response of a GetProof API request type GetProofResponse struct { @@ -71,5 +73,5 @@ type EpochStartDataAPI struct { // AlteredAccountsForBlockAPIResponse holds the altered accounts for a certain block type AlteredAccountsForBlockAPIResponse struct { - Accounts []*outport.AlteredAccount `json:"accounts"` + Accounts []*alteredAccount.AlteredAccount `json:"accounts"` } diff --git a/go.mod b/go.mod index fd41be26a22..6f62f48df37 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-core-go v1.1.35-0.20230314123908-e69a88031a4f + github.com/multiversx/mx-chain-core-go v1.1.35-0.20230314140225-cdd3af9ba37e github.com/multiversx/mx-chain-crypto-go v1.2.5 github.com/multiversx/mx-chain-es-indexer-go v1.3.12 github.com/multiversx/mx-chain-logger-go v1.0.11 diff --git a/go.sum b/go.sum index b4faf6c6218..c431cdbdea9 100644 --- a/go.sum +++ b/go.sum @@ -610,8 +610,8 @@ 
github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUY github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.31/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.1.35-0.20230314123908-e69a88031a4f h1:0ZxG8wHSn9N2apygXzxmMHMIE4mVgn7CG3YuwKDhCm0= -github.com/multiversx/mx-chain-core-go v1.1.35-0.20230314123908-e69a88031a4f/go.mod h1:HOJUlm7m8L3KbvfbqWb9TeXYskTvHevaRPFXUPpFayQ= +github.com/multiversx/mx-chain-core-go v1.1.35-0.20230314140225-cdd3af9ba37e h1:B7Da37AgvQ1u82tjPv2L6EAhJ+PVPuU/9wq7v2aa0Xs= +github.com/multiversx/mx-chain-core-go v1.1.35-0.20230314140225-cdd3af9ba37e/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-crypto-go v1.2.5 h1:tuq3BUNMhKud5DQbZi9DiVAAHUXypizy8zPH0NpTGZk= github.com/multiversx/mx-chain-crypto-go v1.2.5/go.mod h1:teqhNyWEqfMPgNn8sgWXlgtJ1a36jGCnhs/tRpXW6r4= github.com/multiversx/mx-chain-es-indexer-go v1.3.12 h1:b7B8KMrCHM0Ghh4W0s1jXLI5MknEAOo7ZChFAwUUYpY= diff --git a/outport/mock/executionOrderHandlerStub.go b/outport/mock/executionOrderHandlerStub.go index 9d7805b65cf..96db500f223 100644 --- a/outport/mock/executionOrderHandlerStub.go +++ b/outport/mock/executionOrderHandlerStub.go @@ -11,7 +11,7 @@ type ExecutionOrderHandlerStub struct { // PutExecutionOrderInTransactionPool - func (e *ExecutionOrderHandlerStub) PutExecutionOrderInTransactionPool( - _ *outport.Pool, + _ *outport.TransactionPool, _ data.HeaderHandler, _ data.BodyHandler, _ data.HeaderHandler, diff --git a/outport/outport_test.go b/outport/outport_test.go index e55b7203765..88c1010af56 100644 --- a/outport/outport_test.go +++ b/outport/outport_test.go @@ -9,7 +9,6 @@ import ( "github.com/multiversx/mx-chain-core-go/core/atomic" 
"github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-core-go/data" outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-go/outport/mock" logger "github.com/multiversx/mx-chain-logger-go" @@ -43,7 +42,7 @@ func TestOutport_SaveAccounts(t *testing.T) { numCalled1 := 0 numCalled2 := 0 driver1 := &mock.DriverStub{ - SaveAccountsCalled: func(blockTimestamp uint64, accs map[string]*outportcore.AlteredAccount) error { + SaveAccountsCalled: func(accounts *outportcore.Accounts) error { numCalled1++ if numCalled1 < 10 { return expectedError @@ -53,7 +52,7 @@ func TestOutport_SaveAccounts(t *testing.T) { }, } driver2 := &mock.DriverStub{ - SaveAccountsCalled: func(blockTimestamp uint64, accs map[string]*outportcore.AlteredAccount) error { + SaveAccountsCalled: func(accounts *outportcore.Accounts) error { numCalled2++ return nil }, @@ -69,12 +68,12 @@ func TestOutport_SaveAccounts(t *testing.T) { } } - outportHandler.SaveAccounts(0, map[string]*outportcore.AlteredAccount{}, 0) + outportHandler.SaveAccounts(&outportcore.Accounts{}) time.Sleep(time.Second) _ = outportHandler.SubscribeDriver(driver1) _ = outportHandler.SubscribeDriver(driver2) - outportHandler.SaveAccounts(0, map[string]*outportcore.AlteredAccount{}, 0) + outportHandler.SaveAccounts(&outportcore.Accounts{}) time.Sleep(time.Second) assert.Equal(t, 10, numCalled1) @@ -89,7 +88,7 @@ func TestOutport_SaveBlock(t *testing.T) { numCalled1 := 0 numCalled2 := 0 driver1 := &mock.DriverStub{ - SaveBlockCalled: func(args *outportcore.ArgsSaveBlockData) error { + SaveBlockCalled: func(args *outportcore.OutportBlock) error { numCalled1++ if numCalled1 < 10 { return expectedError @@ -99,7 +98,7 @@ func TestOutport_SaveBlock(t *testing.T) { }, } driver2 := &mock.DriverStub{ - SaveBlockCalled: func(args *outportcore.ArgsSaveBlockData) error { + SaveBlockCalled: func(args *outportcore.OutportBlock) error { numCalled2++ return nil }, @@ -134,7 +133,7 @@ 
func TestOutport_SaveRoundsInfo(t *testing.T) { numCalled1 := 0 numCalled2 := 0 driver1 := &mock.DriverStub{ - SaveRoundsInfoCalled: func(roundsInfos []*outportcore.RoundInfo) error { + SaveRoundsInfoCalled: func(roundsInfos *outportcore.RoundsInfo) error { numCalled1++ if numCalled1 < 10 { return expectedError @@ -144,7 +143,7 @@ func TestOutport_SaveRoundsInfo(t *testing.T) { }, } driver2 := &mock.DriverStub{ - SaveRoundsInfoCalled: func(roundsInfos []*outportcore.RoundInfo) error { + SaveRoundsInfoCalled: func(roundsInfos *outportcore.RoundsInfo) error { numCalled2++ return nil }, @@ -179,7 +178,7 @@ func TestOutport_SaveValidatorsPubKeys(t *testing.T) { numCalled1 := 0 numCalled2 := 0 driver1 := &mock.DriverStub{ - SaveValidatorsPubKeysCalled: func(validatorsPubKeys map[uint32][][]byte, epoch uint32) error { + SaveValidatorsPubKeysCalled: func(validatorsRating *outportcore.ValidatorsPubKeys) error { numCalled1++ if numCalled1 < 10 { return expectedError @@ -189,7 +188,7 @@ func TestOutport_SaveValidatorsPubKeys(t *testing.T) { }, } driver2 := &mock.DriverStub{ - SaveValidatorsPubKeysCalled: func(validatorsPubKeys map[uint32][][]byte, epoch uint32) error { + SaveValidatorsPubKeysCalled: func(validatorsRating *outportcore.ValidatorsPubKeys) error { numCalled2++ return nil }, @@ -205,13 +204,13 @@ func TestOutport_SaveValidatorsPubKeys(t *testing.T) { } } - outportHandler.SaveValidatorsPubKeys(nil, 0) + outportHandler.SaveValidatorsPubKeys(&outportcore.ValidatorsPubKeys{}) time.Sleep(time.Second) _ = outportHandler.SubscribeDriver(driver1) _ = outportHandler.SubscribeDriver(driver2) - outportHandler.SaveValidatorsPubKeys(nil, 0) + outportHandler.SaveValidatorsPubKeys(&outportcore.ValidatorsPubKeys{}) time.Sleep(time.Second) assert.Equal(t, 10, numCalled1) @@ -226,7 +225,7 @@ func TestOutport_SaveValidatorsRating(t *testing.T) { numCalled1 := 0 numCalled2 := 0 driver1 := &mock.DriverStub{ - SaveValidatorsRatingCalled: func(indexID string, infoRating 
[]*outportcore.ValidatorRatingInfo) error { + SaveValidatorsRatingCalled: func(validatorsRating *outportcore.ValidatorsRating) error { numCalled1++ if numCalled1 < 10 { return expectedError @@ -236,7 +235,7 @@ func TestOutport_SaveValidatorsRating(t *testing.T) { }, } driver2 := &mock.DriverStub{ - SaveValidatorsRatingCalled: func(indexID string, infoRating []*outportcore.ValidatorRatingInfo) error { + SaveValidatorsRatingCalled: func(validatorsRating *outportcore.ValidatorsRating) error { numCalled2++ return nil }, @@ -252,13 +251,13 @@ func TestOutport_SaveValidatorsRating(t *testing.T) { } } - outportHandler.SaveValidatorsRating("", nil) + outportHandler.SaveValidatorsRating(&outportcore.ValidatorsRating{}) time.Sleep(time.Second) _ = outportHandler.SubscribeDriver(driver1) _ = outportHandler.SubscribeDriver(driver2) - outportHandler.SaveValidatorsRating("", nil) + outportHandler.SaveValidatorsRating(&outportcore.ValidatorsRating{}) time.Sleep(time.Second) assert.Equal(t, 10, numCalled1) @@ -273,7 +272,7 @@ func TestOutport_RevertIndexedBlock(t *testing.T) { numCalled1 := 0 numCalled2 := 0 driver1 := &mock.DriverStub{ - RevertBlockCalled: func(header data.HeaderHandler, body data.BodyHandler) error { + RevertIndexedBlockCalled: func(blockData *outportcore.BlockData) error { numCalled1++ if numCalled1 < 10 { return expectedError @@ -283,7 +282,7 @@ func TestOutport_RevertIndexedBlock(t *testing.T) { }, } driver2 := &mock.DriverStub{ - RevertBlockCalled: func(header data.HeaderHandler, body data.BodyHandler) error { + RevertIndexedBlockCalled: func(blockData *outportcore.BlockData) error { numCalled2++ return nil }, @@ -299,13 +298,13 @@ func TestOutport_RevertIndexedBlock(t *testing.T) { } } - outportHandler.RevertIndexedBlock(nil, nil) + outportHandler.RevertIndexedBlock(&outportcore.BlockData{}) time.Sleep(time.Second) _ = outportHandler.SubscribeDriver(driver1) _ = outportHandler.SubscribeDriver(driver2) - outportHandler.RevertIndexedBlock(nil, nil) + 
outportHandler.RevertIndexedBlock(&outportcore.BlockData{}) time.Sleep(time.Second) assert.Equal(t, 10, numCalled1) @@ -320,7 +319,7 @@ func TestOutport_FinalizedBlock(t *testing.T) { numCalled1 := 0 numCalled2 := 0 driver1 := &mock.DriverStub{ - FinalizedBlockCalled: func(headerHash []byte) error { + FinalizedBlockCalled: func(finalizedBlock *outportcore.FinalizedBlock) error { numCalled1++ if numCalled1 < 10 { return expectedError @@ -330,7 +329,7 @@ func TestOutport_FinalizedBlock(t *testing.T) { }, } driver2 := &mock.DriverStub{ - FinalizedBlockCalled: func(headerHash []byte) error { + FinalizedBlockCalled: func(finalizedBlock *outportcore.FinalizedBlock) error { numCalled2++ return nil }, @@ -414,25 +413,25 @@ func TestOutport_CloseWhileDriverIsStuckInContinuousErrors(t *testing.T) { localErr := errors.New("driver stuck in error") driver1 := &mock.DriverStub{ - SaveBlockCalled: func(args *outportcore.ArgsSaveBlockData) error { + SaveBlockCalled: func(args *outportcore.OutportBlock) error { return localErr }, - RevertBlockCalled: func(header data.HeaderHandler, body data.BodyHandler) error { + RevertIndexedBlockCalled: func(blockData *outportcore.BlockData) error { return localErr }, - SaveRoundsInfoCalled: func(roundsInfos []*outportcore.RoundInfo) error { + SaveRoundsInfoCalled: func(roundsInfos *outportcore.RoundsInfo) error { return localErr }, - SaveValidatorsPubKeysCalled: func(validatorsPubKeys map[uint32][][]byte, epoch uint32) error { + SaveValidatorsPubKeysCalled: func(validatorsPubKeys *outportcore.ValidatorsPubKeys) error { return localErr }, - SaveValidatorsRatingCalled: func(indexID string, infoRating []*outportcore.ValidatorRatingInfo) error { + SaveValidatorsRatingCalled: func(validatorsRating *outportcore.ValidatorsRating) error { return localErr }, - SaveAccountsCalled: func(timestamp uint64, accs map[string]*outportcore.AlteredAccount) error { + SaveAccountsCalled: func(accounts *outportcore.Accounts) error { return localErr }, - 
FinalizedBlockCalled: func(headerHash []byte) error { + FinalizedBlockCalled: func(finalizedBlock *outportcore.FinalizedBlock) error { return localErr }, CloseCalled: func() error { @@ -445,7 +444,7 @@ func TestOutport_CloseWhileDriverIsStuckInContinuousErrors(t *testing.T) { wg := &sync.WaitGroup{} wg.Add(9) go func() { - outportHandler.SaveAccounts(0, nil, 0) + outportHandler.SaveAccounts(nil) wg.Done() }() go func() { @@ -453,7 +452,7 @@ func TestOutport_CloseWhileDriverIsStuckInContinuousErrors(t *testing.T) { wg.Done() }() go func() { - outportHandler.RevertIndexedBlock(nil, nil) + outportHandler.RevertIndexedBlock(nil) wg.Done() }() go func() { @@ -461,15 +460,15 @@ func TestOutport_CloseWhileDriverIsStuckInContinuousErrors(t *testing.T) { wg.Done() }() go func() { - outportHandler.SaveValidatorsPubKeys(nil, 0) + outportHandler.SaveValidatorsPubKeys(nil) wg.Done() }() go func() { - outportHandler.SaveValidatorsRating("", nil) + outportHandler.SaveValidatorsRating(nil) wg.Done() }() go func() { - outportHandler.SaveAccounts(0, nil, 0) + outportHandler.SaveAccounts(nil) wg.Done() }() go func() { @@ -516,7 +515,7 @@ func TestOutport_SaveBlockDriverStuck(t *testing.T) { } _ = outportHandler.SubscribeDriver(&mock.DriverStub{ - SaveBlockCalled: func(args *outportcore.ArgsSaveBlockData) error { + SaveBlockCalled: func(args *outportcore.OutportBlock) error { time.Sleep(time.Second * 5) return nil }, @@ -555,7 +554,7 @@ func TestOutport_SaveBlockDriverIsNotStuck(t *testing.T) { } _ = outportHandler.SubscribeDriver(&mock.DriverStub{ - SaveBlockCalled: func(args *outportcore.ArgsSaveBlockData) error { + SaveBlockCalled: func(args *outportcore.OutportBlock) error { return nil }, }) diff --git a/outport/process/interface.go b/outport/process/interface.go index bdf8e3efc73..25f675975ce 100644 --- a/outport/process/interface.go +++ b/outport/process/interface.go @@ -4,19 +4,20 @@ import ( "math/big" "github.com/multiversx/mx-chain-core-go/data" + 
"github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-go/outport/process/alteredaccounts/shared" ) // AlteredAccountsProviderHandler defines the functionality needed for provisioning of altered accounts when indexing data type AlteredAccountsProviderHandler interface { - ExtractAlteredAccountsFromPool(txPool *outport.Pool, options shared.AlteredAccountsOptions) (map[string]*outport.AlteredAccount, error) + ExtractAlteredAccountsFromPool(txPool *outport.TransactionPool, options shared.AlteredAccountsOptions) (map[string]*alteredAccount.AlteredAccount, error) IsInterfaceNil() bool } // TransactionsFeeHandler defines the functionality needed for computation of the transaction fee and gas used type TransactionsFeeHandler interface { - PutFeeAndGasUsed(pool *outport.Pool) error + PutFeeAndGasUsed(pool *outport.TransactionPool) error IsInterfaceNil() bool } @@ -41,7 +42,7 @@ type EconomicsDataHandler interface { // ExecutionOrderHandler defines the interface for the execution order handler type ExecutionOrderHandler interface { PutExecutionOrderInTransactionPool( - pool *outport.Pool, + pool *outport.TransactionPool, header data.HeaderHandler, body data.BodyHandler, prevHeader data.HeaderHandler, diff --git a/outport/process/transactionsfee/transactionsFeeProcessor.go b/outport/process/transactionsfee/transactionsFeeProcessor.go index ca509ed3185..75696f2d83f 100644 --- a/outport/process/transactionsfee/transactionsFeeProcessor.go +++ b/outport/process/transactionsfee/transactionsFeeProcessor.go @@ -76,7 +76,7 @@ func checkArg(arg ArgTransactionsFeeProcessor) error { } // PutFeeAndGasUsed will compute and set in transactions pool fee and gas used -func (tep *transactionsFeeProcessor) PutFeeAndGasUsed(pool *outportcore.Pool) error { +func (tep *transactionsFeeProcessor) PutFeeAndGasUsed(pool *outportcore.TransactionPool) error { tep.prepareInvalidTxs(pool) txsWithResultsMap := 
prepareTransactionsAndScrs(pool) @@ -85,12 +85,12 @@ func (tep *transactionsFeeProcessor) PutFeeAndGasUsed(pool *outportcore.Pool) er return tep.prepareScrsNoTx(txsWithResultsMap) } -func (tep *transactionsFeeProcessor) prepareInvalidTxs(pool *outportcore.Pool) { - for _, invalidTx := range pool.Invalid { - fee := tep.txFeeCalculator.ComputeTxFeeBasedOnGasUsed(invalidTx, invalidTx.GetGasLimit()) - invalidTx.SetGasUsed(invalidTx.GetGasLimit()) - invalidTx.SetFee(fee) - invalidTx.SetInitialPaidFee(fee) +func (tep *transactionsFeeProcessor) prepareInvalidTxs(pool *outportcore.TransactionPool) { + for _, invalidTx := range pool.InvalidTxs { + fee := tep.txFeeCalculator.ComputeTxFeeBasedOnGasUsed(invalidTx.Transaction, invalidTx.Transaction.GasLimit) + invalidTx.FeeInfo.SetGasUsed(invalidTx.Transaction.GetGasLimit()) + invalidTx.FeeInfo.SetFee(fee) + invalidTx.FeeInfo.SetInitialPaidFee(fee) } } From a2a058921c71a4b071a75fae023a29e080a2abab Mon Sep 17 00:00:00 2001 From: MariusC Date: Wed, 15 Mar 2023 11:37:58 +0200 Subject: [PATCH 053/221] FIX: Refactor outport --- api/groups/blockGroup.go | 4 +- api/groups/blockGroup_test.go | 12 +-- facade/nodeFacade.go | 4 +- factory/processing/export_test.go | 4 +- factory/processing/processComponents.go | 13 +-- go.mod | 2 +- go.sum | 4 +- integrationTests/interface.go | 4 +- node/external/blockAPI/baseBlock.go | 88 ++++++++++--------- node/external/blockAPI/interface.go | 4 +- node/external/blockAPI/metaBlock.go | 4 +- node/external/blockAPI/shardBlock.go | 4 +- node/external/nodeApiResolver.go | 4 +- node/mock/apiBlockHandlerStub.go | 6 +- outport/notifier/eventNotifier.go | 3 +- .../alteredAccountsProvider_test.go | 71 +++++++-------- testscommon/alteredAccountsProviderStub.go | 5 +- 17 files changed, 121 insertions(+), 115 deletions(-) diff --git a/api/groups/blockGroup.go b/api/groups/blockGroup.go index ce49375137a..485b7a4ac62 100644 --- a/api/groups/blockGroup.go +++ b/api/groups/blockGroup.go @@ -10,8 +10,8 @@ import ( 
"github.com/gin-gonic/gin" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" - "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-go/api/errors" "github.com/multiversx/mx-chain-go/api/shared" "github.com/multiversx/mx-chain-go/api/shared/logging" @@ -33,7 +33,7 @@ type blockFacadeHandler interface { GetBlockByHash(hash string, options api.BlockQueryOptions) (*api.Block, error) GetBlockByNonce(nonce uint64, options api.BlockQueryOptions) (*api.Block, error) GetBlockByRound(round uint64, options api.BlockQueryOptions) (*api.Block, error) - GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) + GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) IsInterfaceNil() bool } diff --git a/api/groups/blockGroup_test.go b/api/groups/blockGroup_test.go index 2ef12bc17f3..fad93c6ab7d 100644 --- a/api/groups/blockGroup_test.go +++ b/api/groups/blockGroup_test.go @@ -9,8 +9,8 @@ import ( "testing" "github.com/gin-gonic/gin" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" - "github.com/multiversx/mx-chain-core-go/data/outport" apiErrors "github.com/multiversx/mx-chain-go/api/errors" "github.com/multiversx/mx-chain-go/api/groups" "github.com/multiversx/mx-chain-go/api/mock" @@ -38,7 +38,7 @@ func TestNewBlockGroup(t *testing.T) { type alteredAccountsForBlockResponse struct { Data struct { - Accounts []*outport.AlteredAccount `json:"accounts"` + Accounts []*alteredAccount.AlteredAccount `json:"accounts"` } `json:"data"` Error string `json:"error"` Code string `json:"code"` @@ -442,7 +442,7 @@ func TestGetBlockByRound_WithBlockQueryOptionsShouldWork(t *testing.T) { func TestGetAlteredAccountsByNonce_ShouldWork(t *testing.T) { t.Parallel() - 
expectedResponse := []*outport.AlteredAccount{ + expectedResponse := []*alteredAccount.AlteredAccount{ { Address: "alice", Balance: "100000", @@ -450,7 +450,7 @@ func TestGetAlteredAccountsByNonce_ShouldWork(t *testing.T) { } facade := mock.FacadeStub{ - GetAlteredAccountsForBlockCalled: func(options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) { + GetAlteredAccountsForBlockCalled: func(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) { require.Equal(t, api.BlockFetchTypeByNonce, options.RequestType) require.Equal(t, uint64(37), options.Nonce) @@ -473,14 +473,14 @@ func TestGetAlteredAccountsByNonce_ShouldWork(t *testing.T) { func TestGetAlteredAccountsByHash_ShouldWork(t *testing.T) { t.Parallel() - expectedResponse := []*outport.AlteredAccount{ + expectedResponse := []*alteredAccount.AlteredAccount{ { Address: "alice", Balance: "100000", }, } facade := mock.FacadeStub{ - GetAlteredAccountsForBlockCalled: func(options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) { + GetAlteredAccountsForBlockCalled: func(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) { require.Equal(t, api.BlockFetchTypeByHash, options.RequestType) require.Equal(t, "aabb", hex.EncodeToString(options.Hash)) diff --git a/facade/nodeFacade.go b/facade/nodeFacade.go index 0c7ee09d43d..f7321993c38 100644 --- a/facade/nodeFacade.go +++ b/facade/nodeFacade.go @@ -12,9 +12,9 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/core/throttler" chainData "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" apiData "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/esdt" - "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/transaction" 
"github.com/multiversx/mx-chain-core-go/data/vm" "github.com/multiversx/mx-chain-go/common" @@ -507,7 +507,7 @@ func (nf *nodeFacade) GetBlockByRound(round uint64, options apiData.BlockQueryOp } // GetAlteredAccountsForBlock returns the altered accounts for a given block -func (nf *nodeFacade) GetAlteredAccountsForBlock(options apiData.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) { +func (nf *nodeFacade) GetAlteredAccountsForBlock(options apiData.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) { return nf.apiResolver.GetAlteredAccountsForBlock(options) } diff --git a/factory/processing/export_test.go b/factory/processing/export_test.go index f9cae468a41..a6bbf59b9a8 100644 --- a/factory/processing/export_test.go +++ b/factory/processing/export_test.go @@ -2,7 +2,7 @@ package processing import ( "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-core-go/data/outport" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/factory" @@ -51,5 +51,5 @@ func (pcf *processComponentsFactory) NewBlockProcessor( // IndexGenesisBlocks - func (pcf *processComponentsFactory) IndexGenesisBlocks(genesisBlocks map[uint32]data.HeaderHandler, indexingData map[uint32]*genesis.IndexingData) error { - return pcf.indexGenesisBlocks(genesisBlocks, indexingData, map[string]*outport.AlteredAccount{}) + return pcf.indexGenesisBlocks(genesisBlocks, indexingData, map[string]*alteredAccount.AlteredAccount{}) } diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index ddd81c46aad..2196f3e81e2 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -12,6 +12,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/core/partitioning" 
"github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" dataBlock "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/outport" nodeFactory "github.com/multiversx/mx-chain-go/cmd/node/factory" @@ -884,9 +885,9 @@ func (pcf *processComponentsFactory) generateGenesisHeadersAndApplyInitialBalanc return genesisBlocks, indexingData, nil } -func (pcf *processComponentsFactory) indexAndReturnGenesisAccounts() (map[string]*outport.AlteredAccount, error) { +func (pcf *processComponentsFactory) indexAndReturnGenesisAccounts() (map[string]*alteredAccount.AlteredAccount, error) { if !pcf.statusComponents.OutportHandler().HasDrivers() { - return map[string]*outport.AlteredAccount{}, nil + return map[string]*alteredAccount.AlteredAccount{}, nil } rootHash, err := pcf.state.AccountsAdapter().RootHash() @@ -903,7 +904,7 @@ func (pcf *processComponentsFactory) indexAndReturnGenesisAccounts() (map[string return nil, err } - genesisAccounts := make(map[string]*outport.AlteredAccount, 0) + genesisAccounts := make(map[string]*alteredAccount.AlteredAccount, 0) for leaf := range leavesChannels.LeavesChan { userAccount, errUnmarshal := pcf.unmarshalUserAccount(leaf.Key(), leaf.Value()) if errUnmarshal != nil { @@ -912,7 +913,7 @@ func (pcf *processComponentsFactory) indexAndReturnGenesisAccounts() (map[string } encodedAddress := pcf.coreData.AddressPubKeyConverter().Encode(userAccount.AddressBytes()) - genesisAccounts[encodedAddress] = &outport.AlteredAccount{ + genesisAccounts[encodedAddress] = &alteredAccount.AlteredAccount{ AdditionalData: &outport.AdditionalAccountData{ BalanceChanged: true, }, @@ -1098,7 +1099,7 @@ func (pcf *processComponentsFactory) createGenesisMiniBlockHandlers(miniBlocks [ func (pcf *processComponentsFactory) indexGenesisBlocks( genesisBlocks map[uint32]data.HeaderHandler, initialIndexingData map[uint32]*genesis.IndexingData, - alteredAccounts 
map[string]*outport.AlteredAccount, + alteredAccounts map[string]*alteredAccount.AlteredAccount, ) error { currentShardId := pcf.bootstrapComponents.ShardCoordinator().SelfId() originalGenesisBlockHeader := genesisBlocks[currentShardId] @@ -1122,7 +1123,7 @@ func (pcf *processComponentsFactory) indexGenesisBlocks( // manually add the genesis minting address as it is not exist in the trie genesisAddress := pcf.accountsParser.GenesisMintingAddress() - alteredAccounts[genesisAddress] = &outport.AlteredAccount{ + alteredAccounts[genesisAddress] = &alteredAccount.AlteredAccount{ Address: genesisAddress, Balance: "0", } diff --git a/go.mod b/go.mod index 6f62f48df37..e6258e75629 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/mitchellh/mapstructure v1.5.0 github.com/multiversx/mx-chain-core-go v1.1.35-0.20230314140225-cdd3af9ba37e github.com/multiversx/mx-chain-crypto-go v1.2.5 - github.com/multiversx/mx-chain-es-indexer-go v1.3.12 + github.com/multiversx/mx-chain-es-indexer-go v1.3.15-0.20230314145155-a08f7f7021bd github.com/multiversx/mx-chain-logger-go v1.0.11 github.com/multiversx/mx-chain-p2p-go v1.0.13 github.com/multiversx/mx-chain-storage-go v1.0.7 diff --git a/go.sum b/go.sum index c431cdbdea9..a7e5bc35b50 100644 --- a/go.sum +++ b/go.sum @@ -614,8 +614,8 @@ github.com/multiversx/mx-chain-core-go v1.1.35-0.20230314140225-cdd3af9ba37e h1: github.com/multiversx/mx-chain-core-go v1.1.35-0.20230314140225-cdd3af9ba37e/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-crypto-go v1.2.5 h1:tuq3BUNMhKud5DQbZi9DiVAAHUXypizy8zPH0NpTGZk= github.com/multiversx/mx-chain-crypto-go v1.2.5/go.mod h1:teqhNyWEqfMPgNn8sgWXlgtJ1a36jGCnhs/tRpXW6r4= -github.com/multiversx/mx-chain-es-indexer-go v1.3.12 h1:b7B8KMrCHM0Ghh4W0s1jXLI5MknEAOo7ZChFAwUUYpY= -github.com/multiversx/mx-chain-es-indexer-go v1.3.12/go.mod h1:IV42GfhkqQ5vVO0OzGaF/ejp8TQrLkNo4LSB3TPnVhg= +github.com/multiversx/mx-chain-es-indexer-go 
v1.3.15-0.20230314145155-a08f7f7021bd h1:IPrhwnzjqCKKlmRd8h/uzYiCCoLOYiVHB5QTFfUbt00= +github.com/multiversx/mx-chain-es-indexer-go v1.3.15-0.20230314145155-a08f7f7021bd/go.mod h1:QcF/hS31tE/Tq/YX1QTPdpKCcbXPQ5HqcKM76p6rYYE= github.com/multiversx/mx-chain-logger-go v1.0.11 h1:DFsHa+sc5fKwhDR50I8uBM99RTDTEW68ESyr5ALRDwE= github.com/multiversx/mx-chain-logger-go v1.0.11/go.mod h1:1srDkP0DQucWQ+rYfaq0BX2qLnULsUdRPADpYUTM6dA= github.com/multiversx/mx-chain-p2p-go v1.0.13 h1:woIlYkDFCKYyJQ5urDcOzz8HUFGsSEhTfUXDDxNI2zM= diff --git a/integrationTests/interface.go b/integrationTests/interface.go index 676be442104..5f86a9af17d 100644 --- a/integrationTests/interface.go +++ b/integrationTests/interface.go @@ -4,10 +4,10 @@ import ( "math/big" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" dataApi "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/esdt" - "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-core-go/data/vm" "github.com/multiversx/mx-chain-go/common" @@ -108,6 +108,6 @@ type Facade interface { GetTransactionsPoolForSender(sender, fields string) (*common.TransactionsPoolForSenderApiResponse, error) GetLastPoolNonceForSender(sender string) (uint64, error) GetTransactionsPoolNonceGapsForSender(sender string) (*common.TransactionsPoolNonceGapsForSenderApiResponse, error) - GetAlteredAccountsForBlock(options dataApi.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) + GetAlteredAccountsForBlock(options dataApi.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) IsInterfaceNil() bool } diff --git a/node/external/blockAPI/baseBlock.go b/node/external/blockAPI/baseBlock.go index 2c0867ef2ac..255464fa879 100644 --- a/node/external/blockAPI/baseBlock.go +++ 
b/node/external/blockAPI/baseBlock.go @@ -10,6 +10,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/outport" @@ -359,11 +360,11 @@ func bigIntToStr(value *big.Int) string { return value.String() } -func alteredAccountsMapToAPIResponse(alteredAccounts map[string]*outport.AlteredAccount, tokensFilter string) []*outport.AlteredAccount { - response := make([]*outport.AlteredAccount, 0, len(alteredAccounts)) +func alteredAccountsMapToAPIResponse(alteredAccounts map[string]*alteredAccount.AlteredAccount, tokensFilter string) []*alteredAccount.AlteredAccount { + response := make([]*alteredAccount.AlteredAccount, 0, len(alteredAccounts)) for address, altAccount := range alteredAccounts { - apiAlteredAccount := &outport.AlteredAccount{ + apiAlteredAccount := &alteredAccount.AlteredAccount{ Address: address, Balance: altAccount.Balance, Nonce: altAccount.Nonce, @@ -379,13 +380,13 @@ func alteredAccountsMapToAPIResponse(alteredAccounts map[string]*outport.Altered return response } -func attachTokensToAlteredAccount(apiAlteredAccount *outport.AlteredAccount, altAccount *outport.AlteredAccount, tokensFilter string) { +func attachTokensToAlteredAccount(apiAlteredAccount *alteredAccount.AlteredAccount, altAccount *alteredAccount.AlteredAccount, tokensFilter string) { for _, token := range altAccount.Tokens { if !shouldAddTokenToResult(token.Identifier, tokensFilter) { continue } - apiAlteredAccount.Tokens = append(apiAlteredAccount.Tokens, &outport.AccountTokenData{ + apiAlteredAccount.Tokens = append(apiAlteredAccount.Tokens, &alteredAccount.AccountTokenData{ Identifier: token.Identifier, Balance: token.Balance, Nonce: token.Nonce, @@ -408,7 +409,7 @@ func 
shouldIncludeAllTokens(tokensFilter string) bool { return tokensFilter == "*" || tokensFilter == "all" } -func (bap *baseAPIBlockProcessor) apiBlockToAlteredAccounts(apiBlock *api.Block, options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) { +func (bap *baseAPIBlockProcessor) apiBlockToAlteredAccounts(apiBlock *api.Block, options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) { blockHash, err := hex.DecodeString(apiBlock.Hash) if err != nil { return nil, err @@ -448,13 +449,13 @@ func (bap *baseAPIBlockProcessor) apiBlockToAlteredAccounts(apiBlock *api.Block, return alteredAccountsAPI, nil } -func (bap *baseAPIBlockProcessor) apiBlockToOutportPool(apiBlock *api.Block) (*outport.Pool, error) { - pool := &outport.Pool{ - Txs: make(map[string]data.TransactionHandlerWithGasUsedAndFee), - Scrs: make(map[string]data.TransactionHandlerWithGasUsedAndFee), - Invalid: make(map[string]data.TransactionHandlerWithGasUsedAndFee), - Rewards: make(map[string]data.TransactionHandlerWithGasUsedAndFee), - Logs: make([]*data.LogData, 0), +func (bap *baseAPIBlockProcessor) apiBlockToOutportPool(apiBlock *api.Block) (*outport.TransactionPool, error) { + pool := &outport.TransactionPool{ + Transactions: make(map[string]*outport.TxInfo), + SmartContractResults: make(map[string]*outport.SCRInfo), + InvalidTxs: make(map[string]*outport.TxInfo), + Rewards: make(map[string]*outport.RewardInfo), + Logs: make(map[string]*transaction.Log), } var err error @@ -475,7 +476,7 @@ func (bap *baseAPIBlockProcessor) apiBlockToOutportPool(apiBlock *api.Block) (*o return pool, nil } -func (bap *baseAPIBlockProcessor) addLogsToPool(tx *transaction.ApiTransactionResult, pool *outport.Pool) error { +func (bap *baseAPIBlockProcessor) addLogsToPool(tx *transaction.ApiTransactionResult, pool *outport.TransactionPool) error { if tx.Logs == nil { return nil } @@ -500,18 +501,15 @@ func (bap *baseAPIBlockProcessor) addLogsToPool(tx 
*transaction.ApiTransactionRe }) } - pool.Logs = append(pool.Logs, &data.LogData{ - LogHandler: &transaction.Log{ - Address: logAddressBytes, - Events: logsEvents, - }, - TxHash: tx.Hash, - }) + pool.Logs[tx.Hash] = &transaction.Log{ + Address: logAddressBytes, + Events: logsEvents, + } return nil } -func (bap *baseAPIBlockProcessor) addTxToPool(tx *transaction.ApiTransactionResult, pool *outport.Pool) error { +func (bap *baseAPIBlockProcessor) addTxToPool(tx *transaction.ApiTransactionResult, pool *outport.TransactionPool) error { senderBytes, err := bap.addressPubKeyConverter.Decode(tx.Sender) if err != nil && tx.Type != string(transaction.TxTypeReward) { return fmt.Errorf("error while decoding the sender address. address=%s, error=%s", tx.Sender, err.Error()) @@ -521,7 +519,6 @@ func (bap *baseAPIBlockProcessor) addTxToPool(tx *transaction.ApiTransactionResu return fmt.Errorf("error while decoding the receiver address. address=%s, error=%s", tx.Receiver, err.Error()) } - zeroBigInt := big.NewInt(0) txValueString := tx.Value if len(txValueString) == 0 { txValueString = "0" @@ -533,49 +530,54 @@ func (bap *baseAPIBlockProcessor) addTxToPool(tx *transaction.ApiTransactionResu switch tx.Type { case string(transaction.TxTypeNormal): - pool.Txs[tx.Hash] = outport.NewTransactionHandlerWithGasAndFee( - &transaction.Transaction{ + pool.Transactions[tx.Hash] = &outport.TxInfo{ + Transaction: &transaction.Transaction{ SndAddr: senderBytes, RcvAddr: receiverBytes, Value: txValue, }, - 0, - zeroBigInt, - ) + FeeInfo: newFeeInfo(), + } + case string(transaction.TxTypeUnsigned): - pool.Scrs[tx.Hash] = outport.NewTransactionHandlerWithGasAndFee( - &smartContractResult.SmartContractResult{ + pool.SmartContractResults[tx.Hash] = &outport.SCRInfo{ + SmartContractResult: &smartContractResult.SmartContractResult{ SndAddr: senderBytes, RcvAddr: receiverBytes, Value: txValue, }, - 0, - zeroBigInt, - ) + FeeInfo: newFeeInfo(), + } case string(transaction.TxTypeInvalid): - 
pool.Invalid[tx.Hash] = outport.NewTransactionHandlerWithGasAndFee( - &transaction.Transaction{ + pool.InvalidTxs[tx.Hash] = &outport.TxInfo{ + Transaction: &transaction.Transaction{ SndAddr: senderBytes, // do not set the receiver since the cost is only on sender's side in case of invalid txs Value: txValue, }, - 0, - zeroBigInt, - ) + FeeInfo: newFeeInfo(), + } + case string(transaction.TxTypeReward): - pool.Rewards[tx.Hash] = outport.NewTransactionHandlerWithGasAndFee( - &rewardTx.RewardTx{ + pool.Rewards[tx.Hash] = &outport.RewardInfo{ + Reward: &rewardTx.RewardTx{ RcvAddr: receiverBytes, Value: txValue, }, - 0, - zeroBigInt, - ) + } } return nil } +func newFeeInfo() *outport.FeeInfo { + return &outport.FeeInfo{ + GasUsed: 0, + Fee: big.NewInt(0), + InitialPaidFee: big.NewInt(0), + } +} + func createAlteredBlockHash(hash []byte) []byte { alteredHash := make([]byte, 0) alteredHash = append(alteredHash, hash...) diff --git a/node/external/blockAPI/interface.go b/node/external/blockAPI/interface.go index 8519c8c4963..75707d828c7 100644 --- a/node/external/blockAPI/interface.go +++ b/node/external/blockAPI/interface.go @@ -2,8 +2,8 @@ package blockAPI import ( "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" - "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/state" @@ -22,7 +22,7 @@ type APIBlockHandler interface { GetBlockByNonce(nonce uint64, options api.BlockQueryOptions) (*api.Block, error) GetBlockByHash(hash []byte, options api.BlockQueryOptions) (*api.Block, error) GetBlockByRound(round uint64, options api.BlockQueryOptions) (*api.Block, error) - GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) + GetAlteredAccountsForBlock(options 
api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) IsInterfaceNil() bool } diff --git a/node/external/blockAPI/metaBlock.go b/node/external/blockAPI/metaBlock.go index 970b918eb02..9367153e11d 100644 --- a/node/external/blockAPI/metaBlock.go +++ b/node/external/blockAPI/metaBlock.go @@ -5,9 +5,9 @@ import ( "time" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" ) @@ -112,7 +112,7 @@ func (mbp *metaAPIBlockProcessor) GetBlockByRound(round uint64, options api.Bloc } // GetAlteredAccountsForBlock returns the altered accounts for the desired meta block -func (mbp *metaAPIBlockProcessor) GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) { +func (mbp *metaAPIBlockProcessor) GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) { headerHash, blockBytes, err := mbp.getHashAndBlockBytesFromStorer(options.GetBlockParameters) if err != nil { return nil, err diff --git a/node/external/blockAPI/shardBlock.go b/node/external/blockAPI/shardBlock.go index eab2e51ed9e..4f97f1af026 100644 --- a/node/external/blockAPI/shardBlock.go +++ b/node/external/blockAPI/shardBlock.go @@ -4,9 +4,9 @@ import ( "encoding/hex" "time" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/block" - "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/node/filters" @@ -114,7 +114,7 @@ func (sbp *shardAPIBlockProcessor) 
GetBlockByRound(round uint64, options api.Blo } // GetAlteredAccountsForBlock will return the altered accounts for the desired shard block -func (sbp *shardAPIBlockProcessor) GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) { +func (sbp *shardAPIBlockProcessor) GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) { headerHash, blockBytes, err := sbp.getHashAndBlockBytesFromStorer(options.GetBlockParameters) if err != nil { return nil, err diff --git a/node/external/nodeApiResolver.go b/node/external/nodeApiResolver.go index aef2c7a1316..74bd394616c 100644 --- a/node/external/nodeApiResolver.go +++ b/node/external/nodeApiResolver.go @@ -7,8 +7,8 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" - "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/genesis" @@ -194,7 +194,7 @@ func (nar *nodeApiResolver) GetBlockByRound(round uint64, options api.BlockQuery } // GetAlteredAccountsForBlock will return the altered accounts for the desired block -func (nar *nodeApiResolver) GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) { +func (nar *nodeApiResolver) GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) { return nar.apiBlockHandler.GetAlteredAccountsForBlock(options) } diff --git a/node/mock/apiBlockHandlerStub.go b/node/mock/apiBlockHandlerStub.go index 691072e0ca0..54bbed8c29c 100644 --- a/node/mock/apiBlockHandlerStub.go +++ b/node/mock/apiBlockHandlerStub.go @@ -1,8 +1,8 @@ package mock import ( + 
"github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" - "github.com/multiversx/mx-chain-core-go/data/outport" ) // BlockAPIHandlerStub - @@ -10,7 +10,7 @@ type BlockAPIHandlerStub struct { GetBlockByNonceCalled func(nonce uint64, options api.BlockQueryOptions) (*api.Block, error) GetBlockByHashCalled func(hash []byte, options api.BlockQueryOptions) (*api.Block, error) GetBlockByRoundCalled func(round uint64, options api.BlockQueryOptions) (*api.Block, error) - GetAlteredAccountsForBlockCalled func(options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) + GetAlteredAccountsForBlockCalled func(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) } // GetBlockByNonce - @@ -41,7 +41,7 @@ func (bah *BlockAPIHandlerStub) GetBlockByRound(round uint64, options api.BlockQ } // GetAlteredAccountsForBlock - -func (bah *BlockAPIHandlerStub) GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) { +func (bah *BlockAPIHandlerStub) GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) { if bah.GetAlteredAccountsForBlockCalled != nil { return bah.GetAlteredAccountsForBlockCalled(options) } diff --git a/outport/notifier/eventNotifier.go b/outport/notifier/eventNotifier.go index 158c11c2841..f2d5a183d7a 100644 --- a/outport/notifier/eventNotifier.go +++ b/outport/notifier/eventNotifier.go @@ -7,6 +7,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" nodeData "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" @@ -156,7 +157,7 @@ func (en *eventNotifier) SaveValidatorsPubKeys(_ 
map[uint32][][]byte, _ uint32) } // SaveAccounts does nothing -func (en *eventNotifier) SaveAccounts(_ uint64, _ map[string]*outport.AlteredAccount, _ uint32) error { +func (en *eventNotifier) SaveAccounts(_ uint64, _ map[string]*alteredAccount.AlteredAccount, _ uint32) error { return nil } diff --git a/outport/process/alteredaccounts/alteredAccountsProvider_test.go b/outport/process/alteredaccounts/alteredAccountsProvider_test.go index 91093408608..ac28775fd30 100644 --- a/outport/process/alteredaccounts/alteredAccountsProvider_test.go +++ b/outport/process/alteredaccounts/alteredAccountsProvider_test.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/esdt" outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/rewardTx" @@ -95,16 +96,16 @@ func TestGetAlteredAccountFromUserAccount(t *testing.T) { Address: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, } - res := &outportcore.AlteredAccount{ + res := &alteredAccount.AlteredAccount{ Address: "addr", Balance: "1000", } aap.addAdditionalDataInAlteredAccount(res, userAccount, &markedAlteredAccount{}) - require.Equal(t, &outportcore.AlteredAccount{ + require.Equal(t, &alteredAccount.AlteredAccount{ Address: "addr", Balance: "1000", - AdditionalData: &outportcore.AdditionalAccountData{ + AdditionalData: &alteredAccount.AdditionalAccountData{ DeveloperRewards: "100", CurrentOwner: "6f776e6572", UserName: "contract", @@ -118,16 +119,16 @@ func TestGetAlteredAccountFromUserAccount(t *testing.T) { Address: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, } - res = &outportcore.AlteredAccount{ + res = &alteredAccount.AlteredAccount{ Address: "addr", Balance: "5000", } aap.addAdditionalDataInAlteredAccount(res, userAccount, &markedAlteredAccount{}) - require.Equal(t, &outportcore.AlteredAccount{ + 
require.Equal(t, &alteredAccount.AlteredAccount{ Address: "addr", Balance: "5000", - AdditionalData: &outportcore.AdditionalAccountData{ + AdditionalData: &alteredAccount.AdditionalAccountData{ DeveloperRewards: "5000", }, }, res) @@ -186,7 +187,7 @@ func testExtractAlteredAccountsFromPoolSenderShard(t *testing.T) { args.AddressConverter = testscommon.NewPubkeyConverterMock(20) aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ "hash0": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ SndAddr: []byte("sender shard - tx0 "), @@ -529,7 +530,7 @@ func testExtractAlteredAccountsFromPoolShouldIncludeESDT(t *testing.T) { require.Len(t, res, 1) require.Len(t, res[encodedAddr].Tokens, 1) - require.Equal(t, &outportcore.AccountTokenData{ + require.Equal(t, &alteredAccount.AccountTokenData{ Identifier: "token0", Balance: expectedToken.Value.String(), Nonce: 0, @@ -582,11 +583,11 @@ func testExtractAlteredAccountsFromPoolShouldIncludeNFT(t *testing.T) { require.NoError(t, err) encodedAddr := args.AddressConverter.Encode([]byte("addr")) - require.Equal(t, &outportcore.AccountTokenData{ + require.Equal(t, &alteredAccount.AccountTokenData{ Identifier: "token0", Balance: expectedToken.Value.String(), Nonce: expectedToken.TokenMetaData.Nonce, - MetaData: &outportcore.TokenMetaData{Nonce: expectedToken.TokenMetaData.Nonce}, + MetaData: &alteredAccount.TokenMetaData{Nonce: expectedToken.TokenMetaData.Nonce}, }, res[encodedAddr].Tokens[0]) } @@ -714,11 +715,11 @@ func testExtractAlteredAccountsFromPoolShouldIncludeDestinationFromTokensLogsTop mapKeyToSearch := args.AddressConverter.Encode(receiverOnDestination) creator := args.AddressConverter.Encode(expectedToken.TokenMetaData.Creator) require.Len(t, res[mapKeyToSearch].Tokens, 1) - require.Equal(t, 
res[mapKeyToSearch].Tokens[0], &outportcore.AccountTokenData{ + require.Equal(t, res[mapKeyToSearch].Tokens[0], &alteredAccount.AccountTokenData{ Identifier: "token0", Balance: "37", Nonce: 38, - MetaData: &outportcore.TokenMetaData{ + MetaData: &alteredAccount.TokenMetaData{ Nonce: 38, Name: "name", Creator: creator, @@ -910,28 +911,28 @@ func testExtractAlteredAccountsFromPoolAddressHasMultipleNfts(t *testing.T) { require.Len(t, res, 1) require.Len(t, res[encodedAddr].Tokens, 3) - require.Contains(t, res[encodedAddr].Tokens, &outportcore.AccountTokenData{ + require.Contains(t, res[encodedAddr].Tokens, &alteredAccount.AccountTokenData{ Identifier: "esdttoken", Balance: expectedToken0.Value.String(), Nonce: 0, MetaData: nil, }) - require.Contains(t, res[encodedAddr].Tokens, &outportcore.AccountTokenData{ + require.Contains(t, res[encodedAddr].Tokens, &alteredAccount.AccountTokenData{ Identifier: string(expectedToken1.TokenMetaData.Name), Balance: expectedToken1.Value.String(), Nonce: expectedToken1.TokenMetaData.Nonce, - MetaData: &outportcore.TokenMetaData{ + MetaData: &alteredAccount.TokenMetaData{ Nonce: expectedToken1.TokenMetaData.Nonce, Name: string(expectedToken1.TokenMetaData.Name), }, }) - require.Contains(t, res[encodedAddr].Tokens, &outportcore.AccountTokenData{ + require.Contains(t, res[encodedAddr].Tokens, &alteredAccount.AccountTokenData{ Identifier: string(expectedToken2.TokenMetaData.Name), Balance: expectedToken2.Value.String(), Nonce: expectedToken2.TokenMetaData.Nonce, - MetaData: &outportcore.TokenMetaData{ + MetaData: &alteredAccount.TokenMetaData{ Nonce: expectedToken2.TokenMetaData.Nonce, Name: string(expectedToken2.TokenMetaData.Name), }, @@ -993,23 +994,23 @@ func testExtractAlteredAccountsFromPoolESDTTransferBalanceNotChanged(t *testing. 
encodedAddrSnd := args.AddressConverter.Encode([]byte("snd")) encodedAddrRcv := args.AddressConverter.Encode([]byte("rcv")) - require.Equal(t, map[string]*outportcore.AlteredAccount{ + require.Equal(t, map[string]*alteredAccount.AlteredAccount{ encodedAddrSnd: { Address: encodedAddrSnd, Balance: "10", - Tokens: []*outportcore.AccountTokenData{ + Tokens: []*alteredAccount.AccountTokenData{ { Identifier: "token0", Balance: expectedToken.Value.String(), Nonce: 0, Properties: "6f6b", MetaData: nil, - AdditionalData: &outportcore.AdditionalAccountTokenData{ + AdditionalData: &alteredAccount.AdditionalAccountTokenData{ IsNFTCreate: false, }, }, }, - AdditionalData: &outportcore.AdditionalAccountData{ + AdditionalData: &alteredAccount.AdditionalAccountData{ BalanceChanged: true, IsSender: true, }, @@ -1017,18 +1018,18 @@ func testExtractAlteredAccountsFromPoolESDTTransferBalanceNotChanged(t *testing. encodedAddrRcv: { Address: encodedAddrRcv, Balance: "10", - Tokens: []*outportcore.AccountTokenData{ + Tokens: []*alteredAccount.AccountTokenData{ { Identifier: "token0", Balance: expectedToken.Value.String(), Nonce: 0, Properties: "6f6b", - AdditionalData: &outportcore.AdditionalAccountTokenData{ + AdditionalData: &alteredAccount.AdditionalAccountTokenData{ IsNFTCreate: false, }, }, }, - AdditionalData: &outportcore.AdditionalAccountData{ + AdditionalData: &alteredAccount.AdditionalAccountData{ IsSender: false, BalanceChanged: false, }, @@ -1071,11 +1072,11 @@ func testExtractAlteredAccountsFromPoolReceiverShouldHaveBalanceChanged(t *testi encodedAddrSnd := args.AddressConverter.Encode([]byte("snd")) encodedAddrRcv := args.AddressConverter.Encode([]byte("rcv")) - require.Equal(t, map[string]*outportcore.AlteredAccount{ + require.Equal(t, map[string]*alteredAccount.AlteredAccount{ encodedAddrSnd: { Address: encodedAddrSnd, Balance: "15", - AdditionalData: &outportcore.AdditionalAccountData{ + AdditionalData: &alteredAccount.AdditionalAccountData{ BalanceChanged: true, 
IsSender: true, }, @@ -1083,7 +1084,7 @@ func testExtractAlteredAccountsFromPoolReceiverShouldHaveBalanceChanged(t *testi encodedAddrRcv: { Address: encodedAddrRcv, Balance: "15", - AdditionalData: &outportcore.AdditionalAccountData{ + AdditionalData: &alteredAccount.AdditionalAccountData{ IsSender: false, BalanceChanged: true, }, @@ -1117,11 +1118,11 @@ func testExtractAlteredAccountsFromPoolOnlySenderShouldHaveBalanceChanged(t *tes require.NoError(t, err) encodedAddrSnd := args.AddressConverter.Encode([]byte("snd")) - require.Equal(t, map[string]*outportcore.AlteredAccount{ + require.Equal(t, map[string]*alteredAccount.AlteredAccount{ encodedAddrSnd: { Address: encodedAddrSnd, Balance: "15", - AdditionalData: &outportcore.AdditionalAccountData{ + AdditionalData: &alteredAccount.AdditionalAccountData{ BalanceChanged: true, IsSender: true, }, @@ -1182,23 +1183,23 @@ func textExtractAlteredAccountsFromPoolNftCreate(t *testing.T) { require.NoError(t, err) encodedAddrSnd := args.AddressConverter.Encode([]byte("snd")) - require.Equal(t, map[string]*outportcore.AlteredAccount{ + require.Equal(t, map[string]*alteredAccount.AlteredAccount{ encodedAddrSnd: { Address: encodedAddrSnd, Balance: "10", - Tokens: []*outportcore.AccountTokenData{ + Tokens: []*alteredAccount.AccountTokenData{ { Identifier: "token0", Balance: expectedToken.Value.String(), Nonce: 0, Properties: "6f6b", MetaData: nil, - AdditionalData: &outportcore.AdditionalAccountTokenData{ + AdditionalData: &alteredAccount.AdditionalAccountTokenData{ IsNFTCreate: true, }, }, }, - AdditionalData: &outportcore.AdditionalAccountData{ + AdditionalData: &alteredAccount.AdditionalAccountData{ BalanceChanged: true, IsSender: true, }, @@ -1235,11 +1236,11 @@ func textExtractAlteredAccountsFromPoolTransactionValueNil(t *testing.T) { require.NoError(t, err) encodedAddrSnd := args.AddressConverter.Encode([]byte("snd")) - require.Equal(t, map[string]*outportcore.AlteredAccount{ + require.Equal(t, 
map[string]*alteredAccount.AlteredAccount{ encodedAddrSnd: { Address: encodedAddrSnd, Balance: "15", - AdditionalData: &outportcore.AdditionalAccountData{ + AdditionalData: &alteredAccount.AdditionalAccountData{ BalanceChanged: true, IsSender: true, }, diff --git a/testscommon/alteredAccountsProviderStub.go b/testscommon/alteredAccountsProviderStub.go index 5772bdac68b..c6ca8458cae 100644 --- a/testscommon/alteredAccountsProviderStub.go +++ b/testscommon/alteredAccountsProviderStub.go @@ -1,17 +1,18 @@ package testscommon import ( + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-go/outport/process/alteredaccounts/shared" ) // AlteredAccountsProviderStub - type AlteredAccountsProviderStub struct { - ExtractAlteredAccountsFromPoolCalled func(txPool *outport.Pool, options shared.AlteredAccountsOptions) (map[string]*outport.AlteredAccount, error) + ExtractAlteredAccountsFromPoolCalled func(txPool *outport.Pool, options shared.AlteredAccountsOptions) (map[string]*alteredAccount.AlteredAccount, error) } // ExtractAlteredAccountsFromPool - -func (a *AlteredAccountsProviderStub) ExtractAlteredAccountsFromPool(txPool *outport.Pool, options shared.AlteredAccountsOptions) (map[string]*outport.AlteredAccount, error) { +func (a *AlteredAccountsProviderStub) ExtractAlteredAccountsFromPool(txPool *outport.Pool, options shared.AlteredAccountsOptions) (map[string]*alteredAccount.AlteredAccount, error) { if a.ExtractAlteredAccountsFromPoolCalled != nil { return a.ExtractAlteredAccountsFromPoolCalled(txPool, options) } From 8ccc419b269c097bea1c6d1b317c1210bcfb8306 Mon Sep 17 00:00:00 2001 From: MariusC Date: Wed, 15 Mar 2023 13:16:28 +0200 Subject: [PATCH 054/221] FIX: Altered accounts --- .../alteredAccountsProvider.go | 91 ++- .../alteredAccountsProvider_test.go | 691 ++++++++++-------- .../alteredaccounts/tokensProcessor.go | 4 +- testscommon/alteredAccountsProviderStub.go | 4 +- 4 
files changed, 461 insertions(+), 329 deletions(-) diff --git a/outport/process/alteredaccounts/alteredAccountsProvider.go b/outport/process/alteredaccounts/alteredAccountsProvider.go index 209efde59cc..f4b1409db96 100644 --- a/outport/process/alteredaccounts/alteredAccountsProvider.go +++ b/outport/process/alteredaccounts/alteredAccountsProvider.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/esdt" outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-go/outport/process/alteredaccounts/shared" @@ -69,7 +70,7 @@ func NewAlteredAccountsProvider(args ArgsAlteredAccountsProvider) (*alteredAccou } // ExtractAlteredAccountsFromPool will extract and return altered accounts from the pool -func (aap *alteredAccountsProvider) ExtractAlteredAccountsFromPool(txPool *outportcore.Pool, options shared.AlteredAccountsOptions) (map[string]*outportcore.AlteredAccount, error) { +func (aap *alteredAccountsProvider) ExtractAlteredAccountsFromPool(txPool *outportcore.TransactionPool, options shared.AlteredAccountsOptions) (map[string]*alteredAccount.AlteredAccount, error) { if err := options.Verify(); err != nil { return nil, err } @@ -79,7 +80,7 @@ func (aap *alteredAccountsProvider) ExtractAlteredAccountsFromPool(txPool *outpo if txPool == nil { log.Warn("alteredAccountsProvider: ExtractAlteredAccountsFromPool", "txPool is nil", "will return") - return map[string]*outportcore.AlteredAccount{}, nil + return map[string]*alteredAccount.AlteredAccount{}, nil } markedAccounts := make(map[string]*markedAlteredAccount) @@ -92,8 +93,8 @@ func (aap *alteredAccountsProvider) ExtractAlteredAccountsFromPool(txPool *outpo return aap.fetchDataForMarkedAccounts(markedAccounts, options) } -func (aap *alteredAccountsProvider) 
fetchDataForMarkedAccounts(markedAccounts map[string]*markedAlteredAccount, options shared.AlteredAccountsOptions) (map[string]*outportcore.AlteredAccount, error) { - alteredAccounts := make(map[string]*outportcore.AlteredAccount) +func (aap *alteredAccountsProvider) fetchDataForMarkedAccounts(markedAccounts map[string]*markedAlteredAccount, options shared.AlteredAccountsOptions) (map[string]*alteredAccount.AlteredAccount, error) { + alteredAccounts := make(map[string]*alteredAccount.AlteredAccount) var err error for address, markedAccount := range markedAccounts { err = aap.processMarkedAccountData(address, markedAccount, alteredAccounts, options) @@ -108,7 +109,7 @@ func (aap *alteredAccountsProvider) fetchDataForMarkedAccounts(markedAccounts ma func (aap *alteredAccountsProvider) processMarkedAccountData( addressStr string, markedAccount *markedAlteredAccount, - alteredAccounts map[string]*outportcore.AlteredAccount, + alteredAccounts map[string]*alteredAccount.AlteredAccount, options shared.AlteredAccountsOptions, ) error { addressBytes := []byte(addressStr) @@ -135,8 +136,8 @@ func (aap *alteredAccountsProvider) processMarkedAccountData( return nil } -func (aap *alteredAccountsProvider) addAdditionalDataInAlteredAccount(alteredAccount *outportcore.AlteredAccount, userAccount state.UserAccountHandler, markedAccount *markedAlteredAccount) { - alteredAccount.AdditionalData = &outportcore.AdditionalAccountData{ +func (aap *alteredAccountsProvider) addAdditionalDataInAlteredAccount(alteredAcc *alteredAccount.AlteredAccount, userAccount state.UserAccountHandler, markedAccount *markedAlteredAccount) { + alteredAcc.AdditionalData = &alteredAccount.AdditionalAccountData{ IsSender: markedAccount.isSender, BalanceChanged: markedAccount.balanceChanged, UserName: string(userAccount.GetUserName()), @@ -144,22 +145,20 @@ func (aap *alteredAccountsProvider) addAdditionalDataInAlteredAccount(alteredAcc ownerAddressBytes := userAccount.GetOwnerAddress() if 
core.IsSmartContractAddress(userAccount.AddressBytes()) && len(ownerAddressBytes) == aap.addressConverter.Len() { - alteredAccount.AdditionalData.CurrentOwner = aap.addressConverter.Encode(ownerAddressBytes) + alteredAcc.AdditionalData.CurrentOwner = aap.addressConverter.Encode(ownerAddressBytes) } developerRewards := userAccount.GetDeveloperReward() if developerRewards != nil { - alteredAccount.AdditionalData.DeveloperRewards = developerRewards.String() + alteredAcc.AdditionalData.DeveloperRewards = developerRewards.String() } } -func (aap *alteredAccountsProvider) getAlteredAccountFromUserAccounts(userEncodedAddress string, userAccount state.UserAccountHandler) *outportcore.AlteredAccount { - alteredAccount := &outportcore.AlteredAccount{ +func (aap *alteredAccountsProvider) getAlteredAccountFromUserAccounts(userEncodedAddress string, userAccount state.UserAccountHandler) *alteredAccount.AlteredAccount { + return &alteredAccount.AlteredAccount{ Address: userEncodedAddress, Balance: userAccount.GetBalance().String(), Nonce: userAccount.GetNonce(), } - - return alteredAccount } func (aap *alteredAccountsProvider) loadUserAccount(addressBytes []byte, options shared.AlteredAccountsOptions) (state.UserAccountHandler, error) { @@ -188,7 +187,7 @@ func (aap *alteredAccountsProvider) addTokensDataForMarkedAccount( encodedAddress string, userAccount state.UserAccountHandler, markedAccountToken *markedAlteredAccountToken, - alteredAccounts map[string]*outportcore.AlteredAccount, + alteredAccounts map[string]*alteredAccount.AlteredAccount, options shared.AlteredAccountsOptions, ) error { nonce := markedAccountToken.nonce @@ -211,7 +210,7 @@ func (aap *alteredAccountsProvider) addTokensDataForMarkedAccount( return nil } - accountTokenData := &outportcore.AccountTokenData{ + accountTokenData := &alteredAccount.AccountTokenData{ Identifier: tokenID, Balance: esdtToken.Value.String(), Nonce: nonce, @@ -219,23 +218,23 @@ func (aap *alteredAccountsProvider) 
addTokensDataForMarkedAccount( MetaData: aap.convertMetaData(esdtToken.TokenMetaData), } if options.WithAdditionalOutportData { - accountTokenData.AdditionalData = &outportcore.AdditionalAccountTokenData{ + accountTokenData.AdditionalData = &alteredAccount.AdditionalAccountTokenData{ IsNFTCreate: markedAccountToken.isNFTCreate, } } - alteredAccount := alteredAccounts[encodedAddress] - alteredAccount.Tokens = append(alteredAccounts[encodedAddress].Tokens, accountTokenData) + alteredAcc := alteredAccounts[encodedAddress] + alteredAcc.Tokens = append(alteredAccounts[encodedAddress].Tokens, accountTokenData) return nil } -func (aap *alteredAccountsProvider) convertMetaData(metaData *esdt.MetaData) *outportcore.TokenMetaData { +func (aap *alteredAccountsProvider) convertMetaData(metaData *esdt.MetaData) *alteredAccount.TokenMetaData { if metaData == nil { return nil } - return &outportcore.TokenMetaData{ + return &alteredAccount.TokenMetaData{ Nonce: metaData.Nonce, Name: string(metaData.Name), Creator: aap.addressConverter.Encode(metaData.Creator), @@ -247,20 +246,60 @@ func (aap *alteredAccountsProvider) convertMetaData(metaData *esdt.MetaData) *ou } func (aap *alteredAccountsProvider) extractAddressesWithBalanceChange( - txPool *outportcore.Pool, + txPool *outportcore.TransactionPool, markedAlteredAccounts map[string]*markedAlteredAccount, ) { selfShardID := aap.shardCoordinator.SelfId() - aap.extractAddressesFromTxsHandlers(selfShardID, txPool.Txs, markedAlteredAccounts, process.MoveBalance) - aap.extractAddressesFromTxsHandlers(selfShardID, txPool.Scrs, markedAlteredAccounts, process.SCInvoking) - aap.extractAddressesFromTxsHandlers(selfShardID, txPool.Rewards, markedAlteredAccounts, process.RewardTx) - aap.extractAddressesFromTxsHandlers(selfShardID, txPool.Invalid, markedAlteredAccounts, process.InvalidTransaction) + txs := txsMapToTxHandlerSlice(txPool.Transactions) + scrs := scrsMapToTxHandlerSlice(txPool.SmartContractResults) + rewards := 
rewardsMapToTxHandlerSlice(txPool.Rewards) + invalidTxs := txsMapToTxHandlerSlice(txPool.InvalidTxs) + + aap.extractAddressesFromTxsHandlers(selfShardID, txs, markedAlteredAccounts, process.MoveBalance) + aap.extractAddressesFromTxsHandlers(selfShardID, scrs, markedAlteredAccounts, process.SCInvoking) + aap.extractAddressesFromTxsHandlers(selfShardID, rewards, markedAlteredAccounts, process.RewardTx) + aap.extractAddressesFromTxsHandlers(selfShardID, invalidTxs, markedAlteredAccounts, process.InvalidTransaction) +} + +func txsMapToTxHandlerSlice(txs map[string]*outportcore.TxInfo) []data.TransactionHandler { + ret := make([]data.TransactionHandler, len(txs)) + + idx := 0 + for _, tx := range txs { + ret[idx] = tx.Transaction + idx++ + } + + return ret +} + +func scrsMapToTxHandlerSlice(scrs map[string]*outportcore.SCRInfo) []data.TransactionHandler { + ret := make([]data.TransactionHandler, len(scrs)) + + idx := 0 + for _, scr := range scrs { + ret[idx] = scr.SmartContractResult + idx++ + } + + return ret +} +func rewardsMapToTxHandlerSlice(rewards map[string]*outportcore.RewardInfo) []data.TransactionHandler { + ret := make([]data.TransactionHandler, len(rewards)) + + idx := 0 + for _, reward := range rewards { + ret[idx] = reward.Reward + idx++ + } + + return ret } func (aap *alteredAccountsProvider) extractAddressesFromTxsHandlers( selfShardID uint32, - txsHandlers map[string]data.TransactionHandlerWithGasUsedAndFee, + txsHandlers []data.TransactionHandler, markedAlteredAccounts map[string]*markedAlteredAccount, txType process.TransactionType, ) { diff --git a/outport/process/alteredaccounts/alteredAccountsProvider_test.go b/outport/process/alteredaccounts/alteredAccountsProvider_test.go index ac28775fd30..609ffaf1c81 100644 --- a/outport/process/alteredaccounts/alteredAccountsProvider_test.go +++ b/outport/process/alteredaccounts/alteredAccountsProvider_test.go @@ -8,7 +8,6 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core" - 
"github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/esdt" outportcore "github.com/multiversx/mx-chain-core-go/data/outport" @@ -163,7 +162,7 @@ func testExtractAlteredAccountsFromPoolNoTransaction(t *testing.T) { args := getMockArgs() aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{}, shared.AlteredAccountsOptions{}) + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{}, shared.AlteredAccountsOptions{}) require.NoError(t, err) require.Empty(t, res) } @@ -188,17 +187,27 @@ func testExtractAlteredAccountsFromPoolSenderShard(t *testing.T) { aap, _ := NewAlteredAccountsProvider(args) res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - "hash0": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - SndAddr: []byte("sender shard - tx0 "), - RcvAddr: []byte("receiver shard - tx0"), - Value: big.NewInt(1), - }, 0, big.NewInt(0)), - "hash1": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - SndAddr: []byte("sender shard - tx1 "), - RcvAddr: []byte("receiver shard - tx1"), - Value: big.NewInt(1), - }, 0, big.NewInt(0)), + Transactions: map[string]*outportcore.TxInfo{ + "hash0": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("sender shard - tx0 "), + RcvAddr: []byte("receiver shard - tx0"), + Value: big.NewInt(1), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, + "hash1": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("sender shard - tx1 "), + RcvAddr: []byte("receiver shard - tx1"), + Value: big.NewInt(1), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, }, }, shared.AlteredAccountsOptions{ WithAdditionalOutportData: true, @@ -233,18 +242,28 @@ func 
testExtractAlteredAccountsFromPoolReceiverShard(t *testing.T) { args.AddressConverter = testscommon.NewPubkeyConverterMock(20) aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - "hash0": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - SndAddr: []byte("sender shard - tx0 "), - RcvAddr: []byte("receiver shard - tx0"), - Value: big.NewInt(1), - }, 0, big.NewInt(0)), - "hash1": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - SndAddr: []byte("sender shard - tx1 "), - RcvAddr: []byte("receiver shard - tx1"), - Value: big.NewInt(1), - }, 0, big.NewInt(0)), + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ + "hash0": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("sender shard - tx0 "), + RcvAddr: []byte("receiver shard - tx0"), + Value: big.NewInt(1), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, + "hash1": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("sender shard - tx1 "), + RcvAddr: []byte("receiver shard - tx1"), + Value: big.NewInt(1), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, }, }, shared.AlteredAccountsOptions{ WithAdditionalOutportData: true, @@ -277,33 +296,58 @@ func testExtractAlteredAccountsFromPoolBothSenderAndReceiverShards(t *testing.T) args.AddressConverter = testscommon.NewPubkeyConverterMock(19) aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - "hash0": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ // intra-shard 0, different addresses - SndAddr: []byte("shard0 addr - tx0 "), - RcvAddr: []byte("shard0 addr 2 - tx0"), - Value: big.NewInt(1), - }, 0, big.NewInt(0)), - "hash1": 
outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ // intra-shard 0, same addresses - SndAddr: []byte("shard0 addr 3 - tx1"), - RcvAddr: []byte("shard0 addr 3 - tx1"), - Value: big.NewInt(1), - }, 0, big.NewInt(0)), - "hash2": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ // cross-shard, sender in shard 0 - SndAddr: []byte("shard0 addr - tx2 "), - RcvAddr: []byte("shard1 - tx2 "), - Value: big.NewInt(1), - }, 0, big.NewInt(0)), - "hash3": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ // cross-shard, receiver in shard 0 - SndAddr: []byte("shard1 addr - tx3 "), - RcvAddr: []byte("shard0 addr - tx3 "), - Value: big.NewInt(1), - }, 0, big.NewInt(0)), - "hash4": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ // cross-shard, no address in shard 0 - SndAddr: []byte("shard2 addr - tx4 "), - RcvAddr: []byte("shard2 addr - tx3 "), - Value: big.NewInt(1), - }, 0, big.NewInt(0)), + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ + "hash0": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("shard0 addr - tx0 "), + RcvAddr: []byte("shard0 addr 2 - tx0"), + Value: big.NewInt(1), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, + "hash1": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("shard0 addr 3 - tx1"), + RcvAddr: []byte("shard0 addr 3 - tx1"), + Value: big.NewInt(1), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, + "hash2": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("shard0 addr - tx2 "), + RcvAddr: []byte("shard1 - tx2 "), + Value: big.NewInt(1), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, + "hash3": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("shard1 addr - tx3 "), + RcvAddr: []byte("shard0 addr - tx3 "), + Value: big.NewInt(1), + }, + FeeInfo: &outportcore.FeeInfo{ + 
Fee: big.NewInt(0), + }, + }, + "hash4": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("shard2 addr - tx4 "), + RcvAddr: []byte("shard2 addr - tx3 "), + Value: big.NewInt(1), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, }, }, shared.AlteredAccountsOptions{}) require.NoError(t, err) @@ -348,13 +392,18 @@ func testExtractAlteredAccountsFromPoolTrieDataChecks(t *testing.T) { args.AddressConverter = testscommon.NewPubkeyConverterMock(19) aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - "hash0": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - SndAddr: []byte("sender in shard 0 "), - RcvAddr: []byte(receiverInSelfShard), - Value: big.NewInt(1), - }, 0, big.NewInt(0)), + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ + "hash0": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("sender in shard 0 "), + RcvAddr: []byte(receiverInSelfShard), + Value: big.NewInt(1), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, }, }, shared.AlteredAccountsOptions{}) require.NoError(t, err) @@ -394,32 +443,49 @@ func testExtractAlteredAccountsFromPoolScrsInvalidRewards(t *testing.T) { args.AddressConverter = testscommon.NewPubkeyConverterMock(26) aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - "hash0": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - SndAddr: []byte("sender in shard 0 - tx 0 "), - Value: big.NewInt(1), - }, 0, big.NewInt(0)), - }, - Rewards: map[string]data.TransactionHandlerWithGasUsedAndFee{ - "hash1": outportcore.NewTransactionHandlerWithGasAndFee(&rewardTx.RewardTx{ - RcvAddr: []byte("receiver in shard 0 - tx 1"), - Value: 
big.NewInt(1), - }, 0, big.NewInt(0)), - }, - Scrs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - "hash2": outportcore.NewTransactionHandlerWithGasAndFee(&smartContractResult.SmartContractResult{ - SndAddr: []byte("sender in shard 0 - tx 2 "), - RcvAddr: []byte("receiver in shard 0 - tx 2"), - Value: big.NewInt(1), - }, 0, big.NewInt(0)), - }, - Invalid: map[string]data.TransactionHandlerWithGasUsedAndFee{ - "hash3": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - SndAddr: []byte("sender in shard 0 - tx 3 "), - RcvAddr: []byte("receiver in shard 0 - tx 3"), // receiver for invalid txs should not be included - Value: big.NewInt(1), - }, 0, big.NewInt(0)), + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ + "hash0": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("sender in shard 0 - tx 0 "), + Value: big.NewInt(1), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, + }, + Rewards: map[string]*outportcore.RewardInfo{ + "hash1": { + Reward: &rewardTx.RewardTx{ + RcvAddr: []byte("receiver in shard 0 - tx 1"), + Value: big.NewInt(1), + }, + }, + }, + SmartContractResults: map[string]*outportcore.SCRInfo{ + "hash2": { + SmartContractResult: &smartContractResult.SmartContractResult{ + SndAddr: []byte("sender in shard 0 - tx 2 "), + RcvAddr: []byte("receiver in shard 0 - tx 2"), + Value: big.NewInt(1), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, + }, + InvalidTxs: map[string]*outportcore.TxInfo{ + "hash3": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("sender in shard 0 - tx 3 "), + RcvAddr: []byte("receiver in shard 0 - tx 3"), // receiver for invalid txs should not be included + Value: big.NewInt(1), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, }, }, shared.AlteredAccountsOptions{}) require.NoError(t, err) @@ -446,27 +512,25 @@ func 
testExtractAlteredAccountsFromPoolShouldReturnErrorWhenCastingToVmCommonUse } aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Logs: []*data.LogData{ - { - LogHandler: &transaction.Log{ - Address: []byte("addr"), - Events: []*transaction.Event{ - { - Address: []byte("addr"), - Identifier: []byte(core.BuiltInFunctionESDTTransfer), - Topics: [][]byte{ - []byte("token0"), - big.NewInt(0).Bytes(), - }, + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Logs: map[string]*transaction.Log{ + "hash": { + Address: []byte("addr"), + Events: []*transaction.Event{ + { + Address: []byte("addr"), + Identifier: []byte(core.BuiltInFunctionESDTTransfer), + Topics: [][]byte{ + []byte("token0"), + big.NewInt(0).Bytes(), }, - { - Address: []byte("addr"), // other event for the same token, to ensure it isn't added twice - Identifier: []byte(core.BuiltInFunctionESDTTransfer), - Topics: [][]byte{ - []byte("token0"), - big.NewInt(0).Bytes(), - }, + }, + { + Address: []byte("addr"), // other event for the same token, to ensure it isn't added twice + Identifier: []byte(core.BuiltInFunctionESDTTransfer), + Topics: [][]byte{ + []byte("token0"), + big.NewInt(0).Bytes(), }, }, }, @@ -497,27 +561,25 @@ func testExtractAlteredAccountsFromPoolShouldIncludeESDT(t *testing.T) { } aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Logs: []*data.LogData{ - { - LogHandler: &transaction.Log{ - Address: []byte("addr"), - Events: []*transaction.Event{ - { - Address: []byte("addr"), - Identifier: []byte(core.BuiltInFunctionESDTTransfer), - Topics: [][]byte{ - []byte("token0"), - big.NewInt(0).Bytes(), - }, + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Logs: map[string]*transaction.Log{ + "hash": { + Address: []byte("addr"), + Events: []*transaction.Event{ + { + Address: []byte("addr"), + Identifier: 
[]byte(core.BuiltInFunctionESDTTransfer), + Topics: [][]byte{ + []byte("token0"), + big.NewInt(0).Bytes(), }, - { - Address: []byte("addr"), // other event for the same token, to ensure it isn't added twice - Identifier: []byte(core.BuiltInFunctionESDTTransfer), - Topics: [][]byte{ - []byte("token0"), - big.NewInt(0).Bytes(), - }, + }, + { + Address: []byte("addr"), // other event for the same token, to ensure it isn't added twice + Identifier: []byte(core.BuiltInFunctionESDTTransfer), + Topics: [][]byte{ + []byte("token0"), + big.NewInt(0).Bytes(), }, }, }, @@ -561,19 +623,17 @@ func testExtractAlteredAccountsFromPoolShouldIncludeNFT(t *testing.T) { } aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Logs: []*data.LogData{ - { - LogHandler: &transaction.Log{ - Address: []byte("addr"), - Events: []*transaction.Event{ - { - Address: []byte("addr"), - Identifier: []byte(core.BuiltInFunctionESDTNFTTransfer), - Topics: [][]byte{ - []byte("token0"), - big.NewInt(38).Bytes(), - }, + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Logs: map[string]*transaction.Log{ + "hash": { + Address: []byte("addr"), + Events: []*transaction.Event{ + { + Address: []byte("addr"), + Identifier: []byte(core.BuiltInFunctionESDTNFTTransfer), + Topics: [][]byte{ + []byte("token0"), + big.NewInt(38).Bytes(), }, }, }, @@ -616,28 +676,31 @@ func testExtractAlteredAccountsFromPoolShouldNotIncludeReceiverAddressIfNftCreat } aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - "hh": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - SndAddr: sendAddrShard0, - RcvAddr: sendAddrShard0, - Value: big.NewInt(0), - }, 0, big.NewInt(0)), - }, - Logs: []*data.LogData{ - { - LogHandler: &transaction.Log{ - Address: sendAddrShard0, - Events: []*transaction.Event{ - { - 
Address: sendAddrShard0, - Identifier: []byte(core.BuiltInFunctionESDTNFTCreate), - Topics: [][]byte{ - []byte("token0"), - big.NewInt(38).Bytes(), - nil, - receiverOnDestination, - }, + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ + "hh": { + Transaction: &transaction.Transaction{ + SndAddr: sendAddrShard0, + RcvAddr: sendAddrShard0, + Value: big.NewInt(0), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, + }, + Logs: map[string]*transaction.Log{ + "hh": { + Address: sendAddrShard0, + Events: []*transaction.Event{ + { + Address: sendAddrShard0, + Identifier: []byte(core.BuiltInFunctionESDTNFTCreate), + Topics: [][]byte{ + []byte("token0"), + big.NewInt(38).Bytes(), + nil, + receiverOnDestination, }, }, }, @@ -687,21 +750,19 @@ func testExtractAlteredAccountsFromPoolShouldIncludeDestinationFromTokensLogsTop } aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Logs: []*data.LogData{ - { - LogHandler: &transaction.Log{ - Address: []byte("addr"), - Events: []*transaction.Event{ - { - Address: []byte("addr"), - Identifier: []byte(core.BuiltInFunctionESDTNFTTransfer), - Topics: [][]byte{ - []byte("token0"), - big.NewInt(38).Bytes(), - nil, - receiverOnDestination, - }, + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Logs: map[string]*transaction.Log{ + "hash0": { + Address: []byte("addr"), + Events: []*transaction.Event{ + { + Address: []byte("addr"), + Identifier: []byte(core.BuiltInFunctionESDTNFTTransfer), + Topics: [][]byte{ + []byte("token0"), + big.NewInt(38).Bytes(), + nil, + receiverOnDestination, }, }, }, @@ -753,34 +814,37 @@ func testExtractAlteredAccountsFromPoolAddressHasBalanceChangeEsdtAndfNft(t *tes } aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Txs: 
map[string]data.TransactionHandlerWithGasUsedAndFee{ - "hash0": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - SndAddr: []byte("addr"), - Value: big.NewInt(0), - }, 0, big.NewInt(0)), - }, - Logs: []*data.LogData{ - { - LogHandler: &transaction.Log{ - Address: []byte("addr"), - Events: []*transaction.Event{ - { - Address: []byte("addr"), - Identifier: []byte(core.BuiltInFunctionESDTTransfer), - Topics: [][]byte{ - []byte("esdt"), - big.NewInt(1).Bytes(), - }, + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ + "hash0": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("addr"), + Value: big.NewInt(0), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, + }, + Logs: map[string]*transaction.Log{ + "hash0": { + Address: []byte("addr"), + Events: []*transaction.Event{ + { + Address: []byte("addr"), + Identifier: []byte(core.BuiltInFunctionESDTTransfer), + Topics: [][]byte{ + []byte("esdt"), + big.NewInt(1).Bytes(), }, - { - Address: []byte("addr"), - Identifier: []byte(core.BuiltInFunctionESDTNFTTransfer), - Topics: [][]byte{ - []byte("nft"), - big.NewInt(38).Bytes(), - big.NewInt(1).Bytes(), - }, + }, + { + Address: []byte("addr"), + Identifier: []byte(core.BuiltInFunctionESDTNFTTransfer), + Topics: [][]byte{ + []byte("nft"), + big.NewInt(38).Bytes(), + big.NewInt(1).Bytes(), }, }, }, @@ -864,41 +928,44 @@ func testExtractAlteredAccountsFromPoolAddressHasMultipleNfts(t *testing.T) { } aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - "hash0": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - SndAddr: []byte("addr"), - Value: big.NewInt(0), - }, 0, big.NewInt(0)), - }, - Logs: []*data.LogData{ - { - LogHandler: &transaction.Log{ - Address: []byte("addr"), - Events: []*transaction.Event{ - { - 
Address: []byte("addr"), - Identifier: []byte(core.BuiltInFunctionESDTTransfer), - Topics: [][]byte{ - []byte("esdttoken"), - big.NewInt(0).Bytes(), - }, + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ + "hash0": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("addr"), + Value: big.NewInt(0), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, + }, + Logs: map[string]*transaction.Log{ + "hash0": { + Address: []byte("addr"), + Events: []*transaction.Event{ + { + Address: []byte("addr"), + Identifier: []byte(core.BuiltInFunctionESDTTransfer), + Topics: [][]byte{ + []byte("esdttoken"), + big.NewInt(0).Bytes(), }, - { - Address: []byte("addr"), - Identifier: []byte(core.BuiltInFunctionESDTNFTTransfer), - Topics: [][]byte{ - expectedToken1.TokenMetaData.Name, - big.NewInt(0).SetUint64(expectedToken1.TokenMetaData.Nonce).Bytes(), - }, + }, + { + Address: []byte("addr"), + Identifier: []byte(core.BuiltInFunctionESDTNFTTransfer), + Topics: [][]byte{ + expectedToken1.TokenMetaData.Name, + big.NewInt(0).SetUint64(expectedToken1.TokenMetaData.Nonce).Bytes(), }, - { - Address: []byte("addr"), - Identifier: []byte(core.BuiltInFunctionESDTNFTTransfer), - Topics: [][]byte{ - expectedToken2.TokenMetaData.Name, - big.NewInt(0).SetUint64(expectedToken2.TokenMetaData.Nonce).Bytes(), - }, + }, + { + Address: []byte("addr"), + Identifier: []byte(core.BuiltInFunctionESDTNFTTransfer), + Topics: [][]byte{ + expectedToken2.TokenMetaData.Name, + big.NewInt(0).SetUint64(expectedToken2.TokenMetaData.Nonce).Bytes(), }, }, }, @@ -963,25 +1030,28 @@ func testExtractAlteredAccountsFromPoolESDTTransferBalanceNotChanged(t *testing. 
args.AddressConverter = testscommon.NewPubkeyConverterMock(3) aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - "txHash": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - SndAddr: []byte("snd"), - RcvAddr: []byte("rcv"), - Value: big.NewInt(0), - }, 0, big.NewInt(0)), - }, - Logs: []*data.LogData{ - { - LogHandler: &transaction.Log{ - Address: []byte("snd"), - Events: []*transaction.Event{ - { - Address: []byte("snd"), - Identifier: []byte(core.BuiltInFunctionESDTTransfer), - Topics: [][]byte{ - []byte("token0"), big.NewInt(0).Bytes(), big.NewInt(10).Bytes(), []byte("rcv"), - }, + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ + "txHash": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("snd"), + RcvAddr: []byte("rcv"), + Value: big.NewInt(0), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, + }, + Logs: map[string]*transaction.Log{ + "txHash": { + Address: []byte("snd"), + Events: []*transaction.Event{ + { + Address: []byte("snd"), + Identifier: []byte(core.BuiltInFunctionESDTTransfer), + Topics: [][]byte{ + []byte("token0"), big.NewInt(0).Bytes(), big.NewInt(10).Bytes(), []byte("rcv"), }, }, }, @@ -1051,18 +1121,28 @@ func testExtractAlteredAccountsFromPoolReceiverShouldHaveBalanceChanged(t *testi args.AddressConverter = testscommon.NewPubkeyConverterMock(3) aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - "txHash": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - SndAddr: []byte("snd"), - RcvAddr: []byte("rcv"), - Value: big.NewInt(0), - }, 0, big.NewInt(0)), - "txHash2": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - SndAddr: 
[]byte("snd"), - RcvAddr: []byte("rcv"), - Value: big.NewInt(2), - }, 0, big.NewInt(0)), + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ + "txHash": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("snd"), + RcvAddr: []byte("rcv"), + Value: big.NewInt(0), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, + "txHash2": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("snd"), + RcvAddr: []byte("rcv"), + Value: big.NewInt(2), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, }, }, shared.AlteredAccountsOptions{ WithAdditionalOutportData: true, @@ -1104,13 +1184,18 @@ func testExtractAlteredAccountsFromPoolOnlySenderShouldHaveBalanceChanged(t *tes args.AddressConverter = testscommon.NewPubkeyConverterMock(3) aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - "txHash": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - SndAddr: []byte("snd"), - RcvAddr: []byte("rcv"), - Value: big.NewInt(0), - }, 0, big.NewInt(0)), + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ + "txHash": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("snd"), + RcvAddr: []byte("rcv"), + Value: big.NewInt(0), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, }, }, shared.AlteredAccountsOptions{ WithAdditionalOutportData: true, @@ -1153,25 +1238,28 @@ func textExtractAlteredAccountsFromPoolNftCreate(t *testing.T) { args.AddressConverter = testscommon.NewPubkeyConverterMock(3) aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - "txHash": 
outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - SndAddr: []byte("snd"), - RcvAddr: []byte("snd"), - Value: big.NewInt(0), - }, 0, big.NewInt(0)), - }, - Logs: []*data.LogData{ - { - LogHandler: &transaction.Log{ - Address: []byte("snd"), - Events: []*transaction.Event{ - { - Address: []byte("snd"), - Identifier: []byte(core.BuiltInFunctionESDTNFTCreate), - Topics: [][]byte{ - []byte("token0"), big.NewInt(0).Bytes(), big.NewInt(10).Bytes(), []byte("a"), - }, + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ + "txHash": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("snd"), + RcvAddr: []byte("snd"), + Value: big.NewInt(0), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, + }, + Logs: map[string]*transaction.Log{ + "txHash": { + Address: []byte("snd"), + Events: []*transaction.Event{ + { + Address: []byte("snd"), + Identifier: []byte(core.BuiltInFunctionESDTNFTCreate), + Topics: [][]byte{ + []byte("token0"), big.NewInt(0).Bytes(), big.NewInt(10).Bytes(), []byte("a"), }, }, }, @@ -1221,13 +1309,18 @@ func textExtractAlteredAccountsFromPoolTransactionValueNil(t *testing.T) { args.AddressConverter = testscommon.NewPubkeyConverterMock(3) aap, _ := NewAlteredAccountsProvider(args) - res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - "txHash": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - SndAddr: []byte("snd"), - RcvAddr: []byte("rcv"), - Value: nil, - }, 0, big.NewInt(0)), + res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ + "txHash": { + Transaction: &transaction.Transaction{ + SndAddr: []byte("snd"), + RcvAddr: []byte("rcv"), + Value: nil, + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, }, }, shared.AlteredAccountsOptions{ 
WithAdditionalOutportData: true, diff --git a/outport/process/alteredaccounts/tokensProcessor.go b/outport/process/alteredaccounts/tokensProcessor.go index 7807e25d73e..82ae98f7b84 100644 --- a/outport/process/alteredaccounts/tokensProcessor.go +++ b/outport/process/alteredaccounts/tokensProcessor.go @@ -41,12 +41,12 @@ func newTokensProcessor(shardCoordinator sharding.Coordinator) *tokensProcessor } func (tp *tokensProcessor) extractESDTAccounts( - txPool *outportcore.Pool, + txPool *outportcore.TransactionPool, markedAlteredAccounts map[string]*markedAlteredAccount, ) error { var err error for _, txLog := range txPool.Logs { - for _, event := range txLog.LogHandler.GetLogEvents() { + for _, event := range txLog.Events { err = tp.processEvent(event, markedAlteredAccounts) if err != nil { return err diff --git a/testscommon/alteredAccountsProviderStub.go b/testscommon/alteredAccountsProviderStub.go index c6ca8458cae..86e8947ab5e 100644 --- a/testscommon/alteredAccountsProviderStub.go +++ b/testscommon/alteredAccountsProviderStub.go @@ -8,11 +8,11 @@ import ( // AlteredAccountsProviderStub - type AlteredAccountsProviderStub struct { - ExtractAlteredAccountsFromPoolCalled func(txPool *outport.Pool, options shared.AlteredAccountsOptions) (map[string]*alteredAccount.AlteredAccount, error) + ExtractAlteredAccountsFromPoolCalled func(txPool *outport.TransactionPool, options shared.AlteredAccountsOptions) (map[string]*alteredAccount.AlteredAccount, error) } // ExtractAlteredAccountsFromPool - -func (a *AlteredAccountsProviderStub) ExtractAlteredAccountsFromPool(txPool *outport.Pool, options shared.AlteredAccountsOptions) (map[string]*alteredAccount.AlteredAccount, error) { +func (a *AlteredAccountsProviderStub) ExtractAlteredAccountsFromPool(txPool *outport.TransactionPool, options shared.AlteredAccountsOptions) (map[string]*alteredAccount.AlteredAccount, error) { if a.ExtractAlteredAccountsFromPoolCalled != nil { return a.ExtractAlteredAccountsFromPoolCalled(txPool, 
options) } From 906cd2848b8bcb3aa9089a624b9fb798753d6cb6 Mon Sep 17 00:00:00 2001 From: MariusC Date: Wed, 15 Mar 2023 13:35:14 +0200 Subject: [PATCH 055/221] FIX: Accounts parser --- genesis/interface.go | 2 +- genesis/mock/accountsParserStub.go | 6 ++-- genesis/parsing/accountsParser.go | 37 ++++++++++++++------- genesis/parsing/accountsParser_test.go | 46 +++++++++++++------------- genesis/parsing/export_test.go | 2 +- 5 files changed, 53 insertions(+), 40 deletions(-) diff --git a/genesis/interface.go b/genesis/interface.go index 8396e9845a5..1a618a44efe 100644 --- a/genesis/interface.go +++ b/genesis/interface.go @@ -34,7 +34,7 @@ type AccountsParser interface { GenesisMintingAddress() string GetTotalStakedForDelegationAddress(delegationAddress string) *big.Int GetInitialAccountsForDelegated(addressBytes []byte) []InitialAccountHandler - GenerateInitialTransactions(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*IndexingData) ([]*block.MiniBlock, map[uint32]*outport.Pool, error) + GenerateInitialTransactions(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*IndexingData) ([]*block.MiniBlock, map[uint32]*outport.TransactionPool, error) IsInterfaceNil() bool } diff --git a/genesis/mock/accountsParserStub.go b/genesis/mock/accountsParserStub.go index 27066140982..436a8a418de 100644 --- a/genesis/mock/accountsParserStub.go +++ b/genesis/mock/accountsParserStub.go @@ -16,7 +16,7 @@ type AccountsParserStub struct { InitialAccountsCalled func() []genesis.InitialAccountHandler GetTotalStakedForDelegationAddressCalled func(delegationAddress string) *big.Int GetInitialAccountsForDelegatedCalled func(addressBytes []byte) []genesis.InitialAccountHandler - GenerateInitialTransactionsCalled func(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*block.MiniBlock, map[uint32]*outport.Pool, error) + GenerateInitialTransactionsCalled func(shardCoordinator sharding.Coordinator, 
initialIndexingData map[uint32]*genesis.IndexingData) ([]*block.MiniBlock, map[uint32]*outport.TransactionPool, error) GenesisMintingAddressCalled func() string } @@ -75,12 +75,12 @@ func (aps *AccountsParserStub) InitialAccounts() []genesis.InitialAccountHandler } // GenerateInitialTransactions - -func (aps *AccountsParserStub) GenerateInitialTransactions(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*block.MiniBlock, map[uint32]*outport.Pool, error) { +func (aps *AccountsParserStub) GenerateInitialTransactions(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*block.MiniBlock, map[uint32]*outport.TransactionPool, error) { if aps.GenerateInitialTransactionsCalled != nil { return aps.GenerateInitialTransactionsCalled(shardCoordinator, initialIndexingData) } - return make([]*block.MiniBlock, 0), make(map[uint32]*outport.Pool), nil + return make([]*block.MiniBlock, 0), make(map[uint32]*outport.TransactionPool), nil } // IsInterfaceNil - diff --git a/genesis/parsing/accountsParser.go b/genesis/parsing/accountsParser.go index 0dd183b1088..56440fa14b4 100644 --- a/genesis/parsing/accountsParser.go +++ b/genesis/parsing/accountsParser.go @@ -299,13 +299,13 @@ func (ap *accountsParser) GetInitialAccountsForDelegated(addressBytes []byte) [] return list } -func (ap *accountsParser) createIndexerPools(shardIDs []uint32) map[uint32]*outportcore.Pool { - txsPoolPerShard := make(map[uint32]*outportcore.Pool) +func (ap *accountsParser) createIndexerPools(shardIDs []uint32) map[uint32]*outportcore.TransactionPool { + txsPoolPerShard := make(map[uint32]*outportcore.TransactionPool) for _, id := range shardIDs { - txsPoolPerShard[id] = &outportcore.Pool{ - Txs: make(map[string]coreData.TransactionHandlerWithGasUsedAndFee), - Scrs: make(map[string]coreData.TransactionHandlerWithGasUsedAndFee), + txsPoolPerShard[id] = &outportcore.TransactionPool{ + Transactions: 
make(map[string]*outportcore.TxInfo), + SmartContractResults: make(map[string]*outportcore.SCRInfo), } } @@ -385,7 +385,7 @@ func (ap *accountsParser) getAllTxs( func (ap *accountsParser) setScrsTxsPool( shardCoordinator sharding.Coordinator, indexingData map[uint32]*genesis.IndexingData, - txsPoolPerShard map[uint32]*outportcore.Pool, + txsPoolPerShard map[uint32]*outportcore.TransactionPool, ) { for _, id := range indexingData { for txHash, tx := range id.ScrsTxs { @@ -398,8 +398,14 @@ func (ap *accountsParser) setScrsTxsPool( } scrTx.GasLimit = uint64(0) - txsPoolPerShard[senderShardID].Scrs[txHash] = outportcore.NewTransactionHandlerWithGasAndFee(scrTx, 0, big.NewInt(0)) - txsPoolPerShard[receiverShardID].Scrs[txHash] = outportcore.NewTransactionHandlerWithGasAndFee(scrTx, 0, big.NewInt(0)) + txsPoolPerShard[senderShardID].SmartContractResults[txHash] = &outportcore.SCRInfo{ + SmartContractResult: scrTx, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, + } + txsPoolPerShard[receiverShardID].SmartContractResults[txHash] = &outportcore.SCRInfo{ + SmartContractResult: scrTx, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, + } } } } @@ -407,7 +413,7 @@ func (ap *accountsParser) setScrsTxsPool( func (ap *accountsParser) setTxsPoolAndMiniBlocks( shardCoordinator sharding.Coordinator, allTxs []coreData.TransactionHandler, - txsPoolPerShard map[uint32]*outportcore.Pool, + txsPoolPerShard map[uint32]*outportcore.TransactionPool, miniBlocks []*block.MiniBlock, ) error { @@ -433,8 +439,15 @@ func (ap *accountsParser) setTxsPoolAndMiniBlocks( tx.Signature = []byte(common.GenesisTxSignatureString) tx.GasLimit = uint64(0) - txsPoolPerShard[senderShardID].Txs[string(txHash)] = outportcore.NewTransactionHandlerWithGasAndFee(tx, 0, big.NewInt(0)) - txsPoolPerShard[receiverShardID].Txs[string(txHash)] = outportcore.NewTransactionHandlerWithGasAndFee(tx, 0, big.NewInt(0)) + txsPoolPerShard[senderShardID].Transactions[string(txHash)] = &outportcore.TxInfo{ + Transaction: 
tx, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, + } + + txsPoolPerShard[receiverShardID].Transactions[string(txHash)] = &outportcore.TxInfo{ + Transaction: tx, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, + } for _, miniBlock := range miniBlocks { if senderShardID == miniBlock.GetSenderShardID() && @@ -463,7 +476,7 @@ func getNonEmptyMiniBlocks(miniBlocks []*block.MiniBlock) []*block.MiniBlock { func (ap *accountsParser) GenerateInitialTransactions( shardCoordinator sharding.Coordinator, indexingData map[uint32]*genesis.IndexingData, -) ([]*block.MiniBlock, map[uint32]*outportcore.Pool, error) { +) ([]*block.MiniBlock, map[uint32]*outportcore.TransactionPool, error) { if check.IfNil(shardCoordinator) { return nil, nil, genesis.ErrNilShardCoordinator } diff --git a/genesis/parsing/accountsParser_test.go b/genesis/parsing/accountsParser_test.go index 3692f0413f4..de0bdc0b4ed 100644 --- a/genesis/parsing/accountsParser_test.go +++ b/genesis/parsing/accountsParser_test.go @@ -592,17 +592,17 @@ func TestAccountsParser_setScrsTxsPool(t *testing.T) { indexingDataMap[i] = indexingData } - txsPoolPerShard := make(map[uint32]*outport.Pool) + txsPoolPerShard := make(map[uint32]*outport.TransactionPool) for i := uint32(0); i < sharder.NumOfShards; i++ { - txsPoolPerShard[i] = &outport.Pool{ - Scrs: map[string]coreData.TransactionHandlerWithGasUsedAndFee{}, + txsPoolPerShard[i] = &outport.TransactionPool{ + SmartContractResults: map[string]*outport.SCRInfo{}, } } ap.SetScrsTxsPool(sharder, indexingDataMap, txsPoolPerShard) assert.Equal(t, 1, len(txsPoolPerShard)) - assert.Equal(t, uint64(0), txsPoolPerShard[0].Scrs["hash"].GetGasLimit()) - assert.Equal(t, uint64(1), txsPoolPerShard[0].Scrs["hash"].GetNonce()) + assert.Equal(t, uint64(0), txsPoolPerShard[0].SmartContractResults["hash"].SmartContractResult.GetGasLimit()) + assert.Equal(t, uint64(1), txsPoolPerShard[0].SmartContractResults["hash"].SmartContractResult.GetNonce()) } func 
TestAccountsParser_GenerateInitialTransactionsTxsPool(t *testing.T) { @@ -644,21 +644,21 @@ func TestAccountsParser_GenerateInitialTransactionsTxsPool(t *testing.T) { assert.Equal(t, 2, len(miniBlocks)) assert.Equal(t, 3, len(txsPoolPerShard)) - assert.Equal(t, 1, len(txsPoolPerShard[0].Txs)) - assert.Equal(t, 1, len(txsPoolPerShard[1].Txs)) - assert.Equal(t, len(ibs), len(txsPoolPerShard[core.MetachainShardId].Txs)) - assert.Equal(t, 0, len(txsPoolPerShard[0].Scrs)) - assert.Equal(t, 0, len(txsPoolPerShard[1].Scrs)) - assert.Equal(t, 0, len(txsPoolPerShard[core.MetachainShardId].Scrs)) - - for _, tx := range txsPoolPerShard[1].Txs { - assert.Equal(t, ibs[0].GetSupply(), tx.GetValue()) - assert.Equal(t, ibs[0].AddressBytes(), tx.GetRcvAddr()) + assert.Equal(t, 1, len(txsPoolPerShard[0].Transactions)) + assert.Equal(t, 1, len(txsPoolPerShard[1].Transactions)) + assert.Equal(t, len(ibs), len(txsPoolPerShard[core.MetachainShardId].Transactions)) + assert.Equal(t, 0, len(txsPoolPerShard[0].SmartContractResults)) + assert.Equal(t, 0, len(txsPoolPerShard[1].SmartContractResults)) + assert.Equal(t, 0, len(txsPoolPerShard[core.MetachainShardId].SmartContractResults)) + + for _, tx := range txsPoolPerShard[1].Transactions { + assert.Equal(t, ibs[0].GetSupply(), tx.Transaction.GetValue()) + assert.Equal(t, ibs[0].AddressBytes(), tx.Transaction.GetRcvAddr()) } - for _, tx := range txsPoolPerShard[0].Txs { - assert.Equal(t, ibs[1].GetSupply(), tx.GetValue()) - assert.Equal(t, ibs[1].AddressBytes(), tx.GetRcvAddr()) + for _, tx := range txsPoolPerShard[0].Transactions { + assert.Equal(t, ibs[1].GetSupply(), tx.Transaction.GetValue()) + assert.Equal(t, ibs[1].AddressBytes(), tx.Transaction.GetRcvAddr()) } } @@ -691,8 +691,8 @@ func TestAccountsParser_GenerateInitialTransactionsZeroGasLimitShouldWork(t *tes require.Nil(t, err) for i := uint32(0); i < sharder.NumberOfShards(); i++ { - for _, tx := range txsPoolPerShard[i].Txs { - assert.Equal(t, uint64(0), tx.GetGasLimit()) + for 
_, tx := range txsPoolPerShard[i].Transactions { + assert.Equal(t, uint64(0), tx.Transaction.GetGasLimit()) } } } @@ -736,10 +736,10 @@ func TestAccountsParser_GenerateInitialTransactionsVerifyTxsHashes(t *testing.T) assert.Equal(t, 1, len(miniBlocks)) assert.Equal(t, 2, len(txsPoolPerShard)) - assert.Equal(t, 1, len(txsPoolPerShard[0].Txs)) + assert.Equal(t, 1, len(txsPoolPerShard[0].Transactions)) - for hashString, v := range txsPoolPerShard[0].Txs { + for hashString, v := range txsPoolPerShard[0].Transactions { assert.Equal(t, txHash, []byte(hashString)) - assert.Equal(t, tx, v.GetTxHandler()) + assert.Equal(t, tx, v.Transaction) } } diff --git a/genesis/parsing/export_test.go b/genesis/parsing/export_test.go index 58f21b1aa76..e1bbcdcc8d4 100644 --- a/genesis/parsing/export_test.go +++ b/genesis/parsing/export_test.go @@ -43,7 +43,7 @@ func (ap *accountsParser) CreateMintTransactions() []coreData.TransactionHandler func (ap *accountsParser) SetScrsTxsPool( shardCoordinator sharding.Coordinator, indexingData map[uint32]*genesis.IndexingData, - txsPoolPerShard map[uint32]*outport.Pool, + txsPoolPerShard map[uint32]*outport.TransactionPool, ) { ap.setScrsTxsPool(shardCoordinator, indexingData, txsPoolPerShard) } From aea10143dc8f4caed759ebaec72b3c77fe1d6825 Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Thu, 16 Mar 2023 11:46:08 +0200 Subject: [PATCH 056/221] add GetIdentifier to StorageManager interface --- common/interface.go | 1 + testscommon/storageManagerStub.go | 10 ++++++++++ trie/trieStorageManager.go | 11 +++++++++++ 3 files changed, 22 insertions(+) diff --git a/common/interface.go b/common/interface.go index a58b6aa94db..1836d4b51e6 100644 --- a/common/interface.go +++ b/common/interface.go @@ -78,6 +78,7 @@ type StorageManager interface { SetEpochForPutOperation(uint32) ShouldTakeSnapshot() bool GetBaseTrieStorageManager() StorageManager + GetIdentifier() string IsClosed() bool Close() error IsInterfaceNil() bool diff --git 
a/testscommon/storageManagerStub.go b/testscommon/storageManagerStub.go index b7673a4b4cd..924340d4df7 100644 --- a/testscommon/storageManagerStub.go +++ b/testscommon/storageManagerStub.go @@ -28,6 +28,7 @@ type StorageManagerStub struct { IsClosedCalled func() bool RemoveFromCheckpointHashesHolderCalled func([]byte) GetBaseTrieStorageManagerCalled func() common.StorageManager + GetIdentifierCalled func() string } // Put - @@ -214,6 +215,15 @@ func (sms *StorageManagerStub) GetBaseTrieStorageManager() common.StorageManager return nil } +// GetIdentifier - +func (sms *StorageManagerStub) GetIdentifier() string { + if sms.GetIdentifierCalled != nil { + return sms.GetIdentifierCalled() + } + + return "" +} + // IsInterfaceNil - func (sms *StorageManagerStub) IsInterfaceNil() bool { return sms == nil diff --git a/trie/trieStorageManager.go b/trie/trieStorageManager.go index dc50faff711..5ac0979f943 100644 --- a/trie/trieStorageManager.go +++ b/trie/trieStorageManager.go @@ -687,6 +687,17 @@ func (tsm *trieStorageManager) GetBaseTrieStorageManager() common.StorageManager return tsm } +// GetIdentifier returns the identifier of the main storer +func (tsm *trieStorageManager) GetIdentifier() string { + dbWithIdentifier, ok := tsm.mainStorer.(dbWriteCacherWithIdentifier) + if !ok { + log.Warn("trieStorageManager.GetIdentifier mainStorer is not of type dbWriteCacherWithIdentifier", "type", fmt.Sprintf("%T", tsm.mainStorer)) + return "" + } + + return dbWithIdentifier.GetIdentifier() +} + // IsInterfaceNil returns true if there is no value under the interface func (tsm *trieStorageManager) IsInterfaceNil() bool { return tsm == nil From 6d65587633cdb92602c3cd25e5225c90ae3aed0a Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Thu, 16 Mar 2023 12:00:56 +0200 Subject: [PATCH 057/221] add stack trace print if getNodeFromDb error --- trie/node.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/trie/node.go b/trie/node.go index 3dbc6f62c8d..29a829b057a 
100644 --- a/trie/node.go +++ b/trie/node.go @@ -4,6 +4,7 @@ package trie import ( "context" "fmt" + "runtime/debug" "time" "github.com/multiversx/mx-chain-core-go/hashing" @@ -124,7 +125,7 @@ func getNodeFromDBAndDecode(n []byte, db common.DBWriteCacher, marshalizer marsh return nil, errors.NewGetNodeFromDBErrWithKey(n, err, "") } - log.Trace(common.GetNodeFromDBErrorString, "error", err, "key", n) + log.Trace(common.GetNodeFromDBErrorString, "error", err, "key", n, "stack trace", string(debug.Stack())) return nil, errors.NewGetNodeFromDBErrWithKey(n, err, dbWithID.GetIdentifier()) } From 5eaa1842aac13a437871da0ac8fe1af291f5a65b Mon Sep 17 00:00:00 2001 From: MariusC Date: Thu, 16 Mar 2023 12:27:01 +0200 Subject: [PATCH 058/221] FIX: tx sorter --- facade/interface.go | 4 +- factory/processing/processComponents.go | 116 ++++++++++++--- factory/processing/processComponents_test.go | 8 +- go.mod | 2 +- go.sum | 3 +- .../disabled/disabledOutportDataProvider.go | 4 +- outport/process/executionOrder/dtos.go | 10 +- outport/process/executionOrder/scrs.go | 29 ++-- outport/process/executionOrder/scrs_test.go | 27 ++-- .../transactionsExecutionOrder.go | 78 ++++++---- .../transactionsExecutionOrder_test.go | 140 +++++++++--------- 11 files changed, 259 insertions(+), 162 deletions(-) diff --git a/facade/interface.go b/facade/interface.go index 8b884790f67..e5e9907e7d5 100644 --- a/facade/interface.go +++ b/facade/interface.go @@ -5,9 +5,9 @@ import ( "math/big" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/esdt" - outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/debug" @@ -121,7 +121,7 @@ type ApiResolver interface { GetBlockByHash(hash string, options 
api.BlockQueryOptions) (*api.Block, error) GetBlockByNonce(nonce uint64, options api.BlockQueryOptions) (*api.Block, error) GetBlockByRound(round uint64, options api.BlockQueryOptions) (*api.Block, error) - GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*outportcore.AlteredAccount, error) + GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) GetInternalShardBlockByNonce(format common.ApiOutputFormat, nonce uint64) (interface{}, error) GetInternalShardBlockByHash(format common.ApiOutputFormat, hash string) (interface{}, error) GetInternalShardBlockByRound(format common.ApiOutputFormat, round uint64) (interface{}, error) diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index 2196f3e81e2..d56c52b8da5 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -15,6 +15,8 @@ import ( "github.com/multiversx/mx-chain-core-go/data/alteredAccount" dataBlock "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/outport" + "github.com/multiversx/mx-chain-core-go/data/receipt" + "github.com/multiversx/mx-chain-core-go/data/transaction" nodeFactory "github.com/multiversx/mx-chain-go/cmd/node/factory" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" @@ -914,7 +916,7 @@ func (pcf *processComponentsFactory) indexAndReturnGenesisAccounts() (map[string encodedAddress := pcf.coreData.AddressPubKeyConverter().Encode(userAccount.AddressBytes()) genesisAccounts[encodedAddress] = &alteredAccount.AlteredAccount{ - AdditionalData: &outport.AdditionalAccountData{ + AdditionalData: &alteredAccount.AdditionalAccountData{ BalanceChanged: true, }, Address: encodedAddress, @@ -930,7 +932,11 @@ func (pcf *processComponentsFactory) indexAndReturnGenesisAccounts() (map[string } shardID := pcf.bootstrapComponents.ShardCoordinator().SelfId() 
- pcf.statusComponents.OutportHandler().SaveAccounts(uint64(pcf.coreData.GenesisNodesSetup().GetStartTime()), genesisAccounts, shardID) + pcf.statusComponents.OutportHandler().SaveAccounts(&outport.Accounts{ + ShardID: shardID, + BlockTimestamp: uint64(pcf.coreData.GenesisNodesSetup().GetStartTime()), + AlteredAccounts: genesisAccounts, + }) return genesisAccounts, nil } @@ -1128,20 +1134,28 @@ func (pcf *processComponentsFactory) indexGenesisBlocks( Balance: "0", } - _ = genesisBlockHeader.SetTxCount(uint32(len(txsPoolPerShard[currentShardId].Txs))) + _ = genesisBlockHeader.SetTxCount(uint32(len(txsPoolPerShard[currentShardId].Transactions))) + + headerBytes, headerType, err := pcf.getHeaderBytes(genesisBlockHeader) + if err != nil { + return err + } - arg := &outport.ArgsSaveBlockData{ - HeaderHash: genesisBlockHash, - Body: genesisBody, - Header: genesisBlockHeader, - HeaderGasConsumption: outport.HeaderGasConsumption{ + arg := &outport.OutportBlock{ + BlockData: &outport.BlockData{ + HeaderBytes: headerBytes, + HeaderType: string(headerType), + HeaderHash: genesisBlockHash, + Body: genesisBody, + }, + HeaderGasConsumption: &outport.HeaderGasConsumption{ GasProvided: 0, GasRefunded: 0, GasPenalized: 0, MaxGasPerBlock: pcf.coreData.EconomicsData().MaxGasLimitPerBlock(currentShardId), }, - TransactionsPool: txsPoolPerShard[currentShardId], - AlteredAccounts: alteredAccounts, + TransactionPool: txsPoolPerShard[currentShardId], + AlteredAccounts: alteredAccounts, } pcf.statusComponents.OutportHandler().SaveBlock(arg) } @@ -1152,10 +1166,10 @@ func (pcf *processComponentsFactory) indexGenesisBlocks( genesisBlockHash, originalGenesisBlockHeader, genesisBody, - unwrapTxs(txsPoolPerShard[currentShardId].Scrs), - unwrapTxs(txsPoolPerShard[currentShardId].Receipts), + unwrapSCRsInfo(txsPoolPerShard[currentShardId].SmartContractResults), + unwrapReceipts(txsPoolPerShard[currentShardId].Receipts), intraShardMiniBlocks, - txsPoolPerShard[currentShardId].Logs) + 
unwrapLogs(txsPoolPerShard[currentShardId].Logs)) if err != nil { return err } @@ -1167,7 +1181,7 @@ func (pcf *processComponentsFactory) indexGenesisBlocks( } if txsPoolPerShard[currentShardId] != nil { - err = pcf.saveGenesisTxsToStorage(unwrapTxs(txsPoolPerShard[currentShardId].Txs)) + err = pcf.saveGenesisTxsToStorage(unwrapTxsInfo(txsPoolPerShard[currentShardId].Transactions)) if err != nil { return err } @@ -1193,12 +1207,34 @@ func (pcf *processComponentsFactory) indexGenesisBlocks( return nil } +func (pcf *processComponentsFactory) getHeaderBytes(headerHandler data.HeaderHandler) ([]byte, core.HeaderType, error) { + var err error + var headerBytes []byte + var headerType core.HeaderType + + switch header := headerHandler.(type) { + case *dataBlock.MetaBlock: + headerType = core.MetaHeader + headerBytes, err = pcf.coreData.InternalMarshalizer().Marshal(header) + case *dataBlock.Header: + headerType = core.ShardHeaderV1 + headerBytes, err = pcf.coreData.InternalMarshalizer().Marshal(header) + case *dataBlock.HeaderV2: + headerType = core.ShardHeaderV2 + headerBytes, err = pcf.coreData.InternalMarshalizer().Marshal(header) + default: + return nil, "", fmt.Errorf("invalid/unknown header type") + } + + return headerBytes, headerType, err +} + func (pcf *processComponentsFactory) saveAlteredGenesisHeaderToStorage( genesisBlockHeader data.HeaderHandler, genesisBlockHash []byte, genesisBody *dataBlock.Body, intraShardMiniBlocks []*dataBlock.MiniBlock, - txsPoolPerShard map[uint32]*outport.Pool, + txsPoolPerShard map[uint32]*outport.TransactionPool, ) error { currentShardId := pcf.bootstrapComponents.ShardCoordinator().SelfId() @@ -1225,10 +1261,10 @@ func (pcf *processComponentsFactory) saveAlteredGenesisHeaderToStorage( genesisBlockHash, genesisBlockHeader, genesisBody, - unwrapTxs(txsPoolPerShard[currentShardId].Scrs), - unwrapTxs(txsPoolPerShard[currentShardId].Receipts), + unwrapSCRsInfo(txsPoolPerShard[currentShardId].SmartContractResults), + 
unwrapReceipts(txsPoolPerShard[currentShardId].Receipts), intraShardMiniBlocks, - txsPoolPerShard[currentShardId].Logs) + unwrapLogs(txsPoolPerShard[currentShardId].Logs)) if err != nil { return err } @@ -1977,11 +2013,45 @@ func (pc *processComponents) Close() error { return nil } -func unwrapTxs(txs map[string]data.TransactionHandlerWithGasUsedAndFee) map[string]data.TransactionHandler { - output := make(map[string]data.TransactionHandler) - for hash, wrappedTx := range txs { - output[hash] = wrappedTx.GetTxHandler() +func unwrapTxsInfo(txs map[string]*outport.TxInfo) map[string]data.TransactionHandler { + ret := make(map[string]data.TransactionHandler, len(txs)) + for hash, tx := range txs { + ret[hash] = tx.Transaction + } + + return ret +} + +func unwrapSCRsInfo(scrs map[string]*outport.SCRInfo) map[string]data.TransactionHandler { + ret := make(map[string]data.TransactionHandler, len(scrs)) + for hash, scr := range scrs { + ret[hash] = scr.SmartContractResult + } + + return ret +} + +func unwrapReceipts(receipts map[string]*receipt.Receipt) map[string]data.TransactionHandler { + ret := make(map[string]data.TransactionHandler, len(receipts)) + for hash, r := range receipts { + ret[hash] = r + } + + return ret +} + +func unwrapLogs(logs map[string]*transaction.Log) []*data.LogData { + ret := make([]*data.LogData, len(logs)) + + idx := 0 + for hash, lg := range logs { + ret[idx] = &data.LogData{ + LogHandler: lg, + TxHash: hash, + } + + idx++ } - return output + return ret } diff --git a/factory/processing/processComponents_test.go b/factory/processing/processComponents_test.go index d4b3d99c030..a1e46aa8266 100644 --- a/factory/processing/processComponents_test.go +++ b/factory/processing/processComponents_test.go @@ -76,7 +76,7 @@ func TestProcessComponents_IndexGenesisBlocks(t *testing.T) { HasDriversCalled: func() bool { return true }, - SaveBlockCalled: func(args *outportCore.ArgsSaveBlockData) { + SaveBlockCalled: func(args *outportCore.OutportBlock) { 
saveBlockCalledMutex.Lock() require.NotNil(t, args) @@ -84,10 +84,10 @@ func TestProcessComponents_IndexGenesisBlocks(t *testing.T) { MiniBlocks: make([]*block.MiniBlock, 4), } - txsPoolRequired := &outportCore.Pool{} + txsPoolRequired := &outportCore.TransactionPool{} - assert.Equal(t, txsPoolRequired, args.TransactionsPool) - assert.Equal(t, bodyRequired, args.Body) + assert.Equal(t, txsPoolRequired, args.TransactionPool) + assert.Equal(t, bodyRequired, args.BlockData.Body) saveBlockCalledMutex.Unlock() }, } diff --git a/go.mod b/go.mod index e6258e75629..888d60914ab 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-core-go v1.1.35-0.20230314140225-cdd3af9ba37e + github.com/multiversx/mx-chain-core-go v1.1.35-0.20230316095925-a74906deb369 github.com/multiversx/mx-chain-crypto-go v1.2.5 github.com/multiversx/mx-chain-es-indexer-go v1.3.15-0.20230314145155-a08f7f7021bd github.com/multiversx/mx-chain-logger-go v1.0.11 diff --git a/go.sum b/go.sum index a7e5bc35b50..9291c8b2bb8 100644 --- a/go.sum +++ b/go.sum @@ -610,8 +610,9 @@ github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUY github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.31/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.1.35-0.20230314140225-cdd3af9ba37e h1:B7Da37AgvQ1u82tjPv2L6EAhJ+PVPuU/9wq7v2aa0Xs= github.com/multiversx/mx-chain-core-go v1.1.35-0.20230314140225-cdd3af9ba37e/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= +github.com/multiversx/mx-chain-core-go v1.1.35-0.20230316095925-a74906deb369 h1:gkFwa3iiWoJuDZaKl4DNqKYXwK+zUCIAmUgKAHR9D8o= 
+github.com/multiversx/mx-chain-core-go v1.1.35-0.20230316095925-a74906deb369/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-crypto-go v1.2.5 h1:tuq3BUNMhKud5DQbZi9DiVAAHUXypizy8zPH0NpTGZk= github.com/multiversx/mx-chain-crypto-go v1.2.5/go.mod h1:teqhNyWEqfMPgNn8sgWXlgtJ1a36jGCnhs/tRpXW6r4= github.com/multiversx/mx-chain-es-indexer-go v1.3.15-0.20230314145155-a08f7f7021bd h1:IPrhwnzjqCKKlmRd8h/uzYiCCoLOYiVHB5QTFfUbt00= diff --git a/outport/process/disabled/disabledOutportDataProvider.go b/outport/process/disabled/disabledOutportDataProvider.go index 28e0fd4d7c9..dcbace5b465 100644 --- a/outport/process/disabled/disabledOutportDataProvider.go +++ b/outport/process/disabled/disabledOutportDataProvider.go @@ -13,8 +13,8 @@ func NewDisabledOutportDataProvider() *disabledOutportDataProvider { } // PrepareOutportSaveBlockData wil do nothing -func (d *disabledOutportDataProvider) PrepareOutportSaveBlockData(_ process.ArgPrepareOutportSaveBlockData) (*outportcore.ArgsSaveBlockData, error) { - return &outportcore.ArgsSaveBlockData{}, nil +func (d *disabledOutportDataProvider) PrepareOutportSaveBlockData(_ process.ArgPrepareOutportSaveBlockData) (*outportcore.OutportBlock, error) { + return &outportcore.OutportBlock{}, nil } // IsInterfaceNil returns true if there is no value under the interface diff --git a/outport/process/executionOrder/dtos.go b/outport/process/executionOrder/dtos.go index d1cbba8ebd2..dec622f236c 100644 --- a/outport/process/executionOrder/dtos.go +++ b/outport/process/executionOrder/dtos.go @@ -17,13 +17,13 @@ type ArgSorter struct { } type resultsTransactionsToMe struct { - transactionsToMe []data.TransactionHandlerWithGasUsedAndFee - scheduledTransactionsToMe []data.TransactionHandlerWithGasUsedAndFee - scrsToMe map[string]data.TransactionHandlerWithGasUsedAndFee + transactionsToMe []data.TxWithExecutionOrderHandler + scheduledTransactionsToMe []data.TxWithExecutionOrderHandler + scrsToMe 
map[string]data.TxWithExecutionOrderHandler } type resultsTransactionsFromMe struct { - transactionsFromMe []data.TransactionHandlerWithGasUsedAndFee - scheduledTransactionsFromMe []data.TransactionHandlerWithGasUsedAndFee + transactionsFromMe []data.TxWithExecutionOrderHandler + scheduledTransactionsFromMe []data.TxWithExecutionOrderHandler scheduledExecutedInvalidTxsHashesPrevBlock []string } diff --git a/outport/process/executionOrder/scrs.go b/outport/process/executionOrder/scrs.go index 0a22a6fad0e..1eae7a00a53 100644 --- a/outport/process/executionOrder/scrs.go +++ b/outport/process/executionOrder/scrs.go @@ -4,10 +4,9 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/outport" - "github.com/multiversx/mx-chain-core-go/data/smartContractResult" ) -func setOrderSmartContractResults(pool *outport.Pool, scheduledMbsFromPreviousBlock []*block.MiniBlock, scrsToMe map[string]data.TransactionHandlerWithGasUsedAndFee) []string { +func setOrderSmartContractResults(pool *outport.TransactionPool, scheduledMbsFromPreviousBlock []*block.MiniBlock, scrsToMe map[string]data.TxWithExecutionOrderHandler) []string { scheduledExecutedTxsPrevBlockMap := make(map[string]struct{}) for _, mb := range scheduledMbsFromPreviousBlock { for _, txHash := range mb.TxHashes { @@ -16,25 +15,22 @@ func setOrderSmartContractResults(pool *outport.Pool, scheduledMbsFromPreviousBl } scheduledExecutedSCRsPrevBlock := make([]string, 0) - scrsWithNoTxInCurrentShard := make(map[string]map[string]data.TransactionHandlerWithGasUsedAndFee) - for scrHash, scrHandler := range pool.Scrs { - scr, ok := scrHandler.GetTxHandler().(*smartContractResult.SmartContractResult) - if !ok { - continue - } + scrsWithNoTxInCurrentShard := make(map[string]map[string]data.TxWithExecutionOrderHandler) + for scrHash, scrHandler := range pool.SmartContractResults { + scr := scrHandler.SmartContractResult _, 
originalTxWasScheduledExecuted := scheduledExecutedTxsPrevBlockMap[string(scr.OriginalTxHash)] if originalTxWasScheduledExecuted { scheduledExecutedSCRsPrevBlock = append(scheduledExecutedSCRsPrevBlock, scrHash) } - tx, found := pool.Txs[string(scr.OriginalTxHash)] + tx, found := pool.Transactions[string(scr.OriginalTxHash)] if !found { groupScrsWithNoTxInCurrentShard(scrsWithNoTxInCurrentShard, string(scr.OriginalTxHash), scrHandler, scrHash) continue } - scrHandler.SetExecutionOrder(tx.GetExecutionOrder()) + scrHandler.ExecutionOrder = tx.GetExecutionOrder() } setExecutionOrderScrsWithNoTxInCurrentShard(scrsWithNoTxInCurrentShard, scrsToMe) @@ -42,18 +38,23 @@ func setOrderSmartContractResults(pool *outport.Pool, scheduledMbsFromPreviousBl return scheduledExecutedSCRsPrevBlock } -func groupScrsWithNoTxInCurrentShard(scrsWithNoTxInCurrentShard map[string]map[string]data.TransactionHandlerWithGasUsedAndFee, originalTxHash string, scrHandler data.TransactionHandlerWithGasUsedAndFee, scrHash string) { +func groupScrsWithNoTxInCurrentShard( + scrsWithNoTxInCurrentShard map[string]map[string]data.TxWithExecutionOrderHandler, + originalTxHash string, + scrHandler data.TxWithExecutionOrderHandler, + scrHash string, +) { _, ok := scrsWithNoTxInCurrentShard[originalTxHash] if !ok { - scrsWithNoTxInCurrentShard[originalTxHash] = make(map[string]data.TransactionHandlerWithGasUsedAndFee, 0) + scrsWithNoTxInCurrentShard[originalTxHash] = make(map[string]data.TxWithExecutionOrderHandler, 0) } scrsWithNoTxInCurrentShard[originalTxHash][scrHash] = scrHandler } -func setExecutionOrderScrsWithNoTxInCurrentShard(groupedScrsByOriginalTxHash map[string]map[string]data.TransactionHandlerWithGasUsedAndFee, scrsToMe map[string]data.TransactionHandlerWithGasUsedAndFee) { +func setExecutionOrderScrsWithNoTxInCurrentShard(groupedScrsByOriginalTxHash map[string]map[string]data.TxWithExecutionOrderHandler, scrsToMe map[string]data.TxWithExecutionOrderHandler) { for _, scrsGrouped := range 
groupedScrsByOriginalTxHash { - maxOrder := 0 + maxOrder := uint32(0) for _, scr := range scrsGrouped { if maxOrder < scr.GetExecutionOrder() { maxOrder = scr.GetExecutionOrder() diff --git a/outport/process/executionOrder/scrs_test.go b/outport/process/executionOrder/scrs_test.go index 2bf139eb923..2a615237267 100644 --- a/outport/process/executionOrder/scrs_test.go +++ b/outport/process/executionOrder/scrs_test.go @@ -11,9 +11,9 @@ import ( "github.com/stretchr/testify/require" ) -func newScr(nonce uint64, originalTxHash string, execOrder int) data.TransactionHandlerWithGasUsedAndFee { - return &outport.TransactionHandlerWithGasAndFee{ - TransactionHandler: &smartContractResult.SmartContractResult{ +func newScr(nonce uint64, originalTxHash string, execOrder uint32) *outport.SCRInfo { + return &outport.SCRInfo{ + SmartContractResult: &smartContractResult.SmartContractResult{ Nonce: nonce, OriginalTxHash: []byte(originalTxHash), }, @@ -25,11 +25,11 @@ func TestSetOrderSmartContractResults(t *testing.T) { t.Parallel() txHash, txHashNotInPool, scrHash1, scrsHash2, scrsHash3, scrHashToMe := "tx", "txHashNotInPool", "scr1", "scr2", "scr3", "scrHashToMe" - pool := &outport.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - txHash: &outport.TransactionHandlerWithGasAndFee{TransactionHandler: &transaction.Transaction{}, ExecutionOrder: 1}, + pool := &outport.TransactionPool{ + Transactions: map[string]*outport.TxInfo{ + txHash: {Transaction: &transaction.Transaction{}, ExecutionOrder: 1}, }, - Scrs: map[string]data.TransactionHandlerWithGasUsedAndFee{ + SmartContractResults: map[string]*outport.SCRInfo{ scrHash1: newScr(0, txHash, 0), scrsHash2: newScr(1, txHashNotInPool, 0), scrsHash3: newScr(2, txHashNotInPool, 2), @@ -37,15 +37,14 @@ func TestSetOrderSmartContractResults(t *testing.T) { }, } - setOrderSmartContractResults(pool, []*block.MiniBlock{}, map[string]data.TransactionHandlerWithGasUsedAndFee{ - scrHashToMe: newScr(3, txHashNotInPool, 1), - }) 
+ setOrderSmartContractResults(pool, []*block.MiniBlock{}, map[string]data.TxWithExecutionOrderHandler{ + scrHashToMe: newScr(3, txHashNotInPool, 1)}) - require.Equal(t, &outport.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - txHash: &outport.TransactionHandlerWithGasAndFee{TransactionHandler: &transaction.Transaction{}, ExecutionOrder: 1}, + require.Equal(t, &outport.TransactionPool{ + Transactions: map[string]*outport.TxInfo{ + txHash: {Transaction: &transaction.Transaction{}, ExecutionOrder: 1}, }, - Scrs: map[string]data.TransactionHandlerWithGasUsedAndFee{ + SmartContractResults: map[string]*outport.SCRInfo{ scrHash1: newScr(0, txHash, 1), scrsHash2: newScr(1, txHashNotInPool, 2), scrsHash3: newScr(2, txHashNotInPool, 2), diff --git a/outport/process/executionOrder/transactionsExecutionOrder.go b/outport/process/executionOrder/transactionsExecutionOrder.go index 06fe9958968..ebd486c375c 100644 --- a/outport/process/executionOrder/transactionsExecutionOrder.go +++ b/outport/process/executionOrder/transactionsExecutionOrder.go @@ -49,7 +49,7 @@ func NewSorter(arg ArgSorter) (*sorter, error) { // PutExecutionOrderInTransactionPool will put the execution order for every transaction and smart contract result func (s *sorter) PutExecutionOrderInTransactionPool( - pool *outport.Pool, + pool *outport.TransactionPool, header data.HeaderHandler, body data.BodyHandler, prevHeader data.HeaderHandler, @@ -93,7 +93,7 @@ func (s *sorter) PutExecutionOrderInTransactionPool( allTransaction = append(allTransaction, resultTxsFromMe.scheduledTransactionsFromMe...) 
for idx, tx := range allTransaction { - tx.SetExecutionOrder(idx) + tx.SetExecutionOrder(uint32(idx)) } scheduledExecutedSCRSHashesPrevBlock := setOrderSmartContractResults(pool, scheduledMbsFromPreviousBlock, resultsTxsToMe.scrsToMe) @@ -101,7 +101,7 @@ func (s *sorter) PutExecutionOrderInTransactionPool( return scheduledExecutedSCRSHashesPrevBlock, resultTxsFromMe.scheduledExecutedInvalidTxsHashesPrevBlock, nil } -func (s *sorter) sortTransactions(transactions []data.TransactionHandlerWithGasUsedAndFee, header data.HeaderHandler) { +func (s *sorter) sortTransactions(transactions []data.TxWithExecutionOrderHandler, header data.HeaderHandler) { if s.enableEpochsHandler.IsFrontRunningProtectionFlagEnabled() { txsSort.SortTransactionsBySenderAndNonceWithFrontRunningProtectionExtendedTransactions(transactions, s.hasher, header.GetPrevRandSeed()) } else { @@ -110,14 +110,14 @@ func (s *sorter) sortTransactions(transactions []data.TransactionHandlerWithGasU } func (s *sorter) extractTransactionsGroupedFromMe( - pool *outport.Pool, blockBody *block.Body, header data.HeaderHandler, scheduledMbsFromPreviousBlock []*block.MiniBlock, + pool *outport.TransactionPool, blockBody *block.Body, header data.HeaderHandler, scheduledMbsFromPreviousBlock []*block.MiniBlock, ) (*resultsTransactionsFromMe, error) { - transactionsFromMe := make([]data.TransactionHandlerWithGasUsedAndFee, 0) - scheduledTransactionsFromMe := make([]data.TransactionHandlerWithGasUsedAndFee, 0) + transactionsFromMe := make([]data.TxWithExecutionOrderHandler, 0) + scheduledTransactionsFromMe := make([]data.TxWithExecutionOrderHandler, 0) scheduledExecutedInvalidTxsHashesPrevBlock := make([]string, 0) for mbIndex, mb := range blockBody.MiniBlocks { - var txs []data.TransactionHandlerWithGasUsedAndFee + var txs []data.TxWithExecutionOrderHandler var err error if isScheduledMBProcessed(header, mbIndex) { continue @@ -129,7 +129,7 @@ func (s *sorter) extractTransactionsGroupedFromMe( } if mb.Type == block.TxBlock 
{ - txs, err = extractTxsFromMap(mb.TxHashes, pool.Txs) + txs, err = extractTxsFromMap(mb.TxHashes, pool.Transactions) } if mb.Type == block.InvalidBlock { var scheduledExecutedInvalidTxsHashesCurrentMB []string @@ -155,9 +155,9 @@ func (s *sorter) extractTransactionsGroupedFromMe( }, nil } -func (s *sorter) getInvalidTxsExecutedInCurrentBlock(scheduledMbsFromPreviousBlock []*block.MiniBlock, mb *block.MiniBlock, pool *outport.Pool) ([]data.TransactionHandlerWithGasUsedAndFee, []string, error) { +func (s *sorter) getInvalidTxsExecutedInCurrentBlock(scheduledMbsFromPreviousBlock []*block.MiniBlock, mb *block.MiniBlock, pool *outport.TransactionPool) ([]data.TxWithExecutionOrderHandler, []string, error) { if len(scheduledMbsFromPreviousBlock) == 0 { - txs, err := extractTxsFromMap(mb.TxHashes, pool.Invalid) + txs, err := extractTxsFromMap(mb.TxHashes, pool.InvalidTxs) return txs, []string{}, err } @@ -179,18 +179,18 @@ func (s *sorter) getInvalidTxsExecutedInCurrentBlock(scheduledMbsFromPreviousBlo invalidTxHashes = append(invalidTxHashes, hash) } - txs, err := extractTxsFromMap(invalidTxHashes, pool.Invalid) + txs, err := extractTxsFromMap(invalidTxHashes, pool.InvalidTxs) return txs, scheduledExecutedInvalidTxsHashesPrevBlock, err } -func extractNormalTransactionAndScrsToMe(pool *outport.Pool, blockBody *block.Body, header data.HeaderHandler) (*resultsTransactionsToMe, error) { - transactionsToMe := make([]data.TransactionHandlerWithGasUsedAndFee, 0) - scheduledTransactionsToMe := make([]data.TransactionHandlerWithGasUsedAndFee, 0) - scrsToMe := make(map[string]data.TransactionHandlerWithGasUsedAndFee) +func extractNormalTransactionAndScrsToMe(pool *outport.TransactionPool, blockBody *block.Body, header data.HeaderHandler) (*resultsTransactionsToMe, error) { + transactionsToMe := make([]data.TxWithExecutionOrderHandler, 0) + scheduledTransactionsToMe := make([]data.TxWithExecutionOrderHandler, 0) + scrsToMe := make(map[string]data.TxWithExecutionOrderHandler) for 
mbIndex, mb := range blockBody.MiniBlocks { var err error - var txs []data.TransactionHandlerWithGasUsedAndFee + var txs []data.TxWithExecutionOrderHandler if isScheduledMBProcessed(header, mbIndex) { continue } @@ -202,14 +202,14 @@ func extractNormalTransactionAndScrsToMe(pool *outport.Pool, blockBody *block.Bo executedTxsHashes := extractExecutedTxHashes(mbIndex, mb.TxHashes, header) if mb.Type == block.TxBlock { - txs, err = extractTxsFromMap(executedTxsHashes, pool.Txs) + txs, err = extractTxsFromMap(executedTxsHashes, pool.Transactions) } if mb.Type == block.SmartContractResultBlock { - txs, err = extractTxsFromMap(executedTxsHashes, pool.Scrs) - extractAndPutScrsToDestinationMap(executedTxsHashes, pool.Scrs, scrsToMe) + txs, err = extractSCRsFromMap(executedTxsHashes, pool.SmartContractResults) + extractAndPutScrsToDestinationMap(executedTxsHashes, pool.SmartContractResults, scrsToMe) } if mb.Type == block.RewardsBlock { - txs, err = extractTxsFromMap(executedTxsHashes, pool.Rewards) + txs, err = extractRewardsFromMap(executedTxsHashes, pool.Rewards) } if err != nil { return nil, err @@ -229,9 +229,9 @@ func extractNormalTransactionAndScrsToMe(pool *outport.Pool, blockBody *block.Bo }, nil } -func getRewardsTxsFromMe(pool *outport.Pool, blockBody *block.Body, header data.HeaderHandler) ([]data.TransactionHandlerWithGasUsedAndFee, error) { +func getRewardsTxsFromMe(pool *outport.TransactionPool, blockBody *block.Body, header data.HeaderHandler) ([]data.TxWithExecutionOrderHandler, error) { rewardsTxsHashes := make([][]byte, 0) - rewardsTxs := make([]data.TransactionHandlerWithGasUsedAndFee, 0) + rewardsTxs := make([]data.TxWithExecutionOrderHandler, 0) if header.GetShardID() != core.MetachainShardId { return rewardsTxs, nil } @@ -243,11 +243,37 @@ func getRewardsTxsFromMe(pool *outport.Pool, blockBody *block.Body, header data. rewardsTxsHashes = append(rewardsTxsHashes, mb.TxHashes...) 
} - return extractTxsFromMap(rewardsTxsHashes, pool.Rewards) + return extractRewardsFromMap(rewardsTxsHashes, pool.Rewards) } -func extractTxsFromMap(txsHashes [][]byte, txs map[string]data.TransactionHandlerWithGasUsedAndFee) ([]data.TransactionHandlerWithGasUsedAndFee, error) { - result := make([]data.TransactionHandlerWithGasUsedAndFee, 0, len(txsHashes)) +func extractTxsFromMap(txsHashes [][]byte, txs map[string]*outport.TxInfo) ([]data.TxWithExecutionOrderHandler, error) { + result := make([]data.TxWithExecutionOrderHandler, 0, len(txsHashes)) + for _, txHash := range txsHashes { + tx, found := txs[string(txHash)] + if !found { + return nil, fmt.Errorf("cannot find transaction in pool, txHash: %s", hex.EncodeToString(txHash)) + } + result = append(result, tx) + } + + return result, nil +} + +func extractSCRsFromMap(txsHashes [][]byte, txs map[string]*outport.SCRInfo) ([]data.TxWithExecutionOrderHandler, error) { + result := make([]data.TxWithExecutionOrderHandler, 0, len(txsHashes)) + for _, txHash := range txsHashes { + tx, found := txs[string(txHash)] + if !found { + return nil, fmt.Errorf("cannot find transaction in pool, txHash: %s", hex.EncodeToString(txHash)) + } + result = append(result, tx) + } + + return result, nil +} + +func extractRewardsFromMap(txsHashes [][]byte, txs map[string]*outport.RewardInfo) ([]data.TxWithExecutionOrderHandler, error) { + result := make([]data.TxWithExecutionOrderHandler, 0, len(txsHashes)) for _, txHash := range txsHashes { tx, found := txs[string(txHash)] if !found { @@ -271,7 +297,7 @@ func extractExecutedTxHashes(mbIndex int, mbTxHashes [][]byte, header data.Heade return mbTxHashes[firstProcessed : lastProcessed+1] } -func extractAndPutScrsToDestinationMap(scrsHashes [][]byte, scrsMap, destinationMap map[string]data.TransactionHandlerWithGasUsedAndFee) { +func extractAndPutScrsToDestinationMap(scrsHashes [][]byte, scrsMap map[string]*outport.SCRInfo, destinationMap map[string]data.TxWithExecutionOrderHandler) { for _, 
scrHash := range scrsHashes { scr, found := scrsMap[string(scrHash)] if !found { diff --git a/outport/process/executionOrder/transactionsExecutionOrder_test.go b/outport/process/executionOrder/transactionsExecutionOrder_test.go index 5fe94392217..41df8a351b3 100644 --- a/outport/process/executionOrder/transactionsExecutionOrder_test.go +++ b/outport/process/executionOrder/transactionsExecutionOrder_test.go @@ -4,9 +4,9 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/outport" + "github.com/multiversx/mx-chain-core-go/data/receipt" "github.com/multiversx/mx-chain-core-go/data/rewardTx" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-core-go/data/transaction" @@ -128,79 +128,79 @@ func TestAddExecutionOrderInTransactionPool(t *testing.T) { }, } - pool := &outport.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - string(txHashToMe): &outport.TransactionHandlerWithGasAndFee{TransactionHandler: &transaction.Transaction{Nonce: 1}}, - string(txHashFromMe): &outport.TransactionHandlerWithGasAndFee{TransactionHandler: &transaction.Transaction{Nonce: 2}}, + pool := &outport.TransactionPool{ + Transactions: map[string]*outport.TxInfo{ + string(txHashToMe): {Transaction: &transaction.Transaction{Nonce: 1}}, + string(txHashFromMe): {Transaction: &transaction.Transaction{Nonce: 2}}, }, - Scrs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - string(scrHashToMe): &outport.TransactionHandlerWithGasAndFee{TransactionHandler: &smartContractResult.SmartContractResult{Nonce: 3}}, - string(scrHashFromMe): &outport.TransactionHandlerWithGasAndFee{TransactionHandler: &smartContractResult.SmartContractResult{ + SmartContractResults: map[string]*outport.SCRInfo{ + string(scrHashToMe): {SmartContractResult: &smartContractResult.SmartContractResult{Nonce: 
3}}, + string(scrHashFromMe): {SmartContractResult: &smartContractResult.SmartContractResult{ Nonce: 4, OriginalTxHash: txHashToMe, }}, - string(scrHashIntra): &outport.TransactionHandlerWithGasAndFee{TransactionHandler: &smartContractResult.SmartContractResult{ + string(scrHashIntra): {SmartContractResult: &smartContractResult.SmartContractResult{ Nonce: 0, OriginalTxHash: txHashToMe, }}, }, - Rewards: map[string]data.TransactionHandlerWithGasUsedAndFee{ - string(rewardTxHash): &outport.TransactionHandlerWithGasAndFee{TransactionHandler: &rewardTx.RewardTx{}}, + Rewards: map[string]*outport.RewardInfo{ + string(rewardTxHash): {Reward: &rewardTx.RewardTx{}}, }, - Invalid: map[string]data.TransactionHandlerWithGasUsedAndFee{ - string(invalidTxHash): &outport.TransactionHandlerWithGasAndFee{TransactionHandler: &transaction.Transaction{Nonce: 5}}, + InvalidTxs: map[string]*outport.TxInfo{ + string(invalidTxHash): {Transaction: &transaction.Transaction{Nonce: 5}}, }, - Receipts: map[string]data.TransactionHandlerWithGasUsedAndFee{}, + Receipts: map[string]*receipt.Receipt{}, Logs: nil, } _, _, err := s.PutExecutionOrderInTransactionPool(pool, header, blockBody, &block.Header{}) require.Nil(t, err) - require.Equal(t, &outport.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - string(txHashToMe): &outport.TransactionHandlerWithGasAndFee{ - TransactionHandler: &transaction.Transaction{Nonce: 1}, - ExecutionOrder: 0, + require.Equal(t, &outport.TransactionPool{ + Transactions: map[string]*outport.TxInfo{ + string(txHashToMe): { + Transaction: &transaction.Transaction{Nonce: 1}, + ExecutionOrder: 0, }, - string(txHashFromMe): &outport.TransactionHandlerWithGasAndFee{ - TransactionHandler: &transaction.Transaction{Nonce: 2}, - ExecutionOrder: 3, + string(txHashFromMe): { + Transaction: &transaction.Transaction{Nonce: 2}, + ExecutionOrder: 3, }, }, - Scrs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - string(scrHashToMe): 
&outport.TransactionHandlerWithGasAndFee{ - TransactionHandler: &smartContractResult.SmartContractResult{Nonce: 3}, - ExecutionOrder: 1, + SmartContractResults: map[string]*outport.SCRInfo{ + string(scrHashToMe): { + SmartContractResult: &smartContractResult.SmartContractResult{Nonce: 3}, + ExecutionOrder: 1, }, - string(scrHashFromMe): &outport.TransactionHandlerWithGasAndFee{ - TransactionHandler: &smartContractResult.SmartContractResult{ + string(scrHashFromMe): { + SmartContractResult: &smartContractResult.SmartContractResult{ Nonce: 4, OriginalTxHash: txHashToMe, }, ExecutionOrder: 0, }, - string(scrHashIntra): &outport.TransactionHandlerWithGasAndFee{ - TransactionHandler: &smartContractResult.SmartContractResult{ + string(scrHashIntra): { + SmartContractResult: &smartContractResult.SmartContractResult{ Nonce: 0, OriginalTxHash: txHashToMe, }, ExecutionOrder: 0, }, }, - Rewards: map[string]data.TransactionHandlerWithGasUsedAndFee{ - string(rewardTxHash): &outport.TransactionHandlerWithGasAndFee{ - TransactionHandler: &rewardTx.RewardTx{}, - ExecutionOrder: 2, + Rewards: map[string]*outport.RewardInfo{ + string(rewardTxHash): { + Reward: &rewardTx.RewardTx{}, + ExecutionOrder: 2, }, }, - Invalid: map[string]data.TransactionHandlerWithGasUsedAndFee{ - string(invalidTxHash): &outport.TransactionHandlerWithGasAndFee{ - TransactionHandler: &transaction.Transaction{Nonce: 5}, - ExecutionOrder: 4, + InvalidTxs: map[string]*outport.TxInfo{ + string(invalidTxHash): { + Transaction: &transaction.Transaction{Nonce: 5}, + ExecutionOrder: 4, }, }, - Receipts: map[string]data.TransactionHandlerWithGasUsedAndFee{}, + Receipts: map[string]*receipt.Receipt{}, Logs: nil, }, pool) } @@ -254,25 +254,25 @@ func TestAddExecutionOrderInTransactionPoolFromMeTransactionAndScheduled(t *test }, } - pool := &outport.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - string(firstTxHash): &outport.TransactionHandlerWithGasAndFee{TransactionHandler: 
&transaction.Transaction{Nonce: 1}}, - string(secondTxHash): &outport.TransactionHandlerWithGasAndFee{TransactionHandler: &transaction.Transaction{Nonce: 2}}, + pool := &outport.TransactionPool{ + Transactions: map[string]*outport.TxInfo{ + string(firstTxHash): {Transaction: &transaction.Transaction{Nonce: 1}}, + string(secondTxHash): {Transaction: &transaction.Transaction{Nonce: 2}}, }, } _, _, err := s.PutExecutionOrderInTransactionPool(pool, header, blockBody, &block.Header{}) require.Nil(t, err) - require.Equal(t, &outport.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - string(firstTxHash): &outport.TransactionHandlerWithGasAndFee{ - TransactionHandler: &transaction.Transaction{Nonce: 1}, - ExecutionOrder: 0, + require.Equal(t, &outport.TransactionPool{ + Transactions: map[string]*outport.TxInfo{ + string(firstTxHash): { + Transaction: &transaction.Transaction{Nonce: 1}, + ExecutionOrder: 0, }, - string(secondTxHash): &outport.TransactionHandlerWithGasAndFee{ - TransactionHandler: &transaction.Transaction{Nonce: 2}, - ExecutionOrder: 1, + string(secondTxHash): { + Transaction: &transaction.Transaction{Nonce: 2}, + ExecutionOrder: 1, }, }, }, pool) @@ -367,15 +367,15 @@ func TestAddExecutionOrderInTransactionPoolFromMeTransactionAndScheduledInvalid( }, } - pool := &outport.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - string(secondTxHash): &outport.TransactionHandlerWithGasAndFee{TransactionHandler: &transaction.Transaction{Nonce: 2}}, + pool := &outport.TransactionPool{ + Transactions: map[string]*outport.TxInfo{ + string(secondTxHash): {Transaction: &transaction.Transaction{Nonce: 2}}, }, - Invalid: map[string]data.TransactionHandlerWithGasUsedAndFee{ - string(firstTxHash): &outport.TransactionHandlerWithGasAndFee{TransactionHandler: &transaction.Transaction{Nonce: 1}}, + InvalidTxs: map[string]*outport.TxInfo{ + string(firstTxHash): {Transaction: &transaction.Transaction{Nonce: 1}}, }, - Scrs: 
map[string]data.TransactionHandlerWithGasUsedAndFee{ - string(scrHash): &outport.TransactionHandlerWithGasAndFee{TransactionHandler: &smartContractResult.SmartContractResult{ + SmartContractResults: map[string]*outport.SCRInfo{ + string(scrHash): {SmartContractResult: &smartContractResult.SmartContractResult{ Nonce: 3, OriginalTxHash: scheduledTx, }}, @@ -384,22 +384,22 @@ func TestAddExecutionOrderInTransactionPoolFromMeTransactionAndScheduledInvalid( scrsHashes, invalidTxsHashes, err := s.PutExecutionOrderInTransactionPool(pool, header, blockBody, prevHeader) require.Nil(t, err) - require.Equal(t, &outport.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - string(secondTxHash): &outport.TransactionHandlerWithGasAndFee{ - TransactionHandler: &transaction.Transaction{Nonce: 2}, - ExecutionOrder: 1, + require.Equal(t, &outport.TransactionPool{ + Transactions: map[string]*outport.TxInfo{ + string(secondTxHash): { + Transaction: &transaction.Transaction{Nonce: 2}, + ExecutionOrder: 1, }, }, - Invalid: map[string]data.TransactionHandlerWithGasUsedAndFee{ - string(firstTxHash): &outport.TransactionHandlerWithGasAndFee{ - TransactionHandler: &transaction.Transaction{Nonce: 1}, - ExecutionOrder: 0, + InvalidTxs: map[string]*outport.TxInfo{ + string(firstTxHash): { + Transaction: &transaction.Transaction{Nonce: 1}, + ExecutionOrder: 0, }, }, - Scrs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - string(scrHash): &outport.TransactionHandlerWithGasAndFee{ - TransactionHandler: &smartContractResult.SmartContractResult{ + SmartContractResults: map[string]*outport.SCRInfo{ + string(scrHash): { + SmartContractResult: &smartContractResult.SmartContractResult{ Nonce: 3, OriginalTxHash: scheduledTx, }, From 25dc1d432a5e4375b9751f7c21ebe13e7765eaf3 Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Thu, 16 Mar 2023 12:47:46 +0200 Subject: [PATCH 059/221] refactor log prints --- trie/node.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff 
--git a/trie/node.go b/trie/node.go index 29a829b057a..190bb41dd9b 100644 --- a/trie/node.go +++ b/trie/node.go @@ -119,13 +119,14 @@ func computeAndSetNodeHash(n node) ([]byte, error) { func getNodeFromDBAndDecode(n []byte, db common.DBWriteCacher, marshalizer marshal.Marshalizer, hasher hashing.Hasher) (node, error) { encChild, err := db.Get(n) if err != nil { + log.Trace(common.GetNodeFromDBErrorString, "error", err, "key", n, "stack trace", string(debug.Stack())) + dbWithID, ok := db.(dbWriteCacherWithIdentifier) if !ok { - log.Warn(common.GetNodeFromDBErrorString, "error", err, "key", n, "db type", fmt.Sprintf("%T", db)) + log.Warn("db does not have an identifier", "db type", fmt.Sprintf("%T", db)) return nil, errors.NewGetNodeFromDBErrWithKey(n, err, "") } - log.Trace(common.GetNodeFromDBErrorString, "error", err, "key", n, "stack trace", string(debug.Stack())) return nil, errors.NewGetNodeFromDBErrWithKey(n, err, dbWithID.GetIdentifier()) } From 578c7a528450cca30603fb383a1f5d963c5ba506 Mon Sep 17 00:00:00 2001 From: MariusC Date: Thu, 16 Mar 2023 14:19:40 +0200 Subject: [PATCH 060/221] FIX: tx fee processor --- .../process/transactionsfee/dataHolders.go | 42 +-- .../transactionsfee/dataHolders_test.go | 62 ++-- .../transactionsfee/transactionChecker.go | 8 +- .../transactionsFeeProcessor.go | 45 +-- .../transactionsFeeProcessor_test.go | 276 ++++++++++-------- 5 files changed, 238 insertions(+), 195 deletions(-) diff --git a/outport/process/transactionsfee/dataHolders.go b/outport/process/transactionsfee/dataHolders.go index c0f8b518afc..03ca3f0f91e 100644 --- a/outport/process/transactionsfee/dataHolders.go +++ b/outport/process/transactionsfee/dataHolders.go @@ -3,54 +3,58 @@ package transactionsfee import ( "github.com/multiversx/mx-chain-core-go/data" outportcore "github.com/multiversx/mx-chain-core-go/data/outport" - "github.com/multiversx/mx-chain-core-go/data/smartContractResult" ) +type txHandlerWithFeeInfo interface { + GetTxHandler() 
data.TransactionHandler + GetFeeInfo() *outportcore.FeeInfo +} + type transactionWithResults struct { - data.TransactionHandlerWithGasUsedAndFee - scrs []data.TransactionHandlerWithGasUsedAndFee + txHandlerWithFeeInfo + scrs []txHandlerWithFeeInfo log *data.LogData } type transactionsAndScrsHolder struct { txsWithResults map[string]*transactionWithResults - scrsNoTx map[string]data.TransactionHandlerWithGasUsedAndFee + scrsNoTx map[string]txHandlerWithFeeInfo } func newTransactionsAndScrsHolder(nrTxs, nrScrs int) *transactionsAndScrsHolder { return &transactionsAndScrsHolder{ txsWithResults: make(map[string]*transactionWithResults, nrTxs), - scrsNoTx: make(map[string]data.TransactionHandlerWithGasUsedAndFee, nrScrs), + scrsNoTx: make(map[string]txHandlerWithFeeInfo, nrScrs), } } -func prepareTransactionsAndScrs(txPool *outportcore.Pool) *transactionsAndScrsHolder { - totalTxs := len(txPool.Txs) + len(txPool.Invalid) + len(txPool.Rewards) - if totalTxs == 0 && len(txPool.Scrs) == 0 { +func prepareTransactionsAndScrs(txPool *outportcore.TransactionPool) *transactionsAndScrsHolder { + totalTxs := len(txPool.Transactions) + len(txPool.InvalidTxs) + len(txPool.Rewards) + if totalTxs == 0 && len(txPool.SmartContractResults) == 0 { return newTransactionsAndScrsHolder(0, 0) } - transactionsAndScrs := newTransactionsAndScrsHolder(totalTxs, len(txPool.Scrs)) - for txHash, tx := range txPool.Txs { + transactionsAndScrs := newTransactionsAndScrsHolder(totalTxs, len(txPool.SmartContractResults)) + for txHash, tx := range txPool.Transactions { transactionsAndScrs.txsWithResults[txHash] = &transactionWithResults{ - TransactionHandlerWithGasUsedAndFee: tx, + txHandlerWithFeeInfo: tx, } } - for _, txLog := range txPool.Logs { - txWithResults, ok := transactionsAndScrs.txsWithResults[txLog.TxHash] + for txHash, txLog := range txPool.Logs { + txWithResults, ok := transactionsAndScrs.txsWithResults[txHash] if !ok { continue } - txWithResults.log = txLog + txWithResults.log = 
&data.LogData{ + LogHandler: txLog, + TxHash: txHash, + } } - for scrHash, scrHandler := range txPool.Scrs { - scr, ok := scrHandler.GetTxHandler().(*smartContractResult.SmartContractResult) - if !ok { - continue - } + for scrHash, scrHandler := range txPool.SmartContractResults { + scr := scrHandler.SmartContractResult txWithResults, ok := transactionsAndScrs.txsWithResults[string(scr.OriginalTxHash)] if !ok { diff --git a/outport/process/transactionsfee/dataHolders_test.go b/outport/process/transactionsfee/dataHolders_test.go index fbee9eec883..f5de81d2aa5 100644 --- a/outport/process/transactionsfee/dataHolders_test.go +++ b/outport/process/transactionsfee/dataHolders_test.go @@ -4,7 +4,6 @@ import ( "math/big" "testing" - "github.com/multiversx/mx-chain-core-go/data" outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-core-go/data/transaction" @@ -18,35 +17,50 @@ func TestTransactionsAndScrsHolder(t *testing.T) { scrHash1 := "scrHash1" scrHash2 := "scrHash2" scrHash3 := "scrHash3" - pool := &outportcore.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - txHash: outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - Nonce: 1, - }, 0, big.NewInt(0)), + pool := &outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ + txHash: { + Transaction: &transaction.Transaction{ + Nonce: 1, + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, }, - Scrs: map[string]data.TransactionHandlerWithGasUsedAndFee{ - scrHash1: outportcore.NewTransactionHandlerWithGasAndFee(&smartContractResult.SmartContractResult{ - Nonce: 2, - OriginalTxHash: []byte(txHash), - }, 0, big.NewInt(0)), + SmartContractResults: map[string]*outportcore.SCRInfo{ + scrHash1: { + SmartContractResult: &smartContractResult.SmartContractResult{ + Nonce: 2, + OriginalTxHash: []byte(txHash), + }, + FeeInfo: &outportcore.FeeInfo{ + 
Fee: big.NewInt(0), + }, + }, - scrHash2: outportcore.NewTransactionHandlerWithGasAndFee(&smartContractResult.SmartContractResult{}, 0, big.NewInt(0)), - scrHash3: outportcore.NewTransactionHandlerWithGasAndFee(&smartContractResult.SmartContractResult{ - Nonce: 3, - OriginalTxHash: []byte(txHash), - }, 0, big.NewInt(0)), - }, - Logs: []*data.LogData{ - { - TxHash: "hash", + scrHash2: { + SmartContractResult: &smartContractResult.SmartContractResult{}, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, }, - { - TxHash: txHash, - LogHandler: &transaction.Log{ - Address: []byte("addr"), + scrHash3: { + SmartContractResult: &smartContractResult.SmartContractResult{ + Nonce: 3, + OriginalTxHash: []byte(txHash), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), }, }, }, + Logs: map[string]*transaction.Log{ + "hash": {}, + txHash: { + Address: []byte("addr"), + }, + }, } res := prepareTransactionsAndScrs(pool) diff --git a/outport/process/transactionsfee/transactionChecker.go b/outport/process/transactionsfee/transactionChecker.go index 593ab51a08c..80435e6f094 100644 --- a/outport/process/transactionsfee/transactionChecker.go +++ b/outport/process/transactionsfee/transactionChecker.go @@ -12,7 +12,7 @@ import ( vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) -func (tep *transactionsFeeProcessor) isESDTOperationWithSCCall(tx data.TransactionHandlerWithGasUsedAndFee) bool { +func (tep *transactionsFeeProcessor) isESDTOperationWithSCCall(tx data.TransactionHandler) bool { res := tep.dataFieldParser.Parse(tx.GetData(), tx.GetSndAddr(), tx.GetRcvAddr(), tep.shardCoordinator.NumberOfShards()) isESDTTransferOperation := res.Operation == core.BuiltInFunctionESDTTransfer || @@ -37,7 +37,7 @@ func (tep *transactionsFeeProcessor) isESDTOperationWithSCCall(tx data.Transacti return isESDTTransferOperation && isReceiverSC && hasFunction } -func isSCRForSenderWithRefund(scr *smartContractResult.SmartContractResult, txHash []byte, tx 
data.TransactionHandlerWithGasUsedAndFee) bool { +func isSCRForSenderWithRefund(scr *smartContractResult.SmartContractResult, txHash []byte, tx data.TransactionHandler) bool { isForSender := bytes.Equal(scr.RcvAddr, tx.GetSndAddr()) isRightNonce := scr.Nonce == tx.GetNonce()+1 isFromCurrentTx := bytes.Equal(scr.PrevTxHash, txHash) @@ -46,7 +46,7 @@ func isSCRForSenderWithRefund(scr *smartContractResult.SmartContractResult, txHa return isFromCurrentTx && isForSender && isRightNonce && isScrDataOk } -func isRefundForRelayed(dbScResult *smartContractResult.SmartContractResult, tx data.TransactionHandlerWithGasUsedAndFee) bool { +func isRefundForRelayed(dbScResult *smartContractResult.SmartContractResult, tx data.TransactionHandler) bool { isForRelayed := string(dbScResult.ReturnMessage) == core.GasRefundForRelayerMessage isForSender := bytes.Equal(dbScResult.RcvAddr, tx.GetSndAddr()) differentHash := !bytes.Equal(dbScResult.OriginalTxHash, dbScResult.PrevTxHash) @@ -73,7 +73,7 @@ func isSCRWithRefundNoTx(scr *smartContractResult.SmartContractResult) bool { } func isRelayedTx(tx *transactionWithResults) bool { - txData := string(tx.GetData()) + txData := string(tx.GetTxHandler().GetData()) isRelayed := strings.HasPrefix(txData, core.RelayedTransaction) || strings.HasPrefix(txData, core.RelayedTransactionV2) return isRelayed && len(tx.scrs) > 0 } diff --git a/outport/process/transactionsfee/transactionsFeeProcessor.go b/outport/process/transactionsfee/transactionsFeeProcessor.go index 75696f2d83f..6d5aa5b4eb8 100644 --- a/outport/process/transactionsfee/transactionsFeeProcessor.go +++ b/outport/process/transactionsfee/transactionsFeeProcessor.go @@ -96,17 +96,20 @@ func (tep *transactionsFeeProcessor) prepareInvalidTxs(pool *outportcore.Transac func (tep *transactionsFeeProcessor) prepareNormalTxs(transactionsAndScrs *transactionsAndScrsHolder) { for txHash, txWithResult := range transactionsAndScrs.txsWithResults { - gasUsed := 
tep.txFeeCalculator.ComputeGasLimit(txWithResult) - fee := tep.txFeeCalculator.ComputeTxFeeBasedOnGasUsed(txWithResult, gasUsed) - initialPaidFee := tep.txFeeCalculator.ComputeTxFeeBasedOnGasUsed(txWithResult, txWithResult.GetGasLimit()) + txHandler := txWithResult.GetTxHandler() - txWithResult.SetGasUsed(gasUsed) - txWithResult.SetFee(fee) - txWithResult.SetInitialPaidFee(initialPaidFee) + gasUsed := tep.txFeeCalculator.ComputeGasLimit(txHandler) + fee := tep.txFeeCalculator.ComputeTxFeeBasedOnGasUsed(txHandler, gasUsed) + initialPaidFee := tep.txFeeCalculator.ComputeTxFeeBasedOnGasUsed(txHandler, txHandler.GetGasLimit()) - if isRelayedTx(txWithResult) || tep.isESDTOperationWithSCCall(txWithResult) { - txWithResult.SetGasUsed(txWithResult.GetGasLimit()) - txWithResult.SetFee(initialPaidFee) + feeInfo := txWithResult.GetFeeInfo() + feeInfo.SetGasUsed(gasUsed) + feeInfo.SetFee(fee) + feeInfo.SetInitialPaidFee(initialPaidFee) + + if isRelayedTx(txWithResult) || tep.isESDTOperationWithSCCall(txHandler) { + feeInfo.SetGasUsed(txWithResult.GetTxHandler().GetGasLimit()) + feeInfo.SetFee(initialPaidFee) } tep.prepareTxWithResults([]byte(txHash), txWithResult) @@ -121,11 +124,11 @@ func (tep *transactionsFeeProcessor) prepareTxWithResults(txHash []byte, txWithR continue } - if isSCRForSenderWithRefund(scr, txHash, txWithResults) || isRefundForRelayed(scr, txWithResults) { - gasUsed, fee := tep.txFeeCalculator.ComputeGasUsedAndFeeBasedOnRefundValue(txWithResults, scr.Value) + if isSCRForSenderWithRefund(scr, txHash, txWithResults.GetTxHandler()) || isRefundForRelayed(scr, txWithResults.GetTxHandler()) { + gasUsed, fee := tep.txFeeCalculator.ComputeGasUsedAndFeeBasedOnRefundValue(txWithResults.GetTxHandler(), scr.Value) - txWithResults.SetGasUsed(gasUsed) - txWithResults.SetFee(fee) + txWithResults.GetFeeInfo().SetGasUsed(gasUsed) + txWithResults.GetFeeInfo().SetFee(fee) hasRefund = true break } @@ -145,16 +148,16 @@ func (tep *transactionsFeeProcessor) 
prepareTxWithResultsBasedOnLogs( for _, event := range txWithResults.log.GetLogEvents() { if core.WriteLogIdentifier == string(event.GetIdentifier()) && !hasRefund { - gasUsed, fee := tep.txFeeCalculator.ComputeGasUsedAndFeeBasedOnRefundValue(txWithResults, big.NewInt(0)) - txWithResults.SetGasUsed(gasUsed) - txWithResults.SetFee(fee) + gasUsed, fee := tep.txFeeCalculator.ComputeGasUsedAndFeeBasedOnRefundValue(txWithResults.GetTxHandler(), big.NewInt(0)) + txWithResults.GetFeeInfo().SetGasUsed(gasUsed) + txWithResults.GetFeeInfo().SetFee(fee) continue } if core.SignalErrorOperation == string(event.GetIdentifier()) { - fee := tep.txFeeCalculator.ComputeTxFeeBasedOnGasUsed(txWithResults, txWithResults.GetGasLimit()) - txWithResults.SetGasUsed(txWithResults.GetGasLimit()) - txWithResults.SetFee(fee) + fee := tep.txFeeCalculator.ComputeTxFeeBasedOnGasUsed(txWithResults.GetTxHandler(), txWithResults.GetTxHandler().GetGasLimit()) + txWithResults.GetFeeInfo().SetGasUsed(txWithResults.GetTxHandler().GetGasLimit()) + txWithResults.GetFeeInfo().SetFee(fee) } } @@ -183,8 +186,8 @@ func (tep *transactionsFeeProcessor) prepareScrsNoTx(transactionsAndScrs *transa gasUsed, fee := tep.txFeeCalculator.ComputeGasUsedAndFeeBasedOnRefundValue(txFromStorage, scr.Value) - scrHandler.SetGasUsed(gasUsed) - scrHandler.SetFee(fee) + scrHandler.GetFeeInfo().SetGasUsed(gasUsed) + scrHandler.GetFeeInfo().SetFee(fee) } return nil diff --git a/outport/process/transactionsfee/transactionsFeeProcessor_test.go b/outport/process/transactionsfee/transactionsFeeProcessor_test.go index cdd0be3eb0c..5e8d6f4061d 100644 --- a/outport/process/transactionsfee/transactionsFeeProcessor_test.go +++ b/outport/process/transactionsfee/transactionsFeeProcessor_test.go @@ -6,7 +6,6 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/pubkeyConverter" - coreData "github.com/multiversx/mx-chain-core-go/data" outportcore 
"github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-core-go/data/transaction" @@ -65,15 +64,18 @@ func TestPutFeeAndGasUsedTx1(t *testing.T) { scrHash1 := []byte("scrHash1") scrWithRefund := []byte("scrWithRefund") refundValueBig, _ := big.NewInt(0).SetString("86271830000000", 10) - initialTx := outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - Nonce: 1196667, - SndAddr: []byte("erd1k7j6ewjsla4zsgv8v6f6fe3dvrkgv3d0d9jerczw45hzedhyed8sh2u34u"), - RcvAddr: []byte("erd14eyayfrvlrhzfrwg5zwleua25mkzgncggn35nvc6xhv5yxwml2es0f3dht"), - GasLimit: 16610000, - GasPrice: 1000000000, - Data: []byte("relayedTx@7b226e6f6e6365223a322c2276616c7565223a302c227265636569766572223a22414141414141414141414146414974673738352f736c73554148686b57334569624c6e47524b76496f4e4d3d222c2273656e646572223a22726b6e534a477a343769534e794b43642f504f717075776b5477684534306d7a476a585a51686e622b724d3d222c226761735072696365223a313030303030303030302c226761734c696d6974223a31353030303030302c2264617461223a22633246325a5546306447567a644746306157397551444668597a49314d6a5935596d51335a44497759324a6959544d31596d566c4f4459314d4464684f574e6a4e7a677a5a4755774f445a694e4445334e546b345a54517a59544e6b5a6a566a593245795a5468684d6a6c414d6a51344e54677a4d574e6d4d5445304d54566d596a41354d6a63774e4451324e5755324e7a597a59574d314f4445345a5467314e4751345957526d4e54417a596a63354d6a6c6b4f54526c4e6d49794e6a49775a673d3d222c22636861696e4944223a224d513d3d222c2276657273696f6e223a312c227369676e6174757265223a225239462b34546352415a386d7771324559303163596c337662716c46657176387a76474a775a6833594d4f556b4234643451574e66376744626c484832576b71614a76614845744356617049713365356562384e41773d3d227d"), - Value: big.NewInt(0), - }, 0, big.NewInt(0)) + initialTx := &outportcore.TxInfo{ + Transaction: &transaction.Transaction{ + Nonce: 1196667, + SndAddr: []byte("erd1k7j6ewjsla4zsgv8v6f6fe3dvrkgv3d0d9jerczw45hzedhyed8sh2u34u"), + 
RcvAddr: []byte("erd14eyayfrvlrhzfrwg5zwleua25mkzgncggn35nvc6xhv5yxwml2es0f3dht"), + GasLimit: 16610000, + GasPrice: 1000000000, + Data: []byte("relayedTx@7b226e6f6e6365223a322c2276616c7565223a302c227265636569766572223a22414141414141414141414146414974673738352f736c73554148686b57334569624c6e47524b76496f4e4d3d222c2273656e646572223a22726b6e534a477a343769534e794b43642f504f717075776b5477684534306d7a476a585a51686e622b724d3d222c226761735072696365223a313030303030303030302c226761734c696d6974223a31353030303030302c2264617461223a22633246325a5546306447567a644746306157397551444668597a49314d6a5935596d51335a44497759324a6959544d31596d566c4f4459314d4464684f574e6a4e7a677a5a4755774f445a694e4445334e546b345a54517a59544e6b5a6a566a593245795a5468684d6a6c414d6a51344e54677a4d574e6d4d5445304d54566d596a41354d6a63774e4451324e5755324e7a597a59574d314f4445345a5467314e4751345957526d4e54417a596a63354d6a6c6b4f54526c4e6d49794e6a49775a673d3d222c22636861696e4944223a224d513d3d222c2276657273696f6e223a312c227369676e6174757265223a225239462b34546352415a386d7771324559303163596c337662716c46657176387a76474a775a6833594d4f556b4234643451574e66376744626c484832576b71614a76614845744356617049713365356562384e41773d3d227d"), + Value: big.NewInt(0), + }, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, + } scr1 := &smartContractResult.SmartContractResult{ Nonce: 2, @@ -86,23 +88,32 @@ func TestPutFeeAndGasUsedTx1(t *testing.T) { OriginalTxHash: txHash, } - pool := &outportcore.Pool{ - Txs: map[string]coreData.TransactionHandlerWithGasUsedAndFee{ + pool := &outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ string(txHash): initialTx, }, - Scrs: map[string]coreData.TransactionHandlerWithGasUsedAndFee{ - "wrong": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{}, 0, big.NewInt(0)), - string(scrHash1): outportcore.NewTransactionHandlerWithGasAndFee(scr1, 0, big.NewInt(0)), - string(scrWithRefund): 
outportcore.NewTransactionHandlerWithGasAndFee(&smartContractResult.SmartContractResult{ - Nonce: 3, - SndAddr: []byte("erd1qqqqqqqqqqqqqpgq3dswlnnlkfd3gqrcv3dhzgnvh8ryf27g5rfsecnn2s"), - RcvAddr: []byte("erd1k7j6ewjsla4zsgv8v6f6fe3dvrkgv3d0d9jerczw45hzedhyed8sh2u34u"), - PrevTxHash: []byte("f639cb7a0231191e04ec19dcb1359bd93a03fe8dc4a28a80d00835c5d1c988f8"), - OriginalTxHash: txHash, - Value: refundValueBig, - Data: []byte(""), - ReturnMessage: []byte("gas refund for relayer"), - }, 0, big.NewInt(0)), + SmartContractResults: map[string]*outportcore.SCRInfo{ + string(scrHash1): { + SmartContractResult: scr1, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, + string(scrWithRefund): { + SmartContractResult: &smartContractResult.SmartContractResult{ + Nonce: 3, + SndAddr: []byte("erd1qqqqqqqqqqqqqpgq3dswlnnlkfd3gqrcv3dhzgnvh8ryf27g5rfsecnn2s"), + RcvAddr: []byte("erd1k7j6ewjsla4zsgv8v6f6fe3dvrkgv3d0d9jerczw45hzedhyed8sh2u34u"), + PrevTxHash: []byte("f639cb7a0231191e04ec19dcb1359bd93a03fe8dc4a28a80d00835c5d1c988f8"), + OriginalTxHash: txHash, + Value: refundValueBig, + Data: []byte(""), + ReturnMessage: []byte("gas refund for relayer"), + }, + FeeInfo: &outportcore.FeeInfo{ + Fee: big.NewInt(0), + }, + }, }, } @@ -113,9 +124,9 @@ func TestPutFeeAndGasUsedTx1(t *testing.T) { err = txsFeeProc.PutFeeAndGasUsed(pool) require.Nil(t, err) - require.Equal(t, big.NewInt(1673728170000000), initialTx.GetFee()) - require.Equal(t, uint64(7982817), initialTx.GetGasUsed()) - require.Equal(t, "1760000000000000", initialTx.GetInitialPaidFee().String()) + require.Equal(t, big.NewInt(1673728170000000), initialTx.GetFeeInfo().GetFee()) + require.Equal(t, uint64(7982817), initialTx.GetFeeInfo().GetGasUsed()) + require.Equal(t, "1760000000000000", initialTx.GetFeeInfo().GetInitialPaidFee().String()) } func TestPutFeeAndGasUsedScrNoTx(t *testing.T) { @@ -126,20 +137,22 @@ func TestPutFeeAndGasUsedScrNoTx(t *testing.T) { refundValueBig, _ := 
big.NewInt(0).SetString("226498540000000", 10) - scr := outportcore.NewTransactionHandlerWithGasAndFee(&smartContractResult.SmartContractResult{ - Nonce: 3, - SndAddr: []byte("erd1qqqqqqqqqqqqqpgq3dswlnnlkfd3gqrcv3dhzgnvh8ryf27g5rfsecnn2s"), - RcvAddr: []byte("erd1k7j6ewjsla4zsgv8v6f6fe3dvrkgv3d0d9jerczw45hzedhyed8sh2u34u"), - PrevTxHash: []byte("f639cb7a0231191e04ec19dcb1359bd93a03fe8dc4a28a80d00835c5d1c988f8"), - OriginalTxHash: txHash, - Value: refundValueBig, - Data: []byte(""), - ReturnMessage: []byte("gas refund for relayer"), - }, 0, big.NewInt(0)) - - pool := &outportcore.Pool{ - Scrs: map[string]coreData.TransactionHandlerWithGasUsedAndFee{ - "wrong": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{}, 0, big.NewInt(0)), + scr := &outportcore.SCRInfo{ + SmartContractResult: &smartContractResult.SmartContractResult{ + Nonce: 3, + SndAddr: []byte("erd1qqqqqqqqqqqqqpgq3dswlnnlkfd3gqrcv3dhzgnvh8ryf27g5rfsecnn2s"), + RcvAddr: []byte("erd1k7j6ewjsla4zsgv8v6f6fe3dvrkgv3d0d9jerczw45hzedhyed8sh2u34u"), + PrevTxHash: []byte("f639cb7a0231191e04ec19dcb1359bd93a03fe8dc4a28a80d00835c5d1c988f8"), + OriginalTxHash: txHash, + Value: refundValueBig, + Data: []byte(""), + ReturnMessage: []byte("gas refund for relayer"), + }, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, + } + + pool := &outportcore.TransactionPool{ + SmartContractResults: map[string]*outportcore.SCRInfo{ string(scrWithRefund): scr, }, } @@ -160,20 +173,23 @@ func TestPutFeeAndGasUsedScrNoTx(t *testing.T) { err = txsFeeProc.PutFeeAndGasUsed(pool) require.Nil(t, err) - require.Equal(t, big.NewInt(123001460000000), scr.GetFee()) - require.Equal(t, uint64(7350146), scr.GetGasUsed()) + require.Equal(t, big.NewInt(123001460000000), scr.GetFeeInfo().GetFee()) + require.Equal(t, uint64(7350146), scr.GetFeeInfo().GetGasUsed()) } func TestPutFeeAndGasUsedInvalidTxs(t *testing.T) { t.Parallel() - tx := outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - GasLimit: 
30000000, - GasPrice: 1000000000, - }, 0, big.NewInt(0)) + tx := &outportcore.TxInfo{ + Transaction: &transaction.Transaction{ + GasLimit: 30000000, + GasPrice: 1000000000, + }, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, + } - pool := &outportcore.Pool{ - Invalid: map[string]coreData.TransactionHandlerWithGasUsedAndFee{ + pool := &outportcore.TransactionPool{ + InvalidTxs: map[string]*outportcore.TxInfo{ "tx": tx, }, } @@ -185,63 +201,60 @@ func TestPutFeeAndGasUsedInvalidTxs(t *testing.T) { err = txsFeeProc.PutFeeAndGasUsed(pool) require.Nil(t, err) - require.Equal(t, big.NewInt(349500000000000), tx.GetFee()) - require.Equal(t, tx.GetGasLimit(), tx.GetGasUsed()) + require.Equal(t, big.NewInt(349500000000000), tx.GetFeeInfo().GetFee()) + require.Equal(t, tx.GetTxHandler().GetGasLimit(), tx.GetFeeInfo().GetGasUsed()) } func TestPutFeeAndGasUsedLogWithErrorAndInformative(t *testing.T) { t.Parallel() tx1Hash := "h1" - tx1 := outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - GasLimit: 30000000, - GasPrice: 1000000000, - }, 0, big.NewInt(0)) + tx1 := &outportcore.TxInfo{ + Transaction: &transaction.Transaction{ + GasLimit: 30000000, + GasPrice: 1000000000, + }, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, + } tx2Hash := "h2" - tx2 := outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - GasLimit: 50000000, - GasPrice: 1000000000, - }, 0, big.NewInt(0)) + tx2 := &outportcore.TxInfo{ + Transaction: &transaction.Transaction{ + GasLimit: 50000000, + GasPrice: 1000000000, + }, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, + } - pool := &outportcore.Pool{ - Txs: map[string]coreData.TransactionHandlerWithGasUsedAndFee{ + pool := &outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ tx1Hash: tx1, tx2Hash: tx2, - "t3": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{}, 0, big.NewInt(0)), - }, - Logs: []*coreData.LogData{ - { - LogHandler: &transaction.Log{ - 
Events: []*transaction.Event{ - { - Identifier: []byte("ignore"), - }, + "t3": {Transaction: &transaction.Transaction{}, FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}}}, + Logs: map[string]*transaction.Log{ + "hhh": { + Events: []*transaction.Event{ + { + Identifier: []byte("ignore"), }, }, }, - { - LogHandler: &transaction.Log{ - Events: []*transaction.Event{ - { - Identifier: []byte("ignore"), - }, - { - Identifier: []byte(core.SignalErrorOperation), - }, + tx1Hash: { + Events: []*transaction.Event{ + { + Identifier: []byte("ignore"), + }, + { + Identifier: []byte(core.SignalErrorOperation), }, }, - TxHash: tx1Hash, }, - { - LogHandler: &transaction.Log{ - Events: []*transaction.Event{ - { - Identifier: []byte(core.WriteLogIdentifier), - }, + tx2Hash: { + Events: []*transaction.Event{ + { + Identifier: []byte(core.WriteLogIdentifier), }, }, - TxHash: tx2Hash, }, }, } @@ -254,8 +267,8 @@ func TestPutFeeAndGasUsedLogWithErrorAndInformative(t *testing.T) { err = txsFeeProc.PutFeeAndGasUsed(pool) require.Nil(t, err) - require.Equal(t, tx1.GetGasLimit(), tx1.GetGasUsed()) - require.Equal(t, tx2.GetGasLimit(), tx2.GetGasUsed()) + require.Equal(t, tx1.GetTxHandler().GetGasLimit(), tx1.GetFeeInfo().GetGasUsed()) + require.Equal(t, tx2.GetTxHandler().GetGasLimit(), tx2.GetFeeInfo().GetGasUsed()) } func TestPutFeeAndGasUsedWrongRelayedTx(t *testing.T) { @@ -263,31 +276,37 @@ func TestPutFeeAndGasUsedWrongRelayedTx(t *testing.T) { txHash := []byte("relayedTx") scrHash1 := []byte("scrHash1") - initialTx := outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - Nonce: 1011, - SndAddr: []byte("erd1dglncxk6sl9a3xumj78n6z2xux4ghp5c92cstv5zsn56tjgtdwpsk46qrs"), - RcvAddr: []byte("erd1xlrw5j482m3fwl72fsu9saj984rxqdrjd860e02tcz0qakvqrp6q2pjqgg"), - GasLimit: 550000000, - GasPrice: 1000000000, - Data: 
[]byte("relayedTxV2@000000000000000005005eaf5311cedc6fa17f08f33e156926f8f3816d8ed8dc@06e2@7472616e73666572546f6b656e4064633132346163313733323937623836623936316362636663363339326231643130303533326533336530663933313838373634396336613935636236633931403031@ba26daf1353b8fa62d183b7d7df8db48846ea982a0cb26450b703e16720c77b9d7d4e47b652d270b160ae6866ca7b04aae38ca83a58ce508bf660db07d5b6401"), - Value: big.NewInt(0), - }, 0, big.NewInt(0)) + initialTx := &outportcore.TxInfo{ + Transaction: &transaction.Transaction{ + Nonce: 1011, + SndAddr: []byte("erd1dglncxk6sl9a3xumj78n6z2xux4ghp5c92cstv5zsn56tjgtdwpsk46qrs"), + RcvAddr: []byte("erd1xlrw5j482m3fwl72fsu9saj984rxqdrjd860e02tcz0qakvqrp6q2pjqgg"), + GasLimit: 550000000, + GasPrice: 1000000000, + Data: []byte("relayedTxV2@000000000000000005005eaf5311cedc6fa17f08f33e156926f8f3816d8ed8dc@06e2@7472616e73666572546f6b656e4064633132346163313733323937623836623936316362636663363339326231643130303533326533336530663933313838373634396336613935636236633931403031@ba26daf1353b8fa62d183b7d7df8db48846ea982a0cb26450b703e16720c77b9d7d4e47b652d270b160ae6866ca7b04aae38ca83a58ce508bf660db07d5b6401"), + Value: big.NewInt(0), + }, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, + } - scr1 := &smartContractResult.SmartContractResult{ - Nonce: 1011, - SndAddr: []byte("erd1xlrw5j482m3fwl72fsu9saj984rxqdrjd860e02tcz0qakvqrp6q2pjqgg"), - RcvAddr: []byte("erd1dglncxk6sl9a3xumj78n6z2xux4ghp5c92cstv5zsn56tjgtdwpsk46qrs"), - PrevTxHash: txHash, - OriginalTxHash: txHash, - ReturnMessage: []byte("higher nonce in transaction"), + scr1 := &outportcore.SCRInfo{ + SmartContractResult: &smartContractResult.SmartContractResult{ + Nonce: 1011, + SndAddr: []byte("erd1xlrw5j482m3fwl72fsu9saj984rxqdrjd860e02tcz0qakvqrp6q2pjqgg"), + RcvAddr: []byte("erd1dglncxk6sl9a3xumj78n6z2xux4ghp5c92cstv5zsn56tjgtdwpsk46qrs"), + PrevTxHash: txHash, + OriginalTxHash: txHash, + ReturnMessage: []byte("higher nonce in transaction"), + }, + FeeInfo: &outportcore.FeeInfo{Fee: 
big.NewInt(0)}, } - pool := &outportcore.Pool{ - Txs: map[string]coreData.TransactionHandlerWithGasUsedAndFee{ + pool := &outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ string(txHash): initialTx, }, - Scrs: map[string]coreData.TransactionHandlerWithGasUsedAndFee{ - string(scrHash1): outportcore.NewTransactionHandlerWithGasAndFee(scr1, 0, big.NewInt(0)), + SmartContractResults: map[string]*outportcore.SCRInfo{ + string(scrHash1): scr1, }, } @@ -298,27 +317,30 @@ func TestPutFeeAndGasUsedWrongRelayedTx(t *testing.T) { err = txsFeeProc.PutFeeAndGasUsed(pool) require.Nil(t, err) - require.Equal(t, big.NewInt(6103405000000000), initialTx.GetFee()) - require.Equal(t, uint64(550000000), initialTx.GetGasUsed()) - require.Equal(t, "6103405000000000", initialTx.GetInitialPaidFee().String()) + require.Equal(t, big.NewInt(6103405000000000), initialTx.GetFeeInfo().GetFee()) + require.Equal(t, uint64(550000000), initialTx.GetFeeInfo().GetGasUsed()) + require.Equal(t, "6103405000000000", initialTx.GetFeeInfo().GetInitialPaidFee().String()) } func TestPutFeeAndGasUsedESDTWithScCall(t *testing.T) { t.Parallel() txHash := []byte("tx") - tx := outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{ - Nonce: 1011, - SndAddr: silentDecodeAddress("erd1dglncxk6sl9a3xumj78n6z2xux4ghp5c92cstv5zsn56tjgtdwpsk46qrs"), - RcvAddr: silentDecodeAddress("erd1dglncxk6sl9a3xumj78n6z2xux4ghp5c92cstv5zsn56tjgtdwpsk46qrs"), - GasLimit: 55_000_000, - GasPrice: 1000000000, - Data: []byte("ESDTNFTTransfer@434f572d636434363364@080c@01@00000000000000000500d3b28828d62052124f07dcd50ed31b0825f60eee1526@616363657074476c6f62616c4f66666572@c3e5"), - Value: big.NewInt(0), - }, 0, big.NewInt(0)) + tx := &outportcore.TxInfo{ + Transaction: &transaction.Transaction{ + Nonce: 1011, + SndAddr: silentDecodeAddress("erd1dglncxk6sl9a3xumj78n6z2xux4ghp5c92cstv5zsn56tjgtdwpsk46qrs"), + RcvAddr: 
silentDecodeAddress("erd1dglncxk6sl9a3xumj78n6z2xux4ghp5c92cstv5zsn56tjgtdwpsk46qrs"), + GasLimit: 55_000_000, + GasPrice: 1000000000, + Data: []byte("ESDTNFTTransfer@434f572d636434363364@080c@01@00000000000000000500d3b28828d62052124f07dcd50ed31b0825f60eee1526@616363657074476c6f62616c4f66666572@c3e5"), + Value: big.NewInt(0), + }, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, + } - pool := &outportcore.Pool{ - Txs: map[string]coreData.TransactionHandlerWithGasUsedAndFee{ + pool := &outportcore.TransactionPool{ + Transactions: map[string]*outportcore.TxInfo{ string(txHash): tx, }, } @@ -330,9 +352,9 @@ func TestPutFeeAndGasUsedESDTWithScCall(t *testing.T) { err = txsFeeProc.PutFeeAndGasUsed(pool) require.Nil(t, err) - require.Equal(t, big.NewInt(820765000000000), tx.GetFee()) - require.Equal(t, uint64(55_000_000), tx.GetGasUsed()) - require.Equal(t, "820765000000000", tx.GetInitialPaidFee().String()) + require.Equal(t, big.NewInt(820765000000000), tx.GetFeeInfo().GetFee()) + require.Equal(t, uint64(55_000_000), tx.GetFeeInfo().GetGasUsed()) + require.Equal(t, "820765000000000", tx.GetFeeInfo().GetInitialPaidFee().String()) } func silentDecodeAddress(address string) []byte { From 8c34b9b64cd3460aaa847d859c56054da84fe2d7 Mon Sep 17 00:00:00 2001 From: MariusC Date: Thu, 16 Mar 2023 15:00:56 +0200 Subject: [PATCH 061/221] FIX: Continue refactor --- api/shared/interface.go | 4 +-- facade/initial/initialNodeFacade.go | 4 +-- factory/mock/accountsParserStub.go | 6 ++-- factory/status/statusComponents.go | 16 ++++++++- go.mod | 2 +- go.sum | 4 +-- outport/disabled/disabledOutport.go | 15 ++++---- outport/notifier/eventNotifier.go | 47 +++++++++++-------------- process/block/baseProcess.go | 3 +- process/block/metrics.go | 31 ++++++----------- process/sync/baseSync.go | 53 ++++++++++++++++++++++++++++- 11 files changed, 117 insertions(+), 68 deletions(-) diff --git a/api/shared/interface.go b/api/shared/interface.go index fffd5816ad6..73915c5dd24 100644 --- 
a/api/shared/interface.go +++ b/api/shared/interface.go @@ -5,9 +5,9 @@ import ( "github.com/gin-gonic/gin" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/esdt" - outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-core-go/data/vm" "github.com/multiversx/mx-chain-go/common" @@ -76,7 +76,7 @@ type FacadeHandler interface { GetBlockByHash(hash string, options api.BlockQueryOptions) (*api.Block, error) GetBlockByNonce(nonce uint64, options api.BlockQueryOptions) (*api.Block, error) GetBlockByRound(round uint64, options api.BlockQueryOptions) (*api.Block, error) - GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*outportcore.AlteredAccount, error) + GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) GetInternalShardBlockByNonce(format common.ApiOutputFormat, nonce uint64) (interface{}, error) GetInternalShardBlockByHash(format common.ApiOutputFormat, hash string) (interface{}, error) GetInternalShardBlockByRound(format common.ApiOutputFormat, round uint64) (interface{}, error) diff --git a/facade/initial/initialNodeFacade.go b/facade/initial/initialNodeFacade.go index 8ad6102d227..db783bf700a 100644 --- a/facade/initial/initialNodeFacade.go +++ b/facade/initial/initialNodeFacade.go @@ -6,9 +6,9 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/esdt" - outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/transaction" 
"github.com/multiversx/mx-chain-core-go/data/vm" "github.com/multiversx/mx-chain-go/common" @@ -279,7 +279,7 @@ func (inf *initialNodeFacade) GetBlockByRound(_ uint64, _ api.BlockQueryOptions) } // GetAlteredAccountsForBlock returns nil and error -func (inf *initialNodeFacade) GetAlteredAccountsForBlock(_ api.GetAlteredAccountsForBlockOptions) ([]*outportcore.AlteredAccount, error) { +func (inf *initialNodeFacade) GetAlteredAccountsForBlock(_ api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) { return nil, errNodeStarting } diff --git a/factory/mock/accountsParserStub.go b/factory/mock/accountsParserStub.go index 27066140982..436a8a418de 100644 --- a/factory/mock/accountsParserStub.go +++ b/factory/mock/accountsParserStub.go @@ -16,7 +16,7 @@ type AccountsParserStub struct { InitialAccountsCalled func() []genesis.InitialAccountHandler GetTotalStakedForDelegationAddressCalled func(delegationAddress string) *big.Int GetInitialAccountsForDelegatedCalled func(addressBytes []byte) []genesis.InitialAccountHandler - GenerateInitialTransactionsCalled func(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*block.MiniBlock, map[uint32]*outport.Pool, error) + GenerateInitialTransactionsCalled func(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*block.MiniBlock, map[uint32]*outport.TransactionPool, error) GenesisMintingAddressCalled func() string } @@ -75,12 +75,12 @@ func (aps *AccountsParserStub) InitialAccounts() []genesis.InitialAccountHandler } // GenerateInitialTransactions - -func (aps *AccountsParserStub) GenerateInitialTransactions(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*block.MiniBlock, map[uint32]*outport.Pool, error) { +func (aps *AccountsParserStub) GenerateInitialTransactions(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) 
([]*block.MiniBlock, map[uint32]*outport.TransactionPool, error) { if aps.GenerateInitialTransactionsCalled != nil { return aps.GenerateInitialTransactionsCalled(shardCoordinator, initialIndexingData) } - return make([]*block.MiniBlock, 0), make(map[uint32]*outport.Pool), nil + return make([]*block.MiniBlock, 0), make(map[uint32]*outport.TransactionPool), nil } // IsInterfaceNil - diff --git a/factory/status/statusComponents.go b/factory/status/statusComponents.go index ed66739b3c7..b432dd82d0f 100644 --- a/factory/status/statusComponents.go +++ b/factory/status/statusComponents.go @@ -7,6 +7,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" nodeData "github.com/multiversx/mx-chain-core-go/data" + outportCore "github.com/multiversx/mx-chain-core-go/data/outport" factoryMarshalizer "github.com/multiversx/mx-chain-core-go/marshal/factory" "github.com/multiversx/mx-chain-core-go/websocketOutportDriver/data" wsDriverFactory "github.com/multiversx/mx-chain-core-go/websocketOutportDriver/factory" @@ -175,13 +176,26 @@ func (pc *statusComponents) epochStartEventHandler() epochStart.ActionHandler { "error", err.Error()) } - pc.outportHandler.SaveValidatorsPubKeys(validatorsPubKeys, currentEpoch) + pc.outportHandler.SaveValidatorsPubKeys(&outportCore.ValidatorsPubKeys{ + ShardValidatorsPubKeys: convertPubKeys(validatorsPubKeys), + Epoch: currentEpoch, + }) }, func(_ nodeData.HeaderHandler) {}, common.IndexerOrder) return subscribeHandler } +func convertPubKeys(validatorsPubKeys map[uint32][][]byte) map[uint32]*outportCore.PubKeys { + ret := make(map[uint32]*outportCore.PubKeys, len(validatorsPubKeys)) + + for shard, validators := range validatorsPubKeys { + ret[shard] = &outportCore.PubKeys{Keys: validators} + } + + return ret +} + // IsInterfaceNil returns true if there is no value under the interface func (scf *statusComponentsFactory) IsInterfaceNil() bool { return scf == nil diff --git a/go.mod b/go.mod index 
888d60914ab..57d2e53f6d7 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-core-go v1.1.35-0.20230316095925-a74906deb369 + github.com/multiversx/mx-chain-core-go v1.1.35-0.20230316125808-f4a738285ba2 github.com/multiversx/mx-chain-crypto-go v1.2.5 github.com/multiversx/mx-chain-es-indexer-go v1.3.15-0.20230314145155-a08f7f7021bd github.com/multiversx/mx-chain-logger-go v1.0.11 diff --git a/go.sum b/go.sum index 9291c8b2bb8..d51c6c191e3 100644 --- a/go.sum +++ b/go.sum @@ -611,8 +611,8 @@ github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7 github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.31/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.35-0.20230314140225-cdd3af9ba37e/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.1.35-0.20230316095925-a74906deb369 h1:gkFwa3iiWoJuDZaKl4DNqKYXwK+zUCIAmUgKAHR9D8o= -github.com/multiversx/mx-chain-core-go v1.1.35-0.20230316095925-a74906deb369/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= +github.com/multiversx/mx-chain-core-go v1.1.35-0.20230316125808-f4a738285ba2 h1:kdNfw1/ZrFNTMSBtot7bQaUGeaN6mo9pziQ3GnjDrcU= +github.com/multiversx/mx-chain-core-go v1.1.35-0.20230316125808-f4a738285ba2/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-crypto-go v1.2.5 h1:tuq3BUNMhKud5DQbZi9DiVAAHUXypizy8zPH0NpTGZk= github.com/multiversx/mx-chain-crypto-go v1.2.5/go.mod h1:teqhNyWEqfMPgNn8sgWXlgtJ1a36jGCnhs/tRpXW6r4= github.com/multiversx/mx-chain-es-indexer-go v1.3.15-0.20230314145155-a08f7f7021bd h1:IPrhwnzjqCKKlmRd8h/uzYiCCoLOYiVHB5QTFfUbt00= diff --git a/outport/disabled/disabledOutport.go b/outport/disabled/disabledOutport.go 
index c97ecb91d83..8696bb2f79d 100644 --- a/outport/disabled/disabledOutport.go +++ b/outport/disabled/disabledOutport.go @@ -1,7 +1,6 @@ package disabled import ( - "github.com/multiversx/mx-chain-core-go/data" outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-go/outport" ) @@ -14,31 +13,31 @@ func NewDisabledOutport() *disabledOutport { } // SaveBlock does nothing -func (n *disabledOutport) SaveBlock(_ *outportcore.ArgsSaveBlockData) { +func (n *disabledOutport) SaveBlock(_ *outportcore.OutportBlock) { } // RevertIndexedBlock does nothing -func (n *disabledOutport) RevertIndexedBlock(_ data.HeaderHandler, _ data.BodyHandler) { +func (n *disabledOutport) RevertIndexedBlock(_ *outportcore.BlockData) { } // SaveRoundsInfo does nothing -func (n *disabledOutport) SaveRoundsInfo(_ []*outportcore.RoundInfo) { +func (n *disabledOutport) SaveRoundsInfo(_ *outportcore.RoundsInfo) { } // SaveValidatorsPubKeys does nothing -func (n *disabledOutport) SaveValidatorsPubKeys(_ map[uint32][][]byte, _ uint32) { +func (n *disabledOutport) SaveValidatorsPubKeys(_ *outportcore.ValidatorsPubKeys) { } // SaveValidatorsRating does nothing -func (n *disabledOutport) SaveValidatorsRating(_ string, _ []*outportcore.ValidatorRatingInfo) { +func (n *disabledOutport) SaveValidatorsRating(_ *outportcore.ValidatorsRating) { } // SaveAccounts does nothing -func (n *disabledOutport) SaveAccounts(_ uint64, _ map[string]*outportcore.AlteredAccount, _ uint32) { +func (n *disabledOutport) SaveAccounts(_ *outportcore.Accounts) { } // FinalizedBlock does nothing -func (n *disabledOutport) FinalizedBlock(_ []byte) { +func (n *disabledOutport) FinalizedBlock(_ *outportcore.FinalizedBlock) { } // Close does nothing diff --git a/outport/notifier/eventNotifier.go b/outport/notifier/eventNotifier.go index f2d5a183d7a..6c851e88424 100644 --- a/outport/notifier/eventNotifier.go +++ b/outport/notifier/eventNotifier.go @@ -6,13 +6,10 @@ import ( 
"github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" - nodeData "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-core-go/data/alteredAccount" + "github.com/multiversx/mx-chain-core-go/core/unmarshal" "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" - "github.com/multiversx/mx-chain-core-go/websocketOutportDriver" - outportSenderData "github.com/multiversx/mx-chain-core-go/websocketOutportDriver/data" logger "github.com/multiversx/mx-chain-logger-go" ) @@ -86,18 +83,13 @@ func checkEventNotifierArgs(args ArgsEventNotifier) error { } // SaveBlock converts block data in order to be pushed to subscribers -func (en *eventNotifier) SaveBlock(args *outport.ArgsSaveBlockData) error { - log.Debug("eventNotifier: SaveBlock called at block", "block hash", args.HeaderHash) - if args.TransactionsPool == nil { +func (en *eventNotifier) SaveBlock(args *outport.OutportBlock) error { + log.Debug("eventNotifier: SaveBlock called at block", "block hash", args.BlockData.HeaderHash) + if args.TransactionPool == nil { return ErrNilTransactionsPool } - argsSaveBlock := outportSenderData.ArgsSaveBlock{ - HeaderType: core.GetHeaderType(args.Header), - ArgsSaveBlockData: websocketOutportDriver.PrepareArgsSaveBlock(*args), - } - - err := en.httpClient.Post(pushEventEndpoint, argsSaveBlock) + err := en.httpClient.Post(pushEventEndpoint, args) if err != nil { return fmt.Errorf("%w in eventNotifier.SaveBlock while posting block data", err) } @@ -106,17 +98,22 @@ func (en *eventNotifier) SaveBlock(args *outport.ArgsSaveBlockData) error { } // RevertIndexedBlock converts revert data in order to be pushed to subscribers -func (en *eventNotifier) RevertIndexedBlock(header nodeData.HeaderHandler, _ nodeData.BodyHandler) error { - blockHash, err := core.CalculateHash(en.marshalizer, en.hasher, header) +func (en 
*eventNotifier) RevertIndexedBlock(blockData *outport.BlockData) error { + headerHandler, err := unmarshal.GetHeaderFromBytes(en.marshalizer, core.HeaderType(blockData.HeaderType), blockData.HeaderBytes) + if err != nil { + return err + } + + blockHash, err := core.CalculateHash(en.marshalizer, en.hasher, headerHandler) if err != nil { return fmt.Errorf("%w in eventNotifier.RevertIndexedBlock while computing the block hash", err) } revertBlock := RevertBlock{ Hash: hex.EncodeToString(blockHash), - Nonce: header.GetNonce(), - Round: header.GetRound(), - Epoch: header.GetEpoch(), + Nonce: headerHandler.GetNonce(), + Round: headerHandler.GetRound(), + Epoch: headerHandler.GetEpoch(), } err = en.httpClient.Post(revertEventsEndpoint, revertBlock) @@ -128,11 +125,7 @@ func (en *eventNotifier) RevertIndexedBlock(header nodeData.HeaderHandler, _ nod } // FinalizedBlock converts finalized block data in order to push it to subscribers -func (en *eventNotifier) FinalizedBlock(headerHash []byte) error { - finalizedBlock := FinalizedBlock{ - Hash: hex.EncodeToString(headerHash), - } - +func (en *eventNotifier) FinalizedBlock(finalizedBlock *outport.FinalizedBlock) error { err := en.httpClient.Post(finalizedEventsEndpoint, finalizedBlock) if err != nil { return fmt.Errorf("%w in eventNotifier.FinalizedBlock while posting event data", err) @@ -142,22 +135,22 @@ func (en *eventNotifier) FinalizedBlock(headerHash []byte) error { } // SaveRoundsInfo returns nil -func (en *eventNotifier) SaveRoundsInfo(_ []*outport.RoundInfo) error { +func (en *eventNotifier) SaveRoundsInfo(_ *outport.RoundsInfo) error { return nil } // SaveValidatorsRating returns nil -func (en *eventNotifier) SaveValidatorsRating(_ string, _ []*outport.ValidatorRatingInfo) error { +func (en *eventNotifier) SaveValidatorsRating(_ *outport.ValidatorsRating) error { return nil } // SaveValidatorsPubKeys returns nil -func (en *eventNotifier) SaveValidatorsPubKeys(_ map[uint32][][]byte, _ uint32) error { +func (en 
*eventNotifier) SaveValidatorsPubKeys(_ *outport.ValidatorsPubKeys) error { return nil } // SaveAccounts does nothing -func (en *eventNotifier) SaveAccounts(_ uint64, _ map[string]*alteredAccount.AlteredAccount, _ uint32) error { +func (en *eventNotifier) SaveAccounts(_ *outport.Accounts) error { return nil } diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index 160f4cccfa7..a33c1482cb6 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -14,6 +14,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" + outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/scheduled" "github.com/multiversx/mx-chain-core-go/data/typeConverters" "github.com/multiversx/mx-chain-core-go/display" @@ -1378,7 +1379,7 @@ func getLastSelfNotarizedHeaderByItself(chainHandler data.ChainHandler) (data.He func (bp *baseProcessor) setFinalizedHeaderHashInIndexer(hdrHash []byte) { log.Debug("baseProcessor.setFinalizedBlockInIndexer", "finalized header hash", hdrHash) - bp.outportHandler.FinalizedBlock(hdrHash) + bp.outportHandler.FinalizedBlock(&outportcore.FinalizedBlock{HeaderHash: hdrHash}) } func (bp *baseProcessor) updateStateStorage( diff --git a/process/block/metrics.go b/process/block/metrics.go index 06bef33a096..a97e60e7602 100644 --- a/process/block/metrics.go +++ b/process/block/metrics.go @@ -208,16 +208,16 @@ func indexRoundInfo( signersIndexes []uint64, ) { roundInfo := &outportcore.RoundInfo{ - Index: header.GetRound(), + Round: header.GetRound(), SignersIndexes: signersIndexes, BlockWasProposed: true, ShardId: shardId, Epoch: header.GetEpoch(), - Timestamp: time.Duration(header.GetTimeStamp()), + Timestamp: uint64(time.Duration(header.GetTimeStamp())), } if check.IfNil(lastHeader) { - outportHandler.SaveRoundsInfo([]*outportcore.RoundInfo{roundInfo}) + 
outportHandler.SaveRoundsInfo(&outportcore.RoundsInfo{RoundsInfo: []*outportcore.RoundInfo{roundInfo}}) return } @@ -239,18 +239,18 @@ func indexRoundInfo( } roundInfo = &outportcore.RoundInfo{ - Index: i, + Round: i, SignersIndexes: signersIndexes, BlockWasProposed: false, ShardId: shardId, Epoch: header.GetEpoch(), - Timestamp: time.Duration(header.GetTimeStamp() - ((currentBlockRound - i) * roundDuration)), + Timestamp: uint64(time.Duration(header.GetTimeStamp() - ((currentBlockRound - i) * roundDuration))), } roundsInfo = append(roundsInfo, roundInfo) } - outportHandler.SaveRoundsInfo(roundsInfo) + outportHandler.SaveRoundsInfo(&outportcore.RoundsInfo{RoundsInfo: roundsInfo}) } func indexValidatorsRating( @@ -269,7 +269,6 @@ func indexValidatorsRating( return } - shardValidatorsRating := make(map[string][]*outportcore.ValidatorRatingInfo) for shardID, validatorInfosInShard := range validators { validatorsInfos := make([]*outportcore.ValidatorRatingInfo, 0) for _, validatorInfo := range validatorInfosInShard { @@ -279,19 +278,11 @@ func indexValidatorsRating( }) } - indexID := fmt.Sprintf("%d_%d", shardID, metaBlock.GetEpoch()) - shardValidatorsRating[indexID] = validatorsInfos - } - - indexShardValidatorsRating(outportHandler, shardValidatorsRating) -} - -func indexShardValidatorsRating( - outportHandler outport.OutportHandler, - shardValidatorsRating map[string][]*outportcore.ValidatorRatingInfo, -) { - for indexID, validatorsInfos := range shardValidatorsRating { - outportHandler.SaveValidatorsRating(indexID, validatorsInfos) + outportHandler.SaveValidatorsRating(&outportcore.ValidatorsRating{ + ShardID: shardID, + Epoch: metaBlock.GetEpoch(), + ValidatorsRatingInfo: validatorsInfos, + }) } } diff --git a/process/sync/baseSync.go b/process/sync/baseSync.go index 2c011e50c10..eb4937616c8 100644 --- a/process/sync/baseSync.go +++ b/process/sync/baseSync.go @@ -13,6 +13,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/closing" 
"github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" + outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/typeConverters" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" @@ -816,7 +817,22 @@ func (boot *baseBootstrap) rollBack(revertUsingForkNonce bool) error { boot.scheduledTxsExecutionHandler.SetScheduledInfo(scheduledInfo) } - boot.outportHandler.RevertIndexedBlock(currHeader, currBody) + headerBytes, headerType, err := boot.getHeaderBytes(currHeader) + if err != nil { + return err + } + + body, err := getBody(currBody) + if err != nil { + return err + } + + boot.outportHandler.RevertIndexedBlock(&outportcore.BlockData{ + HeaderBytes: headerBytes, + HeaderType: string(headerType), + Body: body, + HeaderHash: nil, // TODO: Do we need hash here? + }) shouldAddHeaderToBlackList := revertUsingForkNonce && boot.blockBootstrapper.isForkTriggeredByMeta() if shouldAddHeaderToBlackList { @@ -835,6 +851,41 @@ func (boot *baseBootstrap) rollBack(revertUsingForkNonce bool) error { return nil } +func getBody(bodyHandler data.BodyHandler) (*block.Body, error) { + if check.IfNil(bodyHandler) { + return nil, fmt.Errorf("nil body") + } + + body, castOk := bodyHandler.(*block.Body) + if !castOk { + return nil, fmt.Errorf("cannot cast body") + } + + return body, nil +} + +func (boot *baseBootstrap) getHeaderBytes(headerHandler data.HeaderHandler) ([]byte, core.HeaderType, error) { + var err error + var headerBytes []byte + var headerType core.HeaderType + + switch header := headerHandler.(type) { + case *block.MetaBlock: + headerType = core.MetaHeader + headerBytes, err = boot.marshalizer.Marshal(header) + case *block.Header: + headerType = core.ShardHeaderV1 + headerBytes, err = boot.marshalizer.Marshal(header) + case *block.HeaderV2: + headerType = core.ShardHeaderV2 + headerBytes, err = boot.marshalizer.Marshal(header) + 
default: + return nil, "", fmt.Errorf("invalid/unknown header type") + } + + return headerBytes, headerType, err +} + func (boot *baseBootstrap) shouldAllowRollback(currHeader data.HeaderHandler, currHeaderHash []byte) bool { finalBlockNonce := boot.forkDetector.GetHighestFinalBlockNonce() finalBlockHash := boot.forkDetector.GetHighestFinalBlockHash() From 35f1baf6385ba8894e9c708c0506124a1dd44ad6 Mon Sep 17 00:00:00 2001 From: MariusC Date: Thu, 16 Mar 2023 15:38:53 +0200 Subject: [PATCH 062/221] FIX: Make solution build --- consensus/spos/bls/subroundStartRound.go | 6 +-- factory/mock/indexerStub.go | 62 ------------------------ go.mod | 2 +- go.sum | 4 +- node/mock/indexerStub.go | 62 ------------------------ node/nodeRunner.go | 16 +++++- process/block/shardblock_test.go | 6 +-- 7 files changed, 24 insertions(+), 134 deletions(-) delete mode 100644 factory/mock/indexerStub.go delete mode 100644 node/mock/indexerStub.go diff --git a/consensus/spos/bls/subroundStartRound.go b/consensus/spos/bls/subroundStartRound.go index a5c1f179609..344d26251e5 100644 --- a/consensus/spos/bls/subroundStartRound.go +++ b/consensus/spos/bls/subroundStartRound.go @@ -275,15 +275,15 @@ func (sr *subroundStartRound) indexRoundIfNeeded(pubKeys []string) { round := sr.RoundHandler().Index() roundInfo := &outportcore.RoundInfo{ - Index: uint64(round), + Round: uint64(round), SignersIndexes: signersIndexes, BlockWasProposed: false, ShardId: shardId, Epoch: epoch, - Timestamp: time.Duration(sr.RoundTimeStamp.Unix()), + Timestamp: uint64(sr.RoundTimeStamp.Unix()), } - sr.outportHandler.SaveRoundsInfo([]*outportcore.RoundInfo{roundInfo}) + sr.outportHandler.SaveRoundsInfo(&outportcore.RoundsInfo{RoundsInfo: []*outportcore.RoundInfo{roundInfo}}) } func (sr *subroundStartRound) generateNextConsensusGroup(roundIndex int64) error { diff --git a/factory/mock/indexerStub.go b/factory/mock/indexerStub.go deleted file mode 100644 index aed169943b6..00000000000 --- a/factory/mock/indexerStub.go +++ 
/dev/null @@ -1,62 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-core-go/data/outport" - "github.com/multiversx/mx-chain-go/process" - "github.com/multiversx/mx-chain-go/state" -) - -// IndexerStub is a mock implementation fot the Indexer interface -type IndexerStub struct { - SaveBlockCalled func(args *outport.ArgsSaveBlockData) -} - -// SaveBlock - -func (im *IndexerStub) SaveBlock(args *outport.ArgsSaveBlockData) { - if im.SaveBlockCalled != nil { - im.SaveBlockCalled(args) - } -} - -// Close will do nothing -func (im *IndexerStub) Close() error { - return nil -} - -// SetTxLogsProcessor will do nothing -func (im *IndexerStub) SetTxLogsProcessor(_ process.TransactionLogProcessorDatabase) { -} - -// SaveRoundsInfo - -func (im *IndexerStub) SaveRoundsInfo(_ []*outport.RoundInfo) { - panic("implement me") -} - -// SaveValidatorsRating - -func (im *IndexerStub) SaveValidatorsRating(_ string, _ []*outport.ValidatorRatingInfo) { - -} - -// SaveValidatorsPubKeys - -func (im *IndexerStub) SaveValidatorsPubKeys(_ map[uint32][][]byte, _ uint32) { - panic("implement me") -} - -// RevertIndexedBlock - -func (im *IndexerStub) RevertIndexedBlock(_ data.HeaderHandler, _ data.BodyHandler) { -} - -// SaveAccounts - -func (im *IndexerStub) SaveAccounts(_ uint64, _ []state.UserAccountHandler) { -} - -// IsInterfaceNil returns true if there is no value under the interface -func (im *IndexerStub) IsInterfaceNil() bool { - return im == nil -} - -// IsNilIndexer - -func (im *IndexerStub) IsNilIndexer() bool { - return false -} diff --git a/go.mod b/go.mod index 57d2e53f6d7..2f3f350cef8 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-core-go v1.1.35-0.20230316125808-f4a738285ba2 + github.com/multiversx/mx-chain-core-go v1.1.36-0.20230316133343-94217bd87ee6 
github.com/multiversx/mx-chain-crypto-go v1.2.5 github.com/multiversx/mx-chain-es-indexer-go v1.3.15-0.20230314145155-a08f7f7021bd github.com/multiversx/mx-chain-logger-go v1.0.11 diff --git a/go.sum b/go.sum index d51c6c191e3..8cd812c3e38 100644 --- a/go.sum +++ b/go.sum @@ -611,8 +611,8 @@ github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7 github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.31/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.35-0.20230314140225-cdd3af9ba37e/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.1.35-0.20230316125808-f4a738285ba2 h1:kdNfw1/ZrFNTMSBtot7bQaUGeaN6mo9pziQ3GnjDrcU= -github.com/multiversx/mx-chain-core-go v1.1.35-0.20230316125808-f4a738285ba2/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= +github.com/multiversx/mx-chain-core-go v1.1.36-0.20230316133343-94217bd87ee6 h1:0U+nYw9bnmMgtxsWEhgQzZyV4OXMzazO6MDooM29w50= +github.com/multiversx/mx-chain-core-go v1.1.36-0.20230316133343-94217bd87ee6/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-crypto-go v1.2.5 h1:tuq3BUNMhKud5DQbZi9DiVAAHUXypizy8zPH0NpTGZk= github.com/multiversx/mx-chain-crypto-go v1.2.5/go.mod h1:teqhNyWEqfMPgNn8sgWXlgtJ1a36jGCnhs/tRpXW6r4= github.com/multiversx/mx-chain-es-indexer-go v1.3.15-0.20230314145155-a08f7f7021bd h1:IPrhwnzjqCKKlmRd8h/uzYiCCoLOYiVHB5QTFfUbt00= diff --git a/node/mock/indexerStub.go b/node/mock/indexerStub.go deleted file mode 100644 index 6c8aaad7b6c..00000000000 --- a/node/mock/indexerStub.go +++ /dev/null @@ -1,62 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-core-go/data" - outportcore "github.com/multiversx/mx-chain-core-go/data/outport" - "github.com/multiversx/mx-chain-go/process" - "github.com/multiversx/mx-chain-go/state" -) - -// IndexerStub is a 
mock implementation fot the Indexer interface -type IndexerStub struct { - SaveBlockCalled func(args *outportcore.ArgsSaveBlockData) -} - -// SaveBlock - -func (im *IndexerStub) SaveBlock(args *outportcore.ArgsSaveBlockData) { - if im.SaveBlockCalled != nil { - im.SaveBlockCalled(args) - } -} - -// Close will do nothing -func (im *IndexerStub) Close() error { - return nil -} - -// SetTxLogsProcessor will do nothing -func (im *IndexerStub) SetTxLogsProcessor(_ process.TransactionLogProcessorDatabase) { -} - -// SaveRoundsInfo - -func (im *IndexerStub) SaveRoundsInfo(_ []*outportcore.RoundInfo) { - panic("implement me") -} - -// SaveValidatorsRating - -func (im *IndexerStub) SaveValidatorsRating(_ string, _ []*outportcore.ValidatorRatingInfo) { - -} - -// SaveValidatorsPubKeys - -func (im *IndexerStub) SaveValidatorsPubKeys(_ map[uint32][][]byte, _ uint32) { - panic("implement me") -} - -// RevertIndexedBlock - -func (im *IndexerStub) RevertIndexedBlock(_ data.HeaderHandler, _ data.BodyHandler) { -} - -// SaveAccounts - -func (im *IndexerStub) SaveAccounts(_ uint64, _ []state.UserAccountHandler) { -} - -// IsInterfaceNil returns true if there is no value under the interface -func (im *IndexerStub) IsInterfaceNil() bool { - return im == nil -} - -// IsNilIndexer - -func (im *IndexerStub) IsNilIndexer() bool { - return false -} diff --git a/node/nodeRunner.go b/node/nodeRunner.go index 21dbf2f5d1a..19c09b10f2e 100644 --- a/node/nodeRunner.go +++ b/node/nodeRunner.go @@ -20,6 +20,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/closing" "github.com/multiversx/mx-chain-core-go/core/throttler" "github.com/multiversx/mx-chain-core-go/data/endProcess" + outportCore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-go/api/gin" "github.com/multiversx/mx-chain-go/api/shared" "github.com/multiversx/mx-chain-go/common" @@ -1673,8 +1674,21 @@ func indexValidatorsListIfNeeded( } if len(validatorsPubKeys) > 0 { - 
outportHandler.SaveValidatorsPubKeys(validatorsPubKeys, epoch) + outportHandler.SaveValidatorsPubKeys(&outportCore.ValidatorsPubKeys{ + ShardValidatorsPubKeys: convertPubKeys(validatorsPubKeys), + Epoch: epoch, + }) + } +} + +func convertPubKeys(validatorsPubKeys map[uint32][][]byte) map[uint32]*outportCore.PubKeys { + ret := make(map[uint32]*outportCore.PubKeys, len(validatorsPubKeys)) + + for shard, validators := range validatorsPubKeys { + ret[shard] = &outportCore.PubKeys{Keys: validators} } + + return ret } func enableGopsIfNeeded(gopsEnabled bool) { diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index 4da1f65e9a6..fe12797feec 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -2228,7 +2228,7 @@ func TestShardProcessor_CommitBlockCallsIndexerMethods(t *testing.T) { called := false statusComponents.Outport = &outport.OutportStub{ - SaveBlockCalled: func(args *outportcore.ArgsSaveBlockData) { + SaveBlockCalled: func(args *outportcore.OutportBlock) { called = true }, HasDriversCalled: func() bool { @@ -2236,8 +2236,8 @@ func TestShardProcessor_CommitBlockCallsIndexerMethods(t *testing.T) { }, } arguments.OutportDataProvider = &outport.OutportDataProviderStub{ - PrepareOutportSaveBlockDataCalled: func(_ processOutport.ArgPrepareOutportSaveBlockData) (*outportcore.ArgsSaveBlockData, error) { - return &outportcore.ArgsSaveBlockData{}, nil + PrepareOutportSaveBlockDataCalled: func(_ processOutport.ArgPrepareOutportSaveBlockData) (*outportcore.OutportBlock, error) { + return &outportcore.OutportBlock{}, nil }} arguments.AccountsDB[state.UserAccountsState] = accounts From 88a089b8e549fea1409afcb960c87e3628acb7c6 Mon Sep 17 00:00:00 2001 From: MariusC Date: Thu, 16 Mar 2023 16:39:01 +0200 Subject: [PATCH 063/221] FIX: Unit tests + build --- facade/mock/apiResolverStub.go | 6 +-- integrationTests/mock/nilOutport.go | 15 +++--- outport/notifier/eventNotifier_test.go | 53 +++++++++++---------- 
outport/process/outportDataProvider_test.go | 9 ++-- testscommon/components/components.go | 6 +-- 5 files changed, 47 insertions(+), 42 deletions(-) diff --git a/facade/mock/apiResolverStub.go b/facade/mock/apiResolverStub.go index 4b32df61b95..fb615fe2f08 100644 --- a/facade/mock/apiResolverStub.go +++ b/facade/mock/apiResolverStub.go @@ -3,8 +3,8 @@ package mock import ( "context" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" - outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/node/external" @@ -24,7 +24,7 @@ type ApiResolverStub struct { GetBlockByHashCalled func(hash string, options api.BlockQueryOptions) (*api.Block, error) GetBlockByNonceCalled func(nonce uint64, options api.BlockQueryOptions) (*api.Block, error) GetBlockByRoundCalled func(round uint64, options api.BlockQueryOptions) (*api.Block, error) - GetAlteredAccountsForBlockCalled func(options api.GetAlteredAccountsForBlockOptions) ([]*outportcore.AlteredAccount, error) + GetAlteredAccountsForBlockCalled func(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) GetTransactionHandler func(hash string, withEvents bool) (*transaction.ApiTransactionResult, error) GetInternalShardBlockByNonceCalled func(format common.ApiOutputFormat, nonce uint64) (interface{}, error) GetInternalShardBlockByHashCalled func(format common.ApiOutputFormat, hash string) (interface{}, error) @@ -81,7 +81,7 @@ func (ars *ApiResolverStub) GetBlockByRound(round uint64, options api.BlockQuery } // GetAlteredAccountsForBlock - -func (ars *ApiResolverStub) GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*outportcore.AlteredAccount, error) { +func (ars *ApiResolverStub) GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) 
([]*alteredAccount.AlteredAccount, error) { if ars.GetAlteredAccountsForBlockCalled != nil { return ars.GetAlteredAccountsForBlockCalled(options) } diff --git a/integrationTests/mock/nilOutport.go b/integrationTests/mock/nilOutport.go index 67d4ca6b797..76afedf808a 100644 --- a/integrationTests/mock/nilOutport.go +++ b/integrationTests/mock/nilOutport.go @@ -1,7 +1,6 @@ package mock import ( - "github.com/multiversx/mx-chain-core-go/data" outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-go/outport" ) @@ -14,31 +13,31 @@ func NewNilOutport() *nilOutport { } // SaveBlock - -func (n *nilOutport) SaveBlock(_ *outportcore.ArgsSaveBlockData) { +func (n *nilOutport) SaveBlock(_ *outportcore.OutportBlock) { } // RevertIndexedBlock - -func (n *nilOutport) RevertIndexedBlock(_ data.HeaderHandler, _ data.BodyHandler) { +func (n *nilOutport) RevertIndexedBlock(_ *outportcore.BlockData) { } // SaveRoundsInfo - -func (n *nilOutport) SaveRoundsInfo(_ []*outportcore.RoundInfo) { +func (n *nilOutport) SaveRoundsInfo(_ *outportcore.RoundsInfo) { } // SaveValidatorsPubKeys - -func (n *nilOutport) SaveValidatorsPubKeys(_ map[uint32][][]byte, _ uint32) { +func (n *nilOutport) SaveValidatorsPubKeys(_ *outportcore.ValidatorsPubKeys) { } // SaveValidatorsRating - -func (n *nilOutport) SaveValidatorsRating(_ string, _ []*outportcore.ValidatorRatingInfo) { +func (n *nilOutport) SaveValidatorsRating(_ *outportcore.ValidatorsRating) { } // SaveAccounts - -func (n *nilOutport) SaveAccounts(_ uint64, _ map[string]*outportcore.AlteredAccount, _ uint32) { +func (n *nilOutport) SaveAccounts(_ *outportcore.Accounts) { } // FinalizedBlock - -func (n *nilOutport) FinalizedBlock(_ []byte) { +func (n *nilOutport) FinalizedBlock(_ *outportcore.FinalizedBlock) { } // Close - diff --git a/outport/notifier/eventNotifier_test.go b/outport/notifier/eventNotifier_test.go index b30766b59e8..47e577dfd4c 100644 --- a/outport/notifier/eventNotifier_test.go +++ 
b/outport/notifier/eventNotifier_test.go @@ -1,14 +1,13 @@ package notifier_test import ( - "encoding/hex" "fmt" "testing" - "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/outport" - outportSenderData "github.com/multiversx/mx-chain-core-go/websocketOutportDriver/data" + "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/outport/mock" "github.com/multiversx/mx-chain-go/outport/notifier" "github.com/multiversx/mx-chain-go/testscommon" @@ -93,15 +92,15 @@ func TestSaveBlock(t *testing.T) { wasCalled := false args.HttpClient = &mock.HTTPClientStub{ PostCalled: func(route string, payload interface{}) error { - saveBlockData := payload.(outportSenderData.ArgsSaveBlock) + saveBlockData := payload.(*outport.OutportBlock) - require.Equal(t, hex.EncodeToString([]byte(txHash1)), saveBlockData.TransactionsPool.Logs[0].TxHash) - for txHash := range saveBlockData.TransactionsPool.Txs { - require.Equal(t, hex.EncodeToString([]byte(txHash1)), txHash) + require.Contains(t, saveBlockData.TransactionPool.Logs, txHash1) + for txHash := range saveBlockData.TransactionPool.Transactions { + require.Equal(t, txHash1, txHash) } - for scrHash := range saveBlockData.TransactionsPool.Scrs { - require.Equal(t, hex.EncodeToString([]byte(scrHash1)), scrHash) + for scrHash := range saveBlockData.TransactionPool.SmartContractResults { + require.Equal(t, scrHash1, scrHash) } wasCalled = true @@ -111,19 +110,19 @@ func TestSaveBlock(t *testing.T) { en, _ := notifier.NewEventNotifier(args) - saveBlockData := &outport.ArgsSaveBlockData{ - HeaderHash: []byte{}, - TransactionsPool: &outport.Pool{ - Txs: map[string]data.TransactionHandlerWithGasUsedAndFee{ + saveBlockData := &outport.OutportBlock{ + BlockData: &outport.BlockData{ + HeaderHash: []byte{}, + }, + TransactionPool: &outport.TransactionPool{ + Transactions: 
map[string]*outport.TxInfo{ txHash1: nil, }, - Scrs: map[string]data.TransactionHandlerWithGasUsedAndFee{ + SmartContractResults: map[string]*outport.SCRInfo{ scrHash1: nil, }, - Logs: []*data.LogData{ - { - TxHash: txHash1, - }, + Logs: map[string]*transaction.Log{ + txHash1: {}, }, }, } @@ -154,9 +153,15 @@ func TestRevertIndexedBlock(t *testing.T) { Round: 2, Epoch: 3, } - err := en.RevertIndexedBlock(header, &block.Body{}) + headerBytes, _ := args.Marshaller.Marshal(header) + + err := en.RevertIndexedBlock(&outport.BlockData{ + HeaderBytes: headerBytes, + Body: &block.Body{}, + HeaderType: string(core.ShardHeaderV1), + }, + ) require.Nil(t, err) - require.True(t, wasCalled) } @@ -176,7 +181,7 @@ func TestFinalizedBlock(t *testing.T) { en, _ := notifier.NewEventNotifier(args) hash := []byte("headerHash") - err := en.FinalizedBlock(hash) + err := en.FinalizedBlock(&outport.FinalizedBlock{HeaderHash: hash}) require.Nil(t, err) require.True(t, wasCalled) @@ -199,13 +204,13 @@ func TestMockFunctions(t *testing.T) { err = en.SaveRoundsInfo(nil) require.Nil(t, err) - err = en.SaveValidatorsRating("", nil) + err = en.SaveValidatorsRating(nil) require.Nil(t, err) - err = en.SaveValidatorsPubKeys(nil, 0) + err = en.SaveValidatorsPubKeys(nil) require.Nil(t, err) - err = en.SaveAccounts(0, nil, 0) + err = en.SaveAccounts(nil) require.Nil(t, err) err = en.Close() diff --git a/outport/process/outportDataProvider_test.go b/outport/process/outportDataProvider_test.go index 0d7d4c75201..6bff67d924d 100644 --- a/outport/process/outportDataProvider_test.go +++ b/outport/process/outportDataProvider_test.go @@ -29,6 +29,7 @@ func createArgOutportDataProvider() ArgOutportDataProvider { EconomicsData: &mock.EconomicsHandlerMock{}, ShardCoordinator: &testscommon.ShardsCoordinatorMock{}, ExecutionOrderHandler: &mock.ExecutionOrderHandlerStub{}, + Marshaller: &testscommon.MarshalizerMock{}, } } @@ -84,10 +85,10 @@ func TestPrepareOutportSaveBlockData(t *testing.T) { }) require.Nil(t, 
err) require.NotNil(t, res) - require.NotNil(t, res.HeaderHash) - require.NotNil(t, res.Body) - require.NotNil(t, res.Header) + require.NotNil(t, res.BlockData.HeaderHash) + require.NotNil(t, res.BlockData.Body) + require.NotNil(t, res.BlockData.HeaderBytes) require.NotNil(t, res.SignersIndexes) require.NotNil(t, res.HeaderGasConsumption) - require.NotNil(t, res.TransactionsPool) + require.NotNil(t, res.TransactionPool) } diff --git a/testscommon/components/components.go b/testscommon/components/components.go index 85e27f38e48..8790bbc67b9 100644 --- a/testscommon/components/components.go +++ b/testscommon/components/components.go @@ -496,10 +496,10 @@ func GetProcessArgs( return initialAccounts }, - GenerateInitialTransactionsCalled: func(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*block.MiniBlock, map[uint32]*outport.Pool, error) { - txsPool := make(map[uint32]*outport.Pool) + GenerateInitialTransactionsCalled: func(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*block.MiniBlock, map[uint32]*outport.TransactionPool, error) { + txsPool := make(map[uint32]*outport.TransactionPool) for i := uint32(0); i < shardCoordinator.NumberOfShards(); i++ { - txsPool[i] = &outport.Pool{} + txsPool[i] = &outport.TransactionPool{} } return make([]*block.MiniBlock, 4), txsPool, nil From 0f3f77e088a63416d1cec0b5e7c6f1a08860989a Mon Sep 17 00:00:00 2001 From: MariusC Date: Thu, 16 Mar 2023 17:41:48 +0200 Subject: [PATCH 064/221] FIX: Unit tests --- api/mock/facadeStub.go | 6 +++--- node/external/blockAPI/baseBlock_test.go | 6 +++--- node/external/blockAPI/metaBlock_test.go | 13 +++++++------ node/external/blockAPI/shardBlock_test.go | 15 ++++++++------- testscommon/genesisMocks/accountsParserStub.go | 6 +++--- 5 files changed, 24 insertions(+), 22 deletions(-) diff --git a/api/mock/facadeStub.go b/api/mock/facadeStub.go index 808e66fbae7..d79d91e5c59 100644 --- 
a/api/mock/facadeStub.go +++ b/api/mock/facadeStub.go @@ -5,9 +5,9 @@ import ( "math/big" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/esdt" - outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-core-go/data/vm" "github.com/multiversx/mx-chain-go/common" @@ -55,7 +55,7 @@ type FacadeStub struct { GetNFTTokenIDsRegisteredByAddressCalled func(address string, options api.AccountQueryOptions) ([]string, api.BlockInfo, error) GetBlockByHashCalled func(hash string, options api.BlockQueryOptions) (*api.Block, error) GetBlockByNonceCalled func(nonce uint64, options api.BlockQueryOptions) (*api.Block, error) - GetAlteredAccountsForBlockCalled func(options api.GetAlteredAccountsForBlockOptions) ([]*outportcore.AlteredAccount, error) + GetAlteredAccountsForBlockCalled func(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) GetBlockByRoundCalled func(round uint64, options api.BlockQueryOptions) (*api.Block, error) GetInternalShardBlockByNonceCalled func(format common.ApiOutputFormat, nonce uint64) (interface{}, error) GetInternalShardBlockByHashCalled func(format common.ApiOutputFormat, hash string) (interface{}, error) @@ -395,7 +395,7 @@ func (f *FacadeStub) GetBlockByRound(round uint64, options api.BlockQueryOptions } // GetAlteredAccountsForBlock - -func (f *FacadeStub) GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*outportcore.AlteredAccount, error) { +func (f *FacadeStub) GetAlteredAccountsForBlock(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) { if f.GetAlteredAccountsForBlockCalled != nil { return f.GetAlteredAccountsForBlockCalled(options) } diff --git a/node/external/blockAPI/baseBlock_test.go 
b/node/external/blockAPI/baseBlock_test.go index 4303f04a35a..020d2fbc4d5 100644 --- a/node/external/blockAPI/baseBlock_test.go +++ b/node/external/blockAPI/baseBlock_test.go @@ -87,7 +87,7 @@ func TestBaseBlockGetIntraMiniblocksSCRS(t *testing.T) { intraMbs, err := baseAPIBlockProc.getIntrashardMiniblocksFromReceiptsStorage(blockHeader, []byte{}, api.BlockQueryOptions{WithTransactions: true}) require.Nil(t, err) require.Equal(t, &api.MiniBlock{ - Hash: "7630a217810d1ad3ea67e32dbff0e8f3ea6d970191f03d3c71761b3b60e57b91", + Hash: "f4add7b23eb83cf290422b0f6b770e3007b8ed3cd9683797fc90c8b4881f27bd", Type: "SmartContractResultBlock", Transactions: []*transaction.ApiTransactionResult{ { @@ -97,7 +97,7 @@ func TestBaseBlockGetIntraMiniblocksSCRS(t *testing.T) { Receiver: "726376", Data: []byte("doSomething"), MiniBlockType: "SmartContractResultBlock", - MiniBlockHash: "7630a217810d1ad3ea67e32dbff0e8f3ea6d970191f03d3c71761b3b60e57b91", + MiniBlockHash: "f4add7b23eb83cf290422b0f6b770e3007b8ed3cd9683797fc90c8b4881f27bd", }, }, ProcessingType: block.Normal.String(), @@ -149,7 +149,7 @@ func TestBaseBlockGetIntraMiniblocksReceipts(t *testing.T) { intraMbs, err := baseAPIBlockProc.getIntrashardMiniblocksFromReceiptsStorage(blockHeader, []byte{}, api.BlockQueryOptions{WithTransactions: true}) require.Nil(t, err) require.Equal(t, &api.MiniBlock{ - Hash: "262b3023ca9ba61e90a60932b4db7f8b0d1dec7c2a00261cf0c5d43785f17f6f", + Hash: "596545f64319f2fcf8e0ebae06f40f3353d603f6070255588a48018c7b30c951", Type: "ReceiptBlock", Receipts: []*transaction.ApiReceipt{ { diff --git a/node/external/blockAPI/metaBlock_test.go b/node/external/blockAPI/metaBlock_test.go index 205f4716b2d..b3cbb6d4ffb 100644 --- a/node/external/blockAPI/metaBlock_test.go +++ b/node/external/blockAPI/metaBlock_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" 
"github.com/multiversx/mx-chain-core-go/data/block" outportcore "github.com/multiversx/mx-chain-core-go/data/outport" @@ -892,11 +893,11 @@ func TestMetaAPIBlockProcessor_GetAlteredAccountsForBlock(t *testing.T) { metaAPIBlockProc.logsFacade = &testscommon.LogsFacadeStub{} metaAPIBlockProc.alteredAccountsProvider = &testscommon.AlteredAccountsProviderStub{ - ExtractAlteredAccountsFromPoolCalled: func(outportPool *outportcore.Pool, options shared.AlteredAccountsOptions) (map[string]*outportcore.AlteredAccount, error) { - retMap := map[string]*outportcore.AlteredAccount{} - for _, tx := range outportPool.Txs { - retMap[string(tx.GetSndAddr())] = &outportcore.AlteredAccount{ - Address: string(tx.GetSndAddr()), + ExtractAlteredAccountsFromPoolCalled: func(outportPool *outportcore.TransactionPool, options shared.AlteredAccountsOptions) (map[string]*alteredAccount.AlteredAccount, error) { + retMap := map[string]*alteredAccount.AlteredAccount{} + for _, tx := range outportPool.Transactions { + retMap[string(tx.Transaction.GetSndAddr())] = &alteredAccount.AlteredAccount{ + Address: string(tx.Transaction.GetSndAddr()), Balance: "10", } } @@ -912,7 +913,7 @@ func TestMetaAPIBlockProcessor_GetAlteredAccountsForBlock(t *testing.T) { }, }) require.NoError(t, err) - require.True(t, areAlteredAccountsResponsesTheSame([]*outportcore.AlteredAccount{ + require.True(t, areAlteredAccountsResponsesTheSame([]*alteredAccount.AlteredAccount{ { Address: "addr0", Balance: "10", diff --git a/node/external/blockAPI/shardBlock_test.go b/node/external/blockAPI/shardBlock_test.go index a32a9fe4cdf..5dd2f2bf8fb 100644 --- a/node/external/blockAPI/shardBlock_test.go +++ b/node/external/blockAPI/shardBlock_test.go @@ -6,6 +6,7 @@ import ( "math/big" "testing" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/block" outportcore "github.com/multiversx/mx-chain-core-go/data/outport" @@ -662,11 
+663,11 @@ func TestShardAPIBlockProcessor_GetAlteredAccountsForBlock(t *testing.T) { metaAPIBlockProc.logsFacade = &testscommon.LogsFacadeStub{} metaAPIBlockProc.alteredAccountsProvider = &testscommon.AlteredAccountsProviderStub{ - ExtractAlteredAccountsFromPoolCalled: func(txPool *outportcore.Pool, options shared.AlteredAccountsOptions) (map[string]*outportcore.AlteredAccount, error) { - retMap := map[string]*outportcore.AlteredAccount{} - for _, tx := range txPool.Txs { - retMap[string(tx.GetSndAddr())] = &outportcore.AlteredAccount{ - Address: string(tx.GetSndAddr()), + ExtractAlteredAccountsFromPoolCalled: func(txPool *outportcore.TransactionPool, options shared.AlteredAccountsOptions) (map[string]*alteredAccount.AlteredAccount, error) { + retMap := map[string]*alteredAccount.AlteredAccount{} + for _, tx := range txPool.Transactions { + retMap[string(tx.Transaction.GetSndAddr())] = &alteredAccount.AlteredAccount{ + Address: string(tx.Transaction.GetSndAddr()), Balance: "10", } } @@ -682,7 +683,7 @@ func TestShardAPIBlockProcessor_GetAlteredAccountsForBlock(t *testing.T) { }, }) require.NoError(t, err) - require.True(t, areAlteredAccountsResponsesTheSame([]*outportcore.AlteredAccount{ + require.True(t, areAlteredAccountsResponsesTheSame([]*alteredAccount.AlteredAccount{ { Address: "addr0", Balance: "10", @@ -696,7 +697,7 @@ func TestShardAPIBlockProcessor_GetAlteredAccountsForBlock(t *testing.T) { }) } -func areAlteredAccountsResponsesTheSame(first []*outportcore.AlteredAccount, second []*outportcore.AlteredAccount) bool { +func areAlteredAccountsResponsesTheSame(first []*alteredAccount.AlteredAccount, second []*alteredAccount.AlteredAccount) bool { if len(first) != len(second) { return false } diff --git a/testscommon/genesisMocks/accountsParserStub.go b/testscommon/genesisMocks/accountsParserStub.go index 412fa70d817..04182b04ff6 100644 --- a/testscommon/genesisMocks/accountsParserStub.go +++ b/testscommon/genesisMocks/accountsParserStub.go @@ -16,7 +16,7 @@ 
type AccountsParserStub struct { InitialAccountsCalled func() []genesis.InitialAccountHandler GetTotalStakedForDelegationAddressCalled func(delegationAddress string) *big.Int GetInitialAccountsForDelegatedCalled func(addressBytes []byte) []genesis.InitialAccountHandler - GenerateInitialTransactionsCalled func(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*block.MiniBlock, map[uint32]*outport.Pool, error) + GenerateInitialTransactionsCalled func(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*block.MiniBlock, map[uint32]*outport.TransactionPool, error) GenesisMintingAddressCalled func() string } @@ -75,12 +75,12 @@ func (aps *AccountsParserStub) InitialAccounts() []genesis.InitialAccountHandler } // GenerateInitialTransactions - -func (aps *AccountsParserStub) GenerateInitialTransactions(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*block.MiniBlock, map[uint32]*outport.Pool, error) { +func (aps *AccountsParserStub) GenerateInitialTransactions(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*block.MiniBlock, map[uint32]*outport.TransactionPool, error) { if aps.GenerateInitialTransactionsCalled != nil { return aps.GenerateInitialTransactionsCalled(shardCoordinator, initialIndexingData) } - return make([]*block.MiniBlock, 0), make(map[uint32]*outport.Pool), nil + return make([]*block.MiniBlock, 0), make(map[uint32]*outport.TransactionPool), nil } // IsInterfaceNil - From 08362104236dd86dc1a7437e2949bae1a22a904e Mon Sep 17 00:00:00 2001 From: MariusC Date: Fri, 17 Mar 2023 15:33:21 +0200 Subject: [PATCH 065/221] FEAT: Change interfaces in outport and drivers --- factory/processing/processComponents.go | 60 ++++--------- go.mod | 4 +- go.sum | 8 +- integrationTests/mock/nilOutport.go | 61 ------------- integrationTests/testProcessorNode.go | 3 +- 
integrationTests/testSyncNode.go | 5 +- outport/disabled/disabledOutport.go | 6 +- outport/interface.go | 8 +- outport/mock/driverStub.go | 5 ++ outport/notifier/eventNotifier.go | 5 ++ outport/outport.go | 43 ++++++++- .../disabled/disabledOutportDataProvider.go | 4 +- outport/process/outportDataProvider.go | 90 ++++++------------- outport/process/outportDataProvider_test.go | 6 +- process/block/metablock.go | 7 +- process/block/shardblock.go | 7 +- process/sync/baseSync.go | 55 ++---------- .../outport/outportDataProviderStub.go | 4 +- testscommon/outport/outportStub.go | 12 +-- 19 files changed, 146 insertions(+), 247 deletions(-) delete mode 100644 integrationTests/mock/nilOutport.go diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index d56c52b8da5..c9e9e9bdb1f 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -1136,28 +1136,28 @@ func (pcf *processComponentsFactory) indexGenesisBlocks( _ = genesisBlockHeader.SetTxCount(uint32(len(txsPoolPerShard[currentShardId].Transactions))) - headerBytes, headerType, err := pcf.getHeaderBytes(genesisBlockHeader) - if err != nil { - return err - } - - arg := &outport.OutportBlock{ - BlockData: &outport.BlockData{ - HeaderBytes: headerBytes, - HeaderType: string(headerType), - HeaderHash: genesisBlockHash, - Body: genesisBody, + arg := &outport.OutportBlockWithHeaderAndBody{ + OutportBlock: &outport.OutportBlock{ + BlockData: nil, // this will be filled by outport handler + HeaderGasConsumption: &outport.HeaderGasConsumption{ + GasProvided: 0, + GasRefunded: 0, + GasPenalized: 0, + MaxGasPerBlock: pcf.coreData.EconomicsData().MaxGasLimitPerBlock(currentShardId), + }, + TransactionPool: txsPoolPerShard[currentShardId], + AlteredAccounts: alteredAccounts, }, - HeaderGasConsumption: &outport.HeaderGasConsumption{ - GasProvided: 0, - GasRefunded: 0, - GasPenalized: 0, - MaxGasPerBlock: 
pcf.coreData.EconomicsData().MaxGasLimitPerBlock(currentShardId), + HeaderDataWithBody: &outport.HeaderDataWithBody{ + Body: genesisBody, + Header: genesisBlockHeader, + HeaderHash: genesisBlockHash, }, - TransactionPool: txsPoolPerShard[currentShardId], - AlteredAccounts: alteredAccounts, } - pcf.statusComponents.OutportHandler().SaveBlock(arg) + errOutport := pcf.statusComponents.OutportHandler().SaveBlock(arg) + if errOutport != nil { + log.Warn("indexGenesisBlocks.outportHandler.SaveBlock cannot save block", "error", err) + } } log.Info("indexGenesisBlocks(): historyRepo.RecordBlock", "shardID", currentShardId, "hash", genesisBlockHash) @@ -1207,28 +1207,6 @@ func (pcf *processComponentsFactory) indexGenesisBlocks( return nil } -func (pcf *processComponentsFactory) getHeaderBytes(headerHandler data.HeaderHandler) ([]byte, core.HeaderType, error) { - var err error - var headerBytes []byte - var headerType core.HeaderType - - switch header := headerHandler.(type) { - case *dataBlock.MetaBlock: - headerType = core.MetaHeader - headerBytes, err = pcf.coreData.InternalMarshalizer().Marshal(header) - case *dataBlock.Header: - headerType = core.ShardHeaderV1 - headerBytes, err = pcf.coreData.InternalMarshalizer().Marshal(header) - case *dataBlock.HeaderV2: - headerType = core.ShardHeaderV2 - headerBytes, err = pcf.coreData.InternalMarshalizer().Marshal(header) - default: - return nil, "", fmt.Errorf("invalid/unknown header type") - } - - return headerBytes, headerType, err -} - func (pcf *processComponentsFactory) saveAlteredGenesisHeaderToStorage( genesisBlockHeader data.HeaderHandler, genesisBlockHash []byte, diff --git a/go.mod b/go.mod index 2f3f350cef8..d21c29be14a 100644 --- a/go.mod +++ b/go.mod @@ -13,9 +13,9 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-core-go v1.1.36-0.20230316133343-94217bd87ee6 + github.com/multiversx/mx-chain-core-go 
v1.1.36-0.20230317131421-03a7f3c33ffd github.com/multiversx/mx-chain-crypto-go v1.2.5 - github.com/multiversx/mx-chain-es-indexer-go v1.3.15-0.20230314145155-a08f7f7021bd + github.com/multiversx/mx-chain-es-indexer-go v1.3.15-0.20230317105636-87b2eb6dd345 github.com/multiversx/mx-chain-logger-go v1.0.11 github.com/multiversx/mx-chain-p2p-go v1.0.13 github.com/multiversx/mx-chain-storage-go v1.0.7 diff --git a/go.sum b/go.sum index 8cd812c3e38..7c986b76ee3 100644 --- a/go.sum +++ b/go.sum @@ -611,12 +611,12 @@ github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7 github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.31/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.35-0.20230314140225-cdd3af9ba37e/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.1.36-0.20230316133343-94217bd87ee6 h1:0U+nYw9bnmMgtxsWEhgQzZyV4OXMzazO6MDooM29w50= -github.com/multiversx/mx-chain-core-go v1.1.36-0.20230316133343-94217bd87ee6/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= +github.com/multiversx/mx-chain-core-go v1.1.36-0.20230317131421-03a7f3c33ffd h1:lCP1HAUvczAz6pJn3ED6MMWCyt+wRBnDFVYQuhq+hzU= +github.com/multiversx/mx-chain-core-go v1.1.36-0.20230317131421-03a7f3c33ffd/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-crypto-go v1.2.5 h1:tuq3BUNMhKud5DQbZi9DiVAAHUXypizy8zPH0NpTGZk= github.com/multiversx/mx-chain-crypto-go v1.2.5/go.mod h1:teqhNyWEqfMPgNn8sgWXlgtJ1a36jGCnhs/tRpXW6r4= -github.com/multiversx/mx-chain-es-indexer-go v1.3.15-0.20230314145155-a08f7f7021bd h1:IPrhwnzjqCKKlmRd8h/uzYiCCoLOYiVHB5QTFfUbt00= -github.com/multiversx/mx-chain-es-indexer-go v1.3.15-0.20230314145155-a08f7f7021bd/go.mod h1:QcF/hS31tE/Tq/YX1QTPdpKCcbXPQ5HqcKM76p6rYYE= +github.com/multiversx/mx-chain-es-indexer-go 
v1.3.15-0.20230317105636-87b2eb6dd345 h1:P+LnTGwHT9RfesB+Q1xvMEAJBy60NTJBwbRUFi9wmfo= +github.com/multiversx/mx-chain-es-indexer-go v1.3.15-0.20230317105636-87b2eb6dd345/go.mod h1:QcF/hS31tE/Tq/YX1QTPdpKCcbXPQ5HqcKM76p6rYYE= github.com/multiversx/mx-chain-logger-go v1.0.11 h1:DFsHa+sc5fKwhDR50I8uBM99RTDTEW68ESyr5ALRDwE= github.com/multiversx/mx-chain-logger-go v1.0.11/go.mod h1:1srDkP0DQucWQ+rYfaq0BX2qLnULsUdRPADpYUTM6dA= github.com/multiversx/mx-chain-p2p-go v1.0.13 h1:woIlYkDFCKYyJQ5urDcOzz8HUFGsSEhTfUXDDxNI2zM= diff --git a/integrationTests/mock/nilOutport.go b/integrationTests/mock/nilOutport.go deleted file mode 100644 index 76afedf808a..00000000000 --- a/integrationTests/mock/nilOutport.go +++ /dev/null @@ -1,61 +0,0 @@ -package mock - -import ( - outportcore "github.com/multiversx/mx-chain-core-go/data/outport" - "github.com/multiversx/mx-chain-go/outport" -) - -type nilOutport struct{} - -// NewNilOutport - -func NewNilOutport() *nilOutport { - return new(nilOutport) -} - -// SaveBlock - -func (n *nilOutport) SaveBlock(_ *outportcore.OutportBlock) { -} - -// RevertIndexedBlock - -func (n *nilOutport) RevertIndexedBlock(_ *outportcore.BlockData) { -} - -// SaveRoundsInfo - -func (n *nilOutport) SaveRoundsInfo(_ *outportcore.RoundsInfo) { -} - -// SaveValidatorsPubKeys - -func (n *nilOutport) SaveValidatorsPubKeys(_ *outportcore.ValidatorsPubKeys) { -} - -// SaveValidatorsRating - -func (n *nilOutport) SaveValidatorsRating(_ *outportcore.ValidatorsRating) { -} - -// SaveAccounts - -func (n *nilOutport) SaveAccounts(_ *outportcore.Accounts) { -} - -// FinalizedBlock - -func (n *nilOutport) FinalizedBlock(_ *outportcore.FinalizedBlock) { -} - -// Close - -func (n *nilOutport) Close() error { - return nil -} - -// IsInterfaceNil - -func (n *nilOutport) IsInterfaceNil() bool { - return n == nil -} - -// SubscribeDriver - -func (n *nilOutport) SubscribeDriver(_ outport.Driver) error { - return nil -} - -// HasDrivers - -func (n *nilOutport) HasDrivers() bool { - 
return false -} diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 8f4bb2a4c52..711d83dfd84 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -57,6 +57,7 @@ import ( "github.com/multiversx/mx-chain-go/node" "github.com/multiversx/mx-chain-go/node/external" "github.com/multiversx/mx-chain-go/node/nodeDebugFactory" + disabledOutport "github.com/multiversx/mx-chain-go/outport/disabled" "github.com/multiversx/mx-chain-go/p2p" p2pFactory "github.com/multiversx/mx-chain-go/p2p/factory" "github.com/multiversx/mx-chain-go/process" @@ -3147,7 +3148,7 @@ func GetDefaultNetworkComponents() *mock.NetworkComponentsStub { // GetDefaultStatusComponents - func GetDefaultStatusComponents() *mock.StatusComponentsStub { return &mock.StatusComponentsStub{ - Outport: mock.NewNilOutport(), + Outport: disabledOutport.NewDisabledOutport(), SoftwareVersionCheck: &mock.SoftwareVersionCheckerMock{}, AppStatusHandler: &statusHandlerMock.AppStatusHandlerStub{}, } diff --git a/integrationTests/testSyncNode.go b/integrationTests/testSyncNode.go index 8b2b72d5419..6eb7407e137 100644 --- a/integrationTests/testSyncNode.go +++ b/integrationTests/testSyncNode.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/provider" "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/outport/disabled" "github.com/multiversx/mx-chain-go/process/block" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" "github.com/multiversx/mx-chain-go/process/sync" @@ -163,7 +164,7 @@ func (tpn *TestProcessorNode) createShardBootstrapper() (TestBootstrapper, error MiniblocksProvider: tpn.MiniblocksProvider, Uint64Converter: TestUint64Converter, AppStatusHandler: TestAppStatusHandler, - OutportHandler: mock.NewNilOutport(), + OutportHandler: disabled.NewDisabledOutport(), AccountsDBSyncer: 
&mock.AccountsDBSyncerStub{}, CurrentEpochProvider: &testscommon.CurrentEpochProviderStub{}, IsInImportMode: false, @@ -208,7 +209,7 @@ func (tpn *TestProcessorNode) createMetaChainBootstrapper() (TestBootstrapper, e MiniblocksProvider: tpn.MiniblocksProvider, Uint64Converter: TestUint64Converter, AppStatusHandler: TestAppStatusHandler, - OutportHandler: mock.NewNilOutport(), + OutportHandler: disabled.NewDisabledOutport(), AccountsDBSyncer: &mock.AccountsDBSyncerStub{}, CurrentEpochProvider: &testscommon.CurrentEpochProviderStub{}, IsInImportMode: false, diff --git a/outport/disabled/disabledOutport.go b/outport/disabled/disabledOutport.go index 8696bb2f79d..97be7894c9b 100644 --- a/outport/disabled/disabledOutport.go +++ b/outport/disabled/disabledOutport.go @@ -13,11 +13,13 @@ func NewDisabledOutport() *disabledOutport { } // SaveBlock does nothing -func (n *disabledOutport) SaveBlock(_ *outportcore.OutportBlock) { +func (n *disabledOutport) SaveBlock(_ *outportcore.OutportBlockWithHeaderAndBody) error { + return nil } // RevertIndexedBlock does nothing -func (n *disabledOutport) RevertIndexedBlock(_ *outportcore.BlockData) { +func (n *disabledOutport) RevertIndexedBlock(_ *outportcore.HeaderDataWithBody) error { + return nil } // SaveRoundsInfo does nothing diff --git a/outport/interface.go b/outport/interface.go index 5923ee47717..07dc8e9ce58 100644 --- a/outport/interface.go +++ b/outport/interface.go @@ -2,6 +2,7 @@ package outport import ( outportcore "github.com/multiversx/mx-chain-core-go/data/outport" + "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/outport/process" ) @@ -15,6 +16,7 @@ type Driver interface { SaveValidatorsRating(validatorsRating *outportcore.ValidatorsRating) error SaveAccounts(accounts *outportcore.Accounts) error FinalizedBlock(finalizedBlock *outportcore.FinalizedBlock) error + GetMarshaller() marshal.Marshalizer Close() error IsInterfaceNil() bool } @@ -22,8 +24,8 @@ type Driver interface { // 
OutportHandler is interface that defines what a proxy implementation should be able to do // The node is able to talk only with this interface type OutportHandler interface { - SaveBlock(outportBlock *outportcore.OutportBlock) - RevertIndexedBlock(blockData *outportcore.BlockData) + SaveBlock(outportBlock *outportcore.OutportBlockWithHeaderAndBody) error + RevertIndexedBlock(blockData *outportcore.HeaderDataWithBody) error SaveRoundsInfo(roundsInfos *outportcore.RoundsInfo) SaveValidatorsPubKeys(validatorsPubKeys *outportcore.ValidatorsPubKeys) SaveValidatorsRating(validatorsRating *outportcore.ValidatorsRating) @@ -37,6 +39,6 @@ type OutportHandler interface { // DataProviderOutport is an interface that defines what an implementation of data provider outport should be able to do type DataProviderOutport interface { - PrepareOutportSaveBlockData(arg process.ArgPrepareOutportSaveBlockData) (*outportcore.OutportBlock, error) + PrepareOutportSaveBlockData(arg process.ArgPrepareOutportSaveBlockData) (*outportcore.OutportBlockWithHeaderAndBody, error) IsInterfaceNil() bool } diff --git a/outport/mock/driverStub.go b/outport/mock/driverStub.go index 2136dfaa338..18d087b6d3d 100644 --- a/outport/mock/driverStub.go +++ b/outport/mock/driverStub.go @@ -2,6 +2,7 @@ package mock import ( outportcore "github.com/multiversx/mx-chain-core-go/data/outport" + "github.com/multiversx/mx-chain-core-go/marshal" ) // DriverStub - @@ -79,6 +80,10 @@ func (d *DriverStub) FinalizedBlock(finalizedBlock *outportcore.FinalizedBlock) return nil } +func (d *DriverStub) GetMarshaller() marshal.Marshalizer { + return nil +} + // Close - func (d *DriverStub) Close() error { if d.CloseCalled != nil { diff --git a/outport/notifier/eventNotifier.go b/outport/notifier/eventNotifier.go index 6c851e88424..3bba95397cd 100644 --- a/outport/notifier/eventNotifier.go +++ b/outport/notifier/eventNotifier.go @@ -154,6 +154,11 @@ func (en *eventNotifier) SaveAccounts(_ *outport.Accounts) error { return nil } 
+// GetMarshaller returns internal marshaller +func (en *eventNotifier) GetMarshaller() marshal.Marshalizer { + return en.marshalizer +} + // IsInterfaceNil returns whether the interface is nil func (en *eventNotifier) IsInterfaceNil() bool { return en == nil diff --git a/outport/outport.go b/outport/outport.go index 1e7b4dd219f..318facc0cca 100644 --- a/outport/outport.go +++ b/outport/outport.go @@ -43,13 +43,43 @@ func NewOutport(retrialInterval time.Duration) (*outport, error) { } // SaveBlock will save block for every driver -func (o *outport) SaveBlock(args *outportcore.OutportBlock) { +func (o *outport) SaveBlock(args *outportcore.OutportBlockWithHeaderAndBody) error { o.mutex.RLock() defer o.mutex.RUnlock() for _, driver := range o.drivers { - o.saveBlockBlocking(args, driver) + blockData, err := prepareBlockData(args.HeaderDataWithBody, driver) + if err != nil { + return err + } + + args.OutportBlock.BlockData = blockData + o.saveBlockBlocking(args.OutportBlock, driver) + } + + return nil +} + +func prepareBlockData( + headerBodyData *outportcore.HeaderDataWithBody, + driver Driver, +) (*outportcore.BlockData, error) { + marshaller := driver.GetMarshaller() + headerBytes, headerType, err := outportcore.GetHeaderBytesAndType(marshaller, headerBodyData.Header) + if err != nil { + return nil, err } + body, err := outportcore.GetBody(headerBodyData.Body) + if err != nil { + return nil, err + } + + return &outportcore.BlockData{ + HeaderBytes: headerBytes, + HeaderType: string(headerType), + HeaderHash: headerBodyData.HeaderHash, + Body: body, + }, nil } func (o *outport) monitorCompletionOnDriver(function string, driver Driver) chan struct{} { @@ -107,13 +137,20 @@ func (o *outport) shouldTerminate() bool { } // RevertIndexedBlock will revert block for every driver -func (o *outport) RevertIndexedBlock(blockData *outportcore.BlockData) { +func (o *outport) RevertIndexedBlock(headerDataWithBody *outportcore.HeaderDataWithBody) error { o.mutex.RLock() defer 
o.mutex.RUnlock() for _, driver := range o.drivers { + blockData, err := prepareBlockData(headerDataWithBody, driver) + if err != nil { + return err + } + o.revertIndexedBlockBlocking(blockData, driver) } + + return nil } func (o *outport) revertIndexedBlockBlocking(blockData *outportcore.BlockData, driver Driver) { diff --git a/outport/process/disabled/disabledOutportDataProvider.go b/outport/process/disabled/disabledOutportDataProvider.go index dcbace5b465..777ea1bcc5b 100644 --- a/outport/process/disabled/disabledOutportDataProvider.go +++ b/outport/process/disabled/disabledOutportDataProvider.go @@ -13,8 +13,8 @@ func NewDisabledOutportDataProvider() *disabledOutportDataProvider { } // PrepareOutportSaveBlockData wil do nothing -func (d *disabledOutportDataProvider) PrepareOutportSaveBlockData(_ process.ArgPrepareOutportSaveBlockData) (*outportcore.OutportBlock, error) { - return &outportcore.OutportBlock{}, nil +func (d *disabledOutportDataProvider) PrepareOutportSaveBlockData(_ process.ArgPrepareOutportSaveBlockData) (*outportcore.OutportBlockWithHeaderAndBody, error) { + return &outportcore.OutportBlockWithHeaderAndBody{}, nil } // IsInterfaceNil returns true if there is no value under the interface diff --git a/outport/process/outportDataProvider.go b/outport/process/outportDataProvider.go index 1288b4b9a4a..e3f09bc0e74 100644 --- a/outport/process/outportDataProvider.go +++ b/outport/process/outportDataProvider.go @@ -39,6 +39,8 @@ type ArgOutportDataProvider struct { type ArgPrepareOutportSaveBlockData struct { HeaderHash []byte Header data.HeaderHandler + HeaderBytes []byte + HeaderType string Body data.BodyHandler PreviousHeader data.HeaderHandler RewardsTxs map[string]data.TransactionHandler @@ -78,7 +80,7 @@ func NewOutportDataProvider(arg ArgOutportDataProvider) (*outportDataProvider, e } // PrepareOutportSaveBlockData will prepare the provided data in a format that will be accepted by an outport driver -func (odp *outportDataProvider) 
PrepareOutportSaveBlockData(arg ArgPrepareOutportSaveBlockData) (*outportcore.OutportBlock, error) { +func (odp *outportDataProvider) PrepareOutportSaveBlockData(arg ArgPrepareOutportSaveBlockData) (*outportcore.OutportBlockWithHeaderAndBody, error) { if check.IfNil(arg.Header) { return nil, errNilHeaderHandler } @@ -115,38 +117,31 @@ func (odp *outportDataProvider) PrepareOutportSaveBlockData(arg ArgPrepareOutpor if err != nil { return nil, err } - body, err := getBody(arg.Body) - if err != nil { - return nil, err - } - headerBytes, headerType, err := odp.getHeaderBytes(arg.Header) - if err != nil { - return nil, err - } - - return &outportcore.OutportBlock{ - BlockData: &outportcore.BlockData{ - HeaderBytes: headerBytes, - HeaderType: string(headerType), - HeaderHash: arg.HeaderHash, - Body: body, + return &outportcore.OutportBlockWithHeaderAndBody{ + OutportBlock: &outportcore.OutportBlock{ + BlockData: nil, // this will be filed with specific data for each driver + TransactionPool: pool, + HeaderGasConsumption: &outportcore.HeaderGasConsumption{ + GasProvided: odp.gasConsumedProvider.TotalGasProvidedWithScheduled(), + GasRefunded: odp.gasConsumedProvider.TotalGasRefunded(), + GasPenalized: odp.gasConsumedProvider.TotalGasPenalized(), + MaxGasPerBlock: odp.economicsData.MaxGasLimitPerBlock(odp.shardID), + }, + AlteredAccounts: alteredAccounts, + NotarizedHeadersHashes: arg.NotarizedHeadersHashes, + NumberOfShards: odp.numOfShards, + IsImportDB: odp.isImportDBMode, + SignersIndexes: signersIndexes, + + HighestFinalBlockNonce: arg.HighestFinalBlockNonce, + HighestFinalBlockHash: arg.HighestFinalBlockHash, }, - TransactionPool: pool, - HeaderGasConsumption: &outportcore.HeaderGasConsumption{ - GasProvided: odp.gasConsumedProvider.TotalGasProvidedWithScheduled(), - GasRefunded: odp.gasConsumedProvider.TotalGasRefunded(), - GasPenalized: odp.gasConsumedProvider.TotalGasPenalized(), - MaxGasPerBlock: odp.economicsData.MaxGasLimitPerBlock(odp.shardID), + 
HeaderDataWithBody: &outportcore.HeaderDataWithBody{ + Body: arg.Body, + Header: arg.Header, + HeaderHash: arg.HeaderHash, }, - AlteredAccounts: alteredAccounts, - NotarizedHeadersHashes: arg.NotarizedHeadersHashes, - NumberOfShards: odp.numOfShards, - IsImportDB: odp.isImportDBMode, - SignersIndexes: signersIndexes, - - HighestFinalBlockNonce: arg.HighestFinalBlockNonce, - HighestFinalBlockHash: arg.HighestFinalBlockHash, }, nil } @@ -180,41 +175,6 @@ func (odp *outportDataProvider) getSignersIndexes(header data.HeaderHandler) ([] return signersIndexes, nil } -func getBody(bodyHandler data.BodyHandler) (*block.Body, error) { - if check.IfNil(bodyHandler) { - return nil, errNilBodyHandler - } - - body, castOk := bodyHandler.(*block.Body) - if !castOk { - return nil, errCannotCastBlockBody - } - - return body, nil -} - -func (odp *outportDataProvider) getHeaderBytes(headerHandler data.HeaderHandler) ([]byte, core.HeaderType, error) { - var err error - var headerBytes []byte - var headerType core.HeaderType - - switch header := headerHandler.(type) { - case *block.MetaBlock: - headerType = core.MetaHeader - headerBytes, err = odp.marshaller.Marshal(header) - case *block.Header: - headerType = core.ShardHeaderV1 - headerBytes, err = odp.marshaller.Marshal(header) - case *block.HeaderV2: - headerType = core.ShardHeaderV2 - headerBytes, err = odp.marshaller.Marshal(header) - default: - return nil, "", errInvalidHeaderType - } - - return headerBytes, headerType, err -} - func (odp *outportDataProvider) createPool(rewardsTxs map[string]data.TransactionHandler) (*outportcore.TransactionPool, error) { if odp.shardID == core.MetachainShardId { return odp.createPoolForMeta(rewardsTxs) diff --git a/outport/process/outportDataProvider_test.go b/outport/process/outportDataProvider_test.go index 6bff67d924d..3aa79ef774a 100644 --- a/outport/process/outportDataProvider_test.go +++ b/outport/process/outportDataProvider_test.go @@ -85,9 +85,9 @@ func 
TestPrepareOutportSaveBlockData(t *testing.T) { }) require.Nil(t, err) require.NotNil(t, res) - require.NotNil(t, res.BlockData.HeaderHash) - require.NotNil(t, res.BlockData.Body) - require.NotNil(t, res.BlockData.HeaderBytes) + require.NotNil(t, res.HeaderDataWithBody.HeaderHash) + require.NotNil(t, res.HeaderDataWithBody.Body) + require.NotNil(t, res.HeaderDataWithBody.Header) require.NotNil(t, res.SignersIndexes) require.NotNil(t, res.HeaderGasConsumption) require.NotNil(t, res.TransactionPool) diff --git a/process/block/metablock.go b/process/block/metablock.go index fcb1ba141ef..5d62625016c 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -627,7 +627,12 @@ func (mp *metaProcessor) indexBlock( log.Warn("metaProcessor.indexBlock cannot prepare argSaveBlock", "error", err.Error()) return } - mp.outportHandler.SaveBlock(argSaveBlock) + err = mp.outportHandler.SaveBlock(argSaveBlock) + if err != nil { + log.Warn("metaProcessor.outportHandler.SaveBlock cannot save block", "error", err) + return + } + log.Debug("indexed block", "hash", headerHash, "nonce", metaBlock.GetNonce(), "round", metaBlock.GetRound()) indexRoundInfo(mp.outportHandler, mp.nodesCoordinator, core.MetachainShardId, metaBlock, lastMetaBlock, argSaveBlock.SignersIndexes) diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 8a90cc45295..21fc794c3ef 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -607,7 +607,12 @@ func (sp *shardProcessor) indexBlockIfNeeded( log.Warn("shardProcessor.indexBlockIfNeeded cannot prepare argSaveBlock", "error", err.Error()) return } - sp.outportHandler.SaveBlock(argSaveBlock) + err = sp.outportHandler.SaveBlock(argSaveBlock) + if err != nil { + log.Warn("shardProcessor.outportHandler.SaveBlock cannot save block", "error", err) + return + } + log.Debug("indexed block", "hash", headerHash, "nonce", header.GetNonce(), "round", header.GetRound()) shardID := sp.shardCoordinator.SelfId() diff --git 
a/process/sync/baseSync.go b/process/sync/baseSync.go index eb4937616c8..553949ee853 100644 --- a/process/sync/baseSync.go +++ b/process/sync/baseSync.go @@ -817,23 +817,15 @@ func (boot *baseBootstrap) rollBack(revertUsingForkNonce bool) error { boot.scheduledTxsExecutionHandler.SetScheduledInfo(scheduledInfo) } - headerBytes, headerType, err := boot.getHeaderBytes(currHeader) + err = boot.outportHandler.RevertIndexedBlock(&outportcore.HeaderDataWithBody{ + Body: currBody, + HeaderHash: currHeaderHash, + Header: currHeader, + }) if err != nil { - return err + log.Warn("baseBootstrap.outportHandler.RevertIndexedBlock cannot revert indexed block", "error", err) } - body, err := getBody(currBody) - if err != nil { - return err - } - - boot.outportHandler.RevertIndexedBlock(&outportcore.BlockData{ - HeaderBytes: headerBytes, - HeaderType: string(headerType), - Body: body, - HeaderHash: nil, // TODO: Do we need hash here? - }) - shouldAddHeaderToBlackList := revertUsingForkNonce && boot.blockBootstrapper.isForkTriggeredByMeta() if shouldAddHeaderToBlackList { process.AddHeaderToBlackList(boot.blackListHandler, currHeaderHash) @@ -851,41 +843,6 @@ func (boot *baseBootstrap) rollBack(revertUsingForkNonce bool) error { return nil } -func getBody(bodyHandler data.BodyHandler) (*block.Body, error) { - if check.IfNil(bodyHandler) { - return nil, fmt.Errorf("nil body") - } - - body, castOk := bodyHandler.(*block.Body) - if !castOk { - return nil, fmt.Errorf("cannot cast body") - } - - return body, nil -} - -func (boot *baseBootstrap) getHeaderBytes(headerHandler data.HeaderHandler) ([]byte, core.HeaderType, error) { - var err error - var headerBytes []byte - var headerType core.HeaderType - - switch header := headerHandler.(type) { - case *block.MetaBlock: - headerType = core.MetaHeader - headerBytes, err = boot.marshalizer.Marshal(header) - case *block.Header: - headerType = core.ShardHeaderV1 - headerBytes, err = boot.marshalizer.Marshal(header) - case *block.HeaderV2: - 
headerType = core.ShardHeaderV2 - headerBytes, err = boot.marshalizer.Marshal(header) - default: - return nil, "", fmt.Errorf("invalid/unknown header type") - } - - return headerBytes, headerType, err -} - func (boot *baseBootstrap) shouldAllowRollback(currHeader data.HeaderHandler, currHeaderHash []byte) bool { finalBlockNonce := boot.forkDetector.GetHighestFinalBlockNonce() finalBlockHash := boot.forkDetector.GetHighestFinalBlockHash() diff --git a/testscommon/outport/outportDataProviderStub.go b/testscommon/outport/outportDataProviderStub.go index 6f8e13b4798..9bf8e95c7c6 100644 --- a/testscommon/outport/outportDataProviderStub.go +++ b/testscommon/outport/outportDataProviderStub.go @@ -9,13 +9,13 @@ import ( type OutportDataProviderStub struct { PrepareOutportSaveBlockDataCalled func( arg process.ArgPrepareOutportSaveBlockData, - ) (*outportcore.OutportBlock, error) + ) (*outportcore.OutportBlockWithHeaderAndBody, error) } // PrepareOutportSaveBlockData - func (a *OutportDataProviderStub) PrepareOutportSaveBlockData( arg process.ArgPrepareOutportSaveBlockData, -) (*outportcore.OutportBlock, error) { +) (*outportcore.OutportBlockWithHeaderAndBody, error) { if a.PrepareOutportSaveBlockDataCalled != nil { return a.PrepareOutportSaveBlockDataCalled(arg) } diff --git a/testscommon/outport/outportStub.go b/testscommon/outport/outportStub.go index bd9ea033a73..e9cd2649d3e 100644 --- a/testscommon/outport/outportStub.go +++ b/testscommon/outport/outportStub.go @@ -7,17 +7,19 @@ import ( // OutportStub is a mock implementation fot the OutportHandler interface type OutportStub struct { - SaveBlockCalled func(args *outportcore.OutportBlock) + SaveBlockCalled func(args *outportcore.OutportBlockWithHeaderAndBody) error SaveValidatorsRatingCalled func(validatorsRating *outportcore.ValidatorsRating) SaveValidatorsPubKeysCalled func(validatorsPubKeys *outportcore.ValidatorsPubKeys) HasDriversCalled func() bool } // SaveBlock - -func (as *OutportStub) SaveBlock(args 
*outportcore.OutportBlock) { +func (as *OutportStub) SaveBlock(args *outportcore.OutportBlockWithHeaderAndBody) error { if as.SaveBlockCalled != nil { - as.SaveBlockCalled(args) + return as.SaveBlockCalled(args) } + + return nil } // SaveValidatorsRating - @@ -48,8 +50,8 @@ func (as *OutportStub) HasDrivers() bool { } // RevertIndexedBlock - -func (as *OutportStub) RevertIndexedBlock(_ *outportcore.BlockData) { - +func (as *OutportStub) RevertIndexedBlock(_ *outportcore.HeaderDataWithBody) error { + return nil } // SaveAccounts - From 9aa20044b81f0787020696942188e712df9beb77 Mon Sep 17 00:00:00 2001 From: MariusC Date: Fri, 17 Mar 2023 16:16:41 +0200 Subject: [PATCH 066/221] FIX: Unit tests --- factory/processing/processComponents_test.go | 6 ++-- outport/errors.go | 4 +++ outport/mock/driverStub.go | 3 +- outport/outport.go | 8 +++++ outport/outport_test.go | 32 +++++++++++++++----- process/block/shardblock_test.go | 14 +++++++-- 6 files changed, 53 insertions(+), 14 deletions(-) diff --git a/factory/processing/processComponents_test.go b/factory/processing/processComponents_test.go index a1e46aa8266..ffc6456df7c 100644 --- a/factory/processing/processComponents_test.go +++ b/factory/processing/processComponents_test.go @@ -76,7 +76,7 @@ func TestProcessComponents_IndexGenesisBlocks(t *testing.T) { HasDriversCalled: func() bool { return true }, - SaveBlockCalled: func(args *outportCore.OutportBlock) { + SaveBlockCalled: func(args *outportCore.OutportBlockWithHeaderAndBody) error { saveBlockCalledMutex.Lock() require.NotNil(t, args) @@ -87,8 +87,10 @@ func TestProcessComponents_IndexGenesisBlocks(t *testing.T) { txsPoolRequired := &outportCore.TransactionPool{} assert.Equal(t, txsPoolRequired, args.TransactionPool) - assert.Equal(t, bodyRequired, args.BlockData.Body) + assert.Equal(t, bodyRequired, args.HeaderDataWithBody.Body) saveBlockCalledMutex.Unlock() + + return nil }, } diff --git a/outport/errors.go b/outport/errors.go index eb44d2c671f..8c7ce22bb98 100644 
--- a/outport/errors.go +++ b/outport/errors.go @@ -13,3 +13,7 @@ var ErrInvalidRetrialInterval = errors.New("invalid retrial interval") // ErrNilPubKeyConverter signals that a nil pubkey converter has been provided var ErrNilPubKeyConverter = errors.New("nil pub key converter") + +var errNilSaveBlockArgs = errors.New("nil save blocks args provided") + +var errNilHeaderAndBodyArgs = errors.New("nil header and body args provided") diff --git a/outport/mock/driverStub.go b/outport/mock/driverStub.go index 18d087b6d3d..f77cb3be1f5 100644 --- a/outport/mock/driverStub.go +++ b/outport/mock/driverStub.go @@ -3,6 +3,7 @@ package mock import ( outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/testscommon" ) // DriverStub - @@ -81,7 +82,7 @@ func (d *DriverStub) FinalizedBlock(finalizedBlock *outportcore.FinalizedBlock) } func (d *DriverStub) GetMarshaller() marshal.Marshalizer { - return nil + return testscommon.MarshalizerMock{} } // Close - diff --git a/outport/outport.go b/outport/outport.go index 318facc0cca..542c867b28f 100644 --- a/outport/outport.go +++ b/outport/outport.go @@ -47,6 +47,10 @@ func (o *outport) SaveBlock(args *outportcore.OutportBlockWithHeaderAndBody) err o.mutex.RLock() defer o.mutex.RUnlock() + if args == nil { + return fmt.Errorf("outport.SaveBlock error: %w", errNilSaveBlockArgs) + } + for _, driver := range o.drivers { blockData, err := prepareBlockData(args.HeaderDataWithBody, driver) if err != nil { @@ -64,6 +68,10 @@ func prepareBlockData( headerBodyData *outportcore.HeaderDataWithBody, driver Driver, ) (*outportcore.BlockData, error) { + if headerBodyData == nil { + return nil, fmt.Errorf("outport.prepareBlockData error: %w", errNilHeaderAndBodyArgs) + } + marshaller := driver.GetMarshaller() headerBytes, headerType, err := outportcore.GetHeaderBytesAndType(marshaller, headerBodyData.Header) if err != nil { diff --git 
a/outport/outport_test.go b/outport/outport_test.go index 88c1010af56..b83603c24af 100644 --- a/outport/outport_test.go +++ b/outport/outport_test.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/atomic" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data/block" outportcore "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-go/outport/mock" logger "github.com/multiversx/mx-chain-logger-go" @@ -18,6 +19,17 @@ import ( const counterPositionInLogMessage = 5 +func createSaveBlockArgs() *outportcore.OutportBlockWithHeaderAndBody { + return &outportcore.OutportBlockWithHeaderAndBody{ + OutportBlock: &outportcore.OutportBlock{}, + HeaderDataWithBody: &outportcore.HeaderDataWithBody{ + Body: &block.Body{}, + Header: &block.HeaderV2{}, + HeaderHash: []byte("hash"), + }, + } +} + func TestNewOutport(t *testing.T) { t.Parallel() @@ -114,11 +126,12 @@ func TestOutport_SaveBlock(t *testing.T) { } } - outportHandler.SaveBlock(nil) + args := createSaveBlockArgs() + _ = outportHandler.SaveBlock(args) _ = outportHandler.SubscribeDriver(driver1) _ = outportHandler.SubscribeDriver(driver2) - outportHandler.SaveBlock(nil) + _ = outportHandler.SaveBlock(args) time.Sleep(time.Second) assert.Equal(t, 10, numCalled1) @@ -298,13 +311,14 @@ func TestOutport_RevertIndexedBlock(t *testing.T) { } } - outportHandler.RevertIndexedBlock(&outportcore.BlockData{}) + args := createSaveBlockArgs() + _ = outportHandler.RevertIndexedBlock(args.HeaderDataWithBody) time.Sleep(time.Second) _ = outportHandler.SubscribeDriver(driver1) _ = outportHandler.SubscribeDriver(driver2) - outportHandler.RevertIndexedBlock(&outportcore.BlockData{}) + _ = outportHandler.RevertIndexedBlock(args.HeaderDataWithBody) time.Sleep(time.Second) assert.Equal(t, 10, numCalled1) @@ -448,11 +462,11 @@ func TestOutport_CloseWhileDriverIsStuckInContinuousErrors(t *testing.T) { wg.Done() }() go func() { - 
outportHandler.SaveBlock(nil) + _ = outportHandler.SaveBlock(nil) wg.Done() }() go func() { - outportHandler.RevertIndexedBlock(nil) + _ = outportHandler.RevertIndexedBlock(nil) wg.Done() }() go func() { @@ -521,7 +535,8 @@ func TestOutport_SaveBlockDriverStuck(t *testing.T) { }, }) - outportHandler.SaveBlock(nil) + args := createSaveBlockArgs() + _ = outportHandler.SaveBlock(args) assert.True(t, logErrorCalled.IsSet()) assert.Equal(t, uint32(1), atomicGo.LoadUint32(&numLogDebugCalled)) @@ -559,7 +574,8 @@ func TestOutport_SaveBlockDriverIsNotStuck(t *testing.T) { }, }) - outportHandler.SaveBlock(nil) + args := createSaveBlockArgs() + _ = outportHandler.SaveBlock(args) time.Sleep(time.Second) assert.Equal(t, uint32(2), atomicGo.LoadUint32(&numLogDebugCalled)) diff --git a/process/block/shardblock_test.go b/process/block/shardblock_test.go index fe12797feec..46f1a1d7073 100644 --- a/process/block/shardblock_test.go +++ b/process/block/shardblock_test.go @@ -2228,16 +2228,24 @@ func TestShardProcessor_CommitBlockCallsIndexerMethods(t *testing.T) { called := false statusComponents.Outport = &outport.OutportStub{ - SaveBlockCalled: func(args *outportcore.OutportBlock) { + SaveBlockCalled: func(args *outportcore.OutportBlockWithHeaderAndBody) error { called = true + return nil }, HasDriversCalled: func() bool { return true }, } arguments.OutportDataProvider = &outport.OutportDataProviderStub{ - PrepareOutportSaveBlockDataCalled: func(_ processOutport.ArgPrepareOutportSaveBlockData) (*outportcore.OutportBlock, error) { - return &outportcore.OutportBlock{}, nil + PrepareOutportSaveBlockDataCalled: func(_ processOutport.ArgPrepareOutportSaveBlockData) (*outportcore.OutportBlockWithHeaderAndBody, error) { + return &outportcore.OutportBlockWithHeaderAndBody{ + HeaderDataWithBody: &outportcore.HeaderDataWithBody{ + Body: &block.Body{}, + Header: &block.HeaderV2{}, + HeaderHash: []byte("hash"), + }, + OutportBlock: &outportcore.OutportBlock{}, + }, nil }} 
arguments.AccountsDB[state.UserAccountsState] = accounts From deb08fa69900921ac3670986693325850ddd652c Mon Sep 17 00:00:00 2001 From: MariusC Date: Fri, 17 Mar 2023 16:24:51 +0200 Subject: [PATCH 067/221] CLN: Move ConvertPubKeys to core --- factory/status/statusComponents.go | 12 +----------- go.mod | 2 +- go.sum | 4 ++-- node/nodeRunner.go | 12 +----------- outport/process/errors.go | 4 ---- 5 files changed, 5 insertions(+), 29 deletions(-) diff --git a/factory/status/statusComponents.go b/factory/status/statusComponents.go index b432dd82d0f..38f69923a00 100644 --- a/factory/status/statusComponents.go +++ b/factory/status/statusComponents.go @@ -177,7 +177,7 @@ func (pc *statusComponents) epochStartEventHandler() epochStart.ActionHandler { } pc.outportHandler.SaveValidatorsPubKeys(&outportCore.ValidatorsPubKeys{ - ShardValidatorsPubKeys: convertPubKeys(validatorsPubKeys), + ShardValidatorsPubKeys: outportCore.ConvertPubKeys(validatorsPubKeys), Epoch: currentEpoch, }) @@ -186,16 +186,6 @@ func (pc *statusComponents) epochStartEventHandler() epochStart.ActionHandler { return subscribeHandler } -func convertPubKeys(validatorsPubKeys map[uint32][][]byte) map[uint32]*outportCore.PubKeys { - ret := make(map[uint32]*outportCore.PubKeys, len(validatorsPubKeys)) - - for shard, validators := range validatorsPubKeys { - ret[shard] = &outportCore.PubKeys{Keys: validators} - } - - return ret -} - // IsInterfaceNil returns true if there is no value under the interface func (scf *statusComponentsFactory) IsInterfaceNil() bool { return scf == nil diff --git a/go.mod b/go.mod index d21c29be14a..86a7c12c925 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-core-go v1.1.36-0.20230317131421-03a7f3c33ffd + github.com/multiversx/mx-chain-core-go v1.1.36-0.20230317142053-b83f1fd1059c github.com/multiversx/mx-chain-crypto-go v1.2.5 
github.com/multiversx/mx-chain-es-indexer-go v1.3.15-0.20230317105636-87b2eb6dd345 github.com/multiversx/mx-chain-logger-go v1.0.11 diff --git a/go.sum b/go.sum index 7c986b76ee3..e446364816e 100644 --- a/go.sum +++ b/go.sum @@ -611,8 +611,8 @@ github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7 github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.31/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.35-0.20230314140225-cdd3af9ba37e/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.1.36-0.20230317131421-03a7f3c33ffd h1:lCP1HAUvczAz6pJn3ED6MMWCyt+wRBnDFVYQuhq+hzU= -github.com/multiversx/mx-chain-core-go v1.1.36-0.20230317131421-03a7f3c33ffd/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= +github.com/multiversx/mx-chain-core-go v1.1.36-0.20230317142053-b83f1fd1059c h1:PcX+GqYfEP/zjQmjJ6R02dVi8NGq60CGr1X5/Z5iHE8= +github.com/multiversx/mx-chain-core-go v1.1.36-0.20230317142053-b83f1fd1059c/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-crypto-go v1.2.5 h1:tuq3BUNMhKud5DQbZi9DiVAAHUXypizy8zPH0NpTGZk= github.com/multiversx/mx-chain-crypto-go v1.2.5/go.mod h1:teqhNyWEqfMPgNn8sgWXlgtJ1a36jGCnhs/tRpXW6r4= github.com/multiversx/mx-chain-es-indexer-go v1.3.15-0.20230317105636-87b2eb6dd345 h1:P+LnTGwHT9RfesB+Q1xvMEAJBy60NTJBwbRUFi9wmfo= diff --git a/node/nodeRunner.go b/node/nodeRunner.go index 19c09b10f2e..cbfc7a614dc 100644 --- a/node/nodeRunner.go +++ b/node/nodeRunner.go @@ -1675,22 +1675,12 @@ func indexValidatorsListIfNeeded( if len(validatorsPubKeys) > 0 { outportHandler.SaveValidatorsPubKeys(&outportCore.ValidatorsPubKeys{ - ShardValidatorsPubKeys: convertPubKeys(validatorsPubKeys), + ShardValidatorsPubKeys: outportCore.ConvertPubKeys(validatorsPubKeys), Epoch: epoch, }) } } -func 
convertPubKeys(validatorsPubKeys map[uint32][][]byte) map[uint32]*outportCore.PubKeys { - ret := make(map[uint32]*outportCore.PubKeys, len(validatorsPubKeys)) - - for shard, validators := range validatorsPubKeys { - ret[shard] = &outportCore.PubKeys{Keys: validators} - } - - return ret -} - func enableGopsIfNeeded(gopsEnabled bool) { if gopsEnabled { if err := agent.Listen(agent.Options{}); err != nil { diff --git a/outport/process/errors.go b/outport/process/errors.go index 4d9ba29005a..301b23316ae 100644 --- a/outport/process/errors.go +++ b/outport/process/errors.go @@ -17,10 +17,6 @@ var errNilHeaderHandler = errors.New("nil header handler") // errNilBodyHandler signal that provided body handler is nil var errNilBodyHandler = errors.New("nil body handler") -var errCannotCastBlockBody = errors.New("cannot cast block body") - -var errInvalidHeaderType = errors.New("received invalid/unknown header type") - var errCannotCastTransaction = errors.New("cannot cast transaction") var errCannotCastSCR = errors.New("cannot cast smart contract result") From b57002f927c55320ff82934a72829bde115a1739 Mon Sep 17 00:00:00 2001 From: MariusC Date: Sun, 19 Mar 2023 20:11:53 +0200 Subject: [PATCH 068/221] CLN: RevertIndexedBlock in notifier --- outport/notifier/eventNotifier.go | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/outport/notifier/eventNotifier.go b/outport/notifier/eventNotifier.go index 3bba95397cd..3d62a864aa8 100644 --- a/outport/notifier/eventNotifier.go +++ b/outport/notifier/eventNotifier.go @@ -37,7 +37,7 @@ type FinalizedBlock struct { type eventNotifier struct { httpClient httpClientHandler marshalizer marshal.Marshalizer - hasher hashing.Hasher + hasher hashing.Hasher // todo: remove this pubKeyConverter core.PubkeyConverter } @@ -104,13 +104,8 @@ func (en *eventNotifier) RevertIndexedBlock(blockData *outport.BlockData) error return err } - blockHash, err := core.CalculateHash(en.marshalizer, en.hasher, headerHandler) - if err != 
nil { - return fmt.Errorf("%w in eventNotifier.RevertIndexedBlock while computing the block hash", err) - } - revertBlock := RevertBlock{ - Hash: hex.EncodeToString(blockHash), + Hash: hex.EncodeToString(blockData.HeaderHash), Nonce: headerHandler.GetNonce(), Round: headerHandler.GetRound(), Epoch: headerHandler.GetEpoch(), From 9854cb38a2a13bb5f933cbe4374ba19cfcc335c4 Mon Sep 17 00:00:00 2001 From: MariusC Date: Mon, 20 Mar 2023 11:27:14 +0200 Subject: [PATCH 069/221] CLN: Event notifier --- factory/status/statusComponents.go | 2 -- outport/factory/notifierFactory.go | 15 ++----------- outport/factory/notifierFactory_test.go | 26 ---------------------- outport/factory/outportFactory_test.go | 3 --- outport/notifier/eventNotifier.go | 25 +++++---------------- outport/notifier/eventNotifier_test.go | 29 ++----------------------- 6 files changed, 10 insertions(+), 90 deletions(-) diff --git a/factory/status/statusComponents.go b/factory/status/statusComponents.go index 38f69923a00..f0294b3137f 100644 --- a/factory/status/statusComponents.go +++ b/factory/status/statusComponents.go @@ -252,8 +252,6 @@ func (scf *statusComponentsFactory) makeEventNotifierArgs() *outportDriverFactor Password: eventNotifierConfig.Password, RequestTimeoutSec: eventNotifierConfig.RequestTimeoutSec, Marshaller: scf.coreComponents.InternalMarshalizer(), - Hasher: scf.coreComponents.Hasher(), - PubKeyConverter: scf.coreComponents.AddressPubKeyConverter(), } } diff --git a/outport/factory/notifierFactory.go b/outport/factory/notifierFactory.go index d1ceb412230..0e57628a3c5 100644 --- a/outport/factory/notifierFactory.go +++ b/outport/factory/notifierFactory.go @@ -3,7 +3,6 @@ package factory import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/outport" "github.com/multiversx/mx-chain-go/outport/notifier" @@ 
-18,8 +17,6 @@ type EventNotifierFactoryArgs struct { Password string RequestTimeoutSec int Marshaller marshal.Marshalizer - Hasher hashing.Hasher - PubKeyConverter core.PubkeyConverter } // CreateEventNotifier will create a new event notifier client instance @@ -41,10 +38,8 @@ func CreateEventNotifier(args *EventNotifierFactoryArgs) (outport.Driver, error) } notifierArgs := notifier.ArgsEventNotifier{ - HttpClient: httpClient, - Marshaller: args.Marshaller, - Hasher: args.Hasher, - PubKeyConverter: args.PubKeyConverter, + HttpClient: httpClient, + Marshaller: args.Marshaller, } return notifier.NewEventNotifier(notifierArgs) @@ -54,12 +49,6 @@ func checkInputArgs(args *EventNotifierFactoryArgs) error { if check.IfNil(args.Marshaller) { return core.ErrNilMarshalizer } - if check.IfNil(args.Hasher) { - return core.ErrNilHasher - } - if check.IfNil(args.PubKeyConverter) { - return outport.ErrNilPubKeyConverter - } return nil } diff --git a/outport/factory/notifierFactory_test.go b/outport/factory/notifierFactory_test.go index c588e586c83..ae38fe7964b 100644 --- a/outport/factory/notifierFactory_test.go +++ b/outport/factory/notifierFactory_test.go @@ -4,10 +4,8 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-go/outport" "github.com/multiversx/mx-chain-go/outport/factory" "github.com/multiversx/mx-chain-go/testscommon" - "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/stretchr/testify/require" ) @@ -20,8 +18,6 @@ func createMockNotifierFactoryArgs() *factory.EventNotifierFactoryArgs { Password: "", RequestTimeoutSec: 1, Marshaller: &testscommon.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - PubKeyConverter: &testscommon.PubkeyConverterMock{}, } } @@ -39,28 +35,6 @@ func TestCreateEventNotifier(t *testing.T) { require.Equal(t, core.ErrNilMarshalizer, err) }) - t.Run("nil hasher", func(t *testing.T) { - t.Parallel() - - args := createMockNotifierFactoryArgs() - args.Hasher = nil - 
- en, err := factory.CreateEventNotifier(args) - require.Nil(t, en) - require.Equal(t, core.ErrNilHasher, err) - }) - - t.Run("nil pub key converter", func(t *testing.T) { - t.Parallel() - - args := createMockNotifierFactoryArgs() - args.PubKeyConverter = nil - - en, err := factory.CreateEventNotifier(args) - require.Nil(t, en) - require.Equal(t, outport.ErrNilPubKeyConverter, err) - }) - t.Run("should work", func(t *testing.T) { t.Parallel() diff --git a/outport/factory/outportFactory_test.go b/outport/factory/outportFactory_test.go index 0fb10987b09..29446844737 100644 --- a/outport/factory/outportFactory_test.go +++ b/outport/factory/outportFactory_test.go @@ -10,7 +10,6 @@ import ( "github.com/multiversx/mx-chain-go/outport/factory" notifierFactory "github.com/multiversx/mx-chain-go/outport/factory" "github.com/multiversx/mx-chain-go/process/mock" - "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/stretchr/testify/require" ) @@ -95,8 +94,6 @@ func TestCreateOutport_SubscribeNotifierDriver(t *testing.T) { args := createMockArgsOutportHandler(false, true) args.EventNotifierFactoryArgs.Marshaller = &mock.MarshalizerMock{} - args.EventNotifierFactoryArgs.Hasher = &hashingMocks.HasherMock{} - args.EventNotifierFactoryArgs.PubKeyConverter = &mock.PubkeyConverterMock{} args.EventNotifierFactoryArgs.RequestTimeoutSec = 1 outPort, err := factory.CreateOutport(args) require.Nil(t, err) diff --git a/outport/notifier/eventNotifier.go b/outport/notifier/eventNotifier.go index 3d62a864aa8..db5ae102fc4 100644 --- a/outport/notifier/eventNotifier.go +++ b/outport/notifier/eventNotifier.go @@ -8,7 +8,6 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/core/unmarshal" "github.com/multiversx/mx-chain-core-go/data/outport" - "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" logger "github.com/multiversx/mx-chain-logger-go" ) @@ -35,18 +34,14 @@ type 
FinalizedBlock struct { } type eventNotifier struct { - httpClient httpClientHandler - marshalizer marshal.Marshalizer - hasher hashing.Hasher // todo: remove this - pubKeyConverter core.PubkeyConverter + httpClient httpClientHandler + marshalizer marshal.Marshalizer } // ArgsEventNotifier defines the arguments needed for event notifier creation type ArgsEventNotifier struct { - HttpClient httpClientHandler - Marshaller marshal.Marshalizer - Hasher hashing.Hasher - PubKeyConverter core.PubkeyConverter + HttpClient httpClientHandler + Marshaller marshal.Marshalizer } // NewEventNotifier creates a new instance of the eventNotifier @@ -58,10 +53,8 @@ func NewEventNotifier(args ArgsEventNotifier) (*eventNotifier, error) { } return &eventNotifier{ - httpClient: args.HttpClient, - marshalizer: args.Marshaller, - hasher: args.Hasher, - pubKeyConverter: args.PubKeyConverter, + httpClient: args.HttpClient, + marshalizer: args.Marshaller, }, nil } @@ -72,12 +65,6 @@ func checkEventNotifierArgs(args ArgsEventNotifier) error { if check.IfNil(args.Marshaller) { return ErrNilMarshaller } - if check.IfNil(args.Hasher) { - return ErrNilHasher - } - if check.IfNil(args.PubKeyConverter) { - return ErrNilPubKeyConverter - } return nil } diff --git a/outport/notifier/eventNotifier_test.go b/outport/notifier/eventNotifier_test.go index 47e577dfd4c..a7f65f4244e 100644 --- a/outport/notifier/eventNotifier_test.go +++ b/outport/notifier/eventNotifier_test.go @@ -11,17 +11,14 @@ import ( "github.com/multiversx/mx-chain-go/outport/mock" "github.com/multiversx/mx-chain-go/outport/notifier" "github.com/multiversx/mx-chain-go/testscommon" - "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func createMockEventNotifierArgs() notifier.ArgsEventNotifier { return notifier.ArgsEventNotifier{ - HttpClient: &mock.HTTPClientStub{}, - Marshaller: &testscommon.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, 
- PubKeyConverter: &testscommon.PubkeyConverterMock{}, + HttpClient: &mock.HTTPClientStub{}, + Marshaller: &testscommon.MarshalizerMock{}, } } @@ -50,28 +47,6 @@ func TestNewEventNotifier(t *testing.T) { require.Equal(t, notifier.ErrNilMarshaller, err) }) - t.Run("nil hasher", func(t *testing.T) { - t.Parallel() - - args := createMockEventNotifierArgs() - args.Hasher = nil - - en, err := notifier.NewEventNotifier(args) - require.Nil(t, en) - require.Equal(t, notifier.ErrNilHasher, err) - }) - - t.Run("nil pub key converter", func(t *testing.T) { - t.Parallel() - - args := createMockEventNotifierArgs() - args.PubKeyConverter = nil - - en, err := notifier.NewEventNotifier(args) - require.Nil(t, en) - require.Equal(t, notifier.ErrNilPubKeyConverter, err) - }) - t.Run("should work", func(t *testing.T) { t.Parallel() From b383d109a00dd27c45a5770f6d5a88021d7d97ba Mon Sep 17 00:00:00 2001 From: MariusC Date: Mon, 20 Mar 2023 11:33:00 +0200 Subject: [PATCH 070/221] FIX: go.mod --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 86a7c12c925..fc139b17997 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-core-go v1.1.36-0.20230317142053-b83f1fd1059c + github.com/multiversx/mx-chain-core-go v1.1.36-0.20230320093125-be2a2793d0fe github.com/multiversx/mx-chain-crypto-go v1.2.5 github.com/multiversx/mx-chain-es-indexer-go v1.3.15-0.20230317105636-87b2eb6dd345 github.com/multiversx/mx-chain-logger-go v1.0.11 diff --git a/go.sum b/go.sum index e446364816e..f52c6a62094 100644 --- a/go.sum +++ b/go.sum @@ -611,8 +611,8 @@ github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7 github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.31/go.mod 
h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.35-0.20230314140225-cdd3af9ba37e/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.1.36-0.20230317142053-b83f1fd1059c h1:PcX+GqYfEP/zjQmjJ6R02dVi8NGq60CGr1X5/Z5iHE8= -github.com/multiversx/mx-chain-core-go v1.1.36-0.20230317142053-b83f1fd1059c/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= +github.com/multiversx/mx-chain-core-go v1.1.36-0.20230320093125-be2a2793d0fe h1:mWdWpK7TDqvpT9VAQT12T4Alh42Oau7zfAaC8WGjCic= +github.com/multiversx/mx-chain-core-go v1.1.36-0.20230320093125-be2a2793d0fe/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-crypto-go v1.2.5 h1:tuq3BUNMhKud5DQbZi9DiVAAHUXypizy8zPH0NpTGZk= github.com/multiversx/mx-chain-crypto-go v1.2.5/go.mod h1:teqhNyWEqfMPgNn8sgWXlgtJ1a36jGCnhs/tRpXW6r4= github.com/multiversx/mx-chain-es-indexer-go v1.3.15-0.20230317105636-87b2eb6dd345 h1:P+LnTGwHT9RfesB+Q1xvMEAJBy60NTJBwbRUFi9wmfo= From d2151a1b42b312e1ff78b776908803782da75308 Mon Sep 17 00:00:00 2001 From: MariusC Date: Mon, 20 Mar 2023 12:09:03 +0200 Subject: [PATCH 071/221] FIX: After self review --- factory/processing/processComponents.go | 22 +++++++++---------- .../transactionsExecutionOrder.go | 16 +++++++------- outport/process/outportDataProvider.go | 2 +- 3 files changed, 20 insertions(+), 20 deletions(-) diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index c9e9e9bdb1f..5145f732f35 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -1166,10 +1166,10 @@ func (pcf *processComponentsFactory) indexGenesisBlocks( genesisBlockHash, originalGenesisBlockHeader, genesisBody, - unwrapSCRsInfo(txsPoolPerShard[currentShardId].SmartContractResults), - unwrapReceipts(txsPoolPerShard[currentShardId].Receipts), + wrapSCRsInfo(txsPoolPerShard[currentShardId].SmartContractResults), + 
wrapReceipts(txsPoolPerShard[currentShardId].Receipts), intraShardMiniBlocks, - unwrapLogs(txsPoolPerShard[currentShardId].Logs)) + wrapLogs(txsPoolPerShard[currentShardId].Logs)) if err != nil { return err } @@ -1181,7 +1181,7 @@ func (pcf *processComponentsFactory) indexGenesisBlocks( } if txsPoolPerShard[currentShardId] != nil { - err = pcf.saveGenesisTxsToStorage(unwrapTxsInfo(txsPoolPerShard[currentShardId].Transactions)) + err = pcf.saveGenesisTxsToStorage(wrapTxsInfo(txsPoolPerShard[currentShardId].Transactions)) if err != nil { return err } @@ -1239,10 +1239,10 @@ func (pcf *processComponentsFactory) saveAlteredGenesisHeaderToStorage( genesisBlockHash, genesisBlockHeader, genesisBody, - unwrapSCRsInfo(txsPoolPerShard[currentShardId].SmartContractResults), - unwrapReceipts(txsPoolPerShard[currentShardId].Receipts), + wrapSCRsInfo(txsPoolPerShard[currentShardId].SmartContractResults), + wrapReceipts(txsPoolPerShard[currentShardId].Receipts), intraShardMiniBlocks, - unwrapLogs(txsPoolPerShard[currentShardId].Logs)) + wrapLogs(txsPoolPerShard[currentShardId].Logs)) if err != nil { return err } @@ -1991,7 +1991,7 @@ func (pc *processComponents) Close() error { return nil } -func unwrapTxsInfo(txs map[string]*outport.TxInfo) map[string]data.TransactionHandler { +func wrapTxsInfo(txs map[string]*outport.TxInfo) map[string]data.TransactionHandler { ret := make(map[string]data.TransactionHandler, len(txs)) for hash, tx := range txs { ret[hash] = tx.Transaction @@ -2000,7 +2000,7 @@ func unwrapTxsInfo(txs map[string]*outport.TxInfo) map[string]data.TransactionHa return ret } -func unwrapSCRsInfo(scrs map[string]*outport.SCRInfo) map[string]data.TransactionHandler { +func wrapSCRsInfo(scrs map[string]*outport.SCRInfo) map[string]data.TransactionHandler { ret := make(map[string]data.TransactionHandler, len(scrs)) for hash, scr := range scrs { ret[hash] = scr.SmartContractResult @@ -2009,7 +2009,7 @@ func unwrapSCRsInfo(scrs map[string]*outport.SCRInfo) 
map[string]data.Transactio return ret } -func unwrapReceipts(receipts map[string]*receipt.Receipt) map[string]data.TransactionHandler { +func wrapReceipts(receipts map[string]*receipt.Receipt) map[string]data.TransactionHandler { ret := make(map[string]data.TransactionHandler, len(receipts)) for hash, r := range receipts { ret[hash] = r @@ -2018,7 +2018,7 @@ func unwrapReceipts(receipts map[string]*receipt.Receipt) map[string]data.Transa return ret } -func unwrapLogs(logs map[string]*transaction.Log) []*data.LogData { +func wrapLogs(logs map[string]*transaction.Log) []*data.LogData { ret := make([]*data.LogData, len(logs)) idx := 0 diff --git a/outport/process/executionOrder/transactionsExecutionOrder.go b/outport/process/executionOrder/transactionsExecutionOrder.go index ebd486c375c..c60b2af2548 100644 --- a/outport/process/executionOrder/transactionsExecutionOrder.go +++ b/outport/process/executionOrder/transactionsExecutionOrder.go @@ -259,27 +259,27 @@ func extractTxsFromMap(txsHashes [][]byte, txs map[string]*outport.TxInfo) ([]da return result, nil } -func extractSCRsFromMap(txsHashes [][]byte, txs map[string]*outport.SCRInfo) ([]data.TxWithExecutionOrderHandler, error) { +func extractSCRsFromMap(txsHashes [][]byte, scrs map[string]*outport.SCRInfo) ([]data.TxWithExecutionOrderHandler, error) { result := make([]data.TxWithExecutionOrderHandler, 0, len(txsHashes)) for _, txHash := range txsHashes { - tx, found := txs[string(txHash)] + scr, found := scrs[string(txHash)] if !found { - return nil, fmt.Errorf("cannot find transaction in pool, txHash: %s", hex.EncodeToString(txHash)) + return nil, fmt.Errorf("cannot find scr in pool, txHash: %s", hex.EncodeToString(txHash)) } - result = append(result, tx) + result = append(result, scr) } return result, nil } -func extractRewardsFromMap(txsHashes [][]byte, txs map[string]*outport.RewardInfo) ([]data.TxWithExecutionOrderHandler, error) { +func extractRewardsFromMap(txsHashes [][]byte, rewards 
map[string]*outport.RewardInfo) ([]data.TxWithExecutionOrderHandler, error) { result := make([]data.TxWithExecutionOrderHandler, 0, len(txsHashes)) for _, txHash := range txsHashes { - tx, found := txs[string(txHash)] + reward, found := rewards[string(txHash)] if !found { - return nil, fmt.Errorf("cannot find transaction in pool, txHash: %s", hex.EncodeToString(txHash)) + return nil, fmt.Errorf("cannot find reward in pool, txHash: %s", hex.EncodeToString(txHash)) } - result = append(result, tx) + result = append(result, reward) } return result, nil diff --git a/outport/process/outportDataProvider.go b/outport/process/outportDataProvider.go index e3f09bc0e74..56f047f53b4 100644 --- a/outport/process/outportDataProvider.go +++ b/outport/process/outportDataProvider.go @@ -120,7 +120,7 @@ func (odp *outportDataProvider) PrepareOutportSaveBlockData(arg ArgPrepareOutpor return &outportcore.OutportBlockWithHeaderAndBody{ OutportBlock: &outportcore.OutportBlock{ - BlockData: nil, // this will be filed with specific data for each driver + BlockData: nil, // this will be filled with specific data for each driver TransactionPool: pool, HeaderGasConsumption: &outportcore.HeaderGasConsumption{ GasProvided: odp.gasConsumedProvider.TotalGasProvidedWithScheduled(), From 48acbd8afa9536f718d4f4439c3f07157bb3d27e Mon Sep 17 00:00:00 2001 From: MariusC Date: Mon, 20 Mar 2023 12:56:27 +0200 Subject: [PATCH 072/221] FEAT: Update go.mod --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index fc139b17997..c12925e15d2 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-core-go v1.1.36-0.20230320093125-be2a2793d0fe + github.com/multiversx/mx-chain-core-go v1.1.36-0.20230320105458-1ac37b30d984 github.com/multiversx/mx-chain-crypto-go v1.2.5 
github.com/multiversx/mx-chain-es-indexer-go v1.3.15-0.20230317105636-87b2eb6dd345 github.com/multiversx/mx-chain-logger-go v1.0.11 diff --git a/go.sum b/go.sum index f52c6a62094..039e0840b1e 100644 --- a/go.sum +++ b/go.sum @@ -611,8 +611,8 @@ github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7 github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.31/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.35-0.20230314140225-cdd3af9ba37e/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.1.36-0.20230320093125-be2a2793d0fe h1:mWdWpK7TDqvpT9VAQT12T4Alh42Oau7zfAaC8WGjCic= -github.com/multiversx/mx-chain-core-go v1.1.36-0.20230320093125-be2a2793d0fe/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= +github.com/multiversx/mx-chain-core-go v1.1.36-0.20230320105458-1ac37b30d984 h1:vqjSw8MKp99Ku3r5rqm24JT9q9tQmZ+SFdhvB0t6eoI= +github.com/multiversx/mx-chain-core-go v1.1.36-0.20230320105458-1ac37b30d984/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-crypto-go v1.2.5 h1:tuq3BUNMhKud5DQbZi9DiVAAHUXypizy8zPH0NpTGZk= github.com/multiversx/mx-chain-crypto-go v1.2.5/go.mod h1:teqhNyWEqfMPgNn8sgWXlgtJ1a36jGCnhs/tRpXW6r4= github.com/multiversx/mx-chain-es-indexer-go v1.3.15-0.20230317105636-87b2eb6dd345 h1:P+LnTGwHT9RfesB+Q1xvMEAJBy60NTJBwbRUFi9wmfo= From cd6bcd06d2a1effe3dd1d22080f72644c7e76ee5 Mon Sep 17 00:00:00 2001 From: MariusC Date: Tue, 21 Mar 2023 15:29:57 +0200 Subject: [PATCH 073/221] FIX: After merge --- go.mod | 4 ++-- go.sum | 18 +++++++++--------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/go.mod b/go.mod index 7583bf93f07..c33460f0c31 100644 --- a/go.mod +++ b/go.mod @@ -13,9 +13,9 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure 
v1.5.0 - github.com/multiversx/mx-chain-core-go v1.1.36-0.20230320105458-1ac37b30d984 + github.com/multiversx/mx-chain-core-go v1.2.1-0.20230321130308-19c4c93dcae5 github.com/multiversx/mx-chain-crypto-go v1.2.5 - github.com/multiversx/mx-chain-es-indexer-go v1.3.15-0.20230317105636-87b2eb6dd345 + github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230321130745-62076807b905 github.com/multiversx/mx-chain-logger-go v1.0.11 github.com/multiversx/mx-chain-p2p-go v1.0.13 github.com/multiversx/mx-chain-storage-go v1.0.7 diff --git a/go.sum b/go.sum index 039e0840b1e..7fb1b5f2a69 100644 --- a/go.sum +++ b/go.sum @@ -320,8 +320,9 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= +github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/herumi/bls-go-binary v1.0.0 h1:PRPF6vPd35zyDy+tp86HwNnGdufCH2lZL0wZGxYvkRs= github.com/herumi/bls-go-binary v1.0.0/go.mod h1:O4Vp1AfR4raRGwFeQpr9X/PQtncEicMoOe6BQt1oX0Y= @@ -610,23 +611,23 @@ github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUY github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.31/go.mod 
h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.1.35-0.20230314140225-cdd3af9ba37e/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.1.36-0.20230320105458-1ac37b30d984 h1:vqjSw8MKp99Ku3r5rqm24JT9q9tQmZ+SFdhvB0t6eoI= -github.com/multiversx/mx-chain-core-go v1.1.36-0.20230320105458-1ac37b30d984/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= +github.com/multiversx/mx-chain-core-go v1.2.0/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= +github.com/multiversx/mx-chain-core-go v1.2.1-0.20230321130308-19c4c93dcae5 h1:jMxFHf7I5QdHknAzCADFerVZIinHwoAShgM2OaDWCHY= +github.com/multiversx/mx-chain-core-go v1.2.1-0.20230321130308-19c4c93dcae5/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-crypto-go v1.2.5 h1:tuq3BUNMhKud5DQbZi9DiVAAHUXypizy8zPH0NpTGZk= github.com/multiversx/mx-chain-crypto-go v1.2.5/go.mod h1:teqhNyWEqfMPgNn8sgWXlgtJ1a36jGCnhs/tRpXW6r4= -github.com/multiversx/mx-chain-es-indexer-go v1.3.15-0.20230317105636-87b2eb6dd345 h1:P+LnTGwHT9RfesB+Q1xvMEAJBy60NTJBwbRUFi9wmfo= -github.com/multiversx/mx-chain-es-indexer-go v1.3.15-0.20230317105636-87b2eb6dd345/go.mod h1:QcF/hS31tE/Tq/YX1QTPdpKCcbXPQ5HqcKM76p6rYYE= +github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230321130745-62076807b905 h1:Ifmhkyjcnur+PLuWbGcdV9n4Zx42QkKNqHpyevy3vMU= +github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230321130745-62076807b905/go.mod h1:D5D4AKnSBXZ3d1nVxi1KWhBmngYTHd/0fnfu8j5YjEs= github.com/multiversx/mx-chain-logger-go v1.0.11 h1:DFsHa+sc5fKwhDR50I8uBM99RTDTEW68ESyr5ALRDwE= github.com/multiversx/mx-chain-logger-go v1.0.11/go.mod h1:1srDkP0DQucWQ+rYfaq0BX2qLnULsUdRPADpYUTM6dA= github.com/multiversx/mx-chain-p2p-go v1.0.13 h1:woIlYkDFCKYyJQ5urDcOzz8HUFGsSEhTfUXDDxNI2zM= github.com/multiversx/mx-chain-p2p-go v1.0.13/go.mod h1:j9Ueo2ptCnL7TQvQg6KS/KWAoJEJpjkPgE5ZTaqEAn4= github.com/multiversx/mx-chain-storage-go v1.0.7 
h1:UqLo/OLTD3IHiE/TB/SEdNRV1GG2f1R6vIP5ehHwCNw= github.com/multiversx/mx-chain-storage-go v1.0.7/go.mod h1:gtKoV32Cg2Uy8deHzF8Ud0qAl0zv92FvWgPSYIP0Zmg= -github.com/multiversx/mx-chain-vm-common-go v1.3.34/go.mod h1:sZ2COLCxvf2GxAAJHGmGqWybObLtFuk2tZUyGqnMXE8= github.com/multiversx/mx-chain-vm-common-go v1.3.36/go.mod h1:sZ2COLCxvf2GxAAJHGmGqWybObLtFuk2tZUyGqnMXE8= -github.com/multiversx/mx-chain-vm-common-go v1.3.37 h1:KeK6JCjeNUOHC5Z12/CTQIa8Z1at0dnnL9hY1LNrHS8= github.com/multiversx/mx-chain-vm-common-go v1.3.37/go.mod h1:sZ2COLCxvf2GxAAJHGmGqWybObLtFuk2tZUyGqnMXE8= +github.com/multiversx/mx-chain-vm-common-go v1.4.0 h1:0i0cJZJOXGzqYzwtKFHSr2yGmnFAdizOuISK8HgsnYo= +github.com/multiversx/mx-chain-vm-common-go v1.4.0/go.mod h1:odBJC92ANA8zLtPh/wwajUUGJOaS88F5QYGf0t8Wgzw= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.50 h1:ScUq7/wq78vthMTQ6v5Ux1DvSMQMHxQ2Sl7aPP26q1w= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.50/go.mod h1:e3uYdgoKzs3puaznbmSjDcRisJc5Do4tpg7VqyYwoek= github.com/multiversx/mx-chain-vm-v1_3-go v1.3.51 h1:axtp5/mpA+xYJ1cu4KtAGETV4t6v6/tNfQh0HCclBYY= @@ -811,7 +812,6 @@ github.com/ugorji/go/codec v1.2.9 h1:rmenucSohSTiyL09Y+l2OCk+FrMxGMzho2+tjr5ticU github.com/ugorji/go/codec v1.2.9/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.9/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.10 h1:p8Fspmz3iTctJstry1PYS3HVdllxnEzTEsgIgtxTrCk= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= From 14fde5c1eff20b684b21e51c49c54b072038cb61 Mon Sep 17 00:00:00 2001 From: MariusC Date: Tue, 21 Mar 2023 16:52:07 +0200 Subject: [PATCH 074/221] FIX: After merge 2 --- outport/factory/outportFactory_test.go | 4 -- 
.../transactionsFeeProcessor_test.go | 60 ++++++++++--------- 2 files changed, 32 insertions(+), 32 deletions(-) diff --git a/outport/factory/outportFactory_test.go b/outport/factory/outportFactory_test.go index 04dd4fe6633..29446844737 100644 --- a/outport/factory/outportFactory_test.go +++ b/outport/factory/outportFactory_test.go @@ -10,8 +10,6 @@ import ( "github.com/multiversx/mx-chain-go/outport/factory" notifierFactory "github.com/multiversx/mx-chain-go/outport/factory" "github.com/multiversx/mx-chain-go/process/mock" - "github.com/multiversx/mx-chain-go/testscommon" - "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/stretchr/testify/require" ) @@ -96,8 +94,6 @@ func TestCreateOutport_SubscribeNotifierDriver(t *testing.T) { args := createMockArgsOutportHandler(false, true) args.EventNotifierFactoryArgs.Marshaller = &mock.MarshalizerMock{} - args.EventNotifierFactoryArgs.Hasher = &hashingMocks.HasherMock{} - args.EventNotifierFactoryArgs.PubKeyConverter = &testscommon.PubkeyConverterMock{} args.EventNotifierFactoryArgs.RequestTimeoutSec = 1 outPort, err := factory.CreateOutport(args) require.Nil(t, err) diff --git a/outport/process/transactionsfee/transactionsFeeProcessor_test.go b/outport/process/transactionsfee/transactionsFeeProcessor_test.go index a894b2c3438..aa046694d22 100644 --- a/outport/process/transactionsfee/transactionsFeeProcessor_test.go +++ b/outport/process/transactionsfee/transactionsFeeProcessor_test.go @@ -374,19 +374,21 @@ func TestPutFeeAndGasUsedScrWithRefundNoTx(t *testing.T) { refundValueBig, _ := big.NewInt(0).SetString("226498540000000", 10) - scr := outportcore.NewTransactionHandlerWithGasAndFee(&smartContractResult.SmartContractResult{ - Nonce: 3, - SndAddr: []byte("erd1qqqqqqqqqqqqqpgq3dswlnnlkfd3gqrcv3dhzgnvh8ryf27g5rfsecnn2s"), - RcvAddr: []byte("erd1k7j6ewjsla4zsgv8v6f6fe3dvrkgv3d0d9jerczw45hzedhyed8sh2u34u"), - PrevTxHash: []byte("f639cb7a0231191e04ec19dcb1359bd93a03fe8dc4a28a80d00835c5d1c988f8"), - 
OriginalTxHash: txHash, - Value: refundValueBig, - Data: []byte("@ok"), - }, 0, big.NewInt(0)) + scr := &outportcore.SCRInfo{ + SmartContractResult: &smartContractResult.SmartContractResult{ + Nonce: 3, + SndAddr: []byte("erd1qqqqqqqqqqqqqpgq3dswlnnlkfd3gqrcv3dhzgnvh8ryf27g5rfsecnn2s"), + RcvAddr: []byte("erd1k7j6ewjsla4zsgv8v6f6fe3dvrkgv3d0d9jerczw45hzedhyed8sh2u34u"), + PrevTxHash: []byte("f639cb7a0231191e04ec19dcb1359bd93a03fe8dc4a28a80d00835c5d1c988f8"), + OriginalTxHash: txHash, + Value: refundValueBig, + Data: []byte("@ok"), + }, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, + } - pool := &outportcore.Pool{ - Scrs: map[string]coreData.TransactionHandlerWithGasUsedAndFee{ - "wrong": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{}, 0, big.NewInt(0)), + pool := &outportcore.TransactionPool{ + SmartContractResults: map[string]*outportcore.SCRInfo{ string(scrWithRefund): scr, }, } @@ -407,8 +409,8 @@ func TestPutFeeAndGasUsedScrWithRefundNoTx(t *testing.T) { err = txsFeeProc.PutFeeAndGasUsed(pool) require.Nil(t, err) - require.Equal(t, big.NewInt(0), scr.GetFee()) - require.Equal(t, uint64(0), scr.GetGasUsed()) + require.Equal(t, big.NewInt(0), scr.GetFeeInfo().GetFee()) + require.Equal(t, uint64(0), scr.GetFeeInfo().GetGasUsed()) require.True(t, wasCalled) } @@ -420,19 +422,21 @@ func TestPutFeeAndGasUsedScrWithRefundNotForInitialSender(t *testing.T) { refundValueBig, _ := big.NewInt(0).SetString("226498540000000", 10) - scr := outportcore.NewTransactionHandlerWithGasAndFee(&smartContractResult.SmartContractResult{ - Nonce: 3, - SndAddr: []byte("erd1qqqqqqqqqqqqqpgq3dswlnnlkfd3gqrcv3dhzgnvh8ryf27g5rfsecnn2s"), - RcvAddr: []byte("erd1k7j6ewjsla4zsgv8v6f6fe3dvrkgv3d0d9jerczw45hzedhyed8sh2u34u"), - PrevTxHash: []byte("f639cb7a0231191e04ec19dcb1359bd93a03fe8dc4a28a80d00835c5d1c988f8"), - OriginalTxHash: txHash, - Value: refundValueBig, - Data: []byte(""), - }, 0, big.NewInt(0)) + scr := &outportcore.SCRInfo{ + SmartContractResult: 
&smartContractResult.SmartContractResult{ + Nonce: 3, + SndAddr: []byte("erd1qqqqqqqqqqqqqpgq3dswlnnlkfd3gqrcv3dhzgnvh8ryf27g5rfsecnn2s"), + RcvAddr: []byte("erd1k7j6ewjsla4zsgv8v6f6fe3dvrkgv3d0d9jerczw45hzedhyed8sh2u34u"), + PrevTxHash: []byte("f639cb7a0231191e04ec19dcb1359bd93a03fe8dc4a28a80d00835c5d1c988f8"), + OriginalTxHash: txHash, + Value: refundValueBig, + Data: []byte(""), + }, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, + } - pool := &outportcore.Pool{ - Scrs: map[string]coreData.TransactionHandlerWithGasUsedAndFee{ - "wrong": outportcore.NewTransactionHandlerWithGasAndFee(&transaction.Transaction{}, 0, big.NewInt(0)), + pool := &outportcore.TransactionPool{ + SmartContractResults: map[string]*outportcore.SCRInfo{ string(scrWithRefund): scr, }, } @@ -454,6 +458,6 @@ func TestPutFeeAndGasUsedScrWithRefundNotForInitialSender(t *testing.T) { err = txsFeeProc.PutFeeAndGasUsed(pool) require.Nil(t, err) - require.Equal(t, big.NewInt(0), scr.GetFee()) - require.Equal(t, uint64(0), scr.GetGasUsed()) + require.Equal(t, big.NewInt(0), scr.GetFeeInfo().GetFee()) + require.Equal(t, uint64(0), scr.GetFeeInfo().GetGasUsed()) } From 1a3729804b5025accc6cde646ccd6bbd29be79f8 Mon Sep 17 00:00:00 2001 From: MariusC Date: Tue, 21 Mar 2023 17:07:30 +0200 Subject: [PATCH 075/221] FIX: After review --- factory/processing/processComponents.go | 30 ++++++++++++------------- outport/mock/driverStub.go | 1 + process/block/baseProcess.go | 2 +- process/block/metablock.go | 6 +++-- process/block/shardblock.go | 6 +++-- 5 files changed, 25 insertions(+), 20 deletions(-) diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index 7e6f5c8ee47..872746f7d78 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -1111,8 +1111,8 @@ func (pcf *processComponentsFactory) indexGenesisBlocks( initialIndexingData map[uint32]*genesis.IndexingData, alteredAccounts 
map[string]*alteredAccount.AlteredAccount, ) error { - currentShardId := pcf.bootstrapComponents.ShardCoordinator().SelfId() - originalGenesisBlockHeader := genesisBlocks[currentShardId] + currentShardID := pcf.bootstrapComponents.ShardCoordinator().SelfId() + originalGenesisBlockHeader := genesisBlocks[currentShardID] genesisBlockHeader := originalGenesisBlockHeader.ShallowClone() genesisBlockHash, err := core.CalculateHash(pcf.coreData.InternalMarshalizer(), pcf.coreData.Hasher(), genesisBlockHeader) @@ -1126,7 +1126,7 @@ func (pcf *processComponentsFactory) indexGenesisBlocks( } intraShardMiniBlocks := getGenesisIntraShardMiniblocks(miniBlocks) - genesisBody := getGenesisBlockForShard(miniBlocks, currentShardId) + genesisBody := getGenesisBlockForShard(miniBlocks, currentShardID) if pcf.statusComponents.OutportHandler().HasDrivers() { log.Info("indexGenesisBlocks(): indexer.SaveBlock", "hash", genesisBlockHash) @@ -1139,7 +1139,7 @@ func (pcf *processComponentsFactory) indexGenesisBlocks( Balance: "0", } - _ = genesisBlockHeader.SetTxCount(uint32(len(txsPoolPerShard[currentShardId].Transactions))) + _ = genesisBlockHeader.SetTxCount(uint32(len(txsPoolPerShard[currentShardID].Transactions))) arg := &outport.OutportBlockWithHeaderAndBody{ OutportBlock: &outport.OutportBlock{ @@ -1148,9 +1148,9 @@ func (pcf *processComponentsFactory) indexGenesisBlocks( GasProvided: 0, GasRefunded: 0, GasPenalized: 0, - MaxGasPerBlock: pcf.coreData.EconomicsData().MaxGasLimitPerBlock(currentShardId), + MaxGasPerBlock: pcf.coreData.EconomicsData().MaxGasLimitPerBlock(currentShardID), }, - TransactionPool: txsPoolPerShard[currentShardId], + TransactionPool: txsPoolPerShard[currentShardID], AlteredAccounts: alteredAccounts, }, HeaderDataWithBody: &outport.HeaderDataWithBody{ @@ -1161,20 +1161,20 @@ func (pcf *processComponentsFactory) indexGenesisBlocks( } errOutport := pcf.statusComponents.OutportHandler().SaveBlock(arg) if errOutport != nil { - 
log.Warn("indexGenesisBlocks.outportHandler.SaveBlock cannot save block", "error", err) + log.Error("indexGenesisBlocks.outportHandler.SaveBlock cannot save block", "error", errOutport) } } - log.Info("indexGenesisBlocks(): historyRepo.RecordBlock", "shardID", currentShardId, "hash", genesisBlockHash) - if txsPoolPerShard[currentShardId] != nil { + log.Info("indexGenesisBlocks(): historyRepo.RecordBlock", "shardID", currentShardID, "hash", genesisBlockHash) + if txsPoolPerShard[currentShardID] != nil { err = pcf.historyRepo.RecordBlock( genesisBlockHash, originalGenesisBlockHeader, genesisBody, - wrapSCRsInfo(txsPoolPerShard[currentShardId].SmartContractResults), - wrapReceipts(txsPoolPerShard[currentShardId].Receipts), + wrapSCRsInfo(txsPoolPerShard[currentShardID].SmartContractResults), + wrapReceipts(txsPoolPerShard[currentShardID].Receipts), intraShardMiniBlocks, - wrapLogs(txsPoolPerShard[currentShardId].Logs)) + wrapLogs(txsPoolPerShard[currentShardID].Logs)) if err != nil { return err } @@ -1185,14 +1185,14 @@ func (pcf *processComponentsFactory) indexGenesisBlocks( return err } - if txsPoolPerShard[currentShardId] != nil { - err = pcf.saveGenesisTxsToStorage(wrapTxsInfo(txsPoolPerShard[currentShardId].Transactions)) + if txsPoolPerShard[currentShardID] != nil { + err = pcf.saveGenesisTxsToStorage(wrapTxsInfo(txsPoolPerShard[currentShardID].Transactions)) if err != nil { return err } } - nonceByHashDataUnit := dataRetriever.GetHdrNonceHashDataUnit(currentShardId) + nonceByHashDataUnit := dataRetriever.GetHdrNonceHashDataUnit(currentShardID) nonceAsBytes := pcf.coreData.Uint64ByteSliceConverter().ToByteSlice(genesisBlockHeader.GetNonce()) err = pcf.data.StorageService().Put(nonceByHashDataUnit, nonceAsBytes, genesisBlockHash) if err != nil { diff --git a/outport/mock/driverStub.go b/outport/mock/driverStub.go index f77cb3be1f5..e9f4e4a56ab 100644 --- a/outport/mock/driverStub.go +++ b/outport/mock/driverStub.go @@ -81,6 +81,7 @@ func (d *DriverStub) 
FinalizedBlock(finalizedBlock *outportcore.FinalizedBlock) return nil } +// GetMarshaller - func (d *DriverStub) GetMarshaller() marshal.Marshalizer { return testscommon.MarshalizerMock{} } diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index a33c1482cb6..d439d9e5ebd 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -1377,7 +1377,7 @@ func getLastSelfNotarizedHeaderByItself(chainHandler data.ChainHandler) (data.He } func (bp *baseProcessor) setFinalizedHeaderHashInIndexer(hdrHash []byte) { - log.Debug("baseProcessor.setFinalizedBlockInIndexer", "finalized header hash", hdrHash) + log.Debug("baseProcessor.setFinalizedHeaderHashInIndexer", "finalized header hash", hdrHash) bp.outportHandler.FinalizedBlock(&outportcore.FinalizedBlock{HeaderHash: hdrHash}) } diff --git a/process/block/metablock.go b/process/block/metablock.go index 04fc22bc76e..2ac04957461 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -624,12 +624,14 @@ func (mp *metaProcessor) indexBlock( HighestFinalBlockHash: mp.forkDetector.GetHighestFinalBlockHash(), }) if err != nil { - log.Error("metaProcessor.indexBlock cannot prepare argSaveBlock", "error", err.Error()) + log.Error("metaProcessor.indexBlock cannot prepare argSaveBlock", "error", err.Error(), + "hash", headerHash, "nonce", metaBlock.GetNonce(), "round", metaBlock.GetRound()) return } err = mp.outportHandler.SaveBlock(argSaveBlock) if err != nil { - log.Warn("metaProcessor.outportHandler.SaveBlock cannot save block", "error", err) + log.Error("metaProcessor.outportHandler.SaveBlock cannot save block", "error", err, + "hash", headerHash, "nonce", metaBlock.GetNonce(), "round", metaBlock.GetRound()) return } diff --git a/process/block/shardblock.go b/process/block/shardblock.go index d3142113b67..cc85a6710df 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -604,12 +604,14 @@ func (sp *shardProcessor) indexBlockIfNeeded( 
HighestFinalBlockHash: sp.forkDetector.GetHighestFinalBlockHash(), }) if err != nil { - log.Error("shardProcessor.indexBlockIfNeeded cannot prepare argSaveBlock", "error", err.Error()) + log.Error("shardProcessor.indexBlockIfNeeded cannot prepare argSaveBlock", "error", err.Error(), + "hash", headerHash, "nonce", header.GetNonce(), "round", header.GetRound()) return } err = sp.outportHandler.SaveBlock(argSaveBlock) if err != nil { - log.Warn("shardProcessor.outportHandler.SaveBlock cannot save block", "error", err) + log.Error("shardProcessor.outportHandler.SaveBlock cannot save block", "error", err, + "hash", headerHash, "nonce", header.GetNonce(), "round", header.GetRound()) return } From d51c0488ecf747e3860a036afa639c14f0e0715d Mon Sep 17 00:00:00 2001 From: MariusC Date: Wed, 22 Mar 2023 12:09:07 +0200 Subject: [PATCH 076/221] FIX: Revert logs map to slice --- factory/processing/processComponents.go | 12 +- go.mod | 4 +- go.sum | 8 +- node/external/blockAPI/baseBlock.go | 13 +- outport/notifier/eventNotifier_test.go | 9 +- .../alteredAccountsProvider_test.go | 283 ++++++++++-------- .../alteredaccounts/tokensProcessor.go | 2 +- outport/process/errors.go | 2 +- outport/process/outportDataProvider.go | 38 +-- .../process/transactionsfee/dataHolders.go | 5 +- .../transactionsfee/dataHolders_test.go | 12 +- .../transactionsFeeProcessor_test.go | 41 ++- 12 files changed, 228 insertions(+), 201 deletions(-) diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index 872746f7d78..9e3367dc823 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -16,7 +16,6 @@ import ( dataBlock "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/receipt" - "github.com/multiversx/mx-chain-core-go/data/transaction" nodeFactory "github.com/multiversx/mx-chain-go/cmd/node/factory" 
"github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" @@ -2023,17 +2022,14 @@ func wrapReceipts(receipts map[string]*receipt.Receipt) map[string]data.Transact return ret } -func wrapLogs(logs map[string]*transaction.Log) []*data.LogData { +func wrapLogs(logs []*outport.LogData) []*data.LogData { ret := make([]*data.LogData, len(logs)) - idx := 0 - for hash, lg := range logs { + for idx, logData := range logs { ret[idx] = &data.LogData{ - LogHandler: lg, - TxHash: hash, + LogHandler: logData.Log, + TxHash: logData.TxHash, } - - idx++ } return ret diff --git a/go.mod b/go.mod index c33460f0c31..112652627d3 100644 --- a/go.mod +++ b/go.mod @@ -13,9 +13,9 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-core-go v1.2.1-0.20230321130308-19c4c93dcae5 + github.com/multiversx/mx-chain-core-go v1.2.1-0.20230322093158-35195fa155c0 github.com/multiversx/mx-chain-crypto-go v1.2.5 - github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230321130745-62076807b905 + github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230322095713-a82089993c0d github.com/multiversx/mx-chain-logger-go v1.0.11 github.com/multiversx/mx-chain-p2p-go v1.0.13 github.com/multiversx/mx-chain-storage-go v1.0.7 diff --git a/go.sum b/go.sum index 7fb1b5f2a69..1f771cf385b 100644 --- a/go.sum +++ b/go.sum @@ -612,12 +612,12 @@ github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7 github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.31/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.0/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.2.1-0.20230321130308-19c4c93dcae5 h1:jMxFHf7I5QdHknAzCADFerVZIinHwoAShgM2OaDWCHY= -github.com/multiversx/mx-chain-core-go 
v1.2.1-0.20230321130308-19c4c93dcae5/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= +github.com/multiversx/mx-chain-core-go v1.2.1-0.20230322093158-35195fa155c0 h1:Hr0bAMUJigh5xGFm2qoKKglEtpIXckxCLjCDBUYv1DM= +github.com/multiversx/mx-chain-core-go v1.2.1-0.20230322093158-35195fa155c0/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-crypto-go v1.2.5 h1:tuq3BUNMhKud5DQbZi9DiVAAHUXypizy8zPH0NpTGZk= github.com/multiversx/mx-chain-crypto-go v1.2.5/go.mod h1:teqhNyWEqfMPgNn8sgWXlgtJ1a36jGCnhs/tRpXW6r4= -github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230321130745-62076807b905 h1:Ifmhkyjcnur+PLuWbGcdV9n4Zx42QkKNqHpyevy3vMU= -github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230321130745-62076807b905/go.mod h1:D5D4AKnSBXZ3d1nVxi1KWhBmngYTHd/0fnfu8j5YjEs= +github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230322095713-a82089993c0d h1:PDkG7SrzIsyRo5PLI68vlBM85PiQhWHhpafW7VCzhgM= +github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230322095713-a82089993c0d/go.mod h1:iDLxzwUi9CGzzEbFRi4TgYvmhsp21qNj5eqMgT32d2M= github.com/multiversx/mx-chain-logger-go v1.0.11 h1:DFsHa+sc5fKwhDR50I8uBM99RTDTEW68ESyr5ALRDwE= github.com/multiversx/mx-chain-logger-go v1.0.11/go.mod h1:1srDkP0DQucWQ+rYfaq0BX2qLnULsUdRPADpYUTM6dA= github.com/multiversx/mx-chain-p2p-go v1.0.13 h1:woIlYkDFCKYyJQ5urDcOzz8HUFGsSEhTfUXDDxNI2zM= diff --git a/node/external/blockAPI/baseBlock.go b/node/external/blockAPI/baseBlock.go index 255464fa879..de881108c64 100644 --- a/node/external/blockAPI/baseBlock.go +++ b/node/external/blockAPI/baseBlock.go @@ -455,7 +455,7 @@ func (bap *baseAPIBlockProcessor) apiBlockToOutportPool(apiBlock *api.Block) (*o SmartContractResults: make(map[string]*outport.SCRInfo), InvalidTxs: make(map[string]*outport.TxInfo), Rewards: make(map[string]*outport.RewardInfo), - Logs: make(map[string]*transaction.Log), + Logs: make([]*outport.LogData, 0), } var err error @@ -501,10 +501,13 @@ func (bap *baseAPIBlockProcessor) 
addLogsToPool(tx *transaction.ApiTransactionRe }) } - pool.Logs[tx.Hash] = &transaction.Log{ - Address: logAddressBytes, - Events: logsEvents, - } + pool.Logs = append(pool.Logs, &outport.LogData{ + TxHash: tx.Hash, + Log: &transaction.Log{ + Address: logAddressBytes, + Events: logsEvents, + }, + }) return nil } diff --git a/outport/notifier/eventNotifier_test.go b/outport/notifier/eventNotifier_test.go index a7f65f4244e..28f8950bcec 100644 --- a/outport/notifier/eventNotifier_test.go +++ b/outport/notifier/eventNotifier_test.go @@ -69,7 +69,7 @@ func TestSaveBlock(t *testing.T) { PostCalled: func(route string, payload interface{}) error { saveBlockData := payload.(*outport.OutportBlock) - require.Contains(t, saveBlockData.TransactionPool.Logs, txHash1) + require.Equal(t, saveBlockData.TransactionPool.Logs[0].TxHash, txHash1) for txHash := range saveBlockData.TransactionPool.Transactions { require.Equal(t, txHash1, txHash) } @@ -96,8 +96,11 @@ func TestSaveBlock(t *testing.T) { SmartContractResults: map[string]*outport.SCRInfo{ scrHash1: nil, }, - Logs: map[string]*transaction.Log{ - txHash1: {}, + Logs: []*outport.LogData{ + { + TxHash: txHash1, + Log: &transaction.Log{}, + }, }, }, } diff --git a/outport/process/alteredaccounts/alteredAccountsProvider_test.go b/outport/process/alteredaccounts/alteredAccountsProvider_test.go index 58badee6318..e1298ac28a4 100644 --- a/outport/process/alteredaccounts/alteredAccountsProvider_test.go +++ b/outport/process/alteredaccounts/alteredAccountsProvider_test.go @@ -520,24 +520,27 @@ func testExtractAlteredAccountsFromPoolShouldReturnErrorWhenCastingToVmCommonUse aap, _ := NewAlteredAccountsProvider(args) res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ - Logs: map[string]*transaction.Log{ - "hash": { - Address: []byte("addr"), - Events: []*transaction.Event{ - { - Address: []byte("addr"), - Identifier: []byte(core.BuiltInFunctionESDTTransfer), - Topics: [][]byte{ - []byte("token0"), - 
big.NewInt(0).Bytes(), + Logs: []*outportcore.LogData{ + { + TxHash: "hash", + Log: &transaction.Log{ + Address: []byte("addr"), + Events: []*transaction.Event{ + { + Address: []byte("addr"), + Identifier: []byte(core.BuiltInFunctionESDTTransfer), + Topics: [][]byte{ + []byte("token0"), + big.NewInt(0).Bytes(), + }, }, - }, - { - Address: []byte("addr"), // other event for the same token, to ensure it isn't added twice - Identifier: []byte(core.BuiltInFunctionESDTTransfer), - Topics: [][]byte{ - []byte("token0"), - big.NewInt(0).Bytes(), + { + Address: []byte("addr"), // other event for the same token, to ensure it isn't added twice + Identifier: []byte(core.BuiltInFunctionESDTTransfer), + Topics: [][]byte{ + []byte("token0"), + big.NewInt(0).Bytes(), + }, }, }, }, @@ -569,24 +572,27 @@ func testExtractAlteredAccountsFromPoolShouldIncludeESDT(t *testing.T) { aap, _ := NewAlteredAccountsProvider(args) res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ - Logs: map[string]*transaction.Log{ - "hash": { - Address: []byte("addr"), - Events: []*transaction.Event{ - { - Address: []byte("addr"), - Identifier: []byte(core.BuiltInFunctionESDTTransfer), - Topics: [][]byte{ - []byte("token0"), - big.NewInt(0).Bytes(), + Logs: []*outportcore.LogData{ + { + TxHash: "hash", + Log: &transaction.Log{ + Address: []byte("addr"), + Events: []*transaction.Event{ + { + Address: []byte("addr"), + Identifier: []byte(core.BuiltInFunctionESDTTransfer), + Topics: [][]byte{ + []byte("token0"), + big.NewInt(0).Bytes(), + }, }, - }, - { - Address: []byte("addr"), // other event for the same token, to ensure it isn't added twice - Identifier: []byte(core.BuiltInFunctionESDTTransfer), - Topics: [][]byte{ - []byte("token0"), - big.NewInt(0).Bytes(), + { + Address: []byte("addr"), // other event for the same token, to ensure it isn't added twice + Identifier: []byte(core.BuiltInFunctionESDTTransfer), + Topics: [][]byte{ + []byte("token0"), + big.NewInt(0).Bytes(), + }, }, 
}, }, @@ -630,16 +636,19 @@ func testExtractAlteredAccountsFromPoolShouldIncludeNFT(t *testing.T) { aap, _ := NewAlteredAccountsProvider(args) res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ - Logs: map[string]*transaction.Log{ - "hash": { - Address: []byte("addr"), - Events: []*transaction.Event{ - { - Address: []byte("addr"), - Identifier: []byte(core.BuiltInFunctionESDTNFTTransfer), - Topics: [][]byte{ - []byte("token0"), - big.NewInt(38).Bytes(), + Logs: []*outportcore.LogData{ + { + TxHash: "hash", + Log: &transaction.Log{ + Address: []byte("addr"), + Events: []*transaction.Event{ + { + Address: []byte("addr"), + Identifier: []byte(core.BuiltInFunctionESDTNFTTransfer), + Topics: [][]byte{ + []byte("token0"), + big.NewInt(38).Bytes(), + }, }, }, }, @@ -695,18 +704,21 @@ func testExtractAlteredAccountsFromPoolShouldNotIncludeReceiverAddressIfNftCreat }, }, }, - Logs: map[string]*transaction.Log{ - "hh": { - Address: sendAddrShard0, - Events: []*transaction.Event{ - { - Address: sendAddrShard0, - Identifier: []byte(core.BuiltInFunctionESDTNFTCreate), - Topics: [][]byte{ - []byte("token0"), - big.NewInt(38).Bytes(), - nil, - receiverOnDestination, + Logs: []*outportcore.LogData{ + { + TxHash: "hh", + Log: &transaction.Log{ + Address: sendAddrShard0, + Events: []*transaction.Event{ + { + Address: sendAddrShard0, + Identifier: []byte(core.BuiltInFunctionESDTNFTCreate), + Topics: [][]byte{ + []byte("token0"), + big.NewInt(38).Bytes(), + nil, + receiverOnDestination, + }, }, }, }, @@ -759,18 +771,21 @@ func testExtractAlteredAccountsFromPoolShouldIncludeDestinationFromTokensLogsTop aap, _ := NewAlteredAccountsProvider(args) res, err := aap.ExtractAlteredAccountsFromPool(&outportcore.TransactionPool{ - Logs: map[string]*transaction.Log{ - "hash0": { - Address: []byte("addr"), - Events: []*transaction.Event{ - { - Address: []byte("addr"), - Identifier: []byte(core.BuiltInFunctionESDTNFTTransfer), - Topics: [][]byte{ - []byte("token0"), - 
big.NewInt(38).Bytes(), - nil, - receiverOnDestination, + Logs: []*outportcore.LogData{ + { + TxHash: "hash0", + Log: &transaction.Log{ + Address: []byte("addr"), + Events: []*transaction.Event{ + { + Address: []byte("addr"), + Identifier: []byte(core.BuiltInFunctionESDTNFTTransfer), + Topics: [][]byte{ + []byte("token0"), + big.NewInt(38).Bytes(), + nil, + receiverOnDestination, + }, }, }, }, @@ -836,25 +851,28 @@ func testExtractAlteredAccountsFromPoolAddressHasBalanceChangeEsdtAndfNft(t *tes }, }, }, - Logs: map[string]*transaction.Log{ - "hash0": { - Address: []byte("addr"), - Events: []*transaction.Event{ - { - Address: []byte("addr"), - Identifier: []byte(core.BuiltInFunctionESDTTransfer), - Topics: [][]byte{ - []byte("esdt"), - big.NewInt(1).Bytes(), + Logs: []*outportcore.LogData{ + { + TxHash: "hash0", + Log: &transaction.Log{ + Address: []byte("addr"), + Events: []*transaction.Event{ + { + Address: []byte("addr"), + Identifier: []byte(core.BuiltInFunctionESDTTransfer), + Topics: [][]byte{ + []byte("esdt"), + big.NewInt(1).Bytes(), + }, }, - }, - { - Address: []byte("addr"), - Identifier: []byte(core.BuiltInFunctionESDTNFTTransfer), - Topics: [][]byte{ - []byte("nft"), - big.NewInt(38).Bytes(), - big.NewInt(1).Bytes(), + { + Address: []byte("addr"), + Identifier: []byte(core.BuiltInFunctionESDTNFTTransfer), + Topics: [][]byte{ + []byte("nft"), + big.NewInt(38).Bytes(), + big.NewInt(1).Bytes(), + }, }, }, }, @@ -950,32 +968,35 @@ func testExtractAlteredAccountsFromPoolAddressHasMultipleNfts(t *testing.T) { }, }, }, - Logs: map[string]*transaction.Log{ - "hash0": { - Address: []byte("addr"), - Events: []*transaction.Event{ - { - Address: []byte("addr"), - Identifier: []byte(core.BuiltInFunctionESDTTransfer), - Topics: [][]byte{ - []byte("esdttoken"), - big.NewInt(0).Bytes(), + Logs: []*outportcore.LogData{ + { + TxHash: "hash0", + Log: &transaction.Log{ + Address: []byte("addr"), + Events: []*transaction.Event{ + { + Address: []byte("addr"), + Identifier: 
[]byte(core.BuiltInFunctionESDTTransfer), + Topics: [][]byte{ + []byte("esdttoken"), + big.NewInt(0).Bytes(), + }, }, - }, - { - Address: []byte("addr"), - Identifier: []byte(core.BuiltInFunctionESDTNFTTransfer), - Topics: [][]byte{ - expectedToken1.TokenMetaData.Name, - big.NewInt(0).SetUint64(expectedToken1.TokenMetaData.Nonce).Bytes(), + { + Address: []byte("addr"), + Identifier: []byte(core.BuiltInFunctionESDTNFTTransfer), + Topics: [][]byte{ + expectedToken1.TokenMetaData.Name, + big.NewInt(0).SetUint64(expectedToken1.TokenMetaData.Nonce).Bytes(), + }, }, - }, - { - Address: []byte("addr"), - Identifier: []byte(core.BuiltInFunctionESDTNFTTransfer), - Topics: [][]byte{ - expectedToken2.TokenMetaData.Name, - big.NewInt(0).SetUint64(expectedToken2.TokenMetaData.Nonce).Bytes(), + { + Address: []byte("addr"), + Identifier: []byte(core.BuiltInFunctionESDTNFTTransfer), + Topics: [][]byte{ + expectedToken2.TokenMetaData.Name, + big.NewInt(0).SetUint64(expectedToken2.TokenMetaData.Nonce).Bytes(), + }, }, }, }, @@ -1053,15 +1074,18 @@ func testExtractAlteredAccountsFromPoolESDTTransferBalanceNotChanged(t *testing. 
}, }, }, - Logs: map[string]*transaction.Log{ - "txHash": { - Address: []byte("snd"), - Events: []*transaction.Event{ - { - Address: []byte("snd"), - Identifier: []byte(core.BuiltInFunctionESDTTransfer), - Topics: [][]byte{ - []byte("token0"), big.NewInt(0).Bytes(), big.NewInt(10).Bytes(), []byte("rcv"), + Logs: []*outportcore.LogData{ + { + TxHash: "txHash", + Log: &transaction.Log{ + Address: []byte("snd"), + Events: []*transaction.Event{ + { + Address: []byte("snd"), + Identifier: []byte(core.BuiltInFunctionESDTTransfer), + Topics: [][]byte{ + []byte("token0"), big.NewInt(0).Bytes(), big.NewInt(10).Bytes(), []byte("rcv"), + }, }, }, }, @@ -1261,15 +1285,18 @@ func textExtractAlteredAccountsFromPoolNftCreate(t *testing.T) { }, }, }, - Logs: map[string]*transaction.Log{ - "txHash": { - Address: []byte("snd"), - Events: []*transaction.Event{ - { - Address: []byte("snd"), - Identifier: []byte(core.BuiltInFunctionESDTNFTCreate), - Topics: [][]byte{ - []byte("token0"), big.NewInt(0).Bytes(), big.NewInt(10).Bytes(), []byte("a"), + Logs: []*outportcore.LogData{ + { + TxHash: "txHash", + Log: &transaction.Log{ + Address: []byte("snd"), + Events: []*transaction.Event{ + { + Address: []byte("snd"), + Identifier: []byte(core.BuiltInFunctionESDTNFTCreate), + Topics: [][]byte{ + []byte("token0"), big.NewInt(0).Bytes(), big.NewInt(10).Bytes(), []byte("a"), + }, }, }, }, diff --git a/outport/process/alteredaccounts/tokensProcessor.go b/outport/process/alteredaccounts/tokensProcessor.go index 82ae98f7b84..a6f0ef4b276 100644 --- a/outport/process/alteredaccounts/tokensProcessor.go +++ b/outport/process/alteredaccounts/tokensProcessor.go @@ -46,7 +46,7 @@ func (tp *tokensProcessor) extractESDTAccounts( ) error { var err error for _, txLog := range txPool.Logs { - for _, event := range txLog.Events { + for _, event := range txLog.Log.Events { err = tp.processEvent(event, markedAlteredAccounts) if err != nil { return err diff --git a/outport/process/errors.go 
b/outport/process/errors.go index 301b23316ae..e7881c29a59 100644 --- a/outport/process/errors.go +++ b/outport/process/errors.go @@ -25,7 +25,7 @@ var errCannotCastReward = errors.New("cannot cast reward transaction") var errCannotCastReceipt = errors.New("cannot cast receipt transaction") -var errCannotCastEvent = errors.New("cannot cast event") +var errCannotCastLog = errors.New("cannot cast log") // ErrNilHasher signals that a nil hasher has been provided var ErrNilHasher = errors.New("nil hasher provided") diff --git a/outport/process/outportDataProvider.go b/outport/process/outportDataProvider.go index 56f047f53b4..b25bb460186 100644 --- a/outport/process/outportDataProvider.go +++ b/outport/process/outportDataProvider.go @@ -338,38 +338,22 @@ func getReceipts(receipts map[string]data.TransactionHandler) (map[string]*recei return ret, nil } -func getLogs(logs []*data.LogData) (map[string]*transaction.Log, error) { - ret := make(map[string]*transaction.Log, len(logs)) - - for _, logHandler := range logs { - eventHandlers := logHandler.GetLogEvents() - events, err := getEvents(eventHandlers) - txHashHex := getHexEncodedHash(logHandler.TxHash) - if err != nil { - return nil, fmt.Errorf("%w, hash: %s", err, txHashHex) - } - - ret[txHashHex] = &transaction.Log{ - Address: logHandler.GetAddress(), - Events: events, - } - } - return ret, nil -} - -func getEvents(eventHandlers []data.EventHandler) ([]*transaction.Event, error) { - events := make([]*transaction.Event, len(eventHandlers)) +func getLogs(logs []*data.LogData) ([]*outportcore.LogData, error) { + ret := make([]*outportcore.LogData, len(logs)) - for idx, eventHandler := range eventHandlers { - event, castOk := eventHandler.(*transaction.Event) + for idx, logData := range logs { + txHashHex := getHexEncodedHash(logData.TxHash) + log, castOk := logData.LogHandler.(*transaction.Log) if !castOk { - return nil, errCannotCastEvent + return nil, fmt.Errorf("%w, hash: %s", errCannotCastLog, txHashHex) } - 
events[idx] = event + ret[idx] = &outportcore.LogData{ + TxHash: txHashHex, + Log: log, + } } - - return events, nil + return ret, nil } // IsInterfaceNil returns true if there is no value under the interface diff --git a/outport/process/transactionsfee/dataHolders.go b/outport/process/transactionsfee/dataHolders.go index 03ca3f0f91e..805b645990a 100644 --- a/outport/process/transactionsfee/dataHolders.go +++ b/outport/process/transactionsfee/dataHolders.go @@ -41,14 +41,15 @@ func prepareTransactionsAndScrs(txPool *outportcore.TransactionPool) *transactio } } - for txHash, txLog := range txPool.Logs { + for _, txLog := range txPool.Logs { + txHash := txLog.TxHash txWithResults, ok := transactionsAndScrs.txsWithResults[txHash] if !ok { continue } txWithResults.log = &data.LogData{ - LogHandler: txLog, + LogHandler: txLog.Log, TxHash: txHash, } } diff --git a/outport/process/transactionsfee/dataHolders_test.go b/outport/process/transactionsfee/dataHolders_test.go index f5de81d2aa5..36311aec775 100644 --- a/outport/process/transactionsfee/dataHolders_test.go +++ b/outport/process/transactionsfee/dataHolders_test.go @@ -55,10 +55,14 @@ func TestTransactionsAndScrsHolder(t *testing.T) { }, }, }, - Logs: map[string]*transaction.Log{ - "hash": {}, - txHash: { - Address: []byte("addr"), + Logs: []*outportcore.LogData{ + { + Log: &transaction.Log{Address: []byte("addr")}, + TxHash: txHash, + }, + { + Log: &transaction.Log{}, + TxHash: "hash", }, }, } diff --git a/outport/process/transactionsfee/transactionsFeeProcessor_test.go b/outport/process/transactionsfee/transactionsFeeProcessor_test.go index aa046694d22..a2c06d3e6fa 100644 --- a/outport/process/transactionsfee/transactionsFeeProcessor_test.go +++ b/outport/process/transactionsfee/transactionsFeeProcessor_test.go @@ -233,30 +233,39 @@ func TestPutFeeAndGasUsedLogWithErrorAndInformative(t *testing.T) { tx1Hash: tx1, tx2Hash: tx2, "t3": {Transaction: &transaction.Transaction{}, FeeInfo: &outportcore.FeeInfo{Fee: 
big.NewInt(0)}}}, - Logs: map[string]*transaction.Log{ - "hhh": { - Events: []*transaction.Event{ - { - Identifier: []byte("ignore"), + Logs: []*outportcore.LogData{ + { + Log: &transaction.Log{ + Events: []*transaction.Event{ + { + Identifier: []byte("ignore"), + }, }, }, + TxHash: "hhh", }, - tx1Hash: { - Events: []*transaction.Event{ - { - Identifier: []byte("ignore"), - }, - { - Identifier: []byte(core.SignalErrorOperation), + { + Log: &transaction.Log{ + Events: []*transaction.Event{ + { + Identifier: []byte("ignore"), + }, + { + Identifier: []byte(core.SignalErrorOperation), + }, }, }, + TxHash: tx1Hash, }, - tx2Hash: { - Events: []*transaction.Event{ - { - Identifier: []byte(core.WriteLogIdentifier), + { + Log: &transaction.Log{ + Events: []*transaction.Event{ + { + Identifier: []byte(core.WriteLogIdentifier), + }, }, }, + TxHash: tx2Hash, }, }, } From 9d6218f8413d9acd2d09e2910726b526f96dcd64 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Tue, 28 Mar 2023 13:19:44 +0300 Subject: [PATCH 077/221] add marshaller type for notifier to external config --- cmd/node/config/external.toml | 4 ++++ config/externalConfig.go | 1 + factory/status/statusComponents.go | 19 +++++++++++++++---- outport/notifier/eventNotifier.go | 8 -------- testscommon/components/components.go | 6 ++++++ 5 files changed, 26 insertions(+), 12 deletions(-) diff --git a/cmd/node/config/external.toml b/cmd/node/config/external.toml index 3e55323e94e..84571be50fe 100644 --- a/cmd/node/config/external.toml +++ b/cmd/node/config/external.toml @@ -37,6 +37,10 @@ # RequestTimeoutSec defines the timeout in seconds for the http client RequestTimeoutSec = 60 + # MarshallerType is used to define the marshalled type to be used for inner + # marshalled structures in block events data + MarshallerType = "json" + [WebSocketConnector] # This flag shall only be used for observer nodes Enabled = false diff --git a/config/externalConfig.go b/config/externalConfig.go index 760e1dcd99b..8cd108474c5 100644 --- 
a/config/externalConfig.go +++ b/config/externalConfig.go @@ -27,6 +27,7 @@ type EventNotifierConfig struct { Username string Password string RequestTimeoutSec int + MarshallerType string } // CovalentConfig will hold the configurations for covalent indexer diff --git a/factory/status/statusComponents.go b/factory/status/statusComponents.go index f0294b3137f..b40b025abe8 100644 --- a/factory/status/statusComponents.go +++ b/factory/status/statusComponents.go @@ -210,10 +210,15 @@ func (scf *statusComponentsFactory) createOutportDriver() (outport.OutportHandle return nil, err } + eventNotifierArgs, err := scf.makeEventNotifierArgs() + if err != nil { + return nil, err + } + outportFactoryArgs := &outportDriverFactory.OutportFactoryArgs{ RetrialInterval: common.RetrialIntervalForOutportDriver, ElasticIndexerFactoryArgs: scf.makeElasticIndexerArgs(), - EventNotifierFactoryArgs: scf.makeEventNotifierArgs(), + EventNotifierFactoryArgs: eventNotifierArgs, WebSocketSenderDriverFactoryArgs: outportDriverFactory.WrappedOutportDriverWebSocketSenderFactoryArgs{ Enabled: scf.externalConfig.WebSocketConnector.Enabled, OutportDriverWebSocketSenderFactoryArgs: webSocketSenderDriverFactoryArgs, @@ -242,8 +247,14 @@ func (scf *statusComponentsFactory) makeElasticIndexerArgs() indexerFactory.Args } } -func (scf *statusComponentsFactory) makeEventNotifierArgs() *outportDriverFactory.EventNotifierFactoryArgs { +func (scf *statusComponentsFactory) makeEventNotifierArgs() (*outportDriverFactory.EventNotifierFactoryArgs, error) { eventNotifierConfig := scf.externalConfig.EventNotifierConnector + + marshaller, err := factoryMarshalizer.NewMarshalizer(eventNotifierConfig.MarshallerType) + if err != nil { + return &outportDriverFactory.EventNotifierFactoryArgs{}, err + } + return &outportDriverFactory.EventNotifierFactoryArgs{ Enabled: eventNotifierConfig.Enabled, UseAuthorization: eventNotifierConfig.UseAuthorization, @@ -251,8 +262,8 @@ func (scf *statusComponentsFactory) 
makeEventNotifierArgs() *outportDriverFactor Username: eventNotifierConfig.Username, Password: eventNotifierConfig.Password, RequestTimeoutSec: eventNotifierConfig.RequestTimeoutSec, - Marshaller: scf.coreComponents.InternalMarshalizer(), - } + Marshaller: marshaller, + }, nil } func (scf *statusComponentsFactory) makeWebSocketDriverArgs() (wsDriverFactory.OutportDriverWebSocketSenderFactoryArgs, error) { diff --git a/outport/notifier/eventNotifier.go b/outport/notifier/eventNotifier.go index db5ae102fc4..f6c7b1206d5 100644 --- a/outport/notifier/eventNotifier.go +++ b/outport/notifier/eventNotifier.go @@ -28,11 +28,6 @@ type RevertBlock struct { Epoch uint32 `json:"epoch"` } -// FinalizedBlock holds finalized block data -type FinalizedBlock struct { - Hash string `json:"hash"` -} - type eventNotifier struct { httpClient httpClientHandler marshalizer marshal.Marshalizer @@ -72,9 +67,6 @@ func checkEventNotifierArgs(args ArgsEventNotifier) error { // SaveBlock converts block data in order to be pushed to subscribers func (en *eventNotifier) SaveBlock(args *outport.OutportBlock) error { log.Debug("eventNotifier: SaveBlock called at block", "block hash", args.BlockData.HeaderHash) - if args.TransactionPool == nil { - return ErrNilTransactionsPool - } err := en.httpClient.Post(pushEventEndpoint, args) if err != nil { diff --git a/testscommon/components/components.go b/testscommon/components/components.go index 7b1ddd9751b..819206a685e 100644 --- a/testscommon/components/components.go +++ b/testscommon/components/components.go @@ -651,6 +651,12 @@ func GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator shardin Password: elasticPassword, EnabledIndexes: []string{"transactions", "blocks"}, }, + EventNotifierConnector: config.EventNotifierConfig{ + Enabled: false, + ProxyUrl: "http://localhost:5000", + RequestTimeoutSec: 30, + MarshallerType: "json", + }, }, EconomicsConfig: config.EconomicsConfig{}, ShardCoordinator: mock.NewMultiShardsCoordinatorMock(2), 
From df66015b80ce90ae13c054f9b825e696094b2033 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Tue, 28 Mar 2023 21:23:36 +0300 Subject: [PATCH 078/221] fix unit tests for outport driver --- testscommon/components/components.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/testscommon/components/components.go b/testscommon/components/components.go index 819206a685e..06ce75db163 100644 --- a/testscommon/components/components.go +++ b/testscommon/components/components.go @@ -594,6 +594,11 @@ func GetStatusComponents( Password: elasticPassword, EnabledIndexes: []string{"transactions", "blocks"}, }, + EventNotifierConnector: config.EventNotifierConfig{ + Enabled: false, + ProxyUrl: "https://localhost:5000", + MarshallerType: "json", + }, }, EconomicsConfig: config.EconomicsConfig{}, ShardCoordinator: shardCoordinator, From e154667a64476f7867a1422fab3035cbe3d1682e Mon Sep 17 00:00:00 2001 From: Darius Date: Wed, 29 Mar 2023 13:56:40 +0300 Subject: [PATCH 079/221] Update cmd/node/config/external.toml Co-authored-by: Sorin Stanculeanu <34831323+sstanculeanu@users.noreply.github.com> --- cmd/node/config/external.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/node/config/external.toml b/cmd/node/config/external.toml index 84571be50fe..f94282e93ee 100644 --- a/cmd/node/config/external.toml +++ b/cmd/node/config/external.toml @@ -37,7 +37,7 @@ # RequestTimeoutSec defines the timeout in seconds for the http client RequestTimeoutSec = 60 - # MarshallerType is used to define the marshalled type to be used for inner + # MarshallerType is used to define the marshaller type to be used for inner # marshalled structures in block events data MarshallerType = "json" From 1f7e6be83d0e8598518deea38198b715cc0514fa Mon Sep 17 00:00:00 2001 From: Iuga Mihai Date: Wed, 29 Mar 2023 16:22:44 +0300 Subject: [PATCH 080/221] fixes txs execution order --- .../transactionsExecutionOrder.go | 20 ++++--- .../transactionsExecutionOrder_test.go | 53 ++++++++++--------- 2 
files changed, 39 insertions(+), 34 deletions(-) diff --git a/outport/process/executionOrder/transactionsExecutionOrder.go b/outport/process/executionOrder/transactionsExecutionOrder.go index c60b2af2548..15603a7e2fa 100644 --- a/outport/process/executionOrder/transactionsExecutionOrder.go +++ b/outport/process/executionOrder/transactionsExecutionOrder.go @@ -173,7 +173,7 @@ func (s *sorter) getInvalidTxsExecutedInCurrentBlock(scheduledMbsFromPreviousBlo for _, hash := range mb.TxHashes { _, found := allScheduledTxs[string(hash)] if found { - scheduledExecutedInvalidTxsHashesPrevBlock = append(scheduledExecutedInvalidTxsHashesPrevBlock, string(hash)) + scheduledExecutedInvalidTxsHashesPrevBlock = append(scheduledExecutedInvalidTxsHashesPrevBlock, hex.EncodeToString(hash)) continue } invalidTxHashes = append(invalidTxHashes, hash) @@ -249,9 +249,10 @@ func getRewardsTxsFromMe(pool *outport.TransactionPool, blockBody *block.Body, h func extractTxsFromMap(txsHashes [][]byte, txs map[string]*outport.TxInfo) ([]data.TxWithExecutionOrderHandler, error) { result := make([]data.TxWithExecutionOrderHandler, 0, len(txsHashes)) for _, txHash := range txsHashes { - tx, found := txs[string(txHash)] + txHashHex := hex.EncodeToString(txHash) + tx, found := txs[txHashHex] if !found { - return nil, fmt.Errorf("cannot find transaction in pool, txHash: %s", hex.EncodeToString(txHash)) + return nil, fmt.Errorf("cannot find transaction in pool, txHash: %s", txHashHex) } result = append(result, tx) } @@ -262,9 +263,10 @@ func extractTxsFromMap(txsHashes [][]byte, txs map[string]*outport.TxInfo) ([]da func extractSCRsFromMap(txsHashes [][]byte, scrs map[string]*outport.SCRInfo) ([]data.TxWithExecutionOrderHandler, error) { result := make([]data.TxWithExecutionOrderHandler, 0, len(txsHashes)) for _, txHash := range txsHashes { - scr, found := scrs[string(txHash)] + txHashHex := hex.EncodeToString(txHash) + scr, found := scrs[txHashHex] if !found { - return nil, fmt.Errorf("cannot find scr 
in pool, txHash: %s", hex.EncodeToString(txHash)) + return nil, fmt.Errorf("cannot find scr in pool, txHash: %s", txHashHex) } result = append(result, scr) } @@ -275,9 +277,10 @@ func extractSCRsFromMap(txsHashes [][]byte, scrs map[string]*outport.SCRInfo) ([ func extractRewardsFromMap(txsHashes [][]byte, rewards map[string]*outport.RewardInfo) ([]data.TxWithExecutionOrderHandler, error) { result := make([]data.TxWithExecutionOrderHandler, 0, len(txsHashes)) for _, txHash := range txsHashes { - reward, found := rewards[string(txHash)] + txHashHex := hex.EncodeToString(txHash) + reward, found := rewards[txHashHex] if !found { - return nil, fmt.Errorf("cannot find reward in pool, txHash: %s", hex.EncodeToString(txHash)) + return nil, fmt.Errorf("cannot find reward in pool, txHash: %s", txHashHex) } result = append(result, reward) } @@ -299,7 +302,8 @@ func extractExecutedTxHashes(mbIndex int, mbTxHashes [][]byte, header data.Heade func extractAndPutScrsToDestinationMap(scrsHashes [][]byte, scrsMap map[string]*outport.SCRInfo, destinationMap map[string]data.TxWithExecutionOrderHandler) { for _, scrHash := range scrsHashes { - scr, found := scrsMap[string(scrHash)] + scrHashHex := hex.EncodeToString(scrHash) + scr, found := scrsMap[scrHashHex] if !found { continue } diff --git a/outport/process/executionOrder/transactionsExecutionOrder_test.go b/outport/process/executionOrder/transactionsExecutionOrder_test.go index 41df8a351b3..b2e09e47da2 100644 --- a/outport/process/executionOrder/transactionsExecutionOrder_test.go +++ b/outport/process/executionOrder/transactionsExecutionOrder_test.go @@ -1,6 +1,7 @@ package executionOrder import ( + "encoding/hex" "testing" "github.com/multiversx/mx-chain-core-go/core" @@ -130,25 +131,25 @@ func TestAddExecutionOrderInTransactionPool(t *testing.T) { pool := &outport.TransactionPool{ Transactions: map[string]*outport.TxInfo{ - string(txHashToMe): {Transaction: &transaction.Transaction{Nonce: 1}}, - string(txHashFromMe): 
{Transaction: &transaction.Transaction{Nonce: 2}}, + hex.EncodeToString(txHashToMe): {Transaction: &transaction.Transaction{Nonce: 1}}, + hex.EncodeToString(txHashFromMe): {Transaction: &transaction.Transaction{Nonce: 2}}, }, SmartContractResults: map[string]*outport.SCRInfo{ - string(scrHashToMe): {SmartContractResult: &smartContractResult.SmartContractResult{Nonce: 3}}, - string(scrHashFromMe): {SmartContractResult: &smartContractResult.SmartContractResult{ + hex.EncodeToString(scrHashToMe): {SmartContractResult: &smartContractResult.SmartContractResult{Nonce: 3}}, + hex.EncodeToString(scrHashFromMe): {SmartContractResult: &smartContractResult.SmartContractResult{ Nonce: 4, OriginalTxHash: txHashToMe, }}, - string(scrHashIntra): {SmartContractResult: &smartContractResult.SmartContractResult{ + hex.EncodeToString(scrHashIntra): {SmartContractResult: &smartContractResult.SmartContractResult{ Nonce: 0, OriginalTxHash: txHashToMe, }}, }, Rewards: map[string]*outport.RewardInfo{ - string(rewardTxHash): {Reward: &rewardTx.RewardTx{}}, + hex.EncodeToString(rewardTxHash): {Reward: &rewardTx.RewardTx{}}, }, InvalidTxs: map[string]*outport.TxInfo{ - string(invalidTxHash): {Transaction: &transaction.Transaction{Nonce: 5}}, + hex.EncodeToString(invalidTxHash): {Transaction: &transaction.Transaction{Nonce: 5}}, }, Receipts: map[string]*receipt.Receipt{}, Logs: nil, @@ -159,28 +160,28 @@ func TestAddExecutionOrderInTransactionPool(t *testing.T) { require.Equal(t, &outport.TransactionPool{ Transactions: map[string]*outport.TxInfo{ - string(txHashToMe): { + hex.EncodeToString(txHashToMe): { Transaction: &transaction.Transaction{Nonce: 1}, ExecutionOrder: 0, }, - string(txHashFromMe): { + hex.EncodeToString(txHashFromMe): { Transaction: &transaction.Transaction{Nonce: 2}, ExecutionOrder: 3, }, }, SmartContractResults: map[string]*outport.SCRInfo{ - string(scrHashToMe): { + hex.EncodeToString(scrHashToMe): { SmartContractResult: &smartContractResult.SmartContractResult{Nonce: 3}, 
ExecutionOrder: 1, }, - string(scrHashFromMe): { + hex.EncodeToString(scrHashFromMe): { SmartContractResult: &smartContractResult.SmartContractResult{ Nonce: 4, OriginalTxHash: txHashToMe, }, ExecutionOrder: 0, }, - string(scrHashIntra): { + hex.EncodeToString(scrHashIntra): { SmartContractResult: &smartContractResult.SmartContractResult{ Nonce: 0, OriginalTxHash: txHashToMe, @@ -189,13 +190,13 @@ func TestAddExecutionOrderInTransactionPool(t *testing.T) { }, }, Rewards: map[string]*outport.RewardInfo{ - string(rewardTxHash): { + hex.EncodeToString(rewardTxHash): { Reward: &rewardTx.RewardTx{}, ExecutionOrder: 2, }, }, InvalidTxs: map[string]*outport.TxInfo{ - string(invalidTxHash): { + hex.EncodeToString(invalidTxHash): { Transaction: &transaction.Transaction{Nonce: 5}, ExecutionOrder: 4, }, @@ -256,8 +257,8 @@ func TestAddExecutionOrderInTransactionPoolFromMeTransactionAndScheduled(t *test pool := &outport.TransactionPool{ Transactions: map[string]*outport.TxInfo{ - string(firstTxHash): {Transaction: &transaction.Transaction{Nonce: 1}}, - string(secondTxHash): {Transaction: &transaction.Transaction{Nonce: 2}}, + hex.EncodeToString(firstTxHash): {Transaction: &transaction.Transaction{Nonce: 1}}, + hex.EncodeToString(secondTxHash): {Transaction: &transaction.Transaction{Nonce: 2}}, }, } @@ -266,11 +267,11 @@ func TestAddExecutionOrderInTransactionPoolFromMeTransactionAndScheduled(t *test require.Equal(t, &outport.TransactionPool{ Transactions: map[string]*outport.TxInfo{ - string(firstTxHash): { + hex.EncodeToString(firstTxHash): { Transaction: &transaction.Transaction{Nonce: 1}, ExecutionOrder: 0, }, - string(secondTxHash): { + hex.EncodeToString(secondTxHash): { Transaction: &transaction.Transaction{Nonce: 2}, ExecutionOrder: 1, }, @@ -369,13 +370,13 @@ func TestAddExecutionOrderInTransactionPoolFromMeTransactionAndScheduledInvalid( pool := &outport.TransactionPool{ Transactions: map[string]*outport.TxInfo{ - string(secondTxHash): {Transaction: 
&transaction.Transaction{Nonce: 2}}, + hex.EncodeToString(secondTxHash): {Transaction: &transaction.Transaction{Nonce: 2}}, }, InvalidTxs: map[string]*outport.TxInfo{ - string(firstTxHash): {Transaction: &transaction.Transaction{Nonce: 1}}, + hex.EncodeToString(firstTxHash): {Transaction: &transaction.Transaction{Nonce: 1}}, }, SmartContractResults: map[string]*outport.SCRInfo{ - string(scrHash): {SmartContractResult: &smartContractResult.SmartContractResult{ + hex.EncodeToString(scrHash): {SmartContractResult: &smartContractResult.SmartContractResult{ Nonce: 3, OriginalTxHash: scheduledTx, }}, @@ -386,19 +387,19 @@ func TestAddExecutionOrderInTransactionPoolFromMeTransactionAndScheduledInvalid( require.Nil(t, err) require.Equal(t, &outport.TransactionPool{ Transactions: map[string]*outport.TxInfo{ - string(secondTxHash): { + hex.EncodeToString(secondTxHash): { Transaction: &transaction.Transaction{Nonce: 2}, ExecutionOrder: 1, }, }, InvalidTxs: map[string]*outport.TxInfo{ - string(firstTxHash): { + hex.EncodeToString(firstTxHash): { Transaction: &transaction.Transaction{Nonce: 1}, ExecutionOrder: 0, }, }, SmartContractResults: map[string]*outport.SCRInfo{ - string(scrHash): { + hex.EncodeToString(scrHash): { SmartContractResult: &smartContractResult.SmartContractResult{ Nonce: 3, OriginalTxHash: scheduledTx, @@ -408,6 +409,6 @@ func TestAddExecutionOrderInTransactionPoolFromMeTransactionAndScheduledInvalid( }, }, pool) - require.Equal(t, []string{string(scrHash)}, scrsHashes) - require.Equal(t, []string{string(scheduledInvalidTxHash)}, invalidTxsHashes) + require.Equal(t, []string{hex.EncodeToString(scrHash)}, scrsHashes) + require.Equal(t, []string{hex.EncodeToString(scheduledInvalidTxHash)}, invalidTxsHashes) } From fdc540f2adb9129730c581746a3c18e911690c01 Mon Sep 17 00:00:00 2001 From: Iuga Mihai Date: Fri, 31 Mar 2023 12:28:48 +0300 Subject: [PATCH 081/221] latest version of indexer and mx-chain-core --- go.mod | 4 +-- go.sum | 8 +++--- 
outport/notifier/eventNotifier.go | 46 +++++++++++++++++++++++++++---- outport/notifier/interface.go | 10 +++++++ 4 files changed, 56 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index 112652627d3..15c251425ee 100644 --- a/go.mod +++ b/go.mod @@ -13,9 +13,9 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-core-go v1.2.1-0.20230322093158-35195fa155c0 + github.com/multiversx/mx-chain-core-go v1.2.1-0.20230329082847-b78e96c3ad5a github.com/multiversx/mx-chain-crypto-go v1.2.5 - github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230322095713-a82089993c0d + github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96 github.com/multiversx/mx-chain-logger-go v1.0.11 github.com/multiversx/mx-chain-p2p-go v1.0.13 github.com/multiversx/mx-chain-storage-go v1.0.7 diff --git a/go.sum b/go.sum index 1f771cf385b..95fc609ad2e 100644 --- a/go.sum +++ b/go.sum @@ -612,12 +612,12 @@ github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7 github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.31/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.0/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.2.1-0.20230322093158-35195fa155c0 h1:Hr0bAMUJigh5xGFm2qoKKglEtpIXckxCLjCDBUYv1DM= -github.com/multiversx/mx-chain-core-go v1.2.1-0.20230322093158-35195fa155c0/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= +github.com/multiversx/mx-chain-core-go v1.2.1-0.20230329082847-b78e96c3ad5a h1:cjPv/NIb4c3oBuBxxc2ggyaMvpmGlo1RO8mmzrkWARM= +github.com/multiversx/mx-chain-core-go v1.2.1-0.20230329082847-b78e96c3ad5a/go.mod h1:/lovncjwo+pXQ7IAERwNzwCifeH7SAWk0DGqjorX2bc= github.com/multiversx/mx-chain-crypto-go v1.2.5 
h1:tuq3BUNMhKud5DQbZi9DiVAAHUXypizy8zPH0NpTGZk= github.com/multiversx/mx-chain-crypto-go v1.2.5/go.mod h1:teqhNyWEqfMPgNn8sgWXlgtJ1a36jGCnhs/tRpXW6r4= -github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230322095713-a82089993c0d h1:PDkG7SrzIsyRo5PLI68vlBM85PiQhWHhpafW7VCzhgM= -github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230322095713-a82089993c0d/go.mod h1:iDLxzwUi9CGzzEbFRi4TgYvmhsp21qNj5eqMgT32d2M= +github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96 h1:okIfLr+NqX04eHNp9k97KuLhpYfLJOjmGZaOia9xcGg= +github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96/go.mod h1:Y6jgeoMBpDCtm7lurtChhgPyhpQ0GF5OruW/tl/++JI= github.com/multiversx/mx-chain-logger-go v1.0.11 h1:DFsHa+sc5fKwhDR50I8uBM99RTDTEW68ESyr5ALRDwE= github.com/multiversx/mx-chain-logger-go v1.0.11/go.mod h1:1srDkP0DQucWQ+rYfaq0BX2qLnULsUdRPADpYUTM6dA= github.com/multiversx/mx-chain-p2p-go v1.0.13 h1:woIlYkDFCKYyJQ5urDcOzz8HUFGsSEhTfUXDDxNI2zM= diff --git a/outport/notifier/eventNotifier.go b/outport/notifier/eventNotifier.go index db5ae102fc4..349164fde3b 100644 --- a/outport/notifier/eventNotifier.go +++ b/outport/notifier/eventNotifier.go @@ -6,7 +6,8 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-core-go/core/unmarshal" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/marshal" logger "github.com/multiversx/mx-chain-logger-go" @@ -34,8 +35,9 @@ type FinalizedBlock struct { } type eventNotifier struct { - httpClient httpClientHandler - marshalizer marshal.Marshalizer + httpClient httpClientHandler + marshalizer marshal.Marshalizer + blockContainer blockContainerHandler } // ArgsEventNotifier defines the arguments needed for event notifier creation @@ -51,10 +53,15 @@ func NewEventNotifier(args 
ArgsEventNotifier) (*eventNotifier, error) { if err != nil { return nil, err } + blockContainer, err := createBlockCreatorsContainer() + if err != nil { + return nil, err + } return &eventNotifier{ - httpClient: args.HttpClient, - marshalizer: args.Marshaller, + httpClient: args.HttpClient, + marshalizer: args.Marshaller, + blockContainer: blockContainer, }, nil } @@ -86,7 +93,7 @@ func (en *eventNotifier) SaveBlock(args *outport.OutportBlock) error { // RevertIndexedBlock converts revert data in order to be pushed to subscribers func (en *eventNotifier) RevertIndexedBlock(blockData *outport.BlockData) error { - headerHandler, err := unmarshal.GetHeaderFromBytes(en.marshalizer, core.HeaderType(blockData.HeaderType), blockData.HeaderBytes) + headerHandler, err := en.getHeaderFromBytes(core.HeaderType(blockData.HeaderType), blockData.HeaderBytes) if err != nil { return err } @@ -150,3 +157,30 @@ func (en *eventNotifier) IsInterfaceNil() bool { func (en *eventNotifier) Close() error { return nil } + +func (en *eventNotifier) getHeaderFromBytes(headerType core.HeaderType, headerBytes []byte) (header data.HeaderHandler, err error) { + creator, err := en.blockContainer.Get(headerType) + if err != nil { + return nil, err + } + + return block.GetHeaderFromBytes(en.marshalizer, creator, headerBytes) +} + +func createBlockCreatorsContainer() (blockContainerHandler, error) { + container := block.NewEmptyBlockCreatorsContainer() + err := container.Add(core.ShardHeaderV1, block.NewEmptyHeaderCreator()) + if err != nil { + return nil, err + } + err = container.Add(core.ShardHeaderV2, block.NewEmptyHeaderV2Creator()) + if err != nil { + return nil, err + } + err = container.Add(core.MetaHeader, block.NewEmptyMetaBlockCreator()) + if err != nil { + return nil, err + } + + return container, nil +} diff --git a/outport/notifier/interface.go b/outport/notifier/interface.go index 52bdf53eb52..612ab5988cf 100644 --- a/outport/notifier/interface.go +++ b/outport/notifier/interface.go @@ 
-1,6 +1,16 @@ package notifier +import ( + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/block" +) + type httpClientHandler interface { Post(route string, payload interface{}) error IsInterfaceNil() bool } + +// BlockContainerHandler defines what a block container should be able to do +type blockContainerHandler interface { + Get(headerType core.HeaderType) (block.EmptyBlockCreator, error) +} From 74dc1e4fc13011157328de0f014f3fd75809ddca Mon Sep 17 00:00:00 2001 From: Iuga Mihai Date: Fri, 31 Mar 2023 16:40:19 +0300 Subject: [PATCH 082/221] fixes after review --- go.mod | 2 +- go.sum | 3 +- outport/factory/notifierFactory.go | 29 +++++++++++++++++-- outport/notifier/errors.go | 10 ++----- outport/notifier/eventNotifier.go | 34 ++++++----------------- outport/notifier/eventNotifier_test.go | 22 +++++++++++++-- outport/notifier/interface.go | 2 +- testscommon/outport/blockContainerStub.go | 20 +++++++++++++ 8 files changed, 81 insertions(+), 41 deletions(-) create mode 100644 testscommon/outport/blockContainerStub.go diff --git a/go.mod b/go.mod index 15c251425ee..8c2797ac2af 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-core-go v1.2.1-0.20230329082847-b78e96c3ad5a + github.com/multiversx/mx-chain-core-go v1.2.1-0.20230330105824-932a718276f6 github.com/multiversx/mx-chain-crypto-go v1.2.5 github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96 github.com/multiversx/mx-chain-logger-go v1.0.11 diff --git a/go.sum b/go.sum index 95fc609ad2e..667d3d6905b 100644 --- a/go.sum +++ b/go.sum @@ -612,8 +612,9 @@ github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7 github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.31/go.mod 
h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.0/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.2.1-0.20230329082847-b78e96c3ad5a h1:cjPv/NIb4c3oBuBxxc2ggyaMvpmGlo1RO8mmzrkWARM= github.com/multiversx/mx-chain-core-go v1.2.1-0.20230329082847-b78e96c3ad5a/go.mod h1:/lovncjwo+pXQ7IAERwNzwCifeH7SAWk0DGqjorX2bc= +github.com/multiversx/mx-chain-core-go v1.2.1-0.20230330105824-932a718276f6 h1:4Nv0uxJbfSZ1fqWcQEYyQ1SdAAluDEbHjTi0X8ZFXFs= +github.com/multiversx/mx-chain-core-go v1.2.1-0.20230330105824-932a718276f6/go.mod h1:/lovncjwo+pXQ7IAERwNzwCifeH7SAWk0DGqjorX2bc= github.com/multiversx/mx-chain-crypto-go v1.2.5 h1:tuq3BUNMhKud5DQbZi9DiVAAHUXypizy8zPH0NpTGZk= github.com/multiversx/mx-chain-crypto-go v1.2.5/go.mod h1:teqhNyWEqfMPgNn8sgWXlgtJ1a36jGCnhs/tRpXW6r4= github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96 h1:okIfLr+NqX04eHNp9k97KuLhpYfLJOjmGZaOia9xcGg= diff --git a/outport/factory/notifierFactory.go b/outport/factory/notifierFactory.go index 0e57628a3c5..a4cc572f491 100644 --- a/outport/factory/notifierFactory.go +++ b/outport/factory/notifierFactory.go @@ -3,6 +3,7 @@ package factory import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/outport" "github.com/multiversx/mx-chain-go/outport/notifier" @@ -37,9 +38,15 @@ func CreateEventNotifier(args *EventNotifierFactoryArgs) (outport.Driver, error) return nil, err } + blockContainer, err := createBlockCreatorsContainer() + if err != nil { + return nil, err + } + notifierArgs := notifier.ArgsEventNotifier{ - HttpClient: httpClient, - Marshaller: args.Marshaller, + HttpClient: httpClient, + Marshaller: args.Marshaller, + BlockContainer: blockContainer, } return notifier.NewEventNotifier(notifierArgs) @@ 
-52,3 +59,21 @@ func checkInputArgs(args *EventNotifierFactoryArgs) error { return nil } + +func createBlockCreatorsContainer() (notifier.BlockContainerHandler, error) { + container := block.NewEmptyBlockCreatorsContainer() + err := container.Add(core.ShardHeaderV1, block.NewEmptyHeaderCreator()) + if err != nil { + return nil, err + } + err = container.Add(core.ShardHeaderV2, block.NewEmptyHeaderV2Creator()) + if err != nil { + return nil, err + } + err = container.Add(core.MetaHeader, block.NewEmptyMetaBlockCreator()) + if err != nil { + return nil, err + } + + return container, nil +} diff --git a/outport/notifier/errors.go b/outport/notifier/errors.go index 7c6fff363ac..154faf4c1dd 100644 --- a/outport/notifier/errors.go +++ b/outport/notifier/errors.go @@ -4,9 +4,6 @@ import ( "errors" ) -// ErrNilTransactionsPool signals that a nil transactions pool was provided -var ErrNilTransactionsPool = errors.New("nil transactions pool") - // ErrInvalidValue signals that an invalid value has been provided var ErrInvalidValue = errors.New("invalid value") @@ -16,8 +13,5 @@ var ErrNilHTTPClientWrapper = errors.New("nil http client wrapper") // ErrNilMarshaller signals that a nil marshaller has been provided var ErrNilMarshaller = errors.New("nil marshaller") -// ErrNilPubKeyConverter signals that a nil pubkey converter has been provided -var ErrNilPubKeyConverter = errors.New("nil pub key converter") - -// ErrNilHasher is raised when a valid hasher is expected but nil used -var ErrNilHasher = errors.New("hasher is nil") +// ErrNilBlockContainerHandler signals that a nil block container handler has been provided +var ErrNilBlockContainerHandler = errors.New("nil bock container handler") diff --git a/outport/notifier/eventNotifier.go b/outport/notifier/eventNotifier.go index 4518d16de8d..b8e8b007258 100644 --- a/outport/notifier/eventNotifier.go +++ b/outport/notifier/eventNotifier.go @@ -32,13 +32,14 @@ type RevertBlock struct { type eventNotifier struct { httpClient 
httpClientHandler marshalizer marshal.Marshalizer - blockContainer blockContainerHandler + blockContainer BlockContainerHandler } // ArgsEventNotifier defines the arguments needed for event notifier creation type ArgsEventNotifier struct { - HttpClient httpClientHandler - Marshaller marshal.Marshalizer + HttpClient httpClientHandler + Marshaller marshal.Marshalizer + BlockContainer BlockContainerHandler } // NewEventNotifier creates a new instance of the eventNotifier @@ -48,15 +49,11 @@ func NewEventNotifier(args ArgsEventNotifier) (*eventNotifier, error) { if err != nil { return nil, err } - blockContainer, err := createBlockCreatorsContainer() - if err != nil { - return nil, err - } return &eventNotifier{ httpClient: args.HttpClient, marshalizer: args.Marshaller, - blockContainer: blockContainer, + blockContainer: args.BlockContainer, }, nil } @@ -67,6 +64,9 @@ func checkEventNotifierArgs(args ArgsEventNotifier) error { if check.IfNil(args.Marshaller) { return ErrNilMarshaller } + if check.IfNilReflect(args.BlockContainer) { + return ErrNilBlockContainerHandler + } return nil } @@ -158,21 +158,3 @@ func (en *eventNotifier) getHeaderFromBytes(headerType core.HeaderType, headerBy return block.GetHeaderFromBytes(en.marshalizer, creator, headerBytes) } - -func createBlockCreatorsContainer() (blockContainerHandler, error) { - container := block.NewEmptyBlockCreatorsContainer() - err := container.Add(core.ShardHeaderV1, block.NewEmptyHeaderCreator()) - if err != nil { - return nil, err - } - err = container.Add(core.ShardHeaderV2, block.NewEmptyHeaderV2Creator()) - if err != nil { - return nil, err - } - err = container.Add(core.MetaHeader, block.NewEmptyMetaBlockCreator()) - if err != nil { - return nil, err - } - - return container, nil -} diff --git a/outport/notifier/eventNotifier_test.go b/outport/notifier/eventNotifier_test.go index 28f8950bcec..60a3d354206 100644 --- a/outport/notifier/eventNotifier_test.go +++ b/outport/notifier/eventNotifier_test.go @@ -11,14 
+11,16 @@ import ( "github.com/multiversx/mx-chain-go/outport/mock" "github.com/multiversx/mx-chain-go/outport/notifier" "github.com/multiversx/mx-chain-go/testscommon" + outportStub "github.com/multiversx/mx-chain-go/testscommon/outport" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func createMockEventNotifierArgs() notifier.ArgsEventNotifier { return notifier.ArgsEventNotifier{ - HttpClient: &mock.HTTPClientStub{}, - Marshaller: &testscommon.MarshalizerMock{}, + HttpClient: &mock.HTTPClientStub{}, + Marshaller: &testscommon.MarshalizerMock{}, + BlockContainer: &outportStub.BlockContainerStub{}, } } @@ -47,6 +49,17 @@ func TestNewEventNotifier(t *testing.T) { require.Equal(t, notifier.ErrNilMarshaller, err) }) + t.Run("nil block container", func(t *testing.T) { + t.Parallel() + + args := createMockEventNotifierArgs() + args.BlockContainer = nil + + en, err := notifier.NewEventNotifier(args) + require.Nil(t, en) + require.Equal(t, notifier.ErrNilBlockContainerHandler, err) + }) + t.Run("should work", func(t *testing.T) { t.Parallel() @@ -123,6 +136,11 @@ func TestRevertIndexedBlock(t *testing.T) { return nil }, } + args.BlockContainer = &outportStub.BlockContainerStub{ + GetCalled: func(headerType core.HeaderType) (block.EmptyBlockCreator, error) { + return block.NewEmptyHeaderCreator(), nil + }, + } en, _ := notifier.NewEventNotifier(args) diff --git a/outport/notifier/interface.go b/outport/notifier/interface.go index 612ab5988cf..2fd931d0295 100644 --- a/outport/notifier/interface.go +++ b/outport/notifier/interface.go @@ -11,6 +11,6 @@ type httpClientHandler interface { } // BlockContainerHandler defines what a block container should be able to do -type blockContainerHandler interface { +type BlockContainerHandler interface { Get(headerType core.HeaderType) (block.EmptyBlockCreator, error) } diff --git a/testscommon/outport/blockContainerStub.go b/testscommon/outport/blockContainerStub.go new file mode 100644 index 
00000000000..cec28498b2f --- /dev/null +++ b/testscommon/outport/blockContainerStub.go @@ -0,0 +1,20 @@ +package outport + +import ( + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/block" +) + +// BlockContainerStub - +type BlockContainerStub struct { + GetCalled func(headerType core.HeaderType) (block.EmptyBlockCreator, error) +} + +// Get - +func (bcs *BlockContainerStub) Get(headerType core.HeaderType) (block.EmptyBlockCreator, error) { + if bcs.GetCalled != nil { + return bcs.GetCalled(headerType) + } + + return nil, nil +} From 69e7acaa037228db9665618195e658c1204daf3a Mon Sep 17 00:00:00 2001 From: Iuga Mihai Date: Fri, 31 Mar 2023 17:45:12 +0300 Subject: [PATCH 083/221] go mod tidy --- go.sum | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/go.sum b/go.sum index 1cc5571c731..6dba64f6532 100644 --- a/go.sum +++ b/go.sum @@ -612,12 +612,14 @@ github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7 github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.31/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.34/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.2.0 h1:K539hKZKcHjBiFQpowFbA3BUd95Fe5+FLC+rKBOFZF0= github.com/multiversx/mx-chain-core-go v1.2.0/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= +github.com/multiversx/mx-chain-core-go v1.2.1-0.20230329082847-b78e96c3ad5a/go.mod h1:/lovncjwo+pXQ7IAERwNzwCifeH7SAWk0DGqjorX2bc= +github.com/multiversx/mx-chain-core-go v1.2.1-0.20230330105824-932a718276f6 h1:4Nv0uxJbfSZ1fqWcQEYyQ1SdAAluDEbHjTi0X8ZFXFs= +github.com/multiversx/mx-chain-core-go v1.2.1-0.20230330105824-932a718276f6/go.mod h1:/lovncjwo+pXQ7IAERwNzwCifeH7SAWk0DGqjorX2bc= github.com/multiversx/mx-chain-crypto-go v1.2.5 h1:tuq3BUNMhKud5DQbZi9DiVAAHUXypizy8zPH0NpTGZk= 
github.com/multiversx/mx-chain-crypto-go v1.2.5/go.mod h1:teqhNyWEqfMPgNn8sgWXlgtJ1a36jGCnhs/tRpXW6r4= -github.com/multiversx/mx-chain-es-indexer-go v1.4.0 h1:t2UCfbLRbFPBWK1IC1/qOVg+2D6y189xZZ1BoV83gq8= -github.com/multiversx/mx-chain-es-indexer-go v1.4.0/go.mod h1:3glMXvE42VvLlUdiMMtQoDr6uKYS6RGb0icRgyAGXIY= +github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96 h1:okIfLr+NqX04eHNp9k97KuLhpYfLJOjmGZaOia9xcGg= +github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96/go.mod h1:Y6jgeoMBpDCtm7lurtChhgPyhpQ0GF5OruW/tl/++JI= github.com/multiversx/mx-chain-logger-go v1.0.11 h1:DFsHa+sc5fKwhDR50I8uBM99RTDTEW68ESyr5ALRDwE= github.com/multiversx/mx-chain-logger-go v1.0.11/go.mod h1:1srDkP0DQucWQ+rYfaq0BX2qLnULsUdRPADpYUTM6dA= github.com/multiversx/mx-chain-p2p-go v1.0.15 h1:H7273huZG/zAR6MPvWuXwBEVBsJWH1MeSIDshYV0nh0= From 90c6e2130b0a9b8103ee890f2d5c3adbf7e1d55c Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Mon, 3 Apr 2023 16:54:39 +0300 Subject: [PATCH 084/221] move IsGetNodeFromDbErr() to core --- common/logging/errorLogging.go | 4 +- epochStart/shardchain/trigger.go | 3 +- errors/closingError_test.go | 45 ------------------- errors/missingTrieNodeError.go | 19 -------- go.mod | 6 +-- go.sum | 12 ++--- process/block/baseProcess.go | 3 +- process/block/preprocess/basePreProcess.go | 3 +- process/block/preprocess/transactions.go | 7 ++- process/block/preprocess/transactionsV2.go | 7 ++- process/block/shardblock.go | 3 +- process/coordinator/process.go | 3 +- process/rewardTransaction/process.go | 3 +- process/smartContract/process.go | 15 +++---- process/sync/metablock.go | 3 +- process/sync/shardblock.go | 3 +- process/transaction/shardProcess.go | 3 +- .../indexHashedNodesCoordinator.go | 3 +- state/accountsDB.go | 3 +- .../storagePruningManager.go | 3 +- trie/node.go | 3 +- trie/patriciaMerkleTrie.go | 4 +- trie/snapshotTrieStorageManager.go | 2 +- trie/trieStorageManager.go | 6 +-- 
trie/trieStorageManagerInEpoch.go | 3 +- 25 files changed, 47 insertions(+), 122 deletions(-) delete mode 100644 errors/closingError_test.go diff --git a/common/logging/errorLogging.go b/common/logging/errorLogging.go index 17693e3b4f3..94bc88ae74d 100644 --- a/common/logging/errorLogging.go +++ b/common/logging/errorLogging.go @@ -1,8 +1,8 @@ package logging import ( + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" - chainErrors "github.com/multiversx/mx-chain-go/errors" logger "github.com/multiversx/mx-chain-logger-go" ) @@ -24,7 +24,7 @@ func logErrAsLevelExceptAsDebugIfClosingError(logInstance logger.Logger, logLeve return } - if chainErrors.IsClosingError(err) { + if core.IsClosingError(err) { logLevel = logger.LogDebug } diff --git a/epochStart/shardchain/trigger.go b/epochStart/shardchain/trigger.go index cd0637d724e..76a949b6961 100644 --- a/epochStart/shardchain/trigger.go +++ b/epochStart/shardchain/trigger.go @@ -22,7 +22,6 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/epochStart" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-logger-go" @@ -997,7 +996,7 @@ func (t *trigger) SetProcessed(header data.HeaderHandler, _ data.BodyHandler) { errNotCritical = t.shardHdrStorage.Put([]byte(epochStartIdentifier), shardHdrBuff) if errNotCritical != nil { logLevel := logger.LogWarning - if errors.IsClosingError(errNotCritical) { + if core.IsClosingError(errNotCritical) { logLevel = logger.LogDebug } log.Log(logLevel, "SetProcessed put to shard header storage error", "error", errNotCritical) diff --git a/errors/closingError_test.go b/errors/closingError_test.go deleted file mode 100644 index 27316a4dc42..00000000000 --- a/errors/closingError_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package errors_test - 
-import ( - "fmt" - "testing" - - "github.com/multiversx/mx-chain-go/errors" - "github.com/multiversx/mx-chain-go/storage" - "github.com/stretchr/testify/assert" -) - -func TestIsClosingError(t *testing.T) { - t.Parallel() - - t.Run("nil error should return false", func(t *testing.T) { - t.Parallel() - - assert.False(t, errors.IsClosingError(nil)) - }) - t.Run("context closing error should return true", func(t *testing.T) { - t.Parallel() - - assert.True(t, errors.IsClosingError(fmt.Errorf("%w random string", errors.ErrContextClosing))) - }) - t.Run("DB closed error should return true", func(t *testing.T) { - t.Parallel() - - assert.True(t, errors.IsClosingError(fmt.Errorf("%w random string", storage.ErrDBIsClosed))) - }) - t.Run("contains 'DB is closed' should return true", func(t *testing.T) { - t.Parallel() - - assert.True(t, errors.IsClosingError(fmt.Errorf("random string DB is closed random string"))) - }) - t.Run("contains 'DB is closed' should return true", func(t *testing.T) { - t.Parallel() - - assert.True(t, errors.IsClosingError(fmt.Errorf("random string context closing random string"))) - }) - t.Run("random error should return false", func(t *testing.T) { - t.Parallel() - - assert.False(t, errors.IsClosingError(fmt.Errorf("random error"))) - }) -} diff --git a/errors/missingTrieNodeError.go b/errors/missingTrieNodeError.go index c78ef8bd794..ebb92003085 100644 --- a/errors/missingTrieNodeError.go +++ b/errors/missingTrieNodeError.go @@ -3,28 +3,9 @@ package errors import ( "encoding/hex" "fmt" - "strings" - "github.com/multiversx/mx-chain-go/common" ) -// IsGetNodeFromDBError returns true if the provided error is of type getNodeFromDB -func IsGetNodeFromDBError(err error) bool { - if err == nil { - return false - } - - if IsClosingError(err) { - return false - } - - if strings.Contains(err.Error(), common.GetNodeFromDBErrorString) { - return true - } - - return false -} - // GetNodeFromDBErrWithKey defines a custom error for trie get node type 
GetNodeFromDBErrWithKey struct { getErr error diff --git a/go.mod b/go.mod index 09573845409..9fc383bb950 100644 --- a/go.mod +++ b/go.mod @@ -13,13 +13,13 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-core-go v1.2.0 + github.com/multiversx/mx-chain-core-go v1.2.1-0.20230403113932-916b16d18978 github.com/multiversx/mx-chain-crypto-go v1.2.5 github.com/multiversx/mx-chain-es-indexer-go v1.4.0 github.com/multiversx/mx-chain-logger-go v1.0.11 github.com/multiversx/mx-chain-p2p-go v1.0.15 - github.com/multiversx/mx-chain-storage-go v1.0.7 - github.com/multiversx/mx-chain-vm-common-go v1.4.0 + github.com/multiversx/mx-chain-storage-go v1.0.8-0.20230403115027-9139fce478e0 + github.com/multiversx/mx-chain-vm-common-go v1.4.1-0.20230403123953-7fc57accc0c6 github.com/multiversx/mx-chain-vm-v1_2-go v1.2.50 github.com/multiversx/mx-chain-vm-v1_3-go v1.3.51 github.com/multiversx/mx-chain-vm-v1_4-go v1.4.77 diff --git a/go.sum b/go.sum index 1cc5571c731..90649986acf 100644 --- a/go.sum +++ b/go.sum @@ -611,9 +611,8 @@ github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUY github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.31/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.1.34/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.2.0 h1:K539hKZKcHjBiFQpowFbA3BUd95Fe5+FLC+rKBOFZF0= -github.com/multiversx/mx-chain-core-go v1.2.0/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= +github.com/multiversx/mx-chain-core-go v1.2.1-0.20230403113932-916b16d18978 h1:caHg1OhZmaA8oX3TbimkBaty+eHvhpNSO8rQOicrS7o= +github.com/multiversx/mx-chain-core-go 
v1.2.1-0.20230403113932-916b16d18978/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-crypto-go v1.2.5 h1:tuq3BUNMhKud5DQbZi9DiVAAHUXypizy8zPH0NpTGZk= github.com/multiversx/mx-chain-crypto-go v1.2.5/go.mod h1:teqhNyWEqfMPgNn8sgWXlgtJ1a36jGCnhs/tRpXW6r4= github.com/multiversx/mx-chain-es-indexer-go v1.4.0 h1:t2UCfbLRbFPBWK1IC1/qOVg+2D6y189xZZ1BoV83gq8= @@ -624,10 +623,13 @@ github.com/multiversx/mx-chain-p2p-go v1.0.15 h1:H7273huZG/zAR6MPvWuXwBEVBsJWH1M github.com/multiversx/mx-chain-p2p-go v1.0.15/go.mod h1:hUE4H8kGJk3u9gTqeetF3uhjJpnfdV/hALKsJ6bMI+8= github.com/multiversx/mx-chain-storage-go v1.0.7 h1:UqLo/OLTD3IHiE/TB/SEdNRV1GG2f1R6vIP5ehHwCNw= github.com/multiversx/mx-chain-storage-go v1.0.7/go.mod h1:gtKoV32Cg2Uy8deHzF8Ud0qAl0zv92FvWgPSYIP0Zmg= +github.com/multiversx/mx-chain-storage-go v1.0.8-0.20230403115027-9139fce478e0 h1:jTGuq0IAQdghGLoNx2BgkxWvkcZV9ZmJ0qB8/oU4MNQ= +github.com/multiversx/mx-chain-storage-go v1.0.8-0.20230403115027-9139fce478e0/go.mod h1:FGhaeTNIcLZOPqsJZQ1TdcMaPVLhj642OzRNmt6+RQs= +github.com/multiversx/mx-chain-vm-common-go v1.3.34/go.mod h1:sZ2COLCxvf2GxAAJHGmGqWybObLtFuk2tZUyGqnMXE8= github.com/multiversx/mx-chain-vm-common-go v1.3.36/go.mod h1:sZ2COLCxvf2GxAAJHGmGqWybObLtFuk2tZUyGqnMXE8= github.com/multiversx/mx-chain-vm-common-go v1.3.37/go.mod h1:sZ2COLCxvf2GxAAJHGmGqWybObLtFuk2tZUyGqnMXE8= -github.com/multiversx/mx-chain-vm-common-go v1.4.0 h1:0i0cJZJOXGzqYzwtKFHSr2yGmnFAdizOuISK8HgsnYo= -github.com/multiversx/mx-chain-vm-common-go v1.4.0/go.mod h1:odBJC92ANA8zLtPh/wwajUUGJOaS88F5QYGf0t8Wgzw= +github.com/multiversx/mx-chain-vm-common-go v1.4.1-0.20230403123953-7fc57accc0c6 h1:3G8BHyVfz1DkeZcds4iME5vDHzg8Yg2++wet0DDYZ3c= +github.com/multiversx/mx-chain-vm-common-go v1.4.1-0.20230403123953-7fc57accc0c6/go.mod h1:rxb8laeh06wayB/dZPpN5LT3qcwv4SgpNHiSvPsNjuw= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.50 h1:ScUq7/wq78vthMTQ6v5Ux1DvSMQMHxQ2Sl7aPP26q1w= github.com/multiversx/mx-chain-vm-v1_2-go 
v1.2.50/go.mod h1:e3uYdgoKzs3puaznbmSjDcRisJc5Do4tpg7VqyYwoek= github.com/multiversx/mx-chain-vm-v1_3-go v1.3.51 h1:axtp5/mpA+xYJ1cu4KtAGETV4t6v6/tNfQh0HCclBYY= diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index 160f4cccfa7..191e91972de 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -28,7 +28,6 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dblookupext" debugFactory "github.com/multiversx/mx-chain-go/debug/factory" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/outport" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" @@ -1685,7 +1684,7 @@ func (bp *baseProcessor) recordBlockInHistory(blockHeaderHash []byte, blockHeade err := bp.historyRepo.RecordBlock(blockHeaderHash, blockHeader, blockBody, scrResultsFromPool, receiptsFromPool, intraMiniBlocks, logs) if err != nil { logLevel := logger.LogError - if errors.IsClosingError(err) { + if core.IsClosingError(err) { logLevel = logger.LogDebug } log.Log(logLevel, "historyRepo.RecordBlock()", "blockHeaderHash", blockHeaderHash, "error", err.Error()) diff --git a/process/block/preprocess/basePreProcess.go b/process/block/preprocess/basePreProcess.go index 826d1b6bd35..08f3e4cfa37 100644 --- a/process/block/preprocess/basePreProcess.go +++ b/process/block/preprocess/basePreProcess.go @@ -14,7 +14,6 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/storage" @@ -414,7 +413,7 @@ func (bpp *basePreProcess) saveAccountBalanceForAddress(address []byte) error { balance, err := bpp.getBalanceForAddress(address) if err != nil { - if errors.IsGetNodeFromDBError(err) { 
+ if core.IsGetNodeFromDBError(err) { return err } balance = big.NewInt(0) diff --git a/process/block/preprocess/transactions.go b/process/block/preprocess/transactions.go index 3d1b86024d6..deb7dacb733 100644 --- a/process/block/preprocess/transactions.go +++ b/process/block/preprocess/transactions.go @@ -17,7 +17,6 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" - chainErr "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" @@ -1091,7 +1090,7 @@ func (txs *transactions) CreateAndProcessMiniBlocks(haveTime func() bool, random if err != nil { log.Debug("createAndProcessMiniBlocksFromMe", "error", err.Error()) - if chainErr.IsGetNodeFromDBError(err) { + if core.IsGetNodeFromDBError(err) { return nil, err } @@ -1206,7 +1205,7 @@ func (txs *transactions) createAndProcessMiniBlocksFromMeV1( err = txs.processMiniBlockBuilderTx(mbBuilder, wtx, tx) if err != nil { - if chainErr.IsGetNodeFromDBError(err) { + if core.IsGetNodeFromDBError(err) { return nil, nil, err } continue @@ -1298,7 +1297,7 @@ func (txs *transactions) handleBadTransaction( ) { log.Trace("bad tx", "error", err.Error(), "hash", wtx.TxHash) errRevert := txs.accounts.RevertToSnapshot(snapshot) - if errRevert != nil && !chainErr.IsClosingError(errRevert) { + if errRevert != nil && !core.IsClosingError(errRevert) { log.Warn("revert to snapshot", "error", err.Error()) } diff --git a/process/block/preprocess/transactionsV2.go b/process/block/preprocess/transactionsV2.go index 430aa373028..d94434965d7 100644 --- a/process/block/preprocess/transactionsV2.go +++ b/process/block/preprocess/transactionsV2.go @@ -9,7 +9,6 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/common" - 
chainErr "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/storage/txcache" ) @@ -71,7 +70,7 @@ func (txs *transactions) createAndProcessMiniBlocksFromMeV2( receiverShardID, mbInfo) if err != nil { - if chainErr.IsGetNodeFromDBError(err) { + if core.IsGetNodeFromDBError(err) { return nil, nil, nil, err } if shouldAddToRemaining { @@ -189,7 +188,7 @@ func (txs *transactions) processTransaction( log.Trace("bad tx", "error", err.Error(), "hash", txHash) errRevert := txs.accounts.RevertToSnapshot(snapshot) - if errRevert != nil && !chainErr.IsClosingError(errRevert) { + if errRevert != nil && !core.IsClosingError(errRevert) { log.Warn("revert to snapshot", "error", errRevert.Error()) } @@ -316,7 +315,7 @@ func (txs *transactions) createScheduledMiniBlocks( receiverShardID, mbInfo) if err != nil { - if chainErr.IsGetNodeFromDBError(err) { + if core.IsGetNodeFromDBError(err) { return nil, err } continue diff --git a/process/block/shardblock.go b/process/block/shardblock.go index dd1d36fcd0f..9ab6c655780 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -13,7 +13,6 @@ import ( "github.com/multiversx/mx-chain-core-go/data/headerVersionData" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" - "github.com/multiversx/mx-chain-go/errors" processOutport "github.com/multiversx/mx-chain-go/outport/process" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" @@ -2028,7 +2027,7 @@ func (sp *shardProcessor) createMiniBlocks(haveTime func() bool, randomness []by if err != nil { log.Debug("createAndProcessCrossMiniBlocksDstMe", "error", err.Error()) - if errors.IsGetNodeFromDBError(err) { + if core.IsGetNodeFromDBError(err) { return nil, nil, err } } diff --git a/process/coordinator/process.go b/process/coordinator/process.go index 5a1a685a478..29ca913d13f 100644 --- 
a/process/coordinator/process.go +++ b/process/coordinator/process.go @@ -16,7 +16,6 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/preprocess" "github.com/multiversx/mx-chain-go/process/block/processedMb" @@ -721,7 +720,7 @@ func (tc *transactionCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe "error", errProc, ) - if errors.IsGetNodeFromDBError(errProc) { + if core.IsGetNodeFromDBError(errProc) { return nil, 0, false, err } diff --git a/process/rewardTransaction/process.go b/process/rewardTransaction/process.go index fed488bb606..e641ef5d0cd 100644 --- a/process/rewardTransaction/process.go +++ b/process/rewardTransaction/process.go @@ -6,7 +6,6 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data/rewardTx" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" @@ -120,7 +119,7 @@ func (rtp *rewardTxProcessor) saveAccumulatedRewards( existingReward.SetBytes(val) } - if errors.IsGetNodeFromDBError(err) { + if core.IsGetNodeFromDBError(err) { return err } diff --git a/process/smartContract/process.go b/process/smartContract/process.go index 361a5ebd4b3..620dafae8b2 100644 --- a/process/smartContract/process.go +++ b/process/smartContract/process.go @@ -19,7 +19,6 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" @@ -339,7 
+338,7 @@ func (sc *scProcessor) doExecuteSmartContractTransaction( var results []data.TransactionHandler results, err = sc.processVMOutput(vmOutput, txHash, tx, vmInput.CallType, vmInput.GasProvided) if err != nil { - if errors.IsGetNodeFromDBError(err) { + if core.IsGetNodeFromDBError(err) { return vmcommon.ExecutionFailed, err } log.Trace("process vm output returned with problem ", "err", err.Error()) @@ -381,7 +380,7 @@ func (sc *scProcessor) executeSmartContractCall( vmOutput, err = vmExec.RunSmartContractCall(vmInput) sc.wasmVMChangeLocker.RUnlock() if err != nil { - if errors.IsGetNodeFromDBError(err) { + if core.IsGetNodeFromDBError(err) { return nil, err } log.Debug("run smart contract call error", "error", err.Error()) @@ -984,7 +983,7 @@ func (sc *scProcessor) doExecuteBuiltInFunction( tmpCreatedAsyncCallback := false tmpCreatedAsyncCallback, newSCRTxs, err = sc.processSCOutputAccounts(newVMOutput, vmInput.CallType, outPutAccounts, tx, txHash) if err != nil { - if errors.IsGetNodeFromDBError(err) { + if core.IsGetNodeFromDBError(err) { return vmcommon.ExecutionFailed, err } return vmcommon.ExecutionFailed, sc.ProcessIfError(acntSnd, txHash, tx, err.Error(), []byte(err.Error()), snapshot, vmInput.GasLocked) @@ -1086,7 +1085,7 @@ func (sc *scProcessor) resolveBuiltInFunctions( GasRemaining: 0, } - if errors.IsGetNodeFromDBError(err) { + if core.IsGetNodeFromDBError(err) { return nil, err } @@ -1376,7 +1375,7 @@ func (sc *scProcessor) processIfErrorWithAddedLogs( err := sc.accounts.RevertToSnapshot(snapshot) if err != nil { - if !errors.IsClosingError(err) { + if !core.IsClosingError(err) { log.Warn("revert to snapshot", "error", err.Error()) } @@ -1730,7 +1729,7 @@ func (sc *scProcessor) doDeploySmartContract( sc.wasmVMChangeLocker.RUnlock() if err != nil { log.Debug("VM error", "error", err.Error()) - if errors.IsGetNodeFromDBError(err) { + if core.IsGetNodeFromDBError(err) { return vmcommon.ExecutionFailed, err } return vmcommon.UserError, 
sc.ProcessIfError(acntSnd, txHash, tx, err.Error(), []byte(""), snapshot, vmInput.GasLocked) @@ -1755,7 +1754,7 @@ func (sc *scProcessor) doDeploySmartContract( results, err := sc.processVMOutput(vmOutput, txHash, tx, vmInput.CallType, vmInput.GasProvided) if err != nil { log.Trace("Processing error", "error", err.Error()) - if errors.IsGetNodeFromDBError(err) { + if core.IsGetNodeFromDBError(err) { return vmcommon.ExecutionFailed, err } return vmcommon.ExecutionFailed, sc.ProcessIfError(acntSnd, txHash, tx, err.Error(), []byte(vmOutput.ReturnMessage), snapshot, vmInput.GasLocked) diff --git a/process/sync/metablock.go b/process/sync/metablock.go index c71d95fe321..d8ca3cf4954 100644 --- a/process/sync/metablock.go +++ b/process/sync/metablock.go @@ -9,7 +9,6 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-go/dataRetriever" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/storage" @@ -180,7 +179,7 @@ func (boot *MetaBootstrap) setLastEpochStartRound() { // in the blockchain, and all this mechanism will be reiterated for the next block. 
func (boot *MetaBootstrap) SyncBlock(ctx context.Context) error { err := boot.syncBlock() - if errors.IsGetNodeFromDBError(err) { + if core.IsGetNodeFromDBError(err) { getNodeErr, ok := err.(getKeyHandler) if !ok { return err diff --git a/process/sync/shardblock.go b/process/sync/shardblock.go index be48c42de00..68a3c70f52d 100644 --- a/process/sync/shardblock.go +++ b/process/sync/shardblock.go @@ -9,7 +9,6 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-go/dataRetriever" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/storage" ) @@ -143,7 +142,7 @@ func (boot *ShardBootstrap) StartSyncingBlocks() { // in the blockchain, and all this mechanism will be reiterated for the next block. func (boot *ShardBootstrap) SyncBlock(ctx context.Context) error { err := boot.syncBlock() - if errors.IsGetNodeFromDBError(err) { + if core.IsGetNodeFromDBError(err) { getNodeErr, ok := err.(getKeyHandler) if !ok { return err diff --git a/process/transaction/shardProcess.go b/process/transaction/shardProcess.go index aeeec86b2e8..7ebb6faa014 100644 --- a/process/transaction/shardProcess.go +++ b/process/transaction/shardProcess.go @@ -16,7 +16,6 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - errorsCommon "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" @@ -218,7 +217,7 @@ func (txProc *txProcessor) executeAfterFailedMoveBalanceTransaction( tx *transaction.Transaction, txError error, ) error { - if errorsCommon.IsGetNodeFromDBError(txError) { + if core.IsGetNodeFromDBError(txError) { return txError } diff --git a/sharding/nodesCoordinator/indexHashedNodesCoordinator.go 
b/sharding/nodesCoordinator/indexHashedNodesCoordinator.go index fbdfe04914b..6840eec6cd1 100644 --- a/sharding/nodesCoordinator/indexHashedNodesCoordinator.go +++ b/sharding/nodesCoordinator/indexHashedNodesCoordinator.go @@ -17,7 +17,6 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/epochStart" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/storage" logger "github.com/multiversx/mx-chain-logger-go" @@ -810,7 +809,7 @@ func (ihnc *indexHashedNodesCoordinator) handleErrorLog(err error, message strin } logLevel := logger.LogError - if errors.IsClosingError(err) { + if core.IsClosingError(err) { logLevel = logger.LogDebug } diff --git a/state/accountsDB.go b/state/accountsDB.go index 2c6b57c2559..289f940cfd7 100644 --- a/state/accountsDB.go +++ b/state/accountsDB.go @@ -17,7 +17,6 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/holders" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/trie/keyBuilder" "github.com/multiversx/mx-chain-go/trie/statistics" logger "github.com/multiversx/mx-chain-logger-go" @@ -219,7 +218,7 @@ func handleLoggingWhenError(message string, err error, extraArguments ...interfa if err == nil { return } - if errors.IsClosingError(err) { + if core.IsClosingError(err) { args := []interface{}{"reason", err} log.Debug(message, append(args, extraArguments...)...) 
return diff --git a/state/storagePruningManager/storagePruningManager.go b/state/storagePruningManager/storagePruningManager.go index 73d9af30847..c985a5378ab 100644 --- a/state/storagePruningManager/storagePruningManager.go +++ b/state/storagePruningManager/storagePruningManager.go @@ -7,7 +7,6 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/storagePruningManager/pruningBuffer" logger "github.com/multiversx/mx-chain-logger-go" @@ -175,7 +174,7 @@ func (spm *storagePruningManager) prune(rootHash []byte, tsm common.StorageManag err := spm.removeFromDb(rootHash, tsm, handler) if err != nil { - if errors.IsClosingError(err) { + if core.IsClosingError(err) { log.Debug("did not remove hash", "rootHash", rootHash, "error", err) return } diff --git a/trie/node.go b/trie/node.go index 190bb41dd9b..b464d7ff510 100644 --- a/trie/node.go +++ b/trie/node.go @@ -7,6 +7,7 @@ import ( "runtime/debug" "time" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" @@ -269,7 +270,7 @@ func shouldStopIfContextDoneBlockingIfBusy(ctx context.Context, idleProvider Idl } func treatCommitSnapshotError(err error, hash []byte, missingNodesChan chan []byte) { - if errors.IsClosingError(err) { + if core.IsClosingError(err) { log.Debug("context closing", "hash", hash) return } diff --git a/trie/patriciaMerkleTrie.go b/trie/patriciaMerkleTrie.go index dbe5e9fa7a0..b3fa3019dd3 100644 --- a/trie/patriciaMerkleTrie.go +++ b/trie/patriciaMerkleTrie.go @@ -7,12 +7,12 @@ import ( "fmt" "sync" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/hashing" 
"github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/trie/statistics" logger "github.com/multiversx/mx-chain-logger-go" ) @@ -276,7 +276,7 @@ func (tr *patriciaMerkleTrie) recreate(root []byte, tsm common.StorageManager) ( newTr, _, err := tr.recreateFromDb(root, tsm) if err != nil { - if errors.IsClosingError(err) { + if core.IsClosingError(err) { log.Debug("could not recreate", "rootHash", root, "error", err) return nil, err } diff --git a/trie/snapshotTrieStorageManager.go b/trie/snapshotTrieStorageManager.go index 5fe208be6df..784533fec3a 100644 --- a/trie/snapshotTrieStorageManager.go +++ b/trie/snapshotTrieStorageManager.go @@ -41,7 +41,7 @@ func (stsm *snapshotTrieStorageManager) Get(key []byte) ([]byte, error) { // test point get during snapshot val, epoch, err := stsm.mainSnapshotStorer.GetFromOldEpochsWithoutAddingToCache(key) - if errors.IsClosingError(err) { + if core.IsClosingError(err) { return nil, err } if len(val) != 0 { diff --git a/trie/trieStorageManager.go b/trie/trieStorageManager.go index 5ac0979f943..b4c5e6e857c 100644 --- a/trie/trieStorageManager.go +++ b/trie/trieStorageManager.go @@ -180,7 +180,7 @@ func (tsm *trieStorageManager) Get(key []byte) ([]byte, error) { } val, err := tsm.mainStorer.Get(key) - if errors.IsClosingError(err) { + if core.IsClosingError(err) { return nil, err } if len(val) != 0 { @@ -214,7 +214,7 @@ func (tsm *trieStorageManager) GetFromCurrentEpoch(key []byte) ([]byte, error) { func (tsm *trieStorageManager) getFromOtherStorers(key []byte) ([]byte, error) { val, err := tsm.checkpointsStorer.Get(key) - if errors.IsClosingError(err) { + if core.IsClosingError(err) { return nil, err } if len(val) != 0 { @@ -516,7 +516,7 @@ func (tsm *trieStorageManager) takeCheckpoint(checkpointEntry *snapshotsQueueEnt } func treatSnapshotError(err error, message 
string, rootHash []byte, mainTrieRootHash []byte) { - if errors.IsClosingError(err) { + if core.IsClosingError(err) { log.Debug("context closing", "message", message, "rootHash", rootHash, "mainTrieRootHash", mainTrieRootHash) return } diff --git a/trie/trieStorageManagerInEpoch.go b/trie/trieStorageManagerInEpoch.go index fee9a9dad31..c4cc7c9d195 100644 --- a/trie/trieStorageManagerInEpoch.go +++ b/trie/trieStorageManagerInEpoch.go @@ -3,6 +3,7 @@ package trie import ( "fmt" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/errors" @@ -73,7 +74,7 @@ func treatGetFromEpochError(err error, epoch uint32) { return } - if errors.IsClosingError(err) { + if core.IsClosingError(err) { log.Debug("trieStorageManagerInEpoch closing err", "error", err.Error(), "epoch", epoch) return } From 0c39afdc229c8821e336b085e4d49741fb8d6081 Mon Sep 17 00:00:00 2001 From: Iuga Mihai Date: Tue, 11 Apr 2023 17:37:30 +0300 Subject: [PATCH 085/221] integrate the new web-sockets connector --- cmd/node/config/external.toml | 5 ++++- config/externalConfig.go | 7 ++++--- factory/status/statusComponents.go | 31 +++++++++++++++--------------- go.mod | 2 +- go.sum | 4 ++-- outport/factory/outportFactory.go | 22 ++++++++++----------- 6 files changed, 38 insertions(+), 33 deletions(-) diff --git a/cmd/node/config/external.toml b/cmd/node/config/external.toml index f94282e93ee..276451b9a08 100644 --- a/cmd/node/config/external.toml +++ b/cmd/node/config/external.toml @@ -41,9 +41,12 @@ # marshalled structures in block events data MarshallerType = "json" -[WebSocketConnector] +[WebSocketsConnector] # This flag shall only be used for observer nodes Enabled = false + # This flag will start the WebSockets connector as server or client + IsServer = false + # The url of the web-sockets client/server URL = "localhost:22111" WithAcknowledge = true # Currently, only "json" is 
supported. In the future, "gogo protobuf" could also be supported diff --git a/config/externalConfig.go b/config/externalConfig.go index 8cd108474c5..14ac2870bc0 100644 --- a/config/externalConfig.go +++ b/config/externalConfig.go @@ -4,7 +4,7 @@ package config type ExternalConfig struct { ElasticSearchConnector ElasticSearchConfig EventNotifierConnector EventNotifierConfig - WebSocketConnector WebSocketDriverConfig + WebSocketsConnector WebSocketsDriverConfig } // ElasticSearchConfig will hold the configuration for the elastic search @@ -38,9 +38,10 @@ type CovalentConfig struct { RouteAcknowledgeData string } -// WebSocketDriverConfig will hold the configuration for web socket driver -type WebSocketDriverConfig struct { +// WebSocketsDriverConfig will hold the configuration for web socket driver +type WebSocketsDriverConfig struct { Enabled bool + IsServer bool WithAcknowledge bool URL string MarshallerType string diff --git a/factory/status/statusComponents.go b/factory/status/statusComponents.go index b40b025abe8..232111cfe84 100644 --- a/factory/status/statusComponents.go +++ b/factory/status/statusComponents.go @@ -9,8 +9,8 @@ import ( nodeData "github.com/multiversx/mx-chain-core-go/data" outportCore "github.com/multiversx/mx-chain-core-go/data/outport" factoryMarshalizer "github.com/multiversx/mx-chain-core-go/marshal/factory" - "github.com/multiversx/mx-chain-core-go/websocketOutportDriver/data" - wsDriverFactory "github.com/multiversx/mx-chain-core-go/websocketOutportDriver/factory" + "github.com/multiversx/mx-chain-core-go/webSockets/data" + wsDriverFactory "github.com/multiversx/mx-chain-core-go/webSockets/factory" indexerFactory "github.com/multiversx/mx-chain-es-indexer-go/process/factory" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/statistics" @@ -205,7 +205,7 @@ func (pc *statusComponents) Close() error { // createOutportDriver creates a new outport.OutportHandler which is used to register outport drivers // 
once a driver is subscribed it will receive data through the implemented outport.Driver methods func (scf *statusComponentsFactory) createOutportDriver() (outport.OutportHandler, error) { - webSocketSenderDriverFactoryArgs, err := scf.makeWebSocketDriverArgs() + webSocketsSenderDriverFactoryArgs, err := scf.makeWebSocketsDriverArgs() if err != nil { return nil, err } @@ -219,9 +219,9 @@ func (scf *statusComponentsFactory) createOutportDriver() (outport.OutportHandle RetrialInterval: common.RetrialIntervalForOutportDriver, ElasticIndexerFactoryArgs: scf.makeElasticIndexerArgs(), EventNotifierFactoryArgs: eventNotifierArgs, - WebSocketSenderDriverFactoryArgs: outportDriverFactory.WrappedOutportDriverWebSocketSenderFactoryArgs{ - Enabled: scf.externalConfig.WebSocketConnector.Enabled, - OutportDriverWebSocketSenderFactoryArgs: webSocketSenderDriverFactoryArgs, + WebSocketsSenderDriverFactoryArgs: outportDriverFactory.WrappedOutportDriverWebSocketsSenderFactoryArgs{ + Enabled: scf.externalConfig.WebSocketsConnector.Enabled, + ArgsWebSocketsDriverFactory: webSocketsSenderDriverFactoryArgs, }, } @@ -266,24 +266,25 @@ func (scf *statusComponentsFactory) makeEventNotifierArgs() (*outportDriverFacto }, nil } -func (scf *statusComponentsFactory) makeWebSocketDriverArgs() (wsDriverFactory.OutportDriverWebSocketSenderFactoryArgs, error) { - if !scf.externalConfig.WebSocketConnector.Enabled { - return wsDriverFactory.OutportDriverWebSocketSenderFactoryArgs{}, nil +func (scf *statusComponentsFactory) makeWebSocketsDriverArgs() (wsDriverFactory.ArgsWebSocketsDriverFactory, error) { + if !scf.externalConfig.WebSocketsConnector.Enabled { + return wsDriverFactory.ArgsWebSocketsDriverFactory{}, nil } - marshaller, err := factoryMarshalizer.NewMarshalizer(scf.externalConfig.WebSocketConnector.MarshallerType) + marshaller, err := factoryMarshalizer.NewMarshalizer(scf.externalConfig.WebSocketsConnector.MarshallerType) if err != nil { - return 
wsDriverFactory.OutportDriverWebSocketSenderFactoryArgs{}, err + return wsDriverFactory.ArgsWebSocketsDriverFactory{}, err } - return wsDriverFactory.OutportDriverWebSocketSenderFactoryArgs{ + return wsDriverFactory.ArgsWebSocketsDriverFactory{ Marshaller: marshaller, WebSocketConfig: data.WebSocketConfig{ - URL: scf.externalConfig.WebSocketConnector.URL, - WithAcknowledge: scf.externalConfig.WebSocketConnector.WithAcknowledge, + URL: scf.externalConfig.WebSocketsConnector.URL, + WithAcknowledge: scf.externalConfig.WebSocketsConnector.WithAcknowledge, + IsServer: scf.externalConfig.WebSocketsConnector.IsServer, }, Uint64ByteSliceConverter: scf.coreComponents.Uint64ByteSliceConverter(), Log: log, - WithAcknowledge: scf.externalConfig.WebSocketConnector.WithAcknowledge, + WithAcknowledge: scf.externalConfig.WebSocketsConnector.WithAcknowledge, }, nil } diff --git a/go.mod b/go.mod index 6cb3703d79c..d9cb0e5760c 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-core-go v1.2.1-0.20230330105824-932a718276f6 + github.com/multiversx/mx-chain-core-go v1.2.1-0.20230411142928-ed395b4c04f1 github.com/multiversx/mx-chain-crypto-go v1.2.5 github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96 github.com/multiversx/mx-chain-logger-go v1.0.11 diff --git a/go.sum b/go.sum index 6dba64f6532..6b950f53b00 100644 --- a/go.sum +++ b/go.sum @@ -614,8 +614,8 @@ github.com/multiversx/mx-chain-core-go v1.1.31/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZ github.com/multiversx/mx-chain-core-go v1.1.34/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.0/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1-0.20230329082847-b78e96c3ad5a/go.mod h1:/lovncjwo+pXQ7IAERwNzwCifeH7SAWk0DGqjorX2bc= -github.com/multiversx/mx-chain-core-go 
v1.2.1-0.20230330105824-932a718276f6 h1:4Nv0uxJbfSZ1fqWcQEYyQ1SdAAluDEbHjTi0X8ZFXFs= -github.com/multiversx/mx-chain-core-go v1.2.1-0.20230330105824-932a718276f6/go.mod h1:/lovncjwo+pXQ7IAERwNzwCifeH7SAWk0DGqjorX2bc= +github.com/multiversx/mx-chain-core-go v1.2.1-0.20230411142928-ed395b4c04f1 h1:cA4bVIK03RPX37eqGkC61NVo3VyXNGPv2rZe01VPIf4= +github.com/multiversx/mx-chain-core-go v1.2.1-0.20230411142928-ed395b4c04f1/go.mod h1:EMXipgB7JzH9ozDhGZwjY1t6UQBtaVgxb1aMo/gzfEA= github.com/multiversx/mx-chain-crypto-go v1.2.5 h1:tuq3BUNMhKud5DQbZi9DiVAAHUXypizy8zPH0NpTGZk= github.com/multiversx/mx-chain-crypto-go v1.2.5/go.mod h1:teqhNyWEqfMPgNn8sgWXlgtJ1a36jGCnhs/tRpXW6r4= github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96 h1:okIfLr+NqX04eHNp9k97KuLhpYfLJOjmGZaOia9xcGg= diff --git a/outport/factory/outportFactory.go b/outport/factory/outportFactory.go index 82465385efe..15b55771966 100644 --- a/outport/factory/outportFactory.go +++ b/outport/factory/outportFactory.go @@ -3,23 +3,23 @@ package factory import ( "time" - wsDriverFactory "github.com/multiversx/mx-chain-core-go/websocketOutportDriver/factory" + wsDriverFactory "github.com/multiversx/mx-chain-core-go/webSockets/factory" indexerFactory "github.com/multiversx/mx-chain-es-indexer-go/process/factory" "github.com/multiversx/mx-chain-go/outport" ) -// WrappedOutportDriverWebSocketSenderFactoryArgs extends the wsDriverFactory.OutportDriverWebSocketSenderFactoryArgs structure with the Enabled field -type WrappedOutportDriverWebSocketSenderFactoryArgs struct { +// WrappedOutportDriverWebSocketsSenderFactoryArgs extends the wsDriverFactory.OutportDriverWebSocketSenderFactoryArgs structure with the Enabled field +type WrappedOutportDriverWebSocketsSenderFactoryArgs struct { Enabled bool - wsDriverFactory.OutportDriverWebSocketSenderFactoryArgs + wsDriverFactory.ArgsWebSocketsDriverFactory } // OutportFactoryArgs holds the factory arguments of different outport drivers type OutportFactoryArgs 
struct { - RetrialInterval time.Duration - ElasticIndexerFactoryArgs indexerFactory.ArgsIndexerFactory - EventNotifierFactoryArgs *EventNotifierFactoryArgs - WebSocketSenderDriverFactoryArgs WrappedOutportDriverWebSocketSenderFactoryArgs + RetrialInterval time.Duration + ElasticIndexerFactoryArgs indexerFactory.ArgsIndexerFactory + EventNotifierFactoryArgs *EventNotifierFactoryArgs + WebSocketsSenderDriverFactoryArgs WrappedOutportDriverWebSocketsSenderFactoryArgs } // CreateOutport will create a new instance of OutportHandler @@ -53,7 +53,7 @@ func createAndSubscribeDrivers(outport outport.OutportHandler, args *OutportFact return err } - return createAndSubscribeWebSocketDriver(outport, args.WebSocketSenderDriverFactoryArgs) + return createAndSubscribeWebSocketDriver(outport, args.WebSocketsSenderDriverFactoryArgs) } func createAndSubscribeElasticDriverIfNeeded( @@ -98,13 +98,13 @@ func checkArguments(args *OutportFactoryArgs) error { func createAndSubscribeWebSocketDriver( outport outport.OutportHandler, - args WrappedOutportDriverWebSocketSenderFactoryArgs, + args WrappedOutportDriverWebSocketsSenderFactoryArgs, ) error { if !args.Enabled { return nil } - wsFactory, err := wsDriverFactory.NewOutportDriverWebSocketSenderFactory(args.OutportDriverWebSocketSenderFactoryArgs) + wsFactory, err := wsDriverFactory.NewWebSocketsDriverFactory(args.ArgsWebSocketsDriverFactory) if err != nil { return err } From 6ab26f4e1ae07643097d083ae53c24e66a6eddd9 Mon Sep 17 00:00:00 2001 From: Iuga Mihai Date: Wed, 12 Apr 2023 10:36:06 +0300 Subject: [PATCH 086/221] extra field in config --- cmd/node/config/external.toml | 2 ++ config/externalConfig.go | 11 ++++++----- factory/status/statusComponents.go | 7 ++++--- 3 files changed, 12 insertions(+), 8 deletions(-) diff --git a/cmd/node/config/external.toml b/cmd/node/config/external.toml index 276451b9a08..3d572ce7c20 100644 --- a/cmd/node/config/external.toml +++ b/cmd/node/config/external.toml @@ -51,3 +51,5 @@ WithAcknowledge = 
true # Currently, only "json" is supported. In the future, "gogo protobuf" could also be supported MarshallerType = "json" + # The number of seconds when the client will try again to send the data + RetryDurationInSec = 5 diff --git a/config/externalConfig.go b/config/externalConfig.go index 14ac2870bc0..55655b4cb0e 100644 --- a/config/externalConfig.go +++ b/config/externalConfig.go @@ -40,9 +40,10 @@ type CovalentConfig struct { // WebSocketsDriverConfig will hold the configuration for web socket driver type WebSocketsDriverConfig struct { - Enabled bool - IsServer bool - WithAcknowledge bool - URL string - MarshallerType string + Enabled bool + IsServer bool + WithAcknowledge bool + URL string + MarshallerType string + RetryDurationInSec int } diff --git a/factory/status/statusComponents.go b/factory/status/statusComponents.go index 232111cfe84..0a57d70a683 100644 --- a/factory/status/statusComponents.go +++ b/factory/status/statusComponents.go @@ -279,9 +279,10 @@ func (scf *statusComponentsFactory) makeWebSocketsDriverArgs() (wsDriverFactory. 
return wsDriverFactory.ArgsWebSocketsDriverFactory{ Marshaller: marshaller, WebSocketConfig: data.WebSocketConfig{ - URL: scf.externalConfig.WebSocketsConnector.URL, - WithAcknowledge: scf.externalConfig.WebSocketsConnector.WithAcknowledge, - IsServer: scf.externalConfig.WebSocketsConnector.IsServer, + URL: scf.externalConfig.WebSocketsConnector.URL, + WithAcknowledge: scf.externalConfig.WebSocketsConnector.WithAcknowledge, + IsServer: scf.externalConfig.WebSocketsConnector.IsServer, + RetryDurationInSec: scf.externalConfig.WebSocketsConnector.RetryDurationInSec, }, Uint64ByteSliceConverter: scf.coreComponents.Uint64ByteSliceConverter(), Log: log, From 1899b859f0210b936217673cfb95940eadc4b6ae Mon Sep 17 00:00:00 2001 From: Iuga Mihai Date: Wed, 12 Apr 2023 10:42:27 +0300 Subject: [PATCH 087/221] small fix --- cmd/node/config/external.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/node/config/external.toml b/cmd/node/config/external.toml index 3d572ce7c20..2c2c85232cf 100644 --- a/cmd/node/config/external.toml +++ b/cmd/node/config/external.toml @@ -47,7 +47,7 @@ # This flag will start the WebSockets connector as server or client IsServer = false # The url of the web-sockets client/server - URL = "localhost:22111" + URL = "127.0.0.1:22111" WithAcknowledge = true # Currently, only "json" is supported. 
In the future, "gogo protobuf" could also be supported MarshallerType = "json" From f774a29a5ca7b9a7289259996ddb52571013aa2c Mon Sep 17 00:00:00 2001 From: Iuga Mihai Date: Wed, 12 Apr 2023 10:59:24 +0300 Subject: [PATCH 088/221] change core version --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index d9cb0e5760c..f4d085b7fb3 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-core-go v1.2.1-0.20230411142928-ed395b4c04f1 + github.com/multiversx/mx-chain-core-go v1.2.1-0.20230412075823-994f1653dc25 github.com/multiversx/mx-chain-crypto-go v1.2.5 github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96 github.com/multiversx/mx-chain-logger-go v1.0.11 diff --git a/go.sum b/go.sum index 6b950f53b00..23192ccaf08 100644 --- a/go.sum +++ b/go.sum @@ -614,8 +614,8 @@ github.com/multiversx/mx-chain-core-go v1.1.31/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZ github.com/multiversx/mx-chain-core-go v1.1.34/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.0/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1-0.20230329082847-b78e96c3ad5a/go.mod h1:/lovncjwo+pXQ7IAERwNzwCifeH7SAWk0DGqjorX2bc= -github.com/multiversx/mx-chain-core-go v1.2.1-0.20230411142928-ed395b4c04f1 h1:cA4bVIK03RPX37eqGkC61NVo3VyXNGPv2rZe01VPIf4= -github.com/multiversx/mx-chain-core-go v1.2.1-0.20230411142928-ed395b4c04f1/go.mod h1:EMXipgB7JzH9ozDhGZwjY1t6UQBtaVgxb1aMo/gzfEA= +github.com/multiversx/mx-chain-core-go v1.2.1-0.20230412075823-994f1653dc25 h1:3XrxBRkZB0qOv5JuNLumqvGaryqBOv9DIKekP4Wt/10= +github.com/multiversx/mx-chain-core-go v1.2.1-0.20230412075823-994f1653dc25/go.mod h1:EMXipgB7JzH9ozDhGZwjY1t6UQBtaVgxb1aMo/gzfEA= github.com/multiversx/mx-chain-crypto-go v1.2.5 
h1:tuq3BUNMhKud5DQbZi9DiVAAHUXypizy8zPH0NpTGZk= github.com/multiversx/mx-chain-crypto-go v1.2.5/go.mod h1:teqhNyWEqfMPgNn8sgWXlgtJ1a36jGCnhs/tRpXW6r4= github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96 h1:okIfLr+NqX04eHNp9k97KuLhpYfLJOjmGZaOia9xcGg= From 71523acfb9eec322da8c697769eaedbb6602d054 Mon Sep 17 00:00:00 2001 From: Iuga Mihai Date: Wed, 12 Apr 2023 14:11:50 +0300 Subject: [PATCH 089/221] latest core commit --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index f4d085b7fb3..c953399df4c 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-core-go v1.2.1-0.20230412075823-994f1653dc25 + github.com/multiversx/mx-chain-core-go v1.2.1-0.20230412111042-b48b7b49946f github.com/multiversx/mx-chain-crypto-go v1.2.5 github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96 github.com/multiversx/mx-chain-logger-go v1.0.11 diff --git a/go.sum b/go.sum index 23192ccaf08..a3a33aab51d 100644 --- a/go.sum +++ b/go.sum @@ -614,8 +614,8 @@ github.com/multiversx/mx-chain-core-go v1.1.31/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZ github.com/multiversx/mx-chain-core-go v1.1.34/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.0/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1-0.20230329082847-b78e96c3ad5a/go.mod h1:/lovncjwo+pXQ7IAERwNzwCifeH7SAWk0DGqjorX2bc= -github.com/multiversx/mx-chain-core-go v1.2.1-0.20230412075823-994f1653dc25 h1:3XrxBRkZB0qOv5JuNLumqvGaryqBOv9DIKekP4Wt/10= -github.com/multiversx/mx-chain-core-go v1.2.1-0.20230412075823-994f1653dc25/go.mod h1:EMXipgB7JzH9ozDhGZwjY1t6UQBtaVgxb1aMo/gzfEA= +github.com/multiversx/mx-chain-core-go v1.2.1-0.20230412111042-b48b7b49946f 
h1:2F0RjrpRWYO+7FavSoWLMBCTpWMW4jpnDKLoPRcnRtw= +github.com/multiversx/mx-chain-core-go v1.2.1-0.20230412111042-b48b7b49946f/go.mod h1:EMXipgB7JzH9ozDhGZwjY1t6UQBtaVgxb1aMo/gzfEA= github.com/multiversx/mx-chain-crypto-go v1.2.5 h1:tuq3BUNMhKud5DQbZi9DiVAAHUXypizy8zPH0NpTGZk= github.com/multiversx/mx-chain-crypto-go v1.2.5/go.mod h1:teqhNyWEqfMPgNn8sgWXlgtJ1a36jGCnhs/tRpXW6r4= github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96 h1:okIfLr+NqX04eHNp9k97KuLhpYfLJOjmGZaOia9xcGg= From 162d3346b58a2c3c44d2b55049d58aef715f6448 Mon Sep 17 00:00:00 2001 From: Iuga Mihai Date: Wed, 12 Apr 2023 14:54:11 +0300 Subject: [PATCH 090/221] commit --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index c953399df4c..5c0cc16a08b 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-core-go v1.2.1-0.20230412111042-b48b7b49946f + github.com/multiversx/mx-chain-core-go v1.2.1-0.20230412115309-ff30fe262862 github.com/multiversx/mx-chain-crypto-go v1.2.5 github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96 github.com/multiversx/mx-chain-logger-go v1.0.11 diff --git a/go.sum b/go.sum index a3a33aab51d..8c7bd4532fc 100644 --- a/go.sum +++ b/go.sum @@ -614,8 +614,8 @@ github.com/multiversx/mx-chain-core-go v1.1.31/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZ github.com/multiversx/mx-chain-core-go v1.1.34/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.0/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1-0.20230329082847-b78e96c3ad5a/go.mod h1:/lovncjwo+pXQ7IAERwNzwCifeH7SAWk0DGqjorX2bc= -github.com/multiversx/mx-chain-core-go v1.2.1-0.20230412111042-b48b7b49946f h1:2F0RjrpRWYO+7FavSoWLMBCTpWMW4jpnDKLoPRcnRtw= 
-github.com/multiversx/mx-chain-core-go v1.2.1-0.20230412111042-b48b7b49946f/go.mod h1:EMXipgB7JzH9ozDhGZwjY1t6UQBtaVgxb1aMo/gzfEA= +github.com/multiversx/mx-chain-core-go v1.2.1-0.20230412115309-ff30fe262862 h1:Bn4F47ZutkGwiXDb3jiwPEDvZJyHbgD8kaN3Hu5Y8AI= +github.com/multiversx/mx-chain-core-go v1.2.1-0.20230412115309-ff30fe262862/go.mod h1:EMXipgB7JzH9ozDhGZwjY1t6UQBtaVgxb1aMo/gzfEA= github.com/multiversx/mx-chain-crypto-go v1.2.5 h1:tuq3BUNMhKud5DQbZi9DiVAAHUXypizy8zPH0NpTGZk= github.com/multiversx/mx-chain-crypto-go v1.2.5/go.mod h1:teqhNyWEqfMPgNn8sgWXlgtJ1a36jGCnhs/tRpXW6r4= github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96 h1:okIfLr+NqX04eHNp9k97KuLhpYfLJOjmGZaOia9xcGg= From 2ebe5485b5d7db0be9f9e2e3e7c0fa2dab05b12f Mon Sep 17 00:00:00 2001 From: Iuga Mihai Date: Wed, 12 Apr 2023 15:06:38 +0300 Subject: [PATCH 091/221] new commit hash --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 5c0cc16a08b..cce8e71fee2 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-core-go v1.2.1-0.20230412115309-ff30fe262862 + github.com/multiversx/mx-chain-core-go v1.2.1-0.20230412120535-3d7236f4510d github.com/multiversx/mx-chain-crypto-go v1.2.5 github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96 github.com/multiversx/mx-chain-logger-go v1.0.11 diff --git a/go.sum b/go.sum index 8c7bd4532fc..4da83388a6c 100644 --- a/go.sum +++ b/go.sum @@ -614,8 +614,8 @@ github.com/multiversx/mx-chain-core-go v1.1.31/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZ github.com/multiversx/mx-chain-core-go v1.1.34/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.0/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go 
v1.2.1-0.20230329082847-b78e96c3ad5a/go.mod h1:/lovncjwo+pXQ7IAERwNzwCifeH7SAWk0DGqjorX2bc= -github.com/multiversx/mx-chain-core-go v1.2.1-0.20230412115309-ff30fe262862 h1:Bn4F47ZutkGwiXDb3jiwPEDvZJyHbgD8kaN3Hu5Y8AI= -github.com/multiversx/mx-chain-core-go v1.2.1-0.20230412115309-ff30fe262862/go.mod h1:EMXipgB7JzH9ozDhGZwjY1t6UQBtaVgxb1aMo/gzfEA= +github.com/multiversx/mx-chain-core-go v1.2.1-0.20230412120535-3d7236f4510d h1:iyGcH5HJB83y79qD4SSOOLkL/6F154wrrS2d9GXfHC0= +github.com/multiversx/mx-chain-core-go v1.2.1-0.20230412120535-3d7236f4510d/go.mod h1:EMXipgB7JzH9ozDhGZwjY1t6UQBtaVgxb1aMo/gzfEA= github.com/multiversx/mx-chain-crypto-go v1.2.5 h1:tuq3BUNMhKud5DQbZi9DiVAAHUXypizy8zPH0NpTGZk= github.com/multiversx/mx-chain-crypto-go v1.2.5/go.mod h1:teqhNyWEqfMPgNn8sgWXlgtJ1a36jGCnhs/tRpXW6r4= github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96 h1:okIfLr+NqX04eHNp9k97KuLhpYfLJOjmGZaOia9xcGg= From be9bf90f972302ec79500b2d913d304ce745f4ef Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Thu, 13 Apr 2023 15:59:33 +0300 Subject: [PATCH 092/221] tests for factory/processing --- errors/errors.go | 21 +- factory/processing/blockProcessorCreator.go | 5 +- factory/processing/export_test.go | 8 - factory/processing/processComponents.go | 203 ++- .../processComponentsHandler_test.go | 272 ++-- factory/processing/processComponents_test.go | 1282 ++++++++++++++++- genesis/errors.go | 6 +- genesis/process/genesisBlockCreator.go | 3 + .../mock/p2pAntifloodHandlerStub.go | 54 +- process/track/baseBlockTrack.go | 3 + process/track/baseBlockTrack_test.go | 18 + .../bootstrapComponentsStub.go | 4 + 12 files changed, 1531 insertions(+), 348 deletions(-) diff --git a/errors/errors.go b/errors/errors.go index b9b526eb95b..d30593eb8c0 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -296,15 +296,9 @@ var ErrNilP2pPublicKey = errors.New("nil p2p public key") // ErrNilRater signals that a nil rater was provided var ErrNilRater = errors.New("nil 
rater") -// ErrNilRatingData signals that nil rating data were provided -var ErrNilRatingData = errors.New("nil rating data") - // ErrNilRatingsInfoHandler signals that nil ratings data information was provided var ErrNilRatingsInfoHandler = errors.New("nil ratings info handler") -// ErrNilRequestedItemHandler signals that a nil requested items handler was provided -var ErrNilRequestedItemHandler = errors.New("nil requested item handler") - // ErrNilRequestHandler signals that a nil request handler was provided var ErrNilRequestHandler = errors.New("nil request handler") @@ -317,9 +311,6 @@ var ErrNilRoundHandler = errors.New("nil roundHandler") // ErrNilShardCoordinator signals that a nil shard coordinator was provided var ErrNilShardCoordinator = errors.New("nil shard coordinator provided") -// ErrNilSmartContractParser signals that a nil smart contract parser was provided -var ErrNilSmartContractParser = errors.New("nil smart contract parser") - // ErrNilSoftwareVersion signals that a nil software version was provided var ErrNilSoftwareVersion = errors.New("nil software version") @@ -395,12 +386,6 @@ var ErrNilVmMarshalizer = errors.New("nil vm marshalizer") // ErrNilWatchdog signals that a nil watchdog was provided var ErrNilWatchdog = errors.New("nil watchdog") -// ErrNilWhiteListHandler signals that a nil whitelist handler was provided -var ErrNilWhiteListHandler = errors.New("nil white list handler") - -// ErrNilWhiteListVerifiedTxs signals that a nil whitelist for verified transactions was prvovided -var ErrNilWhiteListVerifiedTxs = errors.New("nil white list verified txs") - // ErrPollingFunctionRegistration signals an error while registering the polling function registration var ErrPollingFunctionRegistration = errors.New("cannot register handler func for num of connected peers") @@ -479,9 +464,6 @@ var ErrNilProcessStatusHandler = errors.New("nil process status handler") // ErrNilESDTDataStorage signals that a nil esdt data storage has been provided var 
ErrNilESDTDataStorage = errors.New("nil esdt data storage") -// ErrDBIsClosed is raised when the DB is closed -var ErrDBIsClosed = errors.New("DB is closed") - // ErrNilEnableEpochsHandler signals that a nil enable epochs handler was provided var ErrNilEnableEpochsHandler = errors.New("nil enable epochs handler") @@ -541,3 +523,6 @@ var ErrNilPersistentHandler = errors.New("nil persistent handler") // ErrNilGenesisNodesSetupHandler signals that a nil genesis nodes setup handler has been provided var ErrNilGenesisNodesSetupHandler = errors.New("nil genesis nodes setup handler") + +// ErrNilHistoryRepository signals that history processor is nil +var ErrNilHistoryRepository = errors.New("history repository is nil") diff --git a/factory/processing/blockProcessorCreator.go b/factory/processing/blockProcessorCreator.go index 9f8d8e8a892..72ebe28491e 100644 --- a/factory/processing/blockProcessorCreator.go +++ b/factory/processing/blockProcessorCreator.go @@ -65,7 +65,8 @@ func (pcf *processComponentsFactory) newBlockProcessor( processedMiniBlocksTracker process.ProcessedMiniBlocksTracker, receiptsRepository mainFactory.ReceiptsRepository, ) (*blockProcessorAndVmFactories, error) { - if pcf.bootstrapComponents.ShardCoordinator().SelfId() < pcf.bootstrapComponents.ShardCoordinator().NumberOfShards() { + shardCoordinator := pcf.bootstrapComponents.ShardCoordinator() + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { return pcf.newShardBlockProcessor( requestHandler, forkDetector, @@ -81,7 +82,7 @@ func (pcf *processComponentsFactory) newBlockProcessor( receiptsRepository, ) } - if pcf.bootstrapComponents.ShardCoordinator().SelfId() == core.MetachainShardId { + if shardCoordinator.SelfId() == core.MetachainShardId { return pcf.newMetaBlockProcessor( requestHandler, forkDetector, diff --git a/factory/processing/export_test.go b/factory/processing/export_test.go index f9cae468a41..1a1c90a383f 100644 --- a/factory/processing/export_test.go +++ 
b/factory/processing/export_test.go @@ -1,12 +1,9 @@ package processing import ( - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/factory" - "github.com/multiversx/mx-chain-go/genesis" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/txsimulator" ) @@ -48,8 +45,3 @@ func (pcf *processComponentsFactory) NewBlockProcessor( return blockProcessorComponents.blockProcessor, blockProcessorComponents.vmFactoryForTxSimulate, nil } - -// IndexGenesisBlocks - -func (pcf *processComponentsFactory) IndexGenesisBlocks(genesisBlocks map[uint32]data.HeaderHandler, indexingData map[uint32]*genesis.IndexingData) error { - return pcf.indexGenesisBlocks(genesisBlocks, indexingData, map[string]*outport.AlteredAccount{}) -} diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index f64de2f7447..e38962a64bc 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -30,7 +30,7 @@ import ( "github.com/multiversx/mx-chain-go/epochStart/metachain" "github.com/multiversx/mx-chain-go/epochStart/notifier" "github.com/multiversx/mx-chain-go/epochStart/shardchain" - errErd "github.com/multiversx/mx-chain-go/errors" + errorsMx "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory" mainFactory "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/factory/disabled" @@ -717,6 +717,12 @@ func (pcf *processComponentsFactory) newValidatorStatisticsProcessor() (process. 
if hardforkConfig.AfterHardFork { ratingEnabledEpoch = hardforkConfig.StartEpoch + hardforkConfig.ValidatorGracePeriodInEpochs } + + genesisHeader := pcf.data.Blockchain().GetGenesisHeader() + if check.IfNil(genesisHeader) { + return nil, errorsMx.ErrGenesisBlockNotInitialized + } + arguments := peer.ArgValidatorStatisticsProcessor{ PeerAdapter: pcf.state.PeerAccounts(), PubkeyConv: pcf.coreData.ValidatorPubKeyConverter(), @@ -731,20 +737,16 @@ func (pcf *processComponentsFactory) newValidatorStatisticsProcessor() (process. RewardsHandler: pcf.coreData.EconomicsData(), NodesSetup: pcf.coreData.GenesisNodesSetup(), RatingEnableEpoch: ratingEnabledEpoch, - GenesisNonce: pcf.data.Blockchain().GetGenesisHeader().GetNonce(), + GenesisNonce: genesisHeader.GetNonce(), EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), } - validatorStatisticsProcessor, err := peer.NewValidatorStatisticsProcessor(arguments) - if err != nil { - return nil, err - } - - return validatorStatisticsProcessor, nil + return peer.NewValidatorStatisticsProcessor(arguments) } func (pcf *processComponentsFactory) newEpochStartTrigger(requestHandler epochStart.RequestHandler) (epochStart.TriggerHandler, error) { - if pcf.bootstrapComponents.ShardCoordinator().SelfId() < pcf.bootstrapComponents.ShardCoordinator().NumberOfShards() { + shardCoordinator := pcf.bootstrapComponents.ShardCoordinator() + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { argsHeaderValidator := block.ArgsHeaderValidator{ Hasher: pcf.coreData.Hasher(), Marshalizer: pcf.coreData.InternalMarshalizer(), @@ -782,20 +784,20 @@ func (pcf *processComponentsFactory) newEpochStartTrigger(requestHandler epochSt AppStatusHandler: pcf.statusCoreComponents.AppStatusHandler(), EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), } - epochStartTrigger, err := shardchain.NewEpochStartTrigger(argEpochStart) - if err != nil { - return nil, errors.New("error creating new start of epoch trigger" + err.Error()) - } - - 
return epochStartTrigger, nil + return shardchain.NewEpochStartTrigger(argEpochStart) } - if pcf.bootstrapComponents.ShardCoordinator().SelfId() == core.MetachainShardId { + if shardCoordinator.SelfId() == core.MetachainShardId { + genesisHeader := pcf.data.Blockchain().GetGenesisHeader() + if check.IfNil(genesisHeader) { + return nil, errorsMx.ErrGenesisBlockNotInitialized + } + argEpochStart := &metachain.ArgsNewMetaEpochStartTrigger{ GenesisTime: time.Unix(pcf.coreData.GenesisNodesSetup().GetStartTime(), 0), Settings: &pcf.config.EpochStartConfig, Epoch: pcf.bootstrapComponents.EpochBootstrapParams().Epoch(), - EpochStartRound: pcf.data.Blockchain().GetGenesisHeader().GetRound(), + EpochStartRound: genesisHeader.GetRound(), EpochStartNotifier: pcf.coreData.EpochStartNotifierWithConfirm(), Storage: pcf.data.StorageService(), Marshalizer: pcf.coreData.InternalMarshalizer(), @@ -803,12 +805,7 @@ func (pcf *processComponentsFactory) newEpochStartTrigger(requestHandler epochSt AppStatusHandler: pcf.statusCoreComponents.AppStatusHandler(), DataPool: pcf.data.Datapool(), } - epochStartTrigger, err := metachain.NewEpochStartTrigger(argEpochStart) - if err != nil { - return nil, errors.New("error creating new start of epoch trigger" + err.Error()) - } - - return epochStartTrigger, nil + return metachain.NewEpochStartTrigger(argEpochStart) } return nil, errors.New("error creating new start of epoch trigger because of invalid shard id") @@ -870,7 +867,7 @@ func (pcf *processComponentsFactory) indexAndReturnGenesisAccounts() (map[string rootHash, err := pcf.state.AccountsAdapter().RootHash() if err != nil { - return nil, err + return map[string]*outport.AlteredAccount{}, err } leavesChannels := &common.TrieIteratorChannels{ @@ -879,7 +876,7 @@ func (pcf *processComponentsFactory) indexAndReturnGenesisAccounts() (map[string } err = pcf.state.AccountsAdapter().GetAllLeaves(leavesChannels, context.Background(), rootHash) if err != nil { - return nil, err + return 
map[string]*outport.AlteredAccount{}, err } genesisAccounts := make(map[string]*outport.AlteredAccount, 0) @@ -904,7 +901,7 @@ func (pcf *processComponentsFactory) indexAndReturnGenesisAccounts() (map[string err = leavesChannels.ErrChan.ReadFromChanNonBlocking() if err != nil { - return nil, err + return map[string]*outport.AlteredAccount{}, err } shardID := pcf.bootstrapComponents.ShardCoordinator().SelfId() @@ -931,12 +928,7 @@ func (pcf *processComponentsFactory) setGenesisHeader(genesisBlocks map[uint32]d return errors.New("genesis block does not exist") } - err := pcf.data.Blockchain().SetGenesisHeader(genesisBlock) - if err != nil { - return err - } - - return nil + return pcf.data.Blockchain().SetGenesisHeader(genesisBlock) } func (pcf *processComponentsFactory) prepareGenesisBlock( @@ -960,12 +952,7 @@ func (pcf *processComponentsFactory) prepareGenesisBlock( pcf.data.Blockchain().SetGenesisHeaderHash(genesisBlockHash) nonceToByteSlice := pcf.coreData.Uint64ByteSliceConverter().ToByteSlice(genesisBlock.GetNonce()) - err = pcf.saveGenesisHeaderToStorage(genesisBlock, genesisBlockHash, nonceToByteSlice) - if err != nil { - return err - } - - return nil + return pcf.saveGenesisHeaderToStorage(genesisBlock, genesisBlockHash, nonceToByteSlice) } func (pcf *processComponentsFactory) saveGenesisHeaderToStorage( @@ -1158,17 +1145,12 @@ func (pcf *processComponentsFactory) indexGenesisBlocks( return err } - err = pcf.saveAlteredGenesisHeaderToStorage( + return pcf.saveAlteredGenesisHeaderToStorage( genesisBlockHeader, genesisBlockHash, genesisBody, intraShardMiniBlocks, txsPoolPerShard) - if err != nil { - return err - } - - return nil } func (pcf *processComponentsFactory) saveAlteredGenesisHeaderToStorage( @@ -1257,13 +1239,14 @@ func (pcf *processComponentsFactory) newBlockTracker( requestHandler process.RequestHandler, genesisBlocks map[uint32]data.HeaderHandler, ) (process.BlockTracker, error) { + shardCoordinator := pcf.bootstrapComponents.ShardCoordinator() 
argBaseTracker := track.ArgBaseTracker{ Hasher: pcf.coreData.Hasher(), HeaderValidator: headerValidator, Marshalizer: pcf.coreData.InternalMarshalizer(), RequestHandler: requestHandler, RoundHandler: pcf.coreData.RoundHandler(), - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + ShardCoordinator: shardCoordinator, Store: pcf.data.StorageService(), StartHeaders: genesisBlocks, PoolsHolder: pcf.data.Datapool(), @@ -1271,7 +1254,7 @@ func (pcf *processComponentsFactory) newBlockTracker( FeeHandler: pcf.coreData.EconomicsData(), } - if pcf.bootstrapComponents.ShardCoordinator().SelfId() < pcf.bootstrapComponents.ShardCoordinator().NumberOfShards() { + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { arguments := track.ArgShardTracker{ ArgBaseTracker: argBaseTracker, } @@ -1279,7 +1262,7 @@ func (pcf *processComponentsFactory) newBlockTracker( return track.NewShardBlockTrack(arguments) } - if pcf.bootstrapComponents.ShardCoordinator().SelfId() == core.MetachainShardId { + if shardCoordinator.SelfId() == core.MetachainShardId { arguments := track.ArgMetaTracker{ ArgBaseTracker: argBaseTracker, } @@ -1383,11 +1366,7 @@ func (pcf *processComponentsFactory) newMetaResolverContainerFactory( PeersRatingHandler: pcf.network.PeersRatingHandler(), PayloadValidator: payloadValidator, } - resolversContainerFactory, err := resolverscontainer.NewMetaResolversContainerFactory(resolversContainerFactoryArgs) - if err != nil { - return nil, err - } - return resolversContainerFactory, nil + return resolverscontainer.NewMetaResolversContainerFactory(resolversContainerFactoryArgs) } func (pcf *processComponentsFactory) newInterceptorContainerFactory( @@ -1399,7 +1378,8 @@ func (pcf *processComponentsFactory) newInterceptorContainerFactory( peerShardMapper *networksharding.PeerShardMapper, hardforkTrigger factory.HardforkTrigger, ) (process.InterceptorsContainerFactory, process.TimeCacher, error) { - if pcf.bootstrapComponents.ShardCoordinator().SelfId() < 
pcf.bootstrapComponents.ShardCoordinator().NumberOfShards() { + shardCoordinator := pcf.bootstrapComponents.ShardCoordinator() + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { return pcf.newShardInterceptorContainerFactory( headerSigVerifier, headerIntegrityVerifier, @@ -1410,7 +1390,7 @@ func (pcf *processComponentsFactory) newInterceptorContainerFactory( hardforkTrigger, ) } - if pcf.bootstrapComponents.ShardCoordinator().SelfId() == core.MetachainShardId { + if shardCoordinator.SelfId() == core.MetachainShardId { return pcf.newMetaInterceptorContainerFactory( headerSigVerifier, headerIntegrityVerifier, @@ -1649,10 +1629,11 @@ func (pcf *processComponentsFactory) newForkDetector( headerBlackList process.TimeCacher, blockTracker process.BlockTracker, ) (process.ForkDetector, error) { - if pcf.bootstrapComponents.ShardCoordinator().SelfId() < pcf.bootstrapComponents.ShardCoordinator().NumberOfShards() { + shardCoordinator := pcf.bootstrapComponents.ShardCoordinator() + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { return sync.NewShardForkDetector(pcf.coreData.RoundHandler(), headerBlackList, blockTracker, pcf.coreData.GenesisNodesSetup().GetStartTime()) } - if pcf.bootstrapComponents.ShardCoordinator().SelfId() == core.MetachainShardId { + if shardCoordinator.SelfId() == core.MetachainShardId { return sync.NewMetaForkDetector(pcf.coreData.RoundHandler(), headerBlackList, blockTracker, pcf.coreData.GenesisNodesSetup().GetStartTime()) } @@ -1793,12 +1774,7 @@ func createNetworkShardingCollector( NodesCoordinator: nodesCoordinator, PreferredPeersHolder: preferredPeersHolder, } - psm, err := networksharding.NewPeerShardMapper(arg) - if err != nil { - return nil, err - } - - return psm, nil + return networksharding.NewPeerShardMapper(arg) } func createCache(cacheConfig config.CacheConfig) (storage.Cacher, error) { @@ -1808,82 +1784,91 @@ func createCache(cacheConfig config.CacheConfig) (storage.Cacher, error) { func 
checkProcessComponentsArgs(args ProcessComponentsFactoryArgs) error { baseErrMessage := "error creating process components" if check.IfNil(args.AccountsParser) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilAccountsParser) + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilAccountsParser) } - if check.IfNil(args.SmartContractParser) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilSmartContractParser) + if check.IfNil(args.GasSchedule) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilGasSchedule) } - if args.GasSchedule == nil { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilGasSchedule) + if check.IfNil(args.Data) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilDataComponentsHolder) } - if check.IfNil(args.NodesCoordinator) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilNodesCoordinator) + if check.IfNil(args.Data.Blockchain()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilBlockChainHandler) } - if check.IfNil(args.Data) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilDataComponentsHolder) + if check.IfNil(args.Data.Datapool()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilDataPoolsHolder) + } + if check.IfNil(args.Data.StorageService()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilStorageService) } if check.IfNil(args.CoreData) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilCoreComponentsHolder) + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilCoreComponentsHolder) } - if args.CoreData.EconomicsData() == nil { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilEconomicsData) + if check.IfNil(args.CoreData.EconomicsData()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilEconomicsData) } - if check.IfNil(args.CoreData.RoundHandler()) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilRoundHandler) + if check.IfNil(args.CoreData.GenesisNodesSetup()) { + 
return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilGenesisNodesSetupHandler) } - if check.IfNil(args.Crypto) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilCryptoComponentsHolder) + if check.IfNil(args.CoreData.AddressPubKeyConverter()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilAddressPublicKeyConverter) } - if check.IfNil(args.State) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilStateComponentsHolder) + if check.IfNil(args.CoreData.EpochNotifier()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilEpochNotifier) } - if check.IfNil(args.Network) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilNetworkComponentsHolder) + if check.IfNil(args.CoreData.ValidatorPubKeyConverter()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilPubKeyConverter) } - if check.IfNil(args.RequestedItemsHandler) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilRequestedItemHandler) + if check.IfNil(args.CoreData.InternalMarshalizer()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilInternalMarshalizer) } - if check.IfNil(args.WhiteListHandler) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilWhiteListHandler) + if check.IfNil(args.CoreData.Uint64ByteSliceConverter()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilUint64ByteSliceConverter) } - if check.IfNil(args.WhiteListerVerifiedTxs) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilWhiteListVerifiedTxs) + if check.IfNil(args.Crypto) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilCryptoComponentsHolder) } - if check.IfNil(args.CoreData.EpochStartNotifierWithConfirm()) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilEpochStartNotifier) + if check.IfNil(args.Crypto.BlockSignKeyGen()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilBlockSignKeyGen) } - if check.IfNil(args.CoreData.Rater()) { - return fmt.Errorf("%s: %w", baseErrMessage, 
errErd.ErrNilRater) + if check.IfNil(args.State) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilStateComponentsHolder) } - if check.IfNil(args.CoreData.RatingsData()) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilRatingData) + if check.IfNil(args.State.AccountsAdapter()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilAccountsAdapter) } - if check.IfNil(args.CoreData.ValidatorPubKeyConverter()) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilPubKeyConverter) + if check.IfNil(args.Network) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilNetworkComponentsHolder) } - if args.SystemSCConfig == nil { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilSystemSCConfig) + if check.IfNil(args.Network.NetworkMessenger()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilMessenger) } - if check.IfNil(args.CoreData.EpochNotifier()) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilEpochNotifier) + if check.IfNil(args.Network.InputAntiFloodHandler()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilInputAntiFloodHandler) } - if check.IfNil(args.CoreData.EnableEpochsHandler()) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilEnableEpochsHandler) + if args.SystemSCConfig == nil { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilSystemSCConfig) } if check.IfNil(args.BootstrapComponents) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilBootstrapComponentsHolder) + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilBootstrapComponentsHolder) } if check.IfNil(args.BootstrapComponents.ShardCoordinator()) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilShardCoordinator) + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilShardCoordinator) + } + if check.IfNil(args.BootstrapComponents.EpochBootstrapParams()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilBootstrapParamsHandler) } if 
check.IfNil(args.StatusComponents) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilStatusComponentsHolder) + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilStatusComponentsHolder) } - if check.IfNil(args.StatusCoreComponents) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilStatusCoreComponents) + if check.IfNil(args.StatusComponents.OutportHandler()) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilOutportHandler) } - if check.IfNil(args.StatusCoreComponents.AppStatusHandler()) { - return fmt.Errorf("%s: %w", baseErrMessage, errErd.ErrNilAppStatusHandler) + if check.IfNil(args.HistoryRepo) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilHistoryRepository) + } + if check.IfNil(args.StatusCoreComponents) { + return fmt.Errorf("%s: %w", baseErrMessage, errorsMx.ErrNilStatusCoreComponents) } return nil diff --git a/factory/processing/processComponentsHandler_test.go b/factory/processing/processComponentsHandler_test.go index b8c00301261..05b5b6382bb 100644 --- a/factory/processing/processComponentsHandler_test.go +++ b/factory/processing/processComponentsHandler_test.go @@ -3,153 +3,173 @@ package processing_test import ( "testing" - "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" - "github.com/multiversx/mx-chain-go/factory/mock" + errorsMx "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/factory" processComp "github.com/multiversx/mx-chain-go/factory/processing" - componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" "github.com/stretchr/testify/require" ) -// ------------ Test TestManagedProcessComponents -------------------- -func TestManagedProcessComponents_CreateWithInvalidArgsShouldErr(t *testing.T) { +func TestNewManagedProcessComponents(t *testing.T) { t.Parallel() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - processArgs := 
componentsMock.GetProcessComponentsFactoryArgs(shardCoordinator) - _ = processArgs.CoreData.SetInternalMarshalizer(nil) - processComponentsFactory, _ := processComp.NewProcessComponentsFactory(processArgs) - managedProcessComponents, err := processComp.NewManagedProcessComponents(processComponentsFactory) - require.NoError(t, err) - err = managedProcessComponents.Create() - require.Error(t, err) - require.Nil(t, managedProcessComponents.NodesCoordinator()) + t.Run("nil factory should error", func(t *testing.T) { + t.Parallel() + + managedProcessComponents, err := processComp.NewManagedProcessComponents(nil) + require.Equal(t, errorsMx.ErrNilProcessComponentsFactory, err) + require.Nil(t, managedProcessComponents) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + processComponentsFactory, _ := processComp.NewProcessComponentsFactory(createMockProcessComponentsFactoryArgs()) + managedProcessComponents, err := processComp.NewManagedProcessComponents(processComponentsFactory) + require.NoError(t, err) + require.NotNil(t, managedProcessComponents) + }) } -func TestManagedProcessComponents_CreateShouldWork(t *testing.T) { +func TestManagedProcessComponents_Create(t *testing.T) { t.Parallel() - coreComponents := componentsMock.GetCoreComponents() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) - shardCoordinator.SelfIDCalled = func() uint32 { - return core.MetachainShardId - } - shardCoordinator.ComputeIdCalled = func(address []byte) uint32 { - if core.IsSmartContractOnMetachain(address[len(address)-1:], address) { - return core.MetachainShardId - } - - return 0 - } - - shardCoordinator.CurrentShard = core.MetachainShardId - dataComponents := componentsMock.GetDataComponents(coreComponents, shardCoordinator) - cryptoComponents := componentsMock.GetCryptoComponents(coreComponents) - networkComponents := componentsMock.GetNetworkComponents(cryptoComponents) - stateComponents := componentsMock.GetStateComponents(coreComponents, shardCoordinator) 
- processArgs := componentsMock.GetProcessArgs( - shardCoordinator, - coreComponents, - dataComponents, - cryptoComponents, - stateComponents, - networkComponents, - ) - - componentsMock.SetShardCoordinator(t, processArgs.BootstrapComponents, shardCoordinator) - - processComponentsFactory, err := processComp.NewProcessComponentsFactory(processArgs) - require.Nil(t, err) - managedProcessComponents, err := processComp.NewManagedProcessComponents(processComponentsFactory) - require.NoError(t, err) - require.True(t, check.IfNil(managedProcessComponents.NodesCoordinator())) - require.True(t, check.IfNil(managedProcessComponents.InterceptorsContainer())) - require.True(t, check.IfNil(managedProcessComponents.ResolversFinder())) - require.True(t, check.IfNil(managedProcessComponents.RoundHandler())) - require.True(t, check.IfNil(managedProcessComponents.ForkDetector())) - require.True(t, check.IfNil(managedProcessComponents.BlockProcessor())) - require.True(t, check.IfNil(managedProcessComponents.EpochStartTrigger())) - require.True(t, check.IfNil(managedProcessComponents.EpochStartNotifier())) - require.True(t, check.IfNil(managedProcessComponents.BlackListHandler())) - require.True(t, check.IfNil(managedProcessComponents.BootStorer())) - require.True(t, check.IfNil(managedProcessComponents.HeaderSigVerifier())) - require.True(t, check.IfNil(managedProcessComponents.ValidatorsStatistics())) - require.True(t, check.IfNil(managedProcessComponents.ValidatorsProvider())) - require.True(t, check.IfNil(managedProcessComponents.BlockTracker())) - require.True(t, check.IfNil(managedProcessComponents.PendingMiniBlocksHandler())) - require.True(t, check.IfNil(managedProcessComponents.RequestHandler())) - require.True(t, check.IfNil(managedProcessComponents.TxLogsProcessor())) - require.True(t, check.IfNil(managedProcessComponents.HeaderConstructionValidator())) - require.True(t, check.IfNil(managedProcessComponents.HeaderIntegrityVerifier())) - require.True(t, 
check.IfNil(managedProcessComponents.CurrentEpochProvider())) - require.True(t, check.IfNil(managedProcessComponents.NodeRedundancyHandler())) - require.True(t, check.IfNil(managedProcessComponents.WhiteListHandler())) - require.True(t, check.IfNil(managedProcessComponents.WhiteListerVerifiedTxs())) - require.True(t, check.IfNil(managedProcessComponents.RequestedItemsHandler())) - require.True(t, check.IfNil(managedProcessComponents.ImportStartHandler())) - require.True(t, check.IfNil(managedProcessComponents.HistoryRepository())) - require.True(t, check.IfNil(managedProcessComponents.TransactionSimulatorProcessor())) - require.True(t, check.IfNil(managedProcessComponents.FallbackHeaderValidator())) - require.True(t, check.IfNil(managedProcessComponents.PeerShardMapper())) - require.True(t, check.IfNil(managedProcessComponents.ShardCoordinator())) - require.True(t, check.IfNil(managedProcessComponents.TxsSenderHandler())) - require.True(t, check.IfNil(managedProcessComponents.HardforkTrigger())) - require.True(t, check.IfNil(managedProcessComponents.ProcessedMiniBlocksTracker())) - - err = managedProcessComponents.Create() + t.Run("invalid params should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Config.PublicKeyPeerId.Type = "invalid" + processComponentsFactory, _ := processComp.NewProcessComponentsFactory(args) + managedProcessComponents, _ := processComp.NewManagedProcessComponents(processComponentsFactory) + require.NotNil(t, managedProcessComponents) + + err := managedProcessComponents.Create() + require.Error(t, err) + }) + t.Run("should work with getters", func(t *testing.T) { + t.Parallel() + + processComponentsFactory, _ := processComp.NewProcessComponentsFactory(createMockProcessComponentsFactoryArgs()) + managedProcessComponents, _ := processComp.NewManagedProcessComponents(processComponentsFactory) + require.NotNil(t, managedProcessComponents) + + require.True(t, 
check.IfNil(managedProcessComponents.NodesCoordinator())) + require.True(t, check.IfNil(managedProcessComponents.InterceptorsContainer())) + require.True(t, check.IfNil(managedProcessComponents.ResolversFinder())) + require.True(t, check.IfNil(managedProcessComponents.RoundHandler())) + require.True(t, check.IfNil(managedProcessComponents.ForkDetector())) + require.True(t, check.IfNil(managedProcessComponents.BlockProcessor())) + require.True(t, check.IfNil(managedProcessComponents.EpochStartTrigger())) + require.True(t, check.IfNil(managedProcessComponents.EpochStartNotifier())) + require.True(t, check.IfNil(managedProcessComponents.BlackListHandler())) + require.True(t, check.IfNil(managedProcessComponents.BootStorer())) + require.True(t, check.IfNil(managedProcessComponents.HeaderSigVerifier())) + require.True(t, check.IfNil(managedProcessComponents.ValidatorsStatistics())) + require.True(t, check.IfNil(managedProcessComponents.ValidatorsProvider())) + require.True(t, check.IfNil(managedProcessComponents.BlockTracker())) + require.True(t, check.IfNil(managedProcessComponents.PendingMiniBlocksHandler())) + require.True(t, check.IfNil(managedProcessComponents.RequestHandler())) + require.True(t, check.IfNil(managedProcessComponents.TxLogsProcessor())) + require.True(t, check.IfNil(managedProcessComponents.HeaderConstructionValidator())) + require.True(t, check.IfNil(managedProcessComponents.HeaderIntegrityVerifier())) + require.True(t, check.IfNil(managedProcessComponents.CurrentEpochProvider())) + require.True(t, check.IfNil(managedProcessComponents.NodeRedundancyHandler())) + require.True(t, check.IfNil(managedProcessComponents.WhiteListHandler())) + require.True(t, check.IfNil(managedProcessComponents.WhiteListerVerifiedTxs())) + require.True(t, check.IfNil(managedProcessComponents.RequestedItemsHandler())) + require.True(t, check.IfNil(managedProcessComponents.ImportStartHandler())) + require.True(t, check.IfNil(managedProcessComponents.HistoryRepository())) + 
require.True(t, check.IfNil(managedProcessComponents.TransactionSimulatorProcessor())) + require.True(t, check.IfNil(managedProcessComponents.FallbackHeaderValidator())) + require.True(t, check.IfNil(managedProcessComponents.PeerShardMapper())) + require.True(t, check.IfNil(managedProcessComponents.ShardCoordinator())) + require.True(t, check.IfNil(managedProcessComponents.TxsSenderHandler())) + require.True(t, check.IfNil(managedProcessComponents.HardforkTrigger())) + require.True(t, check.IfNil(managedProcessComponents.ProcessedMiniBlocksTracker())) + require.True(t, check.IfNil(managedProcessComponents.AccountsParser())) + require.True(t, check.IfNil(managedProcessComponents.ScheduledTxsExecutionHandler())) + require.True(t, check.IfNil(managedProcessComponents.ESDTDataStorageHandlerForAPI())) + require.True(t, check.IfNil(managedProcessComponents.ReceiptsRepository())) + + err := managedProcessComponents.Create() + require.NoError(t, err) + require.False(t, check.IfNil(managedProcessComponents.NodesCoordinator())) + require.False(t, check.IfNil(managedProcessComponents.InterceptorsContainer())) + require.False(t, check.IfNil(managedProcessComponents.ResolversFinder())) + require.False(t, check.IfNil(managedProcessComponents.RoundHandler())) + require.False(t, check.IfNil(managedProcessComponents.ForkDetector())) + require.False(t, check.IfNil(managedProcessComponents.BlockProcessor())) + require.False(t, check.IfNil(managedProcessComponents.EpochStartTrigger())) + require.False(t, check.IfNil(managedProcessComponents.EpochStartNotifier())) + require.False(t, check.IfNil(managedProcessComponents.BlackListHandler())) + require.False(t, check.IfNil(managedProcessComponents.BootStorer())) + require.False(t, check.IfNil(managedProcessComponents.HeaderSigVerifier())) + require.False(t, check.IfNil(managedProcessComponents.ValidatorsStatistics())) + require.False(t, check.IfNil(managedProcessComponents.ValidatorsProvider())) + require.False(t, 
check.IfNil(managedProcessComponents.BlockTracker())) + require.False(t, check.IfNil(managedProcessComponents.PendingMiniBlocksHandler())) + require.False(t, check.IfNil(managedProcessComponents.RequestHandler())) + require.False(t, check.IfNil(managedProcessComponents.TxLogsProcessor())) + require.False(t, check.IfNil(managedProcessComponents.HeaderConstructionValidator())) + require.False(t, check.IfNil(managedProcessComponents.HeaderIntegrityVerifier())) + require.False(t, check.IfNil(managedProcessComponents.CurrentEpochProvider())) + require.False(t, check.IfNil(managedProcessComponents.NodeRedundancyHandler())) + require.False(t, check.IfNil(managedProcessComponents.WhiteListHandler())) + require.False(t, check.IfNil(managedProcessComponents.WhiteListerVerifiedTxs())) + require.False(t, check.IfNil(managedProcessComponents.RequestedItemsHandler())) + require.False(t, check.IfNil(managedProcessComponents.ImportStartHandler())) + require.False(t, check.IfNil(managedProcessComponents.HistoryRepository())) + require.False(t, check.IfNil(managedProcessComponents.TransactionSimulatorProcessor())) + require.False(t, check.IfNil(managedProcessComponents.FallbackHeaderValidator())) + require.False(t, check.IfNil(managedProcessComponents.PeerShardMapper())) + require.False(t, check.IfNil(managedProcessComponents.ShardCoordinator())) + require.False(t, check.IfNil(managedProcessComponents.TxsSenderHandler())) + require.False(t, check.IfNil(managedProcessComponents.HardforkTrigger())) + require.False(t, check.IfNil(managedProcessComponents.ProcessedMiniBlocksTracker())) + require.False(t, check.IfNil(managedProcessComponents.AccountsParser())) + require.False(t, check.IfNil(managedProcessComponents.ScheduledTxsExecutionHandler())) + require.False(t, check.IfNil(managedProcessComponents.ESDTDataStorageHandlerForAPI())) + require.False(t, check.IfNil(managedProcessComponents.ReceiptsRepository())) + + require.Equal(t, factory.ProcessComponentsName, 
managedProcessComponents.String()) + }) +} + +func TestManagedProcessComponents_CheckSubcomponents(t *testing.T) { + t.Parallel() + + processComponentsFactory, _ := processComp.NewProcessComponentsFactory(createMockProcessComponentsFactoryArgs()) + managedProcessComponents, _ := processComp.NewManagedProcessComponents(processComponentsFactory) + require.NotNil(t, managedProcessComponents) + require.Equal(t, errorsMx.ErrNilProcessComponents, managedProcessComponents.CheckSubcomponents()) + + err := managedProcessComponents.Create() require.NoError(t, err) - require.False(t, check.IfNil(managedProcessComponents.NodesCoordinator())) - require.False(t, check.IfNil(managedProcessComponents.InterceptorsContainer())) - require.False(t, check.IfNil(managedProcessComponents.ResolversFinder())) - require.False(t, check.IfNil(managedProcessComponents.RoundHandler())) - require.False(t, check.IfNil(managedProcessComponents.ForkDetector())) - require.False(t, check.IfNil(managedProcessComponents.BlockProcessor())) - require.False(t, check.IfNil(managedProcessComponents.EpochStartTrigger())) - require.False(t, check.IfNil(managedProcessComponents.EpochStartNotifier())) - require.False(t, check.IfNil(managedProcessComponents.BlackListHandler())) - require.False(t, check.IfNil(managedProcessComponents.BootStorer())) - require.False(t, check.IfNil(managedProcessComponents.HeaderSigVerifier())) - require.False(t, check.IfNil(managedProcessComponents.ValidatorsStatistics())) - require.False(t, check.IfNil(managedProcessComponents.ValidatorsProvider())) - require.False(t, check.IfNil(managedProcessComponents.BlockTracker())) - require.False(t, check.IfNil(managedProcessComponents.PendingMiniBlocksHandler())) - require.False(t, check.IfNil(managedProcessComponents.RequestHandler())) - require.False(t, check.IfNil(managedProcessComponents.TxLogsProcessor())) - require.False(t, check.IfNil(managedProcessComponents.HeaderConstructionValidator())) - require.False(t, 
check.IfNil(managedProcessComponents.HeaderIntegrityVerifier())) - require.False(t, check.IfNil(managedProcessComponents.CurrentEpochProvider())) - require.False(t, check.IfNil(managedProcessComponents.NodeRedundancyHandler())) - require.False(t, check.IfNil(managedProcessComponents.WhiteListHandler())) - require.False(t, check.IfNil(managedProcessComponents.WhiteListerVerifiedTxs())) - require.False(t, check.IfNil(managedProcessComponents.RequestedItemsHandler())) - require.False(t, check.IfNil(managedProcessComponents.ImportStartHandler())) - require.False(t, check.IfNil(managedProcessComponents.HistoryRepository())) - require.False(t, check.IfNil(managedProcessComponents.TransactionSimulatorProcessor())) - require.False(t, check.IfNil(managedProcessComponents.FallbackHeaderValidator())) - require.False(t, check.IfNil(managedProcessComponents.PeerShardMapper())) - require.False(t, check.IfNil(managedProcessComponents.ShardCoordinator())) - require.False(t, check.IfNil(managedProcessComponents.TxsSenderHandler())) - require.False(t, check.IfNil(managedProcessComponents.HardforkTrigger())) - require.False(t, check.IfNil(managedProcessComponents.ProcessedMiniBlocksTracker())) - - nodeSkBytes, err := cryptoComponents.PrivateKey().ToByteArray() - require.Nil(t, err) - observerSkBytes, err := managedProcessComponents.NodeRedundancyHandler().ObserverPrivateKey().ToByteArray() - require.Nil(t, err) - require.NotEqual(t, nodeSkBytes, observerSkBytes) + + require.Nil(t, managedProcessComponents.CheckSubcomponents()) } func TestManagedProcessComponents_Close(t *testing.T) { t.Parallel() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - processArgs := componentsMock.GetProcessComponentsFactoryArgs(shardCoordinator) - processComponentsFactory, _ := processComp.NewProcessComponentsFactory(processArgs) + processComponentsFactory, _ := processComp.NewProcessComponentsFactory(createMockProcessComponentsFactoryArgs()) managedProcessComponents, _ := 
processComp.NewManagedProcessComponents(processComponentsFactory) err := managedProcessComponents.Create() require.NoError(t, err) err = managedProcessComponents.Close() require.NoError(t, err) - require.Nil(t, managedProcessComponents.NodesCoordinator()) + + err = managedProcessComponents.Close() + require.NoError(t, err) +} + +func TestManagedProcessComponents_IsInterfaceNil(t *testing.T) { + t.Parallel() + + managedProcessComponents, _ := processComp.NewManagedProcessComponents(nil) + require.True(t, managedProcessComponents.IsInterfaceNil()) + + processComponentsFactory, _ := processComp.NewProcessComponentsFactory(createMockProcessComponentsFactoryArgs()) + managedProcessComponents, _ = processComp.NewManagedProcessComponents(processComponentsFactory) + require.False(t, managedProcessComponents.IsInterfaceNil()) } diff --git a/factory/processing/processComponents_test.go b/factory/processing/processComponents_test.go index ebae3a2c893..df5b087d2b2 100644 --- a/factory/processing/processComponents_test.go +++ b/factory/processing/processComponents_test.go @@ -1,102 +1,1270 @@ package processing_test import ( + "bytes" + "context" + "errors" + "math/big" "strings" "sync" "testing" + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/keyValStorage" coreData "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-core-go/data/block" dataBlock "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-core-go/data/endProcess" outportCore "github.com/multiversx/mx-chain-core-go/data/outport" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/factory" + "github.com/multiversx/mx-chain-go/config" + retriever "github.com/multiversx/mx-chain-go/dataRetriever" + errorsMx "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory/mock" processComp 
"github.com/multiversx/mx-chain-go/factory/processing" "github.com/multiversx/mx-chain-go/genesis" - "github.com/multiversx/mx-chain-go/process" + genesisMocks "github.com/multiversx/mx-chain-go/genesis/mock" + testsMocks "github.com/multiversx/mx-chain-go/integrationTests/mock" + "github.com/multiversx/mx-chain-go/p2p" + "github.com/multiversx/mx-chain-go/sharding" + "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + mxState "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/storage" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/bootstrapMocks" componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" + "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" + "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + "github.com/multiversx/mx-chain-go/testscommon/dblookupext" + "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" + "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + factoryMocks "github.com/multiversx/mx-chain-go/testscommon/factory" + "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + "github.com/multiversx/mx-chain-go/testscommon/guardianMocks" "github.com/multiversx/mx-chain-go/testscommon/mainFactoryMocks" + "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/outport" + "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" + "github.com/multiversx/mx-chain-go/testscommon/state" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" - "github.com/stretchr/testify/assert" + "github.com/multiversx/mx-chain-go/testscommon/trie" + trieFactory "github.com/multiversx/mx-chain-go/trie/factory" + updateMocks "github.com/multiversx/mx-chain-go/update/mock" "github.com/stretchr/testify/require" ) 
-// ------------ Test TestProcessComponents -------------------- -func TestProcessComponents_CloseShouldWork(t *testing.T) { +func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFactoryArgs { + gasSchedule, _ := common.LoadGasScheduleConfig("../../cmd/node/config/gasSchedules/gasScheduleV1.toml") + addrPubKeyConv, _ := factory.NewPubkeyConverter(config.PubkeyConfig{ + Length: 32, + Type: "bech32", + SignatureLength: 0, + }) + valPubKeyConv, _ := factory.NewPubkeyConverter(config.PubkeyConfig{ + Length: 96, + Type: "hex", + SignatureLength: 48, + }) + return processComp.ProcessComponentsFactoryArgs{ + Config: testscommon.GetGeneralConfig(), + EpochConfig: config.EpochConfig{}, + PrefConfigs: config.PreferencesConfig{}, + ImportDBConfig: config.ImportDbConfig{}, + AccountsParser: &mock.AccountsParserStub{ + GenerateInitialTransactionsCalled: func(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*dataBlock.MiniBlock, map[uint32]*outportCore.Pool, error) { + return []*dataBlock.MiniBlock{ + {}, + }, + map[uint32]*outportCore.Pool{ + 0: {}, + }, nil + }, + }, + SmartContractParser: &mock.SmartContractParserStub{}, + GasSchedule: &testscommon.GasScheduleNotifierMock{ + GasSchedule: gasSchedule, + }, + NodesCoordinator: &shardingMocks.NodesCoordinatorStub{}, + RequestedItemsHandler: &testscommon.RequestedItemsHandlerStub{}, + WhiteListHandler: &testscommon.WhiteListHandlerStub{}, + WhiteListerVerifiedTxs: &testscommon.WhiteListHandlerStub{}, + MaxRating: 100, + SystemSCConfig: &config.SystemSmartContractsConfig{ + ESDTSystemSCConfig: config.ESDTSystemSCConfig{ + BaseIssuingCost: "1000", + OwnerAddress: "erd1fpkcgel4gcmh8zqqdt043yfcn5tyx8373kg6q2qmkxzu4dqamc0swts65c", + }, + GovernanceSystemSCConfig: config.GovernanceSystemSCConfig{ + V1: config.GovernanceSystemSCConfigV1{ + ProposalCost: "500", + NumNodes: 100, + MinQuorum: 50, + MinPassThreshold: 50, + MinVetoThreshold: 50, + }, + Active: 
config.GovernanceSystemSCConfigActive{ + ProposalCost: "500", + MinQuorum: "50", + MinPassThreshold: "50", + MinVetoThreshold: "50", + }, + FirstWhitelistedAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", + }, + StakingSystemSCConfig: config.StakingSystemSCConfig{ + GenesisNodePrice: "2500000000000000000000", + MinStakeValue: "1", + UnJailValue: "1", + MinStepValue: "1", + UnBondPeriod: 0, + NumRoundsWithoutBleed: 0, + MaximumPercentageToBleed: 0, + BleedPercentagePerRound: 0, + MaxNumberOfNodesForStake: 10, + ActivateBLSPubKeyMessageVerification: false, + MinUnstakeTokensValue: "1", + }, + DelegationManagerSystemSCConfig: config.DelegationManagerSystemSCConfig{ + MinCreationDeposit: "100", + MinStakeAmount: "100", + ConfigChangeAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", + }, + DelegationSystemSCConfig: config.DelegationSystemSCConfig{ + MinServiceFee: 0, + MaxServiceFee: 100, + }, + }, + Version: "v1.0.0", + ImportStartHandler: &testscommon.ImportStartHandlerStub{}, + HistoryRepo: &dblookupext.HistoryRepositoryStub{}, + Data: &testsMocks.DataComponentsStub{ + DataPool: dataRetriever.NewPoolsHolderMock(), + BlockChain: &testscommon.ChainHandlerStub{ + GetGenesisHeaderHashCalled: func() []byte { + return []byte("genesis hash") + }, + GetGenesisHeaderCalled: func() coreData.HeaderHandler { + return &testscommon.HeaderHandlerStub{} + }, + }, + MbProvider: &testsMocks.MiniBlocksProviderStub{}, + Store: genericMocks.NewChainStorerMock(0), + }, + CoreData: &mock.CoreComponentsMock{ + IntMarsh: &testscommon.MarshalizerStub{}, + TxMarsh: &testscommon.MarshalizerStub{}, + UInt64ByteSliceConv: &testsMocks.Uint64ByteSliceConverterMock{}, + AddrPubKeyConv: addrPubKeyConv, + ValPubKeyConv: valPubKeyConv, + NodesConfig: &testscommon.NodesSetupStub{ + GetShardConsensusGroupSizeCalled: func() uint32 { + return 2 + }, + GetMetaConsensusGroupSizeCalled: func() uint32 { + return 2 + }, + }, + EpochChangeNotifier: 
&epochNotifier.EpochNotifierStub{}, + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, + Hash: &testscommon.HasherStub{}, + TxVersionCheckHandler: &testscommon.TxVersionCheckerStub{}, + RatingHandler: &testscommon.RaterMock{}, + EnableEpochsHandlerField: &testscommon.EnableEpochsHandlerStub{}, + EnableRoundsHandlerField: &testscommon.EnableRoundsHandlerStub{}, + EpochNotifierWithConfirm: &updateMocks.EpochStartNotifierStub{}, + RoundHandlerField: &testscommon.RoundHandlerMock{}, + ChanStopProcess: make(chan endProcess.ArgEndProcess, 1), + TxSignHasherField: &testscommon.HasherStub{}, + HardforkTriggerPubKeyField: []byte("hardfork pub key"), + WasmVMChangeLockerInternal: &sync.RWMutex{}, + NodeTypeProviderField: &nodeTypeProviderMock.NodeTypeProviderStub{}, + }, + Crypto: &testsMocks.CryptoComponentsStub{ + BlKeyGen: &cryptoMocks.KeyGenStub{}, + BlockSig: &cryptoMocks.SingleSignerStub{}, + MultiSigContainer: &cryptoMocks.MultiSignerContainerMock{ + MultiSigner: &cryptoMocks.MultisignerMock{}, + }, + PrivKey: &cryptoMocks.PrivateKeyStub{}, + PubKey: &cryptoMocks.PublicKeyStub{}, + PubKeyString: "pub key string", + PubKeyBytes: []byte("pub key bytes"), + TxKeyGen: &cryptoMocks.KeyGenStub{}, + TxSig: &cryptoMocks.SingleSignerStub{}, + PeerSignHandler: &cryptoMocks.PeerSignatureHandlerStub{}, + }, + State: &testscommon.StateComponentsMock{ + Accounts: &state.AccountsStub{ + CommitCalled: func() ([]byte, error) { + return []byte(""), nil + }, + RootHashCalled: func() ([]byte, error) { + return []byte("root hash"), nil + }, + }, + PeersAcc: &state.AccountsStub{ + CommitCalled: func() ([]byte, error) { + return []byte("hash"), nil + }, + RootHashCalled: func() ([]byte, error) { + return []byte("root hash"), nil + }, + }, + Tries: &trie.TriesHolderStub{ + GetCalled: func(bytes []byte) common.Trie { + return &trie.TrieStub{} + }, + }, + AccountsAPI: &state.AccountsStub{}, + StorageManagers: map[string]common.StorageManager{ + trieFactory.UserAccountTrie: 
&testscommon.StorageManagerStub{}, + trieFactory.PeerAccountTrie: &testscommon.StorageManagerStub{}, + }, + }, + Network: &testsMocks.NetworkComponentsStub{ + Messenger: &p2pmocks.MessengerStub{}, + InputAntiFlood: &testsMocks.P2PAntifloodHandlerStub{}, + OutputAntiFlood: &testsMocks.P2PAntifloodHandlerStub{}, + PreferredPeersHolder: &p2pmocks.PeersHolderStub{}, + PeersRatingHandlerField: &p2pmocks.PeersRatingHandlerStub{}, + }, + BootstrapComponents: &mainFactoryMocks.BootstrapComponentsStub{ + ShCoordinator: mock.NewMultiShardsCoordinatorMock(2), + BootstrapParams: &bootstrapMocks.BootstrapParamsHandlerMock{}, + HdrIntegrityVerifier: &mock.HeaderIntegrityVerifierStub{}, + GuardedAccountHandlerField: &guardianMocks.GuardedAccountHandlerStub{}, + VersionedHdrFactory: &testscommon.VersionedHeaderFactoryStub{}, + }, + StatusComponents: &testsMocks.StatusComponentsStub{ + Outport: &outport.OutportStub{}, + }, + StatusCoreComponents: &factoryMocks.StatusCoreComponentsStub{ + AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, + }, + } +} + +func TestNewProcessComponentsFactory(t *testing.T) { t.Parallel() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - processArgs := componentsMock.GetProcessComponentsFactoryArgs(shardCoordinator) - pcf, err := processComp.NewProcessComponentsFactory(processArgs) - require.Nil(t, err) + t.Run("nil AccountsParser should error", func(t *testing.T) { + t.Parallel() - pc, err := pcf.Create() - require.Nil(t, err) + args := createMockProcessComponentsFactoryArgs() + args.AccountsParser = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilAccountsParser)) + require.Nil(t, pcf) + }) + t.Run("nil GasSchedule should error", func(t *testing.T) { + t.Parallel() - err = pc.Close() - require.NoError(t, err) -} + args := createMockProcessComponentsFactoryArgs() + args.GasSchedule = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, 
errors.Is(err, errorsMx.ErrNilGasSchedule)) + require.Nil(t, pcf) + }) + t.Run("nil Data should error", func(t *testing.T) { + t.Parallel() -func TestProcessComponentsFactory_CreateWithInvalidTxAccumulatorTimeExpectError(t *testing.T) { - t.Parallel() + args := createMockProcessComponentsFactoryArgs() + args.Data = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilDataComponentsHolder)) + require.Nil(t, pcf) + }) + t.Run("nil Blockchain should error", func(t *testing.T) { + t.Parallel() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - processArgs := componentsMock.GetProcessComponentsFactoryArgs(shardCoordinator) - processArgs.Config.Antiflood.TxAccumulator.MaxAllowedTimeInMilliseconds = 0 - pcf, err := processComp.NewProcessComponentsFactory(processArgs) - require.Nil(t, err) + args := createMockProcessComponentsFactoryArgs() + args.Data = &testsMocks.DataComponentsStub{ + BlockChain: &testscommon.ChainHandlerStub{}, + DataPool: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilDataPoolsHolder)) + require.Nil(t, pcf) + }) + t.Run("nil StorageService should error", func(t *testing.T) { + t.Parallel() - instance, err := pcf.Create() - require.Nil(t, instance) - require.Error(t, err) - require.True(t, strings.Contains(err.Error(), process.ErrInvalidValue.Error())) + args := createMockProcessComponentsFactoryArgs() + args.Data = &testsMocks.DataComponentsStub{ + BlockChain: &testscommon.ChainHandlerStub{}, + DataPool: &dataRetriever.PoolsHolderStub{}, + Store: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilStorageService)) + require.Nil(t, pcf) + }) + t.Run("nil CoreData should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.CoreData = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + 
require.True(t, errors.Is(err, errorsMx.ErrNilCoreComponentsHolder)) + require.Nil(t, pcf) + }) + t.Run("nil EconomicsData should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.CoreData = &mock.CoreComponentsMock{ + EconomicsHandler: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilEconomicsData)) + require.Nil(t, pcf) + }) + t.Run("nil GenesisNodesSetup should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.CoreData = &mock.CoreComponentsMock{ + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, + NodesConfig: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilGenesisNodesSetupHandler)) + require.Nil(t, pcf) + }) + t.Run("nil AddressPubKeyConverter should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.CoreData = &mock.CoreComponentsMock{ + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, + NodesConfig: &testscommon.NodesSetupStub{}, + AddrPubKeyConv: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilAddressPublicKeyConverter)) + require.Nil(t, pcf) + }) + t.Run("nil EpochNotifier should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.CoreData = &mock.CoreComponentsMock{ + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, + NodesConfig: &testscommon.NodesSetupStub{}, + AddrPubKeyConv: &mock.PubkeyConverterStub{}, + EpochChangeNotifier: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilEpochNotifier)) + require.Nil(t, pcf) + }) + t.Run("nil ValidatorPubKeyConverter should error", func(t *testing.T) { + t.Parallel() + + args := 
createMockProcessComponentsFactoryArgs() + args.CoreData = &mock.CoreComponentsMock{ + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, + NodesConfig: &testscommon.NodesSetupStub{}, + AddrPubKeyConv: &mock.PubkeyConverterStub{}, + EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, + ValPubKeyConv: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilPubKeyConverter)) + require.Nil(t, pcf) + }) + t.Run("nil InternalMarshalizer should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.CoreData = &mock.CoreComponentsMock{ + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, + NodesConfig: &testscommon.NodesSetupStub{}, + AddrPubKeyConv: &mock.PubkeyConverterStub{}, + EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, + ValPubKeyConv: &mock.PubkeyConverterStub{}, + IntMarsh: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilInternalMarshalizer)) + require.Nil(t, pcf) + }) + t.Run("nil Uint64ByteSliceConverter should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.CoreData = &mock.CoreComponentsMock{ + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, + NodesConfig: &testscommon.NodesSetupStub{}, + AddrPubKeyConv: &mock.PubkeyConverterStub{}, + EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, + ValPubKeyConv: &mock.PubkeyConverterStub{}, + IntMarsh: &testscommon.MarshalizerStub{}, + UInt64ByteSliceConv: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilUint64ByteSliceConverter)) + require.Nil(t, pcf) + }) + t.Run("nil Crypto should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Crypto = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, 
errors.Is(err, errorsMx.ErrNilCryptoComponentsHolder)) + require.Nil(t, pcf) + }) + t.Run("nil BlockSignKeyGen should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Crypto = &testsMocks.CryptoComponentsStub{ + BlKeyGen: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilBlockSignKeyGen)) + require.Nil(t, pcf) + }) + t.Run("nil State should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.State = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilStateComponentsHolder)) + require.Nil(t, pcf) + }) + t.Run("nil AccountsAdapter should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.State = &testscommon.StateComponentsMock{ + Accounts: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilAccountsAdapter)) + require.Nil(t, pcf) + }) + t.Run("nil Network should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Network = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilNetworkComponentsHolder)) + require.Nil(t, pcf) + }) + t.Run("nil NetworkMessenger should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Network = &testsMocks.NetworkComponentsStub{ + Messenger: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilMessenger)) + require.Nil(t, pcf) + }) + t.Run("nil InputAntiFloodHandler should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Network = &testsMocks.NetworkComponentsStub{ + Messenger: &p2pmocks.MessengerStub{}, + 
InputAntiFlood: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilInputAntiFloodHandler)) + require.Nil(t, pcf) + }) + t.Run("nil SystemSCConfig should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.SystemSCConfig = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilSystemSCConfig)) + require.Nil(t, pcf) + }) + t.Run("nil BootstrapComponents should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.BootstrapComponents = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilBootstrapComponentsHolder)) + require.Nil(t, pcf) + }) + t.Run("nil ShardCoordinator should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.BootstrapComponents = &mainFactoryMocks.BootstrapComponentsStub{ + ShCoordinator: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilShardCoordinator)) + require.Nil(t, pcf) + }) + t.Run("nil EpochBootstrapParams should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.BootstrapComponents = &mainFactoryMocks.BootstrapComponentsStub{ + ShCoordinator: &testscommon.ShardsCoordinatorMock{}, + BootstrapParams: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilBootstrapParamsHandler)) + require.Nil(t, pcf) + }) + t.Run("nil StatusComponents should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.StatusComponents = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilStatusComponentsHolder)) + require.Nil(t, pcf) + }) + 
t.Run("nil OutportHandler should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.StatusComponents = &testsMocks.StatusComponentsStub{ + Outport: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilOutportHandler)) + require.Nil(t, pcf) + }) + t.Run("nil HistoryRepo should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.HistoryRepo = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilHistoryRepository)) + require.Nil(t, pcf) + }) + t.Run("nil StatusCoreComponents should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.StatusCoreComponents = nil + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilStatusCoreComponents)) + require.Nil(t, pcf) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + pcf, err := processComp.NewProcessComponentsFactory(createMockProcessComponentsFactoryArgs()) + require.NoError(t, err) + require.NotNil(t, pcf) + }) } -func TestProcessComponents_IndexGenesisBlocks(t *testing.T) { +func TestProcessComponentsFactory_Create(t *testing.T) { t.Parallel() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(1) - processArgs := componentsMock.GetProcessComponentsFactoryArgs(shardCoordinator) - processArgs.Data = &mock.DataComponentsMock{ - Storage: &storageStubs.ChainStorerStub{}, - } + expectedErr := errors.New("expected error") + t.Run("CreateCurrentEpochProvider fails should error", func(t *testing.T) { + t.Parallel() - saveBlockCalledMutex := sync.Mutex{} + args := createMockProcessComponentsFactoryArgs() + args.Config.EpochStartConfig.RoundsPerEpoch = 0 + args.PrefConfigs.FullArchive = true + testCreateWithArgs(t, args, "rounds per epoch") + }) + t.Run("NewFallbackHeaderValidator 
fails should error", testWithNilMarshaller(1, "Marshalizer")) + t.Run("NewHeaderSigVerifier fails should error", testWithNilMarshaller(2, "Marshalizer")) + t.Run("createNetworkShardingCollector fails due to invalid PublicKeyPeerId config should error", func(t *testing.T) { + t.Parallel() - outportHandler := &outport.OutportStub{ - HasDriversCalled: func() bool { - return true - }, - SaveBlockCalled: func(args *outportCore.ArgsSaveBlockData) { - saveBlockCalledMutex.Lock() - require.NotNil(t, args) + args := createMockProcessComponentsFactoryArgs() + args.Config.PublicKeyPeerId.Type = "invalid" + testCreateWithArgs(t, args, "cache type") + }) + t.Run("createNetworkShardingCollector fails due to invalid PublicKeyShardId config should error", func(t *testing.T) { + t.Parallel() - bodyRequired := &dataBlock.Body{ - MiniBlocks: make([]*block.MiniBlock, 4), + args := createMockProcessComponentsFactoryArgs() + args.Config.PublicKeyShardId.Type = "invalid" + testCreateWithArgs(t, args, "cache type") + }) + t.Run("prepareNetworkShardingCollector fails due to SetPeerShardResolver failure should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + netwCompStub, ok := args.Network.(*testsMocks.NetworkComponentsStub) + require.True(t, ok) + netwCompStub.Messenger = &p2pmocks.MessengerStub{ + SetPeerShardResolverCalled: func(peerShardResolver p2p.PeerShardResolver) error { + return expectedErr + }, + } + testCreateWithArgs(t, args, expectedErr.Error()) + }) + t.Run("newStorageResolver fails due to NewStorageServiceFactory failure should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.ImportDBConfig.IsImportDBMode = true + args.Config.StoragePruning.NumActivePersisters = 0 + testCreateWithArgs(t, args, "active persisters") + }) + t.Run("newStorageResolver fails due to CreateForMeta failure should error", func(t *testing.T) { + t.Parallel() + + args := 
createMockProcessComponentsFactoryArgs() + args.ImportDBConfig.IsImportDBMode = true + args.Config.ShardHdrNonceHashStorage.Cache.Type = "invalid" + bootstrapCompStub, ok := args.BootstrapComponents.(*mainFactoryMocks.BootstrapComponentsStub) + require.True(t, ok) + bootstrapCompStub.ShCoordinator = &testscommon.ShardsCoordinatorMock{ + NoShards: 2, + CurrentShard: common.MetachainShardId, + } + testCreateWithArgs(t, args, "ShardHdrNonceHashStorage") + }) + t.Run("newStorageResolver fails due to CreateForMeta failure should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.ImportDBConfig.IsImportDBMode = true + args.Config.ShardHdrNonceHashStorage.Cache.Type = "invalid" + testCreateWithArgs(t, args, "ShardHdrNonceHashStorage") + }) + t.Run("newResolverContainerFactory fails due to NewPeerAuthenticationPayloadValidator failure should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Config.HeartbeatV2.HeartbeatExpiryTimespanInSec = 0 + testCreateWithArgs(t, args, "expiry timespan") + }) + t.Run("newResolverContainerFactory fails due to invalid shard should error", + testWithInvalidShard(0, "could not create interceptor and resolver container factory")) + t.Run("newMetaResolverContainerFactory fails due to NewSimpleDataPacker failure should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + bootstrapCompStub, ok := args.BootstrapComponents.(*mainFactoryMocks.BootstrapComponentsStub) + require.True(t, ok) + bootstrapCompStub.ShCoordinator = &testscommon.ShardsCoordinatorMock{ + NoShards: 2, + CurrentShard: common.MetachainShardId, + } + coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) + cnt := 0 + coreCompStub.InternalMarshalizerCalled = func() marshal.Marshalizer { + cnt++ + if cnt > 3 { + return nil + } + return &testscommon.MarshalizerStub{} + } + args.CoreData = 
coreCompStub + testCreateWithArgs(t, args, "marshalizer") + }) + t.Run("newShardResolverContainerFactory fails due to NewSimpleDataPacker failure should error", testWithNilMarshaller(3, "marshalizer")) + t.Run("NewResolversFinder fails should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.ImportDBConfig.IsImportDBMode = true // coverage + bootstrapCompStub, ok := args.BootstrapComponents.(*mainFactoryMocks.BootstrapComponentsStub) + require.True(t, ok) + cnt := 0 + bootstrapCompStub.ShardCoordinatorCalled = func() sharding.Coordinator { + cnt++ + if cnt > 5 { + return nil + } + return &testscommon.ShardsCoordinatorMock{ + NoShards: 2, + CurrentShard: common.MetachainShardId, // coverage } + } + testCreateWithArgs(t, args, "shard coordinator") + }) + t.Run("GetStorer TxLogsUnit fails should error", func(t *testing.T) { + t.Parallel() - txsPoolRequired := &outportCore.Pool{} + args := createMockProcessComponentsFactoryArgs() + args.ImportDBConfig.IsImportDBMode = true // coverage + dataCompStub, ok := args.Data.(*testsMocks.DataComponentsStub) + require.True(t, ok) + dataCompStub.Store = &storageStubs.ChainStorerStub{ + GetStorerCalled: func(unitType retriever.UnitType) (storage.Storer, error) { + return nil, expectedErr + }, + } + testCreateWithArgs(t, args, expectedErr.Error()) + }) + t.Run("NewResolversFinder fails should error", testWithNilMarshaller(5, "Marshalizer")) + t.Run("generateGenesisHeadersAndApplyInitialBalances fails due to invalid GenesisNodePrice should error", func(t *testing.T) { + t.Parallel() - assert.Equal(t, txsPoolRequired, args.TransactionsPool) - assert.Equal(t, bodyRequired, args.Body) - saveBlockCalledMutex.Unlock() - }, - } + args := createMockProcessComponentsFactoryArgs() + args.Config.LogsAndEvents.SaveInStorageEnabled = false // coverage + args.Config.DbLookupExtensions.Enabled = true // coverage + args.SystemSCConfig.StakingSystemSCConfig.GenesisNodePrice = "invalid" + 
testCreateWithArgs(t, args, "invalid genesis node price") + }) + t.Run("generateGenesisHeadersAndApplyInitialBalances fails due to NewGenesisBlockCreator failure should error", + testWithNilMarshaller(6, "Marshalizer")) + t.Run("setGenesisHeader fails due to invalid shard should error", + testWithInvalidShard(8, "genesis block does not exist")) + t.Run("newValidatorStatisticsProcessor fails due to nil genesis header should error", func(t *testing.T) { + t.Parallel() - processArgs.StatusComponents = &mainFactoryMocks.StatusComponentsStub{ - Outport: outportHandler, + args := createMockProcessComponentsFactoryArgs() + dataCompStub, ok := args.Data.(*testsMocks.DataComponentsStub) + require.True(t, ok) + blockChainStub, ok := dataCompStub.BlockChain.(*testscommon.ChainHandlerStub) + require.True(t, ok) + blockChainStub.GetGenesisHeaderCalled = func() coreData.HeaderHandler { + return nil + } + testCreateWithArgs(t, args, errorsMx.ErrGenesisBlockNotInitialized.Error()) + }) + t.Run("indexGenesisBlocks fails due to CalculateHash failure should error", testWithNilMarshaller(41, "marshalizer")) + t.Run("indexGenesisBlocks fails due to GenerateInitialTransactions failure should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.AccountsParser = &mock.AccountsParserStub{ + GenerateInitialTransactionsCalled: func(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*dataBlock.MiniBlock, map[uint32]*outportCore.Pool, error) { + return nil, nil, expectedErr + }, + } + testCreateWithArgs(t, args, expectedErr.Error()) + }) + t.Run("NewValidatorsProvider fails should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) + pubKeyConv := args.CoreData.ValidatorPubKeyConverter() + cnt := 0 + coreCompStub.ValidatorPubKeyConverterCalled = func() 
core.PubkeyConverter { + cnt++ + if cnt > 2 { + return nil + } + return pubKeyConv + } + args.CoreData = coreCompStub + testCreateWithArgs(t, args, "pubkey converter") + }) + t.Run("newEpochStartTrigger fails due to invalid shard should error", + testWithInvalidShard(16, "error creating new start of epoch trigger because of invalid shard id")) + t.Run("newEpochStartTrigger fails due to NewHeaderValidator failure should error", testWithNilMarshaller(46, "Marshalizer")) + t.Run("newEpochStartTrigger fails due to NewPeerMiniBlockSyncer failure should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + dataCompStub, ok := args.Data.(*testsMocks.DataComponentsStub) + require.True(t, ok) + dataPool := dataCompStub.DataPool + cnt := 0 + dataCompStub.DataPool = &dataRetriever.PoolsHolderStub{ + HeadersCalled: dataPool.Headers, + TransactionsCalled: dataPool.Transactions, + MiniBlocksCalled: dataPool.MiniBlocks, + CurrBlockTxsCalled: dataPool.CurrentBlockTxs, + TrieNodesCalled: dataPool.TrieNodes, + ValidatorsInfoCalled: func() retriever.ShardedDataCacherNotifier { + cnt++ + if cnt > 3 { + return nil + } + return dataPool.ValidatorsInfo() + }, + CloseCalled: nil, + } + testCreateWithArgs(t, args, "validators info pool") + }) + t.Run("newEpochStartTrigger fails due to NewPeerMiniBlockSyncer failure should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + bootstrapCompStub, ok := args.BootstrapComponents.(*mainFactoryMocks.BootstrapComponentsStub) + require.True(t, ok) + cntShardC := 0 + bootstrapCompStub.ShardCoordinatorCalled = func() sharding.Coordinator { + cntShardC++ + shardC := testscommon.NewMultiShardsCoordinatorMock(2) + if cntShardC > 16 { + shardC.CurrentShard = common.MetachainShardId + } + return shardC + } + dataCompStub, ok := args.Data.(*testsMocks.DataComponentsStub) + require.True(t, ok) + blockChainStub, ok := 
dataCompStub.BlockChain.(*testscommon.ChainHandlerStub) + require.True(t, ok) + cnt := 0 + blockChainStub.GetGenesisHeaderCalled = func() coreData.HeaderHandler { + cnt++ + if cnt > 1 { + return nil + } + return &testscommon.HeaderHandlerStub{} + } + testCreateWithArgs(t, args, errorsMx.ErrGenesisBlockNotInitialized.Error()) + }) + t.Run("newEpochStartTrigger fails due to invalid shard should error", + testWithInvalidShard(17, "genesis block does not exist")) + t.Run("NewHeaderValidator fails should error", testWithNilMarshaller(48, "marshalizer")) + t.Run("prepareGenesisBlock fails due to CalculateHash failure should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + dataCompStub, ok := args.Data.(*testsMocks.DataComponentsStub) + require.True(t, ok) + blockChainStub, ok := dataCompStub.BlockChain.(*testscommon.ChainHandlerStub) + require.True(t, ok) + cnt := 0 + blockChainStub.SetGenesisHeaderCalled = func(handler coreData.HeaderHandler) error { + cnt++ + if cnt > 1 { + return expectedErr + } + return nil + } + testCreateWithArgs(t, args, expectedErr.Error()) + }) + t.Run("saveGenesisHeaderToStorage fails due to Marshal failure should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) + cnt := 0 + coreCompStub.InternalMarshalizerCalled = func() marshal.Marshalizer { + return &testscommon.MarshalizerStub{ + MarshalCalled: func(obj interface{}) ([]byte, error) { + cnt++ + if cnt > 38 { + return nil, expectedErr + } + return []byte(""), nil + }, + } + } + args.CoreData = coreCompStub + testCreateWithArgs(t, args, expectedErr.Error()) + }) + t.Run("GetStorer BootstrapUnit fails should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + dataCompStub, ok := args.Data.(*testsMocks.DataComponentsStub) + require.True(t, ok) + store := 
args.Data.StorageService() + cnt := 0 + dataCompStub.Store = &storageStubs.ChainStorerStub{ + GetStorerCalled: func(unitType retriever.UnitType) (storage.Storer, error) { + if unitType == retriever.BootstrapUnit { + cnt++ + if cnt > 2 { + return nil, expectedErr + } + } + return store.GetStorer(unitType) + }, + } + testCreateWithArgs(t, args, expectedErr.Error()) + }) + t.Run("NewBootstrapStorer fails should error", testWithNilMarshaller(50, "Marshalizer")) + t.Run("NewHeaderValidator fails should error", testWithNilMarshaller(51, "Marshalizer")) + t.Run("newBlockTracker fails due to invalid shard should error", + testWithInvalidShard(19, "could not create block tracker")) + t.Run("NewMiniBlocksPoolsCleaner fails should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Config.PoolsCleanersConfig.MaxRoundsToKeepUnprocessedMiniBlocks = 0 + testCreateWithArgs(t, args, "MaxRoundsToKeepUnprocessedData") + }) + t.Run("NewTxsPoolsCleaner fails should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Config.PoolsCleanersConfig.MaxRoundsToKeepUnprocessedTransactions = 0 + testCreateWithArgs(t, args, "MaxRoundsToKeepUnprocessedData") + }) + t.Run("NewMiniBlockTrack fails should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + bootstrapCompStub, ok := args.BootstrapComponents.(*mainFactoryMocks.BootstrapComponentsStub) + require.True(t, ok) + cnt := 0 + bootstrapCompStub.ShardCoordinatorCalled = func() sharding.Coordinator { + cnt++ + if cnt > 22 { + return nil + } + return testscommon.NewMultiShardsCoordinatorMock(2) + } + testCreateWithArgs(t, args, "shard coordinator") + }) + t.Run("createHardforkTrigger fails due to Decode failure should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Config.Hardfork.PublicKeyToListenFrom = "invalid key" + testCreateWithArgs(t, 
args, "PublicKeyToListenFrom") + }) + t.Run("newInterceptorContainerFactory fails due to invalid shard should error", + testWithInvalidShard(23, "could not create interceptor container factory")) + t.Run("createExportFactoryHandler fails", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + bootstrapCompStub, ok := args.BootstrapComponents.(*mainFactoryMocks.BootstrapComponentsStub) + require.True(t, ok) + cnt := 0 + bootstrapCompStub.ShardCoordinatorCalled = func() sharding.Coordinator { + cnt++ + if cnt > 25 { + return nil + } + return testscommon.NewMultiShardsCoordinatorMock(2) + } + testCreateWithArgs(t, args, "shard coordinator") + }) + t.Run("newForkDetector fails due to invalid shard should error", + testWithInvalidShard(27, "could not create fork detector")) + t.Run("NewCache fails for vmOutput should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Config.VMOutputCacher.Type = "invalid" + testCreateWithArgs(t, args, "cache type") + }) + t.Run("GetStorer ScheduledSCRsUnit fails should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + dataCompStub, ok := args.Data.(*testsMocks.DataComponentsStub) + require.True(t, ok) + store := args.Data.StorageService() + dataCompStub.Store = &storageStubs.ChainStorerStub{ + GetStorerCalled: func(unitType retriever.UnitType) (storage.Storer, error) { + if unitType == retriever.ScheduledSCRsUnit { + return nil, expectedErr + } + return store.GetStorer(unitType) + }, + } + testCreateWithArgs(t, args, expectedErr.Error()) + }) + t.Run("NewScheduledTxsExecution fails should error", testWithNilMarshaller(104, "Marshalizer")) + t.Run("NewESDTDataStorage fails should error", testWithNilMarshaller(105, "Marshalizer")) + t.Run("NewReceiptsRepository fails should error", testWithNilMarshaller(106, "marshalizer")) + t.Run("newBlockProcessor fails due to invalid shard should error", + 
testWithInvalidShard(31, "could not create block processor")) + t.Run("NewNodesSetupChecker fails should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) + pubKeyConv := args.CoreData.ValidatorPubKeyConverter() + cnt := 0 + coreCompStub.ValidatorPubKeyConverterCalled = func() core.PubkeyConverter { + cnt++ + if cnt > 5 { + return nil + } + return pubKeyConv + } + args.CoreData = coreCompStub + testCreateWithArgs(t, args, "pubkey converter") + }) + t.Run("nodesSetupChecker.Check fails should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) + coreCompStub.GenesisNodesSetupCalled = func() sharding.GenesisNodesSetupHandler { + return &testscommon.NodesSetupStub{ + AllInitialNodesCalled: func() []nodesCoordinator.GenesisNodeInfoHandler { + return []nodesCoordinator.GenesisNodeInfoHandler{ + &genesisMocks.GenesisNodeInfoHandlerMock{ + PubKeyBytesValue: []byte("no stake"), + }, + } + }, + GetShardConsensusGroupSizeCalled: func() uint32 { + return 2 + }, + GetMetaConsensusGroupSizeCalled: func() uint32 { + return 2 + }, + } + } + args.CoreData = coreCompStub + testCreateWithArgs(t, args, "no one staked") + }) + t.Run("NewNodeRedundancy fails should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + netwCompStub, ok := args.Network.(*testsMocks.NetworkComponentsStub) + require.True(t, ok) + cnt := 0 + netwCompStub.MessengerCalled = func() p2p.Messenger { + cnt++ + if cnt > 7 { + return nil + } + return &p2pmocks.MessengerStub{} + } + testCreateWithArgs(t, args, "messenger") + }) + t.Run("NewReceiptsRepository fails should error", testWithNilMarshaller(123, "marshalizer")) + t.Run("NewTxsSenderWithAccumulator fails should error", 
testWithNilMarshaller(124, "Marshalizer")) + t.Run("should work with indexAndReturnGenesisAccounts failing due to RootHash failure", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + statusCompStub, ok := args.StatusComponents.(*testsMocks.StatusComponentsStub) + require.True(t, ok) + statusCompStub.Outport = &outport.OutportStub{ + HasDriversCalled: func() bool { + return true + }, + } + stateCompStub, ok := args.State.(*testscommon.StateComponentsMock) + require.True(t, ok) + accountsStub, ok := stateCompStub.Accounts.(*state.AccountsStub) + require.True(t, ok) + accountsStub.RootHashCalled = func() ([]byte, error) { + return nil, expectedErr + } + pcf, _ := processComp.NewProcessComponentsFactory(args) + require.NotNil(t, pcf) + + instance, err := pcf.Create() + require.Nil(t, err) + require.NotNil(t, instance) + }) + t.Run("should work with indexAndReturnGenesisAccounts failing due to GetAllLeaves failure", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + statusCompStub, ok := args.StatusComponents.(*testsMocks.StatusComponentsStub) + require.True(t, ok) + statusCompStub.Outport = &outport.OutportStub{ + HasDriversCalled: func() bool { + return true + }, + } + stateCompStub, ok := args.State.(*testscommon.StateComponentsMock) + require.True(t, ok) + accountsStub, ok := stateCompStub.Accounts.(*state.AccountsStub) + require.True(t, ok) + accountsStub.GetAllLeavesCalled = func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte) error { + close(leavesChannels.LeavesChan) + leavesChannels.ErrChan.Close() + return expectedErr + } + + pcf, _ := processComp.NewProcessComponentsFactory(args) + require.NotNil(t, pcf) + + instance, err := pcf.Create() + require.Nil(t, err) + require.NotNil(t, instance) + }) + t.Run("should work with indexAndReturnGenesisAccounts failing due to Unmarshal failure", func(t *testing.T) { + t.Parallel() + + args := 
createMockProcessComponentsFactoryArgs() + statusCompStub, ok := args.StatusComponents.(*testsMocks.StatusComponentsStub) + require.True(t, ok) + statusCompStub.Outport = &outport.OutportStub{ + HasDriversCalled: func() bool { + return true + }, + } + stateCompStub, ok := args.State.(*testscommon.StateComponentsMock) + require.True(t, ok) + accountsStub, ok := stateCompStub.Accounts.(*state.AccountsStub) + require.True(t, ok) + accountsStub.GetAllLeavesCalled = func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte) error { + leavesChannels.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("key_ok"), []byte("value")) // coverage + leavesChannels.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("key_invalid"), []byte("value")) + close(leavesChannels.LeavesChan) + leavesChannels.ErrChan.Close() + return nil + } + coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) + cnt := 0 + coreCompStub.InternalMarshalizerCalled = func() marshal.Marshalizer { + return &testscommon.MarshalizerStub{ + UnmarshalCalled: func(obj interface{}, buff []byte) error { + cnt++ + if cnt == 1 { + return nil // coverage, key_ok + } + return expectedErr + }, + } + } + args.CoreData = coreCompStub + pcf, _ := processComp.NewProcessComponentsFactory(args) + require.NotNil(t, pcf) + + instance, err := pcf.Create() + require.Nil(t, err) + require.NotNil(t, instance) + }) + t.Run("should work with indexAndReturnGenesisAccounts failing due to error on GetAllLeaves", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + statusCompStub, ok := args.StatusComponents.(*testsMocks.StatusComponentsStub) + require.True(t, ok) + statusCompStub.Outport = &outport.OutportStub{ + HasDriversCalled: func() bool { + return true + }, + } + stateCompStub, ok := args.State.(*testscommon.StateComponentsMock) + require.True(t, ok) + accountsStub, ok := stateCompStub.Accounts.(*state.AccountsStub) + 
require.True(t, ok) + accountsStub.GetAllLeavesCalled = func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte) error { + close(leavesChannels.LeavesChan) + leavesChannels.ErrChan.WriteInChanNonBlocking(expectedErr) + leavesChannels.ErrChan.Close() + return nil + } + pcf, _ := processComp.NewProcessComponentsFactory(args) + require.NotNil(t, pcf) + + instance, err := pcf.Create() + require.Nil(t, err) + require.NotNil(t, instance) + }) + t.Run("should work - shard", func(t *testing.T) { + t.Parallel() + + shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) + processArgs := componentsMock.GetProcessComponentsFactoryArgs(shardCoordinator) + pcf, _ := processComp.NewProcessComponentsFactory(processArgs) + require.NotNil(t, pcf) + + instance, err := pcf.Create() + require.NoError(t, err) + require.NotNil(t, instance) + + err = instance.Close() + require.NoError(t, err) + }) + t.Run("should work - meta", func(t *testing.T) { + t.Parallel() + + shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) + shardCoordinator.CurrentShard = common.MetachainShardId + processArgs := componentsMock.GetProcessComponentsFactoryArgs(shardCoordinator) + + protocolSustainabilityAddress := "erd1932eft30w753xyvme8d49qejgkjc09n5e49w4mwdjtm0neld797su0dlxp" + shardCoordinator.ComputeIdCalled = func(address []byte) uint32 { + protocolSustainabilityAddr, err := processArgs.CoreData.AddressPubKeyConverter().Decode(protocolSustainabilityAddress) + require.NoError(t, err) + if bytes.Equal(protocolSustainabilityAddr, address) { + return 0 + } + return shardCoordinator.CurrentShard + } + fundGenesisWallets(t, processArgs) + + pcf, _ := processComp.NewProcessComponentsFactory(processArgs) + require.NotNil(t, pcf) + + instance, err := pcf.Create() + require.NoError(t, err) + require.NotNil(t, instance) + + err = instance.Close() + require.NoError(t, err) + }) +} + +func fundGenesisWallets(t *testing.T, args processComp.ProcessComponentsFactoryArgs) { + accounts 
:= args.State.AccountsAdapter() + initialNodes := args.CoreData.GenesisNodesSetup().AllInitialNodes() + nodePrice, ok := big.NewInt(0).SetString(args.SystemSCConfig.StakingSystemSCConfig.GenesisNodePrice, 10) + require.True(t, ok) + for _, node := range initialNodes { + account, err := accounts.LoadAccount(node.AddressBytes()) + require.NoError(t, err) + + userAccount := account.(mxState.UserAccountHandler) + err = userAccount.AddToBalance(nodePrice) + require.NoError(t, err) + + require.NoError(t, accounts.SaveAccount(userAccount)) + _, err = accounts.Commit() + require.NoError(t, err) } +} - pcf, err := processComp.NewProcessComponentsFactory(processArgs) - require.Nil(t, err) +func testWithNilMarshaller(nilStep int, expectedErrSubstr string) func(t *testing.T) { + return func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) + step := 0 + coreCompStub.InternalMarshalizerCalled = func() marshal.Marshalizer { + step++ + if step > nilStep { + return nil + } + return &testscommon.MarshalizerStub{} + } + args.CoreData = coreCompStub + testCreateWithArgs(t, args, expectedErrSubstr) + } +} - genesisBlocks := make(map[uint32]coreData.HeaderHandler) - indexingData := make(map[uint32]*genesis.IndexingData) +func testWithInvalidShard(failingStep int, expectedErrSubstr string) func(t *testing.T) { + return func(t *testing.T) { + t.Parallel() - for i := uint32(0); i < shardCoordinator.NumberOfShards(); i++ { - genesisBlocks[i] = &block.Header{} + args := createMockProcessComponentsFactoryArgs() + bootstrapCompStub, ok := args.BootstrapComponents.(*mainFactoryMocks.BootstrapComponentsStub) + require.True(t, ok) + cnt := 0 + bootstrapCompStub.ShardCoordinatorCalled = func() sharding.Coordinator { + cnt++ + if cnt > failingStep { + return &testscommon.ShardsCoordinatorMock{ + NoShards: 2, + CurrentShard: 3, + } + } + return 
testscommon.NewMultiShardsCoordinatorMock(2) + } + testCreateWithArgs(t, args, expectedErrSubstr) } +} + +func testCreateWithArgs(t *testing.T, args processComp.ProcessComponentsFactoryArgs, expectedErrSubstr string) { + pcf, _ := processComp.NewProcessComponentsFactory(args) + require.NotNil(t, pcf) - err = pcf.IndexGenesisBlocks(genesisBlocks, indexingData) - require.Nil(t, err) + instance, err := pcf.Create() + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), expectedErrSubstr)) + require.Nil(t, instance) } diff --git a/genesis/errors.go b/genesis/errors.go index 2553b9650aa..77fee48171b 100644 --- a/genesis/errors.go +++ b/genesis/errors.go @@ -167,8 +167,8 @@ var ErrBLSKeyNotStaked = errors.New("bls key not staked") // ErrMissingDeployedSC signals that a delegation referenced an un-deployed contract var ErrMissingDeployedSC = errors.New("missing deployed SC") -// ErrNilGeneralSettingsConfig signals that a nil general settings config was provided -var ErrNilGeneralSettingsConfig = errors.New("nil general settings config") - // ErrNilEpochConfig signals that a nil epoch config was provided var ErrNilEpochConfig = errors.New("nil epoch config") + +// ErrNilGasSchedule signals that an operation has been attempted with a nil gas schedule +var ErrNilGasSchedule = errors.New("nil GasSchedule") diff --git a/genesis/process/genesisBlockCreator.go b/genesis/process/genesisBlockCreator.go index ba898b40414..f6a1ee38eb4 100644 --- a/genesis/process/genesisBlockCreator.go +++ b/genesis/process/genesisBlockCreator.go @@ -204,6 +204,9 @@ func checkArgumentsForBlockCreator(arg ArgsGenesisBlockCreator) error { if arg.EpochConfig == nil { return genesis.ErrNilEpochConfig } + if arg.GasSchedule == nil { + return genesis.ErrNilGasSchedule + } return nil } diff --git a/integrationTests/mock/p2pAntifloodHandlerStub.go b/integrationTests/mock/p2pAntifloodHandlerStub.go index bda3da406d5..c181d10909d 100644 --- a/integrationTests/mock/p2pAntifloodHandlerStub.go 
+++ b/integrationTests/mock/p2pAntifloodHandlerStub.go @@ -16,81 +16,85 @@ type P2PAntifloodHandlerStub struct { SetDebuggerCalled func(debugger process.AntifloodDebugger) error BlacklistPeerCalled func(peer core.PeerID, reason string, duration time.Duration) IsOriginatorEligibleForTopicCalled func(pid core.PeerID, topic string) error + SetPeerValidatorMapperCalled func(validatorMapper process.PeerValidatorMapper) error } // CanProcessMessage - -func (p2pahs *P2PAntifloodHandlerStub) CanProcessMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { - if p2pahs.CanProcessMessageCalled == nil { +func (stub *P2PAntifloodHandlerStub) CanProcessMessage(message p2p.MessageP2P, fromConnectedPeer core.PeerID) error { + if stub.CanProcessMessageCalled == nil { return nil } - return p2pahs.CanProcessMessageCalled(message, fromConnectedPeer) + return stub.CanProcessMessageCalled(message, fromConnectedPeer) } // IsOriginatorEligibleForTopic - -func (p2pahs *P2PAntifloodHandlerStub) IsOriginatorEligibleForTopic(pid core.PeerID, topic string) error { - if p2pahs.IsOriginatorEligibleForTopicCalled != nil { - return p2pahs.IsOriginatorEligibleForTopicCalled(pid, topic) +func (stub *P2PAntifloodHandlerStub) IsOriginatorEligibleForTopic(pid core.PeerID, topic string) error { + if stub.IsOriginatorEligibleForTopicCalled != nil { + return stub.IsOriginatorEligibleForTopicCalled(pid, topic) } return nil } // CanProcessMessagesOnTopic - -func (p2pahs *P2PAntifloodHandlerStub) CanProcessMessagesOnTopic(peer core.PeerID, topic string, numMessages uint32, totalSize uint64, sequence []byte) error { - if p2pahs.CanProcessMessagesOnTopicCalled == nil { +func (stub *P2PAntifloodHandlerStub) CanProcessMessagesOnTopic(peer core.PeerID, topic string, numMessages uint32, totalSize uint64, sequence []byte) error { + if stub.CanProcessMessagesOnTopicCalled == nil { return nil } - return p2pahs.CanProcessMessagesOnTopicCalled(peer, topic, numMessages, totalSize, sequence) + return 
stub.CanProcessMessagesOnTopicCalled(peer, topic, numMessages, totalSize, sequence) } // ApplyConsensusSize - -func (p2pahs *P2PAntifloodHandlerStub) ApplyConsensusSize(size int) { - if p2pahs.ApplyConsensusSizeCalled != nil { - p2pahs.ApplyConsensusSizeCalled(size) +func (stub *P2PAntifloodHandlerStub) ApplyConsensusSize(size int) { + if stub.ApplyConsensusSizeCalled != nil { + stub.ApplyConsensusSizeCalled(size) } } // SetDebugger - -func (p2pahs *P2PAntifloodHandlerStub) SetDebugger(debugger process.AntifloodDebugger) error { - if p2pahs.SetDebuggerCalled != nil { - return p2pahs.SetDebuggerCalled(debugger) +func (stub *P2PAntifloodHandlerStub) SetDebugger(debugger process.AntifloodDebugger) error { + if stub.SetDebuggerCalled != nil { + return stub.SetDebuggerCalled(debugger) } return nil } // BlacklistPeer - -func (p2pahs *P2PAntifloodHandlerStub) BlacklistPeer(peer core.PeerID, reason string, duration time.Duration) { - if p2pahs.BlacklistPeerCalled != nil { - p2pahs.BlacklistPeerCalled(peer, reason, duration) +func (stub *P2PAntifloodHandlerStub) BlacklistPeer(peer core.PeerID, reason string, duration time.Duration) { + if stub.BlacklistPeerCalled != nil { + stub.BlacklistPeerCalled(peer, reason, duration) } } // ResetForTopic - -func (p2pahs *P2PAntifloodHandlerStub) ResetForTopic(_ string) { +func (stub *P2PAntifloodHandlerStub) ResetForTopic(_ string) { } // SetMaxMessagesForTopic - -func (p2pahs *P2PAntifloodHandlerStub) SetMaxMessagesForTopic(_ string, _ uint32) { +func (stub *P2PAntifloodHandlerStub) SetMaxMessagesForTopic(_ string, _ uint32) { } // SetPeerValidatorMapper - -func (p2pahs *P2PAntifloodHandlerStub) SetPeerValidatorMapper(_ process.PeerValidatorMapper) error { +func (stub *P2PAntifloodHandlerStub) SetPeerValidatorMapper(validatorMapper process.PeerValidatorMapper) error { + if stub.SetPeerValidatorMapperCalled != nil { + return stub.SetPeerValidatorMapperCalled(validatorMapper) + } return nil } // SetTopicsForAll - -func (p2pahs 
*P2PAntifloodHandlerStub) SetTopicsForAll(_ ...string) { +func (stub *P2PAntifloodHandlerStub) SetTopicsForAll(_ ...string) { } // Close - -func (p2pahs *P2PAntifloodHandlerStub) Close() error { +func (stub *P2PAntifloodHandlerStub) Close() error { return nil } // IsInterfaceNil - -func (p2pahs *P2PAntifloodHandlerStub) IsInterfaceNil() bool { - return p2pahs == nil +func (stub *P2PAntifloodHandlerStub) IsInterfaceNil() bool { + return stub == nil } diff --git a/process/track/baseBlockTrack.go b/process/track/baseBlockTrack.go index f4a264e5086..22eb1c86cc1 100644 --- a/process/track/baseBlockTrack.go +++ b/process/track/baseBlockTrack.go @@ -787,6 +787,9 @@ func checkTrackerNilParameters(arguments ArgBaseTracker) error { if check.IfNil(arguments.FeeHandler) { return process.ErrNilEconomicsData } + if check.IfNil(arguments.WhitelistHandler) { + return process.ErrNilWhiteListHandler + } return nil } diff --git a/process/track/baseBlockTrack_test.go b/process/track/baseBlockTrack_test.go index 7f633452179..8c919cd9ee7 100644 --- a/process/track/baseBlockTrack_test.go +++ b/process/track/baseBlockTrack_test.go @@ -307,6 +307,24 @@ func TestNewBlockTrack_ShouldErrNotarizedHeadersSliceIsNil(t *testing.T) { assert.True(t, check.IfNil(mbt)) } +func TestNewBlockTrack_ShouldErrNilWhitelistHandler(t *testing.T) { + t.Parallel() + + shardArguments := CreateShardTrackerMockArguments() + shardArguments.WhitelistHandler = nil + sbt, err := track.NewShardBlockTrack(shardArguments) + + assert.Equal(t, process.ErrNilWhiteListHandler, err) + assert.Nil(t, sbt) + + metaArguments := CreateMetaTrackerMockArguments() + metaArguments.WhitelistHandler = nil + mbt, err := track.NewMetaBlockTrack(metaArguments) + + assert.Equal(t, process.ErrNilWhiteListHandler, err) + assert.True(t, check.IfNil(mbt)) +} + func TestNewBlockTrack_ShouldWork(t *testing.T) { t.Parallel() diff --git a/testscommon/mainFactoryMocks/bootstrapComponentsStub.go 
b/testscommon/mainFactoryMocks/bootstrapComponentsStub.go index f76a59c7150..8c9d56dca7b 100644 --- a/testscommon/mainFactoryMocks/bootstrapComponentsStub.go +++ b/testscommon/mainFactoryMocks/bootstrapComponentsStub.go @@ -14,6 +14,7 @@ type BootstrapComponentsStub struct { BootstrapParams factory.BootstrapParamsHolder NodeRole core.NodeType ShCoordinator sharding.Coordinator + ShardCoordinatorCalled func() sharding.Coordinator HdrVersionHandler nodeFactory.HeaderVersionHandler VersionedHdrFactory nodeFactory.VersionedHeaderFactory HdrIntegrityVerifier nodeFactory.HeaderIntegrityVerifierHandler @@ -52,6 +53,9 @@ func (bcs *BootstrapComponentsStub) NodeType() core.NodeType { // ShardCoordinator - func (bcs *BootstrapComponentsStub) ShardCoordinator() sharding.Coordinator { + if bcs.ShardCoordinatorCalled != nil { + return bcs.ShardCoordinatorCalled() + } return bcs.ShCoordinator } From 26fb309515b27574639c11a3a8a818b08ff41c7d Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Thu, 13 Apr 2023 17:05:21 +0300 Subject: [PATCH 093/221] tests for newShardBlockProcessor --- factory/processing/blockProcessorCreator.go | 6 +- factory/processing/processComponents_test.go | 226 ++++++++++++------- 2 files changed, 142 insertions(+), 90 deletions(-) diff --git a/factory/processing/blockProcessorCreator.go b/factory/processing/blockProcessorCreator.go index 72ebe28491e..6bcd309f3be 100644 --- a/factory/processing/blockProcessorCreator.go +++ b/factory/processing/blockProcessorCreator.go @@ -436,13 +436,11 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( return nil, err } - blockProcessorComponents := &blockProcessorAndVmFactories{ + return &blockProcessorAndVmFactories{ blockProcessor: blockProcessor, vmFactoryForTxSimulate: vmFactoryTxSimulator, vmFactoryForProcessing: vmFactory, - } - - return blockProcessorComponents, nil + }, nil } func (pcf *processComponentsFactory) newMetaBlockProcessor( diff --git a/factory/processing/processComponents_test.go 
b/factory/processing/processComponents_test.go index df5b087d2b2..87cb3a83d7f 100644 --- a/factory/processing/processComponents_test.go +++ b/factory/processing/processComponents_test.go @@ -691,20 +691,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { } testCreateWithArgs(t, args, "shard coordinator") }) - t.Run("GetStorer TxLogsUnit fails should error", func(t *testing.T) { - t.Parallel() - - args := createMockProcessComponentsFactoryArgs() - args.ImportDBConfig.IsImportDBMode = true // coverage - dataCompStub, ok := args.Data.(*testsMocks.DataComponentsStub) - require.True(t, ok) - dataCompStub.Store = &storageStubs.ChainStorerStub{ - GetStorerCalled: func(unitType retriever.UnitType) (storage.Storer, error) { - return nil, expectedErr - }, - } - testCreateWithArgs(t, args, expectedErr.Error()) - }) + t.Run("GetStorer TxLogsUnit fails should error", testWithMissingStorer(0, retriever.TxLogsUnit)) t.Run("NewResolversFinder fails should error", testWithNilMarshaller(5, "Marshalizer")) t.Run("generateGenesisHeadersAndApplyInitialBalances fails due to invalid GenesisNodePrice should error", func(t *testing.T) { t.Parallel() @@ -723,6 +710,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { t.Parallel() args := createMockProcessComponentsFactoryArgs() + args.ImportDBConfig.IsImportDBMode = true // coverage dataCompStub, ok := args.Data.(*testsMocks.DataComponentsStub) require.True(t, ok) blockChainStub, ok := dataCompStub.BlockChain.(*testscommon.ChainHandlerStub) @@ -744,23 +732,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { } testCreateWithArgs(t, args, expectedErr.Error()) }) - t.Run("NewValidatorsProvider fails should error", func(t *testing.T) { - t.Parallel() - - args := createMockProcessComponentsFactoryArgs() - coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) - pubKeyConv := args.CoreData.ValidatorPubKeyConverter() - cnt := 0 - coreCompStub.ValidatorPubKeyConverterCalled = func() 
core.PubkeyConverter { - cnt++ - if cnt > 2 { - return nil - } - return pubKeyConv - } - args.CoreData = coreCompStub - testCreateWithArgs(t, args, "pubkey converter") - }) + t.Run("NewValidatorsProvider fails should error", testWithNilPubKeyConv(2, "pubkey converter")) t.Run("newEpochStartTrigger fails due to invalid shard should error", testWithInvalidShard(16, "error creating new start of epoch trigger because of invalid shard id")) t.Run("newEpochStartTrigger fails due to NewHeaderValidator failure should error", testWithNilMarshaller(46, "Marshalizer")) @@ -859,27 +831,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { args.CoreData = coreCompStub testCreateWithArgs(t, args, expectedErr.Error()) }) - t.Run("GetStorer BootstrapUnit fails should error", func(t *testing.T) { - t.Parallel() - - args := createMockProcessComponentsFactoryArgs() - dataCompStub, ok := args.Data.(*testsMocks.DataComponentsStub) - require.True(t, ok) - store := args.Data.StorageService() - cnt := 0 - dataCompStub.Store = &storageStubs.ChainStorerStub{ - GetStorerCalled: func(unitType retriever.UnitType) (storage.Storer, error) { - if unitType == retriever.BootstrapUnit { - cnt++ - if cnt > 2 { - return nil, expectedErr - } - } - return store.GetStorer(unitType) - }, - } - testCreateWithArgs(t, args, expectedErr.Error()) - }) + t.Run("GetStorer TxLogsUnit fails should error", testWithMissingStorer(2, retriever.BootstrapUnit)) t.Run("NewBootstrapStorer fails should error", testWithNilMarshaller(50, "Marshalizer")) t.Run("NewHeaderValidator fails should error", testWithNilMarshaller(51, "Marshalizer")) t.Run("newBlockTracker fails due to invalid shard should error", @@ -948,45 +900,58 @@ func TestProcessComponentsFactory_Create(t *testing.T) { args.Config.VMOutputCacher.Type = "invalid" testCreateWithArgs(t, args, "cache type") }) - t.Run("GetStorer ScheduledSCRsUnit fails should error", func(t *testing.T) { - t.Parallel() - - args := createMockProcessComponentsFactoryArgs() - 
dataCompStub, ok := args.Data.(*testsMocks.DataComponentsStub) - require.True(t, ok) - store := args.Data.StorageService() - dataCompStub.Store = &storageStubs.ChainStorerStub{ - GetStorerCalled: func(unitType retriever.UnitType) (storage.Storer, error) { - if unitType == retriever.ScheduledSCRsUnit { - return nil, expectedErr - } - return store.GetStorer(unitType) - }, - } - testCreateWithArgs(t, args, expectedErr.Error()) - }) + t.Run("GetStorer TxLogsUnit fails should error", testWithMissingStorer(0, retriever.ScheduledSCRsUnit)) t.Run("NewScheduledTxsExecution fails should error", testWithNilMarshaller(104, "Marshalizer")) t.Run("NewESDTDataStorage fails should error", testWithNilMarshaller(105, "Marshalizer")) t.Run("NewReceiptsRepository fails should error", testWithNilMarshaller(106, "marshalizer")) t.Run("newBlockProcessor fails due to invalid shard should error", testWithInvalidShard(31, "could not create block processor")) - t.Run("NewNodesSetupChecker fails should error", func(t *testing.T) { - t.Parallel() - - args := createMockProcessComponentsFactoryArgs() - coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) - pubKeyConv := args.CoreData.ValidatorPubKeyConverter() - cnt := 0 - coreCompStub.ValidatorPubKeyConverterCalled = func() core.PubkeyConverter { - cnt++ - if cnt > 5 { - return nil - } - return pubKeyConv - } - args.CoreData = coreCompStub - testCreateWithArgs(t, args, "pubkey converter") - }) + t.Run("newShardBlockProcessor: NewESDTTransferParser fails should error", + testWithNilMarshaller(107, "marshaller")) + t.Run("newShardBlockProcessor: createBuiltInFunctionContainer fails should error", + testWithNilAddressPubKeyConv(46, "public key converter")) + t.Run("newShardBlockProcessor: createVMFactoryShard fails due to NewBlockChainHookImpl failure should error", + testWithNilAddressPubKeyConv(47, "pubkey converter")) + t.Run("newShardBlockProcessor: NewIntermediateProcessorsContainerFactory fails should 
error", + testWithNilMarshaller(110, "Marshalizer")) + t.Run("newShardBlockProcessor: NewTxTypeHandler fails should error", + testWithNilAddressPubKeyConv(49, "pubkey converter")) + t.Run("newShardBlockProcessor: NewGasComputation fails should error", + testWithNilEnableEpochsHandler(13, "enable epochs handler")) + t.Run("newShardBlockProcessor: NewSmartContractProcessor fails should error", + testWithNilAddressPubKeyConv(50, "pubkey converter")) + t.Run("newShardBlockProcessor: NewRewardTxProcessor fails should error", + testWithNilAddressPubKeyConv(51, "pubkey converter")) + t.Run("newShardBlockProcessor: NewTxProcessor fails should error", + testWithNilAddressPubKeyConv(52, "pubkey converter")) + t.Run("newShardBlockProcessor: createShardTxSimulatorProcessor fails due to NewIntermediateProcessorsContainerFactory failure should error", + testWithNilAddressPubKeyConv(53, "pubkey converter")) + t.Run("newShardBlockProcessor: createShardTxSimulatorProcessor fails due to createBuiltInFunctionContainer failure should error", + testWithNilAddressPubKeyConv(54, "public key converter")) + t.Run("newShardBlockProcessor: createOutportDataProvider fails due to missing TransactionUnit should error", + testWithMissingStorer(3, retriever.TransactionUnit)) + t.Run("newShardBlockProcessor: createOutportDataProvider fails due to missing MiniBlockUnit should error", + testWithMissingStorer(4, retriever.MiniBlockUnit)) + t.Run("newShardBlockProcessor: NewShardProcessor fails should error", + testWithNilEnableEpochsHandler(23, "enable epochs handler")) + t.Run("newShardBlockProcessor: attachProcessDebugger fails should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Config.Debug.Process.Enabled = true + args.Config.Debug.Process.PollingTimeInSeconds = 0 + testCreateWithArgs(t, args, "PollingTimeInSeconds") + }) + t.Run("newShardBlockProcessor: NewBlockSizeComputation fails should error", + testWithNilMarshaller(116, 
"Marshalizer")) + t.Run("newShardBlockProcessor: NewPreProcessorsContainerFactory fails should error", + testWithNilMarshaller(117, "Marshalizer")) + t.Run("newShardBlockProcessor: NewPrintDoubleTransactionsDetector fails should error", + testWithNilMarshaller(118, "Marshalizer")) + t.Run("newShardBlockProcessor: NewTransactionCoordinator fails should error", + testWithNilMarshaller(119, "Marshalizer")) + + t.Run("NewNodesSetupChecker fails should error", testWithNilPubKeyConv(5, "pubkey converter")) t.Run("nodesSetupChecker.Check fails should error", func(t *testing.T) { t.Parallel() @@ -1227,6 +1192,7 @@ func testWithNilMarshaller(nilStep int, expectedErrSubstr string) func(t *testin step := 0 coreCompStub.InternalMarshalizerCalled = func() marshal.Marshalizer { step++ + println(step) if step > nilStep { return nil } @@ -1237,6 +1203,94 @@ func testWithNilMarshaller(nilStep int, expectedErrSubstr string) func(t *testin } } +func testWithNilPubKeyConv(nilStep int, expectedErrSubstr string) func(t *testing.T) { + return func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) + pubKeyConv := args.CoreData.ValidatorPubKeyConverter() + step := 0 + coreCompStub.ValidatorPubKeyConverterCalled = func() core.PubkeyConverter { + step++ + if step > nilStep { + return nil + } + return pubKeyConv + } + args.CoreData = coreCompStub + testCreateWithArgs(t, args, expectedErrSubstr) + } +} + +func testWithNilAddressPubKeyConv(nilStep int, expectedErrSubstr string) func(t *testing.T) { + return func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) + pubKeyConv := args.CoreData.AddressPubKeyConverter() + step := 0 + coreCompStub.AddressPubKeyConverterCalled = func() core.PubkeyConverter { + step++ + println(step) + if step > nilStep { + 
return nil + } + return pubKeyConv + } + args.CoreData = coreCompStub + testCreateWithArgs(t, args, expectedErrSubstr) + } +} + +func testWithNilEnableEpochsHandler(nilStep int, expectedErrSubstr string) func(t *testing.T) { + return func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) + enableEpochsHandler := coreCompStub.EnableEpochsHandler() + step := 0 + coreCompStub.EnableEpochsHandlerCalled = func() common.EnableEpochsHandler { + step++ + println(step) + if step > nilStep { + return nil + } + return enableEpochsHandler + } + args.CoreData = coreCompStub + testCreateWithArgs(t, args, expectedErrSubstr) + } +} + +func testWithMissingStorer(failStep int, missingUnitType retriever.UnitType) func(t *testing.T) { + return func(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("expected error") + args := createMockProcessComponentsFactoryArgs() + dataCompStub, ok := args.Data.(*testsMocks.DataComponentsStub) + require.True(t, ok) + store := args.Data.StorageService() + cnt := 0 + dataCompStub.Store = &storageStubs.ChainStorerStub{ + GetStorerCalled: func(unitType retriever.UnitType) (storage.Storer, error) { + if unitType == missingUnitType { + cnt++ + println(cnt) + if cnt > failStep { + return nil, expectedErr + } + } + return store.GetStorer(unitType) + }, + } + testCreateWithArgs(t, args, expectedErr.Error()) + } +} + func testWithInvalidShard(failingStep int, expectedErrSubstr string) func(t *testing.T) { return func(t *testing.T) { t.Parallel() From d829b0c0c9759a82f62a32ec6450561f00957383 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Thu, 13 Apr 2023 18:25:05 +0300 Subject: [PATCH 094/221] more tests on processComponents --- factory/processing/processComponents_test.go | 239 ++++++++++++------ .../factory/metachain/vmContainerFactory.go | 2 +- .../metachain/vmContainerFactory_test.go | 2 +- 
testscommon/stateComponentsMock.go | 16 +- 4 files changed, 176 insertions(+), 83 deletions(-) diff --git a/factory/processing/processComponents_test.go b/factory/processing/processComponents_test.go index 87cb3a83d7f..dfa2992a6ab 100644 --- a/factory/processing/processComponents_test.go +++ b/factory/processing/processComponents_test.go @@ -56,6 +56,12 @@ import ( "github.com/stretchr/testify/require" ) +const ( + unreachableStep = 10000 + blockProcessorOnMetaStep = 31 + testingProtocolSustainabilityAddress = "erd1932eft30w753xyvme8d49qejgkjc09n5e49w4mwdjtm0neld797su0dlxp" +) + func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFactoryArgs { gasSchedule, _ := common.LoadGasScheduleConfig("../../cmd/node/config/gasSchedules/gasScheduleV1.toml") addrPubKeyConv, _ := factory.NewPubkeyConverter(config.PubkeyConfig{ @@ -166,8 +172,12 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto return 2 }, }, - EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, - EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, + EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, + EconomicsHandler: &economicsmocks.EconomicsHandlerStub{ + ProtocolSustainabilityAddressCalled: func() string { + return testingProtocolSustainabilityAddress + }, + }, Hash: &testscommon.HasherStub{}, TxVersionCheckHandler: &testscommon.TxVersionCheckerStub{}, RatingHandler: &testscommon.RaterMock{}, @@ -180,6 +190,7 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto HardforkTriggerPubKeyField: []byte("hardfork pub key"), WasmVMChangeLockerInternal: &sync.RWMutex{}, NodeTypeProviderField: &nodeTypeProviderMock.NodeTypeProviderStub{}, + RatingsConfig: &testscommon.RatingsInfoMock{}, }, Crypto: &testsMocks.CryptoComponentsStub{ BlKeyGen: &cryptoMocks.KeyGenStub{}, @@ -194,6 +205,7 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto TxKeyGen: &cryptoMocks.KeyGenStub{}, TxSig: 
&cryptoMocks.SingleSignerStub{}, PeerSignHandler: &cryptoMocks.PeerSignatureHandlerStub{}, + MsgSigVerifier: &testscommon.MessageSignVerifierMock{}, }, State: &testscommon.StateComponentsMock{ Accounts: &state.AccountsStub{ @@ -580,8 +592,8 @@ func TestProcessComponentsFactory_Create(t *testing.T) { args.PrefConfigs.FullArchive = true testCreateWithArgs(t, args, "rounds per epoch") }) - t.Run("NewFallbackHeaderValidator fails should error", testWithNilMarshaller(1, "Marshalizer")) - t.Run("NewHeaderSigVerifier fails should error", testWithNilMarshaller(2, "Marshalizer")) + t.Run("NewFallbackHeaderValidator fails should error", testWithNilMarshaller(1, "Marshalizer", unreachableStep)) + t.Run("NewHeaderSigVerifier fails should error", testWithNilMarshaller(2, "Marshalizer", unreachableStep)) t.Run("createNetworkShardingCollector fails due to invalid PublicKeyPeerId config should error", func(t *testing.T) { t.Parallel() @@ -623,12 +635,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { args := createMockProcessComponentsFactoryArgs() args.ImportDBConfig.IsImportDBMode = true args.Config.ShardHdrNonceHashStorage.Cache.Type = "invalid" - bootstrapCompStub, ok := args.BootstrapComponents.(*mainFactoryMocks.BootstrapComponentsStub) - require.True(t, ok) - bootstrapCompStub.ShCoordinator = &testscommon.ShardsCoordinatorMock{ - NoShards: 2, - CurrentShard: common.MetachainShardId, - } + updateShardCoordinatorForMetaAtStep(t, args, 0) testCreateWithArgs(t, args, "ShardHdrNonceHashStorage") }) t.Run("newStorageResolver fails due to CreateForMeta failure should error", func(t *testing.T) { @@ -652,12 +659,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { t.Parallel() args := createMockProcessComponentsFactoryArgs() - bootstrapCompStub, ok := args.BootstrapComponents.(*mainFactoryMocks.BootstrapComponentsStub) - require.True(t, ok) - bootstrapCompStub.ShCoordinator = &testscommon.ShardsCoordinatorMock{ - NoShards: 2, - CurrentShard: 
common.MetachainShardId, - } + updateShardCoordinatorForMetaAtStep(t, args, 0) coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) cnt := 0 coreCompStub.InternalMarshalizerCalled = func() marshal.Marshalizer { @@ -670,7 +672,8 @@ func TestProcessComponentsFactory_Create(t *testing.T) { args.CoreData = coreCompStub testCreateWithArgs(t, args, "marshalizer") }) - t.Run("newShardResolverContainerFactory fails due to NewSimpleDataPacker failure should error", testWithNilMarshaller(3, "marshalizer")) + t.Run("newShardResolverContainerFactory fails due to NewSimpleDataPacker failure should error", + testWithNilMarshaller(3, "marshalizer", unreachableStep)) t.Run("NewResolversFinder fails should error", func(t *testing.T) { t.Parallel() @@ -691,8 +694,8 @@ func TestProcessComponentsFactory_Create(t *testing.T) { } testCreateWithArgs(t, args, "shard coordinator") }) - t.Run("GetStorer TxLogsUnit fails should error", testWithMissingStorer(0, retriever.TxLogsUnit)) - t.Run("NewResolversFinder fails should error", testWithNilMarshaller(5, "Marshalizer")) + t.Run("GetStorer TxLogsUnit fails should error", testWithMissingStorer(0, retriever.TxLogsUnit, unreachableStep)) + t.Run("NewResolversFinder fails should error", testWithNilMarshaller(5, "Marshalizer", unreachableStep)) t.Run("generateGenesisHeadersAndApplyInitialBalances fails due to invalid GenesisNodePrice should error", func(t *testing.T) { t.Parallel() @@ -703,7 +706,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { testCreateWithArgs(t, args, "invalid genesis node price") }) t.Run("generateGenesisHeadersAndApplyInitialBalances fails due to NewGenesisBlockCreator failure should error", - testWithNilMarshaller(6, "Marshalizer")) + testWithNilMarshaller(6, "Marshalizer", unreachableStep)) t.Run("setGenesisHeader fails due to invalid shard should error", testWithInvalidShard(8, "genesis block does not exist")) t.Run("newValidatorStatisticsProcessor fails due to nil genesis 
header should error", func(t *testing.T) { @@ -720,7 +723,8 @@ func TestProcessComponentsFactory_Create(t *testing.T) { } testCreateWithArgs(t, args, errorsMx.ErrGenesisBlockNotInitialized.Error()) }) - t.Run("indexGenesisBlocks fails due to CalculateHash failure should error", testWithNilMarshaller(41, "marshalizer")) + t.Run("indexGenesisBlocks fails due to CalculateHash failure should error", + testWithNilMarshaller(41, "marshalizer", unreachableStep)) t.Run("indexGenesisBlocks fails due to GenerateInitialTransactions failure should error", func(t *testing.T) { t.Parallel() @@ -732,10 +736,12 @@ func TestProcessComponentsFactory_Create(t *testing.T) { } testCreateWithArgs(t, args, expectedErr.Error()) }) - t.Run("NewValidatorsProvider fails should error", testWithNilPubKeyConv(2, "pubkey converter")) + t.Run("NewValidatorsProvider fails should error", + testWithNilPubKeyConv(2, "pubkey converter", unreachableStep)) t.Run("newEpochStartTrigger fails due to invalid shard should error", testWithInvalidShard(16, "error creating new start of epoch trigger because of invalid shard id")) - t.Run("newEpochStartTrigger fails due to NewHeaderValidator failure should error", testWithNilMarshaller(46, "Marshalizer")) + t.Run("newEpochStartTrigger fails due to NewHeaderValidator failure should error", + testWithNilMarshaller(46, "Marshalizer", unreachableStep)) t.Run("newEpochStartTrigger fails due to NewPeerMiniBlockSyncer failure should error", func(t *testing.T) { t.Parallel() @@ -765,17 +771,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { t.Parallel() args := createMockProcessComponentsFactoryArgs() - bootstrapCompStub, ok := args.BootstrapComponents.(*mainFactoryMocks.BootstrapComponentsStub) - require.True(t, ok) - cntShardC := 0 - bootstrapCompStub.ShardCoordinatorCalled = func() sharding.Coordinator { - cntShardC++ - shardC := testscommon.NewMultiShardsCoordinatorMock(2) - if cntShardC > 16 { - shardC.CurrentShard = common.MetachainShardId - } - return 
shardC - } + updateShardCoordinatorForMetaAtStep(t, args, 16) dataCompStub, ok := args.Data.(*testsMocks.DataComponentsStub) require.True(t, ok) blockChainStub, ok := dataCompStub.BlockChain.(*testscommon.ChainHandlerStub) @@ -792,7 +788,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { }) t.Run("newEpochStartTrigger fails due to invalid shard should error", testWithInvalidShard(17, "genesis block does not exist")) - t.Run("NewHeaderValidator fails should error", testWithNilMarshaller(48, "marshalizer")) + t.Run("NewHeaderValidator fails should error", testWithNilMarshaller(48, "marshalizer", unreachableStep)) t.Run("prepareGenesisBlock fails due to CalculateHash failure should error", func(t *testing.T) { t.Parallel() @@ -831,9 +827,9 @@ func TestProcessComponentsFactory_Create(t *testing.T) { args.CoreData = coreCompStub testCreateWithArgs(t, args, expectedErr.Error()) }) - t.Run("GetStorer TxLogsUnit fails should error", testWithMissingStorer(2, retriever.BootstrapUnit)) - t.Run("NewBootstrapStorer fails should error", testWithNilMarshaller(50, "Marshalizer")) - t.Run("NewHeaderValidator fails should error", testWithNilMarshaller(51, "Marshalizer")) + t.Run("GetStorer TxLogsUnit fails should error", testWithMissingStorer(2, retriever.BootstrapUnit, unreachableStep)) + t.Run("NewBootstrapStorer fails should error", testWithNilMarshaller(50, "Marshalizer", unreachableStep)) + t.Run("NewHeaderValidator fails should error", testWithNilMarshaller(51, "Marshalizer", unreachableStep)) t.Run("newBlockTracker fails due to invalid shard should error", testWithInvalidShard(19, "could not create block tracker")) t.Run("NewMiniBlocksPoolsCleaner fails should error", func(t *testing.T) { @@ -900,40 +896,50 @@ func TestProcessComponentsFactory_Create(t *testing.T) { args.Config.VMOutputCacher.Type = "invalid" testCreateWithArgs(t, args, "cache type") }) - t.Run("GetStorer TxLogsUnit fails should error", testWithMissingStorer(0, retriever.ScheduledSCRsUnit)) - 
t.Run("NewScheduledTxsExecution fails should error", testWithNilMarshaller(104, "Marshalizer")) - t.Run("NewESDTDataStorage fails should error", testWithNilMarshaller(105, "Marshalizer")) - t.Run("NewReceiptsRepository fails should error", testWithNilMarshaller(106, "marshalizer")) + t.Run("GetStorer TxLogsUnit fails should error", + testWithMissingStorer(0, retriever.ScheduledSCRsUnit, unreachableStep)) + t.Run("NewScheduledTxsExecution fails should error", + testWithNilMarshaller(104, "Marshalizer", unreachableStep)) + t.Run("NewESDTDataStorage fails should error", + testWithNilMarshaller(105, "Marshalizer", unreachableStep)) + t.Run("NewReceiptsRepository fails should error", + testWithNilMarshaller(106, "marshalizer", unreachableStep)) t.Run("newBlockProcessor fails due to invalid shard should error", testWithInvalidShard(31, "could not create block processor")) + + // newShardBlockProcessor t.Run("newShardBlockProcessor: NewESDTTransferParser fails should error", - testWithNilMarshaller(107, "marshaller")) + testWithNilMarshaller(107, "marshaller", unreachableStep)) t.Run("newShardBlockProcessor: createBuiltInFunctionContainer fails should error", - testWithNilAddressPubKeyConv(46, "public key converter")) + testWithNilAddressPubKeyConv(46, "public key converter", unreachableStep)) t.Run("newShardBlockProcessor: createVMFactoryShard fails due to NewBlockChainHookImpl failure should error", - testWithNilAddressPubKeyConv(47, "pubkey converter")) + testWithNilAddressPubKeyConv(47, "pubkey converter", unreachableStep)) t.Run("newShardBlockProcessor: NewIntermediateProcessorsContainerFactory fails should error", - testWithNilMarshaller(110, "Marshalizer")) + testWithNilMarshaller(110, "Marshalizer", unreachableStep)) t.Run("newShardBlockProcessor: NewTxTypeHandler fails should error", - testWithNilAddressPubKeyConv(49, "pubkey converter")) + testWithNilAddressPubKeyConv(49, "pubkey converter", unreachableStep)) t.Run("newShardBlockProcessor: NewGasComputation 
fails should error", - testWithNilEnableEpochsHandler(13, "enable epochs handler")) + testWithNilEnableEpochsHandler(13, "enable epochs handler", unreachableStep)) t.Run("newShardBlockProcessor: NewSmartContractProcessor fails should error", - testWithNilAddressPubKeyConv(50, "pubkey converter")) + testWithNilAddressPubKeyConv(50, "pubkey converter", unreachableStep)) t.Run("newShardBlockProcessor: NewRewardTxProcessor fails should error", - testWithNilAddressPubKeyConv(51, "pubkey converter")) + testWithNilAddressPubKeyConv(51, "pubkey converter", unreachableStep)) t.Run("newShardBlockProcessor: NewTxProcessor fails should error", - testWithNilAddressPubKeyConv(52, "pubkey converter")) + testWithNilAddressPubKeyConv(52, "pubkey converter", unreachableStep)) + t.Run("newShardBlockProcessor: createShardTxSimulatorProcessor fails due to NewReadOnlyAccountsDB failure should error", + testWithNilAccountsAdapterAPI(1, "accounts adapter", unreachableStep)) t.Run("newShardBlockProcessor: createShardTxSimulatorProcessor fails due to NewIntermediateProcessorsContainerFactory failure should error", - testWithNilAddressPubKeyConv(53, "pubkey converter")) + testWithNilAddressPubKeyConv(53, "pubkey converter", unreachableStep)) t.Run("newShardBlockProcessor: createShardTxSimulatorProcessor fails due to createBuiltInFunctionContainer failure should error", - testWithNilAddressPubKeyConv(54, "public key converter")) + testWithNilAddressPubKeyConv(54, "public key converter", unreachableStep)) + t.Run("newShardBlockProcessor: createShardTxSimulatorProcessor fails due to createVMFactoryShard failure should error", + testWithNilAddressPubKeyConv(55, "pubkey converter", unreachableStep)) t.Run("newShardBlockProcessor: createOutportDataProvider fails due to missing TransactionUnit should error", - testWithMissingStorer(3, retriever.TransactionUnit)) + testWithMissingStorer(3, retriever.TransactionUnit, unreachableStep)) t.Run("newShardBlockProcessor: createOutportDataProvider fails due 
to missing MiniBlockUnit should error", - testWithMissingStorer(4, retriever.MiniBlockUnit)) + testWithMissingStorer(4, retriever.MiniBlockUnit, unreachableStep)) t.Run("newShardBlockProcessor: NewShardProcessor fails should error", - testWithNilEnableEpochsHandler(23, "enable epochs handler")) + testWithNilEnableEpochsHandler(23, "enable epochs handler", unreachableStep)) t.Run("newShardBlockProcessor: attachProcessDebugger fails should error", func(t *testing.T) { t.Parallel() @@ -943,15 +949,63 @@ func TestProcessComponentsFactory_Create(t *testing.T) { testCreateWithArgs(t, args, "PollingTimeInSeconds") }) t.Run("newShardBlockProcessor: NewBlockSizeComputation fails should error", - testWithNilMarshaller(116, "Marshalizer")) + testWithNilMarshaller(116, "Marshalizer", unreachableStep)) t.Run("newShardBlockProcessor: NewPreProcessorsContainerFactory fails should error", - testWithNilMarshaller(117, "Marshalizer")) + testWithNilMarshaller(117, "Marshalizer", unreachableStep)) t.Run("newShardBlockProcessor: NewPrintDoubleTransactionsDetector fails should error", - testWithNilMarshaller(118, "Marshalizer")) + testWithNilMarshaller(118, "Marshalizer", unreachableStep)) t.Run("newShardBlockProcessor: NewTransactionCoordinator fails should error", - testWithNilMarshaller(119, "Marshalizer")) - - t.Run("NewNodesSetupChecker fails should error", testWithNilPubKeyConv(5, "pubkey converter")) + testWithNilMarshaller(119, "Marshalizer", unreachableStep)) + + // newMetaBlockProcessor, step for meta is 31 inside newBlockProcessor + t.Run("newMetaBlockProcessor: createBuiltInFunctionContainer fails should error", + testWithNilAddressPubKeyConv(46, "public key converter", blockProcessorOnMetaStep)) + t.Run("newMetaBlockProcessor: createVMFactoryMeta fails due to NewBlockChainHookImpl failure should error", + testWithNilAddressPubKeyConv(47, "pubkey converter", blockProcessorOnMetaStep)) + t.Run("newMetaBlockProcessor: NewIntermediateProcessorsContainerFactory fails should 
error", + testWithNilMarshaller(110, "Marshalizer", blockProcessorOnMetaStep)) + t.Run("newMetaBlockProcessor: NewESDTTransferParser fails should error", + testWithNilMarshaller(111, "marshaller", blockProcessorOnMetaStep)) + t.Run("newMetaBlockProcessor: NewTxTypeHandler fails should error", + testWithNilAddressPubKeyConv(49, "pubkey converter", blockProcessorOnMetaStep)) + t.Run("newMetaBlockProcessor: NewGasComputation fails should error", + testWithNilEnableEpochsHandler(13, "enable epochs handler", blockProcessorOnMetaStep)) + t.Run("newMetaBlockProcessor: NewSmartContractProcessor fails should error", + testWithNilAddressPubKeyConv(50, "pubkey converter", blockProcessorOnMetaStep)) + t.Run("newMetaBlockProcessor: NewMetaTxProcessor fails should error", + testWithNilAddressPubKeyConv(51, "pubkey converter", blockProcessorOnMetaStep)) + t.Run("newMetaBlockProcessor: createMetaTxSimulatorProcessor fails due to NewIntermediateProcessorsContainerFactory failure should error", + testWithNilAddressPubKeyConv(52, "pubkey converter", blockProcessorOnMetaStep)) + t.Run("newMetaBlockProcessor: createMetaTxSimulatorProcessor fails due to NewReadOnlyAccountsDB failure should error", + testWithNilAccountsAdapterAPI(1, "accounts adapter", blockProcessorOnMetaStep)) + t.Run("newMetaBlockProcessor: createMetaTxSimulatorProcessor fails due to createBuiltInFunctionContainer failure should error", + testWithNilAddressPubKeyConv(53, "public key converter", blockProcessorOnMetaStep)) + t.Run("newMetaBlockProcessor: createMetaTxSimulatorProcessor fails due to createVMFactoryMeta failure should error", + testWithNilAddressPubKeyConv(54, "pubkey converter", blockProcessorOnMetaStep)) + t.Run("newMetaBlockProcessor: createMetaTxSimulatorProcessor fails due to NewMetaTxProcessor failure second time should error", + testWithNilAddressPubKeyConv(55, "pubkey converter", blockProcessorOnMetaStep)) + t.Run("newMetaBlockProcessor: NewBlockSizeComputation fails should error", + 
testWithNilMarshaller(119, "Marshalizer", blockProcessorOnMetaStep)) + t.Run("newMetaBlockProcessor: NewPreProcessorsContainerFactory fails should error", + testWithNilMarshaller(120, "Marshalizer", blockProcessorOnMetaStep)) + t.Run("newMetaBlockProcessor: NewPrintDoubleTransactionsDetector fails should error", + testWithNilMarshaller(121, "Marshalizer", blockProcessorOnMetaStep)) + t.Run("newMetaBlockProcessor: NewTransactionCoordinator fails should error", + testWithNilMarshaller(122, "Marshalizer", blockProcessorOnMetaStep)) + t.Run("newMetaBlockProcessor: NewStakingToPeer fails should error", + testWithNilMarshaller(123, "Marshalizer", blockProcessorOnMetaStep)) + t.Run("newMetaBlockProcessor: NewEpochStartData fails should error", + testWithNilMarshaller(124, "Marshalizer", blockProcessorOnMetaStep)) + t.Run("newMetaBlockProcessor: NewEndOfEpochEconomicsDataCreator fails should error", + testWithNilMarshaller(125, "marshalizer", blockProcessorOnMetaStep)) + t.Run("newMetaBlockProcessor: GetStorer RewardTransactionUnit fails should error", + testWithMissingStorer(1, retriever.RewardTransactionUnit, blockProcessorOnMetaStep)) + t.Run("newMetaBlockProcessor: GetStorer MiniBlockUnit fails should error", + testWithMissingStorer(4, retriever.MiniBlockUnit, blockProcessorOnMetaStep)) + t.Run("newMetaBlockProcessor: NewRewardsCreatorProxy fails should error", + testWithNilMarshaller(126, "marshalizer", blockProcessorOnMetaStep)) + + t.Run("NewNodesSetupChecker fails should error", testWithNilPubKeyConv(5, "pubkey converter", unreachableStep)) t.Run("nodesSetupChecker.Check fails should error", func(t *testing.T) { t.Parallel() @@ -993,8 +1047,8 @@ func TestProcessComponentsFactory_Create(t *testing.T) { } testCreateWithArgs(t, args, "messenger") }) - t.Run("NewReceiptsRepository fails should error", testWithNilMarshaller(123, "marshalizer")) - t.Run("NewTxsSenderWithAccumulator fails should error", testWithNilMarshaller(124, "Marshalizer")) + 
t.Run("NewReceiptsRepository fails should error", testWithNilMarshaller(123, "marshalizer", unreachableStep)) + t.Run("NewTxsSenderWithAccumulator fails should error", testWithNilMarshaller(124, "Marshalizer", unreachableStep)) t.Run("should work with indexAndReturnGenesisAccounts failing due to RootHash failure", func(t *testing.T) { t.Parallel() @@ -1141,9 +1195,8 @@ func TestProcessComponentsFactory_Create(t *testing.T) { shardCoordinator.CurrentShard = common.MetachainShardId processArgs := componentsMock.GetProcessComponentsFactoryArgs(shardCoordinator) - protocolSustainabilityAddress := "erd1932eft30w753xyvme8d49qejgkjc09n5e49w4mwdjtm0neld797su0dlxp" shardCoordinator.ComputeIdCalled = func(address []byte) uint32 { - protocolSustainabilityAddr, err := processArgs.CoreData.AddressPubKeyConverter().Decode(protocolSustainabilityAddress) + protocolSustainabilityAddr, err := processArgs.CoreData.AddressPubKeyConverter().Decode(testingProtocolSustainabilityAddress) require.NoError(t, err) if bytes.Equal(protocolSustainabilityAddr, address) { return 0 @@ -1183,7 +1236,7 @@ func fundGenesisWallets(t *testing.T, args processComp.ProcessComponentsFactoryA } } -func testWithNilMarshaller(nilStep int, expectedErrSubstr string) func(t *testing.T) { +func testWithNilMarshaller(nilStep int, expectedErrSubstr string, metaStep int) func(t *testing.T) { return func(t *testing.T) { t.Parallel() @@ -1192,18 +1245,18 @@ func testWithNilMarshaller(nilStep int, expectedErrSubstr string) func(t *testin step := 0 coreCompStub.InternalMarshalizerCalled = func() marshal.Marshalizer { step++ - println(step) if step > nilStep { return nil } return &testscommon.MarshalizerStub{} } args.CoreData = coreCompStub + updateShardCoordinatorForMetaAtStep(t, args, metaStep) testCreateWithArgs(t, args, expectedErrSubstr) } } -func testWithNilPubKeyConv(nilStep int, expectedErrSubstr string) func(t *testing.T) { +func testWithNilPubKeyConv(nilStep int, expectedErrSubstr string, metaStep int) func(t 
*testing.T) { return func(t *testing.T) { t.Parallel() @@ -1219,11 +1272,12 @@ func testWithNilPubKeyConv(nilStep int, expectedErrSubstr string) func(t *testin return pubKeyConv } args.CoreData = coreCompStub + updateShardCoordinatorForMetaAtStep(t, args, metaStep) testCreateWithArgs(t, args, expectedErrSubstr) } } -func testWithNilAddressPubKeyConv(nilStep int, expectedErrSubstr string) func(t *testing.T) { +func testWithNilAddressPubKeyConv(nilStep int, expectedErrSubstr string, metaStep int) func(t *testing.T) { return func(t *testing.T) { t.Parallel() @@ -1233,18 +1287,18 @@ func testWithNilAddressPubKeyConv(nilStep int, expectedErrSubstr string) func(t step := 0 coreCompStub.AddressPubKeyConverterCalled = func() core.PubkeyConverter { step++ - println(step) if step > nilStep { return nil } return pubKeyConv } args.CoreData = coreCompStub + updateShardCoordinatorForMetaAtStep(t, args, metaStep) testCreateWithArgs(t, args, expectedErrSubstr) } } -func testWithNilEnableEpochsHandler(nilStep int, expectedErrSubstr string) func(t *testing.T) { +func testWithNilEnableEpochsHandler(nilStep int, expectedErrSubstr string, metaStep int) func(t *testing.T) { return func(t *testing.T) { t.Parallel() @@ -1254,18 +1308,39 @@ func testWithNilEnableEpochsHandler(nilStep int, expectedErrSubstr string) func( step := 0 coreCompStub.EnableEpochsHandlerCalled = func() common.EnableEpochsHandler { step++ - println(step) if step > nilStep { return nil } return enableEpochsHandler } args.CoreData = coreCompStub + updateShardCoordinatorForMetaAtStep(t, args, metaStep) testCreateWithArgs(t, args, expectedErrSubstr) } } -func testWithMissingStorer(failStep int, missingUnitType retriever.UnitType) func(t *testing.T) { +func testWithNilAccountsAdapterAPI(nilStep int, expectedErrSubstr string, metaStep int) func(t *testing.T) { + return func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + stateCompStub, ok := 
args.State.(*testscommon.StateComponentsMock) + require.True(t, ok) + accountsAdapterAPI := stateCompStub.AccountsAdapterAPI() + step := 0 + stateCompStub.AccountsAdapterAPICalled = func() mxState.AccountsAdapter { + step++ + if step > nilStep { + return nil + } + return accountsAdapterAPI + } + updateShardCoordinatorForMetaAtStep(t, args, metaStep) + testCreateWithArgs(t, args, expectedErrSubstr) + } +} + +func testWithMissingStorer(failStep int, missingUnitType retriever.UnitType, metaStep int) func(t *testing.T) { return func(t *testing.T) { t.Parallel() @@ -1279,7 +1354,6 @@ func testWithMissingStorer(failStep int, missingUnitType retriever.UnitType) fun GetStorerCalled: func(unitType retriever.UnitType) (storage.Storer, error) { if unitType == missingUnitType { cnt++ - println(cnt) if cnt > failStep { return nil, expectedErr } @@ -1287,10 +1361,25 @@ func testWithMissingStorer(failStep int, missingUnitType retriever.UnitType) fun return store.GetStorer(unitType) }, } + updateShardCoordinatorForMetaAtStep(t, args, metaStep) testCreateWithArgs(t, args, expectedErr.Error()) } } +func updateShardCoordinatorForMetaAtStep(t *testing.T, args processComp.ProcessComponentsFactoryArgs, metaStep int) { + bootstrapCompStub, ok := args.BootstrapComponents.(*mainFactoryMocks.BootstrapComponentsStub) + require.True(t, ok) + step := 0 + bootstrapCompStub.ShardCoordinatorCalled = func() sharding.Coordinator { + step++ + shardC := testscommon.NewMultiShardsCoordinatorMock(2) + if step > metaStep { + shardC.CurrentShard = common.MetachainShardId + } + return shardC + } +} + func testWithInvalidShard(failingStep int, expectedErrSubstr string) func(t *testing.T) { return func(t *testing.T) { t.Parallel() diff --git a/process/factory/metachain/vmContainerFactory.go b/process/factory/metachain/vmContainerFactory.go index 665be533e17..0134cda878b 100644 --- a/process/factory/metachain/vmContainerFactory.go +++ b/process/factory/metachain/vmContainerFactory.go @@ -68,7 +68,7 @@ func 
NewVMContainerFactory(args ArgsNewVMContainerFactory) (*vmContainerFactory, return nil, fmt.Errorf("%w in NewVMContainerFactory", process.ErrNilEconomicsData) } if check.IfNil(args.MessageSignVerifier) { - return nil, fmt.Errorf("%w in NewVMContainerFactory", process.ErrNilKeyGen) + return nil, fmt.Errorf("%w in NewVMContainerFactory", vm.ErrNilMessageSignVerifier) } if check.IfNil(args.NodesConfigProvider) { return nil, fmt.Errorf("%w in NewVMContainerFactory", process.ErrNilNodesConfigProvider) diff --git a/process/factory/metachain/vmContainerFactory_test.go b/process/factory/metachain/vmContainerFactory_test.go index 9abc40c6375..ea1a12fb3ee 100644 --- a/process/factory/metachain/vmContainerFactory_test.go +++ b/process/factory/metachain/vmContainerFactory_test.go @@ -89,7 +89,7 @@ func TestNewVMContainerFactory_NilMessageSignVerifier(t *testing.T) { vmf, err := NewVMContainerFactory(argsNewVmContainerFactory) assert.True(t, check.IfNil(vmf)) - assert.True(t, errors.Is(err, process.ErrNilKeyGen)) + assert.True(t, errors.Is(err, vm.ErrNilMessageSignVerifier)) } func TestNewVMContainerFactory_NilNodesConfigProvider(t *testing.T) { diff --git a/testscommon/stateComponentsMock.go b/testscommon/stateComponentsMock.go index 15b11bb4ad0..9a08cb328df 100644 --- a/testscommon/stateComponentsMock.go +++ b/testscommon/stateComponentsMock.go @@ -7,12 +7,13 @@ import ( // StateComponentsMock - type StateComponentsMock struct { - PeersAcc state.AccountsAdapter - Accounts state.AccountsAdapter - AccountsAPI state.AccountsAdapter - AccountsRepo state.AccountsRepository - Tries common.TriesHolder - StorageManagers map[string]common.StorageManager + PeersAcc state.AccountsAdapter + Accounts state.AccountsAdapter + AccountsAPI state.AccountsAdapter + AccountsAdapterAPICalled func() state.AccountsAdapter + AccountsRepo state.AccountsRepository + Tries common.TriesHolder + StorageManagers map[string]common.StorageManager } // Create - @@ -42,6 +43,9 @@ func (scm 
*StateComponentsMock) AccountsAdapter() state.AccountsAdapter { // AccountsAdapterAPI - func (scm *StateComponentsMock) AccountsAdapterAPI() state.AccountsAdapter { + if scm.AccountsAdapterAPICalled != nil { + return scm.AccountsAdapterAPICalled() + } return scm.AccountsAPI } From 2a46bcca585028d651e4fcd81dd4b272f2565532 Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Wed, 19 Apr 2023 11:39:44 +0300 Subject: [PATCH 095/221] fix after review --- common/constants.go | 3 -- errors/closingError.go | 17 ---------- errors/errors.go | 7 ++-- errors/missingTrieNodeError.go | 7 ++-- .../preprocess/rewardTxPreProcessor_test.go | 3 +- .../preprocess/smartContractResults_test.go | 3 +- process/block/preprocess/transactions.go | 5 --- .../block/preprocess/transactionsV2_test.go | 3 +- process/block/preprocess/transactions_test.go | 2 +- process/block/shardblock.go | 4 --- process/coordinator/process.go | 4 --- process/rewardTransaction/process_test.go | 3 +- process/sync/export_test.go | 5 +++ process/sync/interface.go | 1 + process/sync/metablock.go | 4 +-- process/sync/shardblock.go | 20 +++++++++-- process/sync/shardblock_test.go | 20 +++++++++++ state/accountsDB.go | 3 +- trie/branchNode.go | 7 ++-- trie/branchNode_test.go | 8 ++--- trie/depthFirstSync.go | 3 +- trie/depthFirstSync_test.go | 3 +- trie/doubleListSync.go | 3 +- trie/doubleListSync_test.go | 3 +- trie/extensionNode.go | 7 ++-- trie/extensionNode_test.go | 8 ++--- trie/leafNode.go | 5 ++- trie/leafNode_test.go | 5 ++- trie/node.go | 6 ++-- trie/patriciaMerkleTrie.go | 2 +- trie/snapshotTrieStorageManager.go | 7 ++-- trie/sync.go | 3 +- trie/trieStorageManager.go | 13 ++++--- trie/trieStorageManagerInEpoch.go | 3 +- trie/trieStorageManager_test.go | 34 +++++++++++++++++-- update/genesis/import.go | 4 +-- 36 files changed, 129 insertions(+), 109 deletions(-) delete mode 100644 errors/closingError.go diff --git a/common/constants.go b/common/constants.go index dcf620095ac..71306020854 100644 --- 
a/common/constants.go +++ b/common/constants.go @@ -768,9 +768,6 @@ const HardforkResolversIdentifier = "hardfork resolver" // EpochStartInterceptorsIdentifier represents the identifier that is used in the start-in-epoch process const EpochStartInterceptorsIdentifier = "epoch start interceptor" -// GetNodeFromDBErrorString represents the string which is returned when a getting node from DB returns an error -const GetNodeFromDBErrorString = "getNodeFromDB error" - // TimeoutGettingTrieNodes defines the timeout in trie sync operation if no node is received const TimeoutGettingTrieNodes = 2 * time.Minute // to consider syncing a very large trie node of 64MB at ~1MB/s diff --git a/errors/closingError.go b/errors/closingError.go deleted file mode 100644 index 81d051990b6..00000000000 --- a/errors/closingError.go +++ /dev/null @@ -1,17 +0,0 @@ -package errors - -import ( - "strings" - - "github.com/multiversx/mx-chain-go/storage" -) - -// IsClosingError returns true if the provided error is used whenever the node is in the closing process -func IsClosingError(err error) bool { - if err == nil { - return false - } - - return strings.Contains(err.Error(), storage.ErrDBIsClosed.Error()) || - strings.Contains(err.Error(), ErrContextClosing.Error()) -} diff --git a/errors/errors.go b/errors/errors.go index 255ac1362c2..b897f2f1a6f 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -1,6 +1,8 @@ package errors -import "errors" +import ( + "errors" +) // ErrAccountsAdapterCreation signals that the accounts adapter cannot be created based on provided data var ErrAccountsAdapterCreation = errors.New("error creating accounts adapter") @@ -470,9 +472,6 @@ var ErrNilScheduledTxsExecutionHandler = errors.New("nil scheduled transactions // ErrNilScheduledProcessor signals that a nil scheduled processor was provided var ErrNilScheduledProcessor = errors.New("nil scheduled processor") -// ErrContextClosing signals that the parent context requested the closing of its children -var 
ErrContextClosing = errors.New("context closing") - // ErrNilTxsSender signals that a nil transactions sender has been provided var ErrNilTxsSender = errors.New("nil transactions sender has been provided") diff --git a/errors/missingTrieNodeError.go b/errors/missingTrieNodeError.go index ebb92003085..7ddfefbfcbc 100644 --- a/errors/missingTrieNodeError.go +++ b/errors/missingTrieNodeError.go @@ -3,7 +3,8 @@ package errors import ( "encoding/hex" "fmt" - "github.com/multiversx/mx-chain-go/common" + + "github.com/multiversx/mx-chain-core-go/core" ) // GetNodeFromDBErrWithKey defines a custom error for trie get node @@ -13,7 +14,7 @@ type GetNodeFromDBErrWithKey struct { dbIdentifier string } -// NewGetNodeFromDBErrWithKey will create a new instance of GetNodeFromDBErr +// NewGetNodeFromDBErrWithKey will create a new instance of GetNodeFromDBErrWithKey func NewGetNodeFromDBErrWithKey(key []byte, err error, id string) *GetNodeFromDBErrWithKey { return &GetNodeFromDBErrWithKey{ getErr: err, @@ -26,7 +27,7 @@ func NewGetNodeFromDBErrWithKey(key []byte, err error, id string) *GetNodeFromDB func (e *GetNodeFromDBErrWithKey) Error() string { return fmt.Sprintf( "%s: %s for key %v", - common.GetNodeFromDBErrorString, + core.GetNodeFromDBErrorString, e.getErr.Error(), hex.EncodeToString(e.key), ) diff --git a/process/block/preprocess/rewardTxPreProcessor_test.go b/process/block/preprocess/rewardTxPreProcessor_test.go index 9871ba22081..80b29223e34 100644 --- a/process/block/preprocess/rewardTxPreProcessor_test.go +++ b/process/block/preprocess/rewardTxPreProcessor_test.go @@ -9,7 +9,6 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/rewardTx" - "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/mock" @@ -684,7 +683,7 @@ func 
TestRewardTxPreprocessor_ProcessBlockTransactions(t *testing.T) { func TestRewardTxPreprocessor_ProcessBlockTransactionsMissingTrieNode(t *testing.T) { t.Parallel() - missingNodeErr := fmt.Errorf(common.GetNodeFromDBErrorString) + missingNodeErr := fmt.Errorf(core.GetNodeFromDBErrorString) txHash := testTxHash tdp := initDataPool() rtp, _ := NewRewardTxPreprocessor( diff --git a/process/block/preprocess/smartContractResults_test.go b/process/block/preprocess/smartContractResults_test.go index 1834156392e..af8b9ffaa14 100644 --- a/process/block/preprocess/smartContractResults_test.go +++ b/process/block/preprocess/smartContractResults_test.go @@ -12,7 +12,6 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" - "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/mock" @@ -1200,7 +1199,7 @@ func TestScrsPreprocessor_ProcessBlockTransactionsShouldWork(t *testing.T) { func TestScrsPreprocessor_ProcessBlockTransactionsMissingTrieNode(t *testing.T) { t.Parallel() - missingNodeErr := fmt.Errorf(common.GetNodeFromDBErrorString) + missingNodeErr := fmt.Errorf(core.GetNodeFromDBErrorString) tdp := initDataPool() requestTransaction := func(shardID uint32, txHashes [][]byte) {} scrPreproc, _ := NewSmartContractResultPreprocessor( diff --git a/process/block/preprocess/transactions.go b/process/block/preprocess/transactions.go index deb7dacb733..645ac0d8cf0 100644 --- a/process/block/preprocess/transactions.go +++ b/process/block/preprocess/transactions.go @@ -1089,11 +1089,6 @@ func (txs *transactions) CreateAndProcessMiniBlocks(haveTime func() bool, random if err != nil { log.Debug("createAndProcessMiniBlocksFromMe", "error", err.Error()) - - if core.IsGetNodeFromDBError(err) { - return nil, err - } - return 
make(block.MiniBlockSlice, 0), nil } diff --git a/process/block/preprocess/transactionsV2_test.go b/process/block/preprocess/transactionsV2_test.go index 28aeff4c8a5..72624deafb5 100644 --- a/process/block/preprocess/transactionsV2_test.go +++ b/process/block/preprocess/transactionsV2_test.go @@ -11,7 +11,6 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/smartContractResult" "github.com/multiversx/mx-chain-core-go/data/transaction" - "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/storage/txcache" @@ -757,7 +756,7 @@ func TestTransactions_CreateAndProcessMiniBlocksFromMeV2ShouldWork(t *testing.T) func TestTransactions_CreateAndProcessMiniBlocksFromMeV2MissingTrieNode(t *testing.T) { t.Parallel() - missingNodeErr := fmt.Errorf(common.GetNodeFromDBErrorString) + missingNodeErr := fmt.Errorf(core.GetNodeFromDBErrorString) preprocessor := createTransactionPreprocessor() preprocessor.txProcessor = &testscommon.TxProcessorMock{ ProcessTransactionCalled: func(transaction *transaction.Transaction) (vmcommon.ReturnCode, error) { diff --git a/process/block/preprocess/transactions_test.go b/process/block/preprocess/transactions_test.go index d51760840a7..69fd9d71c39 100644 --- a/process/block/preprocess/transactions_test.go +++ b/process/block/preprocess/transactions_test.go @@ -1156,7 +1156,7 @@ func TestTransactionPreprocessor_ProcessTxsToMeShouldUseCorrectSenderAndReceiver func TestTransactionPreprocessor_ProcessTxsToMeMissingTrieNode(t *testing.T) { t.Parallel() - missingNodeErr := fmt.Errorf(common.GetNodeFromDBErrorString) + missingNodeErr := fmt.Errorf(core.GetNodeFromDBErrorString) args := createDefaultTransactionsProcessorArgs() args.Accounts = &stateMock.AccountsStub{ diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 9ab6c655780..14faf2a8507 100644 --- 
a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -2026,10 +2026,6 @@ func (sp *shardProcessor) createMiniBlocks(haveTime func() bool, randomness []by log.Debug("elapsed time to create mbs to me", "time", elapsedTime) if err != nil { log.Debug("createAndProcessCrossMiniBlocksDstMe", "error", err.Error()) - - if core.IsGetNodeFromDBError(err) { - return nil, nil, err - } } if createAndProcessMBsDestMeInfo != nil { processedMiniBlocksDestMeInfo = createAndProcessMBsDestMeInfo.allProcessedMiniBlocksInfo diff --git a/process/coordinator/process.go b/process/coordinator/process.go index 29ca913d13f..32e75824e5c 100644 --- a/process/coordinator/process.go +++ b/process/coordinator/process.go @@ -720,10 +720,6 @@ func (tc *transactionCoordinator) CreateMbsAndProcessCrossShardTransactionsDstMe "error", errProc, ) - if core.IsGetNodeFromDBError(errProc) { - return nil, 0, false, err - } - continue } diff --git a/process/rewardTransaction/process_test.go b/process/rewardTransaction/process_test.go index c54848addaf..97112e792b3 100644 --- a/process/rewardTransaction/process_test.go +++ b/process/rewardTransaction/process_test.go @@ -8,7 +8,6 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/rewardTx" - "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/process/rewardTransaction" @@ -218,7 +217,7 @@ func TestRewardTxProcessor_ProcessRewardTransactionShouldWork(t *testing.T) { func TestRewardTxProcessor_ProcessRewardTransactionMissingTrieNode(t *testing.T) { t.Parallel() - missingNodeErr := fmt.Errorf(common.GetNodeFromDBErrorString) + missingNodeErr := fmt.Errorf(core.GetNodeFromDBErrorString) accountsDb := &stateMock.AccountsStub{ LoadAccountCalled: func(address []byte) (vmcommon.AccountHandler, error) { acc, _ := state.NewUserAccount(address) diff --git a/process/sync/export_test.go 
b/process/sync/export_test.go index 719e7599f9f..dae5be09c68 100644 --- a/process/sync/export_test.go +++ b/process/sync/export_test.go @@ -288,3 +288,8 @@ func (boot *baseBootstrap) IsInImportMode() bool { func (boot *baseBootstrap) ProcessWaitTime() time.Duration { return boot.processWaitTime } + +// UnwrapGetNodeFromDBErr - +func UnwrapGetNodeFromDBErr(wrappedErr error) getKeyHandler { + return unwrapGetNodeFromDBErr(wrappedErr) +} diff --git a/process/sync/interface.go b/process/sync/interface.go index e87d9c537d6..fb7e11e3b5f 100644 --- a/process/sync/interface.go +++ b/process/sync/interface.go @@ -32,6 +32,7 @@ type forkDetector interface { // getKeyHandler defines the behaviour of a component that can provide a trie node key and identifier type getKeyHandler interface { + Error() string GetKey() []byte GetIdentifier() string } diff --git a/process/sync/metablock.go b/process/sync/metablock.go index d8ca3cf4954..6b820235074 100644 --- a/process/sync/metablock.go +++ b/process/sync/metablock.go @@ -180,8 +180,8 @@ func (boot *MetaBootstrap) setLastEpochStartRound() { func (boot *MetaBootstrap) SyncBlock(ctx context.Context) error { err := boot.syncBlock() if core.IsGetNodeFromDBError(err) { - getNodeErr, ok := err.(getKeyHandler) - if !ok { + getNodeErr := unwrapGetNodeFromDBErr(err) + if getNodeErr == nil { return err } diff --git a/process/sync/shardblock.go b/process/sync/shardblock.go index 68a3c70f52d..55f6c7f6d84 100644 --- a/process/sync/shardblock.go +++ b/process/sync/shardblock.go @@ -2,6 +2,7 @@ package sync import ( "context" + "errors" "math" "github.com/multiversx/mx-chain-core-go/core" @@ -143,8 +144,8 @@ func (boot *ShardBootstrap) StartSyncingBlocks() { func (boot *ShardBootstrap) SyncBlock(ctx context.Context) error { err := boot.syncBlock() if core.IsGetNodeFromDBError(err) { - getNodeErr, ok := err.(getKeyHandler) - if !ok { + getNodeErr := unwrapGetNodeFromDBErr(err) + if getNodeErr == nil { return err } @@ -155,6 +156,21 @@ func (boot 
*ShardBootstrap) SyncBlock(ctx context.Context) error { return err } +func unwrapGetNodeFromDBErr(wrappedErr error) getKeyHandler { + errWithKeyHandler, ok := wrappedErr.(getKeyHandler) + for !ok { + if wrappedErr == nil { + return nil + } + + err := errors.Unwrap(wrappedErr) + errWithKeyHandler, ok = err.(getKeyHandler) + wrappedErr = err + } + + return errWithKeyHandler +} + // Close closes the synchronization loop func (boot *ShardBootstrap) Close() error { if check.IfNil(boot.baseBootstrap) { diff --git a/process/sync/shardblock_test.go b/process/sync/shardblock_test.go index 7c3f2ab1d9c..b0175890e43 100644 --- a/process/sync/shardblock_test.go +++ b/process/sync/shardblock_test.go @@ -2156,3 +2156,23 @@ func TestShardBootstrap_NilInnerBootstrapperClose(t *testing.T) { bootstrapper := &sync.ShardBootstrap{} assert.Nil(t, bootstrapper.Close()) } + +func TestUnwrapGetNodeFromDBErr(t *testing.T) { + t.Parallel() + + key := []byte("key") + identifier := "identifier" + err := fmt.Errorf("key not found") + + getNodeFromDbErr := commonErrors.NewGetNodeFromDBErrWithKey(key, err, identifier) + wrappedErr1 := fmt.Errorf("wrapped error 1: %w", getNodeFromDbErr) + wrappedErr2 := fmt.Errorf("wrapped error 2: %w", wrappedErr1) + wrappedErr3 := fmt.Errorf("wrapped error 3: %w", wrappedErr2) + + assert.Nil(t, sync.UnwrapGetNodeFromDBErr(nil)) + assert.Nil(t, sync.UnwrapGetNodeFromDBErr(err)) + assert.Equal(t, getNodeFromDbErr, sync.UnwrapGetNodeFromDBErr(getNodeFromDbErr)) + assert.Equal(t, getNodeFromDbErr, sync.UnwrapGetNodeFromDBErr(wrappedErr1)) + assert.Equal(t, getNodeFromDbErr, sync.UnwrapGetNodeFromDBErr(wrappedErr2)) + assert.Equal(t, getNodeFromDbErr, sync.UnwrapGetNodeFromDBErr(wrappedErr3)) +} diff --git a/state/accountsDB.go b/state/accountsDB.go index 289f940cfd7..ef200093467 100644 --- a/state/accountsDB.go +++ b/state/accountsDB.go @@ -523,8 +523,7 @@ func (adb *AccountsDB) loadDataTrie(accountHandler baseAccountHandler, mainTrie dataTrie, err := 
mainTrie.Recreate(accountHandler.GetRootHash()) if err != nil { - log.Error("trie was not found for hash", "rootHash", accountHandler.GetRootHash(), "err", err) - return err + return fmt.Errorf("trie was not found for hash, rootHash = %s, err = %w", hex.EncodeToString(accountHandler.GetRootHash()), err) } accountHandler.SetDataTrie(dataTrie) diff --git a/trie/branchNode.go b/trie/branchNode.go index 3e6f26768b5..4b2591d0982 100644 --- a/trie/branchNode.go +++ b/trie/branchNode.go @@ -13,7 +13,6 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/errors" ) var _ = node(&branchNode{}) @@ -299,7 +298,7 @@ func (bn *branchNode) commitCheckpoint( depthLevel int, ) error { if shouldStopIfContextDoneBlockingIfBusy(ctx, idleProvider) { - return errors.ErrContextClosing + return core.ErrContextClosing } err := bn.isEmptyOrNil() @@ -347,7 +346,7 @@ func (bn *branchNode) commitSnapshot( depthLevel int, ) error { if shouldStopIfContextDoneBlockingIfBusy(ctx, idleProvider) { - return errors.ErrContextClosing + return core.ErrContextClosing } err := bn.isEmptyOrNil() @@ -358,7 +357,7 @@ func (bn *branchNode) commitSnapshot( for i := range bn.children { err = resolveIfCollapsed(bn, byte(i), db) if err != nil { - if strings.Contains(err.Error(), common.GetNodeFromDBErrorString) { + if strings.Contains(err.Error(), core.GetNodeFromDBErrorString) { treatCommitSnapshotError(err, bn.EncodedChildren[i], missingNodesChan) continue } diff --git a/trie/branchNode_test.go b/trie/branchNode_test.go index a121e8b21aa..70665720bb4 100644 --- a/trie/branchNode_test.go +++ b/trie/branchNode_test.go @@ -7,12 +7,12 @@ import ( "fmt" "testing" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/mock" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" 
"github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" - chainErrors "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/storage/cache" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" @@ -1352,10 +1352,10 @@ func TestBranchNode_commitContextDone(t *testing.T) { cancel() err := bn.commitCheckpoint(db, db, nil, nil, ctx, statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) - assert.Equal(t, chainErrors.ErrContextClosing, err) + assert.Equal(t, core.ErrContextClosing, err) err = bn.commitSnapshot(db, nil, nil, ctx, statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) - assert.Equal(t, chainErrors.ErrContextClosing, err) + assert.Equal(t, core.ErrContextClosing, err) } func TestBranchNode_commitSnapshotDbIsClosing(t *testing.T) { @@ -1363,7 +1363,7 @@ func TestBranchNode_commitSnapshotDbIsClosing(t *testing.T) { db := &mock.StorerStub{ GetCalled: func(key []byte) ([]byte, error) { - return nil, chainErrors.ErrContextClosing + return nil, core.ErrContextClosing }, } _, collapsedBn := getBnAndCollapsedBn(getTestMarshalizerAndHasher()) diff --git a/trie/depthFirstSync.go b/trie/depthFirstSync.go index 5f2d088fc7d..a26e0bdfe2a 100644 --- a/trie/depthFirstSync.go +++ b/trie/depthFirstSync.go @@ -9,7 +9,6 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/storage" ) @@ -104,7 +103,7 @@ func (d *depthFirstTrieSyncer) StartSyncing(rootHash []byte, ctx context.Context case <-time.After(d.waitTimeBetweenChecks): continue case <-ctx.Done(): - return errors.ErrContextClosing + return core.ErrContextClosing } } } diff --git a/trie/depthFirstSync_test.go b/trie/depthFirstSync_test.go index 6ace7fbdb3f..95ea4db62aa 100644 --- 
a/trie/depthFirstSync_test.go +++ b/trie/depthFirstSync_test.go @@ -10,7 +10,6 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/storage" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -80,7 +79,7 @@ func TestDepthFirstTrieSyncer_StartSyncingCanTimeout(t *testing.T) { defer cancelFunc() err := d.StartSyncing(roothash, ctx) - require.Equal(t, errors.ErrContextClosing, err) + require.Equal(t, core.ErrContextClosing, err) } func TestDepthFirstTrieSyncer_StartSyncingTimeoutNoNodesReceived(t *testing.T) { diff --git a/trie/doubleListSync.go b/trie/doubleListSync.go index 6477023c7d2..b855e942d28 100644 --- a/trie/doubleListSync.go +++ b/trie/doubleListSync.go @@ -9,7 +9,6 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/storage" ) @@ -121,7 +120,7 @@ func (d *doubleListTrieSyncer) StartSyncing(rootHash []byte, ctx context.Context case <-time.After(d.waitTimeBetweenChecks): continue case <-ctx.Done(): - return errors.ErrContextClosing + return core.ErrContextClosing } } } diff --git a/trie/doubleListSync_test.go b/trie/doubleListSync_test.go index 719d578e5c6..a1d67e102fa 100644 --- a/trie/doubleListSync_test.go +++ b/trie/doubleListSync_test.go @@ -11,7 +11,6 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/database" "github.com/multiversx/mx-chain-go/storage/storageunit" @@ -185,7 +184,7 @@ func TestDoubleListTrieSyncer_StartSyncingCanTimeout(t 
*testing.T) { defer cancelFunc() err := d.StartSyncing(roothash, ctx) - require.Equal(t, errors.ErrContextClosing, err) + require.Equal(t, core.ErrContextClosing, err) } func TestDoubleListTrieSyncer_StartSyncingTimeoutNoNodesReceived(t *testing.T) { diff --git a/trie/extensionNode.go b/trie/extensionNode.go index 8130a761233..79d8e16cdad 100644 --- a/trie/extensionNode.go +++ b/trie/extensionNode.go @@ -14,7 +14,6 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/errors" ) var _ = node(&extensionNode{}) @@ -211,7 +210,7 @@ func (en *extensionNode) commitCheckpoint( depthLevel int, ) error { if shouldStopIfContextDoneBlockingIfBusy(ctx, idleProvider) { - return errors.ErrContextClosing + return core.ErrContextClosing } err := en.isEmptyOrNil() @@ -253,7 +252,7 @@ func (en *extensionNode) commitSnapshot( depthLevel int, ) error { if shouldStopIfContextDoneBlockingIfBusy(ctx, idleProvider) { - return errors.ErrContextClosing + return core.ErrContextClosing } err := en.isEmptyOrNil() @@ -264,7 +263,7 @@ func (en *extensionNode) commitSnapshot( err = resolveIfCollapsed(en, 0, db) isMissingNodeErr := false if err != nil { - isMissingNodeErr = strings.Contains(err.Error(), common.GetNodeFromDBErrorString) + isMissingNodeErr = strings.Contains(err.Error(), core.GetNodeFromDBErrorString) if !isMissingNodeErr { return err } diff --git a/trie/extensionNode_test.go b/trie/extensionNode_test.go index cc8dd806d2c..34b87199e99 100644 --- a/trie/extensionNode_test.go +++ b/trie/extensionNode_test.go @@ -6,9 +6,9 @@ import ( "errors" "testing" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/data/mock" "github.com/multiversx/mx-chain-go/common" - chainErrors "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/storage/cache" "github.com/multiversx/mx-chain-go/testscommon" 
"github.com/multiversx/mx-chain-go/testscommon/hashingMocks" @@ -1021,10 +1021,10 @@ func TestExtensionNode_commitContextDone(t *testing.T) { cancel() err := en.commitCheckpoint(db, db, nil, nil, ctx, statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) - assert.Equal(t, chainErrors.ErrContextClosing, err) + assert.Equal(t, core.ErrContextClosing, err) err = en.commitSnapshot(db, nil, nil, ctx, statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) - assert.Equal(t, chainErrors.ErrContextClosing, err) + assert.Equal(t, core.ErrContextClosing, err) } func TestExtensionNode_getValueReturnsEmptyByteSlice(t *testing.T) { @@ -1039,7 +1039,7 @@ func TestExtensionNode_commitSnapshotDbIsClosing(t *testing.T) { db := &mock.StorerStub{ GetCalled: func(key []byte) ([]byte, error) { - return nil, chainErrors.ErrContextClosing + return nil, core.ErrContextClosing }, } _, collapsedEn := getEnAndCollapsedEn() diff --git a/trie/leafNode.go b/trie/leafNode.go index cb4c4bfdc76..ed037c7f0e0 100644 --- a/trie/leafNode.go +++ b/trie/leafNode.go @@ -14,7 +14,6 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/errors" ) var _ = node(&leafNode{}) @@ -139,7 +138,7 @@ func (ln *leafNode) commitCheckpoint( depthLevel int, ) error { if shouldStopIfContextDoneBlockingIfBusy(ctx, idleProvider) { - return errors.ErrContextClosing + return core.ErrContextClosing } err := ln.isEmptyOrNil() @@ -184,7 +183,7 @@ func (ln *leafNode) commitSnapshot( depthLevel int, ) error { if shouldStopIfContextDoneBlockingIfBusy(ctx, idleProvider) { - return errors.ErrContextClosing + return core.ErrContextClosing } err := ln.isEmptyOrNil() diff --git a/trie/leafNode_test.go b/trie/leafNode_test.go index b534e700d44..bf9cab8209b 100644 --- a/trie/leafNode_test.go +++ b/trie/leafNode_test.go @@ -8,7 +8,6 @@ import ( 
"github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" - chainErrors "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/storage/cache" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" @@ -733,10 +732,10 @@ func TestLeafNode_commitContextDone(t *testing.T) { cancel() err := ln.commitCheckpoint(db, db, nil, nil, ctx, statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) - assert.Equal(t, chainErrors.ErrContextClosing, err) + assert.Equal(t, core.ErrContextClosing, err) err = ln.commitSnapshot(db, nil, nil, ctx, statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) - assert.Equal(t, chainErrors.ErrContextClosing, err) + assert.Equal(t, core.ErrContextClosing, err) } func TestLeafNode_getValue(t *testing.T) { diff --git a/trie/node.go b/trie/node.go index b464d7ff510..9a127a7ecf7 100644 --- a/trie/node.go +++ b/trie/node.go @@ -120,12 +120,12 @@ func computeAndSetNodeHash(n node) ([]byte, error) { func getNodeFromDBAndDecode(n []byte, db common.DBWriteCacher, marshalizer marshal.Marshalizer, hasher hashing.Hasher) (node, error) { encChild, err := db.Get(n) if err != nil { - log.Trace(common.GetNodeFromDBErrorString, "error", err, "key", n, "stack trace", string(debug.Stack())) + log.Trace(core.GetNodeFromDBErrorString, "error", err, "key", n, "stack trace", string(debug.Stack())) dbWithID, ok := db.(dbWriteCacherWithIdentifier) if !ok { - log.Warn("db does not have an identifier", "db type", fmt.Sprintf("%T", db)) - return nil, errors.NewGetNodeFromDBErrWithKey(n, err, "") + getNodeFromDbErr := errors.NewGetNodeFromDBErrWithKey(n, err, "") + return nil, fmt.Errorf("db does not have an identifier, db type: %T, error: %w", db, getNodeFromDbErr) } return nil, errors.NewGetNodeFromDBErrWithKey(n, err, dbWithID.GetIdentifier()) diff --git 
a/trie/patriciaMerkleTrie.go b/trie/patriciaMerkleTrie.go index b3fa3019dd3..75b035966af 100644 --- a/trie/patriciaMerkleTrie.go +++ b/trie/patriciaMerkleTrie.go @@ -88,7 +88,7 @@ func (tr *patriciaMerkleTrie) Get(key []byte) ([]byte, uint32, error) { val, depth, err := tr.root.tryGet(hexKey, rootDepthLevel, tr.trieStorage) if err != nil { - log.Error("trie get error", "error", err.Error(), "key", key) + err = fmt.Errorf("trie get error: %w, for key %v", err, hex.EncodeToString(key)) return nil, depth, err } diff --git a/trie/snapshotTrieStorageManager.go b/trie/snapshotTrieStorageManager.go index 784533fec3a..133cb9080e4 100644 --- a/trie/snapshotTrieStorageManager.go +++ b/trie/snapshotTrieStorageManager.go @@ -6,7 +6,6 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/errors" ) type snapshotTrieStorageManager struct { @@ -35,7 +34,7 @@ func (stsm *snapshotTrieStorageManager) Get(key []byte) ([]byte, error) { if stsm.closed { log.Debug("snapshotTrieStorageManager get context closing", "key", key) - return nil, errors.ErrContextClosing + return nil, core.ErrContextClosing } // test point get during snapshot @@ -86,7 +85,7 @@ func (stsm *snapshotTrieStorageManager) Put(key, data []byte) error { if stsm.closed { log.Debug("snapshotTrieStorageManager put context closing", "key", key, "data", data) - return errors.ErrContextClosing + return core.ErrContextClosing } log.Trace("put hash in snapshot storer", "hash", key, "epoch", stsm.epoch) @@ -100,7 +99,7 @@ func (stsm *snapshotTrieStorageManager) GetFromLastEpoch(key []byte) ([]byte, er if stsm.closed { log.Debug("snapshotTrieStorageManager getFromLastEpoch context closing", "key", key) - return nil, errors.ErrContextClosing + return nil, core.ErrContextClosing } return stsm.mainSnapshotStorer.GetFromLastEpoch(key) diff --git a/trie/sync.go b/trie/sync.go index 465ebf71a99..2ef4bb807ca 100644 --- a/trie/sync.go +++ b/trie/sync.go 
@@ -13,7 +13,6 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/storage" ) @@ -160,7 +159,7 @@ func (ts *trieSyncer) StartSyncing(rootHash []byte, ctx context.Context) error { case <-time.After(ts.waitTimeBetweenRequests): continue case <-ctx.Done(): - return errors.ErrContextClosing + return core.ErrContextClosing } } } diff --git a/trie/trieStorageManager.go b/trie/trieStorageManager.go index b4c5e6e857c..45c251d4983 100644 --- a/trie/trieStorageManager.go +++ b/trie/trieStorageManager.go @@ -16,7 +16,6 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/trie/statistics" ) @@ -176,7 +175,7 @@ func (tsm *trieStorageManager) Get(key []byte) ([]byte, error) { if tsm.closed { log.Trace("trieStorageManager get context closing", "key", key) - return nil, errors.ErrContextClosing + return nil, core.ErrContextClosing } val, err := tsm.mainStorer.Get(key) @@ -197,7 +196,7 @@ func (tsm *trieStorageManager) GetFromCurrentEpoch(key []byte) ([]byte, error) { if tsm.closed { log.Trace("trieStorageManager get context closing", "key", key) tsm.storageOperationMutex.Unlock() - return nil, errors.ErrContextClosing + return nil, core.ErrContextClosing } storer, ok := tsm.mainStorer.(snapshotPruningStorer) @@ -232,7 +231,7 @@ func (tsm *trieStorageManager) Put(key []byte, val []byte) error { if tsm.closed { log.Trace("trieStorageManager put context closing", "key", key, "value", val) - return errors.ErrContextClosing + return core.ErrContextClosing } return tsm.mainStorer.Put(key, val) @@ -246,7 +245,7 @@ func (tsm *trieStorageManager) PutInEpoch(key []byte, val []byte, epoch uint32) if tsm.closed { log.Trace("trieStorageManager 
putInEpoch context closing", "key", key, "value", val, "epoch", epoch) - return errors.ErrContextClosing + return core.ErrContextClosing } storer, ok := tsm.mainStorer.(snapshotPruningStorer) @@ -265,7 +264,7 @@ func (tsm *trieStorageManager) PutInEpochWithoutCache(key []byte, val []byte, ep if tsm.closed { log.Trace("trieStorageManager putInEpochWithoutCache context closing", "key", key, "value", val, "epoch", epoch) - return errors.ErrContextClosing + return core.ErrContextClosing } storer, ok := tsm.mainStorer.(snapshotPruningStorer) @@ -533,7 +532,7 @@ func newSnapshotNode( ) (snapshotNode, error) { newRoot, err := getNodeFromDBAndDecode(rootHash, db, msh, hsh) if err != nil { - if strings.Contains(err.Error(), common.GetNodeFromDBErrorString) { + if strings.Contains(err.Error(), core.GetNodeFromDBErrorString) { treatCommitSnapshotError(err, rootHash, missingNodesCh) } return nil, err diff --git a/trie/trieStorageManagerInEpoch.go b/trie/trieStorageManagerInEpoch.go index c4cc7c9d195..5726290b3fd 100644 --- a/trie/trieStorageManagerInEpoch.go +++ b/trie/trieStorageManagerInEpoch.go @@ -6,7 +6,6 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/errors" ) // numEpochsToVerify needs to be at least 2 due to a snapshotting edge-case. 
@@ -50,7 +49,7 @@ func (tsmie *trieStorageManagerInEpoch) Get(key []byte) ([]byte, error) { if tsmie.closed { log.Debug("trieStorageManagerInEpoch get context closing", "key", key) - return nil, errors.ErrContextClosing + return nil, core.ErrContextClosing } for i := uint32(0); i < numEpochsToVerify; i++ { diff --git a/trie/trieStorageManager_test.go b/trie/trieStorageManager_test.go index f634024514d..dc742c03afc 100644 --- a/trie/trieStorageManager_test.go +++ b/trie/trieStorageManager_test.go @@ -10,9 +10,9 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/storage" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" "github.com/multiversx/mx-chain-go/trie/hashesHolder" @@ -248,7 +248,7 @@ func TestTrieStorageManager_PutInEpochClosedDb(t *testing.T) { key := []byte("key") value := []byte("value") err := ts.PutInEpoch(key, value, 0) - assert.Equal(t, errors.ErrContextClosing, err) + assert.Equal(t, core.ErrContextClosing, err) } func TestTrieStorageManager_PutInEpochInvalidStorer(t *testing.T) { @@ -390,7 +390,7 @@ func TestTrieStorageManager_TakeSnapshotWithGetNodeFromDBError(t *testing.T) { require.Equal(t, 1, len(iteratorChannels.ErrChan)) errRecovered := <-iteratorChannels.ErrChan - assert.True(t, strings.Contains(errRecovered.Error(), common.GetNodeFromDBErrorString)) + assert.True(t, strings.Contains(errRecovered.Error(), core.GetNodeFromDBErrorString)) } func TestTrieStorageManager_ShouldTakeSnapshotInvalidStorer(t *testing.T) { @@ -518,3 +518,31 @@ func TestWriteInChanNonBlocking(t *testing.T) { assert.Equal(t, err2, recovered) }) } + +func TestTrieStorageManager_GetIdentifier(t *testing.T) { + t.Parallel() + + 
t.Run("db without identifier", func(t *testing.T) { + t.Parallel() + + ts, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) + id := ts.GetIdentifier() + assert.Equal(t, "", id) + }) + + t.Run("db with identifier", func(t *testing.T) { + t.Parallel() + + expectedIdentifier := "identifier" + args := getNewTrieStorageManagerArgs() + args.MainStorer = &storage.StorerStub{ + GetIdentifierCalled: func() string { + return expectedIdentifier + }, + } + ts, _ := trie.NewTrieStorageManager(args) + + id := ts.GetIdentifier() + assert.Equal(t, expectedIdentifier, id) + }) +} diff --git a/update/genesis/import.go b/update/genesis/import.go index ce9f3c33f53..e740564c424 100644 --- a/update/genesis/import.go +++ b/update/genesis/import.go @@ -5,7 +5,6 @@ import ( "encoding/hex" "encoding/json" "fmt" - "github.com/multiversx/mx-chain-go/dataRetriever" "strings" "github.com/multiversx/mx-chain-core-go/core" @@ -17,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-go/common" commonDisabled "github.com/multiversx/mx-chain-go/common/disabled" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/factory" "github.com/multiversx/mx-chain-go/state/storagePruningManager/disabled" @@ -292,7 +292,7 @@ func (si *stateImport) getTrie(shardID uint32, accType Type) (common.Trie, error return trieForShard, nil } - trieStorageManager := si.trieStorageManagers[dataRetriever.PeerAccountsUnit.String()] + trieStorageManager := si.trieStorageManagers[dataRetriever.UserAccountsUnit.String()] if accType == ValidatorAccount { trieStorageManager = si.trieStorageManagers[dataRetriever.PeerAccountsUnit.String()] } From 220916203baaf9937ddf78c357b5aed7ea75d7d8 Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Wed, 19 Apr 2023 13:20:46 +0300 Subject: [PATCH 096/221] added semi-integration test for relayed v2 multi nft transfers --- 
.../vm/txsFee/multiShard/esdt_test.go | 146 ++++++++++++++++++ integrationTests/vm/txsFee/utils/utilsESDT.go | 6 +- 2 files changed, 151 insertions(+), 1 deletion(-) diff --git a/integrationTests/vm/txsFee/multiShard/esdt_test.go b/integrationTests/vm/txsFee/multiShard/esdt_test.go index 60941642343..f224b528ef6 100644 --- a/integrationTests/vm/txsFee/multiShard/esdt_test.go +++ b/integrationTests/vm/txsFee/multiShard/esdt_test.go @@ -1,9 +1,13 @@ package multiShard import ( + "encoding/hex" "math/big" + "strings" "testing" + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/integrationTests/vm" "github.com/multiversx/mx-chain-go/integrationTests/vm/txsFee/utils" @@ -40,3 +44,145 @@ func TestESDTTransferShouldWork(t *testing.T) { expectedReceiverBalance := big.NewInt(100) utils.CheckESDTBalance(t, testContext, rcvAddr, token, expectedReceiverBalance) } + +func TestMultiESDTNFTTransferViaRelayedV2(t *testing.T) { + tokenID1 := []byte("MYNFT1") + tokenID2 := []byte("MYNFT2") + sh0Addr := []byte("12345678901234567890123456789010") + sh1Addr := []byte("12345678901234567890123456789011") + + relayerSh0 := []byte("12345678901234567890123456789110") + relayerSh1 := []byte("12345678901234567890123456789111") + sh0Context, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(0, config.EnableEpochs{}) + require.Nil(t, err) + defer sh0Context.Close() + + sh1Context, err := vm.CreatePreparedTxProcessorWithVMsMultiShard(1, config.EnableEpochs{}) + require.Nil(t, err) + defer sh1Context.Close() + _, _ = vm.CreateAccount(sh1Context.Accounts, sh1Addr, 0, big.NewInt(10000000000)) + _, _ = vm.CreateAccount(sh0Context.Accounts, relayerSh0, 0, big.NewInt(1000000000)) + _, _ = vm.CreateAccount(sh1Context.Accounts, relayerSh1, 0, big.NewInt(1000000000)) + + // create the nfts, add the liquidity to the system accounts and check for balances + 
utils.CreateAccountWithESDTBalance(t, sh0Context.Accounts, sh0Addr, big.NewInt(100000000), tokenID1, 1, big.NewInt(1)) + utils.CreateAccountWithESDTBalance(t, sh0Context.Accounts, sh0Addr, big.NewInt(100000000), tokenID2, 1, big.NewInt(1)) + + sh0Accnt, _ := sh0Context.Accounts.LoadAccount(sh0Addr) + sh1Accnt, _ := sh1Context.Accounts.LoadAccount(sh1Addr) + + transfers := []*utils.TransferESDTData{ + { + Token: tokenID1, + Nonce: 1, + Value: big.NewInt(1), + }, + { + Token: tokenID2, + Nonce: 1, + Value: big.NewInt(1), + }, + } + + // + // Step 1: transfer the NFTs sh0->sh1 via multi transfer with a shard 0 relayer + // + + innerTx := utils.CreateMultiTransferTX(sh0Accnt.GetNonce(), sh0Addr, sh1Addr, 10, 10000000, transfers...) + relayedTx := createRelayedV2FromInnerTx(0, relayerSh0, innerTx) + + retCode, err := sh0Context.TxProcessor.ProcessTransaction(relayedTx) + require.Equal(t, vmcommon.Ok, retCode) + require.NoError(t, err) + + scrs := sh0Context.GetIntermediateTransactions(t) + + shard1Scr := scrs[0] + for _, scr := range scrs { + if scr.GetRcvAddr()[len(scr.GetRcvAddr())-1] == byte(0) { + shard1Scr = scr + break + } + } + // check the balances after the transfer, as well as the liquidity + utils.ProcessSCRResult(t, sh1Context, shard1Scr, vmcommon.Ok, nil) + utils.CheckESDTNFTBalance(t, sh0Context, sh0Addr, tokenID1, 1, big.NewInt(0)) + utils.CheckESDTNFTBalance(t, sh0Context, core.SystemAccountAddress, tokenID1, 1, big.NewInt(0)) + utils.CheckESDTNFTBalance(t, sh1Context, sh1Addr, tokenID1, 1, big.NewInt(1)) + utils.CheckESDTNFTBalance(t, sh1Context, core.SystemAccountAddress, tokenID1, 1, big.NewInt(1)) + + // + // Step 2: transfer the NFTs sh1->sh0 via multi transfer with a shard 1 relayer + // + + sh0Context.CleanIntermediateTransactions(t) + sh1Context.CleanIntermediateTransactions(t) + + innerTx = utils.CreateMultiTransferTX(sh1Accnt.GetNonce(), sh1Addr, sh0Addr, 10, 10000000, transfers...) 
+ relayedTx = createRelayedV2FromInnerTx(0, relayerSh1, innerTx) + + retCode, err = sh1Context.TxProcessor.ProcessTransaction(relayedTx) + require.Equal(t, vmcommon.Ok, retCode) + require.NoError(t, err) + + scrs = sh1Context.GetIntermediateTransactions(t) + shard0Scr := scrs[0] + for _, scr := range scrs { + if scr.GetRcvAddr()[len(scr.GetRcvAddr())-1] == byte(0) { + shard0Scr = scr + break + } + } + // check the balances after the transfer, as well as the liquidity + utils.ProcessSCRResult(t, sh0Context, shard0Scr, vmcommon.Ok, nil) + utils.CheckESDTNFTBalance(t, sh0Context, sh0Addr, tokenID1, 1, big.NewInt(1)) + utils.CheckESDTNFTBalance(t, sh0Context, core.SystemAccountAddress, tokenID1, 1, big.NewInt(1)) + utils.CheckESDTNFTBalance(t, sh1Context, sh1Addr, tokenID1, 1, big.NewInt(0)) + utils.CheckESDTNFTBalance(t, sh1Context, core.SystemAccountAddress, tokenID1, 1, big.NewInt(0)) + + // + // Step 3: transfer the NFTs sh0->s1 via multi transfer with a shard 1 relayer + // + + sh0Context.CleanIntermediateTransactions(t) + sh1Context.CleanIntermediateTransactions(t) + + innerTx = utils.CreateMultiTransferTX(sh0Accnt.GetNonce()+1, sh0Addr, sh1Addr, 10, 10000000, transfers...) 
+ relayedTx = createRelayedV2FromInnerTx(1, relayerSh1, innerTx) + + retCode, err = sh0Context.TxProcessor.ProcessTransaction(relayedTx) + require.Equal(t, vmcommon.Ok, retCode) + require.NoError(t, err) + + scrs = sh0Context.GetIntermediateTransactions(t) + shard1Scr = scrs[0] + for _, scr := range scrs { + if scr.GetRcvAddr()[len(scr.GetRcvAddr())-1] == byte(1) { + shard1Scr = scr + break + } + } + // check the balances after the transfer, as well as the liquidity + utils.ProcessSCRResult(t, sh1Context, shard1Scr, vmcommon.Ok, nil) + utils.CheckESDTNFTBalance(t, sh0Context, sh0Addr, tokenID1, 1, big.NewInt(0)) + utils.CheckESDTNFTBalance(t, sh0Context, core.SystemAccountAddress, tokenID1, 1, big.NewInt(0)) + utils.CheckESDTNFTBalance(t, sh1Context, sh1Addr, tokenID1, 1, big.NewInt(1)) + utils.CheckESDTNFTBalance(t, sh1Context, core.SystemAccountAddress, tokenID1, 1, big.NewInt(1)) +} + +func createRelayedV2FromInnerTx(relayerNonce uint64, relayer []byte, innerTx *transaction.Transaction) *transaction.Transaction { + nonceHex := "00" + if innerTx.Nonce > 0 { + nonceHex = hex.EncodeToString(big.NewInt(int64(innerTx.Nonce)).Bytes()) + } + data := strings.Join([]string{"relayedTxV2", hex.EncodeToString(innerTx.RcvAddr), nonceHex, hex.EncodeToString(innerTx.Data), hex.EncodeToString(innerTx.Signature)}, "@") + return &transaction.Transaction{ + Nonce: relayerNonce, + Value: big.NewInt(0), + SndAddr: relayer, + RcvAddr: innerTx.SndAddr, + GasPrice: innerTx.GasPrice, + GasLimit: innerTx.GasLimit + 1_000_000, + Data: []byte(data), + } +} diff --git a/integrationTests/vm/txsFee/utils/utilsESDT.go b/integrationTests/vm/txsFee/utils/utilsESDT.go index 96c2c56e24e..e09340c8e5b 100644 --- a/integrationTests/vm/txsFee/utils/utilsESDT.go +++ b/integrationTests/vm/txsFee/utils/utilsESDT.go @@ -3,6 +3,7 @@ package utils import ( "bytes" "encoding/hex" + "fmt" "math/big" "strings" "testing" @@ -42,7 +43,10 @@ func CreateAccountWithESDTBalance( } if esdtNonce > 0 { 
esdtData.TokenMetaData = &esdt.MetaData{ - Nonce: esdtNonce, + Name: []byte(fmt.Sprintf("Token %d", esdtNonce)), + URIs: [][]byte{[]byte(fmt.Sprintf("URI for token %d", esdtNonce))}, + Creator: pubKey, + Nonce: esdtNonce, } } From 79efe7942c45ed2143accc2ef8723931f70a110d Mon Sep 17 00:00:00 2001 From: robertsasu Date: Wed, 19 Apr 2023 13:28:01 +0300 Subject: [PATCH 097/221] new go mod --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 09573845409..825d88f250d 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/multiversx/mx-chain-logger-go v1.0.11 github.com/multiversx/mx-chain-p2p-go v1.0.15 github.com/multiversx/mx-chain-storage-go v1.0.7 - github.com/multiversx/mx-chain-vm-common-go v1.4.0 + github.com/multiversx/mx-chain-vm-common-go v1.4.1-0.20230419081827-534ec82d0b8f github.com/multiversx/mx-chain-vm-v1_2-go v1.2.50 github.com/multiversx/mx-chain-vm-v1_3-go v1.3.51 github.com/multiversx/mx-chain-vm-v1_4-go v1.4.77 diff --git a/go.sum b/go.sum index 1cc5571c731..b71de3f37ed 100644 --- a/go.sum +++ b/go.sum @@ -626,8 +626,9 @@ github.com/multiversx/mx-chain-storage-go v1.0.7 h1:UqLo/OLTD3IHiE/TB/SEdNRV1GG2 github.com/multiversx/mx-chain-storage-go v1.0.7/go.mod h1:gtKoV32Cg2Uy8deHzF8Ud0qAl0zv92FvWgPSYIP0Zmg= github.com/multiversx/mx-chain-vm-common-go v1.3.36/go.mod h1:sZ2COLCxvf2GxAAJHGmGqWybObLtFuk2tZUyGqnMXE8= github.com/multiversx/mx-chain-vm-common-go v1.3.37/go.mod h1:sZ2COLCxvf2GxAAJHGmGqWybObLtFuk2tZUyGqnMXE8= -github.com/multiversx/mx-chain-vm-common-go v1.4.0 h1:0i0cJZJOXGzqYzwtKFHSr2yGmnFAdizOuISK8HgsnYo= github.com/multiversx/mx-chain-vm-common-go v1.4.0/go.mod h1:odBJC92ANA8zLtPh/wwajUUGJOaS88F5QYGf0t8Wgzw= +github.com/multiversx/mx-chain-vm-common-go v1.4.1-0.20230419081827-534ec82d0b8f h1:YhJ/yF8UJeCOLMxHGY11Kj/FqEfzraSVROADhPNVSnA= +github.com/multiversx/mx-chain-vm-common-go v1.4.1-0.20230419081827-534ec82d0b8f/go.mod 
h1:odBJC92ANA8zLtPh/wwajUUGJOaS88F5QYGf0t8Wgzw= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.50 h1:ScUq7/wq78vthMTQ6v5Ux1DvSMQMHxQ2Sl7aPP26q1w= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.50/go.mod h1:e3uYdgoKzs3puaznbmSjDcRisJc5Do4tpg7VqyYwoek= github.com/multiversx/mx-chain-vm-v1_3-go v1.3.51 h1:axtp5/mpA+xYJ1cu4KtAGETV4t6v6/tNfQh0HCclBYY= From 494736d2f70310528456810240e1b98239412ef2 Mon Sep 17 00:00:00 2001 From: robertsasu Date: Wed, 19 Apr 2023 13:36:39 +0300 Subject: [PATCH 098/221] fixes after merge --- integrationTests/vm/txsFee/dns_test.go | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/integrationTests/vm/txsFee/dns_test.go b/integrationTests/vm/txsFee/dns_test.go index 2c629ffaeb1..bc0d695aab2 100644 --- a/integrationTests/vm/txsFee/dns_test.go +++ b/integrationTests/vm/txsFee/dns_test.go @@ -351,11 +351,13 @@ func scrToString(scr *smartContractResult.SmartContractResult) string { hash, _ := core.CalculateHash(integrationTests.TestMarshalizer, integrationTests.TestHasher, scr) + rcv, _ := integrationTests.TestAddressPubkeyConverter.Encode(scr.RcvAddr) + snd, _ := integrationTests.TestAddressPubkeyConverter.Encode(scr.SndAddr) return fmt.Sprintf("hash: %s, nonce: %d, value: %s, rcvAddr: %s, sender: %s, gasLimit: %d, gasPrice: %d, data: %s", hex.EncodeToString(hash), scr.Nonce, scr.Value.String(), - integrationTests.TestAddressPubkeyConverter.Encode(scr.RcvAddr), - integrationTests.TestAddressPubkeyConverter.Encode(scr.SndAddr), + rcv, + snd, scr.GasLimit, scr.GasPrice, data, ) } @@ -367,12 +369,13 @@ func txToString(tx *transaction.Transaction) string { } hash, _ := core.CalculateHash(integrationTests.TestMarshalizer, integrationTests.TestHasher, tx) - + rcv, _ := integrationTests.TestAddressPubkeyConverter.Encode(tx.RcvAddr) + snd, _ := integrationTests.TestAddressPubkeyConverter.Encode(tx.SndAddr) return fmt.Sprintf("hash: %s, nonce: %d, value: %s, rcvAddr: %s, sender: %s, gasLimit: %d, gasPrice: %d, data: %s", 
hex.EncodeToString(hash), tx.Nonce, tx.Value.String(), - integrationTests.TestAddressPubkeyConverter.Encode(tx.RcvAddr), - integrationTests.TestAddressPubkeyConverter.Encode(tx.SndAddr), + rcv, + snd, tx.GasLimit, tx.GasPrice, data, ) } From 96231ea8e1d953cfa738f16c92ee8dc0d2cfc0a1 Mon Sep 17 00:00:00 2001 From: robertsasu Date: Wed, 19 Apr 2023 13:40:12 +0300 Subject: [PATCH 099/221] fixes after merge --- integrationTests/vm/testInitializer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integrationTests/vm/testInitializer.go b/integrationTests/vm/testInitializer.go index 8c01682d8b8..8bf591da3ba 100644 --- a/integrationTests/vm/testInitializer.go +++ b/integrationTests/vm/testInitializer.go @@ -75,7 +75,7 @@ const DNSV2Address = "erd1qqqqqqqqqqqqqpgqcy67yanvwpepqmerkq6m8pgav0tlvgwxjmdq4h const DNSV2DeployerAddress = "erd1uzk2g5rhvg8prk9y50d0q7qsxg7tm7f320q0q4qlpmfu395wjmdqqy0n9q" // TestAddressPubkeyConverter represents an address public key converter -var TestAddressPubkeyConverter, _ = pubkeyConverter.NewBech32PubkeyConverter(32, log) +var TestAddressPubkeyConverter, _ = pubkeyConverter.NewBech32PubkeyConverter(32, "erd") // TODO: Merge test utilities from this file with the ones from "wasmvm/utils.go" From de31e038cdf6eaed8979a9f3b68ba60fc88873fe Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Mon, 24 Apr 2023 13:52:40 +0300 Subject: [PATCH 100/221] pass storageMarker as argument when SyncAccounts() is called --- common/interface.go | 5 + .../trieNodeRequester_test.go | 4 +- epochStart/bootstrap/process.go | 6 +- epochStart/interface.go | 6 -- factory/consensus/consensusComponents.go | 2 - factory/state/stateComponentsHandler_test.go | 4 +- integrationTests/mock/accountsDBSyncerStub.go | 6 +- .../state/stateTrieSync/stateTrieSync_test.go | 3 +- integrationTests/testProcessorNode.go | 9 +- node/nodeRunner.go | 2 - node/nodeTesting_test.go | 5 +- node/node_test.go | 3 +- process/interface.go | 2 +- process/mock/accountsDBSyncerStub.go 
| 6 +- process/sync/baseSync.go | 3 +- process/sync/metablock.go | 3 +- process/sync/metablock_test.go | 6 +- process/sync/shardblock_test.go | 2 +- state/accountsDB.go | 3 +- state/accountsDB_test.go | 95 ++++++++++--------- .../factory/accountsAdapterAPICreator_test.go | 3 +- state/interface.go | 2 +- state/peerAccountsDB_test.go | 26 ++--- state/syncer/baseAccountsSyncer.go | 2 - state/syncer/userAccountsSyncer.go | 8 +- state/syncer/validatorAccountsSyncer.go | 9 +- testscommon/components/default.go | 7 +- .../storageManagerStub.go | 2 +- trie/export_test.go | 4 +- trie/interface.go | 5 - trie/storageMarker/trieStorageMarker_test.go | 6 +- trie/trieStorageManagerFactory_test.go | 4 +- trie/trieStorageManagerInEpoch_test.go | 4 +- trie/trieStorageManagerWithoutPruning_test.go | 4 +- update/container/accountDBSyncers_test.go | 3 +- .../accountDBSyncerContainerFactory.go | 3 - update/genesis/import_test.go | 9 +- update/interface.go | 2 +- update/mock/accountsDBSyncerStub.go | 6 +- update/sync/coordinator_test.go | 2 +- update/sync/syncAccountsDBs.go | 3 +- 41 files changed, 140 insertions(+), 149 deletions(-) rename testscommon/{ => storageManager}/storageManagerStub.go (99%) diff --git a/common/interface.go b/common/interface.go index 9e23d0786c0..eed1f157a50 100644 --- a/common/interface.go +++ b/common/interface.go @@ -46,6 +46,11 @@ type TrieStats interface { GetTrieStats(address string, rootHash []byte) (*statistics.TrieStatsDTO, error) } +// StorageMarker is used to mark the given storer as synced and active +type StorageMarker interface { + MarkStorerAsSyncedAndActive(storer StorageManager) +} + // KeyBuilder is used for building trie keys as you traverse the trie type KeyBuilder interface { BuildKey(keyPart []byte) diff --git a/dataRetriever/storageRequesters/trieNodeRequester_test.go b/dataRetriever/storageRequesters/trieNodeRequester_test.go index 042c1390826..b65d53d9704 100644 --- a/dataRetriever/storageRequesters/trieNodeRequester_test.go +++ 
b/dataRetriever/storageRequesters/trieNodeRequester_test.go @@ -11,8 +11,8 @@ import ( "github.com/multiversx/mx-chain-core-go/data/endProcess" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/mock" - "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/stretchr/testify/assert" ) @@ -23,7 +23,7 @@ func createMockTrieRequesterArguments() ArgTrieRequester { ResponseTopicName: "", Marshalizer: &mock.MarshalizerStub{}, TrieDataGetter: &trieMock.TrieStub{}, - TrieStorageManager: &testscommon.StorageManagerStub{}, + TrieStorageManager: &storageManager.StorageManagerStub{}, ManualEpochStartNotifier: &mock.ManualEpochStartNotifierStub{}, ChanGracefullyClose: make(chan endProcess.ArgEndProcess, 1), DelayBeforeGracefulClose: 0, diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index e5fb3e07ad1..724cdb72135 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -1076,7 +1076,6 @@ func (e *epochStartBootstrap) syncUserAccountsState(rootHash []byte) error { MaxHardCapForMissingNodes: e.maxHardCapForMissingNodes, TrieSyncerVersion: e.trieSyncerVersion, CheckNodesOnDisk: e.checkNodesOnDisk, - StorageMarker: storageMarker.NewTrieStorageMarker(), UserAccountsSyncStatisticsHandler: e.trieSyncStatisticsProvider, AppStatusHandler: e.statusHandler, }, @@ -1089,7 +1088,7 @@ func (e *epochStartBootstrap) syncUserAccountsState(rootHash []byte) error { return err } - err = accountsDBSyncer.SyncAccounts(rootHash) + err = accountsDBSyncer.SyncAccounts(rootHash, storageMarker.NewTrieStorageMarker()) if err != nil { return err } @@ -1147,7 +1146,6 @@ func (e *epochStartBootstrap) syncValidatorAccountsState(rootHash []byte) error MaxHardCapForMissingNodes: e.maxHardCapForMissingNodes, 
TrieSyncerVersion: e.trieSyncerVersion, CheckNodesOnDisk: e.checkNodesOnDisk, - StorageMarker: storageMarker.NewTrieStorageMarker(), UserAccountsSyncStatisticsHandler: statistics.NewTrieSyncStatistics(), AppStatusHandler: disabledCommon.NewAppStatusHandler(), }, @@ -1157,7 +1155,7 @@ func (e *epochStartBootstrap) syncValidatorAccountsState(rootHash []byte) error return err } - err = accountsDBSyncer.SyncAccounts(rootHash) + err = accountsDBSyncer.SyncAccounts(rootHash, storageMarker.NewTrieStorageMarker()) if err != nil { return err } diff --git a/epochStart/interface.go b/epochStart/interface.go index 33cd7a1e233..fc4364afc43 100644 --- a/epochStart/interface.go +++ b/epochStart/interface.go @@ -115,12 +115,6 @@ type PendingMiniBlocksSyncHandler interface { IsInterfaceNil() bool } -// AccountsDBSyncer defines the methods for the accounts db syncer -type AccountsDBSyncer interface { - SyncAccounts(rootHash []byte) error - IsInterfaceNil() bool -} - // StartOfEpochMetaSyncer defines the methods to synchronize epoch start meta block from the network when nothing is known type StartOfEpochMetaSyncer interface { SyncEpochStartMeta(waitTime time.Duration) (data.MetaHeaderHandler, error) diff --git a/factory/consensus/consensusComponents.go b/factory/consensus/consensusComponents.go index 50e05ad2a1a..f48516eebca 100644 --- a/factory/consensus/consensusComponents.go +++ b/factory/consensus/consensusComponents.go @@ -26,7 +26,6 @@ import ( "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state/syncer" "github.com/multiversx/mx-chain-go/trie/statistics" - "github.com/multiversx/mx-chain-go/trie/storageMarker" "github.com/multiversx/mx-chain-go/update" logger "github.com/multiversx/mx-chain-logger-go" "github.com/multiversx/mx-chain-storage-go/timecache" @@ -529,7 +528,6 @@ func (ccf *consensusComponentsFactory) createArgsBaseAccountsSyncer(trieStorageM MaxHardCapForMissingNodes: ccf.config.TrieSync.MaxHardCapForMissingNodes, 
TrieSyncerVersion: ccf.config.TrieSync.TrieSyncerVersion, CheckNodesOnDisk: ccf.config.TrieSync.CheckNodesOnDisk, - StorageMarker: storageMarker.NewTrieStorageMarker(), UserAccountsSyncStatisticsHandler: statistics.NewTrieSyncStatistics(), AppStatusHandler: disabled.NewAppStatusHandler(), } diff --git a/factory/state/stateComponentsHandler_test.go b/factory/state/stateComponentsHandler_test.go index d055e80efad..b303b873940 100644 --- a/factory/state/stateComponentsHandler_test.go +++ b/factory/state/stateComponentsHandler_test.go @@ -6,8 +6,8 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/factory/mock" stateComp "github.com/multiversx/mx-chain-go/factory/state" - "github.com/multiversx/mx-chain-go/testscommon" componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/stretchr/testify/require" ) @@ -108,7 +108,7 @@ func TestManagedStateComponents_Setters(t *testing.T) { require.NoError(t, err) triesContainer := &trieMock.TriesHolderStub{} - triesStorageManagers := map[string]common.StorageManager{"a": &testscommon.StorageManagerStub{}} + triesStorageManagers := map[string]common.StorageManager{"a": &storageManager.StorageManagerStub{}} err = managedStateComponents.SetTriesContainer(triesContainer) require.NoError(t, err) diff --git a/integrationTests/mock/accountsDBSyncerStub.go b/integrationTests/mock/accountsDBSyncerStub.go index 9ff9abb9017..39477bdc70a 100644 --- a/integrationTests/mock/accountsDBSyncerStub.go +++ b/integrationTests/mock/accountsDBSyncerStub.go @@ -7,7 +7,7 @@ import ( // AccountsDBSyncerStub - type AccountsDBSyncerStub struct { GetSyncedTriesCalled func() map[string]common.Trie - SyncAccountsCalled func(rootHash []byte) error + SyncAccountsCalled func(rootHash []byte, storageMarker common.StorageMarker) error } // GetSyncedTries - @@ -19,9 +19,9 @@ 
func (a *AccountsDBSyncerStub) GetSyncedTries() map[string]common.Trie { } // SyncAccounts - -func (a *AccountsDBSyncerStub) SyncAccounts(rootHash []byte) error { +func (a *AccountsDBSyncerStub) SyncAccounts(rootHash []byte, storageMarker common.StorageMarker) error { if a.SyncAccountsCalled != nil { - return a.SyncAccountsCalled(rootHash) + return a.SyncAccountsCalled(rootHash, storageMarker) } return nil } diff --git a/integrationTests/state/stateTrieSync/stateTrieSync_test.go b/integrationTests/state/stateTrieSync/stateTrieSync_test.go index 367f6db6e14..9462df96ff7 100644 --- a/integrationTests/state/stateTrieSync/stateTrieSync_test.go +++ b/integrationTests/state/stateTrieSync/stateTrieSync_test.go @@ -346,7 +346,7 @@ func testMultipleDataTriesSync(t *testing.T, numAccounts int, numDataTrieLeaves userAccSyncer, err := syncer.NewUserAccountsSyncer(syncerArgs) assert.Nil(t, err) - err = userAccSyncer.SyncAccounts(rootHash) + err = userAccSyncer.SyncAccounts(rootHash, storageMarker.NewDisabledStorageMarker()) assert.Nil(t, err) _ = nRequester.AccntState.RecreateTrie(rootHash) @@ -588,7 +588,6 @@ func getUserAccountSyncerArgs(node *integrationTests.TestProcessorNode, version MaxTrieLevelInMemory: 200, MaxHardCapForMissingNodes: 5000, TrieSyncerVersion: version, - StorageMarker: storageMarker.NewTrieStorageMarker(), UserAccountsSyncStatisticsHandler: statistics.NewTrieSyncStatistics(), AppStatusHandler: integrationTests.TestAppStatusHandler, }, diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index e0a1c3d129a..6dcefa19bc7 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -111,6 +111,7 @@ import ( stateMock "github.com/multiversx/mx-chain-go/testscommon/state" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" 
trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie/keyBuilder" "github.com/multiversx/mx-chain-go/update" @@ -3184,9 +3185,9 @@ func GetDefaultStateComponents() *testscommon.StateComponentsMock { AccountsRepo: &stateMock.AccountsRepositoryStub{}, Tries: &trieMock.TriesHolderStub{}, StorageManagers: map[string]common.StorageManager{ - "0": &testscommon.StorageManagerStub{}, - dataRetriever.UserAccountsUnit.String(): &testscommon.StorageManagerStub{}, - dataRetriever.PeerAccountsUnit.String(): &testscommon.StorageManagerStub{}, + "0": &storageManager.StorageManagerStub{}, + dataRetriever.UserAccountsUnit.String(): &storageManager.StorageManagerStub{}, + dataRetriever.PeerAccountsUnit.String(): &storageManager.StorageManagerStub{}, }, } } @@ -3225,7 +3226,7 @@ func getDefaultBootstrapComponents(shardCoordinator sharding.Coordinator) *mainF return &mainFactoryMocks.BootstrapComponentsStub{ Bootstrapper: &bootstrapMocks.EpochStartBootstrapperStub{ TrieHolder: &trieMock.TriesHolderStub{}, - StorageManagers: map[string]common.StorageManager{"0": &testscommon.StorageManagerStub{}}, + StorageManagers: map[string]common.StorageManager{"0": &storageManager.StorageManagerStub{}}, BootstrapCalled: nil, }, BootstrapParams: &bootstrapMocks.BootstrapParamsHandlerMock{}, diff --git a/node/nodeRunner.go b/node/nodeRunner.go index 32ed4643e49..8ce94a1fb5a 100644 --- a/node/nodeRunner.go +++ b/node/nodeRunner.go @@ -60,7 +60,6 @@ import ( storageFactory "github.com/multiversx/mx-chain-go/storage/factory" "github.com/multiversx/mx-chain-go/storage/storageunit" trieStatistics "github.com/multiversx/mx-chain-go/trie/statistics" - "github.com/multiversx/mx-chain-go/trie/storageMarker" "github.com/multiversx/mx-chain-go/update/trigger" logger "github.com/multiversx/mx-chain-logger-go" ) @@ -674,7 +673,6 @@ func getBaseAccountSyncerArgs( MaxTrieLevelInMemory: maxTrieLevelInMemory, MaxHardCapForMissingNodes: 
config.TrieSync.MaxHardCapForMissingNodes, TrieSyncerVersion: config.TrieSync.TrieSyncerVersion, - StorageMarker: storageMarker.NewDisabledStorageMarker(), CheckNodesOnDisk: true, UserAccountsSyncStatisticsHandler: trieStatistics.NewTrieSyncStatistics(), AppStatusHandler: disabled.NewAppStatusHandler(), diff --git a/node/nodeTesting_test.go b/node/nodeTesting_test.go index 89aa392bfc4..350d752c51e 100644 --- a/node/nodeTesting_test.go +++ b/node/nodeTesting_test.go @@ -23,6 +23,7 @@ import ( dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -397,7 +398,7 @@ func getDefaultCryptoComponents() *factoryMock.CryptoComponentsMock { PubKeyBytes: []byte("pubKey"), BlockSig: &mock.SingleSignerMock{}, TxSig: &mock.SingleSignerMock{}, - MultiSigContainer: cryptoMocks.NewMultiSignerContainerMock( cryptoMocks.NewMultiSigner()), + MultiSigContainer: cryptoMocks.NewMultiSignerContainerMock(cryptoMocks.NewMultiSigner()), PeerSignHandler: &mock.PeerSignatureHandler{}, BlKeyGen: &mock.KeyGenMock{}, TxKeyGen: &mock.KeyGenMock{}, @@ -415,7 +416,7 @@ func getDefaultStateComponents() *testscommon.StateComponentsMock { AccountsAPI: &stateMock.AccountsStub{}, AccountsRepo: &stateMock.AccountsRepositoryStub{}, Tries: &trieMock.TriesHolderStub{}, - StorageManagers: map[string]common.StorageManager{"0": &testscommon.StorageManagerStub{}}, + StorageManagers: map[string]common.StorageManager{"0": &storageManager.StorageManagerStub{}}, } } diff --git a/node/node_test.go b/node/node_test.go index 51503cfadf8..2d3668d5634 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -54,6 +54,7 @@ import ( stateMock 
"github.com/multiversx/mx-chain-go/testscommon/state" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" "github.com/multiversx/mx-chain-go/testscommon/storage" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/testscommon/txsSenderMock" "github.com/multiversx/mx-chain-go/vm/systemSmartContracts" @@ -4007,7 +4008,7 @@ func getDefaultBootstrapComponents() *mainFactoryMocks.BootstrapComponentsStub { return &mainFactoryMocks.BootstrapComponentsStub{ Bootstrapper: &bootstrapMocks.EpochStartBootstrapperStub{ TrieHolder: &trieMock.TriesHolderStub{}, - StorageManagers: map[string]common.StorageManager{"0": &testscommon.StorageManagerStub{}}, + StorageManagers: map[string]common.StorageManager{"0": &storageManager.StorageManagerStub{}}, BootstrapCalled: nil, }, BootstrapParams: &bootstrapMocks.BootstrapParamsHandlerMock{}, diff --git a/process/interface.go b/process/interface.go index 8c324e2d7fe..cacfc6650b9 100644 --- a/process/interface.go +++ b/process/interface.go @@ -1179,7 +1179,7 @@ type InterceptedChunksProcessor interface { // AccountsDBSyncer defines the methods for the accounts db syncer type AccountsDBSyncer interface { - SyncAccounts(rootHash []byte) error + SyncAccounts(rootHash []byte, storageMarker common.StorageMarker) error IsInterfaceNil() bool } diff --git a/process/mock/accountsDBSyncerStub.go b/process/mock/accountsDBSyncerStub.go index 9ff9abb9017..39477bdc70a 100644 --- a/process/mock/accountsDBSyncerStub.go +++ b/process/mock/accountsDBSyncerStub.go @@ -7,7 +7,7 @@ import ( // AccountsDBSyncerStub - type AccountsDBSyncerStub struct { GetSyncedTriesCalled func() map[string]common.Trie - SyncAccountsCalled func(rootHash []byte) error + SyncAccountsCalled func(rootHash []byte, storageMarker common.StorageMarker) error } // GetSyncedTries - @@ -19,9 +19,9 @@ func (a *AccountsDBSyncerStub) GetSyncedTries() 
map[string]common.Trie { } // SyncAccounts - -func (a *AccountsDBSyncerStub) SyncAccounts(rootHash []byte) error { +func (a *AccountsDBSyncerStub) SyncAccounts(rootHash []byte, storageMarker common.StorageMarker) error { if a.SyncAccountsCalled != nil { - return a.SyncAccountsCalled(rootHash) + return a.SyncAccountsCalled(rootHash, storageMarker) } return nil } diff --git a/process/sync/baseSync.go b/process/sync/baseSync.go index 2c2ea3ffcb5..b3a83e1bfe0 100644 --- a/process/sync/baseSync.go +++ b/process/sync/baseSync.go @@ -27,6 +27,7 @@ import ( "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/storage" + "github.com/multiversx/mx-chain-go/trie/storageMarker" logger "github.com/multiversx/mx-chain-logger-go" ) @@ -696,7 +697,7 @@ func (boot *baseBootstrap) handleTrieSyncError(err error, ctx context.Context) { func (boot *baseBootstrap) syncUserAccountsState(key []byte) error { log.Warn("base sync: started syncUserAccountsState") - return boot.accountsDBSyncer.SyncAccounts(key) + return boot.accountsDBSyncer.SyncAccounts(key, storageMarker.NewDisabledStorageMarker()) } func (boot *baseBootstrap) cleanNoncesSyncedWithErrorsBehindFinal() { diff --git a/process/sync/metablock.go b/process/sync/metablock.go index 6b820235074..f7cb6f4aea1 100644 --- a/process/sync/metablock.go +++ b/process/sync/metablock.go @@ -12,6 +12,7 @@ import ( "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/storage" + "github.com/multiversx/mx-chain-go/trie/storageMarker" ) // MetaBootstrap implements the bootstrap mechanism @@ -216,7 +217,7 @@ func (boot *MetaBootstrap) syncAccountsDBs(key []byte, id string) error { func (boot *MetaBootstrap) syncValidatorAccountsState(key []byte) error { log.Warn("base sync: started syncValidatorAccountsState") - return boot.validatorStatisticsDBSyncer.SyncAccounts(key) + return 
boot.validatorStatisticsDBSyncer.SyncAccounts(key, storageMarker.NewDisabledStorageMarker()) } // Close closes the synchronization loop diff --git a/process/sync/metablock_test.go b/process/sync/metablock_test.go index 019076d66b9..088457a2c3f 100644 --- a/process/sync/metablock_test.go +++ b/process/sync/metablock_test.go @@ -1682,7 +1682,7 @@ func TestMetaBootstrap_SyncBlockErrGetNodeDBShouldSyncAccounts(t *testing.T) { ) accountsSyncCalled := false args.AccountsDBSyncer = &mock.AccountsDBSyncerStub{ - SyncAccountsCalled: func(rootHash []byte) error { + SyncAccountsCalled: func(rootHash []byte, _ common.StorageMarker) error { accountsSyncCalled = true return nil }, @@ -1736,7 +1736,7 @@ func TestMetaBootstrap_SyncAccountsDBs(t *testing.T) { args := CreateMetaBootstrapMockArguments() accountsSyncCalled := false args.AccountsDBSyncer = &mock.AccountsDBSyncerStub{ - SyncAccountsCalled: func(rootHash []byte) error { + SyncAccountsCalled: func(rootHash []byte, _ common.StorageMarker) error { accountsSyncCalled = true return nil }, @@ -1776,7 +1776,7 @@ func TestMetaBootstrap_SyncAccountsDBs(t *testing.T) { args := CreateMetaBootstrapMockArguments() accountsSyncCalled := false args.ValidatorStatisticsDBSyncer = &mock.AccountsDBSyncerStub{ - SyncAccountsCalled: func(rootHash []byte) error { + SyncAccountsCalled: func(rootHash []byte, _ common.StorageMarker) error { accountsSyncCalled = true return nil }, diff --git a/process/sync/shardblock_test.go b/process/sync/shardblock_test.go index b0175890e43..5ef6be8394b 100644 --- a/process/sync/shardblock_test.go +++ b/process/sync/shardblock_test.go @@ -2135,7 +2135,7 @@ func TestShardBootstrap_SyncBlockGetNodeDBErrorShouldSync(t *testing.T) { syncCalled := false args.AccountsDBSyncer = &mock.AccountsDBSyncerStub{ - SyncAccountsCalled: func(rootHash []byte) error { + SyncAccountsCalled: func(rootHash []byte, _ common.StorageMarker) error { syncCalled = true return nil }} diff --git a/state/accountsDB.go b/state/accountsDB.go 
index ef200093467..295824b8c7f 100644 --- a/state/accountsDB.go +++ b/state/accountsDB.go @@ -19,6 +19,7 @@ import ( "github.com/multiversx/mx-chain-go/common/holders" "github.com/multiversx/mx-chain-go/trie/keyBuilder" "github.com/multiversx/mx-chain-go/trie/statistics" + "github.com/multiversx/mx-chain-go/trie/storageMarker" logger "github.com/multiversx/mx-chain-logger-go" vmcommon "github.com/multiversx/mx-chain-vm-common-go" ) @@ -1295,7 +1296,7 @@ func (adb *AccountsDB) syncMissingNodes(missingNodesChan chan []byte, errChan ch } for missingNode := range missingNodesChan { - err := syncer.SyncAccounts(missingNode) + err := syncer.SyncAccounts(missingNode, storageMarker.NewDisabledStorageMarker()) if err != nil { log.Error("could not sync missing node", "missing node hash", missingNode, diff --git a/state/accountsDB_test.go b/state/accountsDB_test.go index f0ddcf55616..4e1f98ea294 100644 --- a/state/accountsDB_test.go +++ b/state/accountsDB_test.go @@ -30,6 +30,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" "github.com/multiversx/mx-chain-go/trie/hashesHolder" @@ -44,7 +45,7 @@ func createMockAccountsDBArgs() state.ArgsAccountsDB { return state.ArgsAccountsDB{ Trie: &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, }, Hasher: &hashingMocks.HasherMock{}, @@ -232,7 +233,7 @@ func TestAccountsDB_SaveAccountNilAccountShouldErr(t *testing.T) { adb := generateAccountDBFromTrie(&trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return 
&storageManager.StorageManagerStub{} }, }) @@ -249,7 +250,7 @@ func TestAccountsDB_SaveAccountErrWhenGettingOldAccountShouldErr(t *testing.T) { return nil, 0, expectedErr }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, }) @@ -268,7 +269,7 @@ func TestAccountsDB_SaveAccountNilOldAccount(t *testing.T) { return nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, }) @@ -292,7 +293,7 @@ func TestAccountsDB_SaveAccountExistingOldAccount(t *testing.T) { return nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, }) @@ -329,7 +330,7 @@ func TestAccountsDB_SaveAccountSavesCodeAndDataTrieForUserAccount(t *testing.T) return trieStub, nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, }) @@ -351,7 +352,7 @@ func TestAccountsDB_SaveAccountMalfunctionMarshallerShouldErr(t *testing.T) { account := generateAccount() mockTrie := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } marshaller := &testscommon.MarshalizerMock{} @@ -380,7 +381,7 @@ func TestAccountsDB_SaveAccountWithSomeValuesShouldWork(t *testing.T) { return nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } _, account, adb := generateAddressAccountAccountsDB(ts) @@ -407,7 +408,7 @@ func TestAccountsDB_RemoveAccountShouldWork(t *testing.T) { return nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return 
&storageManager.StorageManagerStub{} }, } @@ -427,7 +428,7 @@ func TestAccountsDB_LoadAccountMalfunctionTrieShouldErr(t *testing.T) { trieStub := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } adr := make([]byte, 32) @@ -448,7 +449,7 @@ func TestAccountsDB_LoadAccountNotFoundShouldCreateEmpty(t *testing.T) { return nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } @@ -489,7 +490,7 @@ func TestAccountsDB_LoadAccountExistingShouldLoadDataTrie(t *testing.T) { return dataTrie, nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } @@ -508,7 +509,7 @@ func TestAccountsDB_GetExistingAccountMalfunctionTrieShouldErr(t *testing.T) { trieStub := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } adr := make([]byte, 32) @@ -526,7 +527,7 @@ func TestAccountsDB_GetExistingAccountNotFoundShouldRetNil(t *testing.T) { return nil, 0, nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } @@ -567,7 +568,7 @@ func TestAccountsDB_GetExistingAccountFoundShouldRetAccount(t *testing.T) { return dataTrie, nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } @@ -586,7 +587,7 @@ func TestAccountsDB_GetAccountAccountNotFound(t *testing.T) { tr := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } adr, _, _ := 
generateAddressAccountAccountsDB(tr) @@ -626,7 +627,7 @@ func TestAccountsDB_LoadCodeWrongHashLengthShouldErr(t *testing.T) { tr := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } _, account, adb := generateAddressAccountAccountsDB(tr) @@ -644,7 +645,7 @@ func TestAccountsDB_LoadCodeMalfunctionTrieShouldErr(t *testing.T) { account := generateAccount() mockTrie := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } adb := generateAccountDBFromTrie(mockTrie) @@ -661,7 +662,7 @@ func TestAccountsDB_LoadCodeOkValsShouldWork(t *testing.T) { tr := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } adr, account, _ := generateAddressAccountAccountsDB(tr) @@ -674,7 +675,7 @@ func TestAccountsDB_LoadCodeOkValsShouldWork(t *testing.T) { return serializedCodeEntry, 0, err }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } @@ -699,7 +700,7 @@ func TestAccountsDB_LoadDataNilRootShouldRetNil(t *testing.T) { tr := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } _, account, adb := generateAddressAccountAccountsDB(tr) @@ -715,7 +716,7 @@ func TestAccountsDB_LoadDataBadLengthShouldErr(t *testing.T) { _, account, adb := generateAddressAccountAccountsDB(&trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, }) @@ -734,7 +735,7 @@ func TestAccountsDB_LoadDataMalfunctionTrieShouldErr(t *testing.T) { 
mockTrie := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } adb := generateAccountDBFromTrie(mockTrie) @@ -749,7 +750,7 @@ func TestAccountsDB_LoadDataNotFoundRootShouldReturnErr(t *testing.T) { _, account, adb := generateAddressAccountAccountsDB(&trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, }) @@ -794,7 +795,7 @@ func TestAccountsDB_LoadDataWithSomeValuesShouldWork(t *testing.T) { return dataTrie, nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } adb := generateAccountDBFromTrie(mockTrie) @@ -850,7 +851,7 @@ func TestAccountsDB_CommitShouldCallCommitFromTrie(t *testing.T) { }, nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } @@ -876,7 +877,7 @@ func TestAccountsDB_RecreateTrieMalfunctionTrieShouldErr(t *testing.T) { errExpected := errors.New("failure") trieStub := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } trieStub.RecreateFromEpochCalled = func(_ common.RootHashHolder) (tree common.Trie, e error) { @@ -898,7 +899,7 @@ func TestAccountsDB_RecreateTrieOutputsNilTrieShouldErr(t *testing.T) { trieStub := trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } trieStub.RecreateFromEpochCalled = func(_ common.RootHashHolder) (tree common.Trie, e error) { @@ -921,7 +922,7 @@ func TestAccountsDB_RecreateTrieOkValsShouldWork(t *testing.T) { trieStub := trieMock.TrieStub{ 
GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, RecreateFromEpochCalled: func(_ common.RootHashHolder) (common.Trie, error) { wasCalled = true @@ -943,7 +944,7 @@ func TestAccountsDB_SnapshotState(t *testing.T) { snapshotMut := sync.Mutex{} trieStub := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ TakeSnapshotCalled: func(_ string, _ []byte, _ []byte, _ *common.TrieIteratorChannels, _ chan []byte, _ common.SnapshotStatisticsHandler, _ uint32) { snapshotMut.Lock() takeSnapshotWasCalled = true @@ -969,7 +970,7 @@ func TestAccountsDB_SnapshotStateOnAClosedStorageManagerShouldNotMarkActiveDB(t activeDBWasPut := false trieStub := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ ShouldTakeSnapshotCalled: func() bool { return true }, @@ -1022,7 +1023,7 @@ func TestAccountsDB_SnapshotStateWithErrorsShouldNotMarkActiveDB(t *testing.T) { expectedErr := errors.New("expected error") trieStub := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ ShouldTakeSnapshotCalled: func() bool { return true }, @@ -1073,7 +1074,7 @@ func TestAccountsDB_SnapshotStateGetLatestStorageEpochErrDoesNotSnapshot(t *test takeSnapshotCalled := false trieStub := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ GetLatestStorageEpochCalled: func() (uint32, error) { return 0, fmt.Errorf("new error") }, @@ -1100,7 +1101,7 @@ func TestAccountsDB_SnapshotStateSnapshotSameRootHash(t *testing.T) { takeSnapshotCalled := 0 trieStub := &trieMock.TrieStub{ 
GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ GetLatestStorageEpochCalled: func() (uint32, error) { return latestEpoch, nil }, @@ -1183,7 +1184,7 @@ func TestAccountsDB_SnapshotStateSkipSnapshotIfSnapshotInProgress(t *testing.T) takeSnapshotCalled := 0 trieStub := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ GetLatestStorageEpochCalled: func() (uint32, error) { return latestEpoch, nil }, @@ -1247,7 +1248,7 @@ func TestAccountsDB_SetStateCheckpoint(t *testing.T) { snapshotMut := sync.Mutex{} trieStub := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ SetCheckpointCalled: func(_ []byte, _ []byte, _ *common.TrieIteratorChannels, _ chan []byte, _ common.SnapshotStatisticsHandler) { snapshotMut.Lock() setCheckPointWasCalled = true @@ -1270,7 +1271,7 @@ func TestAccountsDB_IsPruningEnabled(t *testing.T) { trieStub := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ IsPruningEnabledCalled: func() bool { return true }, @@ -1288,7 +1289,7 @@ func TestAccountsDB_RevertToSnapshotOutOfBounds(t *testing.T) { trieStub := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } adb := generateAccountDBFromTrie(trieStub) @@ -1410,7 +1411,7 @@ func TestAccountsDB_RootHash(t *testing.T) { return rootHash, nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } adb := generateAccountDBFromTrie(trieStub) @@ -1433,7 +1434,7 @@ func 
TestAccountsDB_GetAllLeaves(t *testing.T) { return nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } @@ -2403,7 +2404,7 @@ func TestAccountsDB_Close(t *testing.T) { return nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } marshaller := &testscommon.MarshalizerMock{} @@ -2474,7 +2475,7 @@ func TestAccountsDB_GetAccountFromBytesShouldLoadDataTrie(t *testing.T) { return dataTrie, nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, } @@ -2498,7 +2499,7 @@ func TestAccountsDB_SetSyncerAndStartSnapshotIfNeeded(t *testing.T) { return rootHash, nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ ShouldTakeSnapshotCalled: func() bool { return true }, @@ -2531,7 +2532,7 @@ func TestAccountsDB_SetSyncerAndStartSnapshotIfNeeded(t *testing.T) { return rootHash, nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ ShouldTakeSnapshotCalled: func() bool { return true }, @@ -2560,7 +2561,7 @@ func TestAccountsDB_SetSyncerAndStartSnapshotIfNeeded(t *testing.T) { return rootHash, nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ ShouldTakeSnapshotCalled: func() bool { return true }, @@ -2603,7 +2604,7 @@ func TestAccountsDB_NewAccountsDbStartsSnapshotAfterRestart(t *testing.T) { return rootHash, nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ GetCalled: func(key []byte) ([]byte, 
error) { if bytes.Equal(key, []byte(common.ActiveDBKey)) { return nil, fmt.Errorf("key not found") @@ -2838,7 +2839,7 @@ func TestAccountsDB_SyncMissingSnapshotNodes(t *testing.T) { trieHashes, _ = tr.GetAllHashes() syncer := &mock.AccountsDBSyncerStub{ - SyncAccountsCalled: func(rootHash []byte) error { + SyncAccountsCalled: func(rootHash []byte, _ common.StorageMarker) error { isSyncError = true return errors.New("sync error") }, diff --git a/state/factory/accountsAdapterAPICreator_test.go b/state/factory/accountsAdapterAPICreator_test.go index dd88f13dd4e..b0151b907c6 100644 --- a/state/factory/accountsAdapterAPICreator_test.go +++ b/state/factory/accountsAdapterAPICreator_test.go @@ -10,6 +10,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" mockState "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" mockTrie "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/stretchr/testify/assert" ) @@ -18,7 +19,7 @@ func createMockAccountsArgs() state.ArgsAccountsDB { return state.ArgsAccountsDB{ Trie: &mockTrie.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, }, Hasher: &testscommon.HasherStub{}, diff --git a/state/interface.go b/state/interface.go index 376a25e1813..83419de7d8e 100644 --- a/state/interface.go +++ b/state/interface.go @@ -131,7 +131,7 @@ type AccountsAdapter interface { // AccountsDBSyncer defines the methods for the accounts db syncer type AccountsDBSyncer interface { - SyncAccounts(rootHash []byte) error + SyncAccounts(rootHash []byte, storageMarker common.StorageMarker) error IsInterfaceNil() bool } diff --git a/state/peerAccountsDB_test.go b/state/peerAccountsDB_test.go index a3524d5aab8..06e5f777179 100644 --- a/state/peerAccountsDB_test.go +++ b/state/peerAccountsDB_test.go @@ -12,7 +12,7 
@@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/state" - "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/stretchr/testify/assert" ) @@ -98,7 +98,7 @@ func TestNewPeerAccountsDB_SnapshotState(t *testing.T) { args := createMockAccountsDBArgs() args.Trie = &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ TakeSnapshotCalled: func(_ string, _ []byte, _ []byte, _ *common.TrieIteratorChannels, _ chan []byte, _ common.SnapshotStatisticsHandler, _ uint32) { snapshotCalled = true }, @@ -121,7 +121,7 @@ func TestNewPeerAccountsDB_SnapshotStateGetLatestStorageEpochErrDoesNotSnapshot( args := createMockAccountsDBArgs() args.Trie = &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ GetLatestStorageEpochCalled: func() (uint32, error) { return 0, fmt.Errorf("new error") }, @@ -146,7 +146,7 @@ func TestNewPeerAccountsDB_SetStateCheckpoint(t *testing.T) { args := createMockAccountsDBArgs() args.Trie = &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ SetCheckpointCalled: func(_ []byte, _ []byte, _ *common.TrieIteratorChannels, _ chan []byte, _ common.SnapshotStatisticsHandler) { checkpointCalled = true }, @@ -169,7 +169,7 @@ func TestNewPeerAccountsDB_RecreateAllTries(t *testing.T) { args := createMockAccountsDBArgs() args.Trie = &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{} + return &storageManager.StorageManagerStub{} }, RecreateCalled: func(_ []byte) 
(common.Trie, error) { recreateCalled = true @@ -198,7 +198,7 @@ func TestPeerAccountsDB_SetSyncerAndStartSnapshotIfNeeded(t *testing.T) { return rootHash, nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ GetCalled: func(key []byte) ([]byte, error) { if bytes.Equal(key, []byte(common.ActiveDBKey)) { return nil, fmt.Errorf("key not found") @@ -253,7 +253,7 @@ func TestPeerAccountsDB_MarkSnapshotDone(t *testing.T) { args := createMockAccountsDBArgs() args.Trie = &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ PutInEpochCalled: func(bytes []byte, bytes2 []byte, u uint32) error { assert.Fail(t, "should have not called put in epoch") return nil @@ -283,7 +283,7 @@ func TestPeerAccountsDB_MarkSnapshotDone(t *testing.T) { args := createMockAccountsDBArgs() args.Trie = &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ PutInEpochWithoutCacheCalled: func(key []byte, value []byte, epoch uint32) error { assert.Equal(t, common.ActiveDBKey, string(key)) assert.Equal(t, common.ActiveDBVal, string(value)) @@ -306,7 +306,7 @@ func TestPeerAccountsDB_MarkSnapshotDone(t *testing.T) { args := createMockAccountsDBArgs() args.Trie = &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ PutInEpochWithoutCacheCalled: func(key []byte, value []byte, epoch uint32) error { assert.Equal(t, common.ActiveDBKey, string(key)) assert.Equal(t, common.ActiveDBVal, string(value)) @@ -337,7 +337,7 @@ func TestPeerAccountsDB_SetSyncerAndStartSnapshotIfNeededMarksActiveDB(t *testin return rootHash, nil }, GetStorageManagerCalled: func() common.StorageManager { - return 
&testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ ShouldTakeSnapshotCalled: func() bool { return true }, @@ -372,7 +372,7 @@ func TestPeerAccountsDB_SetSyncerAndStartSnapshotIfNeededMarksActiveDB(t *testin return rootHash, nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ ShouldTakeSnapshotCalled: func() bool { return true }, @@ -403,7 +403,7 @@ func TestPeerAccountsDB_SetSyncerAndStartSnapshotIfNeededMarksActiveDB(t *testin return rootHash, nil }, GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ ShouldTakeSnapshotCalled: func() bool { return true }, @@ -443,7 +443,7 @@ func TestPeerAccountsDB_SnapshotStateOnAClosedStorageManagerShouldNotMarkActiveD activeDBWasPut := false trieStub := &trieMock.TrieStub{ GetStorageManagerCalled: func() common.StorageManager { - return &testscommon.StorageManagerStub{ + return &storageManager.StorageManagerStub{ ShouldTakeSnapshotCalled: func() bool { return true }, diff --git a/state/syncer/baseAccountsSyncer.go b/state/syncer/baseAccountsSyncer.go index 18d28fc3370..18f43365341 100644 --- a/state/syncer/baseAccountsSyncer.go +++ b/state/syncer/baseAccountsSyncer.go @@ -32,7 +32,6 @@ type baseAccountsSyncer struct { name string maxHardCapForMissingNodes int checkNodesOnDisk bool - storageMarker trie.StorageMarker userAccountsSyncStatisticsHandler common.SizeSyncStatisticsHandler appStatusHandler core.AppStatusHandler @@ -48,7 +47,6 @@ type ArgsNewBaseAccountsSyncer struct { Hasher hashing.Hasher Marshalizer marshal.Marshalizer TrieStorageManager common.StorageManager - StorageMarker trie.StorageMarker RequestHandler trie.RequestHandler Timeout time.Duration Cacher storage.Cacher diff --git a/state/syncer/userAccountsSyncer.go b/state/syncer/userAccountsSyncer.go index 0170af9d17e..6ac039fe836 100644 --- 
a/state/syncer/userAccountsSyncer.go +++ b/state/syncer/userAccountsSyncer.go @@ -12,7 +12,6 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/trie" @@ -20,7 +19,7 @@ import ( logger "github.com/multiversx/mx-chain-logger-go" ) -var _ epochStart.AccountsDBSyncer = (*userAccountsSyncer)(nil) +var _ state.AccountsDBSyncer = (*userAccountsSyncer)(nil) var log = logger.GetOrCreate("syncer") @@ -88,7 +87,6 @@ func NewUserAccountsSyncer(args ArgsNewUserAccountsSyncer) (*userAccountsSyncer, maxHardCapForMissingNodes: args.MaxHardCapForMissingNodes, trieSyncerVersion: args.TrieSyncerVersion, checkNodesOnDisk: args.CheckNodesOnDisk, - storageMarker: args.StorageMarker, userAccountsSyncStatisticsHandler: args.UserAccountsSyncStatisticsHandler, appStatusHandler: args.AppStatusHandler, } @@ -104,7 +102,7 @@ func NewUserAccountsSyncer(args ArgsNewUserAccountsSyncer) (*userAccountsSyncer, } // SyncAccounts will launch the syncing method to gather all the data needed for userAccounts - it is a blocking method -func (u *userAccountsSyncer) SyncAccounts(rootHash []byte) error { +func (u *userAccountsSyncer) SyncAccounts(rootHash []byte, storageMarker common.StorageMarker) error { u.mutex.Lock() defer u.mutex.Unlock() @@ -134,7 +132,7 @@ func (u *userAccountsSyncer) SyncAccounts(rootHash []byte) error { return err } - u.storageMarker.MarkStorerAsSyncedAndActive(mainTrie.GetStorageManager()) + storageMarker.MarkStorerAsSyncedAndActive(mainTrie.GetStorageManager()) return nil } diff --git a/state/syncer/validatorAccountsSyncer.go b/state/syncer/validatorAccountsSyncer.go index 34b87d1eb78..51076bdaec0 100644 --- a/state/syncer/validatorAccountsSyncer.go +++ b/state/syncer/validatorAccountsSyncer.go @@ -5,12 
+5,12 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/process/factory" + "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/trie/statistics" ) -var _ epochStart.AccountsDBSyncer = (*validatorAccountsSyncer)(nil) +var _ state.AccountsDBSyncer = (*validatorAccountsSyncer)(nil) type validatorAccountsSyncer struct { *baseAccountsSyncer @@ -48,7 +48,6 @@ func NewValidatorAccountsSyncer(args ArgsNewValidatorAccountsSyncer) (*validator maxHardCapForMissingNodes: args.MaxHardCapForMissingNodes, trieSyncerVersion: args.TrieSyncerVersion, checkNodesOnDisk: args.CheckNodesOnDisk, - storageMarker: args.StorageMarker, userAccountsSyncStatisticsHandler: statistics.NewTrieSyncStatistics(), appStatusHandler: args.AppStatusHandler, } @@ -61,7 +60,7 @@ func NewValidatorAccountsSyncer(args ArgsNewValidatorAccountsSyncer) (*validator } // SyncAccounts will launch the syncing method to gather all the data needed for validatorAccounts - it is a blocking method -func (v *validatorAccountsSyncer) SyncAccounts(rootHash []byte) error { +func (v *validatorAccountsSyncer) SyncAccounts(rootHash []byte, storageMarker common.StorageMarker) error { v.mutex.Lock() defer v.mutex.Unlock() @@ -80,7 +79,7 @@ func (v *validatorAccountsSyncer) SyncAccounts(rootHash []byte) error { return err } - v.storageMarker.MarkStorerAsSyncedAndActive(mainTrie.GetStorageManager()) + storageMarker.MarkStorerAsSyncedAndActive(mainTrie.GetStorageManager()) return nil } diff --git a/testscommon/components/default.go b/testscommon/components/default.go index 72c0f58778f..bf7c893867a 100644 --- a/testscommon/components/default.go +++ b/testscommon/components/default.go @@ -18,6 +18,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" 
"github.com/multiversx/mx-chain-go/testscommon/storage" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" ) @@ -91,9 +92,9 @@ func GetDefaultStateComponents() *testscommon.StateComponentsMock { Accounts: &stateMock.AccountsStub{}, Tries: &trieMock.TriesHolderStub{}, StorageManagers: map[string]common.StorageManager{ - "0": &testscommon.StorageManagerStub{}, - dataRetriever.UserAccountsUnit.String(): &testscommon.StorageManagerStub{}, - dataRetriever.PeerAccountsUnit.String(): &testscommon.StorageManagerStub{}, + "0": &storageManager.StorageManagerStub{}, + dataRetriever.UserAccountsUnit.String(): &storageManager.StorageManagerStub{}, + dataRetriever.PeerAccountsUnit.String(): &storageManager.StorageManagerStub{}, }, } } diff --git a/testscommon/storageManagerStub.go b/testscommon/storageManager/storageManagerStub.go similarity index 99% rename from testscommon/storageManagerStub.go rename to testscommon/storageManager/storageManagerStub.go index 924340d4df7..caaca56576b 100644 --- a/testscommon/storageManagerStub.go +++ b/testscommon/storageManager/storageManagerStub.go @@ -1,4 +1,4 @@ -package testscommon +package storageManager import ( "github.com/multiversx/mx-chain-go/common" diff --git a/trie/export_test.go b/trie/export_test.go index 0cba48402e0..457168d4a15 100644 --- a/trie/export_test.go +++ b/trie/export_test.go @@ -4,7 +4,7 @@ import ( "time" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" ) func (ts *trieSyncer) trieNodeIntercepted(hash []byte, val interface{}) { @@ -79,7 +79,7 @@ func WriteInChanNonBlocking(errChan chan error, err error) { } type StorageManagerExtensionStub struct { - *testscommon.StorageManagerStub + *storageManager.StorageManagerStub } // IsBaseTrieStorageManager - diff --git a/trie/interface.go b/trie/interface.go index aaa5b0a075e..9cd5502743e 
100644 --- a/trie/interface.go +++ b/trie/interface.go @@ -123,11 +123,6 @@ type storageManagerExtension interface { RemoveFromCheckpointHashesHolder(hash []byte) } -// StorageMarker is used to mark the given storer as synced and active -type StorageMarker interface { - MarkStorerAsSyncedAndActive(storer common.StorageManager) -} - type dbWriteCacherWithIdentifier interface { GetIdentifier() string } diff --git a/trie/storageMarker/trieStorageMarker_test.go b/trie/storageMarker/trieStorageMarker_test.go index ae6699801cb..6312e80262c 100644 --- a/trie/storageMarker/trieStorageMarker_test.go +++ b/trie/storageMarker/trieStorageMarker_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" "github.com/stretchr/testify/assert" ) @@ -17,7 +17,7 @@ func TestTrieStorageMarker_MarkStorerAsSyncedAndActive(t *testing.T) { trieSyncedKeyPut := false activeDbKeyPut := false - storer := &testscommon.StorageManagerStub{ + storer := &storageManager.StorageManagerStub{ GetLatestStorageEpochCalled: func() (uint32, error) { return 5, nil }, @@ -45,7 +45,7 @@ func TestTrieStorageMarker_MarkStorerAsSyncedAndActive(t *testing.T) { trieSyncedKeyPut := false activeDbKeyPut := false - storer := &testscommon.StorageManagerStub{ + storer := &storageManager.StorageManagerStub{ GetLatestStorageEpochCalled: func() (uint32, error) { return 0, nil }, diff --git a/trie/trieStorageManagerFactory_test.go b/trie/trieStorageManagerFactory_test.go index d5a28801d9c..d1afb5c1737 100644 --- a/trie/trieStorageManagerFactory_test.go +++ b/trie/trieStorageManagerFactory_test.go @@ -6,7 +6,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" 
"github.com/multiversx/mx-chain-go/trie" "github.com/stretchr/testify/assert" @@ -66,7 +66,7 @@ func TestTrieStorageManager_SerialFuncShadowingCallsExpectedImpl(t *testing.T) { getCalled := false returnedVal := []byte("existingVal") putCalled := 0 - tsm = &testscommon.StorageManagerStub{ + tsm = &storageManager.StorageManagerStub{ GetCalled: func(_ []byte) ([]byte, error) { getCalled = true return returnedVal, nil diff --git a/trie/trieStorageManagerInEpoch_test.go b/trie/trieStorageManagerInEpoch_test.go index 9ba92d45549..d13033f735a 100644 --- a/trie/trieStorageManagerInEpoch_test.go +++ b/trie/trieStorageManagerInEpoch_test.go @@ -6,7 +6,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/storage/database" - "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/stretchr/testify/assert" ) @@ -22,7 +22,7 @@ func TestNewTrieStorageManagerInEpochNilStorageManager(t *testing.T) { func TestNewTrieStorageManagerInEpochInvalidStorageManagerType(t *testing.T) { t.Parallel() - trieStorage := &testscommon.StorageManagerStub{} + trieStorage := &storageManager.StorageManagerStub{} tsmie, err := newTrieStorageManagerInEpoch(trieStorage, 0) assert.True(t, check.IfNil(tsmie)) diff --git a/trie/trieStorageManagerWithoutPruning_test.go b/trie/trieStorageManagerWithoutPruning_test.go index 4dc35a38613..f60a1078ebc 100644 --- a/trie/trieStorageManagerWithoutPruning_test.go +++ b/trie/trieStorageManagerWithoutPruning_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" "github.com/multiversx/mx-chain-go/trie" "github.com/stretchr/testify/assert" ) @@ -39,7 +39,7 @@ func TestTrieStorageManagerWithoutPruning_Remove(t *testing.T) { removeFromCheckpointHashesHolderCalled 
:= false tsm := &trie.StorageManagerExtensionStub{ - StorageManagerStub: &testscommon.StorageManagerStub{ + StorageManagerStub: &storageManager.StorageManagerStub{ RemoveFromCheckpointHashesHolderCalled: func(hash []byte) { removeFromCheckpointHashesHolderCalled = true }, diff --git a/update/container/accountDBSyncers_test.go b/update/container/accountDBSyncers_test.go index 0a63210e6c9..9e5a4853b7b 100644 --- a/update/container/accountDBSyncers_test.go +++ b/update/container/accountDBSyncers_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/update" "github.com/multiversx/mx-chain-go/update/mock" "github.com/stretchr/testify/require" @@ -69,7 +70,7 @@ func TestAccountDBSyncers_ReplaceShouldWork(t *testing.T) { // update newTestVal := &mock.AccountsDBSyncerStub{ - SyncAccountsCalled: func(_ []byte) error { + SyncAccountsCalled: func(_ []byte, _ common.StorageMarker) error { return errors.New("local error") }, } diff --git a/update/factory/accountDBSyncerContainerFactory.go b/update/factory/accountDBSyncerContainerFactory.go index 41fe4050c7a..ec1929c754f 100644 --- a/update/factory/accountDBSyncerContainerFactory.go +++ b/update/factory/accountDBSyncerContainerFactory.go @@ -15,7 +15,6 @@ import ( "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/trie" "github.com/multiversx/mx-chain-go/trie/statistics" - "github.com/multiversx/mx-chain-go/trie/storageMarker" "github.com/multiversx/mx-chain-go/update" containers "github.com/multiversx/mx-chain-go/update/container" "github.com/multiversx/mx-chain-go/update/genesis" @@ -150,7 +149,6 @@ func (a *accountDBSyncersContainerFactory) createUserAccountsSyncer(shardId uint MaxHardCapForMissingNodes: a.maxHardCapForMissingNodes, TrieSyncerVersion: a.trieSyncerVersion, CheckNodesOnDisk: a.checkNodesOnDisk, - StorageMarker: storageMarker.NewTrieStorageMarker(), 
UserAccountsSyncStatisticsHandler: statistics.NewTrieSyncStatistics(), AppStatusHandler: disabled.NewAppStatusHandler(), }, @@ -180,7 +178,6 @@ func (a *accountDBSyncersContainerFactory) createValidatorAccountsSyncer(shardId MaxHardCapForMissingNodes: a.maxHardCapForMissingNodes, TrieSyncerVersion: a.trieSyncerVersion, CheckNodesOnDisk: a.checkNodesOnDisk, - StorageMarker: storageMarker.NewTrieStorageMarker(), UserAccountsSyncStatisticsHandler: statistics.NewTrieSyncStatistics(), AppStatusHandler: disabled.NewAppStatusHandler(), }, diff --git a/update/genesis/import_test.go b/update/genesis/import_test.go index 01a8b50e82e..30dd5f95492 100644 --- a/update/genesis/import_test.go +++ b/update/genesis/import_test.go @@ -13,6 +13,7 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" "github.com/multiversx/mx-chain-go/update" "github.com/multiversx/mx-chain-go/update/mock" "github.com/stretchr/testify/assert" @@ -23,7 +24,7 @@ import ( func TestNewStateImport(t *testing.T) { trieStorageManagers := make(map[string]common.StorageManager) - trieStorageManagers[dataRetriever.UserAccountsUnit.String()] = &testscommon.StorageManagerStub{} + trieStorageManagers[dataRetriever.UserAccountsUnit.String()] = &storageManager.StorageManagerStub{} tests := []struct { name string args ArgsNewStateImport @@ -86,8 +87,8 @@ func TestImportAll(t *testing.T) { t.Parallel() trieStorageManagers := make(map[string]common.StorageManager) - trieStorageManagers[dataRetriever.UserAccountsUnit.String()] = &testscommon.StorageManagerStub{} - trieStorageManagers[dataRetriever.PeerAccountsUnit.String()] = &testscommon.StorageManagerStub{} + trieStorageManagers[dataRetriever.UserAccountsUnit.String()] = &storageManager.StorageManagerStub{} + trieStorageManagers[dataRetriever.PeerAccountsUnit.String()] = 
&storageManager.StorageManagerStub{} args := ArgsNewStateImport{ HardforkStorer: &mock.HardforkStorerStub{}, @@ -110,7 +111,7 @@ func TestStateImport_ImportUnFinishedMetaBlocksShouldWork(t *testing.T) { t.Parallel() trieStorageManagers := make(map[string]common.StorageManager) - trieStorageManagers[dataRetriever.UserAccountsUnit.String()] = &testscommon.StorageManagerStub{} + trieStorageManagers[dataRetriever.UserAccountsUnit.String()] = &storageManager.StorageManagerStub{} hasher := &hashingMocks.HasherMock{} marshahlizer := &mock.MarshalizerMock{} diff --git a/update/interface.go b/update/interface.go index bdd993e0392..6487b71438e 100644 --- a/update/interface.go +++ b/update/interface.go @@ -175,7 +175,7 @@ type WhiteListHandler interface { // AccountsDBSyncer defines the methods for the accounts db syncer type AccountsDBSyncer interface { GetSyncedTries() map[string]common.Trie - SyncAccounts(rootHash []byte) error + SyncAccounts(rootHash []byte, storageMarker common.StorageMarker) error IsInterfaceNil() bool } diff --git a/update/mock/accountsDBSyncerStub.go b/update/mock/accountsDBSyncerStub.go index 9ff9abb9017..39477bdc70a 100644 --- a/update/mock/accountsDBSyncerStub.go +++ b/update/mock/accountsDBSyncerStub.go @@ -7,7 +7,7 @@ import ( // AccountsDBSyncerStub - type AccountsDBSyncerStub struct { GetSyncedTriesCalled func() map[string]common.Trie - SyncAccountsCalled func(rootHash []byte) error + SyncAccountsCalled func(rootHash []byte, storageMarker common.StorageMarker) error } // GetSyncedTries - @@ -19,9 +19,9 @@ func (a *AccountsDBSyncerStub) GetSyncedTries() map[string]common.Trie { } // SyncAccounts - -func (a *AccountsDBSyncerStub) SyncAccounts(rootHash []byte) error { +func (a *AccountsDBSyncerStub) SyncAccounts(rootHash []byte, storageMarker common.StorageMarker) error { if a.SyncAccountsCalled != nil { - return a.SyncAccountsCalled(rootHash) + return a.SyncAccountsCalled(rootHash, storageMarker) } return nil } diff --git 
a/update/sync/coordinator_test.go b/update/sync/coordinator_test.go index de7b26d5032..b56b2d8f99a 100644 --- a/update/sync/coordinator_test.go +++ b/update/sync/coordinator_test.go @@ -109,7 +109,7 @@ func createSyncTrieState(retErr bool) update.EpochStartTriesSyncHandler { AccountsDBsSyncers: &mock.AccountsDBSyncersStub{ GetCalled: func(key string) (syncer update.AccountsDBSyncer, err error) { return &mock.AccountsDBSyncerStub{ - SyncAccountsCalled: func(rootHash []byte) error { + SyncAccountsCalled: func(rootHash []byte, _ common.StorageMarker) error { if retErr { return errors.New("err") } diff --git a/update/sync/syncAccountsDBs.go b/update/sync/syncAccountsDBs.go index 38c8a2fcf72..803460bd914 100644 --- a/update/sync/syncAccountsDBs.go +++ b/update/sync/syncAccountsDBs.go @@ -10,6 +10,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/trie/storageMarker" "github.com/multiversx/mx-chain-go/update" "github.com/multiversx/mx-chain-go/update/genesis" ) @@ -149,7 +150,7 @@ func (st *syncAccountsDBs) syncAccountsOfType(accountType genesis.Type, trieID s return err } - err = accountsDBSyncer.SyncAccounts(rootHash) + err = accountsDBSyncer.SyncAccounts(rootHash, storageMarker.NewDisabledStorageMarker()) if err != nil { // TODO: critical error - should not happen - maybe recreate trie syncer here return err From 1fd7feb5998882ebab88e9c46c5599b13f57564f Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Mon, 24 Apr 2023 20:50:48 +0300 Subject: [PATCH 101/221] some fixes after merge --- factory/processing/processComponents.go | 2 +- factory/processing/processComponents_test.go | 68 ++++++++++---------- 2 files changed, 36 insertions(+), 34 deletions(-) diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index dde67569aec..3c543396258 100644 --- a/factory/processing/processComponents.go +++ 
b/factory/processing/processComponents.go @@ -911,7 +911,7 @@ func (pcf *processComponentsFactory) indexAndReturnGenesisAccounts() (map[string encodedAddress, err := pcf.coreData.AddressPubKeyConverter().Encode(userAccount.AddressBytes()) if err != nil { - return nil, err + return map[string]*outport.AlteredAccount{}, err } genesisAccounts[encodedAddress] = &outport.AlteredAccount{ diff --git a/factory/processing/processComponents_test.go b/factory/processing/processComponents_test.go index dfa2992a6ab..c7769c5494f 100644 --- a/factory/processing/processComponents_test.go +++ b/factory/processing/processComponents_test.go @@ -68,6 +68,7 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto Length: 32, Type: "bech32", SignatureLength: 0, + Hrp: "erd", }) valPubKeyConv, _ := factory.NewPubkeyConverter(config.PubkeyConfig{ Length: 96, @@ -198,14 +199,15 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto MultiSigContainer: &cryptoMocks.MultiSignerContainerMock{ MultiSigner: &cryptoMocks.MultisignerMock{}, }, - PrivKey: &cryptoMocks.PrivateKeyStub{}, - PubKey: &cryptoMocks.PublicKeyStub{}, - PubKeyString: "pub key string", - PubKeyBytes: []byte("pub key bytes"), - TxKeyGen: &cryptoMocks.KeyGenStub{}, - TxSig: &cryptoMocks.SingleSignerStub{}, - PeerSignHandler: &cryptoMocks.PeerSignatureHandlerStub{}, - MsgSigVerifier: &testscommon.MessageSignVerifierMock{}, + PrivKey: &cryptoMocks.PrivateKeyStub{}, + PubKey: &cryptoMocks.PublicKeyStub{}, + PubKeyString: "pub key string", + PubKeyBytes: []byte("pub key bytes"), + TxKeyGen: &cryptoMocks.KeyGenStub{}, + TxSig: &cryptoMocks.SingleSignerStub{}, + PeerSignHandler: &cryptoMocks.PeerSignatureHandlerStub{}, + MsgSigVerifier: &testscommon.MessageSignVerifierMock{}, + ManagedPeersHolderField: &testscommon.ManagedPeersHolderStub{}, }, State: &testscommon.StateComponentsMock{ Accounts: &state.AccountsStub{ @@ -365,7 +367,7 @@ func TestNewProcessComponentsFactory(t 
*testing.T) { args.CoreData = &mock.CoreComponentsMock{ EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, NodesConfig: &testscommon.NodesSetupStub{}, - AddrPubKeyConv: &mock.PubkeyConverterStub{}, + AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, EpochChangeNotifier: nil, } pcf, err := processComp.NewProcessComponentsFactory(args) @@ -379,7 +381,7 @@ func TestNewProcessComponentsFactory(t *testing.T) { args.CoreData = &mock.CoreComponentsMock{ EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, NodesConfig: &testscommon.NodesSetupStub{}, - AddrPubKeyConv: &mock.PubkeyConverterStub{}, + AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, ValPubKeyConv: nil, } @@ -394,9 +396,9 @@ func TestNewProcessComponentsFactory(t *testing.T) { args.CoreData = &mock.CoreComponentsMock{ EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, NodesConfig: &testscommon.NodesSetupStub{}, - AddrPubKeyConv: &mock.PubkeyConverterStub{}, + AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, - ValPubKeyConv: &mock.PubkeyConverterStub{}, + ValPubKeyConv: &testscommon.PubkeyConverterStub{}, IntMarsh: nil, } pcf, err := processComp.NewProcessComponentsFactory(args) @@ -410,9 +412,9 @@ func TestNewProcessComponentsFactory(t *testing.T) { args.CoreData = &mock.CoreComponentsMock{ EconomicsHandler: &economicsmocks.EconomicsHandlerStub{}, NodesConfig: &testscommon.NodesSetupStub{}, - AddrPubKeyConv: &mock.PubkeyConverterStub{}, + AddrPubKeyConv: &testscommon.PubkeyConverterStub{}, EpochChangeNotifier: &epochNotifier.EpochNotifierStub{}, - ValPubKeyConv: &mock.PubkeyConverterStub{}, + ValPubKeyConv: &testscommon.PubkeyConverterStub{}, IntMarsh: &testscommon.MarshalizerStub{}, UInt64ByteSliceConv: nil, } @@ -903,19 +905,19 @@ func TestProcessComponentsFactory_Create(t *testing.T) { t.Run("NewESDTDataStorage fails should error", testWithNilMarshaller(105, 
"Marshalizer", unreachableStep)) t.Run("NewReceiptsRepository fails should error", - testWithNilMarshaller(106, "marshalizer", unreachableStep)) + testWithNilMarshaller(107, "marshalizer", unreachableStep)) t.Run("newBlockProcessor fails due to invalid shard should error", testWithInvalidShard(31, "could not create block processor")) // newShardBlockProcessor t.Run("newShardBlockProcessor: NewESDTTransferParser fails should error", - testWithNilMarshaller(107, "marshaller", unreachableStep)) + testWithNilMarshaller(108, "marshaller", unreachableStep)) t.Run("newShardBlockProcessor: createBuiltInFunctionContainer fails should error", testWithNilAddressPubKeyConv(46, "public key converter", unreachableStep)) t.Run("newShardBlockProcessor: createVMFactoryShard fails due to NewBlockChainHookImpl failure should error", testWithNilAddressPubKeyConv(47, "pubkey converter", unreachableStep)) t.Run("newShardBlockProcessor: NewIntermediateProcessorsContainerFactory fails should error", - testWithNilMarshaller(110, "Marshalizer", unreachableStep)) + testWithNilMarshaller(111, "Marshalizer", unreachableStep)) t.Run("newShardBlockProcessor: NewTxTypeHandler fails should error", testWithNilAddressPubKeyConv(49, "pubkey converter", unreachableStep)) t.Run("newShardBlockProcessor: NewGasComputation fails should error", @@ -949,13 +951,13 @@ func TestProcessComponentsFactory_Create(t *testing.T) { testCreateWithArgs(t, args, "PollingTimeInSeconds") }) t.Run("newShardBlockProcessor: NewBlockSizeComputation fails should error", - testWithNilMarshaller(116, "Marshalizer", unreachableStep)) - t.Run("newShardBlockProcessor: NewPreProcessorsContainerFactory fails should error", testWithNilMarshaller(117, "Marshalizer", unreachableStep)) - t.Run("newShardBlockProcessor: NewPrintDoubleTransactionsDetector fails should error", + t.Run("newShardBlockProcessor: NewPreProcessorsContainerFactory fails should error", testWithNilMarshaller(118, "Marshalizer", unreachableStep)) - 
t.Run("newShardBlockProcessor: NewTransactionCoordinator fails should error", + t.Run("newShardBlockProcessor: NewPrintDoubleTransactionsDetector fails should error", testWithNilMarshaller(119, "Marshalizer", unreachableStep)) + t.Run("newShardBlockProcessor: NewTransactionCoordinator fails should error", + testWithNilMarshaller(120, "Marshalizer", unreachableStep)) // newMetaBlockProcessor, step for meta is 31 inside newBlockProcessor t.Run("newMetaBlockProcessor: createBuiltInFunctionContainer fails should error", @@ -963,9 +965,9 @@ func TestProcessComponentsFactory_Create(t *testing.T) { t.Run("newMetaBlockProcessor: createVMFactoryMeta fails due to NewBlockChainHookImpl failure should error", testWithNilAddressPubKeyConv(47, "pubkey converter", blockProcessorOnMetaStep)) t.Run("newMetaBlockProcessor: NewIntermediateProcessorsContainerFactory fails should error", - testWithNilMarshaller(110, "Marshalizer", blockProcessorOnMetaStep)) + testWithNilMarshaller(111, "Marshalizer", blockProcessorOnMetaStep)) t.Run("newMetaBlockProcessor: NewESDTTransferParser fails should error", - testWithNilMarshaller(111, "marshaller", blockProcessorOnMetaStep)) + testWithNilMarshaller(112, "marshaller", blockProcessorOnMetaStep)) t.Run("newMetaBlockProcessor: NewTxTypeHandler fails should error", testWithNilAddressPubKeyConv(49, "pubkey converter", blockProcessorOnMetaStep)) t.Run("newMetaBlockProcessor: NewGasComputation fails should error", @@ -985,25 +987,25 @@ func TestProcessComponentsFactory_Create(t *testing.T) { t.Run("newMetaBlockProcessor: createMetaTxSimulatorProcessor fails due to NewMetaTxProcessor failure second time should error", testWithNilAddressPubKeyConv(55, "pubkey converter", blockProcessorOnMetaStep)) t.Run("newMetaBlockProcessor: NewBlockSizeComputation fails should error", - testWithNilMarshaller(119, "Marshalizer", blockProcessorOnMetaStep)) - t.Run("newMetaBlockProcessor: NewPreProcessorsContainerFactory fails should error", testWithNilMarshaller(120, 
"Marshalizer", blockProcessorOnMetaStep)) - t.Run("newMetaBlockProcessor: NewPrintDoubleTransactionsDetector fails should error", + t.Run("newMetaBlockProcessor: NewPreProcessorsContainerFactory fails should error", testWithNilMarshaller(121, "Marshalizer", blockProcessorOnMetaStep)) - t.Run("newMetaBlockProcessor: NewTransactionCoordinator fails should error", + t.Run("newMetaBlockProcessor: NewPrintDoubleTransactionsDetector fails should error", testWithNilMarshaller(122, "Marshalizer", blockProcessorOnMetaStep)) - t.Run("newMetaBlockProcessor: NewStakingToPeer fails should error", + t.Run("newMetaBlockProcessor: NewTransactionCoordinator fails should error", testWithNilMarshaller(123, "Marshalizer", blockProcessorOnMetaStep)) - t.Run("newMetaBlockProcessor: NewEpochStartData fails should error", + t.Run("newMetaBlockProcessor: NewStakingToPeer fails should error", testWithNilMarshaller(124, "Marshalizer", blockProcessorOnMetaStep)) + t.Run("newMetaBlockProcessor: NewEpochStartData fails should error", + testWithNilMarshaller(125, "Marshalizer", blockProcessorOnMetaStep)) t.Run("newMetaBlockProcessor: NewEndOfEpochEconomicsDataCreator fails should error", - testWithNilMarshaller(125, "marshalizer", blockProcessorOnMetaStep)) + testWithNilMarshaller(126, "marshalizer", blockProcessorOnMetaStep)) t.Run("newMetaBlockProcessor: GetStorer RewardTransactionUnit fails should error", testWithMissingStorer(1, retriever.RewardTransactionUnit, blockProcessorOnMetaStep)) t.Run("newMetaBlockProcessor: GetStorer MiniBlockUnit fails should error", testWithMissingStorer(4, retriever.MiniBlockUnit, blockProcessorOnMetaStep)) t.Run("newMetaBlockProcessor: NewRewardsCreatorProxy fails should error", - testWithNilMarshaller(126, "marshalizer", blockProcessorOnMetaStep)) + testWithNilMarshaller(127, "marshalizer", blockProcessorOnMetaStep)) t.Run("NewNodesSetupChecker fails should error", testWithNilPubKeyConv(5, "pubkey converter", unreachableStep)) t.Run("nodesSetupChecker.Check 
fails should error", func(t *testing.T) { @@ -1047,8 +1049,8 @@ func TestProcessComponentsFactory_Create(t *testing.T) { } testCreateWithArgs(t, args, "messenger") }) - t.Run("NewReceiptsRepository fails should error", testWithNilMarshaller(123, "marshalizer", unreachableStep)) - t.Run("NewTxsSenderWithAccumulator fails should error", testWithNilMarshaller(124, "Marshalizer", unreachableStep)) + t.Run("NewReceiptsRepository fails should error", testWithNilMarshaller(124, "marshalizer", unreachableStep)) + t.Run("NewTxsSenderWithAccumulator fails should error", testWithNilMarshaller(125, "Marshalizer", unreachableStep)) t.Run("should work with indexAndReturnGenesisAccounts failing due to RootHash failure", func(t *testing.T) { t.Parallel() From 3968467fce2ac072ee74d6ba02d69e1eed441000 Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Tue, 25 Apr 2023 16:02:55 +0300 Subject: [PATCH 102/221] fix after review --- common/interface.go | 1 + state/syncer/errors.go | 3 ++ state/syncer/userAccountSyncer_test.go | 47 ++++++++++++++++++++ state/syncer/userAccountsSyncer.go | 4 ++ state/syncer/validatorAccountsSyncer.go | 5 +++ state/syncer/validatorAccountsSyncer_test.go | 22 +++++++++ trie/storageMarker/disabledStorageMarker.go | 5 +++ trie/storageMarker/trieStorageMarker.go | 5 +++ 8 files changed, 92 insertions(+) create mode 100644 state/syncer/userAccountSyncer_test.go create mode 100644 state/syncer/validatorAccountsSyncer_test.go diff --git a/common/interface.go b/common/interface.go index eed1f157a50..d067f55972b 100644 --- a/common/interface.go +++ b/common/interface.go @@ -49,6 +49,7 @@ type TrieStats interface { // StorageMarker is used to mark the given storer as synced and active type StorageMarker interface { MarkStorerAsSyncedAndActive(storer StorageManager) + IsInterfaceNil() bool } // KeyBuilder is used for building trie keys as you traverse the trie diff --git a/state/syncer/errors.go b/state/syncer/errors.go index c42c5fd08f2..5a12356ecf8 100644 --- 
a/state/syncer/errors.go +++ b/state/syncer/errors.go @@ -4,3 +4,6 @@ import "errors" // ErrNilPubkeyConverter signals that a nil public key converter was provided var ErrNilPubkeyConverter = errors.New("nil pubkey converter") + +// ErrNilStorageMarker signals that a nil storage marker was provided +var ErrNilStorageMarker = errors.New("nil storage marker") diff --git a/state/syncer/userAccountSyncer_test.go b/state/syncer/userAccountSyncer_test.go new file mode 100644 index 00000000000..1653ad89011 --- /dev/null +++ b/state/syncer/userAccountSyncer_test.go @@ -0,0 +1,47 @@ +package syncer + +import ( + "testing" + "time" + + "github.com/multiversx/mx-chain-go/dataRetriever/mock" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" + "github.com/stretchr/testify/assert" +) + +func getDefaultBaseAccSyncerArgs() ArgsNewBaseAccountsSyncer { + return ArgsNewBaseAccountsSyncer{ + Hasher: &hashingMocks.HasherMock{}, + Marshalizer: testscommon.MarshalizerMock{}, + TrieStorageManager: &storageManager.StorageManagerStub{}, + RequestHandler: &testscommon.RequestHandlerStub{}, + Timeout: time.Second, + Cacher: testscommon.NewCacherMock(), + UserAccountsSyncStatisticsHandler: &testscommon.SizeSyncStatisticsHandlerStub{}, + AppStatusHandler: &statusHandler.AppStatusHandlerStub{}, + MaxTrieLevelInMemory: 0, + MaxHardCapForMissingNodes: 100, + TrieSyncerVersion: 2, + CheckNodesOnDisk: false, + } +} + +func TestUserAccountsSyncer_SyncAccounts(t *testing.T) { + t.Parallel() + + args := ArgsNewUserAccountsSyncer{ + ArgsNewBaseAccountsSyncer: getDefaultBaseAccSyncerArgs(), + ShardId: 0, + Throttler: &mock.ThrottlerStub{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterStub{}, + } + syncer, err := NewUserAccountsSyncer(args) + assert.Nil(t, err) + assert.NotNil(t, syncer) + + err = 
syncer.SyncAccounts([]byte("rootHash"), nil) + assert.Equal(t, ErrNilStorageMarker, err) +} diff --git a/state/syncer/userAccountsSyncer.go b/state/syncer/userAccountsSyncer.go index 6ac039fe836..8c4365dc61c 100644 --- a/state/syncer/userAccountsSyncer.go +++ b/state/syncer/userAccountsSyncer.go @@ -103,6 +103,10 @@ func NewUserAccountsSyncer(args ArgsNewUserAccountsSyncer) (*userAccountsSyncer, // SyncAccounts will launch the syncing method to gather all the data needed for userAccounts - it is a blocking method func (u *userAccountsSyncer) SyncAccounts(rootHash []byte, storageMarker common.StorageMarker) error { + if check.IfNil(storageMarker) { + return ErrNilStorageMarker + } + u.mutex.Lock() defer u.mutex.Unlock() diff --git a/state/syncer/validatorAccountsSyncer.go b/state/syncer/validatorAccountsSyncer.go index 51076bdaec0..0e08849462f 100644 --- a/state/syncer/validatorAccountsSyncer.go +++ b/state/syncer/validatorAccountsSyncer.go @@ -4,6 +4,7 @@ import ( "context" "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/process/factory" "github.com/multiversx/mx-chain-go/state" @@ -61,6 +62,10 @@ func NewValidatorAccountsSyncer(args ArgsNewValidatorAccountsSyncer) (*validator // SyncAccounts will launch the syncing method to gather all the data needed for validatorAccounts - it is a blocking method func (v *validatorAccountsSyncer) SyncAccounts(rootHash []byte, storageMarker common.StorageMarker) error { + if check.IfNil(storageMarker) { + return ErrNilStorageMarker + } + v.mutex.Lock() defer v.mutex.Unlock() diff --git a/state/syncer/validatorAccountsSyncer_test.go b/state/syncer/validatorAccountsSyncer_test.go new file mode 100644 index 00000000000..1cbca877262 --- /dev/null +++ b/state/syncer/validatorAccountsSyncer_test.go @@ -0,0 +1,22 @@ +package syncer + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func 
TestValidatorAccountsSyncer_SyncAccounts(t *testing.T) { + t.Parallel() + + args := ArgsNewValidatorAccountsSyncer{ + ArgsNewBaseAccountsSyncer: getDefaultBaseAccSyncerArgs(), + } + + syncer, err := NewValidatorAccountsSyncer(args) + assert.Nil(t, err) + assert.NotNil(t, syncer) + + err = syncer.SyncAccounts([]byte("rootHash"), nil) + assert.Equal(t, ErrNilStorageMarker, err) +} diff --git a/trie/storageMarker/disabledStorageMarker.go b/trie/storageMarker/disabledStorageMarker.go index 5070910b61f..62257e62f60 100644 --- a/trie/storageMarker/disabledStorageMarker.go +++ b/trie/storageMarker/disabledStorageMarker.go @@ -13,3 +13,8 @@ func NewDisabledStorageMarker() *disabledStorageMarker { // MarkStorerAsSyncedAndActive does nothing for this implementation func (dsm *disabledStorageMarker) MarkStorerAsSyncedAndActive(_ common.StorageManager) { } + +// IsInterfaceNil returns true if there is no value under the interface +func (dsm *disabledStorageMarker) IsInterfaceNil() bool { + return dsm == nil +} diff --git a/trie/storageMarker/trieStorageMarker.go b/trie/storageMarker/trieStorageMarker.go index 59ec321d0df..ca5ce3287d5 100644 --- a/trie/storageMarker/trieStorageMarker.go +++ b/trie/storageMarker/trieStorageMarker.go @@ -39,3 +39,8 @@ func (sm *trieStorageMarker) MarkStorerAsSyncedAndActive(storer common.StorageMa } log.Debug("set activeDB in epoch", "epoch", lastEpoch) } + +// IsInterfaceNil returns true if there is no value under the interface +func (sm *trieStorageMarker) IsInterfaceNil() bool { + return sm == nil +} From e4cd99a1e97b3a208d7c50488c29802e3d66923f Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Tue, 25 Apr 2023 16:09:39 +0300 Subject: [PATCH 103/221] add TODOs --- state/syncer/userAccountSyncer_test.go | 2 ++ state/syncer/validatorAccountsSyncer_test.go | 2 ++ 2 files changed, 4 insertions(+) diff --git a/state/syncer/userAccountSyncer_test.go b/state/syncer/userAccountSyncer_test.go index 1653ad89011..a5168e2cbeb 100644 --- 
a/state/syncer/userAccountSyncer_test.go +++ b/state/syncer/userAccountSyncer_test.go @@ -12,6 +12,8 @@ import ( "github.com/stretchr/testify/assert" ) +// TODO add more tests + func getDefaultBaseAccSyncerArgs() ArgsNewBaseAccountsSyncer { return ArgsNewBaseAccountsSyncer{ Hasher: &hashingMocks.HasherMock{}, diff --git a/state/syncer/validatorAccountsSyncer_test.go b/state/syncer/validatorAccountsSyncer_test.go index 1cbca877262..4624c550b16 100644 --- a/state/syncer/validatorAccountsSyncer_test.go +++ b/state/syncer/validatorAccountsSyncer_test.go @@ -6,6 +6,8 @@ import ( "github.com/stretchr/testify/assert" ) +// TODO add more tests + func TestValidatorAccountsSyncer_SyncAccounts(t *testing.T) { t.Parallel() From c7848fa9ae1c40e726dfb0ce8972e28313d2464f Mon Sep 17 00:00:00 2001 From: jules01 Date: Tue, 25 Apr 2023 19:19:15 +0300 Subject: [PATCH 104/221] - refactored transaction simulation construction --- api/middleware/responseLogger.go | 2 +- factory/processing/blockProcessorCreator.go | 217 ----------- .../processing/blockProcessorCreator_test.go | 14 +- factory/processing/export_test.go | 14 +- factory/processing/processComponents.go | 36 +- .../processComponentsForTxSimulator.go | 361 ++++++++++++++++++ .../processComponentsForTxSimulator_test.go | 53 +++ 7 files changed, 435 insertions(+), 262 deletions(-) create mode 100644 factory/processing/processComponentsForTxSimulator.go create mode 100644 factory/processing/processComponentsForTxSimulator_test.go diff --git a/api/middleware/responseLogger.go b/api/middleware/responseLogger.go index 36ff6261ec7..233ad88b809 100644 --- a/api/middleware/responseLogger.go +++ b/api/middleware/responseLogger.go @@ -66,7 +66,7 @@ func (rlm *responseLoggerMiddleware) logRequestAndResponse(c *gin.Context, durat reqBody := c.Request.Body reqBodyBytes, err := ioutil.ReadAll(reqBody) if err != nil { - log.Error(err.Error()) + log.Debug(err.Error()) return } diff --git a/factory/processing/blockProcessorCreator.go 
b/factory/processing/blockProcessorCreator.go index 4f73a39db02..f9664bddf67 100644 --- a/factory/processing/blockProcessorCreator.go +++ b/factory/processing/blockProcessorCreator.go @@ -11,11 +11,9 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever" debugFactory "github.com/multiversx/mx-chain-go/debug/factory" "github.com/multiversx/mx-chain-go/epochStart" - "github.com/multiversx/mx-chain-go/epochStart/bootstrap/disabled" metachainEpochStart "github.com/multiversx/mx-chain-go/epochStart/metachain" mainFactory "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/genesis" - processDisabled "github.com/multiversx/mx-chain-go/genesis/process/disabled" "github.com/multiversx/mx-chain-go/outport" processOutport "github.com/multiversx/mx-chain-go/outport/process" factoryOutportProvider "github.com/multiversx/mx-chain-go/outport/process/factory" @@ -35,7 +33,6 @@ import ( "github.com/multiversx/mx-chain-go/process/smartContract/hooks/counters" "github.com/multiversx/mx-chain-go/process/throttle" "github.com/multiversx/mx-chain-go/process/transaction" - "github.com/multiversx/mx-chain-go/process/txsimulator" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/storage/txcache" "github.com/multiversx/mx-chain-go/vm" @@ -46,7 +43,6 @@ import ( type blockProcessorAndVmFactories struct { blockProcessor process.BlockProcessor - vmFactoryForTxSimulate process.VirtualMachinesContainerFactory vmFactoryForProcessing process.VirtualMachinesContainerFactory } @@ -59,7 +55,6 @@ func (pcf *processComponentsFactory) newBlockProcessor( headerValidator process.HeaderConstructionValidator, blockTracker process.BlockTracker, pendingMiniBlocksHandler process.PendingMiniBlocksHandler, - txSimulatorProcessorArgs *txsimulator.ArgsTxSimulator, wasmVMChangeLocker common.Locker, scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler, processedMiniBlocksTracker process.ProcessedMiniBlocksTracker, @@ -74,7 +69,6 @@ func (pcf 
*processComponentsFactory) newBlockProcessor( headerValidator, blockTracker, pcf.smartContractParser, - txSimulatorProcessorArgs, wasmVMChangeLocker, scheduledTxsExecutionHandler, processedMiniBlocksTracker, @@ -91,7 +85,6 @@ func (pcf *processComponentsFactory) newBlockProcessor( headerValidator, blockTracker, pendingMiniBlocksHandler, - txSimulatorProcessorArgs, wasmVMChangeLocker, scheduledTxsExecutionHandler, processedMiniBlocksTracker, @@ -112,7 +105,6 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( headerValidator process.HeaderConstructionValidator, blockTracker process.BlockTracker, smartContractParser genesis.InitialSmartContractParser, - txSimulatorProcessorArgs *txsimulator.ArgsTxSimulator, wasmVMChangeLocker common.Locker, scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler, processedMiniBlocksTracker process.ProcessedMiniBlocksTracker, @@ -283,11 +275,6 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( scheduledTxsExecutionHandler.SetTransactionProcessor(transactionProcessor) - vmFactoryTxSimulator, err := pcf.createShardTxSimulatorProcessor(txSimulatorProcessorArgs, argsNewScProcessor, argsNewTxProcessor, esdtTransferParser, wasmVMChangeLocker, mapDNSAddresses) - if err != nil { - return nil, err - } - blockSizeThrottler, err := throttle.NewBlockSizeThrottle( pcf.config.BlockSizeThrottleConfig.MinSizeInBytes, pcf.config.BlockSizeThrottleConfig.MaxSizeInBytes, @@ -435,7 +422,6 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( blockProcessorComponents := &blockProcessorAndVmFactories{ blockProcessor: blockProcessor, - vmFactoryForTxSimulate: vmFactoryTxSimulator, vmFactoryForProcessing: vmFactory, } @@ -451,7 +437,6 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( headerValidator process.HeaderConstructionValidator, blockTracker process.BlockTracker, pendingMiniBlocksHandler process.PendingMiniBlocksHandler, - txSimulatorProcessorArgs *txsimulator.ArgsTxSimulator, wasmVMChangeLocker 
common.Locker, scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler, processedMiniBlocksTracker process.ProcessedMiniBlocksTracker, @@ -588,11 +573,6 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( scheduledTxsExecutionHandler.SetTransactionProcessor(transactionProcessor) - vmFactoryTxSimulator, err := pcf.createMetaTxSimulatorProcessor(txSimulatorProcessorArgs, argsNewScProcessor, txTypeHandler) - if err != nil { - return nil, err - } - blockSizeThrottler, err := throttle.NewBlockSizeThrottle(pcf.config.BlockSizeThrottleConfig.MinSizeInBytes, pcf.config.BlockSizeThrottleConfig.MaxSizeInBytes) if err != nil { return nil, err @@ -889,7 +869,6 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( blockProcessorComponents := &blockProcessorAndVmFactories{ blockProcessor: metaProcessor, - vmFactoryForTxSimulate: vmFactoryTxSimulator, vmFactoryForProcessing: vmFactory, } @@ -940,202 +919,6 @@ func (pcf *processComponentsFactory) createOutportDataProvider( }) } -func (pcf *processComponentsFactory) createShardTxSimulatorProcessor( - txSimulatorProcessorArgs *txsimulator.ArgsTxSimulator, - scProcArgs smartContract.ArgsNewSmartContractProcessor, - txProcArgs transaction.ArgsNewTxProcessor, - esdtTransferParser vmcommon.ESDTTransferParser, - wasmVMChangeLocker common.Locker, - mapDNSAddresses map[string]struct{}, -) (process.VirtualMachinesContainerFactory, error) { - readOnlyAccountsDB, err := txsimulator.NewReadOnlyAccountsDB(pcf.state.AccountsAdapterAPI()) - if err != nil { - return nil, err - } - - interimProcFactory, err := shard.NewIntermediateProcessorsContainerFactory( - pcf.bootstrapComponents.ShardCoordinator(), - pcf.coreData.InternalMarshalizer(), - pcf.coreData.Hasher(), - pcf.coreData.AddressPubKeyConverter(), - disabled.NewChainStorer(), - pcf.data.Datapool(), - &processDisabled.FeeHandler{}, - ) - if err != nil { - return nil, err - } - - builtInFuncFactory, err := pcf.createBuiltInFunctionContainer(readOnlyAccountsDB, 
mapDNSAddresses) - if err != nil { - return nil, err - } - - smartContractStorageSimulate := pcf.config.SmartContractsStorageSimulate - vmFactory, err := pcf.createVMFactoryShard( - readOnlyAccountsDB, - builtInFuncFactory.BuiltInFunctionContainer(), - esdtTransferParser, - wasmVMChangeLocker, - smartContractStorageSimulate, - builtInFuncFactory.NFTStorageHandler(), - builtInFuncFactory.ESDTGlobalSettingsHandler(), - ) - if err != nil { - return nil, err - } - - vmContainer, err := vmFactory.Create() - if err != nil { - return nil, err - } - - scProcArgs.VmContainer = vmContainer - - interimProcContainer, err := interimProcFactory.Create() - if err != nil { - return nil, err - } - - scForwarder, err := interimProcContainer.Get(dataBlock.SmartContractResultBlock) - if err != nil { - return nil, err - } - scProcArgs.ScrForwarder = scForwarder - scProcArgs.BlockChainHook = vmFactory.BlockChainHookImpl() - - receiptTxInterim, err := interimProcContainer.Get(dataBlock.ReceiptBlock) - if err != nil { - return nil, err - } - txProcArgs.ReceiptForwarder = receiptTxInterim - - badTxInterim, err := interimProcContainer.Get(dataBlock.InvalidBlock) - if err != nil { - return nil, err - } - scProcArgs.BadTxForwarder = badTxInterim - txProcArgs.BadTxForwarder = badTxInterim - - scProcArgs.TxFeeHandler = &processDisabled.FeeHandler{} - txProcArgs.TxFeeHandler = &processDisabled.FeeHandler{} - - scProcArgs.AccountsDB = readOnlyAccountsDB - scProcArgs.VMOutputCacher = txSimulatorProcessorArgs.VMOutputCacher - scProcessor, err := smartContract.NewSmartContractProcessor(scProcArgs) - if err != nil { - return nil, err - } - txProcArgs.ScProcessor = scProcessor - - txProcArgs.Accounts = readOnlyAccountsDB - - txSimulatorProcessorArgs.TransactionProcessor, err = transaction.NewTxProcessor(txProcArgs) - if err != nil { - return nil, err - } - - txSimulatorProcessorArgs.IntermediateProcContainer = interimProcContainer - - return vmFactory, nil -} - -func (pcf *processComponentsFactory) 
createMetaTxSimulatorProcessor( - txSimulatorProcessorArgs *txsimulator.ArgsTxSimulator, - scProcArgs smartContract.ArgsNewSmartContractProcessor, - txTypeHandler process.TxTypeHandler, -) (process.VirtualMachinesContainerFactory, error) { - interimProcFactory, err := shard.NewIntermediateProcessorsContainerFactory( - pcf.bootstrapComponents.ShardCoordinator(), - pcf.coreData.InternalMarshalizer(), - pcf.coreData.Hasher(), - pcf.coreData.AddressPubKeyConverter(), - disabled.NewChainStorer(), - pcf.data.Datapool(), - &processDisabled.FeeHandler{}, - ) - if err != nil { - return nil, err - } - - interimProcContainer, err := interimProcFactory.Create() - if err != nil { - return nil, err - } - - scForwarder, err := interimProcContainer.Get(dataBlock.SmartContractResultBlock) - if err != nil { - return nil, err - } - scProcArgs.ScrForwarder = scForwarder - - badTxInterim, err := interimProcContainer.Get(dataBlock.InvalidBlock) - if err != nil { - return nil, err - } - scProcArgs.BadTxForwarder = badTxInterim - scProcArgs.VMOutputCacher = txSimulatorProcessorArgs.VMOutputCacher - - scProcArgs.TxFeeHandler = &processDisabled.FeeHandler{} - - scProcArgs.VMOutputCacher = txSimulatorProcessorArgs.VMOutputCacher - - readOnlyAccountsDB, err := txsimulator.NewReadOnlyAccountsDB(pcf.state.AccountsAdapterAPI()) - if err != nil { - return nil, err - } - - builtInFuncFactory, err := pcf.createBuiltInFunctionContainer(readOnlyAccountsDB, make(map[string]struct{})) - if err != nil { - return nil, err - } - - vmFactory, err := pcf.createVMFactoryMeta( - readOnlyAccountsDB, - builtInFuncFactory.BuiltInFunctionContainer(), - pcf.config.SmartContractsStorageSimulate, - builtInFuncFactory.NFTStorageHandler(), - builtInFuncFactory.ESDTGlobalSettingsHandler(), - ) - if err != nil { - return nil, err - } - - vmContainer, err := vmFactory.Create() - if err != nil { - return nil, err - } - - scProcArgs.VmContainer = vmContainer - scProcArgs.BlockChainHook = vmFactory.BlockChainHookImpl() - - 
scProcessor, err := smartContract.NewSmartContractProcessor(scProcArgs) - if err != nil { - return nil, err - } - - argsNewMetaTx := transaction.ArgsNewMetaTxProcessor{ - Hasher: pcf.coreData.Hasher(), - Marshalizer: pcf.coreData.InternalMarshalizer(), - Accounts: readOnlyAccountsDB, - PubkeyConv: pcf.coreData.AddressPubKeyConverter(), - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - ScProcessor: scProcessor, - TxTypeHandler: txTypeHandler, - EconomicsFee: &processDisabled.FeeHandler{}, - EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), - } - - txSimulatorProcessorArgs.TransactionProcessor, err = transaction.NewMetaTxProcessor(argsNewMetaTx) - if err != nil { - return nil, err - } - - txSimulatorProcessorArgs.IntermediateProcContainer = interimProcContainer - - return vmFactory, nil -} - func (pcf *processComponentsFactory) createVMFactoryShard( accounts state.AccountsAdapter, builtInFuncs vmcommon.BuiltInFunctionContainer, diff --git a/factory/processing/blockProcessorCreator_test.go b/factory/processing/blockProcessorCreator_test.go index 5d85dd26931..53ac721fefd 100644 --- a/factory/processing/blockProcessorCreator_test.go +++ b/factory/processing/blockProcessorCreator_test.go @@ -11,11 +11,9 @@ import ( dataComp "github.com/multiversx/mx-chain-go/factory/data" "github.com/multiversx/mx-chain-go/factory/mock" processComp "github.com/multiversx/mx-chain-go/factory/processing" - "github.com/multiversx/mx-chain-go/process/txsimulator" "github.com/multiversx/mx-chain-go/state" factoryState "github.com/multiversx/mx-chain-go/state/factory" "github.com/multiversx/mx-chain-go/state/storagePruningManager/disabled" - "github.com/multiversx/mx-chain-go/storage/txcache" "github.com/multiversx/mx-chain-go/testscommon" componentsMock "github.com/multiversx/mx-chain-go/testscommon/components" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" @@ -43,7 +41,7 @@ func Test_newBlockProcessorCreatorForShard(t *testing.T) { _, err = pcf.Create() 
require.NoError(t, err) - bp, vmFactoryForSimulate, err := pcf.NewBlockProcessor( + bp, err := pcf.NewBlockProcessor( &testscommon.RequestHandlerStub{}, &mock.ForkDetectorStub{}, &mock.EpochStartTriggerStub{}, @@ -52,9 +50,6 @@ func Test_newBlockProcessorCreatorForShard(t *testing.T) { &mock.HeaderValidatorStub{}, &mock.BlockTrackerStub{}, &mock.PendingMiniBlocksHandlerStub{}, - &txsimulator.ArgsTxSimulator{ - VMOutputCacher: txcache.NewDisabledCache(), - }, &sync.RWMutex{}, &testscommon.ScheduledTxsExecutionStub{}, &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -63,7 +58,6 @@ func Test_newBlockProcessorCreatorForShard(t *testing.T) { require.NoError(t, err) require.NotNil(t, bp) - require.NotNil(t, vmFactoryForSimulate) } func Test_newBlockProcessorCreatorForMeta(t *testing.T) { @@ -166,7 +160,7 @@ func Test_newBlockProcessorCreatorForMeta(t *testing.T) { _, err = pcf.Create() require.NoError(t, err) - bp, vmFactoryForSimulate, err := pcf.NewBlockProcessor( + bp, err := pcf.NewBlockProcessor( &testscommon.RequestHandlerStub{}, &mock.ForkDetectorStub{}, &mock.EpochStartTriggerStub{}, @@ -175,9 +169,6 @@ func Test_newBlockProcessorCreatorForMeta(t *testing.T) { &mock.HeaderValidatorStub{}, &mock.BlockTrackerStub{}, &mock.PendingMiniBlocksHandlerStub{}, - &txsimulator.ArgsTxSimulator{ - VMOutputCacher: txcache.NewDisabledCache(), - }, &sync.RWMutex{}, &testscommon.ScheduledTxsExecutionStub{}, &testscommon.ProcessedMiniBlocksTrackerStub{}, @@ -186,7 +177,6 @@ func Test_newBlockProcessorCreatorForMeta(t *testing.T) { require.NoError(t, err) require.NotNil(t, bp) - require.NotNil(t, vmFactoryForSimulate) } func createAccountAdapter( diff --git a/factory/processing/export_test.go b/factory/processing/export_test.go index f9cae468a41..d07a8648394 100644 --- a/factory/processing/export_test.go +++ b/factory/processing/export_test.go @@ -8,7 +8,6 @@ import ( "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/genesis" 
"github.com/multiversx/mx-chain-go/process" - "github.com/multiversx/mx-chain-go/process/txsimulator" ) // NewBlockProcessor calls the unexported method with the same name in order to use it in tests @@ -21,12 +20,11 @@ func (pcf *processComponentsFactory) NewBlockProcessor( headerValidator process.HeaderConstructionValidator, blockTracker process.BlockTracker, pendingMiniBlocksHandler process.PendingMiniBlocksHandler, - txSimulatorProcessorArgs *txsimulator.ArgsTxSimulator, wasmVMChangeLocker common.Locker, scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler, processedMiniBlocksTracker process.ProcessedMiniBlocksTracker, receiptsRepository factory.ReceiptsRepository, -) (process.BlockProcessor, process.VirtualMachinesContainerFactory, error) { +) (process.BlockProcessor, error) { blockProcessorComponents, err := pcf.newBlockProcessor( requestHandler, forkDetector, @@ -36,20 +34,24 @@ func (pcf *processComponentsFactory) NewBlockProcessor( headerValidator, blockTracker, pendingMiniBlocksHandler, - txSimulatorProcessorArgs, wasmVMChangeLocker, scheduledTxsExecutionHandler, processedMiniBlocksTracker, receiptsRepository, ) if err != nil { - return nil, nil, err + return nil, err } - return blockProcessorComponents.blockProcessor, blockProcessorComponents.vmFactoryForTxSimulate, nil + return blockProcessorComponents.blockProcessor, nil } // IndexGenesisBlocks - func (pcf *processComponentsFactory) IndexGenesisBlocks(genesisBlocks map[uint32]data.HeaderHandler, indexingData map[uint32]*genesis.IndexingData) error { return pcf.indexGenesisBlocks(genesisBlocks, indexingData, map[string]*outport.AlteredAccount{}) } + +// CreateTxSimulatorProcessor - +func (pcf *processComponentsFactory) CreateTxSimulatorProcessor() (factory.TransactionSimulatorProcessor, process.VirtualMachinesContainerFactory, error) { + return pcf.createTxSimulatorProcessor() +} diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index 
24d35bbd61a..aac7480919e 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -56,7 +56,6 @@ import ( "github.com/multiversx/mx-chain-go/process/track" "github.com/multiversx/mx-chain-go/process/transactionLog" "github.com/multiversx/mx-chain-go/process/txsSender" - "github.com/multiversx/mx-chain-go/process/txsimulator" "github.com/multiversx/mx-chain-go/redundancy" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/networksharding" @@ -543,20 +542,6 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { return nil, err } - vmOutputCacherConfig := storageFactory.GetCacherFromConfig(pcf.config.VMOutputCacher) - vmOutputCacher, err := storageunit.NewCache(vmOutputCacherConfig) - if err != nil { - return nil, err - } - - txSimulatorProcessorArgs := &txsimulator.ArgsTxSimulator{ - AddressPubKeyConverter: pcf.coreData.AddressPubKeyConverter(), - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - VMOutputCacher: vmOutputCacher, - Hasher: pcf.coreData.Hasher(), - Marshalizer: pcf.coreData.InternalMarshalizer(), - } - scheduledSCRSStorer, err := pcf.data.StorageService().GetStorer(dataRetriever.ScheduledSCRsUnit) if err != nil { return nil, err @@ -606,7 +591,6 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { headerValidator, blockTracker, pendingMiniBlocksHandler, - txSimulatorProcessorArgs, pcf.coreData.WasmVMChangeLocker(), scheduledTxsExecutionHandler, processedMiniBlocksTracker, @@ -637,11 +621,6 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { return nil, err } - txSimulator, err := txsimulator.NewTransactionSimulator(*txSimulatorProcessorArgs) - if err != nil { - return nil, err - } - observerBLSPrivateKey, observerBLSPublicKey := pcf.crypto.BlockSignKeyGen().GeneratePair() observerBLSPublicKeyBuff, err := observerBLSPublicKey.ToByteArray() if err != nil { @@ -677,6 +656,11 @@ func 
(pcf *processComponentsFactory) Create() (*processComponents, error) { return nil, err } + txSimulatorProcessor, vmFactoryForTxSimulate, err := pcf.createTxSimulatorProcessor() + if err != nil { + return nil, err + } + return &processComponents{ nodesCoordinator: pcf.nodesCoordinator, shardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), @@ -700,7 +684,7 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { headerConstructionValidator: headerValidator, headerIntegrityVerifier: pcf.bootstrapComponents.HeaderIntegrityVerifier(), peerShardMapper: peerShardMapper, - txSimulatorProcessor: txSimulator, + txSimulatorProcessor: txSimulatorProcessor, miniBlocksPoolCleaner: mbsPoolsCleaner, txsPoolCleaner: txsPoolsCleaner, fallbackHeaderValidator: fallbackHeaderValidator, @@ -712,7 +696,7 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { importHandler: pcf.importHandler, nodeRedundancyHandler: nodeRedundancyHandler, currentEpochProvider: currentEpochProvider, - vmFactoryForTxSimulator: blockProcessorComponents.vmFactoryForTxSimulate, + vmFactoryForTxSimulator: vmFactoryForTxSimulate, vmFactoryForProcessing: blockProcessorComponents.vmFactoryForProcessing, scheduledTxsExecutionHandler: scheduledTxsExecutionHandler, txsSender: txsSenderWithAccumulator, @@ -911,9 +895,9 @@ func (pcf *processComponentsFactory) indexAndReturnGenesisAccounts() (map[string continue } - encodedAddress, err := pcf.coreData.AddressPubKeyConverter().Encode(userAccount.AddressBytes()) - if err != nil { - return nil, err + encodedAddress, errEncode := pcf.coreData.AddressPubKeyConverter().Encode(userAccount.AddressBytes()) + if errEncode != nil { + return nil, errEncode } genesisAccounts[encodedAddress] = &outport.AlteredAccount{ diff --git a/factory/processing/processComponentsForTxSimulator.go b/factory/processing/processComponentsForTxSimulator.go new file mode 100644 index 00000000000..93a327ef4bc --- /dev/null +++ 
b/factory/processing/processComponentsForTxSimulator.go @@ -0,0 +1,361 @@ +package processing + +import ( + "github.com/multiversx/mx-chain-core-go/core" + dataBlock "github.com/multiversx/mx-chain-core-go/data/block" + bootstrapDisabled "github.com/multiversx/mx-chain-go/epochStart/bootstrap/disabled" + "github.com/multiversx/mx-chain-go/factory" + "github.com/multiversx/mx-chain-go/genesis" + processDisabled "github.com/multiversx/mx-chain-go/genesis/process/disabled" + "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/block/preprocess" + "github.com/multiversx/mx-chain-go/process/coordinator" + "github.com/multiversx/mx-chain-go/process/factory/metachain" + "github.com/multiversx/mx-chain-go/process/factory/shard" + "github.com/multiversx/mx-chain-go/process/smartContract" + "github.com/multiversx/mx-chain-go/process/transaction" + "github.com/multiversx/mx-chain-go/process/transactionLog" + "github.com/multiversx/mx-chain-go/process/txsimulator" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/storage" + storageFactory "github.com/multiversx/mx-chain-go/storage/factory" + "github.com/multiversx/mx-chain-go/storage/storageunit" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/multiversx/mx-chain-vm-common-go/parsers" +) + +func (pcf *processComponentsFactory) createTxSimulatorProcessor() (factory.TransactionSimulatorProcessor, process.VirtualMachinesContainerFactory, error) { + readOnlyAccountsDB, err := txsimulator.NewReadOnlyAccountsDB(pcf.state.AccountsAdapterAPI()) + if err != nil { + return nil, nil, err + } + + vmOutputCacherConfig := storageFactory.GetCacherFromConfig(pcf.config.VMOutputCacher) + vmOutputCacher, err := storageunit.NewCache(vmOutputCacherConfig) + if err != nil { + return nil, nil, err + } + + txLogsProcessor, err := transactionLog.NewTxLogProcessor(transactionLog.ArgTxLogProcessor{ + Marshalizer: pcf.coreData.InternalMarshalizer(), + 
SaveInStorageEnabled: false, // no storer needed for tx simulator + }) + if err != nil { + return nil, nil, err + } + + txSimulatorProcessorArgs, vmContainerFactory, err := pcf.createArgsTxSimulatorProcessor(readOnlyAccountsDB, vmOutputCacher, txLogsProcessor) + if err != nil { + return nil, nil, err + } + + txSimulatorProcessorArgs.VMOutputCacher = vmOutputCacher + txSimulatorProcessorArgs.AddressPubKeyConverter = pcf.coreData.AddressPubKeyConverter() + txSimulatorProcessorArgs.ShardCoordinator = pcf.bootstrapComponents.ShardCoordinator() + txSimulatorProcessorArgs.Hasher = pcf.coreData.Hasher() + txSimulatorProcessorArgs.Marshalizer = pcf.coreData.InternalMarshalizer() + + txSimulator, err := txsimulator.NewTransactionSimulator(txSimulatorProcessorArgs) + + return txSimulator, vmContainerFactory, err +} + +func (pcf *processComponentsFactory) createArgsTxSimulatorProcessor( + accountsAdapter state.AccountsAdapter, + vmOutputCacher storage.Cacher, + txLogsProcessor process.TransactionLogProcessor, +) (txsimulator.ArgsTxSimulator, process.VirtualMachinesContainerFactory, error) { + shardID := pcf.bootstrapComponents.ShardCoordinator().SelfId() + if shardID == core.MetachainShardId { + return pcf.createArgsTxSimulatorProcessorForMeta(accountsAdapter, vmOutputCacher, txLogsProcessor) + } else { + return pcf.createArgsTxSimulatorProcessorShard(accountsAdapter, vmOutputCacher, txLogsProcessor) + } +} + +func (pcf *processComponentsFactory) createArgsTxSimulatorProcessorForMeta( + accountsAdapter state.AccountsAdapter, + vmOutputCacher storage.Cacher, + txLogsProcessor process.TransactionLogProcessor, +) (txsimulator.ArgsTxSimulator, process.VirtualMachinesContainerFactory, error) { + args := txsimulator.ArgsTxSimulator{} + + intermediateProcessorsFactory, err := metachain.NewIntermediateProcessorsContainerFactory( + pcf.bootstrapComponents.ShardCoordinator(), + pcf.coreData.InternalMarshalizer(), + pcf.coreData.Hasher(), + pcf.coreData.AddressPubKeyConverter(), + 
bootstrapDisabled.NewChainStorer(), + pcf.data.Datapool(), + &processDisabled.FeeHandler{}) + if err != nil { + return args, nil, err + } + + intermediateProcessorsContainer, err := intermediateProcessorsFactory.Create() + if err != nil { + return args, nil, err + } + + builtInFuncFactory, err := pcf.createBuiltInFunctionContainer(accountsAdapter, make(map[string]struct{})) + if err != nil { + return args, nil, err + } + + vmContainerFactory, err := pcf.createVMFactoryMeta( + accountsAdapter, + builtInFuncFactory.BuiltInFunctionContainer(), + pcf.config.SmartContractsStorageSimulate, + builtInFuncFactory.NFTStorageHandler(), + builtInFuncFactory.ESDTGlobalSettingsHandler(), + ) + + vmContainer, err := vmContainerFactory.Create() + if err != nil { + return args, nil, err + } + + txTypeHandler, err := pcf.createTxTypeHandler(builtInFuncFactory) + if err != nil { + return args, nil, err + } + + gasHandler, err := preprocess.NewGasComputation( + pcf.coreData.EconomicsData(), + txTypeHandler, + pcf.coreData.EnableEpochsHandler(), + ) + if err != nil { + return args, nil, err + } + + scForwarder, err := intermediateProcessorsContainer.Get(dataBlock.SmartContractResultBlock) + if err != nil { + return args, nil, err + } + badTxInterim, err := intermediateProcessorsContainer.Get(dataBlock.InvalidBlock) + if err != nil { + return args, nil, err + } + + scProcArgs := smartContract.ArgsNewSmartContractProcessor{ + VmContainer: vmContainer, + ArgsParser: smartContract.NewArgumentParser(), + Hasher: pcf.coreData.Hasher(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + AccountsDB: accountsAdapter, + BlockChainHook: vmContainerFactory.BlockChainHookImpl(), + BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), + PubkeyConv: pcf.coreData.AddressPubKeyConverter(), + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + ScrForwarder: scForwarder, + TxFeeHandler: &processDisabled.FeeHandler{}, + EconomicsFee: pcf.coreData.EconomicsData(), + TxTypeHandler: 
txTypeHandler, + GasHandler: gasHandler, + GasSchedule: pcf.gasSchedule, + TxLogsProcessor: txLogsProcessor, + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + BadTxForwarder: badTxInterim, + VMOutputCacher: vmOutputCacher, + WasmVMChangeLocker: pcf.coreData.WasmVMChangeLocker(), + IsGenesisProcessing: false, + } + + scProcessor, err := smartContract.NewSmartContractProcessor(scProcArgs) + if err != nil { + return args, nil, err + } + + argsTxProcessor := transaction.ArgsNewMetaTxProcessor{ + Hasher: pcf.coreData.Hasher(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + Accounts: accountsAdapter, + PubkeyConv: pcf.coreData.AddressPubKeyConverter(), + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + ScProcessor: scProcessor, + TxTypeHandler: txTypeHandler, + EconomicsFee: pcf.coreData.EconomicsData(), + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + } + + txProcessor, err := transaction.NewMetaTxProcessor(argsTxProcessor) + if err != nil { + return args, nil, err + } + + args.TransactionProcessor = txProcessor + args.IntermediateProcContainer = intermediateProcessorsContainer + + return args, vmContainerFactory, nil +} + +func (pcf *processComponentsFactory) createTxTypeHandler(builtInFuncFactory vmcommon.BuiltInFunctionFactory) (process.TxTypeHandler, error) { + esdtTransferParser, err := parsers.NewESDTTransferParser(pcf.coreData.InternalMarshalizer()) + if err != nil { + return nil, err + } + + argsTxTypeHandler := coordinator.ArgNewTxTypeHandler{ + PubkeyConverter: pcf.coreData.AddressPubKeyConverter(), + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), + ArgumentParser: parsers.NewCallArgsParser(), + ESDTTransferParser: esdtTransferParser, + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + } + + return coordinator.NewTxTypeHandler(argsTxTypeHandler) +} + +func (pcf *processComponentsFactory) createArgsTxSimulatorProcessorShard( + 
accountsAdapter state.AccountsAdapter, + vmOutputCacher storage.Cacher, + txLogsProcessor process.TransactionLogProcessor, +) (txsimulator.ArgsTxSimulator, process.VirtualMachinesContainerFactory, error) { + args := txsimulator.ArgsTxSimulator{} + + intermediateProcessorsFactory, err := shard.NewIntermediateProcessorsContainerFactory( + pcf.bootstrapComponents.ShardCoordinator(), + pcf.coreData.InternalMarshalizer(), + pcf.coreData.Hasher(), + pcf.coreData.AddressPubKeyConverter(), + bootstrapDisabled.NewChainStorer(), + pcf.data.Datapool(), + &processDisabled.FeeHandler{}, + ) + if err != nil { + return args, nil, err + } + + intermediateProcessorsContainer, err := intermediateProcessorsFactory.Create() + if err != nil { + return args, nil, err + } + + mapDNSAddresses, err := pcf.smartContractParser.GetDeployedSCAddresses(genesis.DNSType) + if err != nil { + return args, nil, err + } + + builtInFuncFactory, err := pcf.createBuiltInFunctionContainer(accountsAdapter, mapDNSAddresses) + if err != nil { + return args, nil, err + } + + smartContractStorageSimulate := pcf.config.SmartContractsStorageSimulate + esdtTransferParser, err := parsers.NewESDTTransferParser(pcf.coreData.InternalMarshalizer()) + if err != nil { + return args, nil, err + } + + vmContainerFactory, err := pcf.createVMFactoryShard( + accountsAdapter, + builtInFuncFactory.BuiltInFunctionContainer(), + esdtTransferParser, + pcf.coreData.WasmVMChangeLocker(), + smartContractStorageSimulate, + builtInFuncFactory.NFTStorageHandler(), + builtInFuncFactory.ESDTGlobalSettingsHandler(), + ) + if err != nil { + return args, nil, err + } + + err = builtInFuncFactory.SetPayableHandler(vmContainerFactory.BlockChainHookImpl()) + if err != nil { + return args, nil, err + } + + vmContainer, err := vmContainerFactory.Create() + if err != nil { + return args, nil, err + } + + txTypeHandler, err := pcf.createTxTypeHandler(builtInFuncFactory) + if err != nil { + return args, nil, err + } + txFeeHandler := 
&processDisabled.FeeHandler{} + + gasHandler, err := preprocess.NewGasComputation( + pcf.coreData.EconomicsData(), + txTypeHandler, + pcf.coreData.EnableEpochsHandler(), + ) + if err != nil { + return args, nil, err + } + + scForwarder, err := intermediateProcessorsContainer.Get(dataBlock.SmartContractResultBlock) + if err != nil { + return args, nil, err + } + badTxInterim, err := intermediateProcessorsContainer.Get(dataBlock.InvalidBlock) + if err != nil { + return args, nil, err + } + receiptTxInterim, err := intermediateProcessorsContainer.Get(dataBlock.ReceiptBlock) + if err != nil { + return args, nil, err + } + + argsParser := smartContract.NewArgumentParser() + + scProcArgs := smartContract.ArgsNewSmartContractProcessor{ + VmContainer: vmContainer, + ArgsParser: argsParser, + Hasher: pcf.coreData.Hasher(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + AccountsDB: accountsAdapter, + BlockChainHook: vmContainerFactory.BlockChainHookImpl(), + BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), + PubkeyConv: pcf.coreData.AddressPubKeyConverter(), + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + ScrForwarder: scForwarder, + TxFeeHandler: &processDisabled.FeeHandler{}, + EconomicsFee: pcf.coreData.EconomicsData(), + TxTypeHandler: txTypeHandler, + GasHandler: gasHandler, + GasSchedule: pcf.gasSchedule, + TxLogsProcessor: txLogsProcessor, + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + BadTxForwarder: badTxInterim, + VMOutputCacher: vmOutputCacher, + WasmVMChangeLocker: pcf.coreData.WasmVMChangeLocker(), + IsGenesisProcessing: false, + } + + scProcessor, err := smartContract.NewSmartContractProcessor(scProcArgs) + if err != nil { + return args, nil, err + } + + argsTxProcessor := transaction.ArgsNewTxProcessor{ + Accounts: accountsAdapter, + Hasher: pcf.coreData.Hasher(), + PubkeyConv: pcf.coreData.AddressPubKeyConverter(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + SignMarshalizer: 
pcf.coreData.TxMarshalizer(), + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + ScProcessor: scProcessor, + TxFeeHandler: txFeeHandler, + TxTypeHandler: txTypeHandler, + EconomicsFee: pcf.coreData.EconomicsData(), + ReceiptForwarder: receiptTxInterim, + BadTxForwarder: badTxInterim, + ArgsParser: argsParser, + ScrForwarder: scForwarder, + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + } + + txProcessor, err := transaction.NewTxProcessor(argsTxProcessor) + if err != nil { + return args, nil, err + } + + args.TransactionProcessor = txProcessor + args.IntermediateProcContainer = intermediateProcessorsContainer + + return args, vmContainerFactory, nil +} diff --git a/factory/processing/processComponentsForTxSimulator_test.go b/factory/processing/processComponentsForTxSimulator_test.go new file mode 100644 index 00000000000..94a55251978 --- /dev/null +++ b/factory/processing/processComponentsForTxSimulator_test.go @@ -0,0 +1,53 @@ +package processing_test + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/factory/processing" + "github.com/multiversx/mx-chain-go/process/mock" + "github.com/multiversx/mx-chain-go/testscommon/components" + "github.com/stretchr/testify/assert" +) + +func TestManagedProcessComponents_createTxSimulatorProcessor(t *testing.T) { + t.Parallel() + + shardCoordinatorForShardID2 := mock.NewMultiShardsCoordinatorMock(3) + shardCoordinatorForShardID2.CurrentShard = 2 + + shardCoordinatorForMetachain := mock.NewMultiShardsCoordinatorMock(3) + shardCoordinatorForMetachain.CurrentShard = core.MetachainShardId + + // no further t.Parallel as these tests are quite heavy (they open netMessengers and other components that start a lot of goroutines) + t.Run("invalid VMOutputCacher config should error", func(t *testing.T) { + processArgs := components.GetProcessComponentsFactoryArgs(shardCoordinatorForShardID2) + 
processArgs.Config.VMOutputCacher.Type = "invalid" + pcf, _ := processing.NewProcessComponentsFactory(processArgs) + + txSimulator, vmContainerFactory, err := pcf.CreateTxSimulatorProcessor() + assert.NotNil(t, err) + assert.True(t, check.IfNil(txSimulator)) + assert.True(t, check.IfNil(vmContainerFactory)) + assert.Contains(t, err.Error(), "not supported cache type") + }) + t.Run("should work for shard", func(t *testing.T) { + processArgs := components.GetProcessComponentsFactoryArgs(shardCoordinatorForShardID2) + pcf, _ := processing.NewProcessComponentsFactory(processArgs) + + txSimulator, vmContainerFactory, err := pcf.CreateTxSimulatorProcessor() + assert.Nil(t, err) + assert.False(t, check.IfNil(txSimulator)) + assert.False(t, check.IfNil(vmContainerFactory)) + }) + t.Run("should work for metachain", func(t *testing.T) { + processArgs := components.GetProcessComponentsFactoryArgs(shardCoordinatorForMetachain) + pcf, _ := processing.NewProcessComponentsFactory(processArgs) + + txSimulator, vmContainerFactory, err := pcf.CreateTxSimulatorProcessor() + assert.Nil(t, err) + assert.False(t, check.IfNil(txSimulator)) + assert.False(t, check.IfNil(vmContainerFactory)) + }) +} From c4cb35e50d080ba5b185e8cbbe544e9346f62772 Mon Sep 17 00:00:00 2001 From: robertsasu Date: Wed, 26 Apr 2023 14:52:49 +0300 Subject: [PATCH 105/221] fixes after merge --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index bba09a4e3c7..908a56d70a7 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/multiversx/mx-chain-logger-go v1.0.11 github.com/multiversx/mx-chain-p2p-go v1.0.16 github.com/multiversx/mx-chain-storage-go v1.0.8 - github.com/multiversx/mx-chain-vm-common-go v1.4.1 + github.com/multiversx/mx-chain-vm-common-go v1.4.2-0.20230426114543-7cbe0054a196 github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53 github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54 github.com/multiversx/mx-chain-vm-v1_4-go v1.4.80 
diff --git a/go.sum b/go.sum index 3b5e4647240..ae038645078 100644 --- a/go.sum +++ b/go.sum @@ -629,8 +629,9 @@ github.com/multiversx/mx-chain-p2p-go v1.0.16 h1:iMK8KUi006/avVcmecnk7lqbDCRL0BN github.com/multiversx/mx-chain-p2p-go v1.0.16/go.mod h1:7piVUb5Z7UHK6n3JW8yIc32RdDFZu7GivRY29q0r2gI= github.com/multiversx/mx-chain-storage-go v1.0.8 h1:PB9OAwZs3rWz7nybBOxVCxgrd785FUUUAsVc5JWXYCw= github.com/multiversx/mx-chain-storage-go v1.0.8/go.mod h1:lEkFYFe6taKYxqO1einNgT1esY3K9Pj6vPnoCwV9C3U= -github.com/multiversx/mx-chain-vm-common-go v1.4.1 h1:HHZF9zU4WsMbfLrCarx3ESM95caWUrPBleGHKdsbzgc= github.com/multiversx/mx-chain-vm-common-go v1.4.1/go.mod h1:K6yCdro8VohzYI6GwjGzTO+fJiPgO5coo2sgQb+zA24= +github.com/multiversx/mx-chain-vm-common-go v1.4.2-0.20230426114543-7cbe0054a196 h1:IiRdjifo4+nDpVJsjS78LMhlFJQ2syJaZid7ISvgfyo= +github.com/multiversx/mx-chain-vm-common-go v1.4.2-0.20230426114543-7cbe0054a196/go.mod h1:K6yCdro8VohzYI6GwjGzTO+fJiPgO5coo2sgQb+zA24= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53 h1:HGbatzgIhVPJASN3ADnygH4MrKypAidOVtZkHkY/jKw= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53/go.mod h1:STVJW9m/TUJ9Q64W/T4P/KuhP5fPb+cCb6Q4gSehWJg= github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54 h1:bl2essObOEDwVWci71hJ2QO5AYTsKk6IlzRK0i8y63w= From 99f15d74f9c832c9ce3108b10bb86fbec328eb0c Mon Sep 17 00:00:00 2001 From: robertsasu Date: Wed, 26 Apr 2023 15:02:35 +0300 Subject: [PATCH 106/221] fixes after merge --- integrationTests/vm/txsFee/dns_test.go | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/integrationTests/vm/txsFee/dns_test.go b/integrationTests/vm/txsFee/dns_test.go index 18e93bb65ee..cda8aaece8c 100644 --- a/integrationTests/vm/txsFee/dns_test.go +++ b/integrationTests/vm/txsFee/dns_test.go @@ -110,18 +110,6 @@ func TestDeployDNSContract_TestRegisterAndResolveAndSendTxWithSndAndRcvUserName( require.Nil(t, err) } -func getNonce(testContext *vm.VMTestContext, address []byte) uint64 { - accnt, _ := 
testContext.Accounts.LoadAccount(address) - return accnt.GetNonce() -} - -func getBalance(testContext *vm.VMTestContext, address []byte) *big.Int { - accnt, _ := testContext.Accounts.LoadAccount(address) - userAccnt, _ := accnt.(state.UserAccountHandler) - - return userAccnt.GetBalance() -} - // relayer address is in shard 2, creates a transaction on the behalf of the user from shard 2, that will call the DNS contract // from shard 1. func TestDeployDNSContract_TestGasWhenSaveUsernameFailsCrossShardBackwardsCompatibility(t *testing.T) { @@ -274,7 +262,7 @@ func processRegisterThroughRelayedTxs(tb testing.TB, args argsProcessRegister) ( log.Info("user tx", "tx", txToString(userTx)) // generate the relayed transaction - relayedTxData := utils.PrepareRelayerTxData(userTx) // v1 will suffice + relayedTxData := integrationTests.PrepareRelayedTxDataV1(userTx) // v1 will suffice relayedTxGasLimit := userTxGasLimit + 1 + uint64(len(relayedTxData)) relayedTx := vm.CreateTransaction( getNonce(args.testContextForRelayerAndUser, args.relayerAddress), From 3a06cbf60898fbccb55cd62cc5ea1a05b626c8b9 Mon Sep 17 00:00:00 2001 From: jules01 Date: Wed, 26 Apr 2023 19:29:38 +0300 Subject: [PATCH 107/221] - fixes after merge --- factory/processing/processComponents.go | 2 +- factory/processing/processComponentsForTxSimulator.go | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index fe292fc5241..a0738464837 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -659,7 +659,7 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { txSimulatorProcessor, vmFactoryForTxSimulate, err := pcf.createTxSimulatorProcessor() if err != nil { - return nil, err + return nil, fmt.Errorf("%w when assembling components for the transactions simulator processor", err) } return &processComponents{ diff --git 
a/factory/processing/processComponentsForTxSimulator.go b/factory/processing/processComponentsForTxSimulator.go index 93a327ef4bc..3473677f057 100644 --- a/factory/processing/processComponentsForTxSimulator.go +++ b/factory/processing/processComponentsForTxSimulator.go @@ -177,6 +177,8 @@ func (pcf *processComponentsFactory) createArgsTxSimulatorProcessorForMeta( TxTypeHandler: txTypeHandler, EconomicsFee: pcf.coreData.EconomicsData(), EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + TxVersionChecker: pcf.coreData.TxVersionChecker(), + GuardianChecker: pcf.bootstrapComponents.GuardedAccountHandler(), } txProcessor, err := transaction.NewMetaTxProcessor(argsTxProcessor) @@ -347,6 +349,8 @@ func (pcf *processComponentsFactory) createArgsTxSimulatorProcessorShard( ArgsParser: argsParser, ScrForwarder: scForwarder, EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + TxVersionChecker: pcf.coreData.TxVersionChecker(), + GuardianChecker: pcf.bootstrapComponents.GuardedAccountHandler(), } txProcessor, err := transaction.NewTxProcessor(argsTxProcessor) From d57f6554654d2620013e5fcaf619f2c31ac50938 Mon Sep 17 00:00:00 2001 From: jules01 Date: Thu, 27 Apr 2023 12:46:13 +0300 Subject: [PATCH 108/221] - added integration-test --- .../realcomponents/processorRunner.go | 583 ++++++++++++++++++ .../realcomponents/processorRunner_test.go | 13 + .../txsimulator/componentConstruction_test.go | 63 ++ node/nodeRunner_test.go | 98 +-- testscommon/realConfigsHandling.go | 105 ++++ 5 files changed, 768 insertions(+), 94 deletions(-) create mode 100644 integrationTests/realcomponents/processorRunner.go create mode 100644 integrationTests/realcomponents/processorRunner_test.go create mode 100644 integrationTests/realcomponents/txsimulator/componentConstruction_test.go create mode 100644 testscommon/realConfigsHandling.go diff --git a/integrationTests/realcomponents/processorRunner.go b/integrationTests/realcomponents/processorRunner.go new file mode 100644 index 
00000000000..0b49e165589 --- /dev/null +++ b/integrationTests/realcomponents/processorRunner.go @@ -0,0 +1,583 @@ +package realcomponents + +import ( + "crypto/rand" + "io" + "math/big" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/endProcess" + "github.com/multiversx/mx-chain-core-go/data/esdt" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/forking" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" + dbLookupFactory "github.com/multiversx/mx-chain-go/dblookupext/factory" + "github.com/multiversx/mx-chain-go/factory" + factoryBootstrap "github.com/multiversx/mx-chain-go/factory/bootstrap" + factoryCore "github.com/multiversx/mx-chain-go/factory/core" + factoryCrypto "github.com/multiversx/mx-chain-go/factory/crypto" + factoryData "github.com/multiversx/mx-chain-go/factory/data" + factoryNetwork "github.com/multiversx/mx-chain-go/factory/network" + factoryProcessing "github.com/multiversx/mx-chain-go/factory/processing" + factoryState "github.com/multiversx/mx-chain-go/factory/state" + factoryStatus "github.com/multiversx/mx-chain-go/factory/status" + factoryStatusCore "github.com/multiversx/mx-chain-go/factory/statusCore" + "github.com/multiversx/mx-chain-go/genesis" + "github.com/multiversx/mx-chain-go/genesis/parsing" + "github.com/multiversx/mx-chain-go/integrationTests/vm" + "github.com/multiversx/mx-chain-go/integrationTests/vm/wasm" + "github.com/multiversx/mx-chain-go/p2p" + "github.com/multiversx/mx-chain-go/process/interceptors" + nodesCoord "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/storage/cache" + storageFactory "github.com/multiversx/mx-chain-go/storage/factory" + 
"github.com/multiversx/mx-chain-go/storage/storageunit" + "github.com/multiversx/mx-chain-go/update/trigger" + "github.com/stretchr/testify/require" +) + +// ProcessorRunner is a test emulation to the nodeRunner component +type ProcessorRunner struct { + closers []io.Closer + Config config.Configs + CoreComponents factory.CoreComponentsHolder + CryptoComponents factory.CryptoComponentsHandler + StatusCoreComponents factory.StatusCoreComponentsHolder + NetworkComponents factory.NetworkComponentsHolder + BootstrapComponents factory.BootstrapComponentsHolder + DataComponents factory.DataComponentsHolder + StateComponents factory.StateComponentsHolder + NodesCoordinator nodesCoord.NodesCoordinator + StatusComponents factory.StatusComponentsHolder + ProcessComponents factory.ProcessComponentsHolder +} + +// NewProcessorRunner returns a new instance of ProcessorRunner +func NewProcessorRunner(tb testing.TB, config config.Configs) *ProcessorRunner { + pr := &ProcessorRunner{ + Config: config, + closers: make([]io.Closer, 0), + } + + pr.createComponents(tb) + + return pr +} + +func (pr *ProcessorRunner) createComponents(tb testing.TB) { + pr.createCoreComponents(tb) + pr.createCryptoComponents(tb) + pr.createStatusCoreComponents(tb) + pr.createNetworkComponents(tb) + pr.createBootstrapComponents(tb) + pr.createDataComponents(tb) + pr.createStateComponents(tb) + pr.createStatusComponents(tb) + pr.createProcessComponents(tb) +} + +func (pr *ProcessorRunner) createCoreComponents(tb testing.TB) { + argsCore := factoryCore.CoreComponentsFactoryArgs{ + Config: *pr.Config.GeneralConfig, + ConfigPathsHolder: *pr.Config.ConfigurationPathsHolder, + EpochConfig: *pr.Config.EpochConfig, + RoundConfig: *pr.Config.RoundConfig, + RatingsConfig: *pr.Config.RatingsConfig, + EconomicsConfig: *pr.Config.EconomicsConfig, + ImportDbConfig: *pr.Config.ImportDbConfig, + NodesFilename: pr.Config.ConfigurationPathsHolder.Nodes, + WorkingDirectory: pr.Config.FlagsConfig.WorkingDir, + 
ChanStopNodeProcess: make(chan endProcess.ArgEndProcess), + } + coreFactory, err := factoryCore.NewCoreComponentsFactory(argsCore) + require.Nil(tb, err) + + coreComp, err := factoryCore.NewManagedCoreComponents(coreFactory) + require.Nil(tb, err) + + err = coreComp.Create() + require.Nil(tb, err) + require.Nil(tb, coreComp.CheckSubcomponents()) + + pr.closers = append(pr.closers, coreComp) + pr.CoreComponents = coreComp +} + +func (pr *ProcessorRunner) createCryptoComponents(tb testing.TB) { + argsCrypto := factoryCrypto.CryptoComponentsFactoryArgs{ + ValidatorKeyPemFileName: pr.Config.ConfigurationPathsHolder.ValidatorKey, + AllValidatorKeysPemFileName: pr.Config.ConfigurationPathsHolder.AllValidatorKeys, + SkIndex: 0, + Config: *pr.Config.GeneralConfig, + EnableEpochs: pr.Config.EpochConfig.EnableEpochs, + PrefsConfig: *pr.Config.PreferencesConfig, + CoreComponentsHolder: pr.CoreComponents, + KeyLoader: core.NewKeyLoader(), + ActivateBLSPubKeyMessageVerification: false, + IsInImportMode: false, + ImportModeNoSigCheck: false, + NoKeyProvided: true, + P2pKeyPemFileName: "", + } + + cryptoFactory, err := factoryCrypto.NewCryptoComponentsFactory(argsCrypto) + require.Nil(tb, err) + + cryptoComp, err := factoryCrypto.NewManagedCryptoComponents(cryptoFactory) + require.Nil(tb, err) + + err = cryptoComp.Create() + require.Nil(tb, err) + require.Nil(tb, cryptoComp.CheckSubcomponents()) + + pr.closers = append(pr.closers, cryptoComp) + pr.CryptoComponents = cryptoComp +} + +func (pr *ProcessorRunner) createStatusCoreComponents(tb testing.TB) { + argsStatusCore := factoryStatusCore.StatusCoreComponentsFactoryArgs{ + Config: *pr.Config.GeneralConfig, + EpochConfig: *pr.Config.EpochConfig, + RoundConfig: *pr.Config.RoundConfig, + RatingsConfig: *pr.Config.RatingsConfig, + EconomicsConfig: *pr.Config.EconomicsConfig, + CoreComp: pr.CoreComponents, + } + + statusCoreFactory, err := factoryStatusCore.NewStatusCoreComponentsFactory(argsStatusCore) + require.Nil(tb, err) + + 
statusCoreComp, err := factoryStatusCore.NewManagedStatusCoreComponents(statusCoreFactory) + require.Nil(tb, err) + + err = statusCoreComp.Create() + require.Nil(tb, err) + require.Nil(tb, statusCoreComp.CheckSubcomponents()) + + pr.closers = append(pr.closers, statusCoreComp) + pr.StatusCoreComponents = statusCoreComp +} + +func (pr *ProcessorRunner) createNetworkComponents(tb testing.TB) { + argsNetwork := factoryNetwork.NetworkComponentsFactoryArgs{ + P2pConfig: *pr.Config.P2pConfig, + MainConfig: *pr.Config.GeneralConfig, + RatingsConfig: *pr.Config.RatingsConfig, + StatusHandler: pr.StatusCoreComponents.AppStatusHandler(), + Marshalizer: pr.CoreComponents.InternalMarshalizer(), + Syncer: pr.CoreComponents.SyncTimer(), + PreferredPeersSlices: make([]string, 0), + BootstrapWaitTime: 1, + NodeOperationMode: p2p.NormalOperation, + ConnectionWatcherType: "", + CryptoComponents: pr.CryptoComponents, + } + + networkFactory, err := factoryNetwork.NewNetworkComponentsFactory(argsNetwork) + require.Nil(tb, err) + + networkComp, err := factoryNetwork.NewManagedNetworkComponents(networkFactory) + require.Nil(tb, err) + + err = networkComp.Create() + require.Nil(tb, err) + require.Nil(tb, networkComp.CheckSubcomponents()) + + pr.closers = append(pr.closers, networkComp) + pr.NetworkComponents = networkComp +} + +func (pr *ProcessorRunner) createBootstrapComponents(tb testing.TB) { + argsBootstrap := factoryBootstrap.BootstrapComponentsFactoryArgs{ + Config: *pr.Config.GeneralConfig, + RoundConfig: *pr.Config.RoundConfig, + PrefConfig: *pr.Config.PreferencesConfig, + ImportDbConfig: *pr.Config.ImportDbConfig, + FlagsConfig: *pr.Config.FlagsConfig, + WorkingDir: pr.Config.FlagsConfig.WorkingDir, + CoreComponents: pr.CoreComponents, + CryptoComponents: pr.CryptoComponents, + NetworkComponents: pr.NetworkComponents, + StatusCoreComponents: pr.StatusCoreComponents, + } + + bootstrapFactory, err := factoryBootstrap.NewBootstrapComponentsFactory(argsBootstrap) + require.Nil(tb, 
err) + + bootstrapComp, err := factoryBootstrap.NewManagedBootstrapComponents(bootstrapFactory) + require.Nil(tb, err) + + err = bootstrapComp.Create() + require.Nil(tb, err) + require.Nil(tb, bootstrapComp.CheckSubcomponents()) + + pr.closers = append(pr.closers, bootstrapComp) + pr.BootstrapComponents = bootstrapComp +} + +func (pr *ProcessorRunner) createDataComponents(tb testing.TB) { + argsData := factoryData.DataComponentsFactoryArgs{ + Config: *pr.Config.GeneralConfig, + PrefsConfig: pr.Config.PreferencesConfig.Preferences, + ShardCoordinator: pr.BootstrapComponents.ShardCoordinator(), + Core: pr.CoreComponents, + StatusCore: pr.StatusCoreComponents, + Crypto: pr.CryptoComponents, + CurrentEpoch: 0, + CreateTrieEpochRootHashStorer: false, + NodeProcessingMode: common.Normal, + SnapshotsEnabled: false, + } + + dataFactory, err := factoryData.NewDataComponentsFactory(argsData) + require.Nil(tb, err) + + dataComp, err := factoryData.NewManagedDataComponents(dataFactory) + require.Nil(tb, err) + + err = dataComp.Create() + require.Nil(tb, err) + require.Nil(tb, dataComp.CheckSubcomponents()) + + pr.closers = append(pr.closers, dataComp) + pr.DataComponents = dataComp +} + +func (pr *ProcessorRunner) createStateComponents(tb testing.TB) { + argsState := factoryState.StateComponentsFactoryArgs{ + Config: *pr.Config.GeneralConfig, + Core: pr.CoreComponents, + StatusCore: pr.StatusCoreComponents, + StorageService: pr.DataComponents.StorageService(), + ProcessingMode: common.Normal, + ShouldSerializeSnapshots: false, + SnapshotsEnabled: false, + ChainHandler: pr.DataComponents.Blockchain(), + } + + stateFactory, err := factoryState.NewStateComponentsFactory(argsState) + require.Nil(tb, err) + + stateComp, err := factoryState.NewManagedStateComponents(stateFactory) + require.Nil(tb, err) + + err = stateComp.Create() + require.Nil(tb, err) + require.Nil(tb, stateComp.CheckSubcomponents()) + + pr.closers = append(pr.closers, stateComp) + pr.StateComponents = stateComp 
+} + +func (pr *ProcessorRunner) createStatusComponents(tb testing.TB) { + nodesShufflerOut, err := factoryBootstrap.CreateNodesShuffleOut( + pr.CoreComponents.GenesisNodesSetup(), + pr.Config.GeneralConfig.EpochStartConfig, + pr.CoreComponents.ChanStopNodeProcess(), + ) + require.Nil(tb, err) + + bootstrapStorer, err := pr.DataComponents.StorageService().GetStorer(dataRetriever.BootstrapUnit) + require.Nil(tb, err) + + pr.NodesCoordinator, err = factoryBootstrap.CreateNodesCoordinator( + nodesShufflerOut, + pr.CoreComponents.GenesisNodesSetup(), + pr.Config.PreferencesConfig.Preferences, + pr.CoreComponents.EpochStartNotifierWithConfirm(), + pr.CryptoComponents.PublicKey(), + pr.CoreComponents.InternalMarshalizer(), + pr.CoreComponents.Hasher(), + pr.CoreComponents.Rater(), + bootstrapStorer, + pr.CoreComponents.NodesShuffler(), + pr.BootstrapComponents.ShardCoordinator().SelfId(), + pr.BootstrapComponents.EpochBootstrapParams(), + pr.BootstrapComponents.EpochBootstrapParams().Epoch(), + pr.CoreComponents.ChanStopNodeProcess(), + pr.CoreComponents.NodeTypeProvider(), + pr.CoreComponents.EnableEpochsHandler(), + pr.DataComponents.Datapool().CurrentEpochValidatorInfo(), + ) + require.Nil(tb, err) + + argsStatus := factoryStatus.StatusComponentsFactoryArgs{ + Config: *pr.Config.GeneralConfig, + ExternalConfig: *pr.Config.ExternalConfig, + EconomicsConfig: *pr.Config.EconomicsConfig, + ShardCoordinator: pr.BootstrapComponents.ShardCoordinator(), + NodesCoordinator: pr.NodesCoordinator, + EpochStartNotifier: pr.CoreComponents.EpochStartNotifierWithConfirm(), + CoreComponents: pr.CoreComponents, + StatusCoreComponents: pr.StatusCoreComponents, + NetworkComponents: pr.NetworkComponents, + StateComponents: pr.StateComponents, + IsInImportMode: false, + } + + statusFactory, err := factoryStatus.NewStatusComponentsFactory(argsStatus) + require.Nil(tb, err) + + statusComp, err := factoryStatus.NewManagedStatusComponents(statusFactory) + require.Nil(tb, err) + + err = 
statusComp.Create() + require.Nil(tb, err) + require.Nil(tb, statusComp.CheckSubcomponents()) + + pr.closers = append(pr.closers, statusComp) + pr.StatusComponents = statusComp +} + +func (pr *ProcessorRunner) createProcessComponents(tb testing.TB) { + totalSupply, ok := big.NewInt(0).SetString(pr.Config.EconomicsConfig.GlobalSettings.GenesisTotalSupply, 10) + require.True(tb, ok) + + args := genesis.AccountsParserArgs{ + GenesisFilePath: pr.Config.ConfigurationPathsHolder.Genesis, + EntireSupply: totalSupply, + MinterAddress: pr.Config.EconomicsConfig.GlobalSettings.GenesisMintingSenderAddress, + PubkeyConverter: pr.CoreComponents.AddressPubKeyConverter(), + KeyGenerator: pr.CryptoComponents.TxSignKeyGen(), + Hasher: pr.CoreComponents.Hasher(), + Marshalizer: pr.CoreComponents.InternalMarshalizer(), + } + + accountsParser, err := parsing.NewAccountsParser(args) + require.Nil(tb, err) + + whiteListCache, err := storageunit.NewCache(storageFactory.GetCacherFromConfig(pr.Config.GeneralConfig.WhiteListPool)) + require.Nil(tb, err) + + whiteListRequest, err := interceptors.NewWhiteListDataVerifier(whiteListCache) + require.Nil(tb, err) + + whiteListCacheVerified, err := storageunit.NewCache(storageFactory.GetCacherFromConfig(pr.Config.GeneralConfig.WhiteListerVerifiedTxs)) + require.Nil(tb, err) + + whiteListerVerifiedTxs, err := interceptors.NewWhiteListDataVerifier(whiteListCacheVerified) + require.Nil(tb, err) + + smartContractParser, err := parsing.NewSmartContractsParser( + pr.Config.ConfigurationPathsHolder.SmartContracts, + pr.CoreComponents.AddressPubKeyConverter(), + pr.CryptoComponents.TxSignKeyGen(), + ) + require.Nil(tb, err) + + argsGasScheduleNotifier := forking.ArgsNewGasScheduleNotifier{ + GasScheduleConfig: pr.Config.EpochConfig.GasSchedule, + ConfigDir: pr.Config.ConfigurationPathsHolder.GasScheduleDirectoryName, + EpochNotifier: pr.CoreComponents.EpochNotifier(), + WasmVMChangeLocker: pr.CoreComponents.WasmVMChangeLocker(), + } + gasScheduleNotifier, 
err := forking.NewGasScheduleNotifier(argsGasScheduleNotifier) + require.Nil(tb, err) + + historyRepoFactoryArgs := &dbLookupFactory.ArgsHistoryRepositoryFactory{ + SelfShardID: pr.BootstrapComponents.ShardCoordinator().SelfId(), + Config: pr.Config.GeneralConfig.DbLookupExtensions, + Hasher: pr.CoreComponents.Hasher(), + Marshalizer: pr.CoreComponents.InternalMarshalizer(), + Store: pr.DataComponents.StorageService(), + Uint64ByteSliceConverter: pr.CoreComponents.Uint64ByteSliceConverter(), + } + historyRepositoryFactory, err := dbLookupFactory.NewHistoryRepositoryFactory(historyRepoFactoryArgs) + require.Nil(tb, err) + + historyRepository, err := historyRepositoryFactory.Create() + require.Nil(tb, err) + + requestedItemsHandler := cache.NewTimeCache( + time.Duration(uint64(time.Millisecond) * pr.CoreComponents.GenesisNodesSetup().GetRoundDuration())) + + importStartHandler, err := trigger.NewImportStartHandler(filepath.Join(pr.Config.FlagsConfig.DbDir, common.DefaultDBPath), pr.Config.FlagsConfig.Version) + require.Nil(tb, err) + + argsProcess := factoryProcessing.ProcessComponentsFactoryArgs{ + Config: *pr.Config.GeneralConfig, + EpochConfig: *pr.Config.EpochConfig, + PrefConfigs: pr.Config.PreferencesConfig.Preferences, + ImportDBConfig: *pr.Config.ImportDbConfig, + AccountsParser: accountsParser, + SmartContractParser: smartContractParser, + GasSchedule: gasScheduleNotifier, + NodesCoordinator: pr.NodesCoordinator, + RequestedItemsHandler: requestedItemsHandler, + WhiteListHandler: whiteListRequest, + WhiteListerVerifiedTxs: whiteListerVerifiedTxs, + MaxRating: pr.Config.RatingsConfig.General.MaxRating, + SystemSCConfig: pr.Config.SystemSCConfig, + Version: "test", + ImportStartHandler: importStartHandler, + WorkingDir: pr.Config.FlagsConfig.WorkingDir, + HistoryRepo: historyRepository, + SnapshotsEnabled: false, + Data: pr.DataComponents, + CoreData: pr.CoreComponents, + Crypto: pr.CryptoComponents, + State: pr.StateComponents, + Network: 
pr.NetworkComponents, + BootstrapComponents: pr.BootstrapComponents, + StatusComponents: pr.StatusComponents, + StatusCoreComponents: pr.StatusCoreComponents, + } + + processFactory, err := factoryProcessing.NewProcessComponentsFactory(argsProcess) + require.Nil(tb, err) + + processComp, err := factoryProcessing.NewManagedProcessComponents(processFactory) + require.Nil(tb, err) + + err = processComp.Create() + require.Nil(tb, err) + require.Nil(tb, processComp.CheckSubcomponents()) + + pr.closers = append(pr.closers, processComp) + pr.ProcessComponents = processComp +} + +// Close will close all inner components +func (pr *ProcessorRunner) Close(tb testing.TB) { + for i := len(pr.closers) - 1; i >= 0; i-- { + err := pr.closers[i].Close() + require.Nil(tb, err) + } +} + +// GenerateAddress will generate an address for the given shardID +func (pr *ProcessorRunner) GenerateAddress(shardID uint32) []byte { + address := make([]byte, 32) + + for { + _, _ = rand.Read(address) + if pr.BootstrapComponents.ShardCoordinator().ComputeId(address) == shardID { + return address + } + } +} + +// AddBalanceToAccount will add the provided balance to the account +func (pr *ProcessorRunner) AddBalanceToAccount(tb testing.TB, address []byte, balanceToAdd *big.Int) { + userAccount := pr.GetUserAccount(tb, address) + + err := userAccount.AddToBalance(balanceToAdd) + require.Nil(tb, err) + + err = pr.StateComponents.AccountsAdapter().SaveAccount(userAccount) + require.Nil(tb, err) +} + +// GetUserAccount will return the user account for the provided address +func (pr *ProcessorRunner) GetUserAccount(tb testing.TB, address []byte) state.UserAccountHandler { + acc, err := pr.StateComponents.AccountsAdapter().LoadAccount(address) + require.Nil(tb, err) + + userAccount, ok := acc.(state.UserAccountHandler) + require.True(tb, ok) + + return userAccount +} + +// SetESDTForAccount will set the provided ESDT balance to the account +func (pr *ProcessorRunner) SetESDTForAccount( + tb testing.TB, + 
address []byte, + tokenIdentifier string, + esdtNonce uint64, + esdtValue *big.Int, +) { + userAccount := pr.GetUserAccount(tb, address) + + esdtData := &esdt.ESDigitalToken{ + Value: esdtValue, + Properties: []byte{}, + } + + esdtDataBytes, err := pr.CoreComponents.InternalMarshalizer().Marshal(esdtData) + require.Nil(tb, err) + + key := append([]byte(core.ProtectedKeyPrefix), []byte(core.ESDTKeyIdentifier)...) + key = append(key, tokenIdentifier...) + if esdtNonce > 0 { + key = append(key, big.NewInt(0).SetUint64(esdtNonce).Bytes()...) + } + + err = userAccount.SaveKeyValue(key, esdtDataBytes) + require.Nil(tb, err) + + err = pr.StateComponents.AccountsAdapter().SaveAccount(userAccount) + require.Nil(tb, err) + + pr.saveNewTokenOnSystemAccount(tb, key, esdtData) + + _, err = pr.StateComponents.AccountsAdapter().Commit() + require.Nil(tb, err) +} + +func (pr *ProcessorRunner) saveNewTokenOnSystemAccount(tb testing.TB, tokenKey []byte, esdtData *esdt.ESDigitalToken) { + esdtDataOnSystemAcc := esdtData + esdtDataOnSystemAcc.Properties = nil + esdtDataOnSystemAcc.Reserved = []byte{1} + esdtDataOnSystemAcc.Value.Set(esdtData.Value) + + esdtDataBytes, err := pr.CoreComponents.InternalMarshalizer().Marshal(esdtData) + require.Nil(tb, err) + + sysAccount, err := pr.StateComponents.AccountsAdapter().LoadAccount(core.SystemAccountAddress) + require.Nil(tb, err) + + sysUserAccount, ok := sysAccount.(state.UserAccountHandler) + require.True(tb, ok) + + err = sysUserAccount.SaveKeyValue(tokenKey, esdtDataBytes) + require.Nil(tb, err) + + err = pr.StateComponents.AccountsAdapter().SaveAccount(sysAccount) + require.Nil(tb, err) +} + +// ExecuteTransactionAsScheduled will execute the provided transaction as scheduled +func (pr *ProcessorRunner) ExecuteTransactionAsScheduled(tb testing.TB, tx *transaction.Transaction) error { + hash, err := core.CalculateHash(pr.CoreComponents.InternalMarshalizer(), pr.CoreComponents.Hasher(), tx) + require.Nil(tb, err) + 
pr.ProcessComponents.ScheduledTxsExecutionHandler().AddScheduledTx(hash, tx) + + return pr.ProcessComponents.ScheduledTxsExecutionHandler().Execute(hash) +} + +// CreateDeploySCTx will return the transaction and the hash for the deployment smart-contract transaction +func (pr *ProcessorRunner) CreateDeploySCTx( + tb testing.TB, + owner []byte, + pathToContract string, + gasLimit uint64, + initialHexParameters []string, +) (*transaction.Transaction, []byte) { + scCode := wasm.GetSCCode(pathToContract) + ownerAccount := pr.GetUserAccount(tb, owner) + + txDataComponents := append([]string{wasm.CreateDeployTxData(scCode)}, initialHexParameters...) + + tx := &transaction.Transaction{ + Nonce: ownerAccount.GetNonce(), + Value: big.NewInt(0), + RcvAddr: vm.CreateEmptyAddress(), + SndAddr: owner, + GasPrice: pr.CoreComponents.EconomicsData().MinGasPrice(), + GasLimit: gasLimit, + Data: []byte(strings.Join(txDataComponents, "@")), + } + + hash, err := core.CalculateHash(pr.CoreComponents.InternalMarshalizer(), pr.CoreComponents.Hasher(), tx) + require.Nil(tb, err) + + return tx, hash +} diff --git a/integrationTests/realcomponents/processorRunner_test.go b/integrationTests/realcomponents/processorRunner_test.go new file mode 100644 index 00000000000..df09a1bb582 --- /dev/null +++ b/integrationTests/realcomponents/processorRunner_test.go @@ -0,0 +1,13 @@ +package realcomponents + +import ( + "testing" + + "github.com/multiversx/mx-chain-go/testscommon" +) + +func TestNewProcessorRunnerAndClose(t *testing.T) { + cfg := testscommon.CreateTestConfigs(t, "../../cmd/node/config") + pr := NewProcessorRunner(t, *cfg) + pr.Close(t) +} diff --git a/integrationTests/realcomponents/txsimulator/componentConstruction_test.go b/integrationTests/realcomponents/txsimulator/componentConstruction_test.go new file mode 100644 index 00000000000..4be8f652b95 --- /dev/null +++ b/integrationTests/realcomponents/txsimulator/componentConstruction_test.go @@ -0,0 +1,63 @@ +package txsimulator + 
+import ( + "fmt" + "math/big" + "testing" + + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-core-go/data/transaction" + "github.com/multiversx/mx-chain-go/integrationTests/realcomponents" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/vm" + logger "github.com/multiversx/mx-chain-logger-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var log = logger.GetOrCreate("integrationTests/realcomponents/txsimulator") + +func TestTransactionSimulationComponentConstruction(t *testing.T) { + cfg := testscommon.CreateTestConfigs(t, "../../../cmd/node/config") + cfg.EpochConfig.EnableEpochs.ESDTEnableEpoch = 0 + cfg.EpochConfig.EnableEpochs.BuiltInFunctionsEnableEpoch = 0 + cfg.PreferencesConfig.Preferences.DestinationShardAsObserver = "metachain" // the problem was only on the metachain + + pr := realcomponents.NewProcessorRunner(t, *cfg) + defer pr.Close(t) + + senderShardID := uint32(0) // doesn't matter + alice := pr.GenerateAddress(senderShardID) + log.Info("generated address", + "alice", pr.CoreComponents.AddressPubKeyConverter().SilentEncode(alice, log), + "shard", senderShardID, + ) + + rootHash, err := pr.StateComponents.AccountsAdapter().Commit() + require.Nil(t, err) + + err = pr.DataComponents.Blockchain().SetCurrentBlockHeaderAndRootHash( + &block.MetaBlock{ + Nonce: 1, + RootHash: rootHash, + }, rootHash) + require.Nil(t, err) + + issueCost, _ := big.NewInt(0).SetString(pr.Config.SystemSCConfig.ESDTSystemSCConfig.BaseIssuingCost, 10) + + txForSimulation := &transaction.Transaction{ + Nonce: pr.GetUserAccount(t, alice).GetNonce(), + Value: issueCost, + RcvAddr: vm.ESDTSCAddress, + SndAddr: alice, + GasPrice: pr.CoreComponents.EconomicsData().MinGasPrice(), + GasLimit: 60_000_000, + Data: []byte(fmt.Sprintf("issue@%x@%x@0100@02", "token", "tkn")), + ChainID: []byte(pr.CoreComponents.ChainID()), + Version: 1, + } + + _, err = 
pr.ProcessComponents.TransactionSimulatorProcessor().ProcessTx(txForSimulation) + assert.Nil(t, err) + assert.Equal(t, 0, pr.StateComponents.AccountsAdapter().JournalLen()) // state for processing should not be dirtied +} diff --git a/node/nodeRunner_test.go b/node/nodeRunner_test.go index ce0242db3f7..231dfc96c73 100644 --- a/node/nodeRunner_test.go +++ b/node/nodeRunner_test.go @@ -6,112 +6,22 @@ package node import ( "io/ioutil" "os" - "os/exec" "path" - "strings" "syscall" "testing" "time" "github.com/multiversx/mx-chain-core-go/data/endProcess" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/node/mock" + "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/api" logger "github.com/multiversx/mx-chain-logger-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func createConfigs(tb testing.TB) *config.Configs { - tempDir := tb.TempDir() - - originalConfigsPath := "../cmd/node/config" - newConfigsPath := path.Join(tempDir, "config") - - cmd := exec.Command("cp", "-r", originalConfigsPath, newConfigsPath) - err := cmd.Run() - require.Nil(tb, err) - - newGenesisSmartContractsFilename := path.Join(newConfigsPath, "genesisSmartContracts.json") - correctTestPathInGenesisSmartContracts(tb, tempDir, newGenesisSmartContractsFilename) - - apiConfig, err := common.LoadApiConfig(path.Join(newConfigsPath, "api.toml")) - require.Nil(tb, err) - - generalConfig, err := common.LoadMainConfig(path.Join(newConfigsPath, "config.toml")) - require.Nil(tb, err) - - ratingsConfig, err := common.LoadRatingsConfig(path.Join(newConfigsPath, "ratings.toml")) - require.Nil(tb, err) - - economicsConfig, err := common.LoadEconomicsConfig(path.Join(newConfigsPath, "economics.toml")) - require.Nil(tb, err) - - prefsConfig, err := common.LoadPreferencesConfig(path.Join(newConfigsPath, "prefs.toml")) - require.Nil(tb, err) - - p2pConfig, err := 
common.LoadP2PConfig(path.Join(newConfigsPath, "p2p.toml")) - require.Nil(tb, err) - - externalConfig, err := common.LoadExternalConfig(path.Join(newConfigsPath, "external.toml")) - require.Nil(tb, err) - - systemSCConfig, err := common.LoadSystemSmartContractsConfig(path.Join(newConfigsPath, "systemSmartContractsConfig.toml")) - require.Nil(tb, err) - - epochConfig, err := common.LoadEpochConfig(path.Join(newConfigsPath, "enableEpochs.toml")) - require.Nil(tb, err) - - roundConfig, err := common.LoadRoundConfig(path.Join(newConfigsPath, "enableRounds.toml")) - require.Nil(tb, err) - - // make the node pass the network wait constraints - p2pConfig.Node.MinNumPeersToWaitForOnBootstrap = 0 - p2pConfig.Node.ThresholdMinConnectedPeers = 0 - - return &config.Configs{ - GeneralConfig: generalConfig, - ApiRoutesConfig: apiConfig, - EconomicsConfig: economicsConfig, - SystemSCConfig: systemSCConfig, - RatingsConfig: ratingsConfig, - PreferencesConfig: prefsConfig, - ExternalConfig: externalConfig, - P2pConfig: p2pConfig, - FlagsConfig: &config.ContextFlagsConfig{ - WorkingDir: tempDir, - NoKeyProvided: true, - Version: "test version", - DbDir: path.Join(tempDir, "db"), - }, - ImportDbConfig: &config.ImportDbConfig{}, - ConfigurationPathsHolder: &config.ConfigurationPathsHolder{ - GasScheduleDirectoryName: path.Join(newConfigsPath, "gasSchedules"), - Nodes: path.Join(newConfigsPath, "nodesSetup.json"), - Genesis: path.Join(newConfigsPath, "genesis.json"), - SmartContracts: newGenesisSmartContractsFilename, - ValidatorKey: "validatorKey.pem", - }, - EpochConfig: epochConfig, - RoundConfig: roundConfig, - } -} - -func correctTestPathInGenesisSmartContracts(tb testing.TB, tempDir string, newGenesisSmartContractsFilename string) { - input, err := ioutil.ReadFile(newGenesisSmartContractsFilename) - require.Nil(tb, err) - - lines := strings.Split(string(input), "\n") - for i, line := range lines { - if strings.Contains(line, "./config") { - lines[i] = strings.Replace(line, 
"./config", path.Join(tempDir, "config"), 1) - } - } - output := strings.Join(lines, "\n") - err = ioutil.WriteFile(newGenesisSmartContractsFilename, []byte(output), 0644) - require.Nil(tb, err) -} +const originalConfigsPath = "../cmd/node/config" func TestNewNodeRunner(t *testing.T) { t.Parallel() @@ -127,7 +37,7 @@ func TestNewNodeRunner(t *testing.T) { t.Run("with valid configs should work", func(t *testing.T) { t.Parallel() - configs := createConfigs(t) + configs := testscommon.CreateTestConfigs(t, originalConfigsPath) runner, err := NewNodeRunner(configs) assert.NotNil(t, runner) assert.Nil(t, err) @@ -137,7 +47,7 @@ func TestNewNodeRunner(t *testing.T) { func TestNodeRunner_StartAndCloseNodeUsingSIGINT(t *testing.T) { t.Parallel() - configs := createConfigs(t) + configs := testscommon.CreateTestConfigs(t, originalConfigsPath) runner, _ := NewNodeRunner(configs) trigger := mock.NewApplicationRunningTrigger() diff --git a/testscommon/realConfigsHandling.go b/testscommon/realConfigsHandling.go new file mode 100644 index 00000000000..2041d9f7375 --- /dev/null +++ b/testscommon/realConfigsHandling.go @@ -0,0 +1,105 @@ +package testscommon + +import ( + "io/ioutil" + "os/exec" + "path" + "strings" + "testing" + + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/stretchr/testify/require" +) + +// CreateTestConfigs will try to copy the whole configs directory to a temp directory and return the configs after load +// The copying of the configs is required because minor adjustments of their contents is required for the tests to pass +func CreateTestConfigs(tb testing.TB, originalConfigsPath string) *config.Configs { + tempDir := tb.TempDir() + + newConfigsPath := path.Join(tempDir, "config") + + // TODO refactor this cp to work on all OSes + cmd := exec.Command("cp", "-r", originalConfigsPath, newConfigsPath) + err := cmd.Run() + require.Nil(tb, err) + + newGenesisSmartContractsFilename := path.Join(newConfigsPath, 
"genesisSmartContracts.json") + correctTestPathInGenesisSmartContracts(tb, tempDir, newGenesisSmartContractsFilename) + + apiConfig, err := common.LoadApiConfig(path.Join(newConfigsPath, "api.toml")) + require.Nil(tb, err) + + generalConfig, err := common.LoadMainConfig(path.Join(newConfigsPath, "config.toml")) + require.Nil(tb, err) + + ratingsConfig, err := common.LoadRatingsConfig(path.Join(newConfigsPath, "ratings.toml")) + require.Nil(tb, err) + + economicsConfig, err := common.LoadEconomicsConfig(path.Join(newConfigsPath, "economics.toml")) + require.Nil(tb, err) + + prefsConfig, err := common.LoadPreferencesConfig(path.Join(newConfigsPath, "prefs.toml")) + require.Nil(tb, err) + + p2pConfig, err := common.LoadP2PConfig(path.Join(newConfigsPath, "p2p.toml")) + require.Nil(tb, err) + + externalConfig, err := common.LoadExternalConfig(path.Join(newConfigsPath, "external.toml")) + require.Nil(tb, err) + + systemSCConfig, err := common.LoadSystemSmartContractsConfig(path.Join(newConfigsPath, "systemSmartContractsConfig.toml")) + require.Nil(tb, err) + + epochConfig, err := common.LoadEpochConfig(path.Join(newConfigsPath, "enableEpochs.toml")) + require.Nil(tb, err) + + roundConfig, err := common.LoadRoundConfig(path.Join(newConfigsPath, "enableRounds.toml")) + require.Nil(tb, err) + + // make the node pass the network wait constraints + p2pConfig.Node.MinNumPeersToWaitForOnBootstrap = 0 + p2pConfig.Node.ThresholdMinConnectedPeers = 0 + + return &config.Configs{ + GeneralConfig: generalConfig, + ApiRoutesConfig: apiConfig, + EconomicsConfig: economicsConfig, + SystemSCConfig: systemSCConfig, + RatingsConfig: ratingsConfig, + PreferencesConfig: prefsConfig, + ExternalConfig: externalConfig, + P2pConfig: p2pConfig, + FlagsConfig: &config.ContextFlagsConfig{ + WorkingDir: tempDir, + NoKeyProvided: true, + Version: "test version", + DbDir: path.Join(tempDir, "db"), + }, + ImportDbConfig: &config.ImportDbConfig{}, + ConfigurationPathsHolder: 
&config.ConfigurationPathsHolder{ + GasScheduleDirectoryName: path.Join(newConfigsPath, "gasSchedules"), + Nodes: path.Join(newConfigsPath, "nodesSetup.json"), + Genesis: path.Join(newConfigsPath, "genesis.json"), + SmartContracts: newGenesisSmartContractsFilename, + ValidatorKey: "validatorKey.pem", + }, + EpochConfig: epochConfig, + RoundConfig: roundConfig, + } +} + +func correctTestPathInGenesisSmartContracts(tb testing.TB, tempDir string, newGenesisSmartContractsFilename string) { + input, err := ioutil.ReadFile(newGenesisSmartContractsFilename) + require.Nil(tb, err) + + lines := strings.Split(string(input), "\n") + for i, line := range lines { + if strings.Contains(line, "./config") { + lines[i] = strings.Replace(line, "./config", path.Join(tempDir, "config"), 1) + } + } + output := strings.Join(lines, "\n") + err = ioutil.WriteFile(newGenesisSmartContractsFilename, []byte(output), 0644) + require.Nil(tb, err) +} From 063d4f2f71f948b226c34f66d9e00f7bc8dc9ac0 Mon Sep 17 00:00:00 2001 From: jules01 Date: Thu, 27 Apr 2023 13:09:37 +0300 Subject: [PATCH 109/221] - added integration-test for shard --- .../testdata/adder/adder.abi.json | 61 ++++++++++++++++ .../realcomponents/testdata/adder/adder.wasm | Bin 0 -> 684 bytes .../txsimulator/componentConstruction_test.go | 68 +++++++++++++++++- 3 files changed, 128 insertions(+), 1 deletion(-) create mode 100644 integrationTests/realcomponents/testdata/adder/adder.abi.json create mode 100755 integrationTests/realcomponents/testdata/adder/adder.wasm diff --git a/integrationTests/realcomponents/testdata/adder/adder.abi.json b/integrationTests/realcomponents/testdata/adder/adder.abi.json new file mode 100644 index 00000000000..4f529e58e47 --- /dev/null +++ b/integrationTests/realcomponents/testdata/adder/adder.abi.json @@ -0,0 +1,61 @@ +{ + "buildInfo": { + "rustc": { + "version": "1.60.0-nightly", + "commitHash": "c5c610aad0a012a9228ecb83cc19e77111a52140", + "commitDate": "2022-02-14", + "channel": "Nightly", + "short": 
"rustc 1.60.0-nightly (c5c610aad 2022-02-14)" + }, + "contractCrate": { + "name": "adder", + "version": "0.0.0" + }, + "framework": { + "name": "elrond-wasm", + "version": "0.29.3" + } + }, + "docs": [ + "One of the simplest smart contracts possible,", + "it holds a single variable in storage, which anyone can increment." + ], + "name": "Adder", + "constructor": { + "inputs": [ + { + "name": "initial_value", + "type": "BigUint" + } + ], + "outputs": [] + }, + "endpoints": [ + { + "name": "getSum", + "mutability": "readonly", + "inputs": [], + "outputs": [ + { + "type": "BigUint" + } + ] + }, + { + "docs": [ + "Add desired amount to the storage variable." + ], + "name": "add", + "mutability": "mutable", + "inputs": [ + { + "name": "value", + "type": "BigUint" + } + ], + "outputs": [] + } + ], + "hasCallback": false, + "types": {} +} diff --git a/integrationTests/realcomponents/testdata/adder/adder.wasm b/integrationTests/realcomponents/testdata/adder/adder.wasm new file mode 100755 index 0000000000000000000000000000000000000000..bcf3b797f3fd96f8ee03b7c969de628df15b33e5 GIT binary patch literal 684 zcmZuvO>fgc5PiFw=4)v(X;MKd4ki-fZ{SD-A)yGVaCSEF#)%r+$RAX>;7a_B9{LA3 za_6Tp8$0TS!)j*U+j(zxv@m%r0ElDea`$d+< z^J8Ii2*(T~TP(}i&!W;P=a3H7uGt(uWLcU)IixXT0kmB`t<65#;_P&msPFfm9c#&} z2DmNK%tZEcYQlPgmkseXouA%w)~nfR!=A?e4}X4HBkO|Y&VCOqDHS^4-3W}@BU!RDV6%_NOyU{;Xu Date: Thu, 27 Apr 2023 14:21:20 +0300 Subject: [PATCH 110/221] - added exception for some new tests, they are not short --- integrationTests/realcomponents/processorRunner_test.go | 4 ++++ .../txsimulator/componentConstruction_test.go | 8 ++++++++ 2 files changed, 12 insertions(+) diff --git a/integrationTests/realcomponents/processorRunner_test.go b/integrationTests/realcomponents/processorRunner_test.go index df09a1bb582..55951b63831 100644 --- a/integrationTests/realcomponents/processorRunner_test.go +++ b/integrationTests/realcomponents/processorRunner_test.go @@ -7,6 +7,10 @@ import ( ) func 
TestNewProcessorRunnerAndClose(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + cfg := testscommon.CreateTestConfigs(t, "../../cmd/node/config") pr := NewProcessorRunner(t, *cfg) pr.Close(t) diff --git a/integrationTests/realcomponents/txsimulator/componentConstruction_test.go b/integrationTests/realcomponents/txsimulator/componentConstruction_test.go index c17f8090b49..c7c30cf6d32 100644 --- a/integrationTests/realcomponents/txsimulator/componentConstruction_test.go +++ b/integrationTests/realcomponents/txsimulator/componentConstruction_test.go @@ -19,6 +19,10 @@ import ( var log = logger.GetOrCreate("integrationTests/realcomponents/txsimulator") func TestTransactionSimulationComponentConstructionOnMetachain(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + cfg := testscommon.CreateTestConfigs(t, "../../../cmd/node/config") cfg.EpochConfig.EnableEpochs.ESDTEnableEpoch = 0 cfg.EpochConfig.EnableEpochs.BuiltInFunctionsEnableEpoch = 0 @@ -64,6 +68,10 @@ func TestTransactionSimulationComponentConstructionOnMetachain(t *testing.T) { } func TestTransactionSimulationComponentConstructionOnShard(t *testing.T) { + if testing.Short() { + t.Skip("this is not a short test") + } + cfg := testscommon.CreateTestConfigs(t, "../../../cmd/node/config") cfg.EpochConfig.EnableEpochs.SCDeployEnableEpoch = 0 cfg.PreferencesConfig.Preferences.DestinationShardAsObserver = "0" From 220900eb93dd0b01d656eec71798c57617be379f Mon Sep 17 00:00:00 2001 From: jules01 Date: Thu, 27 Apr 2023 14:27:00 +0300 Subject: [PATCH 111/221] - fixes after merge --- .../processComponentsForTxSimulator.go | 41 +++++++++++-------- 1 file changed, 23 insertions(+), 18 deletions(-) diff --git a/factory/processing/processComponentsForTxSimulator.go b/factory/processing/processComponentsForTxSimulator.go index 3473677f057..db18a407ed9 100644 --- a/factory/processing/processComponentsForTxSimulator.go +++ 
b/factory/processing/processComponentsForTxSimulator.go @@ -10,7 +10,6 @@ import ( "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/preprocess" "github.com/multiversx/mx-chain-go/process/coordinator" - "github.com/multiversx/mx-chain-go/process/factory/metachain" "github.com/multiversx/mx-chain-go/process/factory/shard" "github.com/multiversx/mx-chain-go/process/smartContract" "github.com/multiversx/mx-chain-go/process/transaction" @@ -80,14 +79,17 @@ func (pcf *processComponentsFactory) createArgsTxSimulatorProcessorForMeta( ) (txsimulator.ArgsTxSimulator, process.VirtualMachinesContainerFactory, error) { args := txsimulator.ArgsTxSimulator{} - intermediateProcessorsFactory, err := metachain.NewIntermediateProcessorsContainerFactory( - pcf.bootstrapComponents.ShardCoordinator(), - pcf.coreData.InternalMarshalizer(), - pcf.coreData.Hasher(), - pcf.coreData.AddressPubKeyConverter(), - bootstrapDisabled.NewChainStorer(), - pcf.data.Datapool(), - &processDisabled.FeeHandler{}) + argsFactory := shard.ArgsNewIntermediateProcessorsContainerFactory{ + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + Hasher: pcf.coreData.Hasher(), + PubkeyConverter: pcf.coreData.AddressPubKeyConverter(), + Store: bootstrapDisabled.NewChainStorer(), + PoolsHolder: pcf.data.Datapool(), + EconomicsFee: &processDisabled.FeeHandler{}, + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + } + intermediateProcessorsFactory, err := shard.NewIntermediateProcessorsContainerFactory(argsFactory) if err != nil { return args, nil, err } @@ -217,15 +219,18 @@ func (pcf *processComponentsFactory) createArgsTxSimulatorProcessorShard( ) (txsimulator.ArgsTxSimulator, process.VirtualMachinesContainerFactory, error) { args := txsimulator.ArgsTxSimulator{} - intermediateProcessorsFactory, err := shard.NewIntermediateProcessorsContainerFactory( - pcf.bootstrapComponents.ShardCoordinator(), - 
pcf.coreData.InternalMarshalizer(), - pcf.coreData.Hasher(), - pcf.coreData.AddressPubKeyConverter(), - bootstrapDisabled.NewChainStorer(), - pcf.data.Datapool(), - &processDisabled.FeeHandler{}, - ) + argsFactory := shard.ArgsNewIntermediateProcessorsContainerFactory{ + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + Hasher: pcf.coreData.Hasher(), + PubkeyConverter: pcf.coreData.AddressPubKeyConverter(), + Store: bootstrapDisabled.NewChainStorer(), + PoolsHolder: pcf.data.Datapool(), + EconomicsFee: &processDisabled.FeeHandler{}, + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + } + + intermediateProcessorsFactory, err := shard.NewIntermediateProcessorsContainerFactory(argsFactory) if err != nil { return args, nil, err } From 2528267c8c26907a1b87a1666ade468655b8ff33 Mon Sep 17 00:00:00 2001 From: jules01 Date: Thu, 27 Apr 2023 15:04:57 +0300 Subject: [PATCH 112/221] - linter issues --- cmd/keygenerator/converter/pidPubkeyConverter.go | 3 --- factory/processing/processComponentsForTxSimulator.go | 3 +++ vm/systemSmartContracts/eei.go | 3 +-- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/cmd/keygenerator/converter/pidPubkeyConverter.go b/cmd/keygenerator/converter/pidPubkeyConverter.go index 1cff0dfa0d7..41eeea15fa1 100644 --- a/cmd/keygenerator/converter/pidPubkeyConverter.go +++ b/cmd/keygenerator/converter/pidPubkeyConverter.go @@ -6,11 +6,8 @@ import ( "github.com/multiversx/mx-chain-crypto-go/signing/secp256k1" "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/p2p/factory" - logger "github.com/multiversx/mx-chain-logger-go" ) -var log = logger.GetOrCreate("cmd/keygenerator/converter") - type pidPubkeyConverter struct { keyGen crypto.KeyGenerator p2PKeyConverter p2p.P2PKeyConverter diff --git a/factory/processing/processComponentsForTxSimulator.go b/factory/processing/processComponentsForTxSimulator.go index db18a407ed9..d6a5568697e 100644 --- 
a/factory/processing/processComponentsForTxSimulator.go +++ b/factory/processing/processComponentsForTxSimulator.go @@ -111,6 +111,9 @@ func (pcf *processComponentsFactory) createArgsTxSimulatorProcessorForMeta( builtInFuncFactory.NFTStorageHandler(), builtInFuncFactory.ESDTGlobalSettingsHandler(), ) + if err != nil { + return args, nil, err + } vmContainer, err := vmContainerFactory.Create() if err != nil { diff --git a/vm/systemSmartContracts/eei.go b/vm/systemSmartContracts/eei.go index 32da253277f..d3ca57029e3 100644 --- a/vm/systemSmartContracts/eei.go +++ b/vm/systemSmartContracts/eei.go @@ -161,8 +161,7 @@ func (host *vmContext) SetStorage(key []byte, value []byte) { // GetBalance returns the balance of the given address func (host *vmContext) GetBalance(addr []byte) *big.Int { - strAdr := string(addr) - outAcc, exists := host.outputAccounts[strAdr] + outAcc, exists := host.outputAccounts[string(addr)] if exists { actualBalance := big.NewInt(0).Add(outAcc.Balance, outAcc.BalanceDelta) return actualBalance From 245e527ac22e5cf9baa66e176c47239566e67670 Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Thu, 27 Apr 2023 15:27:12 +0300 Subject: [PATCH 113/221] MX-14120: repopulate tokens supplies --- cmd/node/flags.go | 8 + config/contextFlagsConfig.go | 1 + epochStart/bootstrap/metaStorageHandler.go | 1 + epochStart/bootstrap/process.go | 1 + epochStart/bootstrap/shardStorageHandler.go | 1 + factory/consensus/consensusComponents.go | 5 + factory/data/dataComponents.go | 9 +- factory/processing/processComponents.go | 13 +- heartbeat/sender/baseSender.go | 2 +- integrationTests/testSyncNode.go | 2 + node/nodeRunner.go | 5 +- process/sync/argBootstrapper.go | 1 + process/sync/baseSync.go | 35 +++ process/sync/metablock_test.go | 1 + process/sync/shardblock.go | 7 + process/sync/shardblock_test.go | 1 + process/sync/trieIterators/errors.go | 11 + .../trieIterators/tokensSuppliesComputer.go | 144 ++++++++++++ .../tokensSuppliesComputer_test.go | 202 
++++++++++++++++ .../trieIterators/trieAccountsIterator.go | 103 ++++++++ .../trieAccountsIterator_test.go | 220 ++++++++++++++++++ storage/factory/storageServiceFactory.go | 49 +++- testscommon/components/components.go | 9 +- 23 files changed, 802 insertions(+), 29 deletions(-) create mode 100644 process/sync/trieIterators/errors.go create mode 100644 process/sync/trieIterators/tokensSuppliesComputer.go create mode 100644 process/sync/trieIterators/tokensSuppliesComputer_test.go create mode 100644 process/sync/trieIterators/trieAccountsIterator.go create mode 100644 process/sync/trieIterators/trieAccountsIterator_test.go diff --git a/cmd/node/flags.go b/cmd/node/flags.go index 0cb32cb937e..17334cc74f5 100644 --- a/cmd/node/flags.go +++ b/cmd/node/flags.go @@ -384,6 +384,12 @@ var ( Usage: "String flag for specifying the desired `operation mode`(s) of the node, resulting in altering some configuration values accordingly. Possible values are: snapshotless-observer, full-archive, db-lookup-extension, historical-balances or `\"\"` (empty). Multiple values can be separated via ,", Value: "", } + + // repopulateTokensSupplies defines a flag that, if set, will repopulate the tokens supplies database by iterating over the trie + repopulateTokensSupplies = cli.BoolFlag{ + Name: "repopulate-tokens-supplies", + Usage: "Boolean flag for repopulating the tokens supplies database. 
It will delete the current data, iterate over the entire trie and add he new obtained supplies", + } ) func getFlags() []cli.Flag { @@ -443,6 +449,7 @@ func getFlags() []cli.Flag { dbDirectory, logsDirectory, operationMode, + repopulateTokensSupplies, } } @@ -472,6 +479,7 @@ func getFlagsConfig(ctx *cli.Context, log logger.Logger) *config.ContextFlagsCon flagsConfig.NoKeyProvided = ctx.GlobalBool(noKey.Name) flagsConfig.SnapshotsEnabled = ctx.GlobalBool(snapshotsEnabled.Name) flagsConfig.OperationMode = ctx.GlobalString(operationMode.Name) + flagsConfig.RepopulateTokensSupplies = ctx.GlobalBool(repopulateTokensSupplies.Name) return flagsConfig } diff --git a/config/contextFlagsConfig.go b/config/contextFlagsConfig.go index 360eeabf349..c5ccc61bca1 100644 --- a/config/contextFlagsConfig.go +++ b/config/contextFlagsConfig.go @@ -28,6 +28,7 @@ type ContextFlagsConfig struct { NoKeyProvided bool SnapshotsEnabled bool OperationMode string + RepopulateTokensSupplies bool } // ImportDbConfig will hold the import-db parameters diff --git a/epochStart/bootstrap/metaStorageHandler.go b/epochStart/bootstrap/metaStorageHandler.go index 0a25fa08b45..7a036096e4f 100644 --- a/epochStart/bootstrap/metaStorageHandler.go +++ b/epochStart/bootstrap/metaStorageHandler.go @@ -54,6 +54,7 @@ func NewMetaStorageHandler( CreateTrieEpochRootHashStorer: false, NodeProcessingMode: nodeProcessingMode, SnapshotsEnabled: snapshotsEnabled, + RepopulateTokensSupplies: false, // tokens supplies cannot be repopulated at this time ManagedPeersHolder: managedPeersHolder, }, ) diff --git a/epochStart/bootstrap/process.go b/epochStart/bootstrap/process.go index 5e082537258..ea6e189962d 100644 --- a/epochStart/bootstrap/process.go +++ b/epochStart/bootstrap/process.go @@ -1123,6 +1123,7 @@ func (e *epochStartBootstrap) createStorageService( CreateTrieEpochRootHashStorer: createTrieEpochRootHashStorer, NodeProcessingMode: e.nodeProcessingMode, SnapshotsEnabled: e.flagsConfig.SnapshotsEnabled, + 
RepopulateTokensSupplies: e.flagsConfig.RepopulateTokensSupplies, ManagedPeersHolder: e.cryptoComponentsHolder.ManagedPeersHolder(), }) if err != nil { diff --git a/epochStart/bootstrap/shardStorageHandler.go b/epochStart/bootstrap/shardStorageHandler.go index 9ffc5384e31..2ffdfe23ccd 100644 --- a/epochStart/bootstrap/shardStorageHandler.go +++ b/epochStart/bootstrap/shardStorageHandler.go @@ -58,6 +58,7 @@ func NewShardStorageHandler( CreateTrieEpochRootHashStorer: false, NodeProcessingMode: nodeProcessingMode, SnapshotsEnabled: snapshotsEnabled, + RepopulateTokensSupplies: false, // tokens supplies cannot be repopulated at this time ManagedPeersHolder: managedPeersHolder, }, ) diff --git a/factory/consensus/consensusComponents.go b/factory/consensus/consensusComponents.go index a4060921bb8..c2e0404c630 100644 --- a/factory/consensus/consensusComponents.go +++ b/factory/consensus/consensusComponents.go @@ -39,6 +39,7 @@ const defaultSpan = 300 * time.Second // ConsensusComponentsFactoryArgs holds the arguments needed to create a consensus components factory type ConsensusComponentsFactoryArgs struct { Config config.Config + FlagsConfig config.ContextFlagsConfig BootstrapRoundIndex uint64 CoreComponents factory.CoreComponentsHolder NetworkComponents factory.NetworkComponentsHolder @@ -55,6 +56,7 @@ type ConsensusComponentsFactoryArgs struct { type consensusComponentsFactory struct { config config.Config + flagsConfig config.ContextFlagsConfig bootstrapRoundIndex uint64 coreComponents factory.CoreComponentsHolder networkComponents factory.NetworkComponentsHolder @@ -111,6 +113,7 @@ func NewConsensusComponentsFactory(args ConsensusComponentsFactoryArgs) (*consen return &consensusComponentsFactory{ config: args.Config, + flagsConfig: args.FlagsConfig, bootstrapRoundIndex: args.BootstrapRoundIndex, coreComponents: args.CoreComponents, networkComponents: args.NetworkComponents, @@ -503,6 +506,7 @@ func (ccf *consensusComponentsFactory) createShardBootstrapper() 
(process.Bootst HistoryRepo: ccf.processComponents.HistoryRepository(), ScheduledTxsExecutionHandler: ccf.processComponents.ScheduledTxsExecutionHandler(), ProcessWaitTime: time.Duration(ccf.config.GeneralSettings.SyncProcessTimeInMillis) * time.Millisecond, + RepopulateTokensSupplies: ccf.flagsConfig.RepopulateTokensSupplies, } argsShardBootstrapper := sync.ArgShardBootstrapper{ @@ -637,6 +641,7 @@ func (ccf *consensusComponentsFactory) createMetaChainBootstrapper() (process.Bo HistoryRepo: ccf.processComponents.HistoryRepository(), ScheduledTxsExecutionHandler: ccf.processComponents.ScheduledTxsExecutionHandler(), ProcessWaitTime: time.Duration(ccf.config.GeneralSettings.SyncProcessTimeInMillis) * time.Millisecond, + RepopulateTokensSupplies: ccf.flagsConfig.RepopulateTokensSupplies, } argsMetaBootstrapper := sync.ArgMetaBootstrapper{ diff --git a/factory/data/dataComponents.go b/factory/data/dataComponents.go index 20acad83209..a8c6ad482f4 100644 --- a/factory/data/dataComponents.go +++ b/factory/data/dataComponents.go @@ -27,10 +27,10 @@ type DataComponentsFactoryArgs struct { Core factory.CoreComponentsHolder StatusCore factory.StatusCoreComponentsHolder Crypto factory.CryptoComponentsHolder + FlagsConfigs config.ContextFlagsConfig CurrentEpoch uint32 CreateTrieEpochRootHashStorer bool NodeProcessingMode common.NodeProcessingMode - SnapshotsEnabled bool } type dataComponentsFactory struct { @@ -40,10 +40,10 @@ type dataComponentsFactory struct { core factory.CoreComponentsHolder statusCore factory.StatusCoreComponentsHolder crypto factory.CryptoComponentsHolder + flagsConfig config.ContextFlagsConfig currentEpoch uint32 createTrieEpochRootHashStorer bool nodeProcessingMode common.NodeProcessingMode - snapshotsEnabled bool } // dataComponents struct holds the data components @@ -94,8 +94,8 @@ func NewDataComponentsFactory(args DataComponentsFactoryArgs) (*dataComponentsFa statusCore: args.StatusCore, currentEpoch: args.CurrentEpoch, 
createTrieEpochRootHashStorer: args.CreateTrieEpochRootHashStorer, + flagsConfig: args.FlagsConfigs, nodeProcessingMode: args.NodeProcessingMode, - snapshotsEnabled: args.SnapshotsEnabled, crypto: args.Crypto, }, nil } @@ -186,7 +186,8 @@ func (dcf *dataComponentsFactory) createDataStoreFromConfig() (dataRetriever.Sto StorageType: storageFactory.ProcessStorageService, CreateTrieEpochRootHashStorer: dcf.createTrieEpochRootHashStorer, NodeProcessingMode: dcf.nodeProcessingMode, - SnapshotsEnabled: dcf.snapshotsEnabled, + SnapshotsEnabled: dcf.flagsConfig.SnapshotsEnabled, + RepopulateTokensSupplies: dcf.flagsConfig.RepopulateTokensSupplies, ManagedPeersHolder: dcf.crypto.ManagedPeersHolder(), }) if err != nil { diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index 205e528c1f1..d9ea9e8a935 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -144,7 +144,7 @@ type ProcessComponentsFactoryArgs struct { ImportStartHandler update.ImportStartHandler WorkingDir string HistoryRepo dblookupext.HistoryRepository - SnapshotsEnabled bool + FlagsConfig config.ContextFlagsConfig Data factory.DataComponentsHolder CoreData factory.CoreComponentsHolder @@ -177,7 +177,7 @@ type processComponentsFactory struct { historyRepo dblookupext.HistoryRepository epochNotifier process.EpochNotifier importHandler update.ImportHandler - snapshotsEnabled bool + flagsConfig config.ContextFlagsConfig esdtNftStorage vmcommon.ESDTNFTStorageHandler data factory.DataComponentsHolder @@ -224,7 +224,7 @@ func NewProcessComponentsFactory(args ProcessComponentsFactoryArgs) (*processCom historyRepo: args.HistoryRepo, epochNotifier: args.CoreData.EpochNotifier(), statusCoreComponents: args.StatusCoreComponents, - snapshotsEnabled: args.SnapshotsEnabled, + flagsConfig: args.FlagsConfig, }, nil } @@ -1505,7 +1505,8 @@ func (pcf *processComponentsFactory) newStorageRequesters() (dataRetriever.Reque StorageType: 
storageFactory.ProcessStorageService, CreateTrieEpochRootHashStorer: false, NodeProcessingMode: common.GetNodeProcessingMode(&pcf.importDBConfig), - SnapshotsEnabled: pcf.snapshotsEnabled, + SnapshotsEnabled: pcf.flagsConfig.SnapshotsEnabled, + RepopulateTokensSupplies: pcf.flagsConfig.RepopulateTokensSupplies, ManagedPeersHolder: pcf.crypto.ManagedPeersHolder(), }, ) @@ -1559,7 +1560,7 @@ func (pcf *processComponentsFactory) createStorageRequestersForMeta( DataPacker: dataPacker, ManualEpochStartNotifier: manualEpochStartNotifier, ChanGracefullyClose: pcf.coreData.ChanStopNodeProcess(), - SnapshotsEnabled: pcf.snapshotsEnabled, + SnapshotsEnabled: pcf.flagsConfig.SnapshotsEnabled, } requestersContainerFactory, err := storagerequesterscontainer.NewMetaRequestersContainerFactory(requestersContainerFactoryArgs) if err != nil { @@ -1592,7 +1593,7 @@ func (pcf *processComponentsFactory) createStorageRequestersForShard( DataPacker: dataPacker, ManualEpochStartNotifier: manualEpochStartNotifier, ChanGracefullyClose: pcf.coreData.ChanStopNodeProcess(), - SnapshotsEnabled: pcf.snapshotsEnabled, + SnapshotsEnabled: pcf.flagsConfig.SnapshotsEnabled, } requestersContainerFactory, err := storagerequesterscontainer.NewShardRequestersContainerFactory(requestersContainerFactoryArgs) if err != nil { diff --git a/heartbeat/sender/baseSender.go b/heartbeat/sender/baseSender.go index ac438148d31..cf7a7787c1f 100644 --- a/heartbeat/sender/baseSender.go +++ b/heartbeat/sender/baseSender.go @@ -80,7 +80,7 @@ func checkBaseSenderArgs(args argBaseSender) error { return fmt.Errorf("%w for timeBetweenSendsWhenError", heartbeat.ErrInvalidTimeDuration) } if args.thresholdBetweenSends < minThresholdBetweenSends || args.thresholdBetweenSends > maxThresholdBetweenSends { - return fmt.Errorf("%w for thresholdBetweenSends, receieved %f, min allowed %f, max allowed %f", + return fmt.Errorf("%w for thresholdBetweenSends, received %f, min allowed %f, max allowed %f", heartbeat.ErrInvalidThreshold, 
args.thresholdBetweenSends, minThresholdBetweenSends, maxThresholdBetweenSends) } if check.IfNil(args.privKey) { diff --git a/integrationTests/testSyncNode.go b/integrationTests/testSyncNode.go index 8b2b72d5419..9b78bd8127d 100644 --- a/integrationTests/testSyncNode.go +++ b/integrationTests/testSyncNode.go @@ -170,6 +170,7 @@ func (tpn *TestProcessorNode) createShardBootstrapper() (TestBootstrapper, error HistoryRepo: &dblookupext.HistoryRepositoryStub{}, ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, ProcessWaitTime: tpn.RoundHandler.TimeDuration(), + RepopulateTokensSupplies: false, } argsShardBootstrapper := sync.ArgShardBootstrapper{ @@ -215,6 +216,7 @@ func (tpn *TestProcessorNode) createMetaChainBootstrapper() (TestBootstrapper, e HistoryRepo: &dblookupext.HistoryRepositoryStub{}, ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, ProcessWaitTime: tpn.RoundHandler.TimeDuration(), + RepopulateTokensSupplies: false, } argsMetaBootstrapper := sync.ArgMetaBootstrapper{ diff --git a/node/nodeRunner.go b/node/nodeRunner.go index 99d46934d65..1af3cadca68 100644 --- a/node/nodeRunner.go +++ b/node/nodeRunner.go @@ -856,6 +856,7 @@ func (nr *nodeRunner) CreateManagedConsensusComponents( consensusArgs := consensusComp.ConsensusComponentsFactoryArgs{ Config: *nr.configs.GeneralConfig, + FlagsConfig: *nr.configs.FlagsConfig, BootstrapRoundIndex: nr.configs.FlagsConfig.BootstrapRoundIndex, CoreComponents: coreComponents, NetworkComponents: networkComponents, @@ -1227,7 +1228,7 @@ func (nr *nodeRunner) CreateManagedProcessComponents( ImportStartHandler: importStartHandler, WorkingDir: configs.FlagsConfig.WorkingDir, HistoryRepo: historyRepository, - SnapshotsEnabled: configs.FlagsConfig.SnapshotsEnabled, + FlagsConfig: *configs.FlagsConfig, } processComponentsFactory, err := processComp.NewProcessComponentsFactory(processArgs) if err != nil { @@ -1271,8 +1272,8 @@ func (nr *nodeRunner) CreateManagedDataComponents( Crypto: 
crypto, CurrentEpoch: storerEpoch, CreateTrieEpochRootHashStorer: configs.ImportDbConfig.ImportDbSaveTrieEpochRootHash, + FlagsConfigs: *configs.FlagsConfig, NodeProcessingMode: common.GetNodeProcessingMode(nr.configs.ImportDbConfig), - SnapshotsEnabled: configs.FlagsConfig.SnapshotsEnabled, } dataComponentsFactory, err := dataComp.NewDataComponentsFactory(dataArgs) diff --git a/process/sync/argBootstrapper.go b/process/sync/argBootstrapper.go index 9441c595e7b..ec3f64a58d8 100644 --- a/process/sync/argBootstrapper.go +++ b/process/sync/argBootstrapper.go @@ -47,6 +47,7 @@ type ArgBaseBootstrapper struct { IsInImportMode bool ScheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler ProcessWaitTime time.Duration + RepopulateTokensSupplies bool } // ArgShardBootstrapper holds all dependencies required by the bootstrap data factory in order to create diff --git a/process/sync/baseSync.go b/process/sync/baseSync.go index 2c011e50c10..51c6cd86b05 100644 --- a/process/sync/baseSync.go +++ b/process/sync/baseSync.go @@ -23,6 +23,7 @@ import ( "github.com/multiversx/mx-chain-go/outport" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/sync/storageBootstrap/metricsLoader" + "github.com/multiversx/mx-chain-go/process/sync/trieIterators" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/storage" @@ -123,6 +124,8 @@ type baseBootstrap struct { isInImportMode bool scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler processWaitTime time.Duration + + repopulateTokensSupplies bool } // setRequestedHeaderNonce method sets the header nonce requested by the sync mechanism @@ -1191,6 +1194,38 @@ func (boot *baseBootstrap) GetNodeState() common.NodeState { return common.NsNotSynchronized } +func (boot *baseBootstrap) handleAccountsTrieIteration() error { + // change this if more trie accounts iterators handlers are needed + if 
!boot.repopulateTokensSupplies { + return nil + } + + argsTrieAccountsIteratorProc := trieIterators.ArgsTrieAccountsIterator{ + Marshaller: boot.marshalizer, + Accounts: boot.accounts, + } + trieAccountsIteratorProc, err := trieIterators.NewTrieAccountsIterator(argsTrieAccountsIteratorProc) + if err != nil { + return err + } + + argsTokensSuppliesProc := trieIterators.ArgsTokensSuppliesProcessor{ + StorageService: boot.store, + Marshaller: boot.marshalizer, + } + tokensSuppliesProc, err := trieIterators.NewTokensSuppliesProcessor(argsTokensSuppliesProc) + if err != nil { + return err + } + + err = trieAccountsIteratorProc.Process(tokensSuppliesProc.HandleTrieAccountIteration) + if err != nil { + return err + } + + return tokensSuppliesProc.SaveSupplies() +} + // Close will close the endless running go routine func (boot *baseBootstrap) Close() error { if boot.cancelFunc != nil { diff --git a/process/sync/metablock_test.go b/process/sync/metablock_test.go index 3f8ddd83267..7104359c5ce 100644 --- a/process/sync/metablock_test.go +++ b/process/sync/metablock_test.go @@ -89,6 +89,7 @@ func CreateMetaBootstrapMockArguments() sync.ArgMetaBootstrapper { HistoryRepo: &dblookupext.HistoryRepositoryStub{}, ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, ProcessWaitTime: testProcessWaitTime, + RepopulateTokensSupplies: false, } argsMetaBootstrapper := sync.ArgMetaBootstrapper{ diff --git a/process/sync/shardblock.go b/process/sync/shardblock.go index 10965aec981..749a4c85699 100644 --- a/process/sync/shardblock.go +++ b/process/sync/shardblock.go @@ -2,6 +2,7 @@ package sync import ( "context" + "fmt" "math" "strings" @@ -67,6 +68,7 @@ func NewShardBootstrap(arguments ArgShardBootstrapper) (*ShardBootstrap, error) historyRepo: arguments.HistoryRepo, scheduledTxsExecutionHandler: arguments.ScheduledTxsExecutionHandler, processWaitTime: arguments.ProcessWaitTime, + repopulateTokensSupplies: arguments.RepopulateTokensSupplies, } if base.isInImportMode { 
@@ -134,6 +136,11 @@ func (boot *ShardBootstrap) StartSyncingBlocks() { var ctx context.Context ctx, boot.cancelFunc = context.WithCancel(context.Background()) + + err := boot.handleAccountsTrieIteration() + if err != nil { + panic(fmt.Sprintf("cannot handle start-up trie accounts iteration: %s", err.Error())) + } go boot.syncBlocks(ctx) } diff --git a/process/sync/shardblock_test.go b/process/sync/shardblock_test.go index 90acef3f45d..768a13a3342 100644 --- a/process/sync/shardblock_test.go +++ b/process/sync/shardblock_test.go @@ -215,6 +215,7 @@ func CreateShardBootstrapMockArguments() sync.ArgShardBootstrapper { HistoryRepo: &dblookupext.HistoryRepositoryStub{}, ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, ProcessWaitTime: testProcessWaitTime, + RepopulateTokensSupplies: false, } argsShardBootstrapper := sync.ArgShardBootstrapper{ diff --git a/process/sync/trieIterators/errors.go b/process/sync/trieIterators/errors.go new file mode 100644 index 00000000000..5fc876f1de3 --- /dev/null +++ b/process/sync/trieIterators/errors.go @@ -0,0 +1,11 @@ +package trieIterators + +import "errors" + +var errNilAccountsAdapter = errors.New("nil accounts adapter") + +var errNilStorageService = errors.New("nil storage service") + +var errNilMarshaller = errors.New("nil marshaller") + +var errNilUserAccount = errors.New("nil user account") diff --git a/process/sync/trieIterators/tokensSuppliesComputer.go b/process/sync/trieIterators/tokensSuppliesComputer.go new file mode 100644 index 00000000000..8dd87fe75e5 --- /dev/null +++ b/process/sync/trieIterators/tokensSuppliesComputer.go @@ -0,0 +1,144 @@ +package trieIterators + +import ( + "bytes" + "context" + "fmt" + "math/big" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data/esdt" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" + 
"github.com/multiversx/mx-chain-go/common/errChan" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/dblookupext/esdtSupply" + "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/trie/keyBuilder" +) + +type tokensSuppliesProcessor struct { + storageService dataRetriever.StorageService + marshaller marshal.Marshalizer + tokensSupplies map[string]*big.Int +} + +// ArgsTokensSuppliesProcessor is the arguments struct for NewTokensSuppliesProcessor +type ArgsTokensSuppliesProcessor struct { + StorageService dataRetriever.StorageService + Marshaller marshal.Marshalizer +} + +// NewTokensSuppliesProcessor returns a new instance of tokensSuppliesProcessor +func NewTokensSuppliesProcessor(args ArgsTokensSuppliesProcessor) (*tokensSuppliesProcessor, error) { + if check.IfNil(args.StorageService) { + return nil, errNilStorageService + } + if check.IfNil(args.Marshaller) { + return nil, errNilMarshaller + } + + return &tokensSuppliesProcessor{ + storageService: args.StorageService, + marshaller: args.Marshaller, + tokensSupplies: make(map[string]*big.Int), + }, nil +} + +// HandleTrieAccountIteration is the handler for the trie account iteration +func (t *tokensSuppliesProcessor) HandleTrieAccountIteration(userAccount state.UserAccountHandler) error { + if check.IfNil(userAccount) { + return errNilUserAccount + } + if bytes.Equal(core.SystemAccountAddress, userAccount.AddressBytes()) { + log.Debug("repopulate tokens supplies: skipping system account address") + return nil + } + rh := userAccount.GetRootHash() + if len(rh) == 0 { + return nil + } + + dataTrie := &common.TrieIteratorChannels{ + LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), + ErrChan: errChan.NewErrChanWrapper(), + } + + errDataTrieGet := userAccount.DataTrie().GetAllLeavesOnChannel(dataTrie, context.Background(), rh, keyBuilder.NewKeyBuilder()) + if errDataTrieGet != nil { + return errDataTrieGet + } + + 
log.Trace("extractTokensSupplies - parsing account", "address", userAccount.AddressBytes()) + esdtPrefix := []byte(core.ProtectedKeyPrefix + core.ESDTKeyIdentifier) + for userLeaf := range dataTrie.LeavesChan { + if !bytes.HasPrefix(userLeaf.Key(), esdtPrefix) { + continue + } + + tokenKey := userLeaf.Key() + lenESDTPrefix := len(esdtPrefix) + suffix := append(userLeaf.Key(), userAccount.AddressBytes()...) + value, errVal := userLeaf.ValueWithoutSuffix(suffix) + if errVal != nil { + log.Warn("cannot get value without suffix", "error", errVal, "key", userLeaf.Key()) + return errVal + } + var esToken esdt.ESDigitalToken + err := t.marshaller.Unmarshal(&esToken, value) + if err != nil { + return err + } + + tokenName := string(tokenKey)[lenESDTPrefix:] + tokenID, nonce := common.ExtractTokenIDAndNonceFromTokenStorageKey([]byte(tokenName)) + tokenIDStr := string(tokenID) + if nonce > 0 { + tokenIDStr += fmt.Sprintf("-%d", nonce) + } + + tokenSupply, found := t.tokensSupplies[tokenIDStr] + if !found { + t.tokensSupplies[tokenIDStr] = esToken.Value + } else { + tokenSupply = big.NewInt(0).Add(tokenSupply, esToken.Value) + t.tokensSupplies[tokenIDStr] = tokenSupply + } + } + + err := dataTrie.ErrChan.ReadFromChanNonBlocking() + if err != nil { + return fmt.Errorf("error while iterating over an account's trie: %w", err) + } + + return nil +} + +// SaveSupplies will store the recomputed tokens supplies into the database +func (t *tokensSuppliesProcessor) SaveSupplies() error { + suppliesStorer, err := t.storageService.GetStorer(dataRetriever.ESDTSuppliesUnit) + if err != nil { + return err + } + + for tokenName, supply := range t.tokensSupplies { + log.Trace("repopulate tokens supplies", "token", tokenName, "supply", supply.String()) + supplyObj := &esdtSupply.SupplyESDT{ + Supply: supply, + } + supplyObjBytes, err := t.marshaller.Marshal(supplyObj) + if err != nil { + return err + } + + err = suppliesStorer.Put([]byte(tokenName), supplyObjBytes) + if err != nil { + return 
fmt.Errorf("%w while saving recomputed supply of the token %s", err, tokenName) + } + } + + log.Debug("finished the repopulation of the tokens supplies", "num tokens", len(t.tokensSupplies)) + + return nil +} diff --git a/process/sync/trieIterators/tokensSuppliesComputer_test.go b/process/sync/trieIterators/tokensSuppliesComputer_test.go new file mode 100644 index 00000000000..16e6fe1bd9a --- /dev/null +++ b/process/sync/trieIterators/tokensSuppliesComputer_test.go @@ -0,0 +1,202 @@ +package trieIterators + +import ( + "context" + "errors" + "math/big" + "testing" + + "github.com/multiversx/mx-chain-core-go/core/keyValStorage" + "github.com/multiversx/mx-chain-core-go/data/esdt" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/dataRetriever" + esdtSupply2 "github.com/multiversx/mx-chain-go/dblookupext/esdtSupply" + "github.com/multiversx/mx-chain-go/state" + storage2 "github.com/multiversx/mx-chain-go/storage" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/genericMocks" + stateMock "github.com/multiversx/mx-chain-go/testscommon/state" + "github.com/multiversx/mx-chain-go/testscommon/storage" + "github.com/multiversx/mx-chain-go/testscommon/trie" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/stretchr/testify/require" +) + +func getTokensSuppliesProcessorArgs() ArgsTokensSuppliesProcessor { + return ArgsTokensSuppliesProcessor{ + StorageService: &genericMocks.ChainStorerMock{}, + Marshaller: &testscommon.MarshalizerMock{}, + } +} + +func TestNewTokensSuppliesProcessor(t *testing.T) { + t.Parallel() + + t.Run("nil storage service", func(t *testing.T) { + t.Parallel() + + args := getTokensSuppliesProcessorArgs() + args.StorageService = nil + + tsp, err := NewTokensSuppliesProcessor(args) + require.Nil(t, tsp) + require.Equal(t, errNilStorageService, err) + }) + + t.Run("nil marshaller", func(t *testing.T) { + t.Parallel() + + args := 
getTokensSuppliesProcessorArgs() + args.Marshaller = nil + + tsp, err := NewTokensSuppliesProcessor(args) + require.Nil(t, tsp) + require.Equal(t, errNilMarshaller, err) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + args := getTokensSuppliesProcessorArgs() + + tsp, err := NewTokensSuppliesProcessor(args) + require.NotNil(t, tsp) + require.NoError(t, err) + }) +} + +func TestTokensSuppliesProcessor_HandleTrieAccountIteration(t *testing.T) { + t.Parallel() + + t.Run("nil user account", func(t *testing.T) { + t.Parallel() + + tsp, _ := NewTokensSuppliesProcessor(getTokensSuppliesProcessorArgs()) + err := tsp.HandleTrieAccountIteration(nil) + require.Equal(t, errNilUserAccount, err) + }) + + t.Run("should skip system account", func(t *testing.T) { + t.Parallel() + + tsp, _ := NewTokensSuppliesProcessor(getTokensSuppliesProcessorArgs()) + + userAcc := stateMock.NewAccountWrapMock(vmcommon.SystemAccountAddress) + err := tsp.HandleTrieAccountIteration(userAcc) + require.NoError(t, err) + }) + + t.Run("empty root hash of account", func(t *testing.T) { + t.Parallel() + + tsp, _ := NewTokensSuppliesProcessor(getTokensSuppliesProcessorArgs()) + + userAcc := stateMock.NewAccountWrapMock([]byte("addr")) + err := tsp.HandleTrieAccountIteration(userAcc) + require.NoError(t, err) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + args := getTokensSuppliesProcessorArgs() + tsp, _ := NewTokensSuppliesProcessor(args) + + userAcc, _ := state.NewUserAccount([]byte("addr")) + userAcc.SetRootHash([]byte("rootHash")) + userAcc.SetDataTrie(&trie.TrieStub{ + GetAllLeavesOnChannelCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte, keyBuilder common.KeyBuilder) error { + esToken := &esdt.ESDigitalToken{ + Value: big.NewInt(37), + } + esBytes, _ := args.Marshaller.Marshal(esToken) + tknKey := []byte("ELRONDesdtTKN-00aacc") + value := append(esBytes, tknKey...) + value = append(value, []byte("addr")...) 
+ leavesChannels.LeavesChan <- keyValStorage.NewKeyValStorage(tknKey, value) + + sft := &esdt.ESDigitalToken{ + Value: big.NewInt(1), + } + sftBytes, _ := args.Marshaller.Marshal(sft) + sftKey := []byte("ELRONDesdtSFT-00aabb") + sftKey = append(sftKey, big.NewInt(37).Bytes()...) + value = append(sftBytes, sftKey...) + value = append(value, []byte("addr")...) + leavesChannels.LeavesChan <- keyValStorage.NewKeyValStorage(sftKey, value) + + close(leavesChannels.LeavesChan) + return nil + }, + }) + + err := tsp.HandleTrieAccountIteration(userAcc) + require.NoError(t, err) + + err = tsp.HandleTrieAccountIteration(userAcc) + require.NoError(t, err) + + expectedSupplies := map[string]*big.Int{ + "SFT-00aabb-37": big.NewInt(2), + "TKN-00aacc": big.NewInt(74), + } + require.Equal(t, expectedSupplies, tsp.tokensSupplies) + }) +} + +func TestTokensSuppliesProcessor_SaveSupplies(t *testing.T) { + t.Parallel() + + t.Run("cannot find esdt supplies storer", func(t *testing.T) { + t.Parallel() + + errStorerNotFound := errors.New("storer not found") + args := getTokensSuppliesProcessorArgs() + args.StorageService = &storage.ChainStorerStub{ + GetStorerCalled: func(unitType dataRetriever.UnitType) (storage2.Storer, error) { + return nil, errStorerNotFound + }, + } + tsp, _ := NewTokensSuppliesProcessor(args) + err := tsp.SaveSupplies() + require.Equal(t, errStorerNotFound, err) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + savedItems := make(map[string][]byte) + args := getTokensSuppliesProcessorArgs() + args.StorageService = &storage.ChainStorerStub{ + GetStorerCalled: func(unitType dataRetriever.UnitType) (storage2.Storer, error) { + return &storage.StorerStub{ + PutCalled: func(key, data []byte) error { + savedItems[string(key)] = data + return nil + }, + }, nil + }, + } + tsp, _ := NewTokensSuppliesProcessor(args) + + supplies := map[string]*big.Int{ + "SFT-00aabb-37": big.NewInt(2), + "TKN-00aacc": big.NewInt(74), + } + tsp.tokensSupplies = supplies + + 
err := tsp.SaveSupplies() + require.NoError(t, err) + + checkStoredSupply := func(t *testing.T, key string, storedValue []byte, expectedSupply *big.Int) { + supply := esdtSupply2.SupplyESDT{} + _ = args.Marshaller.Unmarshal(&supply, storedValue) + require.Equal(t, expectedSupply, supply.Supply) + } + + require.Len(t, savedItems, 2) + for key, value := range savedItems { + checkStoredSupply(t, key, value, supplies[key]) + } + }) +} diff --git a/process/sync/trieIterators/trieAccountsIterator.go b/process/sync/trieIterators/trieAccountsIterator.go new file mode 100644 index 00000000000..e54ea193d5d --- /dev/null +++ b/process/sync/trieIterators/trieAccountsIterator.go @@ -0,0 +1,103 @@ +package trieIterators + +import ( + "context" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/common/errChan" + "github.com/multiversx/mx-chain-go/dataRetriever" + "github.com/multiversx/mx-chain-go/state" + logger "github.com/multiversx/mx-chain-logger-go" +) + +var log = logger.GetOrCreate("trieIterators") + +type trieAccountIteratorHandler func(account state.UserAccountHandler) error + +type trieAccountsIterator struct { + marshaller marshal.Marshalizer + storageService dataRetriever.StorageService + accounts state.AccountsAdapter +} + +// ArgsTrieAccountsIterator holds the arguments needed to create a new trie Accounts iterator +type ArgsTrieAccountsIterator struct { + Marshaller marshal.Marshalizer + Accounts state.AccountsAdapter +} + +// NewTrieAccountsIterator returns a new instance of trieAccountsIterator +func NewTrieAccountsIterator(args ArgsTrieAccountsIterator) (*trieAccountsIterator, error) { + if check.IfNil(args.Marshaller) { + return nil, errNilMarshaller + } + if check.IfNil(args.Accounts) { + return nil, errNilAccountsAdapter + } + + return &trieAccountsIterator{ + marshaller: 
args.Marshaller, + accounts: args.Accounts, + }, nil +} + +// Process will iterate over the entire trie and iterate over the Accounts while calling the received handlers +func (t *trieAccountsIterator) Process(handlers ...trieAccountIteratorHandler) error { + rootHash, err := t.accounts.RootHash() + if err != nil { + return err + } + + iteratorChannels := &common.TrieIteratorChannels{ + LeavesChan: make(chan core.KeyValueHolder, common.TrieLeavesChannelDefaultCapacity), + ErrChan: errChan.NewErrChanWrapper(), + } + err = t.accounts.GetAllLeaves(iteratorChannels, context.Background(), rootHash) + if err != nil { + return err + } + + log.Debug("starting the trie's accounts iteration with calling the handlers") + for leaf := range iteratorChannels.LeavesChan { + userAddress, isAccount := t.getAddress(leaf) + if !isAccount { + continue + } + + acc, err := t.accounts.GetExistingAccount(userAddress) + if err != nil { + return err + } + + userAccount, ok := acc.(state.UserAccountHandler) + if !ok { + continue + } + + for _, handler := range handlers { + err = handler(userAccount) + if err != nil { + return err + } + } + } + + return nil +} + +func (t *trieAccountsIterator) getAddress(kv core.KeyValueHolder) ([]byte, bool) { + userAccount := &state.UserAccountData{} + errUnmarshal := t.marshaller.Unmarshal(userAccount, kv.Value()) + if errUnmarshal != nil { + // probably a code node + return nil, false + } + if len(userAccount.RootHash) == 0 { + return nil, false + } + + return kv.Key(), true +} diff --git a/process/sync/trieIterators/trieAccountsIterator_test.go b/process/sync/trieIterators/trieAccountsIterator_test.go new file mode 100644 index 00000000000..bab0b88a1b5 --- /dev/null +++ b/process/sync/trieIterators/trieAccountsIterator_test.go @@ -0,0 +1,220 @@ +package trieIterators + +import ( + "context" + "errors" + "testing" + + "github.com/multiversx/mx-chain-core-go/core/keyValStorage" + "github.com/multiversx/mx-chain-go/common" + 
"github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/testscommon" + stateMock "github.com/multiversx/mx-chain-go/testscommon/state" + vmcommon "github.com/multiversx/mx-chain-vm-common-go" + "github.com/stretchr/testify/require" +) + +func getTrieAccountsIteratorArgs() ArgsTrieAccountsIterator { + return ArgsTrieAccountsIterator{ + Marshaller: &testscommon.MarshalizerMock{}, + Accounts: &stateMock.AccountsStub{}, + } +} + +func TestNewTrieAccountsIterator(t *testing.T) { + t.Parallel() + + t.Run("nil marshaller", func(t *testing.T) { + t.Parallel() + + args := getTrieAccountsIteratorArgs() + args.Marshaller = nil + + tai, err := NewTrieAccountsIterator(args) + require.Nil(t, tai) + require.Equal(t, errNilMarshaller, err) + }) + + t.Run("nil accounts", func(t *testing.T) { + t.Parallel() + + args := getTrieAccountsIteratorArgs() + args.Accounts = nil + + tai, err := NewTrieAccountsIterator(args) + require.Nil(t, tai) + require.Equal(t, errNilAccountsAdapter, err) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + args := getTrieAccountsIteratorArgs() + tai, err := NewTrieAccountsIterator(args) + require.NotNil(t, tai) + require.NoError(t, err) + }) +} + +func TestTrieAccountsIterator_Process(t *testing.T) { + t.Parallel() + + var expectedErr = errors.New("expected error") + + t.Run("cannot get root hash", func(t *testing.T) { + t.Parallel() + + args := getTrieAccountsIteratorArgs() + args.Accounts = &stateMock.AccountsStub{ + RootHashCalled: func() ([]byte, error) { + return nil, expectedErr + }, + } + tai, _ := NewTrieAccountsIterator(args) + + err := tai.Process() + require.Equal(t, expectedErr, err) + }) + + t.Run("cannot get all leaves", func(t *testing.T) { + t.Parallel() + + args := getTrieAccountsIteratorArgs() + args.Accounts = &stateMock.AccountsStub{ + RootHashCalled: func() ([]byte, error) { + return []byte("rootHash"), nil + }, + GetAllLeavesCalled: func(_ *common.TrieIteratorChannels, _ context.Context, _ 
[]byte) error { + return expectedErr + }, + } + tai, _ := NewTrieAccountsIterator(args) + + err := tai.Process() + require.Equal(t, expectedErr, err) + }) + + t.Run("cannot get existing account", func(t *testing.T) { + t.Parallel() + + args := getTrieAccountsIteratorArgs() + args.Accounts = &stateMock.AccountsStub{ + RootHashCalled: func() ([]byte, error) { + return []byte("rootHash"), nil + }, + GetAllLeavesCalled: func(iter *common.TrieIteratorChannels, _ context.Context, _ []byte) error { + userAcc := &stateMock.AccountWrapMock{ + RootHash: []byte("rootHash"), + } + userAccBytes, _ := args.Marshaller.Marshal(userAcc) + iter.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("addr"), userAccBytes) + close(iter.LeavesChan) + return nil + }, + GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { + return nil, expectedErr + }, + } + tai, _ := NewTrieAccountsIterator(args) + + err := tai.Process() + require.Equal(t, expectedErr, err) + }) + + t.Run("should work without handlers", func(t *testing.T) { + t.Parallel() + + args := getTrieAccountsIteratorArgs() + args.Accounts = &stateMock.AccountsStub{ + RootHashCalled: func() ([]byte, error) { + return []byte("rootHash"), nil + }, + GetAllLeavesCalled: func(iter *common.TrieIteratorChannels, _ context.Context, _ []byte) error { + userAcc := &stateMock.AccountWrapMock{ + RootHash: []byte("rootHash"), + } + userAccBytes, _ := args.Marshaller.Marshal(userAcc) + iter.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("addr"), userAccBytes) + close(iter.LeavesChan) + return nil + }, + GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { + return &stateMock.AccountWrapMock{}, nil + }, + } + tai, _ := NewTrieAccountsIterator(args) + + err := tai.Process() + require.NoError(t, err) + }) + + t.Run("one handler returns error, should error", func(t *testing.T) { + t.Parallel() + + handler1 := func(account state.UserAccountHandler) error { + return nil + 
} + handler2 := func(account state.UserAccountHandler) error { + return expectedErr + } + args := getTrieAccountsIteratorArgs() + args.Accounts = &stateMock.AccountsStub{ + RootHashCalled: func() ([]byte, error) { + return []byte("rootHash"), nil + }, + GetAllLeavesCalled: func(iter *common.TrieIteratorChannels, _ context.Context, _ []byte) error { + userAcc := &stateMock.AccountWrapMock{ + RootHash: []byte("rootHash"), + } + userAccBytes, _ := args.Marshaller.Marshal(userAcc) + iter.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("addr"), userAccBytes) + close(iter.LeavesChan) + return nil + }, + GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { + return &stateMock.AccountWrapMock{RootHash: []byte("rootHash")}, nil + }, + } + tai, _ := NewTrieAccountsIterator(args) + + err := tai.Process(handler1, handler2) + require.Equal(t, expectedErr, err) + }) + + t.Run("should work with handlers", func(t *testing.T) { + t.Parallel() + + handlersReceived := make(map[int]struct{}) + handler1 := func(account state.UserAccountHandler) error { + handlersReceived[0] = struct{}{} + return nil + } + handler2 := func(account state.UserAccountHandler) error { + handlersReceived[1] = struct{}{} + return nil + } + args := getTrieAccountsIteratorArgs() + args.Accounts = &stateMock.AccountsStub{ + RootHashCalled: func() ([]byte, error) { + return []byte("rootHash"), nil + }, + GetAllLeavesCalled: func(iter *common.TrieIteratorChannels, _ context.Context, _ []byte) error { + userAcc := &stateMock.AccountWrapMock{ + RootHash: []byte("rootHash"), + } + userAccBytes, _ := args.Marshaller.Marshal(userAcc) + iter.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("addr"), userAccBytes) + close(iter.LeavesChan) + return nil + }, + GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { + return &stateMock.AccountWrapMock{RootHash: []byte("rootHash")}, nil + }, + } + tai, _ := NewTrieAccountsIterator(args) + + err := 
tai.Process(handler1, handler2) + require.NoError(t, err) + require.Len(t, handlersReceived, 2) + }) +} diff --git a/storage/factory/storageServiceFactory.go b/storage/factory/storageServiceFactory.go index df34577ceb1..d1030f1a479 100644 --- a/storage/factory/storageServiceFactory.go +++ b/storage/factory/storageServiceFactory.go @@ -3,6 +3,7 @@ package factory import ( "fmt" "path/filepath" + "time" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" @@ -51,6 +52,7 @@ type StorageServiceFactory struct { storageType StorageServiceType nodeProcessingMode common.NodeProcessingMode snapshotsEnabled bool + repopulateTokensSupplies bool } // StorageServiceFactoryArgs holds the arguments needed for creating a new storage service factory @@ -67,6 +69,7 @@ type StorageServiceFactoryArgs struct { CreateTrieEpochRootHashStorer bool NodeProcessingMode common.NodeProcessingMode SnapshotsEnabled bool + RepopulateTokensSupplies bool } // NewStorageServiceFactory will return a new instance of StorageServiceFactory @@ -101,6 +104,7 @@ func NewStorageServiceFactory(args StorageServiceFactoryArgs) (*StorageServiceFa storageType: args.StorageType, nodeProcessingMode: args.NodeProcessingMode, snapshotsEnabled: args.SnapshotsEnabled, + repopulateTokensSupplies: args.RepopulateTokensSupplies, }, nil } @@ -291,12 +295,12 @@ func (psf *StorageServiceFactory) CreateForShard() (dataRetriever.StorageService hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(psf.shardCoordinator.SelfId()) store.AddStorer(hdrNonceHashDataUnit, shardHdrHashNonceUnit) - err = psf.setupDbLookupExtensions(store) + err = psf.setUpDbLookupExtensions(store) if err != nil { return nil, err } - err = psf.setupLogsAndEventsStorer(store) + err = psf.setUpLogsAndEventsStorer(store) if err != nil { return nil, err } @@ -351,12 +355,12 @@ func (psf *StorageServiceFactory) CreateForMeta() (dataRetriever.StorageService, 
store.AddStorer(hdrNonceHashDataUnit, shardHdrHashNonceUnits[i]) } - err = psf.setupDbLookupExtensions(store) + err = psf.setUpDbLookupExtensions(store) if err != nil { return nil, err } - err = psf.setupLogsAndEventsStorer(store) + err = psf.setUpLogsAndEventsStorer(store) if err != nil { return nil, err } @@ -392,7 +396,7 @@ func (psf *StorageServiceFactory) createTrieUnit( return psf.createTriePruningPersister(pruningStorageArgs) } -func (psf *StorageServiceFactory) setupLogsAndEventsStorer(chainStorer *dataRetriever.ChainStorer) error { +func (psf *StorageServiceFactory) setUpLogsAndEventsStorer(chainStorer *dataRetriever.ChainStorer) error { var txLogsUnit storage.Storer txLogsUnit = storageDisabled.NewStorer() @@ -414,7 +418,7 @@ func (psf *StorageServiceFactory) setupLogsAndEventsStorer(chainStorer *dataRetr return nil } -func (psf *StorageServiceFactory) setupDbLookupExtensions(chainStorer *dataRetriever.ChainStorer) error { +func (psf *StorageServiceFactory) setUpDbLookupExtensions(chainStorer *dataRetriever.ChainStorer) error { if !psf.generalConfig.DbLookupExtensions.Enabled { return nil } @@ -477,20 +481,41 @@ func (psf *StorageServiceFactory) setupDbLookupExtensions(chainStorer *dataRetri chainStorer.AddStorer(dataRetriever.EpochByHashUnit, epochByHashUnit) - esdtSuppliesConfig := psf.generalConfig.DbLookupExtensions.ESDTSuppliesStorageConfig - esdtSuppliesDbConfig := GetDBFromConfig(esdtSuppliesConfig.DB) - esdtSuppliesDbConfig.FilePath = psf.pathManager.PathForStatic(shardID, esdtSuppliesConfig.DB.FilePath) - esdtSuppliesCacherConfig := GetCacherFromConfig(esdtSuppliesConfig.Cache) - esdtSuppliesUnit, err := storageunit.NewStorageUnitFromConf(esdtSuppliesCacherConfig, esdtSuppliesDbConfig) + return psf.setUpEsdtSuppliesStorer(chainStorer, shardID) +} + +func (psf *StorageServiceFactory) setUpEsdtSuppliesStorer(chainStorer *dataRetriever.ChainStorer, shardIDStr string) error { + esdtSuppliesUnit, err := psf.createEsdtSuppliesUnit(shardIDStr) if err != 
nil { return fmt.Errorf("%w for DbLookupExtensions.ESDTSuppliesStorageConfig", err) } - chainStorer.AddStorer(dataRetriever.ESDTSuppliesUnit, esdtSuppliesUnit) + if psf.repopulateTokensSupplies { + // if the flag is set, then we need to clear the storer at this point. The easiest way is to destroy it and then create it again + err = esdtSuppliesUnit.DestroyUnit() + if err != nil { + return err + } + + time.Sleep(time.Second) // making sure the unit was properly closed and destroyed + esdtSuppliesUnit, err = psf.createEsdtSuppliesUnit(shardIDStr) + if err != nil { + return err + } + } + chainStorer.AddStorer(dataRetriever.ESDTSuppliesUnit, esdtSuppliesUnit) return nil } +func (psf *StorageServiceFactory) createEsdtSuppliesUnit(shardIDStr string) (storage.Storer, error) { + esdtSuppliesConfig := psf.generalConfig.DbLookupExtensions.ESDTSuppliesStorageConfig + esdtSuppliesDbConfig := GetDBFromConfig(esdtSuppliesConfig.DB) + esdtSuppliesDbConfig.FilePath = psf.pathManager.PathForStatic(shardIDStr, esdtSuppliesConfig.DB.FilePath) + esdtSuppliesCacherConfig := GetCacherFromConfig(esdtSuppliesConfig.Cache) + return storageunit.NewStorageUnitFromConf(esdtSuppliesCacherConfig, esdtSuppliesDbConfig) +} + func (psf *StorageServiceFactory) createPruningStorerArgs( storageConfig config.StorageConfig, customDatabaseRemover storage.CustomDatabaseRemoverHandler, diff --git a/testscommon/components/components.go b/testscommon/components/components.go index c66432fa624..66517772334 100644 --- a/testscommon/components/components.go +++ b/testscommon/components/components.go @@ -162,6 +162,7 @@ func GetConsensusArgs(shardCoordinator sharding.Coordinator) consensusComp.Conse return consensusComp.ConsensusComponentsFactoryArgs{ Config: testscommon.GetGeneralConfig(), + FlagsConfig: config.ContextFlagsConfig{}, BootstrapRoundIndex: 0, CoreComponents: coreComponents, NetworkComponents: networkComponents, @@ -219,7 +220,7 @@ func GetDataArgs(coreComponents factory.CoreComponentsHolder, 
shardCoordinator s CurrentEpoch: 0, CreateTrieEpochRootHashStorer: false, NodeProcessingMode: common.Normal, - SnapshotsEnabled: false, + FlagsConfigs: config.ContextFlagsConfig{}, } } @@ -567,9 +568,9 @@ func GetProcessArgs( MaxServiceFee: 100, }, }, - Version: "v1.0.0", - HistoryRepo: &dblookupext.HistoryRepositoryStub{}, - SnapshotsEnabled: false, + Version: "v1.0.0", + HistoryRepo: &dblookupext.HistoryRepositoryStub{}, + FlagsConfig: config.ContextFlagsConfig{}, } } From 2d0775522794888c7d7efdd3002feb6fda862cba Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Thu, 27 Apr 2023 15:33:47 +0300 Subject: [PATCH 114/221] MX-14120: fix linter issue --- process/sync/trieIterators/trieAccountsIterator.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/process/sync/trieIterators/trieAccountsIterator.go b/process/sync/trieIterators/trieAccountsIterator.go index e54ea193d5d..a60ca9cee59 100644 --- a/process/sync/trieIterators/trieAccountsIterator.go +++ b/process/sync/trieIterators/trieAccountsIterator.go @@ -8,7 +8,6 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/errChan" - "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/state" logger "github.com/multiversx/mx-chain-logger-go" ) @@ -18,9 +17,8 @@ var log = logger.GetOrCreate("trieIterators") type trieAccountIteratorHandler func(account state.UserAccountHandler) error type trieAccountsIterator struct { - marshaller marshal.Marshalizer - storageService dataRetriever.StorageService - accounts state.AccountsAdapter + marshaller marshal.Marshalizer + accounts state.AccountsAdapter } // ArgsTrieAccountsIterator holds the arguments needed to create a new trie Accounts iterator From 03318c0b6f935e067d1edbd194979ae059417361 Mon Sep 17 00:00:00 2001 From: jules01 Date: Thu, 27 Apr 2023 17:21:32 +0300 Subject: [PATCH 115/221] - fixes after review --- 
...nentsForTxSimulator.go => txSimulatorProcessComponents.go} | 0 ...Simulator_test.go => txSimulatorProcessComponents_test.go} | 0 integrationTests/realcomponents/processorRunner.go | 4 ++-- 3 files changed, 2 insertions(+), 2 deletions(-) rename factory/processing/{processComponentsForTxSimulator.go => txSimulatorProcessComponents.go} (100%) rename factory/processing/{processComponentsForTxSimulator_test.go => txSimulatorProcessComponents_test.go} (100%) diff --git a/factory/processing/processComponentsForTxSimulator.go b/factory/processing/txSimulatorProcessComponents.go similarity index 100% rename from factory/processing/processComponentsForTxSimulator.go rename to factory/processing/txSimulatorProcessComponents.go diff --git a/factory/processing/processComponentsForTxSimulator_test.go b/factory/processing/txSimulatorProcessComponents_test.go similarity index 100% rename from factory/processing/processComponentsForTxSimulator_test.go rename to factory/processing/txSimulatorProcessComponents_test.go diff --git a/integrationTests/realcomponents/processorRunner.go b/integrationTests/realcomponents/processorRunner.go index 0b49e165589..51b3516356d 100644 --- a/integrationTests/realcomponents/processorRunner.go +++ b/integrationTests/realcomponents/processorRunner.go @@ -557,11 +557,11 @@ func (pr *ProcessorRunner) ExecuteTransactionAsScheduled(tb testing.TB, tx *tran func (pr *ProcessorRunner) CreateDeploySCTx( tb testing.TB, owner []byte, - pathToContract string, + contractPath string, gasLimit uint64, initialHexParameters []string, ) (*transaction.Transaction, []byte) { - scCode := wasm.GetSCCode(pathToContract) + scCode := wasm.GetSCCode(contractPath) ownerAccount := pr.GetUserAccount(tb, owner) txDataComponents := append([]string{wasm.CreateDeployTxData(scCode)}, initialHexParameters...) 
From ada98d42a6bdab4bf042765afd999324fcf11aaa Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Fri, 28 Apr 2023 16:03:32 +0300 Subject: [PATCH 116/221] extra tests and fixes --- factory/processing/processComponents.go | 21 +- factory/processing/processComponents_test.go | 324 ++++++++++++------- testscommon/stateComponentsMock.go | 13 + 3 files changed, 219 insertions(+), 139 deletions(-) diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index 3c543396258..13840946633 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -1395,9 +1395,10 @@ func (pcf *processComponentsFactory) newRequestersContainerFactory( return pcf.newStorageRequesters() } + shardC := pcf.bootstrapComponents.ShardCoordinator() requestersContainerFactoryArgs := requesterscontainer.FactoryArgs{ RequesterConfig: pcf.config.Requesters, - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + ShardCoordinator: shardC, Messenger: pcf.network.NetworkMessenger(), Marshaller: pcf.coreData.InternalMarshalizer(), Uint64ByteSliceConverter: pcf.coreData.Uint64ByteSliceConverter(), @@ -1408,10 +1409,10 @@ func (pcf *processComponentsFactory) newRequestersContainerFactory( SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, } - if pcf.bootstrapComponents.ShardCoordinator().SelfId() < pcf.bootstrapComponents.ShardCoordinator().NumberOfShards() { + if shardC.SelfId() < shardC.NumberOfShards() { return requesterscontainer.NewShardRequestersContainerFactory(requestersContainerFactoryArgs) } - if pcf.bootstrapComponents.ShardCoordinator().SelfId() == core.MetachainShardId { + if shardC.SelfId() == core.MetachainShardId { return requesterscontainer.NewMetaRequestersContainerFactory(requestersContainerFactoryArgs) } @@ -1541,12 +1542,7 @@ func (pcf *processComponentsFactory) createStorageRequestersForMeta( ChanGracefullyClose: pcf.coreData.ChanStopNodeProcess(), SnapshotsEnabled: pcf.snapshotsEnabled, } - 
requestersContainerFactory, err := storagerequesterscontainer.NewMetaRequestersContainerFactory(requestersContainerFactoryArgs) - if err != nil { - return nil, err - } - - return requestersContainerFactory, nil + return storagerequesterscontainer.NewMetaRequestersContainerFactory(requestersContainerFactoryArgs) } func (pcf *processComponentsFactory) createStorageRequestersForShard( @@ -1574,12 +1570,7 @@ func (pcf *processComponentsFactory) createStorageRequestersForShard( ChanGracefullyClose: pcf.coreData.ChanStopNodeProcess(), SnapshotsEnabled: pcf.snapshotsEnabled, } - requestersContainerFactory, err := storagerequesterscontainer.NewShardRequestersContainerFactory(requestersContainerFactoryArgs) - if err != nil { - return nil, err - } - - return requestersContainerFactory, nil + return storagerequesterscontainer.NewShardRequestersContainerFactory(requestersContainerFactoryArgs) } func (pcf *processComponentsFactory) newShardInterceptorContainerFactory( diff --git a/factory/processing/processComponents_test.go b/factory/processing/processComponents_test.go index c7769c5494f..56fcd2dbc52 100644 --- a/factory/processing/processComponents_test.go +++ b/factory/processing/processComponents_test.go @@ -2,7 +2,6 @@ package processing_test import ( "bytes" - "context" "errors" "math/big" "strings" @@ -15,6 +14,8 @@ import ( dataBlock "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/endProcess" outportCore "github.com/multiversx/mx-chain-core-go/data/outport" + "github.com/multiversx/mx-chain-core-go/hashing/blake2b" + "github.com/multiversx/mx-chain-core-go/hashing/keccak" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/factory" @@ -33,7 +34,7 @@ import ( "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/bootstrapMocks" - componentsMock 
"github.com/multiversx/mx-chain-go/testscommon/components" + "github.com/multiversx/mx-chain-go/testscommon/components" "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/dblookupext" @@ -50,8 +51,6 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" - "github.com/multiversx/mx-chain-go/testscommon/trie" - trieFactory "github.com/multiversx/mx-chain-go/trie/factory" updateMocks "github.com/multiversx/mx-chain-go/update/mock" "github.com/stretchr/testify/require" ) @@ -62,20 +61,24 @@ const ( testingProtocolSustainabilityAddress = "erd1932eft30w753xyvme8d49qejgkjc09n5e49w4mwdjtm0neld797su0dlxp" ) -func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFactoryArgs { - gasSchedule, _ := common.LoadGasScheduleConfig("../../cmd/node/config/gasSchedules/gasScheduleV1.toml") - addrPubKeyConv, _ := factory.NewPubkeyConverter(config.PubkeyConfig{ +var ( + gasSchedule, _ = common.LoadGasScheduleConfig("../../cmd/node/config/gasSchedules/gasScheduleV1.toml") + addrPubKeyConv, _ = factory.NewPubkeyConverter(config.PubkeyConfig{ Length: 32, Type: "bech32", SignatureLength: 0, Hrp: "erd", }) - valPubKeyConv, _ := factory.NewPubkeyConverter(config.PubkeyConfig{ + valPubKeyConv, _ = factory.NewPubkeyConverter(config.PubkeyConfig{ Length: 96, Type: "hex", SignatureLength: 48, }) - return processComp.ProcessComponentsFactoryArgs{ +) + +func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFactoryArgs { + + args := processComp.ProcessComponentsFactoryArgs{ Config: testscommon.GetGeneralConfig(), EpochConfig: config.EpochConfig{}, PrefConfigs: config.PreferencesConfig{}, @@ -114,11 +117,11 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto }, Active: 
config.GovernanceSystemSCConfigActive{ ProposalCost: "500", - MinQuorum: "50", - MinPassThreshold: "50", - MinVetoThreshold: "50", + MinQuorum: 0.5, + MinPassThreshold: 0.5, + MinVetoThreshold: 0.5, }, - FirstWhitelistedAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", + ChangeConfigAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", }, StakingSystemSCConfig: config.StakingSystemSCConfig{ GenesisNodePrice: "2500000000000000000000", @@ -160,8 +163,8 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto Store: genericMocks.NewChainStorerMock(0), }, CoreData: &mock.CoreComponentsMock{ - IntMarsh: &testscommon.MarshalizerStub{}, - TxMarsh: &testscommon.MarshalizerStub{}, + IntMarsh: &marshal.GogoProtoMarshalizer{}, + TxMarsh: &marshal.JsonMarshalizer{}, UInt64ByteSliceConv: &testsMocks.Uint64ByteSliceConverterMock{}, AddrPubKeyConv: addrPubKeyConv, ValPubKeyConv: valPubKeyConv, @@ -179,19 +182,21 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto return testingProtocolSustainabilityAddress }, }, - Hash: &testscommon.HasherStub{}, - TxVersionCheckHandler: &testscommon.TxVersionCheckerStub{}, - RatingHandler: &testscommon.RaterMock{}, - EnableEpochsHandlerField: &testscommon.EnableEpochsHandlerStub{}, - EnableRoundsHandlerField: &testscommon.EnableRoundsHandlerStub{}, - EpochNotifierWithConfirm: &updateMocks.EpochStartNotifierStub{}, - RoundHandlerField: &testscommon.RoundHandlerMock{}, - ChanStopProcess: make(chan endProcess.ArgEndProcess, 1), - TxSignHasherField: &testscommon.HasherStub{}, - HardforkTriggerPubKeyField: []byte("hardfork pub key"), - WasmVMChangeLockerInternal: &sync.RWMutex{}, - NodeTypeProviderField: &nodeTypeProviderMock.NodeTypeProviderStub{}, - RatingsConfig: &testscommon.RatingsInfoMock{}, + Hash: blake2b.NewBlake2b(), + TxVersionCheckHandler: &testscommon.TxVersionCheckerStub{}, + RatingHandler: &testscommon.RaterMock{}, + 
EnableEpochsHandlerField: &testscommon.EnableEpochsHandlerStub{}, + EnableRoundsHandlerField: &testscommon.EnableRoundsHandlerStub{}, + EpochNotifierWithConfirm: &updateMocks.EpochStartNotifierStub{}, + RoundHandlerField: &testscommon.RoundHandlerMock{}, + ChanStopProcess: make(chan endProcess.ArgEndProcess, 1), + TxSignHasherField: keccak.NewKeccak(), + HardforkTriggerPubKeyField: []byte("hardfork pub key"), + WasmVMChangeLockerInternal: &sync.RWMutex{}, + NodeTypeProviderField: &nodeTypeProviderMock.NodeTypeProviderStub{}, + RatingsConfig: &testscommon.RatingsInfoMock{}, + PathHdl: &testscommon.PathManagerStub{}, + ProcessStatusHandlerInternal: &testscommon.ProcessStatusHandlerStub{}, }, Crypto: &testsMocks.CryptoComponentsStub{ BlKeyGen: &cryptoMocks.KeyGenStub{}, @@ -209,34 +214,6 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto MsgSigVerifier: &testscommon.MessageSignVerifierMock{}, ManagedPeersHolderField: &testscommon.ManagedPeersHolderStub{}, }, - State: &testscommon.StateComponentsMock{ - Accounts: &state.AccountsStub{ - CommitCalled: func() ([]byte, error) { - return []byte(""), nil - }, - RootHashCalled: func() ([]byte, error) { - return []byte("root hash"), nil - }, - }, - PeersAcc: &state.AccountsStub{ - CommitCalled: func() ([]byte, error) { - return []byte("hash"), nil - }, - RootHashCalled: func() ([]byte, error) { - return []byte("root hash"), nil - }, - }, - Tries: &trie.TriesHolderStub{ - GetCalled: func(bytes []byte) common.Trie { - return &trie.TrieStub{} - }, - }, - AccountsAPI: &state.AccountsStub{}, - StorageManagers: map[string]common.StorageManager{ - trieFactory.UserAccountTrie: &testscommon.StorageManagerStub{}, - trieFactory.PeerAccountTrie: &testscommon.StorageManagerStub{}, - }, - }, Network: &testsMocks.NetworkComponentsStub{ Messenger: &p2pmocks.MessengerStub{}, InputAntiFlood: &testsMocks.P2PAntifloodHandlerStub{}, @@ -258,6 +235,10 @@ func createMockProcessComponentsFactoryArgs() 
processComp.ProcessComponentsFacto AppStatusHandlerField: &statusHandler.AppStatusHandlerStub{}, }, } + + args.State = components.GetStateComponents(args.CoreData) + + return args } func TestNewProcessComponentsFactory(t *testing.T) { @@ -623,7 +604,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { } testCreateWithArgs(t, args, expectedErr.Error()) }) - t.Run("newStorageResolver fails due to NewStorageServiceFactory failure should error", func(t *testing.T) { + t.Run("newStorageRequester fails due to NewStorageServiceFactory failure should error", func(t *testing.T) { t.Parallel() args := createMockProcessComponentsFactoryArgs() @@ -631,21 +612,50 @@ func TestProcessComponentsFactory_Create(t *testing.T) { args.Config.StoragePruning.NumActivePersisters = 0 testCreateWithArgs(t, args, "active persisters") }) - t.Run("newStorageResolver fails due to CreateForMeta failure should error", func(t *testing.T) { + t.Run("newStorageRequester fails due to NewSimpleDataPacker failure on createStorageRequestersForMeta should error", func(t *testing.T) { t.Parallel() args := createMockProcessComponentsFactoryArgs() args.ImportDBConfig.IsImportDBMode = true - args.Config.ShardHdrNonceHashStorage.Cache.Type = "invalid" - updateShardCoordinatorForMetaAtStep(t, args, 0) - testCreateWithArgs(t, args, "ShardHdrNonceHashStorage") + + coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) + step := 0 + coreCompStub.InternalMarshalizerCalled = func() marshal.Marshalizer { + step++ + if step > 3 { + return nil + } + return &testscommon.MarshalizerStub{} + } + args.CoreData = coreCompStub + updateShardCoordinatorForMetaAtStep(t, args, 3) + testCreateWithArgs(t, args, "marshalizer") + }) + t.Run("newStorageRequester fails due to NewSimpleDataPacker failure on createStorageRequestersForShard should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.ImportDBConfig.IsImportDBMode = true + + 
coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) + step := 0 + coreCompStub.InternalMarshalizerCalled = func() marshal.Marshalizer { + step++ + if step > 3 { + return nil + } + return &testscommon.MarshalizerStub{} + } + args.CoreData = coreCompStub + testCreateWithArgs(t, args, "marshalizer") }) - t.Run("newStorageResolver fails due to CreateForMeta failure should error", func(t *testing.T) { + t.Run("newStorageRequester fails due to CreateForMeta failure should error", func(t *testing.T) { t.Parallel() args := createMockProcessComponentsFactoryArgs() args.ImportDBConfig.IsImportDBMode = true args.Config.ShardHdrNonceHashStorage.Cache.Type = "invalid" + updateShardCoordinatorForMetaAtStep(t, args, 0) testCreateWithArgs(t, args, "ShardHdrNonceHashStorage") }) t.Run("newResolverContainerFactory fails due to NewPeerAuthenticationPayloadValidator failure should error", func(t *testing.T) { @@ -657,6 +667,8 @@ func TestProcessComponentsFactory_Create(t *testing.T) { }) t.Run("newResolverContainerFactory fails due to invalid shard should error", testWithInvalidShard(0, "could not create interceptor and resolver container factory")) + t.Run("newRequesterContainerFactory fails due to invalid shard should error", + testWithInvalidShard(5, "could not create requester container factory")) t.Run("newMetaResolverContainerFactory fails due to NewSimpleDataPacker failure should error", func(t *testing.T) { t.Parallel() @@ -676,7 +688,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { }) t.Run("newShardResolverContainerFactory fails due to NewSimpleDataPacker failure should error", testWithNilMarshaller(3, "marshalizer", unreachableStep)) - t.Run("NewResolversFinder fails should error", func(t *testing.T) { + t.Run("NewRequestersFinder fails should error", func(t *testing.T) { t.Parallel() args := createMockProcessComponentsFactoryArgs() @@ -697,7 +709,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { 
testCreateWithArgs(t, args, "shard coordinator") }) t.Run("GetStorer TxLogsUnit fails should error", testWithMissingStorer(0, retriever.TxLogsUnit, unreachableStep)) - t.Run("NewResolversFinder fails should error", testWithNilMarshaller(5, "Marshalizer", unreachableStep)) + t.Run("NewRequestersFinder fails should error", testWithNilMarshaller(5, "Marshalizer", unreachableStep)) t.Run("generateGenesisHeadersAndApplyInitialBalances fails due to invalid GenesisNodePrice should error", func(t *testing.T) { t.Parallel() @@ -708,7 +720,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { testCreateWithArgs(t, args, "invalid genesis node price") }) t.Run("generateGenesisHeadersAndApplyInitialBalances fails due to NewGenesisBlockCreator failure should error", - testWithNilMarshaller(6, "Marshalizer", unreachableStep)) + testWithNilMarshaller(7, "Marshalizer", unreachableStep)) t.Run("setGenesisHeader fails due to invalid shard should error", testWithInvalidShard(8, "genesis block does not exist")) t.Run("newValidatorStatisticsProcessor fails due to nil genesis header should error", func(t *testing.T) { @@ -726,7 +738,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { testCreateWithArgs(t, args, errorsMx.ErrGenesisBlockNotInitialized.Error()) }) t.Run("indexGenesisBlocks fails due to CalculateHash failure should error", - testWithNilMarshaller(41, "marshalizer", unreachableStep)) + testWithNilMarshaller(42, "marshalizer", unreachableStep)) t.Run("indexGenesisBlocks fails due to GenerateInitialTransactions failure should error", func(t *testing.T) { t.Parallel() @@ -743,7 +755,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { t.Run("newEpochStartTrigger fails due to invalid shard should error", testWithInvalidShard(16, "error creating new start of epoch trigger because of invalid shard id")) t.Run("newEpochStartTrigger fails due to NewHeaderValidator failure should error", - testWithNilMarshaller(46, "Marshalizer", unreachableStep)) + 
testWithNilMarshaller(47, "Marshalizer", unreachableStep)) t.Run("newEpochStartTrigger fails due to NewPeerMiniBlockSyncer failure should error", func(t *testing.T) { t.Parallel() @@ -789,7 +801,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { testCreateWithArgs(t, args, errorsMx.ErrGenesisBlockNotInitialized.Error()) }) t.Run("newEpochStartTrigger fails due to invalid shard should error", - testWithInvalidShard(17, "genesis block does not exist")) + testWithInvalidShard(17, "error creating new start of epoch trigger because of invalid shard id")) t.Run("NewHeaderValidator fails should error", testWithNilMarshaller(48, "marshalizer", unreachableStep)) t.Run("prepareGenesisBlock fails due to CalculateHash failure should error", func(t *testing.T) { t.Parallel() @@ -830,10 +842,10 @@ func TestProcessComponentsFactory_Create(t *testing.T) { testCreateWithArgs(t, args, expectedErr.Error()) }) t.Run("GetStorer TxLogsUnit fails should error", testWithMissingStorer(2, retriever.BootstrapUnit, unreachableStep)) - t.Run("NewBootstrapStorer fails should error", testWithNilMarshaller(50, "Marshalizer", unreachableStep)) - t.Run("NewHeaderValidator fails should error", testWithNilMarshaller(51, "Marshalizer", unreachableStep)) + t.Run("NewBootstrapStorer fails should error", testWithNilMarshaller(51, "Marshalizer", unreachableStep)) + t.Run("NewHeaderValidator fails should error", testWithNilMarshaller(52, "Marshalizer", unreachableStep)) t.Run("newBlockTracker fails due to invalid shard should error", - testWithInvalidShard(19, "could not create block tracker")) + testWithInvalidShard(20, "could not create block tracker")) t.Run("NewMiniBlocksPoolsCleaner fails should error", func(t *testing.T) { t.Parallel() @@ -857,10 +869,10 @@ func TestProcessComponentsFactory_Create(t *testing.T) { cnt := 0 bootstrapCompStub.ShardCoordinatorCalled = func() sharding.Coordinator { cnt++ - if cnt > 22 { + if cnt > 25 { return nil } - return 
testscommon.NewMultiShardsCoordinatorMock(2) + return mock.NewMultiShardsCoordinatorMock(2) } testCreateWithArgs(t, args, "shard coordinator") }) @@ -872,7 +884,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { testCreateWithArgs(t, args, "PublicKeyToListenFrom") }) t.Run("newInterceptorContainerFactory fails due to invalid shard should error", - testWithInvalidShard(23, "could not create interceptor container factory")) + testWithInvalidShard(24, "could not create interceptor container factory")) t.Run("createExportFactoryHandler fails", func(t *testing.T) { t.Parallel() @@ -882,15 +894,15 @@ func TestProcessComponentsFactory_Create(t *testing.T) { cnt := 0 bootstrapCompStub.ShardCoordinatorCalled = func() sharding.Coordinator { cnt++ - if cnt > 25 { + if cnt > 28 { return nil } - return testscommon.NewMultiShardsCoordinatorMock(2) + return mock.NewMultiShardsCoordinatorMock(2) } testCreateWithArgs(t, args, "shard coordinator") }) t.Run("newForkDetector fails due to invalid shard should error", - testWithInvalidShard(27, "could not create fork detector")) + testWithInvalidShard(28, "could not create fork detector")) t.Run("NewCache fails for vmOutput should error", func(t *testing.T) { t.Parallel() @@ -903,11 +915,11 @@ func TestProcessComponentsFactory_Create(t *testing.T) { t.Run("NewScheduledTxsExecution fails should error", testWithNilMarshaller(104, "Marshalizer", unreachableStep)) t.Run("NewESDTDataStorage fails should error", - testWithNilMarshaller(105, "Marshalizer", unreachableStep)) + testWithNilMarshaller(106, "Marshalizer", unreachableStep)) t.Run("NewReceiptsRepository fails should error", testWithNilMarshaller(107, "marshalizer", unreachableStep)) t.Run("newBlockProcessor fails due to invalid shard should error", - testWithInvalidShard(31, "could not create block processor")) + testWithInvalidShard(32, "could not create block processor")) // newShardBlockProcessor t.Run("newShardBlockProcessor: NewESDTTransferParser fails should error", 
@@ -1042,7 +1054,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { cnt := 0 netwCompStub.MessengerCalled = func() p2p.Messenger { cnt++ - if cnt > 7 { + if cnt > 8 { return nil } return &p2pmocks.MessengerStub{} @@ -1062,13 +1074,17 @@ func TestProcessComponentsFactory_Create(t *testing.T) { return true }, } - stateCompStub, ok := args.State.(*testscommon.StateComponentsMock) - require.True(t, ok) - accountsStub, ok := stateCompStub.Accounts.(*state.AccountsStub) - require.True(t, ok) - accountsStub.RootHashCalled = func() ([]byte, error) { - return nil, expectedErr + stateCompMock := testscommon.NewStateComponentsMockFromRealComponent(args.State) + realAccounts := stateCompMock.AccountsAdapter() + stateCompMock.Accounts = &state.AccountsStub{ + GetAllLeavesCalled: realAccounts.GetAllLeaves, + RootHashCalled: func() ([]byte, error) { + return nil, expectedErr + }, + CommitCalled: realAccounts.Commit, } + args.State = stateCompMock + pcf, _ := processComp.NewProcessComponentsFactory(args) require.NotNil(t, pcf) @@ -1087,15 +1103,18 @@ func TestProcessComponentsFactory_Create(t *testing.T) { return true }, } - stateCompStub, ok := args.State.(*testscommon.StateComponentsMock) - require.True(t, ok) - accountsStub, ok := stateCompStub.Accounts.(*state.AccountsStub) - require.True(t, ok) - accountsStub.GetAllLeavesCalled = func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte) error { - close(leavesChannels.LeavesChan) - leavesChannels.ErrChan.Close() - return expectedErr + stateCompMock := testscommon.NewStateComponentsMockFromRealComponent(args.State) + realAccounts := stateCompMock.AccountsAdapter() + stateCompMock.Accounts = &state.AccountsStub{ + GetAllLeavesCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte) error { + close(leavesChannels.LeavesChan) + leavesChannels.ErrChan.Close() + return expectedErr + }, + RootHashCalled: realAccounts.RootHash, + CommitCalled: 
realAccounts.Commit, } + args.State = stateCompMock pcf, _ := processComp.NewProcessComponentsFactory(args) require.NotNil(t, pcf) @@ -1115,17 +1134,23 @@ func TestProcessComponentsFactory_Create(t *testing.T) { return true }, } - stateCompStub, ok := args.State.(*testscommon.StateComponentsMock) - require.True(t, ok) - accountsStub, ok := stateCompStub.Accounts.(*state.AccountsStub) - require.True(t, ok) - accountsStub.GetAllLeavesCalled = func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte) error { - leavesChannels.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("key_ok"), []byte("value")) // coverage - leavesChannels.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("key_invalid"), []byte("value")) - close(leavesChannels.LeavesChan) - leavesChannels.ErrChan.Close() - return nil + stateCompMock := testscommon.NewStateComponentsMockFromRealComponent(args.State) + realAccounts := stateCompMock.AccountsAdapter() + stateCompMock.Accounts = &state.AccountsStub{ + GetAllLeavesCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte) error { + addrOk, _ := addrPubKeyConv.Decode("erd17c4fs6mz2aa2hcvva2jfxdsrdknu4220496jmswer9njznt22eds0rxlr4") + addrNOK, _ := addrPubKeyConv.Decode("erd1ulhw20j7jvgfgak5p05kv667k5k9f320sgef5ayxkt9784ql0zssrzyhjp") + leavesChannels.LeavesChan <- keyValStorage.NewKeyValStorage(addrOk, []byte("value")) // coverage + leavesChannels.LeavesChan <- keyValStorage.NewKeyValStorage(addrNOK, []byte("value")) + close(leavesChannels.LeavesChan) + leavesChannels.ErrChan.Close() + return nil + }, + RootHashCalled: realAccounts.RootHash, + CommitCalled: realAccounts.Commit, } + args.State = stateCompMock + coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) cnt := 0 coreCompStub.InternalMarshalizerCalled = func() marshal.Marshalizer { @@ -1158,16 +1183,24 @@ func TestProcessComponentsFactory_Create(t *testing.T) { return true }, } - 
stateCompStub, ok := args.State.(*testscommon.StateComponentsMock) - require.True(t, ok) - accountsStub, ok := stateCompStub.Accounts.(*state.AccountsStub) - require.True(t, ok) - accountsStub.GetAllLeavesCalled = func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte) error { - close(leavesChannels.LeavesChan) - leavesChannels.ErrChan.WriteInChanNonBlocking(expectedErr) - leavesChannels.ErrChan.Close() - return nil + realStateComp := args.State + args.State = &testscommon.StateComponentsMock{ + Accounts: &state.AccountsStub{ + GetAllLeavesCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte) error { + close(leavesChannels.LeavesChan) + leavesChannels.ErrChan.WriteInChanNonBlocking(expectedErr) + leavesChannels.ErrChan.Close() + return nil + }, + CommitCalled: realStateComp.AccountsAdapter().Commit, + RootHashCalled: realStateComp.AccountsAdapter().RootHash, + }, + PeersAcc: realStateComp.PeerAccounts(), + Tries: realStateComp.TriesContainer(), + AccountsAPI: realStateComp.AccountsAdapterAPI(), + StorageManagers: realStateComp.TrieStorageManagers(), } + pcf, _ := processComp.NewProcessComponentsFactory(args) require.NotNil(t, pcf) @@ -1175,11 +1208,54 @@ func TestProcessComponentsFactory_Create(t *testing.T) { require.Nil(t, err) require.NotNil(t, instance) }) - t.Run("should work - shard", func(t *testing.T) { + t.Run("should work with indexAndReturnGenesisAccounts failing due to error on Encode", func(t *testing.T) { t.Parallel() + args := createMockProcessComponentsFactoryArgs() + statusCompStub, ok := args.StatusComponents.(*testsMocks.StatusComponentsStub) + require.True(t, ok) + statusCompStub.Outport = &outport.OutportStub{ + HasDriversCalled: func() bool { + return true + }, + } + realStateComp := args.State + args.State = &testscommon.StateComponentsMock{ + Accounts: &state.AccountsStub{ + GetAllLeavesCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, 
rootHash []byte) error { + leavesChannels.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("invalid addr"), []byte("value")) + close(leavesChannels.LeavesChan) + leavesChannels.ErrChan.Close() + return nil + }, + CommitCalled: realStateComp.AccountsAdapter().Commit, + RootHashCalled: realStateComp.AccountsAdapter().RootHash, + }, + PeersAcc: realStateComp.PeerAccounts(), + Tries: realStateComp.TriesContainer(), + AccountsAPI: realStateComp.AccountsAdapterAPI(), + StorageManagers: realStateComp.TrieStorageManagers(), + } + coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) + coreCompStub.InternalMarshalizerCalled = func() marshal.Marshalizer { + return &testscommon.MarshalizerStub{ + UnmarshalCalled: func(obj interface{}, buff []byte) error { + return nil + }, + } + } + args.CoreData = coreCompStub + + pcf, _ := processComp.NewProcessComponentsFactory(args) + require.NotNil(t, pcf) + + instance, err := pcf.Create() + require.Nil(t, err) + require.NotNil(t, instance) + }) + t.Run("should work - shard", func(t *testing.T) { shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) - processArgs := componentsMock.GetProcessComponentsFactoryArgs(shardCoordinator) + processArgs := components.GetProcessComponentsFactoryArgs(shardCoordinator) pcf, _ := processComp.NewProcessComponentsFactory(processArgs) require.NotNil(t, pcf) @@ -1191,11 +1267,9 @@ func TestProcessComponentsFactory_Create(t *testing.T) { require.NoError(t, err) }) t.Run("should work - meta", func(t *testing.T) { - t.Parallel() - shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) shardCoordinator.CurrentShard = common.MetachainShardId - processArgs := componentsMock.GetProcessComponentsFactoryArgs(shardCoordinator) + processArgs := components.GetProcessComponentsFactoryArgs(shardCoordinator) shardCoordinator.ComputeIdCalled = func(address []byte) uint32 { protocolSustainabilityAddr, err := 
processArgs.CoreData.AddressPubKeyConverter().Decode(testingProtocolSustainabilityAddress) @@ -1326,17 +1400,17 @@ func testWithNilAccountsAdapterAPI(nilStep int, expectedErrSubstr string, metaSt t.Parallel() args := createMockProcessComponentsFactoryArgs() - stateCompStub, ok := args.State.(*testscommon.StateComponentsMock) - require.True(t, ok) - accountsAdapterAPI := stateCompStub.AccountsAdapterAPI() + stateCompMock := testscommon.NewStateComponentsMockFromRealComponent(args.State) + accountsAdapterAPI := stateCompMock.AccountsAdapterAPI() step := 0 - stateCompStub.AccountsAdapterAPICalled = func() mxState.AccountsAdapter { + stateCompMock.AccountsAdapterAPICalled = func() mxState.AccountsAdapter { step++ if step > nilStep { return nil } return accountsAdapterAPI } + args.State = stateCompMock updateShardCoordinatorForMetaAtStep(t, args, metaStep) testCreateWithArgs(t, args, expectedErrSubstr) } @@ -1374,7 +1448,7 @@ func updateShardCoordinatorForMetaAtStep(t *testing.T, args processComp.ProcessC step := 0 bootstrapCompStub.ShardCoordinatorCalled = func() sharding.Coordinator { step++ - shardC := testscommon.NewMultiShardsCoordinatorMock(2) + shardC := mock.NewMultiShardsCoordinatorMock(2) if step > metaStep { shardC.CurrentShard = common.MetachainShardId } @@ -1389,6 +1463,8 @@ func testWithInvalidShard(failingStep int, expectedErrSubstr string) func(t *tes args := createMockProcessComponentsFactoryArgs() bootstrapCompStub, ok := args.BootstrapComponents.(*mainFactoryMocks.BootstrapComponentsStub) require.True(t, ok) + + x := bootstrapCompStub.ShardCoordinator() cnt := 0 bootstrapCompStub.ShardCoordinatorCalled = func() sharding.Coordinator { cnt++ @@ -1398,7 +1474,7 @@ func testWithInvalidShard(failingStep int, expectedErrSubstr string) func(t *tes CurrentShard: 3, } } - return testscommon.NewMultiShardsCoordinatorMock(2) + return x } testCreateWithArgs(t, args, expectedErrSubstr) } diff --git a/testscommon/stateComponentsMock.go 
b/testscommon/stateComponentsMock.go index 9a08cb328df..28cc5d19531 100644 --- a/testscommon/stateComponentsMock.go +++ b/testscommon/stateComponentsMock.go @@ -2,6 +2,7 @@ package testscommon import ( "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/state" ) @@ -16,6 +17,18 @@ type StateComponentsMock struct { StorageManagers map[string]common.StorageManager } +// NewStateComponentsMockFromRealComponent - +func NewStateComponentsMockFromRealComponent(stateComponents factory.StateComponentsHolder) *StateComponentsMock { + return &StateComponentsMock{ + PeersAcc: stateComponents.PeerAccounts(), + Accounts: stateComponents.AccountsAdapter(), + AccountsAPI: stateComponents.AccountsAdapterAPI(), + AccountsRepo: stateComponents.AccountsRepository(), + Tries: stateComponents.TriesContainer(), + StorageManagers: stateComponents.TrieStorageManagers(), + } +} + // Create - func (scm *StateComponentsMock) Create() error { return nil From 4e14bdff6640ddcdd6261cf19598ff622771ba00 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Fri, 28 Apr 2023 16:49:24 +0300 Subject: [PATCH 117/221] fix linter --- cmd/keygenerator/converter/pidPubkeyConverter.go | 3 --- factory/consensus/consensusComponents_test.go | 8 ++++---- factory/processing/processComponents_test.go | 15 ++++++++------- integrationTests/testProcessorNode.go | 4 ++-- node/nodeTesting_test.go | 7 ++++--- testscommon/components/default.go | 5 +++-- testscommon/{ => factory}/stateComponentsMock.go | 2 +- 7 files changed, 22 insertions(+), 22 deletions(-) rename testscommon/{ => factory}/stateComponentsMock.go (99%) diff --git a/cmd/keygenerator/converter/pidPubkeyConverter.go b/cmd/keygenerator/converter/pidPubkeyConverter.go index 1cff0dfa0d7..41eeea15fa1 100644 --- a/cmd/keygenerator/converter/pidPubkeyConverter.go +++ b/cmd/keygenerator/converter/pidPubkeyConverter.go @@ -6,11 +6,8 @@ import ( 
"github.com/multiversx/mx-chain-crypto-go/signing/secp256k1" "github.com/multiversx/mx-chain-go/p2p" "github.com/multiversx/mx-chain-go/p2p/factory" - logger "github.com/multiversx/mx-chain-logger-go" ) -var log = logger.GetOrCreate("cmd/keygenerator/converter") - type pidPubkeyConverter struct { keyGen crypto.KeyGenerator p2PKeyConverter p2p.P2PKeyConverter diff --git a/factory/consensus/consensusComponents_test.go b/factory/consensus/consensusComponents_test.go index d0512e51fdf..4002e886e99 100644 --- a/factory/consensus/consensusComponents_test.go +++ b/factory/consensus/consensusComponents_test.go @@ -136,7 +136,7 @@ func createMockConsensusComponentsFactoryArgs() consensusComp.ConsensusComponent HeaderIntegrVerif: &mock.HeaderIntegrityVerifierStub{}, FallbackHdrValidator: &testscommon.FallBackHeaderValidatorStub{}, }, - StateComponents: &testscommon.StateComponentsMock{ + StateComponents: &factoryMocks.StateComponentsMock{ StorageManagers: map[string]common.StorageManager{ trieFactory.UserAccountTrie: &testscommon.StorageManagerStub{}, trieFactory.PeerAccountTrie: &testscommon.StorageManagerStub{}, @@ -553,7 +553,7 @@ func TestConsensusComponentsFactory_Create(t *testing.T) { t.Parallel() args := createMockConsensusComponentsFactoryArgs() - stateCompStub, ok := args.StateComponents.(*testscommon.StateComponentsMock) + stateCompStub, ok := args.StateComponents.(*factoryMocks.StateComponentsMock) require.True(t, ok) stateCompStub.StorageManagers = make(map[string]common.StorageManager) // missing UserAccountTrie ccf, _ := consensusComp.NewConsensusComponentsFactory(args) @@ -604,7 +604,7 @@ func TestConsensusComponentsFactory_Create(t *testing.T) { t.Parallel() args := createMockConsensusComponentsFactoryArgs() - stateCompStub, ok := args.StateComponents.(*testscommon.StateComponentsMock) + stateCompStub, ok := args.StateComponents.(*factoryMocks.StateComponentsMock) require.True(t, ok) stateCompStub.StorageManagers = make(map[string]common.StorageManager) // 
missing UserAccountTrie processCompStub, ok := args.ProcessComponents.(*testsMocks.ProcessComponentsStub) @@ -625,7 +625,7 @@ func TestConsensusComponentsFactory_Create(t *testing.T) { t.Parallel() args := createMockConsensusComponentsFactoryArgs() - stateCompStub, ok := args.StateComponents.(*testscommon.StateComponentsMock) + stateCompStub, ok := args.StateComponents.(*factoryMocks.StateComponentsMock) require.True(t, ok) stateCompStub.StorageManagers = map[string]common.StorageManager{ trieFactory.UserAccountTrie: &testscommon.StorageManagerStub{}, diff --git a/factory/processing/processComponents_test.go b/factory/processing/processComponents_test.go index 56fcd2dbc52..ebe27a8962f 100644 --- a/factory/processing/processComponents_test.go +++ b/factory/processing/processComponents_test.go @@ -2,6 +2,7 @@ package processing_test import ( "bytes" + "context" "errors" "math/big" "strings" @@ -436,7 +437,7 @@ func TestNewProcessComponentsFactory(t *testing.T) { t.Parallel() args := createMockProcessComponentsFactoryArgs() - args.State = &testscommon.StateComponentsMock{ + args.State = &factoryMocks.StateComponentsMock{ Accounts: nil, } pcf, err := processComp.NewProcessComponentsFactory(args) @@ -1074,7 +1075,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { return true }, } - stateCompMock := testscommon.NewStateComponentsMockFromRealComponent(args.State) + stateCompMock := factoryMocks.NewStateComponentsMockFromRealComponent(args.State) realAccounts := stateCompMock.AccountsAdapter() stateCompMock.Accounts = &state.AccountsStub{ GetAllLeavesCalled: realAccounts.GetAllLeaves, @@ -1103,7 +1104,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { return true }, } - stateCompMock := testscommon.NewStateComponentsMockFromRealComponent(args.State) + stateCompMock := factoryMocks.NewStateComponentsMockFromRealComponent(args.State) realAccounts := stateCompMock.AccountsAdapter() stateCompMock.Accounts = &state.AccountsStub{ GetAllLeavesCalled: 
func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte) error { @@ -1134,7 +1135,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { return true }, } - stateCompMock := testscommon.NewStateComponentsMockFromRealComponent(args.State) + stateCompMock := factoryMocks.NewStateComponentsMockFromRealComponent(args.State) realAccounts := stateCompMock.AccountsAdapter() stateCompMock.Accounts = &state.AccountsStub{ GetAllLeavesCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte) error { @@ -1184,7 +1185,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { }, } realStateComp := args.State - args.State = &testscommon.StateComponentsMock{ + args.State = &factoryMocks.StateComponentsMock{ Accounts: &state.AccountsStub{ GetAllLeavesCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte) error { close(leavesChannels.LeavesChan) @@ -1220,7 +1221,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { }, } realStateComp := args.State - args.State = &testscommon.StateComponentsMock{ + args.State = &factoryMocks.StateComponentsMock{ Accounts: &state.AccountsStub{ GetAllLeavesCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte) error { leavesChannels.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("invalid addr"), []byte("value")) @@ -1400,7 +1401,7 @@ func testWithNilAccountsAdapterAPI(nilStep int, expectedErrSubstr string, metaSt t.Parallel() args := createMockProcessComponentsFactoryArgs() - stateCompMock := testscommon.NewStateComponentsMockFromRealComponent(args.State) + stateCompMock := factoryMocks.NewStateComponentsMockFromRealComponent(args.State) accountsAdapterAPI := stateCompMock.AccountsAdapterAPI() step := 0 stateCompMock.AccountsAdapterAPICalled = func() mxState.AccountsAdapter { diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index 
fb55a30cb98..e7bf0a3fc84 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -3205,8 +3205,8 @@ func GetDefaultCryptoComponents() *mock.CryptoComponentsStub { } // GetDefaultStateComponents - -func GetDefaultStateComponents() *testscommon.StateComponentsMock { - return &testscommon.StateComponentsMock{ +func GetDefaultStateComponents() *testFactory.StateComponentsMock { + return &testFactory.StateComponentsMock{ PeersAcc: &stateMock.AccountsStub{}, Accounts: &stateMock.AccountsStub{}, AccountsRepo: &stateMock.AccountsRepositoryStub{}, diff --git a/node/nodeTesting_test.go b/node/nodeTesting_test.go index e18d26ba218..5c36fc47774 100644 --- a/node/nodeTesting_test.go +++ b/node/nodeTesting_test.go @@ -21,6 +21,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/cryptoMocks" dataRetrieverMock "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + factoryMocks "github.com/multiversx/mx-chain-go/testscommon/factory" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" @@ -396,7 +397,7 @@ func getDefaultCryptoComponents() *factoryMock.CryptoComponentsMock { PubKeyBytes: []byte("pubKey"), BlockSig: &mock.SingleSignerMock{}, TxSig: &mock.SingleSignerMock{}, - MultiSigContainer: cryptoMocks.NewMultiSignerContainerMock( cryptoMocks.NewMultiSigner()), + MultiSigContainer: cryptoMocks.NewMultiSignerContainerMock(cryptoMocks.NewMultiSigner()), PeerSignHandler: &mock.PeerSignatureHandler{}, BlKeyGen: &mock.KeyGenMock{}, TxKeyGen: &mock.KeyGenMock{}, @@ -407,8 +408,8 @@ func getDefaultCryptoComponents() *factoryMock.CryptoComponentsMock { } } -func getDefaultStateComponents() *testscommon.StateComponentsMock { - return &testscommon.StateComponentsMock{ +func getDefaultStateComponents() *factoryMocks.StateComponentsMock { + return 
&factoryMocks.StateComponentsMock{ PeersAcc: &stateMock.AccountsStub{}, Accounts: &stateMock.AccountsStub{}, AccountsAPI: &stateMock.AccountsStub{}, diff --git a/testscommon/components/default.go b/testscommon/components/default.go index 6cb28f54616..04fa94f1e3d 100644 --- a/testscommon/components/default.go +++ b/testscommon/components/default.go @@ -13,6 +13,7 @@ import ( dataRetrieverTests "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon/economicsmocks" epochNotifierMock "github.com/multiversx/mx-chain-go/testscommon/epochNotifier" + "github.com/multiversx/mx-chain-go/testscommon/factory" "github.com/multiversx/mx-chain-go/testscommon/nodeTypeProviderMock" "github.com/multiversx/mx-chain-go/testscommon/p2pmocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" @@ -86,8 +87,8 @@ func GetDefaultNetworkComponents() *mock.NetworkComponentsMock { } // GetDefaultStateComponents - -func GetDefaultStateComponents() *testscommon.StateComponentsMock { - return &testscommon.StateComponentsMock{ +func GetDefaultStateComponents() *factory.StateComponentsMock { + return &factory.StateComponentsMock{ PeersAcc: &stateMock.AccountsStub{}, Accounts: &stateMock.AccountsStub{}, Tries: &trieMock.TriesHolderStub{}, diff --git a/testscommon/stateComponentsMock.go b/testscommon/factory/stateComponentsMock.go similarity index 99% rename from testscommon/stateComponentsMock.go rename to testscommon/factory/stateComponentsMock.go index 28cc5d19531..f4e5e241222 100644 --- a/testscommon/stateComponentsMock.go +++ b/testscommon/factory/stateComponentsMock.go @@ -1,4 +1,4 @@ -package testscommon +package factory import ( "github.com/multiversx/mx-chain-go/common" From 26cd75075ed695d75551fb78b40d11b9ed3a634d Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Fri, 28 Apr 2023 17:28:42 +0300 Subject: [PATCH 118/221] MX-14120: fixes --- consensus/message.pb.go | 5 +- dblookupext/esdtSupply/proto/supplyESDT.proto | 1 + 
dblookupext/esdtSupply/supplyESDT.pb.go | 96 ++++++++++++++----- go.mod | 2 +- go.sum | 3 +- node/node.go | 7 +- .../trieIterators/tokensSuppliesComputer.go | 45 ++++++--- 7 files changed, 114 insertions(+), 45 deletions(-) diff --git a/consensus/message.pb.go b/consensus/message.pb.go index fea3604fb71..bb28b0a277d 100644 --- a/consensus/message.pb.go +++ b/consensus/message.pb.go @@ -6,14 +6,13 @@ package consensus import ( bytes "bytes" fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" io "io" math "math" math_bits "math/bits" reflect "reflect" strings "strings" - - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" ) // Reference imports to suppress errors if they are not otherwise used. diff --git a/dblookupext/esdtSupply/proto/supplyESDT.proto b/dblookupext/esdtSupply/proto/supplyESDT.proto index 1e12b568d07..a78c4f15b81 100644 --- a/dblookupext/esdtSupply/proto/supplyESDT.proto +++ b/dblookupext/esdtSupply/proto/supplyESDT.proto @@ -12,4 +12,5 @@ message SupplyESDT { bytes Supply = 1 [(gogoproto.jsontag) = "value", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; bytes Burned = 2 [(gogoproto.jsontag) = "burned", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; bytes Minted = 3 [(gogoproto.jsontag) = "minted", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; + bool RecomputedSupply = 4 [(gogoproto.jsontag) = "RecomputedSupply"]; } diff --git a/dblookupext/esdtSupply/supplyESDT.pb.go b/dblookupext/esdtSupply/supplyESDT.pb.go index f330cc75b17..7a7f5c68971 100644 --- a/dblookupext/esdtSupply/supplyESDT.pb.go +++ b/dblookupext/esdtSupply/supplyESDT.pb.go @@ -29,9 +29,10 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // SupplyESDT is used to store information a shard esdt token supply type SupplyESDT 
struct { - Supply *math_big.Int `protobuf:"bytes,1,opt,name=Supply,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"value"` - Burned *math_big.Int `protobuf:"bytes,2,opt,name=Burned,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"burned"` - Minted *math_big.Int `protobuf:"bytes,3,opt,name=Minted,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"minted"` + Supply *math_big.Int `protobuf:"bytes,1,opt,name=Supply,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"value"` + Burned *math_big.Int `protobuf:"bytes,2,opt,name=Burned,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"burned"` + Minted *math_big.Int `protobuf:"bytes,3,opt,name=Minted,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"minted"` + RecomputedSupply bool `protobuf:"varint,4,opt,name=RecomputedSupply,proto3" json:"RecomputedSupply"` } func (m *SupplyESDT) Reset() { *m = SupplyESDT{} } @@ -83,6 +84,13 @@ func (m *SupplyESDT) GetMinted() *math_big.Int { return nil } +func (m *SupplyESDT) GetRecomputedSupply() bool { + if m != nil { + return m.RecomputedSupply + } + return false +} + func init() { proto.RegisterType((*SupplyESDT)(nil), "proto.SupplyESDT") } @@ -90,26 +98,28 @@ func init() { func init() { proto.RegisterFile("supplyESDT.proto", fileDescriptor_173c6d56cc05b222) } var fileDescriptor_173c6d56cc05b222 = []byte{ - // 294 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0xd1, 0x31, 0x4e, 0xc3, 0x30, - 0x14, 0x06, 0x60, 0xbb, 0xa8, 0x19, 0x2c, 0x06, 0xd4, 0xa9, 0x62, 0x78, 0x45, 0x4c, 0x2c, 0x49, - 0x06, 0x46, 0xb6, 0xd0, 0x0e, 0x1d, 0x58, 0x5a, 0x26, 0x36, 0x27, 0x31, 0x8e, 0x21, 0x89, 0xa3, - 0xc4, 0xae, 0xca, 0xc6, 0x11, 0x38, 0x06, 0xe2, 0x24, 0x8c, 0x11, 
0x53, 0xa6, 0x42, 0x9c, 0x05, - 0x75, 0xea, 0x11, 0x10, 0x0e, 0x02, 0x0e, 0xd0, 0xc9, 0xfe, 0x7f, 0xc9, 0xfe, 0xa4, 0xf7, 0xc8, - 0x51, 0xa5, 0x8b, 0x22, 0x7d, 0x98, 0x2d, 0xa7, 0xd7, 0x5e, 0x51, 0x4a, 0x25, 0x47, 0x43, 0x7b, - 0x1c, 0xbb, 0x5c, 0xa8, 0x44, 0x87, 0x5e, 0x24, 0x33, 0x9f, 0x4b, 0x2e, 0x7d, 0x5b, 0x87, 0xfa, - 0xd6, 0x26, 0x1b, 0xec, 0xad, 0x7f, 0x75, 0xfa, 0x36, 0x20, 0x64, 0xf9, 0xfb, 0xd5, 0xe8, 0x8e, - 0x38, 0x7d, 0x1a, 0xe3, 0x13, 0x7c, 0x76, 0x18, 0x2c, 0xb6, 0x9b, 0xc9, 0x70, 0x45, 0x53, 0xcd, - 0x5e, 0xde, 0x27, 0xb3, 0x8c, 0xaa, 0xc4, 0x0f, 0x05, 0xf7, 0xe6, 0xb9, 0xba, 0xf8, 0xe7, 0x64, - 0x3a, 0x55, 0x62, 0xc5, 0xca, 0x6a, 0xed, 0x67, 0x6b, 0x37, 0x4a, 0xa8, 0xc8, 0xdd, 0x48, 0x96, - 0xcc, 0xe5, 0xd2, 0x8f, 0xa9, 0xa2, 0x5e, 0x20, 0xf8, 0x3c, 0x57, 0x97, 0xb4, 0x52, 0xac, 0x5c, - 0xfc, 0x08, 0xa3, 0x7b, 0xe2, 0x04, 0xba, 0xcc, 0x59, 0x3c, 0x1e, 0x58, 0x6b, 0xb9, 0xdd, 0x4c, - 0x9c, 0xd0, 0x36, 0x7b, 0xc4, 0x7a, 0xe2, 0x1b, 0xbb, 0x12, 0xb9, 0x62, 0xf1, 0xf8, 0xe0, 0x0f, - 0xcb, 0x6c, 0xb3, 0x47, 0xac, 0x27, 0x82, 0x69, 0xdd, 0x02, 0x6a, 0x5a, 0x40, 0xbb, 0x16, 0xf0, - 0xa3, 0x01, 0xfc, 0x6c, 0x00, 0xbf, 0x1a, 0xc0, 0xb5, 0x01, 0xdc, 0x18, 0xc0, 0x1f, 0x06, 0xf0, - 0xa7, 0x01, 0xb4, 0x33, 0x80, 0x9f, 0x3a, 0x40, 0x75, 0x07, 0xa8, 0xe9, 0x00, 0xdd, 0x10, 0x56, - 0xc5, 0xaa, 0x9f, 0x4f, 0xe8, 0xd8, 0x0d, 0x9d, 0x7f, 0x05, 0x00, 0x00, 0xff, 0xff, 0x86, 0x09, - 0xb3, 0x0a, 0xeb, 0x01, 0x00, 0x00, + // 324 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0xd1, 0x3f, 0x4e, 0xc3, 0x30, + 0x14, 0x06, 0xf0, 0x98, 0xd2, 0x08, 0x59, 0x0c, 0x55, 0xc4, 0x50, 0x31, 0xbc, 0x54, 0x4c, 0x5d, + 0x92, 0x0c, 0x8c, 0x2c, 0x28, 0xb4, 0x43, 0x07, 0x96, 0x94, 0x89, 0x2d, 0x7f, 0x8c, 0x6b, 0xa8, + 0xe3, 0x28, 0xb1, 0xab, 0xb2, 0x71, 0x04, 0x06, 0x0e, 0x81, 0x38, 0x09, 0x63, 0xc7, 0x4e, 0x85, + 0xba, 0x0b, 0xea, 0xd4, 0x23, 0x20, 0x9c, 0x0a, 0x90, 0xba, 0x76, 0xb2, 0xbf, 0xcf, 0xf2, 0xfb, + 0x49, 0x36, 0x6e, 
0x55, 0xaa, 0x28, 0xc6, 0x8f, 0xfd, 0x61, 0xef, 0xc6, 0x2f, 0x4a, 0x21, 0x85, + 0xd3, 0x34, 0xcb, 0xa9, 0x47, 0x99, 0x1c, 0xa9, 0xc4, 0x4f, 0x05, 0x0f, 0xa8, 0xa0, 0x22, 0x30, + 0x75, 0xa2, 0xee, 0x4c, 0x32, 0xc1, 0xec, 0xea, 0x5b, 0x67, 0x2f, 0x0d, 0x8c, 0x87, 0xbf, 0xa3, + 0x9c, 0x7b, 0x6c, 0xd7, 0xa9, 0x8d, 0x3a, 0xa8, 0x7b, 0x1c, 0x46, 0xeb, 0x85, 0xdb, 0x9c, 0xc4, + 0x63, 0x45, 0xde, 0x3e, 0xdc, 0x3e, 0x8f, 0xe5, 0x28, 0x48, 0x18, 0xf5, 0x07, 0xb9, 0xbc, 0xf8, + 0xe7, 0x70, 0x35, 0x96, 0x6c, 0x42, 0xca, 0x6a, 0x1a, 0xf0, 0xa9, 0x97, 0x8e, 0x62, 0x96, 0x7b, + 0xa9, 0x28, 0x89, 0x47, 0x45, 0x90, 0xc5, 0x32, 0xf6, 0x43, 0x46, 0x07, 0xb9, 0xbc, 0x8a, 0x2b, + 0x49, 0xca, 0x68, 0x2b, 0x38, 0x0f, 0xd8, 0x0e, 0x55, 0x99, 0x93, 0xac, 0x7d, 0x60, 0xac, 0xe1, + 0x7a, 0xe1, 0xda, 0x89, 0x69, 0xf6, 0x88, 0xd5, 0xc4, 0x0f, 0x76, 0xcd, 0x72, 0x49, 0xb2, 0x76, + 0xe3, 0x0f, 0xe3, 0xa6, 0xd9, 0x23, 0x56, 0x13, 0xce, 0x25, 0x6e, 0x45, 0x24, 0x15, 0xbc, 0x50, + 0x92, 0x64, 0xdb, 0xf7, 0x3c, 0xec, 0xa0, 0xee, 0x51, 0x78, 0xb2, 0x5e, 0xb8, 0x3b, 0x67, 0xd1, + 0x4e, 0x13, 0xf6, 0x66, 0x4b, 0xb0, 0xe6, 0x4b, 0xb0, 0x36, 0x4b, 0x40, 0x4f, 0x1a, 0xd0, 0xab, + 0x06, 0xf4, 0xae, 0x01, 0xcd, 0x34, 0xa0, 0xb9, 0x06, 0xf4, 0xa9, 0x01, 0x7d, 0x69, 0xb0, 0x36, + 0x1a, 0xd0, 0xf3, 0x0a, 0xac, 0xd9, 0x0a, 0xac, 0xf9, 0x0a, 0xac, 0x5b, 0x4c, 0xaa, 0x4c, 0xd6, + 0x53, 0x12, 0xdb, 0xfc, 0xf1, 0xf9, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8c, 0x06, 0x34, 0x97, + 0x2d, 0x02, 0x00, 0x00, } func (this *SupplyESDT) Equal(that interface{}) bool { @@ -149,17 +159,21 @@ func (this *SupplyESDT) Equal(that interface{}) bool { return false } } + if this.RecomputedSupply != that1.RecomputedSupply { + return false + } return true } func (this *SupplyESDT) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 7) + s := make([]string, 0, 8) s = append(s, "&esdtSupply.SupplyESDT{") s = append(s, "Supply: "+fmt.Sprintf("%#v", this.Supply)+",\n") s = append(s, "Burned: "+fmt.Sprintf("%#v", 
this.Burned)+",\n") s = append(s, "Minted: "+fmt.Sprintf("%#v", this.Minted)+",\n") + s = append(s, "RecomputedSupply: "+fmt.Sprintf("%#v", this.RecomputedSupply)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -191,6 +205,16 @@ func (m *SupplyESDT) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.RecomputedSupply { + i-- + if m.RecomputedSupply { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } { __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} size := __caster.Size(m.Minted) @@ -259,6 +283,9 @@ func (m *SupplyESDT) Size() (n int) { l = __caster.Size(m.Minted) n += 1 + l + sovSupplyESDT(uint64(l)) } + if m.RecomputedSupply { + n += 2 + } return n } @@ -276,6 +303,7 @@ func (this *SupplyESDT) String() string { `Supply:` + fmt.Sprintf("%v", this.Supply) + `,`, `Burned:` + fmt.Sprintf("%v", this.Burned) + `,`, `Minted:` + fmt.Sprintf("%v", this.Minted) + `,`, + `RecomputedSupply:` + fmt.Sprintf("%v", this.RecomputedSupply) + `,`, `}`, }, "") return s @@ -431,6 +459,26 @@ func (m *SupplyESDT) Unmarshal(dAtA []byte) error { } } iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RecomputedSupply", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSupplyESDT + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RecomputedSupply = bool(v != 0) default: iNdEx = preIndex skippy, err := skipSupplyESDT(dAtA[iNdEx:]) diff --git a/go.mod b/go.mod index bba09a4e3c7..dba5ede87df 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-core-go v1.2.1 + github.com/multiversx/mx-chain-core-go v1.2.2-0.20230428142157-76e19ecd04ac 
github.com/multiversx/mx-chain-crypto-go v1.2.6 github.com/multiversx/mx-chain-es-indexer-go v1.4.1 github.com/multiversx/mx-chain-logger-go v1.0.11 diff --git a/go.sum b/go.sum index 3b5e4647240..f0c74309ddb 100644 --- a/go.sum +++ b/go.sum @@ -617,8 +617,9 @@ github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXS github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.2.1 h1:kmDfK7Znl3S0IJlDEE4sFuBOmA2rZkBudxlGhI1bvQc= github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= +github.com/multiversx/mx-chain-core-go v1.2.2-0.20230428142157-76e19ecd04ac h1:SYlgvr/2nMupmPvZnroIXjAAVgLSZCFxYgc8XhKHzd0= +github.com/multiversx/mx-chain-core-go v1.2.2-0.20230428142157-76e19ecd04ac/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-crypto-go v1.2.6 h1:yxsjAQGh62los+iYmORMfh3w9qen0xbYlmwU0juNSeg= github.com/multiversx/mx-chain-crypto-go v1.2.6/go.mod h1:rOj0Rr19HTOYt9YTeym7RKxlHt91NXln3LVKjHKVmA0= github.com/multiversx/mx-chain-es-indexer-go v1.4.1 h1:gD/D7xZP7OL8L/ZZ3SoOfKjVHrU0iUxIG2AbidHFTUc= diff --git a/node/node.go b/node/node.go index 3de7c9610a4..1d5cc001adb 100644 --- a/node/node.go +++ b/node/node.go @@ -577,9 +577,10 @@ func (n *Node) GetTokenSupply(token string) (*api.ESDTSupply, error) { } return &api.ESDTSupply{ - Supply: bigToString(esdtSupply.Supply), - Burned: bigToString(esdtSupply.Burned), - Minted: bigToString(esdtSupply.Minted), + Supply: bigToString(esdtSupply.Supply), + Burned: bigToString(esdtSupply.Burned), + Minted: bigToString(esdtSupply.Minted), + RecomputedSupply: esdtSupply.RecomputedSupply, }, nil } diff --git a/process/sync/trieIterators/tokensSuppliesComputer.go 
b/process/sync/trieIterators/tokensSuppliesComputer.go index 8dd87fe75e5..4ab585d7183 100644 --- a/process/sync/trieIterators/tokensSuppliesComputer.go +++ b/process/sync/trieIterators/tokensSuppliesComputer.go @@ -93,18 +93,7 @@ func (t *tokensSuppliesProcessor) HandleTrieAccountIteration(userAccount state.U tokenName := string(tokenKey)[lenESDTPrefix:] tokenID, nonce := common.ExtractTokenIDAndNonceFromTokenStorageKey([]byte(tokenName)) - tokenIDStr := string(tokenID) - if nonce > 0 { - tokenIDStr += fmt.Sprintf("-%d", nonce) - } - - tokenSupply, found := t.tokensSupplies[tokenIDStr] - if !found { - t.tokensSupplies[tokenIDStr] = esToken.Value - } else { - tokenSupply = big.NewInt(0).Add(tokenSupply, esToken.Value) - t.tokensSupplies[tokenIDStr] = tokenSupply - } + t.addToBalance(tokenID, nonce, esToken.Value) } err := dataTrie.ErrChan.ReadFromChanNonBlocking() @@ -115,6 +104,31 @@ func (t *tokensSuppliesProcessor) HandleTrieAccountIteration(userAccount state.U return nil } +func (t *tokensSuppliesProcessor) addToBalance(tokenID []byte, nonce uint64, value *big.Int) { + tokenIDStr := string(tokenID) + if nonce > 0 { + t.putInSuppliesMap(string(tokenID), value) // put for collection as well + nonceStr := fmt.Sprintf("%d", nonce) + if len(nonceStr)%2 != 0 { + nonceStr = "0" + nonceStr + } + tokenIDStr += fmt.Sprintf("-%s", nonceStr) + } + + t.putInSuppliesMap(tokenIDStr, value) +} + +func (t *tokensSuppliesProcessor) putInSuppliesMap(id string, value *big.Int) { + currentValue, found := t.tokensSupplies[id] + if !found { + t.tokensSupplies[id] = value + return + } + + currentValue = big.NewInt(0).Add(currentValue, value) + t.tokensSupplies[id] = currentValue +} + // SaveSupplies will store the recomputed tokens supplies into the database func (t *tokensSuppliesProcessor) SaveSupplies() error { suppliesStorer, err := t.storageService.GetStorer(dataRetriever.ESDTSuppliesUnit) @@ -125,7 +139,8 @@ func (t *tokensSuppliesProcessor) SaveSupplies() error { for tokenName, 
supply := range t.tokensSupplies { log.Trace("repopulate tokens supplies", "token", tokenName, "supply", supply.String()) supplyObj := &esdtSupply.SupplyESDT{ - Supply: supply, + Supply: supply, + RecomputedSupply: true, } supplyObjBytes, err := t.marshaller.Marshal(supplyObj) if err != nil { @@ -138,6 +153,10 @@ func (t *tokensSuppliesProcessor) SaveSupplies() error { } } + err = suppliesStorer.Put([]byte("recomputed"), []byte("true")) + if err != nil { + return err + } log.Debug("finished the repopulation of the tokens supplies", "num tokens", len(t.tokensSupplies)) return nil From 959a913c995c8ff946678ffd1988c04dd25fce79 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Fri, 28 Apr 2023 18:17:46 +0300 Subject: [PATCH 119/221] more processing tests fixes --- factory/interface.go | 1 + factory/mock/stateComponentsHolderStub.go | 5 ++ factory/processing/processComponents_test.go | 62 +++++++++++++++++++- 3 files changed, 65 insertions(+), 3 deletions(-) diff --git a/factory/interface.go b/factory/interface.go index ecec87cabf6..2def7d7204b 100644 --- a/factory/interface.go +++ b/factory/interface.go @@ -325,6 +325,7 @@ type StateComponentsHolder interface { AccountsRepository() state.AccountsRepository TriesContainer() common.TriesHolder TrieStorageManagers() map[string]common.StorageManager + Close() error IsInterfaceNil() bool } diff --git a/factory/mock/stateComponentsHolderStub.go b/factory/mock/stateComponentsHolderStub.go index 65cf2efdb0d..075d48ea041 100644 --- a/factory/mock/stateComponentsHolderStub.go +++ b/factory/mock/stateComponentsHolderStub.go @@ -69,6 +69,11 @@ func (s *StateComponentsHolderStub) TrieStorageManagers() map[string]common.Stor return nil } +// Close - +func (s *StateComponentsHolderStub) Close() error { + return nil +} + // IsInterfaceNil - func (s *StateComponentsHolderStub) IsInterfaceNil() bool { return s == nil diff --git a/factory/processing/processComponents_test.go b/factory/processing/processComponents_test.go index 
ebe27a8962f..701828e7ddb 100644 --- a/factory/processing/processComponents_test.go +++ b/factory/processing/processComponents_test.go @@ -29,6 +29,7 @@ import ( genesisMocks "github.com/multiversx/mx-chain-go/genesis/mock" testsMocks "github.com/multiversx/mx-chain-go/integrationTests/mock" "github.com/multiversx/mx-chain-go/p2p" + "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" mxState "github.com/multiversx/mx-chain-go/state" @@ -272,7 +273,18 @@ func TestNewProcessComponentsFactory(t *testing.T) { require.True(t, errors.Is(err, errorsMx.ErrNilDataComponentsHolder)) require.Nil(t, pcf) }) - t.Run("nil Blockchain should error", func(t *testing.T) { + t.Run("nil BlockChain should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Data = &testsMocks.DataComponentsStub{ + BlockChain: nil, + } + pcf, err := processComp.NewProcessComponentsFactory(args) + require.True(t, errors.Is(err, errorsMx.ErrNilBlockChainHandler)) + require.Nil(t, pcf) + }) + t.Run("nil DataPool should error", func(t *testing.T) { t.Parallel() args := createMockProcessComponentsFactoryArgs() @@ -592,6 +604,13 @@ func TestProcessComponentsFactory_Create(t *testing.T) { args.Config.PublicKeyShardId.Type = "invalid" testCreateWithArgs(t, args, "cache type") }) + t.Run("createNetworkShardingCollector fails due to invalid PeerIdShardId config should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + args.Config.PeerIdShardId.Type = "invalid" + testCreateWithArgs(t, args, "cache type") + }) t.Run("prepareNetworkShardingCollector fails due to SetPeerShardResolver failure should error", func(t *testing.T) { t.Parallel() @@ -605,6 +624,19 @@ func TestProcessComponentsFactory_Create(t *testing.T) { } testCreateWithArgs(t, args, expectedErr.Error()) }) + t.Run("prepareNetworkShardingCollector fails due to 
SetPeerValidatorMapper failure should error", func(t *testing.T) { + t.Parallel() + + args := createMockProcessComponentsFactoryArgs() + netwCompStub, ok := args.Network.(*testsMocks.NetworkComponentsStub) + require.True(t, ok) + netwCompStub.InputAntiFlood = &testsMocks.P2PAntifloodHandlerStub{ + SetPeerValidatorMapperCalled: func(validatorMapper process.PeerValidatorMapper) error { + return expectedErr + }, + } + testCreateWithArgs(t, args, expectedErr.Error()) + }) t.Run("newStorageRequester fails due to NewStorageServiceFactory failure should error", func(t *testing.T) { t.Parallel() @@ -778,7 +810,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { } return dataPool.ValidatorsInfo() }, - CloseCalled: nil, + CloseCalled: dataPool.Close, } testCreateWithArgs(t, args, "validators info pool") }) @@ -895,7 +927,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { cnt := 0 bootstrapCompStub.ShardCoordinatorCalled = func() sharding.Coordinator { cnt++ - if cnt > 28 { + if cnt > 26 { return nil } return mock.NewMultiShardsCoordinatorMock(2) @@ -1092,6 +1124,10 @@ func TestProcessComponentsFactory_Create(t *testing.T) { instance, err := pcf.Create() require.Nil(t, err) require.NotNil(t, instance) + + err = instance.Close() + require.NoError(t, err) + _ = args.State.Close() }) t.Run("should work with indexAndReturnGenesisAccounts failing due to GetAllLeaves failure", func(t *testing.T) { t.Parallel() @@ -1123,6 +1159,10 @@ func TestProcessComponentsFactory_Create(t *testing.T) { instance, err := pcf.Create() require.Nil(t, err) require.NotNil(t, instance) + + err = instance.Close() + require.NoError(t, err) + _ = args.State.Close() }) t.Run("should work with indexAndReturnGenesisAccounts failing due to Unmarshal failure", func(t *testing.T) { t.Parallel() @@ -1172,6 +1212,10 @@ func TestProcessComponentsFactory_Create(t *testing.T) { instance, err := pcf.Create() require.Nil(t, err) require.NotNil(t, instance) + + err = instance.Close() + 
require.NoError(t, err) + _ = args.State.Close() }) t.Run("should work with indexAndReturnGenesisAccounts failing due to error on GetAllLeaves", func(t *testing.T) { t.Parallel() @@ -1208,6 +1252,10 @@ func TestProcessComponentsFactory_Create(t *testing.T) { instance, err := pcf.Create() require.Nil(t, err) require.NotNil(t, instance) + + err = instance.Close() + require.NoError(t, err) + _ = args.State.Close() }) t.Run("should work with indexAndReturnGenesisAccounts failing due to error on Encode", func(t *testing.T) { t.Parallel() @@ -1253,6 +1301,10 @@ func TestProcessComponentsFactory_Create(t *testing.T) { instance, err := pcf.Create() require.Nil(t, err) require.NotNil(t, instance) + + err = instance.Close() + require.NoError(t, err) + _ = args.State.Close() }) t.Run("should work - shard", func(t *testing.T) { shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) @@ -1266,6 +1318,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { err = instance.Close() require.NoError(t, err) + _ = processArgs.State.Close() }) t.Run("should work - meta", func(t *testing.T) { shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) @@ -1291,6 +1344,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { err = instance.Close() require.NoError(t, err) + _ = processArgs.State.Close() }) } @@ -1489,4 +1543,6 @@ func testCreateWithArgs(t *testing.T, args processComp.ProcessComponentsFactoryA require.Error(t, err) require.True(t, strings.Contains(err.Error(), expectedErrSubstr)) require.Nil(t, instance) + + _ = args.State.Close() } From a66b340e3182d36c5ff12a4c44d838b7d6b75cdf Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Tue, 2 May 2023 16:17:51 +0300 Subject: [PATCH 120/221] MX-14120: adapt tests --- .../sync/trieIterators/tokensSuppliesComputer.go | 4 ---- .../trieIterators/tokensSuppliesComputer_test.go | 14 ++++++++------ 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/process/sync/trieIterators/tokensSuppliesComputer.go 
b/process/sync/trieIterators/tokensSuppliesComputer.go index 4ab585d7183..501ea96504e 100644 --- a/process/sync/trieIterators/tokensSuppliesComputer.go +++ b/process/sync/trieIterators/tokensSuppliesComputer.go @@ -153,10 +153,6 @@ func (t *tokensSuppliesProcessor) SaveSupplies() error { } } - err = suppliesStorer.Put([]byte("recomputed"), []byte("true")) - if err != nil { - return err - } log.Debug("finished the repopulation of the tokens supplies", "num tokens", len(t.tokensSupplies)) return nil diff --git a/process/sync/trieIterators/tokensSuppliesComputer_test.go b/process/sync/trieIterators/tokensSuppliesComputer_test.go index 16e6fe1bd9a..77c548ef7d6 100644 --- a/process/sync/trieIterators/tokensSuppliesComputer_test.go +++ b/process/sync/trieIterators/tokensSuppliesComputer_test.go @@ -10,9 +10,9 @@ import ( "github.com/multiversx/mx-chain-core-go/data/esdt" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" - esdtSupply2 "github.com/multiversx/mx-chain-go/dblookupext/esdtSupply" + coreEsdt "github.com/multiversx/mx-chain-go/dblookupext/esdtSupply" "github.com/multiversx/mx-chain-go/state" - storage2 "github.com/multiversx/mx-chain-go/storage" + chainStorage "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/genericMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" @@ -138,6 +138,7 @@ func TestTokensSuppliesProcessor_HandleTrieAccountIteration(t *testing.T) { expectedSupplies := map[string]*big.Int{ "SFT-00aabb-37": big.NewInt(2), + "SFT-00aabb": big.NewInt(2), "TKN-00aacc": big.NewInt(74), } require.Equal(t, expectedSupplies, tsp.tokensSupplies) @@ -153,7 +154,7 @@ func TestTokensSuppliesProcessor_SaveSupplies(t *testing.T) { errStorerNotFound := errors.New("storer not found") args := getTokensSuppliesProcessorArgs() args.StorageService = &storage.ChainStorerStub{ - GetStorerCalled: func(unitType 
dataRetriever.UnitType) (storage2.Storer, error) { + GetStorerCalled: func(unitType dataRetriever.UnitType) (chainStorage.Storer, error) { return nil, errStorerNotFound }, } @@ -168,7 +169,7 @@ func TestTokensSuppliesProcessor_SaveSupplies(t *testing.T) { savedItems := make(map[string][]byte) args := getTokensSuppliesProcessorArgs() args.StorageService = &storage.ChainStorerStub{ - GetStorerCalled: func(unitType dataRetriever.UnitType) (storage2.Storer, error) { + GetStorerCalled: func(unitType dataRetriever.UnitType) (chainStorage.Storer, error) { return &storage.StorerStub{ PutCalled: func(key, data []byte) error { savedItems[string(key)] = data @@ -181,6 +182,7 @@ func TestTokensSuppliesProcessor_SaveSupplies(t *testing.T) { supplies := map[string]*big.Int{ "SFT-00aabb-37": big.NewInt(2), + "SFT-00aabb": big.NewInt(2), "TKN-00aacc": big.NewInt(74), } tsp.tokensSupplies = supplies @@ -189,12 +191,12 @@ func TestTokensSuppliesProcessor_SaveSupplies(t *testing.T) { require.NoError(t, err) checkStoredSupply := func(t *testing.T, key string, storedValue []byte, expectedSupply *big.Int) { - supply := esdtSupply2.SupplyESDT{} + supply := coreEsdt.SupplyESDT{} _ = args.Marshaller.Unmarshal(&supply, storedValue) require.Equal(t, expectedSupply, supply.Supply) } - require.Len(t, savedItems, 2) + require.Len(t, savedItems, 3) for key, value := range savedItems { checkStoredSupply(t, key, value, supplies[key]) } From 08312927ee8e707290181abc7db7ad4a334b38a8 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Wed, 3 May 2023 15:43:48 +0300 Subject: [PATCH 121/221] increased code coverage on dataRetriever package --- dataRetriever/blockchain/blockchain_test.go | 12 + dataRetriever/blockchain/metachain_test.go | 11 + dataRetriever/chainStorer_test.go | 3 + dataRetriever/dataPool/dataPool_test.go | 1 + .../dataPool/headersCache/headersCache.go | 2 +- .../dataPool/headersCache/headersPool_test.go | 71 +- .../metaResolversContainerFactory_test.go | 24 + 
.../shardResolversContainerFactory_test.go | 24 + dataRetriever/provider/miniBlocks_test.go | 14 +- .../requestHandlers/requestHandler_test.go | 1956 +++++++++++------ .../requesters/requesters_test.go | 2 + .../disabled/disabledEpochProvider_test.go | 19 + .../resolvers/headerResolver_test.go | 161 ++ .../resolvers/messageProcessor_test.go | 10 + .../resolvers/miniblockResolver_test.go | 203 ++ .../peerAuthenticationResolver_test.go | 42 +- .../resolvers/transactionResolver_test.go | 122 + .../resolvers/trieNodeResolver_test.go | 43 + .../resolvers/validatorInfoResolver_test.go | 34 + dataRetriever/shardedData/shardedData_test.go | 53 +- .../storageRequesters/headerRequester_test.go | 83 +- .../storageRequesters/sliceRequester_test.go | 62 +- .../trieNodeRequester_test.go | 59 + .../topicSender/diffPeerListCreator_test.go | 18 + dataRetriever/txpool/shardedTxPool_test.go | 25 + dataRetriever/unitType_test.go | 68 + testscommon/storageManagerStub.go | 4 + 27 files changed, 2393 insertions(+), 733 deletions(-) create mode 100644 dataRetriever/resolvers/epochproviders/disabled/disabledEpochProvider_test.go create mode 100644 dataRetriever/unitType_test.go diff --git a/dataRetriever/blockchain/blockchain_test.go b/dataRetriever/blockchain/blockchain_test.go index 3980d2723f7..212d2755adc 100644 --- a/dataRetriever/blockchain/blockchain_test.go +++ b/dataRetriever/blockchain/blockchain_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/mock" "github.com/stretchr/testify/assert" @@ -81,3 +82,14 @@ func TestBlockChain_SettersAndGettersNilValues(t *testing.T) { assert.Nil(t, bc.GetCurrentBlockHeader()) assert.Empty(t, bc.GetCurrentBlockRootHash()) } + +func TestBlockChain_SettersInvalidValues(t *testing.T) { + t.Parallel() + + bc, _ := NewBlockChain(&mock.AppStatusHandlerStub{}) + err 
:= bc.SetGenesisHeader(&block.MetaBlock{}) + assert.Equal(t, err, data.ErrInvalidHeaderType) + + err = bc.SetCurrentBlockHeaderAndRootHash(&block.MetaBlock{}, []byte("root hash")) + assert.Equal(t, err, data.ErrInvalidHeaderType) +} diff --git a/dataRetriever/blockchain/metachain_test.go b/dataRetriever/blockchain/metachain_test.go index eb0f589f899..684aa95477c 100644 --- a/dataRetriever/blockchain/metachain_test.go +++ b/dataRetriever/blockchain/metachain_test.go @@ -82,3 +82,14 @@ func TestMetaChain_SettersAndGettersNilValues(t *testing.T) { assert.Nil(t, mc.GetCurrentBlockHeader()) assert.Empty(t, mc.GetCurrentBlockRootHash()) } + +func TestMetaChain_SettersInvalidValues(t *testing.T) { + t.Parallel() + + bc, _ := NewMetaChain(&mock.AppStatusHandlerStub{}) + err := bc.SetGenesisHeader(&block.Header{}) + assert.Equal(t, err, ErrWrongTypeInSet) + + err = bc.SetCurrentBlockHeaderAndRootHash(&block.Header{}, []byte("root hash")) + assert.Equal(t, err, ErrWrongTypeInSet) +} diff --git a/dataRetriever/chainStorer_test.go b/dataRetriever/chainStorer_test.go index 3b4373641af..73093ccde7e 100644 --- a/dataRetriever/chainStorer_test.go +++ b/dataRetriever/chainStorer_test.go @@ -239,6 +239,9 @@ func TestBlockChain_GetStorer(t *testing.T) { assert.True(t, peerBlockUnit == storer) storer, _ = b.GetStorer(4) assert.True(t, headerUnit == storer) + storer, err := b.GetStorer(5) + assert.True(t, errors.Is(err, dataRetriever.ErrStorerNotFound)) + assert.Nil(t, storer) } func TestBlockChain_GetAllStorers(t *testing.T) { diff --git a/dataRetriever/dataPool/dataPool_test.go b/dataRetriever/dataPool/dataPool_test.go index 2eb98aee5a6..11a94c5e488 100644 --- a/dataRetriever/dataPool/dataPool_test.go +++ b/dataRetriever/dataPool/dataPool_test.go @@ -209,6 +209,7 @@ func TestNewDataPool_OkValsShouldWork(t *testing.T) { assert.True(t, args.SmartContracts == tdp.SmartContracts()) assert.True(t, args.PeerAuthentications == tdp.PeerAuthentications()) assert.True(t, args.Heartbeats == 
tdp.Heartbeats()) + assert.True(t, args.ValidatorsInfo == tdp.ValidatorsInfo()) } func TestNewDataPool_Close(t *testing.T) { diff --git a/dataRetriever/dataPool/headersCache/headersCache.go b/dataRetriever/dataPool/headersCache/headersCache.go index bba024f30db..4b1ef31d8d9 100644 --- a/dataRetriever/dataPool/headersCache/headersCache.go +++ b/dataRetriever/dataPool/headersCache/headersCache.go @@ -53,7 +53,7 @@ func (cache *headersCache) addHeader(headerHash []byte, header data.HeaderHandle return true } -//tryToDoEviction will check if pool is full and if it is will do eviction +// tryToDoEviction will check if pool is full and if so, it will do the eviction func (cache *headersCache) tryToDoEviction(shardId uint32) { numHeaders := cache.getNumHeaders(shardId) if int(numHeaders) >= cache.maxHeadersPerShard { diff --git a/dataRetriever/dataPool/headersCache/headersPool_test.go b/dataRetriever/dataPool/headersCache/headersPool_test.go index 3865c6b9c47..2b2fb4cf3c6 100644 --- a/dataRetriever/dataPool/headersCache/headersPool_test.go +++ b/dataRetriever/dataPool/headersCache/headersPool_test.go @@ -1,6 +1,7 @@ package headersCache_test import ( + "errors" "fmt" "sort" "sync" @@ -16,6 +17,45 @@ import ( "github.com/stretchr/testify/require" ) +func TestNewHeadersCacher(t *testing.T) { + t.Parallel() + + t.Run("invalid MaxHeadersPerShard should error", testNewHeadersCacher( + config.HeadersPoolConfig{ + MaxHeadersPerShard: 0, + })) + t.Run("invalid NumElementsToRemoveOnEviction should error", testNewHeadersCacher( + config.HeadersPoolConfig{ + MaxHeadersPerShard: 1, + NumElementsToRemoveOnEviction: 0, + })) + t.Run("invalid config should error", testNewHeadersCacher( + config.HeadersPoolConfig{ + MaxHeadersPerShard: 1, + NumElementsToRemoveOnEviction: 3, + })) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + headersCacher, err := headersCache.NewHeadersPool(config.HeadersPoolConfig{ + MaxHeadersPerShard: 2, + NumElementsToRemoveOnEviction: 1, + }) + 
require.NoError(t, err) + require.NotNil(t, headersCacher) + }) +} + +func testNewHeadersCacher(cfg config.HeadersPoolConfig) func(t *testing.T) { + return func(t *testing.T) { + t.Parallel() + + headersCacher, err := headersCache.NewHeadersPool(cfg) + require.True(t, errors.Is(err, headersCache.ErrInvalidHeadersCacheParameter)) + require.Nil(t, headersCacher) + } +} + func TestNewHeadersCacher_AddHeadersInCache(t *testing.T) { t.Parallel() @@ -28,11 +68,16 @@ func TestNewHeadersCacher_AddHeadersInCache(t *testing.T) { nonce := uint64(1) shardId := uint32(0) + headers, _, err := headersCacher.GetHeadersByNonceAndShardId(nonce, shardId) + require.Equal(t, headersCache.ErrHeaderNotFound, err) + require.Nil(t, headers) + headerHash1 := []byte("hash1") headerHash2 := []byte("hash2") testHdr1 := &block.Header{Nonce: nonce, ShardID: shardId} testHdr2 := &block.Header{Nonce: nonce, ShardID: shardId, Round: 100} + headersCacher.AddHeader([]byte("nil header hash"), nil) // coverage headersCacher.AddHeader(headerHash1, testHdr1) headersCacher.AddHeader(headerHash2, testHdr2) @@ -45,7 +90,7 @@ func TestNewHeadersCacher_AddHeadersInCache(t *testing.T) { require.Equal(t, testHdr2, header) expectedHeaders := []data.HeaderHandler{testHdr1, testHdr2} - headers, _, err := headersCacher.GetHeadersByNonceAndShardId(nonce, shardId) + headers, _, err = headersCacher.GetHeadersByNonceAndShardId(nonce, shardId) require.Nil(t, err) require.Equal(t, expectedHeaders, headers) } @@ -70,6 +115,8 @@ func Test_RemoveHeaderByHash(t *testing.T) { headersCacher.AddHeader(headerHash1, testHdr1) headersCacher.AddHeader(headerHash2, testHdr2) + headersCacher.RemoveHeaderByHash([]byte("")) + headersCacher.RemoveHeaderByHash([]byte("missing hash")) headersCacher.RemoveHeaderByHash(headerHash1) header, err := headersCacher.GetHeaderByHash(headerHash1) require.Nil(t, header) @@ -101,6 +148,8 @@ func TestHeadersCacher_AddHeadersInCacheAndRemoveByNonceAndShardId(t *testing.T) 
headersCacher.AddHeader(headerHash1, testHdr1) headersCacher.AddHeader(headerHash2, testHdr2) + headersCacher.RemoveHeaderByNonceAndShardId(nonce, 100) + headersCacher.RemoveHeaderByNonceAndShardId(100, shardId) headersCacher.RemoveHeaderByNonceAndShardId(nonce, shardId) header, err := headersCacher.GetHeaderByHash(headerHash1) require.Nil(t, header) @@ -577,6 +626,7 @@ func TestHeadersPool_RegisterHandler(t *testing.T) { wasCalled = true wg.Done() } + headersCacher.RegisterHandler(nil) headersCacher.RegisterHandler(handler) header, hash := createASliceOfHeaders(1, 0) headersCacher.AddHeader(hash[0], &header[0]) @@ -603,6 +653,25 @@ func TestHeadersPool_Clear(t *testing.T) { require.Equal(t, 0, headersCacher.GetNumHeaders(0)) } +func TestHeadersPool_IsInterfaceNil(t *testing.T) { + t.Parallel() + + headersCacher, _ := headersCache.NewHeadersPool( + config.HeadersPoolConfig{ + MaxHeadersPerShard: 0, + }, + ) + require.True(t, headersCacher.IsInterfaceNil()) + + headersCacher, _ = headersCache.NewHeadersPool( + config.HeadersPoolConfig{ + MaxHeadersPerShard: 1000, + NumElementsToRemoveOnEviction: 10, + }, + ) + require.False(t, headersCacher.IsInterfaceNil()) +} + func createASliceOfHeaders(numHeaders int, shardId uint32) ([]block.Header, [][]byte) { headers := make([]block.Header, 0) headersHashes := make([][]byte, 0) diff --git a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go index 3c7c8ee020d..f332cc8e9f7 100644 --- a/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go +++ b/dataRetriever/factory/resolverscontainer/metaResolversContainerFactory_test.go @@ -5,6 +5,7 @@ import ( "strings" "testing" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/factory/resolverscontainer" @@ -89,6 +90,17 
@@ func createTriesHolderForMeta() common.TriesHolder { // ------- NewResolversContainerFactory +func TestNewMetaResolversContainerFactory_NewNumGoRoutinesThrottlerFailsShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsMeta() + args.NumConcurrentResolvingJobs = 0 + rcf, err := resolverscontainer.NewMetaResolversContainerFactory(args) + + assert.Nil(t, rcf) + assert.Equal(t, core.ErrNotPositiveValue, err) +} + func TestNewMetaResolversContainerFactory_NilShardCoordinatorShouldErr(t *testing.T) { t.Parallel() @@ -281,6 +293,18 @@ func TestMetaResolversContainerFactory_With4ShardsShouldWork(t *testing.T) { assert.Equal(t, totalResolvers+noOfShards, container.Len()) } +func TestMetaResolversContainerFactory_IsInterfaceNil(t *testing.T) { + t.Parallel() + + args := getArgumentsMeta() + args.ShardCoordinator = nil + rcf, _ := resolverscontainer.NewMetaResolversContainerFactory(args) + assert.True(t, rcf.IsInterfaceNil()) + + rcf, _ = resolverscontainer.NewMetaResolversContainerFactory(getArgumentsMeta()) + assert.False(t, rcf.IsInterfaceNil()) +} + func getArgumentsMeta() resolverscontainer.FactoryArgs { return resolverscontainer.FactoryArgs{ ShardCoordinator: mock.NewOneShardCoordinatorMock(), diff --git a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go index f55fe63774e..06e7bfd8147 100644 --- a/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go +++ b/dataRetriever/factory/resolverscontainer/shardResolversContainerFactory_test.go @@ -5,6 +5,7 @@ import ( "strings" "testing" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/factory/resolverscontainer" @@ -95,6 +96,17 @@ func createTriesHolderForShard() common.TriesHolder { // ------- NewResolversContainerFactory +func 
TestNewShardResolversContainerFactory_NewNumGoRoutinesThrottlerFailsShouldErr(t *testing.T) { + t.Parallel() + + args := getArgumentsShard() + args.NumConcurrentResolvingJobs = 0 + rcf, err := resolverscontainer.NewShardResolversContainerFactory(args) + + assert.Nil(t, rcf) + assert.Equal(t, core.ErrNotPositiveValue, err) +} + func TestNewShardResolversContainerFactory_NilShardCoordinatorShouldErr(t *testing.T) { t.Parallel() @@ -337,6 +349,18 @@ func TestShardResolversContainerFactory_With4ShardsShouldWork(t *testing.T) { assert.Equal(t, totalResolvers, container.Len()) } +func TestShardResolversContainerFactory_IsInterfaceNil(t *testing.T) { + t.Parallel() + + args := getArgumentsShard() + args.ShardCoordinator = nil + rcf, _ := resolverscontainer.NewShardResolversContainerFactory(args) + assert.True(t, rcf.IsInterfaceNil()) + + rcf, _ = resolverscontainer.NewShardResolversContainerFactory(getArgumentsMeta()) + assert.False(t, rcf.IsInterfaceNil()) +} + func getArgumentsShard() resolverscontainer.FactoryArgs { return resolverscontainer.FactoryArgs{ ShardCoordinator: mock.NewOneShardCoordinatorMock(), diff --git a/dataRetriever/provider/miniBlocks_test.go b/dataRetriever/provider/miniBlocks_test.go index c935345363d..3ccbeba3490 100644 --- a/dataRetriever/provider/miniBlocks_test.go +++ b/dataRetriever/provider/miniBlocks_test.go @@ -2,6 +2,7 @@ package provider_test import ( "bytes" + "errors" "fmt" "testing" @@ -240,13 +241,24 @@ func TestMiniBlockProvider_GetMiniBlocksFromStorerShouldBeFoundInStorage(t *test existingHashes := [][]byte{ []byte("hash1"), []byte("hash2"), + []byte("hash3"), } requestedHashes := existingHashes + cnt := 0 arg := createMockMiniblockProviderArgs(nil, existingHashes) + arg.Marshalizer = &testscommon.MarshalizerStub{ + UnmarshalCalled: func(obj interface{}, buff []byte) error { + cnt++ + if cnt == 1 { + return errors.New("unmarshal fails for coverage") + } + return nil + }, + } mbp, _ := provider.NewMiniBlockProvider(arg) 
miniBlocksAndHashes, missingHashes := mbp.GetMiniBlocksFromStorer(requestedHashes) assert.Equal(t, 2, len(miniBlocksAndHashes)) - assert.Equal(t, 0, len(missingHashes)) + assert.Equal(t, 1, len(missingHashes)) } diff --git a/dataRetriever/requestHandlers/requestHandler_test.go b/dataRetriever/requestHandlers/requestHandler_test.go index 0c9abb97036..48d27f46217 100644 --- a/dataRetriever/requestHandlers/requestHandler_test.go +++ b/dataRetriever/requestHandlers/requestHandler_test.go @@ -37,711 +37,909 @@ func createRequestersFinderStubThatShouldNotBeCalled(tb testing.TB) *dataRetriev } } -func TestNewResolverRequestHandlerNilFinder(t *testing.T) { +func TestNewResolverRequestHandler(t *testing.T) { t.Parallel() - rrh, err := NewResolverRequestHandler( - nil, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + t.Run("nil finder should error", func(t *testing.T) { + t.Parallel() - assert.Nil(t, rrh) - assert.Equal(t, dataRetriever.ErrNilRequestersFinder, err) -} + rrh, err := NewResolverRequestHandler( + nil, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) -func TestNewResolverRequestHandlerNilRequestedItemsHandler(t *testing.T) { - t.Parallel() + assert.Nil(t, rrh) + assert.Equal(t, dataRetriever.ErrNilRequestersFinder, err) + }) + t.Run("nil requested items handler should error", func(t *testing.T) { + t.Parallel() - rrh, err := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{}, - nil, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + rrh, err := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{}, + nil, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - assert.Nil(t, rrh) - assert.Equal(t, dataRetriever.ErrNilRequestedItemsHandler, err) -} + assert.Nil(t, rrh) + assert.Equal(t, dataRetriever.ErrNilRequestedItemsHandler, err) + }) + t.Run("nil whitelist handler should error", func(t *testing.T) { + 
t.Parallel() -func TestNewResolverRequestHandlerMaxTxRequestTooSmall(t *testing.T) { - t.Parallel() + rrh, err := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{}, + &mock.RequestedItemsHandlerStub{}, + nil, + 1, + 0, + time.Second, + ) - rrh, err := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{}, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 0, - 0, - time.Second, - ) + assert.Nil(t, rrh) + assert.Equal(t, dataRetriever.ErrNilWhiteListHandler, err) + }) + t.Run("invalid max txs to request should error", func(t *testing.T) { + t.Parallel() - assert.Nil(t, rrh) - assert.Equal(t, dataRetriever.ErrInvalidMaxTxRequest, err) -} + rrh, err := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{}, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 0, + 0, + time.Second, + ) -func TestNewResolverRequestHandler(t *testing.T) { - t.Parallel() + assert.Nil(t, rrh) + assert.Equal(t, dataRetriever.ErrInvalidMaxTxRequest, err) + }) + t.Run("invalid request interval should error", func(t *testing.T) { + t.Parallel() - rrh, err := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{}, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + rrh, err := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{}, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Millisecond-time.Nanosecond, + ) + + assert.Nil(t, rrh) + assert.True(t, errors.Is(err, dataRetriever.ErrRequestIntervalTooSmall)) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + rrh, err := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{}, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - assert.Nil(t, err) - assert.NotNil(t, rrh) + assert.Nil(t, err) + assert.NotNil(t, rrh) + }) } -func 
TestResolverRequestHandler_RequestTransactionErrorWhenGettingCrossShardRequesterShouldNotPanic(t *testing.T) { +func TestResolverRequestHandler_RequestTransaction(t *testing.T) { t.Parallel() - defer func() { - r := recover() - if r != nil { - assert.Fail(t, "should not panic") - } - }() + t.Run("no hash should not panic", func(t *testing.T) { + t.Parallel() - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { - return nil, errExpected + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not panic") + } + }() + + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { + require.Fail(t, "should have not been called") + return nil, nil + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh.RequestTransaction(0, make([][]byte, 0)) -} + rrh.RequestTransaction(0, make([][]byte, 0)) + }) + t.Run("error when getting cross shard requester should not panic", func(t *testing.T) { + t.Parallel() -func TestResolverRequestHandler_RequestTransactionWrongResolverShouldNotPanic(t *testing.T) { - t.Parallel() + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not panic") + } + }() - defer func() { - r := recover() - if r != nil { - assert.Fail(t, "should not panic") - } - }() + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { + return nil, errExpected + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + 
time.Second, + ) - wrongTxRequester := &dataRetrieverMocks.NonceRequesterStub{} + rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) + }) + t.Run("uncastable requester should not panic", func(t *testing.T) { + t.Parallel() - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { - return wrongTxRequester, nil + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not panic") + } + }() + + wrongTxRequester := &dataRetrieverMocks.NonceRequesterStub{} + + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { + return wrongTxRequester, nil + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh.RequestTransaction(0, make([][]byte, 0)) -} + rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) + }) + t.Run("should request", func(t *testing.T) { + t.Parallel() -func TestResolverRequestHandler_RequestTransactionShouldRequestTransactions(t *testing.T) { - t.Parallel() + chTxRequested := make(chan struct{}) + txRequester := &dataRetrieverMocks.HashSliceRequesterStub{ + RequestDataFromHashArrayCalled: func(hashes [][]byte, epoch uint32) error { + chTxRequested <- struct{}{} + return nil + }, + } - chTxRequested := make(chan struct{}) - txRequester := &dataRetrieverMocks.HashSliceRequesterStub{ - RequestDataFromHashArrayCalled: func(hashes [][]byte, epoch uint32) error { - chTxRequested <- struct{}{} - return nil - }, - } + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e 
error) { + return txRequester, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { - return txRequester, nil + rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) + + select { + case <-chTxRequested: + case <-time.After(timeoutSendRequests): + assert.Fail(t, "timeout while waiting to call RequestDataFromHashArray") + } + + time.Sleep(time.Second) + }) + t.Run("should request 4 times if different shards", func(t *testing.T) { + t.Parallel() + + numRequests := uint32(0) + txRequester := &dataRetrieverMocks.HashSliceRequesterStub{ + RequestDataFromHashArrayCalled: func(hashes [][]byte, epoch uint32) error { + atomic.AddUint32(&numRequests, 1) + return nil }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + } - rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) + timeSpan := time.Second + timeCache := cache.NewTimeCache(timeSpan) + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { + return txRequester, nil + }, + }, + timeCache, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - select { - case <-chTxRequested: - case <-time.After(timeoutSendRequests): - assert.Fail(t, "timeout while waiting to call RequestDataFromHashArray") - } + rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) + rrh.RequestTransaction(1, [][]byte{[]byte("txHash")}) + rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) + rrh.RequestTransaction(1, [][]byte{[]byte("txHash")}) - time.Sleep(time.Second) -} + time.Sleep(time.Second) // let the go routines finish + assert.Equal(t, uint32(2), atomic.LoadUint32(&numRequests)) + 
time.Sleep(time.Second) // sweep will take effect -func TestResolverRequestHandler_RequestTransactionShouldRequest4TimesIfDifferentShardsAndEnoughTime(t *testing.T) { - t.Parallel() + rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) + rrh.RequestTransaction(1, [][]byte{[]byte("txHash")}) + rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) + rrh.RequestTransaction(1, [][]byte{[]byte("txHash")}) - numRequests := uint32(0) - txRequester := &dataRetrieverMocks.HashSliceRequesterStub{ - RequestDataFromHashArrayCalled: func(hashes [][]byte, epoch uint32) error { - atomic.AddUint32(&numRequests, 1) - return nil - }, - } + time.Sleep(time.Second) // let the go routines finish + assert.Equal(t, uint32(4), atomic.LoadUint32(&numRequests)) + }) + t.Run("errors on request should not panic", func(t *testing.T) { + t.Parallel() - timeSpan := time.Second - timeCache := cache.NewTimeCache(timeSpan) - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { - return txRequester, nil + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not panic") + } + }() + + chTxRequested := make(chan struct{}) + txRequester := &dataRetrieverMocks.HashSliceRequesterStub{ + RequestDataFromHashArrayCalled: func(hashes [][]byte, epoch uint32) error { + chTxRequested <- struct{}{} + return errExpected }, - }, - timeCache, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + } - rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) - rrh.RequestTransaction(1, [][]byte{[]byte("txHash")}) - rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) - rrh.RequestTransaction(1, [][]byte{[]byte("txHash")}) + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { + return txRequester, nil + 
}, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - time.Sleep(time.Second) // let the go routines finish - assert.Equal(t, uint32(2), atomic.LoadUint32(&numRequests)) - time.Sleep(time.Second) // sweep will take effect + rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) - rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) - rrh.RequestTransaction(1, [][]byte{[]byte("txHash")}) - rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) - rrh.RequestTransaction(1, [][]byte{[]byte("txHash")}) + select { + case <-chTxRequested: + case <-time.After(timeoutSendRequests): + assert.Fail(t, "timeout while waiting to call RequestDataFromHashArray") + } - time.Sleep(time.Second) // let the go routines finish - assert.Equal(t, uint32(4), atomic.LoadUint32(&numRequests)) + time.Sleep(time.Second) + }) } -func TestResolverRequestHandler_RequestTransactionErrorsOnRequestShouldNotPanic(t *testing.T) { +func TestResolverRequestHandler_RequestMiniBlock(t *testing.T) { t.Parallel() - defer func() { - r := recover() - if r != nil { - assert.Fail(t, "should not panic") - } - }() + t.Run("hash already requested", func(t *testing.T) { + t.Parallel() - chTxRequested := make(chan struct{}) - txRequester := &dataRetrieverMocks.HashSliceRequesterStub{ - RequestDataFromHashArrayCalled: func(hashes [][]byte, epoch uint32) error { - chTxRequested <- struct{}{} - return errExpected - }, - } + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not panic") + } + }() - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { - return txRequester, nil + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { + require.Fail(t, "should not 
have been called") + return nil, nil + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + &mock.RequestedItemsHandlerStub{ + HasCalled: func(key string) bool { + return true + }, + }, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh.RequestTransaction(0, [][]byte{[]byte("txHash")}) + rrh.RequestMiniBlock(0, make([]byte, 0)) + }) + t.Run("CrossShardRequester returns error", func(t *testing.T) { + t.Parallel() - select { - case <-chTxRequested: - case <-time.After(timeoutSendRequests): - assert.Fail(t, "timeout while waiting to call RequestDataFromHashArray") - } + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not panic") + } + }() - time.Sleep(time.Second) -} + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { + return nil, errExpected + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) -func TestResolverRequestHandler_RequestMiniBlockErrorWhenGettingCrossShardRequesterShouldNotPanic(t *testing.T) { - t.Parallel() + rrh.RequestMiniBlock(0, make([]byte, 0)) + }) + t.Run("RequestDataFromHash error", func(t *testing.T) { + t.Parallel() - defer func() { - r := recover() - if r != nil { - assert.Fail(t, "should not panic") + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not panic") + } + }() + + mbRequester := &dataRetrieverMocks.RequesterStub{ + RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { + return errExpected + }, + } + + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { + return mbRequester, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + 
&mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) + + rrh.RequestMiniBlock(0, []byte("mbHash")) + }) + t.Run("should request", func(t *testing.T) { + t.Parallel() + + wasCalled := false + mbRequester := &dataRetrieverMocks.RequesterStub{ + RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { + wasCalled = true + return nil + }, } - }() - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { - return nil, errExpected + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { + return mbRequester, nil + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh.RequestMiniBlock(0, make([]byte, 0)) -} + rrh.RequestMiniBlock(0, []byte("mbHash")) -func TestResolverRequestHandler_RequestMiniBlockErrorsOnRequestShouldNotPanic(t *testing.T) { - t.Parallel() + assert.True(t, wasCalled) + }) + t.Run("should call with the correct epoch", func(t *testing.T) { + t.Parallel() - defer func() { - r := recover() - if r != nil { - assert.Fail(t, "should not panic") + expectedEpoch := uint32(7) + mbRequester := &dataRetrieverMocks.RequesterStub{ + RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { + assert.Equal(t, expectedEpoch, epoch) + return nil + }, } - }() - - mbRequester := &dataRetrieverMocks.RequesterStub{ - RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { - return errExpected - }, - } - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { - 
return mbRequester, nil + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { + return mbRequester, nil + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) + + rrh.SetEpoch(expectedEpoch) - rrh.RequestMiniBlock(0, []byte("mbHash")) + rrh.RequestMiniBlock(0, []byte("mbHash")) + }) } -func TestResolverRequestHandler_RequestMiniBlockShouldCallRequestOnResolver(t *testing.T) { +func TestResolverRequestHandler_RequestShardHeader(t *testing.T) { t.Parallel() - wasCalled := false - mbRequester := &dataRetrieverMocks.RequesterStub{ - RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { - wasCalled = true - return nil - }, - } + t.Run("hash already requested should work", func(t *testing.T) { + t.Parallel() - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { - return mbRequester, nil + rrh, _ := NewResolverRequestHandler( + createRequestersFinderStubThatShouldNotBeCalled(t), + &mock.RequestedItemsHandlerStub{ + HasCalled: func(key string) bool { + return true + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) - - rrh.RequestMiniBlock(0, []byte("mbHash")) + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - assert.True(t, wasCalled) -} + rrh.RequestShardHeader(0, make([]byte, 0)) + }) + t.Run("no hash should work", func(t *testing.T) { + t.Parallel() -func TestResolverRequestHandler_RequestMiniBlockShouldCallWithTheCorrectEpoch(t *testing.T) { - t.Parallel() + rrh, _ := NewResolverRequestHandler( + 
createRequestersFinderStubThatShouldNotBeCalled(t), + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - expectedEpoch := uint32(7) - mbRequester := &dataRetrieverMocks.RequesterStub{ - RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { - assert.Equal(t, expectedEpoch, epoch) - return nil - }, - } + rrh.RequestShardHeader(1, make([]byte, 0)) + }) + t.Run("RequestDataFromHash returns error should work", func(t *testing.T) { + t.Parallel() - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { - return mbRequester, nil + mbRequester := &dataRetrieverMocks.RequesterStub{ + RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { + return errExpected }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + } - rrh.SetEpoch(expectedEpoch) + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { + return mbRequester, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh.RequestMiniBlock(0, []byte("mbHash")) -} + rrh.RequestShardHeader(0, []byte("hdrHash")) + }) + t.Run("should call request", func(t *testing.T) { + t.Parallel() -func TestResolverRequestHandler_RequestShardHeaderHashAlreadyRequestedShouldNotRequest(t *testing.T) { - t.Parallel() + wasCalled := false + mbRequester := &dataRetrieverMocks.RequesterStub{ + RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { + wasCalled = true + return nil + }, + } - rrh, _ := NewResolverRequestHandler( - createRequestersFinderStubThatShouldNotBeCalled(t), - &mock.RequestedItemsHandlerStub{ - HasCalled: func(key string) bool { - return true + 
rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, e error) { + return mbRequester, nil + }, }, - }, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh.RequestShardHeader(0, make([]byte, 0)) + rrh.RequestShardHeader(0, []byte("hdrHash")) + + assert.True(t, wasCalled) + }) } -func TestResolverRequestHandler_RequestShardHeaderHashBadRequest(t *testing.T) { +func TestResolverRequestHandler_RequestMetaHeader(t *testing.T) { t.Parallel() - rrh, _ := NewResolverRequestHandler( - createRequestersFinderStubThatShouldNotBeCalled(t), - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + t.Run("header already requested should work", func(t *testing.T) { + t.Parallel() - rrh.RequestShardHeader(1, make([]byte, 0)) -} + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{}, + &mock.RequestedItemsHandlerStub{ + HasCalled: func(key string) bool { + return true + }, + }, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) -func TestResolverRequestHandler_RequestShardHeaderShouldCallRequestOnResolver(t *testing.T) { - t.Parallel() + rrh.RequestMetaHeader([]byte("hdrHash")) + }) + t.Run("cast fail should work", func(t *testing.T) { + t.Parallel() - wasCalled := false - mbRequester := &dataRetrieverMocks.RequesterStub{ - RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { - wasCalled = true - return nil - }, - } + req := &dataRetrieverMocks.RequesterStub{ + RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { + require.Fail(t, "should have not been called") + return nil + }, + } - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - CrossShardRequesterCalled: func(baseTopic string, crossShard 
uint32) (requester dataRetriever.Requester, e error) { - return mbRequester, nil + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { + return req, nil + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh.RequestShardHeader(0, []byte("hdrHash")) + rrh.RequestMetaHeader([]byte("hdrHash")) + }) + t.Run("MetaChainRequester returns error should work", func(t *testing.T) { + t.Parallel() - assert.True(t, wasCalled) -} + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { + return nil, errExpected + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) -func TestResolverRequestHandler_RequestMetadHeaderHashAlreadyRequestedShouldNotRequest(t *testing.T) { - t.Parallel() + rrh.RequestMetaHeader([]byte("hdrHash")) + }) + t.Run("RequestDataFromHash returns error should work", func(t *testing.T) { + t.Parallel() - rrh, _ := NewResolverRequestHandler( - createRequestersFinderStubThatShouldNotBeCalled(t), - &mock.RequestedItemsHandlerStub{ - HasCalled: func(key string) bool { - return true + req := &dataRetrieverMocks.HeaderRequesterStub{ + RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { + return errExpected }, - }, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + } - rrh.RequestMetaHeader(make([]byte, 0)) -} + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { + return req, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 
1, + 0, + time.Second, + ) -func TestResolverRequestHandler_RequestMetadHeaderHashNotHeaderResolverShouldNotRequest(t *testing.T) { - t.Parallel() + rrh.RequestMetaHeader([]byte("hdrHash")) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - wasCalled := false - mbRequester := &dataRetrieverMocks.RequesterStub{ - RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { - wasCalled = true - return nil - }, - } + wasCalled := false + mbRequester := &dataRetrieverMocks.HeaderRequesterStub{ + RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { + wasCalled = true + return nil + }, + } - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { - return mbRequester, nil + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { + return mbRequester, nil + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh.RequestMetaHeader([]byte("hdrHash")) + rrh.RequestMetaHeader([]byte("hdrHash")) - assert.False(t, wasCalled) + assert.True(t, wasCalled) + }) } -func TestResolverRequestHandler_RequestMetaHeaderShouldCallRequestOnResolver(t *testing.T) { +func TestResolverRequestHandler_RequestShardHeaderByNonce(t *testing.T) { t.Parallel() - wasCalled := false - mbRequester := &dataRetrieverMocks.HeaderRequesterStub{ - RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { - wasCalled = true - return nil - }, - } + t.Run("nonce already requested should work", func(t *testing.T) { + t.Parallel() - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - MetaChainRequesterCalled: func(baseTopic string) (requester 
dataRetriever.Requester, e error) { - return mbRequester, nil + called := false + rrh, _ := NewResolverRequestHandler( + createRequestersFinderStubThatShouldNotBeCalled(t), + &mock.RequestedItemsHandlerStub{ + HasCalled: func(key string) bool { + called = true + return true + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) - - rrh.RequestMetaHeader([]byte("hdrHash")) - - assert.True(t, wasCalled) -} + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) -func TestResolverRequestHandler_RequestShardHeaderByNonceAlreadyRequestedShouldNotRequest(t *testing.T) { - t.Parallel() + rrh.RequestShardHeaderByNonce(0, 0) + require.True(t, called) + }) + t.Run("invalid nonce should work", func(t *testing.T) { + t.Parallel() - called := false - rrh, _ := NewResolverRequestHandler( - createRequestersFinderStubThatShouldNotBeCalled(t), - &mock.RequestedItemsHandlerStub{ - HasCalled: func(key string) bool { - called = true - return true + called := false + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, err error) { + called = true + return nil, errExpected + }, }, - }, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + core.MetachainShardId, + time.Second, + ) - rrh.RequestShardHeaderByNonce(0, 0) - require.True(t, called) -} + rrh.RequestShardHeaderByNonce(1, 0) + require.True(t, called) + }) + t.Run("finder returns error should work and not panic", func(t *testing.T) { + t.Parallel() -func TestResolverRequestHandler_RequestShardHeaderByNonceBadRequest(t *testing.T) { - t.Parallel() + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not panic") + } + }() - localErr := errors.New("err") - called := false - rrh, _ := NewResolverRequestHandler( - 
&dataRetrieverMocks.RequestersFinderStub{ - CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (requester dataRetriever.Requester, err error) { - called = true - return nil, localErr + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, shardID uint32) (requester dataRetriever.Requester, e error) { + return nil, errExpected + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - core.MetachainShardId, - time.Second, - ) + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh.RequestShardHeaderByNonce(1, 0) - require.True(t, called) -} + rrh.RequestShardHeaderByNonce(0, 0) + }) + t.Run("cast fails should work and not panic", func(t *testing.T) { + t.Parallel() -func TestResolverRequestHandler_RequestShardHeaderByNonceFinderReturnsErrorShouldNotPanic(t *testing.T) { - t.Parallel() + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not panic") + } + }() - defer func() { - r := recover() - if r != nil { - assert.Fail(t, "should not panic") - } - }() + hdrRequester := &dataRetrieverMocks.RequesterStub{} - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - CrossShardRequesterCalled: func(baseTopic string, shardID uint32) (requester dataRetriever.Requester, e error) { - return nil, errExpected + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, shardID uint32) (requester dataRetriever.Requester, e error) { + return hdrRequester, nil + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh.RequestShardHeaderByNonce(0, 0) -} + rrh.RequestShardHeaderByNonce(0, 0) + }) + t.Run("resolver fails should work and 
not panic", func(t *testing.T) { + t.Parallel() -func TestResolverRequestHandler_RequestShardHeaderByNonceFinderReturnsAWrongResolverShouldNotPanic(t *testing.T) { - t.Parallel() + defer func() { + r := recover() + if r != nil { + assert.Fail(t, "should not panic") + } + }() - defer func() { - r := recover() - if r != nil { - assert.Fail(t, "should not panic") + hdrRequester := &dataRetrieverMocks.NonceRequesterStub{ + RequestDataFromNonceCalled: func(nonce uint64, epoch uint32) error { + return errExpected + }, } - }() - - hdrRequester := &dataRetrieverMocks.RequesterStub{ - RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { - return errExpected - }, - } - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - CrossShardRequesterCalled: func(baseTopic string, shardID uint32) (requester dataRetriever.Requester, e error) { - return hdrRequester, nil + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, shardID uint32) (requester dataRetriever.Requester, e error) { + return hdrRequester, nil + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) - - rrh.RequestShardHeaderByNonce(0, 0) -} + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) -func TestResolverRequestHandler_RequestShardHeaderByNonceResolverFailsShouldNotPanic(t *testing.T) { - t.Parallel() + rrh.RequestShardHeaderByNonce(0, 0) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - defer func() { - r := recover() - if r != nil { - assert.Fail(t, "should not panic") + wasCalled := false + hdrRequester := &dataRetrieverMocks.NonceRequesterStub{ + RequestDataFromNonceCalled: func(nonce uint64, epoch uint32) error { + wasCalled = true + return nil + }, } - }() - - hdrRequester := &dataRetrieverMocks.RequesterStub{ - RequestDataFromHashCalled: func(hash []byte, epoch uint32) 
error { - return errExpected - }, - } - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - CrossShardRequesterCalled: func(baseTopic string, shardID uint32) (requester dataRetriever.Requester, e error) { - return hdrRequester, nil + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, shardID uint32) (requester dataRetriever.Requester, e error) { + return hdrRequester, nil + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) + + rrh.RequestShardHeaderByNonce(0, 0) - rrh.RequestShardHeaderByNonce(0, 0) + assert.True(t, wasCalled) + }) } -func TestResolverRequestHandler_RequestShardHeaderByNonceShouldRequest(t *testing.T) { +func TestResolverRequestHandler_RequestMetaHeaderByNonce(t *testing.T) { t.Parallel() - wasCalled := false - hdrRequester := &dataRetrieverMocks.NonceRequesterStub{ - RequestDataFromNonceCalled: func(nonce uint64, epoch uint32) error { - wasCalled = true - return nil - }, - } + t.Run("nonce already requested should work", func(t *testing.T) { + t.Parallel() - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - CrossShardRequesterCalled: func(baseTopic string, shardID uint32) (requester dataRetriever.Requester, e error) { - return hdrRequester, nil + rrh, _ := NewResolverRequestHandler( + createRequestersFinderStubThatShouldNotBeCalled(t), + &mock.RequestedItemsHandlerStub{ + HasCalled: func(key string) bool { + return true + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh.RequestShardHeaderByNonce(0, 0) + rrh.RequestMetaHeaderByNonce(0) + }) + t.Run("MetaChainRequester returns error should work", func(t *testing.T) { + 
t.Parallel() - assert.True(t, wasCalled) -} + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { + return nil, errExpected + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{ + AddCalled: func(keys [][]byte) { + require.Fail(t, "should not have been called") + }, + }, + 100, + 0, + time.Second, + ) -func TestResolverRequestHandler_RequestMetaHeaderHashAlreadyRequestedShouldNotRequest(t *testing.T) { - t.Parallel() + rrh.RequestMetaHeaderByNonce(0) + }) + t.Run("RequestDataFromNonce returns error should work", func(t *testing.T) { + t.Parallel() - rrh, _ := NewResolverRequestHandler( - createRequestersFinderStubThatShouldNotBeCalled(t), - &mock.RequestedItemsHandlerStub{ - HasCalled: func(key string) bool { - return true + hdrRequester := &dataRetrieverMocks.HeaderRequesterStub{ + RequestDataFromNonceCalled: func(nonce uint64, epoch uint32) error { + return errExpected }, - }, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + } - rrh.RequestMetaHeaderByNonce(0) -} + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { + return hdrRequester, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 100, + 0, + time.Second, + ) -func TestResolverRequestHandler_RequestMetaHeaderByNonceShouldRequest(t *testing.T) { - t.Parallel() + rrh.RequestMetaHeaderByNonce(0) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - wasCalled := false - hdrRequester := &dataRetrieverMocks.HeaderRequesterStub{ - RequestDataFromNonceCalled: func(nonce uint64, epoch uint32) error { - wasCalled = true - return nil - }, - } + wasCalled := false + hdrRequester := &dataRetrieverMocks.HeaderRequesterStub{ + RequestDataFromNonceCalled: func(nonce uint64, epoch uint32) 
error { + wasCalled = true + return nil + }, + } - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { - return hdrRequester, nil + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { + return hdrRequester, nil + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 100, - 0, - time.Second, - ) + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 100, + 0, + time.Second, + ) - rrh.RequestMetaHeaderByNonce(0) + rrh.RequestMetaHeaderByNonce(0) - assert.True(t, wasCalled) + assert.True(t, wasCalled) + }) } func TestResolverRequestHandler_RequestScrErrorWhenGettingCrossShardRequesterShouldNotPanic(t *testing.T) { @@ -910,168 +1108,211 @@ func TestResolverRequestHandler_RequestRewardShouldRequestReward(t *testing.T) { time.Sleep(time.Second) } -func TestRequestTrieNodes_ShouldWork(t *testing.T) { +func TestRequestTrieNodes(t *testing.T) { t.Parallel() - chTxRequested := make(chan struct{}) - requesterMock := &dataRetrieverMocks.HashSliceRequesterStub{ - RequestDataFromHashArrayCalled: func(hash [][]byte, epoch uint32) error { - chTxRequested <- struct{}{} - return nil - }, - } + t.Run("should work", func(t *testing.T) { + t.Parallel() - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - MetaCrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (dataRetriever.Requester, error) { - return requesterMock, nil + chTxRequested := make(chan struct{}) + requesterMock := &dataRetrieverMocks.HashSliceRequesterStub{ + RequestDataFromHashArrayCalled: func(hash [][]byte, epoch uint32) error { + chTxRequested <- struct{}{} + return nil }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + } 
- rrh.RequestTrieNodes(0, [][]byte{[]byte("hash")}, "topic") - select { - case <-chTxRequested: - case <-time.After(timeoutSendRequests): - assert.Fail(t, "timeout while waiting to call RequestDataFromHashArray") - } + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaCrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (dataRetriever.Requester, error) { + return requesterMock, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - time.Sleep(time.Second) -} + rrh.RequestTrieNodes(0, [][]byte{[]byte("hash")}, "topic") + select { + case <-chTxRequested: + case <-time.After(timeoutSendRequests): + assert.Fail(t, "timeout while waiting to call RequestDataFromHashArray") + } -func TestRequestTrieNodes_NilResolver(t *testing.T) { - t.Parallel() + time.Sleep(time.Second) + }) + t.Run("nil resolver", func(t *testing.T) { + t.Parallel() - localError := errors.New("test error") - called := false - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - MetaCrossShardRequesterCalled: func(baseTopic string, shId uint32) (requester dataRetriever.Requester, err error) { - called = true - return nil, localError + localError := errors.New("test error") + called := false + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaCrossShardRequesterCalled: func(baseTopic string, shId uint32) (requester dataRetriever.Requester, err error) { + called = true + return nil, localError + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) - - rrh.RequestTrieNodes(core.MetachainShardId, [][]byte{[]byte("hash")}, "topic") - assert.True(t, called) -} + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) -func TestRequestStartOfEpochMetaBlock_MissingResolver(t *testing.T) { - t.Parallel() + 
rrh.RequestTrieNodes(core.MetachainShardId, [][]byte{[]byte("hash")}, "topic") + assert.True(t, called) + }) + t.Run("no hash", func(t *testing.T) { + t.Parallel() - called := false - localError := errors.New("test error") - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, err error) { - called = true - return nil, localError + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaCrossShardRequesterCalled: func(baseTopic string, shId uint32) (requester dataRetriever.Requester, err error) { + require.Fail(t, "should have not been called") + return nil, nil + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh.RequestStartOfEpochMetaBlock(0) - assert.True(t, called) + rrh.RequestTrieNodes(core.MetachainShardId, [][]byte{}, "topic") + }) } -func TestRequestStartOfEpochMetaBlock_WrongResolver(t *testing.T) { +func TestResolverRequestHandler_RequestStartOfEpochMetaBlock(t *testing.T) { t.Parallel() - called := false - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, err error) { - called = true - return &dataRetrieverMocks.RequesterStub{}, nil + t.Run("epoch already requested", func(t *testing.T) { + t.Parallel() + + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, err error) { + require.Fail(t, "should not have been called") + return nil, nil + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + &mock.RequestedItemsHandlerStub{ + HasCalled: func(key string) bool { + 
return true + }, + }, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh.RequestStartOfEpochMetaBlock(0) - assert.True(t, called) -} + rrh.RequestStartOfEpochMetaBlock(0) + }) + t.Run("missing resolver", func(t *testing.T) { + t.Parallel() -func TestRequestStartOfEpochMetaBlock_RequestDataFromEpochError(t *testing.T) { - t.Parallel() + called := false + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, err error) { + called = true + return nil, errExpected + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - called := false - localError := errors.New("test error") - requesterMock := &dataRetrieverMocks.EpochRequesterStub{ - RequestDataFromEpochCalled: func(identifier []byte) error { - called = true - return localError - }, - } + rrh.RequestStartOfEpochMetaBlock(0) + assert.True(t, called) + }) + t.Run("wrong resolver", func(t *testing.T) { + t.Parallel() - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, err error) { - return requesterMock, nil + called := false + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, err error) { + called = true + return &dataRetrieverMocks.RequesterStub{}, nil + }, }, - }, - &mock.RequestedItemsHandlerStub{}, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh.RequestStartOfEpochMetaBlock(0) - assert.True(t, called) -} + rrh.RequestStartOfEpochMetaBlock(0) + assert.True(t, called) + }) + t.Run("RequestDataFromEpoch fails", func(t *testing.T) { + t.Parallel() -func 
TestRequestStartOfEpochMetaBlock_AddError(t *testing.T) { - t.Parallel() + called := false + requesterMock := &dataRetrieverMocks.EpochRequesterStub{ + RequestDataFromEpochCalled: func(identifier []byte) error { + called = true + return errExpected + }, + } - called := false - localError := errors.New("test error") - requesterMock := &dataRetrieverMocks.EpochRequesterStub{ - RequestDataFromEpochCalled: func(identifier []byte) error { - return nil - }, - } + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, err error) { + return requesterMock, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh, _ := NewResolverRequestHandler( - &dataRetrieverMocks.RequestersFinderStub{ - MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, err error) { - return requesterMock, nil + rrh.RequestStartOfEpochMetaBlock(0) + assert.True(t, called) + }) + t.Run("add error", func(t *testing.T) { + t.Parallel() + + called := false + requesterMock := &dataRetrieverMocks.EpochRequesterStub{ + RequestDataFromEpochCalled: func(identifier []byte) error { + return nil }, - }, - &mock.RequestedItemsHandlerStub{ - AddCalled: func(key string) error { - called = true - return localError + } + + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, err error) { + return requesterMock, nil + }, }, - }, - &mock.WhiteListHandlerStub{}, - 1, - 0, - time.Second, - ) + &mock.RequestedItemsHandlerStub{ + AddCalled: func(key string) error { + called = true + return errExpected + }, + }, + &mock.WhiteListHandlerStub{}, + 1, + 0, + time.Second, + ) - rrh.RequestStartOfEpochMetaBlock(0) - assert.True(t, called) + rrh.RequestStartOfEpochMetaBlock(0) + assert.True(t, called) + }) } func 
TestResolverRequestHandler_RequestTrieNodeRequestFails(t *testing.T) { @@ -1182,14 +1423,12 @@ func TestResolverRequestHandler_RequestTrieNodeNotAValidResolver(t *testing.T) { assert.True(t, called) } -//------- RequestPeerAuthentications - func TestResolverRequestHandler_RequestPeerAuthenticationsByHashes(t *testing.T) { t.Parallel() providedHashes := [][]byte{[]byte("h1"), []byte("h2")} providedShardId := uint32(15) - t.Run("CrossShardRequester returns error", func(t *testing.T) { + t.Run("MetaChainRequester returns error", func(t *testing.T) { t.Parallel() wasCalled := false @@ -1219,18 +1458,12 @@ func TestResolverRequestHandler_RequestPeerAuthenticationsByHashes(t *testing.T) t.Run("cast fails", func(t *testing.T) { t.Parallel() - wasCalled := false - mbRequester := &dataRetrieverMocks.RequesterStub{ - RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { - wasCalled = true - return nil - }, - } + req := &dataRetrieverMocks.NonceRequesterStub{} rrh, _ := NewResolverRequestHandler( &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (dataRetriever.Requester, error) { assert.Equal(t, common.PeerAuthenticationTopic, baseTopic) - return mbRequester, errExpected + return req, nil }, }, &mock.RequestedItemsHandlerStub{}, @@ -1241,7 +1474,6 @@ func TestResolverRequestHandler_RequestPeerAuthenticationsByHashes(t *testing.T) ) rrh.RequestPeerAuthenticationsByHashes(providedShardId, providedHashes) - assert.False(t, wasCalled) }) t.Run("RequestDataFromHashArray returns error", func(t *testing.T) { t.Parallel() @@ -1261,7 +1493,12 @@ func TestResolverRequestHandler_RequestPeerAuthenticationsByHashes(t *testing.T) return paRequester, nil }, }, - &mock.RequestedItemsHandlerStub{}, + &mock.RequestedItemsHandlerStub{ + AddCalled: func(key string) error { + require.Fail(t, "should not have been called") + return nil + }, + }, &mock.WhiteListHandlerStub{}, 1, 0, @@ -1311,7 +1548,33 @@ func 
TestResolverRequestHandler_RequestPeerAuthenticationsByHashes(t *testing.T) func TestResolverRequestHandler_RequestValidatorInfo(t *testing.T) { t.Parallel() + t.Run("hash already requested should work", func(t *testing.T) { + t.Parallel() + + providedHash := []byte("provided hash") + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { + require.Fail(t, "should not have been called") + return nil, nil + }, + }, + &mock.RequestedItemsHandlerStub{ + HasCalled: func(key string) bool { + return true + }, + }, + &mock.WhiteListHandlerStub{}, + 100, + 0, + time.Second, + ) + + rrh.RequestValidatorInfo(providedHash) + }) t.Run("MetaChainRequester returns error", func(t *testing.T) { + t.Parallel() + providedHash := []byte("provided hash") wasCalled := false res := &dataRetrieverMocks.RequesterStub{ @@ -1324,7 +1587,7 @@ func TestResolverRequestHandler_RequestValidatorInfo(t *testing.T) { rrh, _ := NewResolverRequestHandler( &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { - return res, errors.New("provided err") + return res, errExpected }, }, &mock.RequestedItemsHandlerStub{}, @@ -1337,7 +1600,39 @@ func TestResolverRequestHandler_RequestValidatorInfo(t *testing.T) { rrh.RequestValidatorInfo(providedHash) assert.False(t, wasCalled) }) + t.Run("RequestDataFromHash returns error", func(t *testing.T) { + t.Parallel() + + providedHash := []byte("provided hash") + res := &dataRetrieverMocks.RequesterStub{ + RequestDataFromHashCalled: func(hash []byte, epoch uint32) error { + return errExpected + }, + } + + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { + return res, nil + }, + }, + &mock.RequestedItemsHandlerStub{ + AddCalled: 
func(key string) error { + require.Fail(t, "should not have been called") + return nil + }, + }, + &mock.WhiteListHandlerStub{}, + 100, + 0, + time.Second, + ) + + rrh.RequestValidatorInfo(providedHash) + }) t.Run("should work", func(t *testing.T) { + t.Parallel() + providedHash := []byte("provided hash") wasCalled := false res := &dataRetrieverMocks.RequesterStub{ @@ -1370,7 +1665,28 @@ func TestResolverRequestHandler_RequestValidatorInfo(t *testing.T) { func TestResolverRequestHandler_RequestValidatorsInfo(t *testing.T) { t.Parallel() + t.Run("no hash", func(t *testing.T) { + t.Parallel() + + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { + require.Fail(t, "should not have been called") + return nil, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 100, + 0, + time.Second, + ) + + rrh.RequestValidatorsInfo([][]byte{}) + }) t.Run("MetaChainRequester returns error", func(t *testing.T) { + t.Parallel() + providedHash := []byte("provided hash") wasCalled := false res := &dataRetrieverMocks.RequesterStub{ @@ -1383,7 +1699,7 @@ func TestResolverRequestHandler_RequestValidatorsInfo(t *testing.T) { rrh, _ := NewResolverRequestHandler( &dataRetrieverMocks.RequestersFinderStub{ MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { - return res, errors.New("provided err") + return res, errExpected }, }, &mock.RequestedItemsHandlerStub{}, @@ -1396,7 +1712,39 @@ func TestResolverRequestHandler_RequestValidatorsInfo(t *testing.T) { rrh.RequestValidatorsInfo([][]byte{providedHash}) assert.False(t, wasCalled) }) + t.Run("RequestDataFromHashArray returns error", func(t *testing.T) { + t.Parallel() + + providedHash := []byte("provided hash") + res := &dataRetrieverMocks.HashSliceRequesterStub{ + RequestDataFromHashArrayCalled: func(hashes [][]byte, epoch uint32) error { + 
return errExpected + }, + } + + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + MetaChainRequesterCalled: func(baseTopic string) (requester dataRetriever.Requester, e error) { + return res, nil + }, + }, + &mock.RequestedItemsHandlerStub{ + AddCalled: func(key string) error { + require.Fail(t, "should not have been called") + return nil + }, + }, + &mock.WhiteListHandlerStub{}, + 100, + 0, + time.Second, + ) + + rrh.RequestValidatorsInfo([][]byte{providedHash}) + }) t.Run("cast fails", func(t *testing.T) { + t.Parallel() + providedHash := []byte("provided hash") mbRequester := &dataRetrieverMocks.NonceRequesterStub{} // uncastable to HashSliceRequester wasCalled := false @@ -1421,6 +1769,8 @@ func TestResolverRequestHandler_RequestValidatorsInfo(t *testing.T) { assert.False(t, wasCalled) }) t.Run("should work", func(t *testing.T) { + t.Parallel() + providedHashes := [][]byte{[]byte("provided hash 1"), []byte("provided hash 2")} wasCalled := false res := &dataRetrieverMocks.HashSliceRequesterStub{ @@ -1449,3 +1799,201 @@ func TestResolverRequestHandler_RequestValidatorsInfo(t *testing.T) { assert.True(t, wasCalled) }) } + +func TestResolverRequestHandler_RequestMiniblocks(t *testing.T) { + t.Parallel() + + t.Run("no hash should work", func(t *testing.T) { + t.Parallel() + + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (dataRetriever.Requester, error) { + require.Fail(t, "should have not been called") + return nil, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 100, + 0, + time.Second, + ) + + rrh.RequestMiniBlocks(0, [][]byte{}) + }) + t.Run("CrossShardRequester fails should work", func(t *testing.T) { + t.Parallel() + + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) 
(dataRetriever.Requester, error) { + return nil, errExpected + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 100, + 0, + time.Second, + ) + + rrh.RequestMiniBlocks(0, [][]byte{[]byte("mbHash")}) + }) + t.Run("cast fails should work", func(t *testing.T) { + t.Parallel() + + nonceRequester := &dataRetrieverMocks.NonceRequesterStub{} // uncastable to HashSliceRequester + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (dataRetriever.Requester, error) { + return nonceRequester, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{ + AddCalled: func(keys [][]byte) { + require.Fail(t, "should have not been called") + }, + }, + 100, + 0, + time.Second, + ) + + rrh.RequestMiniBlocks(0, [][]byte{[]byte("mbHash")}) + }) + t.Run("request data fails should work", func(t *testing.T) { + t.Parallel() + + mbRequester := &dataRetrieverMocks.HashSliceRequesterStub{ + RequestDataFromHashArrayCalled: func(hashes [][]byte, epoch uint32) error { + return errExpected + }, + } + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (dataRetriever.Requester, error) { + return mbRequester, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 100, + 0, + time.Second, + ) + + rrh.RequestMiniBlocks(0, [][]byte{[]byte("mbHash")}) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + CrossShardRequesterCalled: func(baseTopic string, crossShard uint32) (dataRetriever.Requester, error) { + return &dataRetrieverMocks.HashSliceRequesterStub{}, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 100, + 0, + time.Second, + ) + + rrh.RequestMiniBlocks(0, [][]byte{[]byte("mbHash")}) + 
}) +} + +func TestResolverRequestHandler_RequestInterval(t *testing.T) { + t.Parallel() + + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{}, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 100, + 0, + time.Second, + ) + require.Equal(t, time.Second, rrh.RequestInterval()) +} + +func TestResolverRequestHandler_NumPeersToQuery(t *testing.T) { + t.Parallel() + + t.Run("get returns error", func(t *testing.T) { + t.Parallel() + + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + GetCalled: func(key string) (dataRetriever.Requester, error) { + return nil, errExpected + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 100, + 0, + time.Second, + ) + + _, _, err := rrh.GetNumPeersToQuery("key") + require.Equal(t, errExpected, err) + + err = rrh.SetNumPeersToQuery("key", 1, 1) + require.Equal(t, errExpected, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + req := &dataRetrieverMocks.RequesterStub{ + SetNumPeersToQueryCalled: func(intra int, cross int) { + require.Equal(t, 1, intra) + require.Equal(t, 1, cross) + }, + NumPeersToQueryCalled: func() (int, int) { + return 10, 10 + }, + } + + rrh, _ := NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{ + GetCalled: func(key string) (dataRetriever.Requester, error) { + return req, nil + }, + }, + &mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 100, + 0, + time.Second, + ) + + intra, cross, err := rrh.GetNumPeersToQuery("key") + require.NoError(t, err) + require.Equal(t, 10, intra) + require.Equal(t, 10, cross) + + err = rrh.SetNumPeersToQuery("key", 1, 1) + require.NoError(t, err) + }) +} + +func TestResolverRequestHandler_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var rrh *resolverRequestHandler + require.True(t, rrh.IsInterfaceNil()) + + rrh, _ = NewResolverRequestHandler( + &dataRetrieverMocks.RequestersFinderStub{}, + 
&mock.RequestedItemsHandlerStub{}, + &mock.WhiteListHandlerStub{}, + 100, + 0, + time.Second, + ) + require.False(t, rrh.IsInterfaceNil()) +} diff --git a/dataRetriever/requestHandlers/requesters/requesters_test.go b/dataRetriever/requestHandlers/requesters/requesters_test.go index 4ec7ec9a74e..a0029d755ae 100644 --- a/dataRetriever/requestHandlers/requesters/requesters_test.go +++ b/dataRetriever/requestHandlers/requesters/requesters_test.go @@ -10,6 +10,7 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever/mock" "github.com/multiversx/mx-chain-go/dataRetriever/requestHandlers" dataRetrieverStub "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" + logger "github.com/multiversx/mx-chain-logger-go" "github.com/stretchr/testify/assert" ) @@ -81,6 +82,7 @@ func testRequestDataFromHashArray(t *testing.T, requesterType requestHandlerType t.Run("should work", func(t *testing.T) { t.Parallel() + _ = logger.SetLogLevel("*:TRACE") // coverage providedEpoch := uint32(1234) providedHashes := [][]byte{[]byte("hash 1"), []byte("hash 2"), []byte("hash 3")} args := createMockArgBaseRequester() diff --git a/dataRetriever/resolvers/epochproviders/disabled/disabledEpochProvider_test.go b/dataRetriever/resolvers/epochproviders/disabled/disabledEpochProvider_test.go new file mode 100644 index 00000000000..b4b5fb95fac --- /dev/null +++ b/dataRetriever/resolvers/epochproviders/disabled/disabledEpochProvider_test.go @@ -0,0 +1,19 @@ +package disabled + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestEpochProvider(t *testing.T) { + t.Parallel() + + var ep *epochProvider + require.True(t, ep.IsInterfaceNil()) + + ep = NewEpochProvider() + require.False(t, ep.IsInterfaceNil()) + require.True(t, ep.EpochIsActiveInNetwork(1)) + ep.EpochConfirmed(0, 0) +} diff --git a/dataRetriever/resolvers/headerResolver_test.go b/dataRetriever/resolvers/headerResolver_test.go index 9b36dc5d0c7..e71fff039bd 100644 --- 
a/dataRetriever/resolvers/headerResolver_test.go +++ b/dataRetriever/resolvers/headerResolver_test.go @@ -3,6 +3,8 @@ package resolvers_test import ( "bytes" "errors" + "fmt" + "math" "sync" "testing" @@ -194,6 +196,32 @@ func TestHeaderResolver_ProcessReceivedMessage_WrongIdentifierStartBlock(t *test assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) } +func TestHeaderResolver_ProcessReceivedMessageEpochTypeUnknownEpochShouldWork(t *testing.T) { + t.Parallel() + + arg := createMockArgHeaderResolver() + arg.HdrStorage = &storageStubs.StorerStub{ + SearchFirstCalled: func(key []byte) (i []byte, e error) { + return []byte("hash"), nil + }, + } + wasSent := false + arg.SenderResolver = &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer core.PeerID) error { + wasSent = true + return nil + }, + } + hdrRes, _ := resolvers.NewHeaderResolver(arg) + + requestedData := []byte(fmt.Sprintf("epoch_%d", math.MaxUint32)) + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.EpochType, requestedData), "") + assert.NoError(t, err) + assert.True(t, wasSent) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) +} + func TestHeaderResolver_ProcessReceivedMessage_Ok(t *testing.T) { t.Parallel() @@ -260,6 +288,43 @@ func TestHeaderResolver_ValidateRequestHashTypeFoundInHdrPoolShouldSearchAndSend assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) } +func TestHeaderResolver_ValidateRequestHashTypeFoundInHdrPoolShouldSearchAndSendFullHistory(t *testing.T) { + t.Parallel() + + requestedData := []byte("aaaa") + + searchWasCalled := false + sendWasCalled := false + + headers := &mock.HeadersCacherStub{} + + headers.GetHeaderByHashCalled = func(hash []byte) (handler data.HeaderHandler, e error) { + if bytes.Equal(requestedData, hash) { + searchWasCalled = true + return &block.Header{}, nil + } + return nil, errors.New("0") + } + + arg := 
createMockArgHeaderResolver() + arg.IsFullHistoryNode = true + arg.SenderResolver = &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer core.PeerID) error { + sendWasCalled = true + return nil + }, + } + arg.Headers = headers + hdrRes, _ := resolvers.NewHeaderResolver(arg) + + err := hdrRes.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashType, requestedData), fromConnectedPeerId) + assert.Nil(t, err) + assert.True(t, searchWasCalled) + assert.True(t, sendWasCalled) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) +} + func TestHeaderResolver_ProcessReceivedMessageRequestHashTypeFoundInHdrPoolMarshalizerFailsShouldErr(t *testing.T) { t.Parallel() @@ -542,6 +607,102 @@ func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoo assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) } +func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoolButMarshalFailsShouldError(t *testing.T) { + t.Parallel() + + requestedNonce := uint64(67) + targetShardId := uint32(9) + wasResolved := false + + headers := &mock.HeadersCacherStub{} + headers.GetHeaderByHashCalled = func(hash []byte) (handler data.HeaderHandler, e error) { + return nil, errors.New("err") + } + headers.GetHeaderByNonceAndShardIdCalled = func(hdrNonce uint64, shardId uint32) (handlers []data.HeaderHandler, i [][]byte, e error) { + wasResolved = true + return []data.HeaderHandler{&block.Header{}, &block.Header{}}, [][]byte{[]byte("1"), []byte("2")}, nil + } + + arg := createMockArgHeaderResolver() + arg.SenderResolver = &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer core.PeerID) error { + assert.Fail(t, "should not have been called") + return nil + }, + TargetShardIDCalled: func() uint32 { + return targetShardId + }, + } + arg.Headers = headers + arg.HeadersNoncesStorage = &storageStubs.StorerStub{ + GetFromEpochCalled: 
func(key []byte, epoch uint32) ([]byte, error) { + return nil, errKeyNotFound + }, + SearchFirstCalled: func(key []byte) (i []byte, e error) { + return nil, errKeyNotFound + }, + } + initialMarshaller := arg.Marshaller + arg.Marshaller = &mock.MarshalizerStub{ + UnmarshalCalled: initialMarshaller.Unmarshal, + MarshalCalled: func(obj interface{}) ([]byte, error) { + return nil, expectedErr + }, + } + hdrRes, _ := resolvers.NewHeaderResolver(arg) + + err := hdrRes.ProcessReceivedMessage( + createRequestMsg(dataRetriever.NonceType, arg.NonceConverter.ToByteSlice(requestedNonce)), + fromConnectedPeerId, + ) + + assert.True(t, errors.Is(err, expectedErr)) + assert.True(t, wasResolved) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) +} + +func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeNotFoundInHdrNoncePoolShouldRetFromPoolAndSend(t *testing.T) { + t.Parallel() + + requestedNonce := uint64(67) + wasSend := false + hash := []byte("aaaa") + + headers := &mock.HeadersCacherStub{} + headers.GetHeaderByHashCalled = func(hash []byte) (handler data.HeaderHandler, e error) { + return &block.Header{}, nil + } + headers.GetHeaderByNonceAndShardIdCalled = func(hdrNonce uint64, shardId uint32) (handlers []data.HeaderHandler, i [][]byte, e error) { + assert.Fail(t, "should not have been called") + return nil, nil, nil + } + arg := createMockArgHeaderResolver() + arg.SenderResolver = &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer core.PeerID) error { + wasSend = true + return nil + }, + } + arg.Headers = headers + arg.HeadersNoncesStorage = &storageStubs.StorerStub{ + SearchFirstCalled: func(key []byte) (i []byte, e error) { + return hash, nil + }, + } + hdrRes, _ := resolvers.NewHeaderResolver(arg) + + err := hdrRes.ProcessReceivedMessage( + createRequestMsg(dataRetriever.NonceType, arg.NonceConverter.ToByteSlice(requestedNonce)), + fromConnectedPeerId, + ) 
+ + assert.Nil(t, err) + assert.True(t, wasSend) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) +} + func TestHeaderResolver_ProcessReceivedMessageRequestNonceTypeFoundInHdrNoncePoolCheckRetErr(t *testing.T) { t.Parallel() diff --git a/dataRetriever/resolvers/messageProcessor_test.go b/dataRetriever/resolvers/messageProcessor_test.go index b4a10bc24b0..05fb0dcd127 100644 --- a/dataRetriever/resolvers/messageProcessor_test.go +++ b/dataRetriever/resolvers/messageProcessor_test.go @@ -18,6 +18,16 @@ const fromConnectedPeer = core.PeerID("from connected peer") //------- canProcessMessage +func TestMessageProcessor_CanProcessNilMessageShouldErr(t *testing.T) { + t.Parallel() + + mp := &messageProcessor{} + + err := mp.canProcessMessage(nil, "") + + assert.True(t, errors.Is(err, dataRetriever.ErrNilMessage)) +} + func TestMessageProcessor_CanProcessErrorsShouldErr(t *testing.T) { t.Parallel() diff --git a/dataRetriever/resolvers/miniblockResolver_test.go b/dataRetriever/resolvers/miniblockResolver_test.go index 94d82e2bf92..1b336c50396 100644 --- a/dataRetriever/resolvers/miniblockResolver_test.go +++ b/dataRetriever/resolvers/miniblockResolver_test.go @@ -95,6 +95,17 @@ func TestNewMiniblockResolver_NilThrottlerShouldErr(t *testing.T) { assert.True(t, check.IfNil(mbRes)) } +func TestNewMiniblockResolver_NilDataPackerShouldErr(t *testing.T) { + t.Parallel() + + arg := createMockArgMiniblockResolver() + arg.DataPacker = nil + mbRes, err := resolvers.NewMiniblockResolver(arg) + + assert.Equal(t, dataRetriever.ErrNilDataPacker, err) + assert.True(t, check.IfNil(mbRes)) +} + func TestNewMiniblockResolver_OkValsShouldWork(t *testing.T) { t.Parallel() @@ -250,6 +261,147 @@ func TestMiniblockResolver_ProcessReceivedMessageFoundInPoolMarshalizerFailShoul assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) } +func 
TestMiniblockResolver_ProcessReceivedMessageUnmarshalFails(t *testing.T) { + t.Parallel() + + goodMarshalizer := &mock.MarshalizerMock{} + cnt := 0 + marshalizer := &mock.MarshalizerStub{ + MarshalCalled: goodMarshalizer.Marshal, + UnmarshalCalled: func(obj interface{}, buff []byte) error { + cnt++ + if cnt > 1 { + return expectedErr + } + return goodMarshalizer.Unmarshal(obj, buff) + }, + } + mbHash := []byte("aaa") + miniBlockList := make([][]byte, 0) + miniBlockList = append(miniBlockList, mbHash) + requestedBuff, merr := goodMarshalizer.Marshal(&batch.Batch{Data: miniBlockList}) + + assert.Nil(t, merr) + + cache := testscommon.NewCacherStub() + cache.PeekCalled = func(key []byte) (value interface{}, ok bool) { + return nil, false + } + + arg := createMockArgMiniblockResolver() + arg.MiniBlockPool = cache + arg.MiniBlockStorage = &storageStubs.StorerStub{ + GetCalled: func(key []byte) (i []byte, e error) { + body := block.MiniBlock{} + buff, _ := goodMarshalizer.Marshal(&body) + return buff, nil + }, + } + arg.Marshaller = marshalizer + arg.DataPacker = &mock.DataPackerStub{ + PackDataInChunksCalled: func(data [][]byte, limit int) ([][]byte, error) { + assert.Fail(t, "should not have been called") + return nil, nil + }, + } + mbRes, _ := resolvers.NewMiniblockResolver(arg) + + err := mbRes.ProcessReceivedMessage( + createRequestMsg(dataRetriever.HashArrayType, requestedBuff), + fromConnectedPeerId, + ) + + assert.True(t, errors.Is(err, expectedErr)) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) +} + +func TestMiniblockResolver_ProcessReceivedMessagePackDataInChunksFails(t *testing.T) { + t.Parallel() + + goodMarshalizer := &mock.MarshalizerMock{} + mbHash := []byte("aaa") + miniBlockList := make([][]byte, 0) + miniBlockList = append(miniBlockList, mbHash) + requestedBuff, merr := goodMarshalizer.Marshal(&batch.Batch{Data: miniBlockList}) + + assert.Nil(t, merr) + + 
cache := testscommon.NewCacherStub() + cache.PeekCalled = func(key []byte) (value interface{}, ok bool) { + return nil, false + } + + arg := createMockArgMiniblockResolver() + arg.MiniBlockPool = cache + arg.MiniBlockStorage = &storageStubs.StorerStub{ + GetCalled: func(key []byte) (i []byte, e error) { + body := block.MiniBlock{} + buff, _ := goodMarshalizer.Marshal(&body) + return buff, nil + }, + } + arg.Marshaller = goodMarshalizer + arg.DataPacker = &mock.DataPackerStub{ + PackDataInChunksCalled: func(data [][]byte, limit int) ([][]byte, error) { + return nil, expectedErr + }, + } + mbRes, _ := resolvers.NewMiniblockResolver(arg) + + err := mbRes.ProcessReceivedMessage( + createRequestMsg(dataRetriever.HashArrayType, requestedBuff), + fromConnectedPeerId, + ) + + assert.True(t, errors.Is(err, expectedErr)) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) +} + +func TestMiniblockResolver_ProcessReceivedMessageSendFails(t *testing.T) { + t.Parallel() + + goodMarshalizer := &mock.MarshalizerMock{} + mbHash := []byte("aaa") + miniBlockList := make([][]byte, 0) + miniBlockList = append(miniBlockList, mbHash) + requestedBuff, merr := goodMarshalizer.Marshal(&batch.Batch{Data: miniBlockList}) + + assert.Nil(t, merr) + + cache := testscommon.NewCacherStub() + cache.PeekCalled = func(key []byte) (value interface{}, ok bool) { + return nil, false + } + + arg := createMockArgMiniblockResolver() + arg.MiniBlockPool = cache + arg.MiniBlockStorage = &storageStubs.StorerStub{ + GetCalled: func(key []byte) (i []byte, e error) { + body := block.MiniBlock{} + buff, _ := goodMarshalizer.Marshal(&body) + return buff, nil + }, + } + arg.Marshaller = goodMarshalizer + arg.SenderResolver = &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer core.PeerID) error { + return expectedErr + }, + } + mbRes, _ := resolvers.NewMiniblockResolver(arg) + + err := 
mbRes.ProcessReceivedMessage( + createRequestMsg(dataRetriever.HashArrayType, requestedBuff), + fromConnectedPeerId, + ) + + assert.True(t, errors.Is(err, expectedErr)) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) +} + func TestMiniblockResolver_ProcessReceivedMessageNotFoundInPoolShouldRetFromStorageAndSend(t *testing.T) { t.Parallel() @@ -297,6 +449,57 @@ func TestMiniblockResolver_ProcessReceivedMessageNotFoundInPoolShouldRetFromStor assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) } +func TestMiniblockResolver_ProcessReceivedMessageMarshalFails(t *testing.T) { + t.Parallel() + + mbHash := []byte("aaa") + marshalizer := &mock.MarshalizerMock{} + miniBlockList := make([][]byte, 0) + miniBlockList = append(miniBlockList, mbHash) + requestedBuff, _ := marshalizer.Marshal(&batch.Batch{Data: miniBlockList}) + + wasResolved := false + + cache := testscommon.NewCacherStub() + cache.PeekCalled = func(key []byte) (value interface{}, ok bool) { + return nil, false + } + + store := &storageStubs.StorerStub{} + store.SearchFirstCalled = func(key []byte) (i []byte, e error) { + wasResolved = true + mb, _ := marshalizer.Marshal(&block.MiniBlock{}) + return mb, nil + } + + arg := createMockArgMiniblockResolver() + arg.SenderResolver = &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer core.PeerID) error { + assert.Fail(t, "should have not been called") + return nil + }, + } + arg.MiniBlockPool = cache + arg.MiniBlockStorage = store + arg.Marshaller = &mock.MarshalizerStub{ + UnmarshalCalled: marshalizer.Unmarshal, + MarshalCalled: func(obj interface{}) ([]byte, error) { + return nil, expectedErr + }, + } + mbRes, _ := resolvers.NewMiniblockResolver(arg) + + err := mbRes.ProcessReceivedMessage( + createRequestMsg(dataRetriever.HashType, requestedBuff), + fromConnectedPeerId, + ) + + assert.True(t, errors.Is(err, expectedErr)) + assert.True(t, 
wasResolved) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) +} + func TestMiniblockResolver_ProcessReceivedMessageMissingDataShouldNotSend(t *testing.T) { t.Parallel() diff --git a/dataRetriever/resolvers/peerAuthenticationResolver_test.go b/dataRetriever/resolvers/peerAuthenticationResolver_test.go index 22b75093a4a..962d50be2ec 100644 --- a/dataRetriever/resolvers/peerAuthenticationResolver_test.go +++ b/dataRetriever/resolvers/peerAuthenticationResolver_test.go @@ -331,16 +331,20 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { pk1 := "pk01" pk2 := "pk02" + pk3 := "pk03" providedKeys := make(map[string]interface{}) providedKeys[pk1] = createMockPeerAuthenticationObject() providedKeys[pk2] = createMockPeerAuthenticationObject() + providedKeys[pk3] = createMockPeerAuthenticationObject() pks := make([][]byte, 0) pks = append(pks, []byte(pk1)) pks = append(pks, []byte(pk2)) + pks = append(pks, []byte(pk3)) hashes := make([][]byte, 0) hashes = append(hashes, []byte("pk01")) // exists in cache hashes = append(hashes, []byte("pk1")) // no entries + hashes = append(hashes, []byte("pk03")) // unmarshal fails providedHashes, err := arg.Marshaller.Marshal(batch.Batch{Data: hashes}) assert.Nil(t, err) @@ -366,7 +370,18 @@ func TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { }, } arg.DataPacker, _ = partitioning.NewSizeDataPacker(arg.Marshaller) - + initialMarshaller := arg.Marshaller + cnt := 0 + arg.Marshaller = &mock.MarshalizerStub{ + MarshalCalled: initialMarshaller.Marshal, + UnmarshalCalled: func(obj interface{}, buff []byte) error { + cnt++ + if cnt == 4 { // pk03 + return expectedErr + } + return initialMarshaller.Unmarshal(obj, buff) + }, + } res, err := resolvers.NewPeerAuthenticationResolver(arg) assert.Nil(t, err) assert.False(t, res.IsInterfaceNil()) @@ -375,6 +390,31 @@ func 
TestPeerAuthenticationResolver_ProcessReceivedMessage(t *testing.T) { assert.Nil(t, err) assert.True(t, wasSent) }) + t.Run("resolveMultipleHashesRequest: PackDataInChunks returns error", func(t *testing.T) { + t.Parallel() + + cache := testscommon.NewCacherStub() + cache.PeekCalled = func(key []byte) (value interface{}, ok bool) { + return createMockPeerAuthenticationObject(), true + } + + arg := createMockArgPeerAuthenticationResolver() + arg.PeerAuthenticationPool = cache + arg.DataPacker = &mock.DataPackerStub{ + PackDataInChunksCalled: func(data [][]byte, limit int) ([][]byte, error) { + return nil, expectedErr + }, + } + res, err := resolvers.NewPeerAuthenticationResolver(arg) + assert.Nil(t, err) + assert.False(t, res.IsInterfaceNil()) + + hashes := getKeysSlice() + providedHashes, err := arg.Marshaller.Marshal(batch.Batch{Data: hashes}) + assert.Nil(t, err) + err = res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, providedHashes), fromConnectedPeer) + assert.True(t, errors.Is(err, expectedErr)) + }) t.Run("resolveMultipleHashesRequest: Send returns error", func(t *testing.T) { t.Parallel() diff --git a/dataRetriever/resolvers/transactionResolver_test.go b/dataRetriever/resolvers/transactionResolver_test.go index 13d1323c68c..d75d2192789 100644 --- a/dataRetriever/resolvers/transactionResolver_test.go +++ b/dataRetriever/resolvers/transactionResolver_test.go @@ -269,6 +269,51 @@ func TestTxResolver_ProcessReceivedMessageFoundInTxPoolMarshalizerFailShouldRetN assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) } +func TestTxResolver_ProcessReceivedMessageBatchMarshalFailShouldRetNilAndErr(t *testing.T) { + t.Parallel() + + marshalizerMock := &mock.MarshalizerMock{} + cnt := 0 + marshalizerStub := &mock.MarshalizerStub{ + MarshalCalled: func(obj interface{}) (i []byte, e error) { + cnt++ + if cnt > 1 { + return nil, expectedErr + } + return marshalizerMock.Marshal(obj) + }, + UnmarshalCalled: func(obj interface{}, buff 
[]byte) error { + return marshalizerMock.Unmarshal(obj, buff) + }, + } + txReturned := &transaction.Transaction{ + Nonce: 10, + } + txPool := testscommon.NewShardedDataStub() + txPool.SearchFirstDataCalled = func(key []byte) (value interface{}, ok bool) { + if bytes.Equal([]byte("aaa"), key) { + return txReturned, true + } + + return nil, false + } + + arg := createMockArgTxResolver() + arg.TxPool = txPool + arg.Marshaller = marshalizerStub + txRes, _ := resolvers.NewTxResolver(arg) + + data, _ := marshalizerMock.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashType, Value: []byte("aaa")}) + + msg := &p2pmocks.P2PMessageMock{DataField: data} + + err := txRes.ProcessReceivedMessage(msg, connectedPeerId) + + assert.True(t, errors.Is(err, expectedErr)) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) +} + func TestTxResolver_ProcessReceivedMessageFoundInTxStorageShouldRetValAndSend(t *testing.T) { t.Parallel() @@ -480,6 +525,83 @@ func TestTxResolver_ProcessReceivedMessageRequestedTwoSmallTransactionsFoundOnly assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) } +func TestTxResolver_ProcessReceivedMessageHashArrayUnmarshalFails(t *testing.T) { + t.Parallel() + + arg := createMockArgTxResolver() + marshalizer := arg.Marshaller + cnt := 0 + arg.Marshaller = &mock.MarshalizerStub{ + UnmarshalCalled: func(obj interface{}, buff []byte) error { + cnt++ + if cnt > 1 { + return expectedErr + } + return marshalizer.Unmarshal(obj, buff) + }, + } + txRes, _ := resolvers.NewTxResolver(arg) + + data, _ := marshalizer.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashArrayType, Value: []byte("buff")}) + msg := &p2pmocks.P2PMessageMock{DataField: data} + + err := txRes.ProcessReceivedMessage(msg, connectedPeerId) + + assert.True(t, errors.Is(err, expectedErr)) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) + assert.True(t, 
arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) +} + +func TestTxResolver_ProcessReceivedMessageHashArrayPackDataInChunksFails(t *testing.T) { + t.Parallel() + + txHash1 := []byte("txHash1") + txHash2 := []byte("txHash2") + + arg := createMockArgTxResolver() + arg.DataPacker = &mock.DataPackerStub{ + PackDataInChunksCalled: func(data [][]byte, limit int) ([][]byte, error) { + return nil, expectedErr + }, + } + txRes, _ := resolvers.NewTxResolver(arg) + + buff, _ := arg.Marshaller.Marshal(&batch.Batch{Data: [][]byte{txHash1, txHash2}}) + data, _ := arg.Marshaller.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashArrayType, Value: buff}) + msg := &p2pmocks.P2PMessageMock{DataField: data} + + err := txRes.ProcessReceivedMessage(msg, connectedPeerId) + + assert.True(t, errors.Is(err, expectedErr)) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) +} + +func TestTxResolver_ProcessReceivedMessageHashArraySendFails(t *testing.T) { + t.Parallel() + + txHash1 := []byte("txHash1") + txHash2 := []byte("txHash2") + + arg := createMockArgTxResolver() + arg.SenderResolver = &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer core.PeerID) error { + return expectedErr + }, + } + txRes, _ := resolvers.NewTxResolver(arg) + + buff, _ := arg.Marshaller.Marshal(&batch.Batch{Data: [][]byte{txHash1, txHash2}}) + data, _ := arg.Marshaller.Marshal(&dataRetriever.RequestData{Type: dataRetriever.HashArrayType, Value: buff}) + msg := &p2pmocks.P2PMessageMock{DataField: data} + + err := txRes.ProcessReceivedMessage(msg, connectedPeerId) + + assert.True(t, errors.Is(err, expectedErr)) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) +} + func TestTxResolver_Close(t *testing.T) { t.Parallel() diff --git a/dataRetriever/resolvers/trieNodeResolver_test.go 
b/dataRetriever/resolvers/trieNodeResolver_test.go index e3281c9139e..dd7325d533b 100644 --- a/dataRetriever/resolvers/trieNodeResolver_test.go +++ b/dataRetriever/resolvers/trieNodeResolver_test.go @@ -249,6 +249,49 @@ func TestTrieNodeResolver_ProcessReceivedMessageTrieErrorsShouldErr(t *testing.T assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) } +func TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesUnmarshalFails(t *testing.T) { + t.Parallel() + + arg := createMockArgTrieNodeResolver() + initialMarshaller := arg.Marshaller + cnt := 0 + arg.Marshaller = &mock.MarshalizerStub{ + MarshalCalled: initialMarshaller.Marshal, + UnmarshalCalled: func(obj interface{}, buff []byte) error { + cnt++ + if cnt > 1 { + return expectedErr + } + return initialMarshaller.Unmarshal(obj, buff) + }, + } + arg.TrieDataGetter = &trieMock.TrieStub{ + GetSerializedNodeCalled: func(_ []byte) ([]byte, error) { + assert.Fail(t, "should have not called send") + return nil, nil + }, + } + tnRes, _ := resolvers.NewTrieNodeResolver(arg) + + b := &batch.Batch{ + Data: [][]byte{[]byte("hash1")}, + } + buffBatch, _ := arg.Marshaller.Marshal(b) + + data, _ := arg.Marshaller.Marshal( + &dataRetriever.RequestData{ + Type: dataRetriever.HashArrayType, + Value: buffBatch, + }, + ) + msg := &p2pmocks.P2PMessageMock{DataField: data} + + err := tnRes.ProcessReceivedMessage(msg, fromConnectedPeer) + assert.Equal(t, expectedErr, err) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).StartWasCalled()) + assert.True(t, arg.Throttler.(*mock.ThrottlerStub).EndWasCalled()) +} + func TestTrieNodeResolver_ProcessReceivedMessageMultipleHashesGetSerializedNodeErrorsShouldNotSend(t *testing.T) { t.Parallel() diff --git a/dataRetriever/resolvers/validatorInfoResolver_test.go b/dataRetriever/resolvers/validatorInfoResolver_test.go index 88d115de3cb..92a9420cb54 100644 --- a/dataRetriever/resolvers/validatorInfoResolver_test.go +++ b/dataRetriever/resolvers/validatorInfoResolver_test.go @@ 
-408,6 +408,40 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, buff), fromConnectedPeer) assert.Equal(t, expectedErr, err) }) + t.Run("send returns error", func(t *testing.T) { + t.Parallel() + + numOfProvidedData := 3 + providedHashes := make([][]byte, 0) + providedData := make([]state.ValidatorInfo, 0) + for i := 0; i < numOfProvidedData; i++ { + hashStr := fmt.Sprintf("hash%d", i) + providedHashes = append(providedHashes, []byte(hashStr)) + pkStr := fmt.Sprintf("pk%d", i) + providedData = append(providedData, createMockValidatorInfo([]byte(pkStr))) + } + args := createMockArgValidatorInfoResolver() + numOfCalls := 0 + args.ValidatorInfoPool = &testscommon.ShardedDataStub{ + SearchFirstDataCalled: func(key []byte) (value interface{}, ok bool) { + val := providedData[numOfCalls] + numOfCalls++ + return val, true + }, + } + args.SenderResolver = &mock.TopicResolverSenderStub{ + SendCalled: func(buff []byte, peer core.PeerID) error { + return expectedErr + }, + } + args.DataPacker, _ = partitioning.NewSimpleDataPacker(args.Marshaller) + res, _ := resolvers.NewValidatorInfoResolver(args) + require.False(t, check.IfNil(res)) + + buff, _ := args.Marshaller.Marshal(&batch.Batch{Data: providedHashes}) + err := res.ProcessReceivedMessage(createRequestMsg(dataRetriever.HashArrayType, buff), fromConnectedPeer) + assert.Equal(t, expectedErr, err) + }) t.Run("all hashes in one chunk should work", func(t *testing.T) { t.Parallel() diff --git a/dataRetriever/shardedData/shardedData_test.go b/dataRetriever/shardedData/shardedData_test.go index d00e7939535..071b0c00356 100644 --- a/dataRetriever/shardedData/shardedData_test.go +++ b/dataRetriever/shardedData/shardedData_test.go @@ -124,6 +124,8 @@ func TestShardedData_RemoveData(t *testing.T) { sd, _ := NewShardedData("", defaultTestConfig) + sd.RemoveData([]byte{}, "missing_cache_id") // coverage + 
sd.AddData([]byte("tx_hash1"), &transaction.Transaction{Nonce: 1}, 0, "1") assert.Equal(t, 1, sd.ShardDataStore("1").Len(), "AddData failed, length should be 1") @@ -146,11 +148,13 @@ func TestShardedData_RemoveData(t *testing.T) { "FindAndRemoveData failed, length should be 1 in shard 2") } -func TestShardedData_Clear(t *testing.T) { +func TestShardedData_ClearShardStore(t *testing.T) { t.Parallel() sd, _ := NewShardedData("", defaultTestConfig) + sd.ClearShardStore("missing_cache_id") // coverage + sd.AddData([]byte("tx_hash1"), &transaction.Transaction{Nonce: 1}, 0, "1") sd.AddData([]byte("tx_hash2"), &transaction.Transaction{Nonce: 2}, 0, "2") sd.AddData([]byte("tx_hash1"), &transaction.Transaction{Nonce: 1}, 0, "2") @@ -308,3 +312,50 @@ func TestShardedData_SearchFirstDataFoundShouldRetResults(t *testing.T) { } // TODO: Add high load test, reach maximum capacity and inspect RAM usage. EN-6735. + +func TestShardedData_RemoveSetOfDataFromPool(t *testing.T) { + t.Parallel() + + sd, _ := NewShardedData("", defaultTestConfig) + + sd.RemoveSetOfDataFromPool([][]byte{}, "missing_cache_id") // coverage + + sd.AddData([]byte("aaa"), "a1", 2, "0") + _, ok := sd.SearchFirstData([]byte("aaa")) + assert.True(t, ok) + sd.RemoveSetOfDataFromPool([][]byte{[]byte("aaa")}, "0") + _, ok = sd.SearchFirstData([]byte("aaa")) + assert.False(t, ok) +} + +func TestShardedData_ImmunizeSetOfDataAgainstEviction(t *testing.T) { + t.Parallel() + + sd, _ := NewShardedData("", defaultTestConfig) + sd.ImmunizeSetOfDataAgainstEviction([][]byte{[]byte("aaa")}, "0") +} + +func TestShardedData_GetCounts(t *testing.T) { + t.Parallel() + + sd, _ := NewShardedData("", defaultTestConfig) + + sd.RemoveSetOfDataFromPool([][]byte{}, "missing_cache_id") // coverage + + sd.AddData([]byte("aaa"), "a1", 2, "0") + sd.AddData([]byte("bbb"), "b1", 2, "0") + counts := sd.GetCounts() + assert.Equal(t, int64(2), counts.GetTotal()) +} + +func TestShardedData_Diagnose(t *testing.T) { + t.Parallel() + + sd, _ := 
NewShardedData("", defaultTestConfig) + + sd.RemoveSetOfDataFromPool([][]byte{}, "missing_cache_id") // coverage + + sd.AddData([]byte("aaa"), "a1", 2, "0") + sd.AddData([]byte("bbb"), "b1", 2, "0") + sd.Diagnose(true) +} diff --git a/dataRetriever/storageRequesters/headerRequester_test.go b/dataRetriever/storageRequesters/headerRequester_test.go index 82724e0e705..f73f81f6b74 100644 --- a/dataRetriever/storageRequesters/headerRequester_test.go +++ b/dataRetriever/storageRequesters/headerRequester_test.go @@ -324,32 +324,73 @@ func TestHeaderRequester_RequestDataFromNonceShouldWork(t *testing.T) { assert.True(t, sendCalled) } -func TestHeaderRequester_RequestDataFromEpochShouldWork(t *testing.T) { +func TestHeaderRequester_RequestDataFromEpoch(t *testing.T) { t.Parallel() - sendCalled := false - epochIdentifier := []byte(core.EpochStartIdentifier(math.MaxUint32)) - arg := createMockHeaderRequesterArg() - arg.HdrStorage = &storageStubs.StorerStub{ - SearchFirstCalled: func(key []byte) ([]byte, error) { - assert.Equal(t, epochIdentifier, key) - return make([]byte, 0), nil - }, - } - arg.ManualEpochStartNotifier = &mock.ManualEpochStartNotifierStub{} - arg.Messenger = &p2pmocks.MessengerStub{ - SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { - sendCalled = true + t.Run("unknown epoch should error", func(t *testing.T) { + t.Parallel() - return nil - }, - } - hdReq, _ := NewHeaderRequester(arg) + epochIdentifier := []byte("unknown epoch") + arg := createMockHeaderRequesterArg() + arg.HdrStorage = &storageStubs.StorerStub{ + SearchFirstCalled: func(key []byte) ([]byte, error) { + assert.Fail(t, "should not have been called") + return make([]byte, 0), nil + }, + } + hdReq, _ := NewHeaderRequester(arg) + + err := hdReq.RequestDataFromEpoch(epochIdentifier) + assert.Equal(t, core.ErrInvalidIdentifierForEpochStartBlockRequest, err) + }) + t.Run("identifier not found should error should error", func(t *testing.T) { + t.Parallel() + + 
expectedErr := errors.New("expected error") + epochIdentifier := []byte(core.EpochStartIdentifier(100)) + arg := createMockHeaderRequesterArg() + arg.HdrStorage = &storageStubs.StorerStub{ + SearchFirstCalled: func(key []byte) ([]byte, error) { + return make([]byte, 0), expectedErr + }, + } + arg.Messenger = &p2pmocks.MessengerStub{ + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + assert.Fail(t, "should not have been called") + return nil + }, + } + hdReq, _ := NewHeaderRequester(arg) + + err := hdReq.RequestDataFromEpoch(epochIdentifier) + assert.Equal(t, expectedErr, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + sendCalled := false + epochIdentifier := []byte(core.EpochStartIdentifier(math.MaxUint32)) + arg := createMockHeaderRequesterArg() + arg.HdrStorage = &storageStubs.StorerStub{ + SearchFirstCalled: func(key []byte) ([]byte, error) { + assert.Equal(t, epochIdentifier, key) + return make([]byte, 0), nil + }, + } + arg.Messenger = &p2pmocks.MessengerStub{ + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + sendCalled = true - err := hdReq.RequestDataFromEpoch(epochIdentifier) + return nil + }, + } + hdReq, _ := NewHeaderRequester(arg) - assert.Nil(t, err) - assert.True(t, sendCalled) + err := hdReq.RequestDataFromEpoch(epochIdentifier) + + assert.Nil(t, err) + assert.True(t, sendCalled) + }) } func TestHeaderRequester_Close(t *testing.T) { diff --git a/dataRetriever/storageRequesters/sliceRequester_test.go b/dataRetriever/storageRequesters/sliceRequester_test.go index 75f3232b388..0693257464b 100644 --- a/dataRetriever/storageRequesters/sliceRequester_test.go +++ b/dataRetriever/storageRequesters/sliceRequester_test.go @@ -17,6 +17,8 @@ import ( "github.com/stretchr/testify/assert" ) +var expectedErr = errors.New("expected err") + func createMockSliceRequesterArg() ArgSliceRequester { return ArgSliceRequester{ Messenger: &mock.MessageHandlerStub{}, @@ 
-108,7 +110,6 @@ func TestNewSliceRequester_ShouldWork(t *testing.T) { func TestSliceRequester_RequestDataFromHashNotFoundShouldErr(t *testing.T) { t.Parallel() - expectedErr := errors.New("expected error") sendWasCalled := false arg := createMockSliceRequesterArg() arg.Storage = &storageStubs.StorerStub{ @@ -140,6 +141,32 @@ func TestSliceRequester_RequestDataFromHashNotFoundShouldErr(t *testing.T) { } } +func TestSliceRequester_RequestDataFromHashMarshalFails(t *testing.T) { + t.Parallel() + + arg := createMockSliceRequesterArg() + arg.Marshalizer = &mock.MarshalizerStub{ + MarshalCalled: func(obj interface{}) ([]byte, error) { + return nil, expectedErr + }, + } + arg.Storage = &storageStubs.StorerStub{ + GetCalled: func(key []byte) ([]byte, error) { + return make([]byte, 0), nil + }, + } + arg.Messenger = &p2pmocks.MessengerStub{ + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + assert.Fail(t, "should not have been called") + return nil + }, + } + sr, _ := NewSliceRequester(arg) + + err := sr.RequestDataFromHash([]byte("hash"), 0) + assert.Equal(t, expectedErr, err) +} + func TestSliceRequester_RequestDataFromHashShouldWork(t *testing.T) { t.Parallel() @@ -164,6 +191,37 @@ func TestSliceRequester_RequestDataFromHashShouldWork(t *testing.T) { assert.True(t, sendWasCalled) } +func TestSliceRequester_RequestDataFromHashesPackDataInChunksFails(t *testing.T) { + t.Parallel() + + numGetCalled := 0 + arg := createMockSliceRequesterArg() + arg.Storage = &storageStubs.StorerStub{ + GetCalled: func(key []byte) ([]byte, error) { + numGetCalled++ + return make([]byte, 0), nil + }, + } + arg.Messenger = &p2pmocks.MessengerStub{ + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + assert.Fail(t, "should not have been called") + return nil + }, + } + arg.DataPacker = &mock.DataPackerStub{ + PackDataInChunksCalled: func(data [][]byte, limit int) ([][]byte, error) { + return nil, expectedErr + }, + } 
+ sr, _ := NewSliceRequester(arg) + + hashes := [][]byte{[]byte("hash1"), []byte("hash2")} + err := sr.RequestDataFromHashArray(hashes, 0) + + assert.Equal(t, expectedErr, err) + assert.Equal(t, len(hashes), numGetCalled) +} + func TestSliceRequester_RequestDataFromHashesShouldWork(t *testing.T) { t.Parallel() @@ -197,7 +255,6 @@ func TestSliceRequester_GetErroredShouldReturnErr(t *testing.T) { numSendCalled := 0 numGetCalled := 0 - expectedErr := errors.New("expected err") arg := createMockSliceRequesterArg() arg.Storage = &storageStubs.StorerStub{ GetCalled: func(key []byte) ([]byte, error) { @@ -240,7 +297,6 @@ func TestSliceRequester_SendErroredShouldReturnErr(t *testing.T) { numSendCalled := 0 numGetCalled := 0 - expectedErr := errors.New("expected err") arg := createMockSliceRequesterArg() arg.Storage = &storageStubs.StorerStub{ GetCalled: func(key []byte) ([]byte, error) { diff --git a/dataRetriever/storageRequesters/trieNodeRequester_test.go b/dataRetriever/storageRequesters/trieNodeRequester_test.go index 042c1390826..6b22579b730 100644 --- a/dataRetriever/storageRequesters/trieNodeRequester_test.go +++ b/dataRetriever/storageRequesters/trieNodeRequester_test.go @@ -127,6 +127,38 @@ func TestTrieNodeRequester_RequestDataFromHashShouldWork(t *testing.T) { assert.Equal(t, uint32(1), atomic.LoadUint32(&numSendToConnectedPeerCalled)) } +func TestTrieNodeRequester_RequestDataFromHashArrayMarshalFails(t *testing.T) { + t.Parallel() + + args := createMockTrieRequesterArguments() + buff := []byte("data") + args.TrieDataGetter = &trieMock.TrieStub{ + GetSerializedNodesCalled: func(bytes []byte, u uint64) ([][]byte, uint64, error) { + return [][]byte{buff}, 1, nil + }, + } + args.Messenger = &p2pmocks.MessengerStub{ + SendToConnectedPeerCalled: func(topic string, buff []byte, peerID core.PeerID) error { + assert.Fail(t, "should not have been called") + return nil + }, + } + args.Marshalizer = &mock.MarshalizerStub{ + MarshalCalled: func(obj interface{}) ([]byte, 
error) { + return nil, expectedErr + }, + } + tnr, _ := NewTrieNodeRequester(args) + + err := tnr.RequestDataFromHashArray( + [][]byte{ + []byte("hash1"), + []byte("hash2"), + }, 0) + assert.Equal(t, expectedErr, err) + assert.Equal(t, 0, len(args.ChanGracefullyClose)) +} + func TestTrieNodeRequester_RequestDataFromHashArrayShouldWork(t *testing.T) { t.Parallel() @@ -159,3 +191,30 @@ func TestTrieNodeRequester_RequestDataFromHashArrayShouldWork(t *testing.T) { assert.Equal(t, uint32(1), atomic.LoadUint32(&numSendToConnectedPeerCalled)) assert.Equal(t, uint32(2), atomic.LoadUint32(&numGetSerializedNodesCalled)) } + +func TestTrieNodeRequester_Close(t *testing.T) { + t.Parallel() + + t.Run("trieStorageManager.Close error should error", func(t *testing.T) { + t.Parallel() + + args := createMockTrieRequesterArguments() + args.TrieStorageManager = &testscommon.StorageManagerStub{ + CloseCalled: func() error { + return expectedErr + }, + } + tnr, _ := NewTrieNodeRequester(args) + + err := tnr.Close() + assert.Equal(t, expectedErr, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + tnr, _ := NewTrieNodeRequester(createMockTrieRequesterArguments()) + + err := tnr.Close() + assert.NoError(t, err) + }) +} diff --git a/dataRetriever/topicSender/diffPeerListCreator_test.go b/dataRetriever/topicSender/diffPeerListCreator_test.go index be87e933ca4..73b1a63b418 100644 --- a/dataRetriever/topicSender/diffPeerListCreator_test.go +++ b/dataRetriever/topicSender/diffPeerListCreator_test.go @@ -240,3 +240,21 @@ func TestDiffPeerListCreator_IntraShardPeersList(t *testing.T) { assert.Equal(t, peerList, dplc.IntraShardPeerList()) } + +func TestDiffPeerListCreator_FullHistoryList(t *testing.T) { + t.Parallel() + + peerList := []core.PeerID{"pid1", "pid2"} + dplc, _ := topicsender.NewDiffPeerListCreator( + &mock.MessageHandlerStub{ + ConnectedFullHistoryPeersOnTopicCalled: func(topic string) []core.PeerID { + return peerList + }, + }, + mainTopic, + intraTopic, + 
excludedTopic, + ) + + assert.Equal(t, peerList, dplc.FullHistoryList()) +} diff --git a/dataRetriever/txpool/shardedTxPool_test.go b/dataRetriever/txpool/shardedTxPool_test.go index b08ab8daa76..4f80506c304 100644 --- a/dataRetriever/txpool/shardedTxPool_test.go +++ b/dataRetriever/txpool/shardedTxPool_test.go @@ -76,6 +76,13 @@ func Test_NewShardedTxPool_WhenBadConfig(t *testing.T) { require.NotNil(t, err) require.Errorf(t, err, dataRetriever.ErrCacheConfigInvalidShards.Error()) + args = goodArgs + args.TxGasHandler = nil + pool, err = NewShardedTxPool(args) + require.Nil(t, pool) + require.NotNil(t, err) + require.Errorf(t, err, dataRetriever.ErrNilTxGasHandler.Error()) + args = goodArgs args.TxGasHandler = &txcachemocks.TxGasHandlerMock{ MinimumGasMove: 50000, @@ -167,6 +174,7 @@ func Test_AddData(t *testing.T) { pool := poolAsInterface.(*shardedTxPool) cache := pool.getTxCache("0") + pool.AddData([]byte("hash-invalid-cache"), createTx("alice", 0), 0, "invalid-cache-id") pool.AddData([]byte("hash-x"), createTx("alice", 42), 0, "0") pool.AddData([]byte("hash-y"), createTx("alice", 43), 0, "0") require.Equal(t, 2, cache.Len()) @@ -346,6 +354,23 @@ func Test_Keys(t *testing.T) { require.ElementsMatch(t, txsHashes, pool.Keys()) } +func TestShardedTxPool_Diagnose(t *testing.T) { + t.Parallel() + + poolAsInterface, _ := newTxPoolToTest() + pool := poolAsInterface.(*shardedTxPool) + pool.AddData([]byte("hash"), createTx("alice", 10), 0, "0") + pool.Diagnose(true) +} + +func TestShardedTxPool_ImmunizeSetOfDataAgainstEviction(t *testing.T) { + t.Parallel() + + poolAsInterface, _ := newTxPoolToTest() + pool := poolAsInterface.(*shardedTxPool) + pool.ImmunizeSetOfDataAgainstEviction([][]byte{[]byte("hash")}, "0") +} + func Test_IsInterfaceNil(t *testing.T) { poolAsInterface, _ := newTxPoolToTest() require.False(t, check.IfNil(poolAsInterface)) diff --git a/dataRetriever/unitType_test.go b/dataRetriever/unitType_test.go new file mode 100644 index 00000000000..83c4381a3b9 
--- /dev/null +++ b/dataRetriever/unitType_test.go @@ -0,0 +1,68 @@ +package dataRetriever + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestUnitType_String(t *testing.T) { + t.Parallel() + + ut := TransactionUnit + require.Equal(t, "TransactionUnit", ut.String()) + ut = MiniBlockUnit + require.Equal(t, "MiniBlockUnit", ut.String()) + ut = PeerChangesUnit + require.Equal(t, "PeerChangesUnit", ut.String()) + ut = BlockHeaderUnit + require.Equal(t, "BlockHeaderUnit", ut.String()) + ut = MetaBlockUnit + require.Equal(t, "MetaBlockUnit", ut.String()) + ut = UnsignedTransactionUnit + require.Equal(t, "UnsignedTransactionUnit", ut.String()) + ut = RewardTransactionUnit + require.Equal(t, "RewardTransactionUnit", ut.String()) + ut = MetaHdrNonceHashDataUnit + require.Equal(t, "MetaHdrNonceHashDataUnit", ut.String()) + ut = HeartbeatUnit + require.Equal(t, "HeartbeatUnit", ut.String()) + ut = BootstrapUnit + require.Equal(t, "BootstrapUnit", ut.String()) + ut = StatusMetricsUnit + require.Equal(t, "StatusMetricsUnit", ut.String()) + ut = TxLogsUnit + require.Equal(t, "TxLogsUnit", ut.String()) + ut = MiniblocksMetadataUnit + require.Equal(t, "MiniblocksMetadataUnit", ut.String()) + ut = EpochByHashUnit + require.Equal(t, "EpochByHashUnit", ut.String()) + ut = MiniblockHashByTxHashUnit + require.Equal(t, "MiniblockHashByTxHashUnit", ut.String()) + ut = ReceiptsUnit + require.Equal(t, "ReceiptsUnit", ut.String()) + ut = ResultsHashesByTxHashUnit + require.Equal(t, "ResultsHashesByTxHashUnit", ut.String()) + ut = TrieEpochRootHashUnit + require.Equal(t, "TrieEpochRootHashUnit", ut.String()) + ut = ESDTSuppliesUnit + require.Equal(t, "ESDTSuppliesUnit", ut.String()) + ut = RoundHdrHashDataUnit + require.Equal(t, "RoundHdrHashDataUnit", ut.String()) + ut = UserAccountsUnit + require.Equal(t, "UserAccountsUnit", ut.String()) + ut = UserAccountsCheckpointsUnit + require.Equal(t, "UserAccountsCheckpointsUnit", ut.String()) + ut = PeerAccountsUnit + 
require.Equal(t, "PeerAccountsUnit", ut.String()) + ut = PeerAccountsCheckpointsUnit + require.Equal(t, "PeerAccountsCheckpointsUnit", ut.String()) + ut = ScheduledSCRsUnit + require.Equal(t, "ScheduledSCRsUnit", ut.String()) + + ut = 200 + require.Equal(t, "ShardHdrNonceHashDataUnit100", ut.String()) + + ut = 99 + require.Equal(t, "unknown type 99", ut.String()) +} diff --git a/testscommon/storageManagerStub.go b/testscommon/storageManagerStub.go index b7673a4b4cd..825a118722b 100644 --- a/testscommon/storageManagerStub.go +++ b/testscommon/storageManagerStub.go @@ -28,6 +28,7 @@ type StorageManagerStub struct { IsClosedCalled func() bool RemoveFromCheckpointHashesHolderCalled func([]byte) GetBaseTrieStorageManagerCalled func() common.StorageManager + CloseCalled func() error } // Put - @@ -186,6 +187,9 @@ func (sms *StorageManagerStub) GetLatestStorageEpoch() (uint32, error) { // Close - func (sms *StorageManagerStub) Close() error { + if sms.CloseCalled != nil { + return sms.CloseCalled() + } return nil } From 5d57d8ed59fa5bb24c3adacc00aaa4f220453326 Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Wed, 3 May 2023 17:10:16 +0300 Subject: [PATCH 122/221] MX-14120: fixes after review --- consensus/mock/bootstrapperStub.go | 10 +++-- factory/processing/blockProcessorCreator.go | 4 +- factory/processing/processComponents.go | 8 +--- node/nodeRunner.go | 2 - process/interface.go | 2 +- process/mock/bootstrapperStub.go | 10 +++-- process/sync/baseSync.go | 8 +++- process/sync/disabled/disabledBootstrapper.go | 3 +- process/sync/metablock.go | 4 +- process/sync/shardblock.go | 6 ++- .../trieIterators/tokensSuppliesComputer.go | 16 +++++--- .../tokensSuppliesComputer_test.go | 41 +++++++++++++++++++ .../trieIterators/trieAccountsIterator.go | 8 ++++ .../trieAccountsIterator_test.go | 26 ++++++++++-- testscommon/components/components.go | 5 ++- 15 files changed, 117 insertions(+), 36 deletions(-) diff --git a/consensus/mock/bootstrapperStub.go 
b/consensus/mock/bootstrapperStub.go index 171062a29f1..bd4a1b98bf2 100644 --- a/consensus/mock/bootstrapperStub.go +++ b/consensus/mock/bootstrapperStub.go @@ -11,7 +11,7 @@ type BootstrapperStub struct { CreateAndCommitEmptyBlockCalled func(uint32) (data.BodyHandler, data.HeaderHandler, error) AddSyncStateListenerCalled func(func(bool)) GetNodeStateCalled func() common.NodeState - StartSyncingBlocksCalled func() + StartSyncingBlocksCalled func() error } // CreateAndCommitEmptyBlock - @@ -40,8 +40,12 @@ func (boot *BootstrapperStub) GetNodeState() common.NodeState { } // StartSyncingBlocks - -func (boot *BootstrapperStub) StartSyncingBlocks() { - boot.StartSyncingBlocksCalled() +func (boot *BootstrapperStub) StartSyncingBlocks() error { + if boot.StartSyncingBlocksCalled != nil { + return boot.StartSyncingBlocksCalled() + } + + return nil } // Close - diff --git a/factory/processing/blockProcessorCreator.go b/factory/processing/blockProcessorCreator.go index 73d79a87b6f..2c154c682db 100644 --- a/factory/processing/blockProcessorCreator.go +++ b/factory/processing/blockProcessorCreator.go @@ -1179,7 +1179,7 @@ func (pcf *processComponentsFactory) createVMFactoryShard( BuiltInFunctions: builtInFuncs, DataPool: pcf.data.Datapool(), CompiledSCPool: pcf.data.Datapool().SmartContracts(), - WorkingDir: pcf.workingDir, + WorkingDir: pcf.flagsConfig.WorkingDir, NFTStorageHandler: nftStorageHandler, GlobalSettingsHandler: globalSettingsHandler, EpochNotifier: pcf.coreData.EpochNotifier(), @@ -1230,7 +1230,7 @@ func (pcf *processComponentsFactory) createVMFactoryMeta( DataPool: pcf.data.Datapool(), CompiledSCPool: pcf.data.Datapool().SmartContracts(), ConfigSCStorage: configSCStorage, - WorkingDir: pcf.workingDir, + WorkingDir: pcf.flagsConfig.WorkingDir, NFTStorageHandler: nftStorageHandler, GlobalSettingsHandler: globalSettingsHandler, EpochNotifier: pcf.coreData.EpochNotifier(), diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go 
index d9ea9e8a935..c1cd90af2a7 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -140,9 +140,7 @@ type ProcessComponentsFactoryArgs struct { WhiteListerVerifiedTxs process.WhiteListHandler MaxRating uint32 SystemSCConfig *config.SystemSmartContractsConfig - Version string ImportStartHandler update.ImportStartHandler - WorkingDir string HistoryRepo dblookupext.HistoryRepository FlagsConfig config.ContextFlagsConfig @@ -173,7 +171,6 @@ type processComponentsFactory struct { txLogsProcessor process.TransactionLogProcessor version string importStartHandler update.ImportStartHandler - workingDir string historyRepo dblookupext.HistoryRepository epochNotifier process.EpochNotifier importHandler update.ImportHandler @@ -218,9 +215,7 @@ func NewProcessComponentsFactory(args ProcessComponentsFactoryArgs) (*processCom whiteListerVerifiedTxs: args.WhiteListerVerifiedTxs, maxRating: args.MaxRating, systemSCConfig: args.SystemSCConfig, - version: args.Version, importStartHandler: args.ImportStartHandler, - workingDir: args.WorkingDir, historyRepo: args.HistoryRepo, epochNotifier: args.CoreData.EpochNotifier(), statusCoreComponents: args.StatusCoreComponents, @@ -863,7 +858,6 @@ func (pcf *processComponentsFactory) generateGenesisHeadersAndApplyInitialBalanc TrieStorageManagers: pcf.state.TrieStorageManagers(), SystemSCConfig: *pcf.systemSCConfig, ImportStartHandler: pcf.importStartHandler, - WorkingDir: pcf.workingDir, BlockSignKeyGen: pcf.crypto.BlockSignKeyGen(), GenesisString: pcf.config.GeneralSettings.GenesisString, GenesisNodePrice: genesisNodePrice, @@ -1754,7 +1748,7 @@ func (pcf *processComponentsFactory) createExportFactoryHandler( accountsDBs := make(map[state.AccountsDbIdentifier]state.AccountsAdapter) accountsDBs[state.UserAccountsState] = pcf.state.AccountsAdapter() accountsDBs[state.PeerAccountsState] = pcf.state.PeerAccounts() - exportFolder := filepath.Join(pcf.workingDir, hardforkConfig.ImportFolder) + 
exportFolder := filepath.Join(pcf.flagsConfig.WorkingDir, hardforkConfig.ImportFolder) argsExporter := updateFactory.ArgsExporter{ CoreComponents: pcf.coreData, CryptoComponents: pcf.crypto, diff --git a/node/nodeRunner.go b/node/nodeRunner.go index c7f275144b3..3a6a58b2026 100644 --- a/node/nodeRunner.go +++ b/node/nodeRunner.go @@ -1224,9 +1224,7 @@ func (nr *nodeRunner) CreateManagedProcessComponents( WhiteListerVerifiedTxs: whiteListerVerifiedTxs, MaxRating: configs.RatingsConfig.General.MaxRating, SystemSCConfig: configs.SystemSCConfig, - Version: configs.FlagsConfig.Version, ImportStartHandler: importStartHandler, - WorkingDir: configs.FlagsConfig.WorkingDir, HistoryRepo: historyRepository, FlagsConfig: *configs.FlagsConfig, } diff --git a/process/interface.go b/process/interface.go index 8adfa1c3bfe..4f27614eef5 100644 --- a/process/interface.go +++ b/process/interface.go @@ -347,7 +347,7 @@ type Bootstrapper interface { Close() error AddSyncStateListener(func(isSyncing bool)) GetNodeState() common.NodeState - StartSyncingBlocks() + StartSyncingBlocks() error IsInterfaceNil() bool } diff --git a/process/mock/bootstrapperStub.go b/process/mock/bootstrapperStub.go index 171062a29f1..bd4a1b98bf2 100644 --- a/process/mock/bootstrapperStub.go +++ b/process/mock/bootstrapperStub.go @@ -11,7 +11,7 @@ type BootstrapperStub struct { CreateAndCommitEmptyBlockCalled func(uint32) (data.BodyHandler, data.HeaderHandler, error) AddSyncStateListenerCalled func(func(bool)) GetNodeStateCalled func() common.NodeState - StartSyncingBlocksCalled func() + StartSyncingBlocksCalled func() error } // CreateAndCommitEmptyBlock - @@ -40,8 +40,12 @@ func (boot *BootstrapperStub) GetNodeState() common.NodeState { } // StartSyncingBlocks - -func (boot *BootstrapperStub) StartSyncingBlocks() { - boot.StartSyncingBlocksCalled() +func (boot *BootstrapperStub) StartSyncingBlocks() error { + if boot.StartSyncingBlocksCalled != nil { + return boot.StartSyncingBlocksCalled() + } + + return nil 
} // Close - diff --git a/process/sync/baseSync.go b/process/sync/baseSync.go index 51c6cd86b05..38f2f8eb6ef 100644 --- a/process/sync/baseSync.go +++ b/process/sync/baseSync.go @@ -1195,11 +1195,15 @@ func (boot *baseBootstrap) GetNodeState() common.NodeState { } func (boot *baseBootstrap) handleAccountsTrieIteration() error { - // change this if more trie accounts iterators handlers are needed if !boot.repopulateTokensSupplies { - return nil + return boot.handleTokensSuppliesRepopulation() } + // add more flags and trie iterators here + return nil +} + +func (boot *baseBootstrap) handleTokensSuppliesRepopulation() error { argsTrieAccountsIteratorProc := trieIterators.ArgsTrieAccountsIterator{ Marshaller: boot.marshalizer, Accounts: boot.accounts, diff --git a/process/sync/disabled/disabledBootstrapper.go b/process/sync/disabled/disabledBootstrapper.go index 5d0a9a02086..ce39c262612 100644 --- a/process/sync/disabled/disabledBootstrapper.go +++ b/process/sync/disabled/disabledBootstrapper.go @@ -22,7 +22,8 @@ func (d *disabledBootstrapper) GetNodeState() common.NodeState { } // StartSyncingBlocks won't do anything as this is a disabled component -func (d *disabledBootstrapper) StartSyncingBlocks() { +func (d *disabledBootstrapper) StartSyncingBlocks() error { + return nil } // Close will return a nil error as this is a disabled component diff --git a/process/sync/metablock.go b/process/sync/metablock.go index c4c10196dcf..4e5f464d4bc 100644 --- a/process/sync/metablock.go +++ b/process/sync/metablock.go @@ -135,7 +135,7 @@ func (boot *MetaBootstrap) getBlockBody(headerHandler data.HeaderHandler) (data. 
} // StartSyncingBlocks method will start syncing blocks as a go routine -func (boot *MetaBootstrap) StartSyncingBlocks() { +func (boot *MetaBootstrap) StartSyncingBlocks() error { // when a node starts it first tries to bootstrap from storage, if there already exist a database saved errNotCritical := boot.storageBootstrapper.LoadFromStorage() if errNotCritical != nil { @@ -147,6 +147,8 @@ func (boot *MetaBootstrap) StartSyncingBlocks() { var ctx context.Context ctx, boot.cancelFunc = context.WithCancel(context.Background()) go boot.syncBlocks(ctx) + + return nil } func (boot *MetaBootstrap) setLastEpochStartRound() { diff --git a/process/sync/shardblock.go b/process/sync/shardblock.go index 749a4c85699..a315358b6c0 100644 --- a/process/sync/shardblock.go +++ b/process/sync/shardblock.go @@ -126,7 +126,7 @@ func (boot *ShardBootstrap) getBlockBody(headerHandler data.HeaderHandler) (data } // StartSyncingBlocks method will start syncing blocks as a go routine -func (boot *ShardBootstrap) StartSyncingBlocks() { +func (boot *ShardBootstrap) StartSyncingBlocks() error { errNotCritical := boot.storageBootstrapper.LoadFromStorage() if errNotCritical != nil { log.Debug("boot.syncFromStorer", @@ -139,9 +139,11 @@ func (boot *ShardBootstrap) StartSyncingBlocks() { err := boot.handleAccountsTrieIteration() if err != nil { - panic(fmt.Sprintf("cannot handle start-up trie accounts iteration: %s", err.Error())) + return fmt.Errorf("%w while handling accounts trie iteration", err) } + go boot.syncBlocks(ctx) + return nil } // SyncBlock method actually does the synchronization. 
It requests the next block header from the pool diff --git a/process/sync/trieIterators/tokensSuppliesComputer.go b/process/sync/trieIterators/tokensSuppliesComputer.go index 501ea96504e..25ad5ecd0d1 100644 --- a/process/sync/trieIterators/tokensSuppliesComputer.go +++ b/process/sync/trieIterators/tokensSuppliesComputer.go @@ -3,6 +3,7 @@ package trieIterators import ( "bytes" "context" + "encoding/hex" "fmt" "math/big" @@ -47,6 +48,7 @@ func NewTokensSuppliesProcessor(args ArgsTokensSuppliesProcessor) (*tokensSuppli } // HandleTrieAccountIteration is the handler for the trie account iteration +// note that this function is not concurrent safe func (t *tokensSuppliesProcessor) HandleTrieAccountIteration(userAccount state.UserAccountHandler) error { if check.IfNil(userAccount) { return errNilUserAccount @@ -55,8 +57,10 @@ func (t *tokensSuppliesProcessor) HandleTrieAccountIteration(userAccount state.U log.Debug("repopulate tokens supplies: skipping system account address") return nil } + rh := userAccount.GetRootHash() - if len(rh) == 0 { + isValidRootHashToIterateFor := len(rh) > 0 && !bytes.Equal(rh, make([]byte, len(rh))) + if !isValidRootHashToIterateFor { return nil } @@ -67,7 +71,7 @@ func (t *tokensSuppliesProcessor) HandleTrieAccountIteration(userAccount state.U errDataTrieGet := userAccount.DataTrie().GetAllLeavesOnChannel(dataTrie, context.Background(), rh, keyBuilder.NewKeyBuilder()) if errDataTrieGet != nil { - return errDataTrieGet + return fmt.Errorf("%w while getting all leaves for root hash %s", errDataTrieGet, hex.EncodeToString(rh)) } log.Trace("extractTokensSupplies - parsing account", "address", userAccount.AddressBytes()) @@ -82,13 +86,12 @@ func (t *tokensSuppliesProcessor) HandleTrieAccountIteration(userAccount state.U suffix := append(userLeaf.Key(), userAccount.AddressBytes()...) 
value, errVal := userLeaf.ValueWithoutSuffix(suffix) if errVal != nil { - log.Warn("cannot get value without suffix", "error", errVal, "key", userLeaf.Key()) - return errVal + return fmt.Errorf("%w while parsing the token with key %s", errVal, hex.EncodeToString(tokenKey)) } var esToken esdt.ESDigitalToken err := t.marshaller.Unmarshal(&esToken, value) if err != nil { - return err + return fmt.Errorf("%w while unmarshaling the token with key %s", err, hex.EncodeToString(tokenKey)) } tokenName := string(tokenKey)[lenESDTPrefix:] @@ -98,7 +101,7 @@ func (t *tokensSuppliesProcessor) HandleTrieAccountIteration(userAccount state.U err := dataTrie.ErrChan.ReadFromChanNonBlocking() if err != nil { - return fmt.Errorf("error while iterating over an account's trie: %w", err) + return fmt.Errorf("%w while parsing errors from the trie iteration", err) } return nil @@ -130,6 +133,7 @@ func (t *tokensSuppliesProcessor) putInSuppliesMap(id string, value *big.Int) { } // SaveSupplies will store the recomputed tokens supplies into the database +// note that this function is not concurrent safe func (t *tokensSuppliesProcessor) SaveSupplies() error { suppliesStorer, err := t.storageService.GetStorer(dataRetriever.ESDTSuppliesUnit) if err != nil { diff --git a/process/sync/trieIterators/tokensSuppliesComputer_test.go b/process/sync/trieIterators/tokensSuppliesComputer_test.go index 77c548ef7d6..ff46c82b79f 100644 --- a/process/sync/trieIterators/tokensSuppliesComputer_test.go +++ b/process/sync/trieIterators/tokensSuppliesComputer_test.go @@ -1,6 +1,7 @@ package trieIterators import ( + "bytes" "context" "errors" "math/big" @@ -96,6 +97,46 @@ func TestTokensSuppliesProcessor_HandleTrieAccountIteration(t *testing.T) { require.NoError(t, err) }) + t.Run("root hash of account is zero only", func(t *testing.T) { + t.Parallel() + + tsp, _ := NewTokensSuppliesProcessor(getTokensSuppliesProcessorArgs()) + + userAcc := stateMock.NewAccountWrapMock([]byte("addr")) + 
userAcc.SetRootHash(bytes.Repeat([]byte{0}, 32)) + err := tsp.HandleTrieAccountIteration(userAcc) + require.NoError(t, err) + }) + + t.Run("should not save tokens from the system account", func(t *testing.T) { + t.Parallel() + + args := getTokensSuppliesProcessorArgs() + tsp, _ := NewTokensSuppliesProcessor(args) + + userAcc, _ := state.NewUserAccount(vmcommon.SystemAccountAddress) + userAcc.SetRootHash([]byte("rootHash")) + userAcc.SetDataTrie(&trie.TrieStub{ + GetAllLeavesOnChannelCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte, keyBuilder common.KeyBuilder) error { + esToken := &esdt.ESDigitalToken{ + Value: big.NewInt(37), + } + esBytes, _ := args.Marshaller.Marshal(esToken) + tknKey := []byte("ELRONDesdtTKN-00aacc") + value := append(esBytes, tknKey...) + value = append(value, []byte("addr")...) + leavesChannels.LeavesChan <- keyValStorage.NewKeyValStorage(tknKey, value) + + close(leavesChannels.LeavesChan) + return nil + }, + }) + + err := tsp.HandleTrieAccountIteration(userAcc) + require.NoError(t, err) + require.Empty(t, tsp.tokensSupplies) + }) + t.Run("should work", func(t *testing.T) { t.Parallel() diff --git a/process/sync/trieIterators/trieAccountsIterator.go b/process/sync/trieIterators/trieAccountsIterator.go index a60ca9cee59..49cbc98547d 100644 --- a/process/sync/trieIterators/trieAccountsIterator.go +++ b/process/sync/trieIterators/trieAccountsIterator.go @@ -44,6 +44,10 @@ func NewTrieAccountsIterator(args ArgsTrieAccountsIterator) (*trieAccountsIterat // Process will iterate over the entire trie and iterate over the Accounts while calling the received handlers func (t *trieAccountsIterator) Process(handlers ...trieAccountIteratorHandler) error { + if len(handlers) == 0 { + return nil + } + rootHash, err := t.accounts.RootHash() if err != nil { return err @@ -58,6 +62,10 @@ func (t *trieAccountsIterator) Process(handlers ...trieAccountIteratorHandler) e return err } + return 
t.iterateOverHandlers(iteratorChannels, handlers) +} + +func (t *trieAccountsIterator) iterateOverHandlers(iteratorChannels *common.TrieIteratorChannels, handlers []trieAccountIteratorHandler) error { log.Debug("starting the trie's accounts iteration with calling the handlers") for leaf := range iteratorChannels.LeavesChan { userAddress, isAccount := t.getAddress(leaf) diff --git a/process/sync/trieIterators/trieAccountsIterator_test.go b/process/sync/trieIterators/trieAccountsIterator_test.go index bab0b88a1b5..3ab76944ee7 100644 --- a/process/sync/trieIterators/trieAccountsIterator_test.go +++ b/process/sync/trieIterators/trieAccountsIterator_test.go @@ -21,6 +21,10 @@ func getTrieAccountsIteratorArgs() ArgsTrieAccountsIterator { } } +func dummyIterator(_ state.UserAccountHandler) error { + return nil +} + func TestNewTrieAccountsIterator(t *testing.T) { t.Parallel() @@ -61,6 +65,20 @@ func TestTrieAccountsIterator_Process(t *testing.T) { var expectedErr = errors.New("expected error") + t.Run("skip processing if no handler", func(t *testing.T) { + t.Parallel() + + args := getTrieAccountsIteratorArgs() + args.Accounts = &stateMock.AccountsStub{ + RootHashCalled: func() ([]byte, error) { + return nil, errors.New("error that should not be returned") + }, + } + tai, _ := NewTrieAccountsIterator(args) + err := tai.Process() + require.NoError(t, err) + }) + t.Run("cannot get root hash", func(t *testing.T) { t.Parallel() @@ -72,7 +90,7 @@ func TestTrieAccountsIterator_Process(t *testing.T) { } tai, _ := NewTrieAccountsIterator(args) - err := tai.Process() + err := tai.Process(dummyIterator) require.Equal(t, expectedErr, err) }) @@ -90,7 +108,7 @@ func TestTrieAccountsIterator_Process(t *testing.T) { } tai, _ := NewTrieAccountsIterator(args) - err := tai.Process() + err := tai.Process(dummyIterator) require.Equal(t, expectedErr, err) }) @@ -117,7 +135,7 @@ func TestTrieAccountsIterator_Process(t *testing.T) { } tai, _ := NewTrieAccountsIterator(args) - err := 
tai.Process() + err := tai.Process(dummyIterator) require.Equal(t, expectedErr, err) }) @@ -144,7 +162,7 @@ func TestTrieAccountsIterator_Process(t *testing.T) { } tai, _ := NewTrieAccountsIterator(args) - err := tai.Process() + err := tai.Process(dummyIterator) require.NoError(t, err) }) diff --git a/testscommon/components/components.go b/testscommon/components/components.go index c5c204aaa5b..a94d5217430 100644 --- a/testscommon/components/components.go +++ b/testscommon/components/components.go @@ -565,9 +565,10 @@ func GetProcessArgs( MaxServiceFee: 100, }, }, - Version: "v1.0.0", HistoryRepo: &dblookupext.HistoryRepositoryStub{}, - FlagsConfig: config.ContextFlagsConfig{}, + FlagsConfig: config.ContextFlagsConfig{ + Version: "v1.0.0", + }, } } From a3b1530899f23a1e06b7b5359079390b280bd1e2 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Thu, 4 May 2023 12:05:32 +0300 Subject: [PATCH 123/221] fix after review --- dataRetriever/requestHandlers/requesters/requesters_test.go | 2 -- dataRetriever/shardedData/shardedData_test.go | 2 -- dataRetriever/txpool/shardedTxPool_test.go | 2 -- 3 files changed, 6 deletions(-) diff --git a/dataRetriever/requestHandlers/requesters/requesters_test.go b/dataRetriever/requestHandlers/requesters/requesters_test.go index a0029d755ae..4ec7ec9a74e 100644 --- a/dataRetriever/requestHandlers/requesters/requesters_test.go +++ b/dataRetriever/requestHandlers/requesters/requesters_test.go @@ -10,7 +10,6 @@ import ( "github.com/multiversx/mx-chain-go/dataRetriever/mock" "github.com/multiversx/mx-chain-go/dataRetriever/requestHandlers" dataRetrieverStub "github.com/multiversx/mx-chain-go/testscommon/dataRetriever" - logger "github.com/multiversx/mx-chain-logger-go" "github.com/stretchr/testify/assert" ) @@ -82,7 +81,6 @@ func testRequestDataFromHashArray(t *testing.T, requesterType requestHandlerType t.Run("should work", func(t *testing.T) { t.Parallel() - _ = logger.SetLogLevel("*:TRACE") // coverage providedEpoch := uint32(1234) 
providedHashes := [][]byte{[]byte("hash 1"), []byte("hash 2"), []byte("hash 3")} args := createMockArgBaseRequester() diff --git a/dataRetriever/shardedData/shardedData_test.go b/dataRetriever/shardedData/shardedData_test.go index 071b0c00356..7ca7c379c17 100644 --- a/dataRetriever/shardedData/shardedData_test.go +++ b/dataRetriever/shardedData/shardedData_test.go @@ -311,8 +311,6 @@ func TestShardedData_SearchFirstDataFoundShouldRetResults(t *testing.T) { assert.True(t, ok) } -// TODO: Add high load test, reach maximum capacity and inspect RAM usage. EN-6735. - func TestShardedData_RemoveSetOfDataFromPool(t *testing.T) { t.Parallel() diff --git a/dataRetriever/txpool/shardedTxPool_test.go b/dataRetriever/txpool/shardedTxPool_test.go index 4f80506c304..6fdaa4676ad 100644 --- a/dataRetriever/txpool/shardedTxPool_test.go +++ b/dataRetriever/txpool/shardedTxPool_test.go @@ -446,5 +446,3 @@ func newTxPoolToTest() (dataRetriever.ShardedDataCacherNotifier, error) { } return NewShardedTxPool(args) } - -// TODO: Add high load test, reach maximum capacity and inspect RAM usage. EN-6735. 
From ad10f3cd1b88d3025412614100ef650332e1ec8b Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Thu, 4 May 2023 14:55:16 +0300 Subject: [PATCH 124/221] notify and sync missing data trie node --- common/interface.go | 13 ++ epochStart/metachain/systemSCs_test.go | 35 +++-- errors/missingTrieNodeError.go | 44 ------ factory/api/apiResolverFactory.go | 40 ++--- factory/consensus/consensusComponents.go | 7 + factory/interface.go | 1 + factory/mock/stateComponentsHolderStub.go | 22 ++- factory/processing/blockProcessorCreator.go | 85 +++++----- .../processing/blockProcessorCreator_test.go | 2 + factory/processing/export_test.go | 2 + factory/processing/processComponents.go | 1 + factory/state/stateComponents.go | 27 ++-- factory/state/stateComponentsHandler.go | 11 ++ genesis/process/genesisBlockCreator.go | 36 +++-- genesis/process/metaGenesisBlockCreator.go | 36 +++-- genesis/process/shardGenesisBlockCreator.go | 36 +++-- go.mod | 2 +- go.sum | 8 +- integrationTests/testProcessorNode.go | 106 +++++++------ integrationTests/vm/testInitializer.go | 146 +++++++++--------- integrationTests/vm/wasm/utils.go | 5 +- process/smartContract/hooks/blockChainHook.go | 100 +++++++----- .../hooks/blockChainHook_test.go | 102 +++++++++++- process/smartContract/hooks/errors.go | 3 + process/sync/export_test.go | 5 - process/sync/interface.go | 7 - process/sync/metablock.go | 2 +- process/sync/metablock_test.go | 3 +- process/sync/shardblock.go | 18 +-- process/sync/shardblock_test.go | 24 +-- state/syncer/export_test.go | 6 + state/syncer/missingTrieNodesNotifier.go | 53 +++++++ state/syncer/missingTrieNodesNotifier_test.go | 55 +++++++ state/syncer/userAccountsSyncer.go | 43 +++++- testscommon/components/default.go | 1 + testscommon/missingTrieNodesNotifierStub.go | 25 +++ testscommon/stateComponentsMock.go | 17 +- .../stateSyncNotifierSubscriberStub.go | 18 +++ trie/node.go | 5 +- 39 files changed, 725 insertions(+), 427 deletions(-) delete mode 100644 
errors/missingTrieNodeError.go create mode 100644 state/syncer/export_test.go create mode 100644 state/syncer/missingTrieNodesNotifier.go create mode 100644 state/syncer/missingTrieNodesNotifier_test.go create mode 100644 testscommon/missingTrieNodesNotifierStub.go create mode 100644 testscommon/stateSyncNotifierSubscriberStub.go diff --git a/common/interface.go b/common/interface.go index 9e23d0786c0..e5e2f118e07 100644 --- a/common/interface.go +++ b/common/interface.go @@ -353,3 +353,16 @@ type ManagedPeersHolder interface { IsMultiKeyMode() bool IsInterfaceNil() bool } + +// MissingTrieNodesNotifier defines the operations of an entity that notifies about missing trie nodes +type MissingTrieNodesNotifier interface { + RegisterHandler(handler StateSyncNotifierSubscriber) + NotifyMissingTrieNode(hash []byte) + IsInterfaceNil() bool +} + +// StateSyncNotifierSubscriber defines the operations of an entity that subscribes to a missing trie nodes notifier +type StateSyncNotifierSubscriber interface { + MissingDataTrieNodeFound(hash []byte) + IsInterfaceNil() bool +} diff --git a/epochStart/metachain/systemSCs_test.go b/epochStart/metachain/systemSCs_test.go index 8ca727ee0cb..81096d0697f 100644 --- a/epochStart/metachain/systemSCs_test.go +++ b/epochStart/metachain/systemSCs_test.go @@ -953,23 +953,24 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp nodesSetup := &mock.NodesSetupStub{} argsHook := hooks.ArgBlockChainHook{ - Accounts: userAccountsDB, - PubkeyConv: &testscommon.PubkeyConverterMock{}, - StorageService: &storageStubs.ChainStorerStub{}, - BlockChain: blockChain, - ShardCoordinator: &mock.ShardCoordinatorStub{}, - Marshalizer: marshalizer, - Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, - BuiltInFunctions: vmcommonBuiltInFunctions.NewBuiltInFunctionContainer(), - NFTStorageHandler: &testscommon.SimpleNFTStorageHandlerStub{}, - GlobalSettingsHandler: &testscommon.ESDTGlobalSettingsHandlerStub{}, - DataPool: 
testDataPool, - CompiledSCPool: testDataPool.SmartContracts(), - EpochNotifier: en, - EnableEpochsHandler: enableEpochsHandler, - NilCompiledSCStore: true, - GasSchedule: gasScheduleNotifier, - Counter: &testscommon.BlockChainHookCounterStub{}, + Accounts: userAccountsDB, + PubkeyConv: &testscommon.PubkeyConverterMock{}, + StorageService: &storageStubs.ChainStorerStub{}, + BlockChain: blockChain, + ShardCoordinator: &mock.ShardCoordinatorStub{}, + Marshalizer: marshalizer, + Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, + BuiltInFunctions: vmcommonBuiltInFunctions.NewBuiltInFunctionContainer(), + NFTStorageHandler: &testscommon.SimpleNFTStorageHandlerStub{}, + GlobalSettingsHandler: &testscommon.ESDTGlobalSettingsHandlerStub{}, + DataPool: testDataPool, + CompiledSCPool: testDataPool.SmartContracts(), + EpochNotifier: en, + EnableEpochsHandler: enableEpochsHandler, + NilCompiledSCStore: true, + GasSchedule: gasScheduleNotifier, + Counter: &testscommon.BlockChainHookCounterStub{}, + MissingTrieNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, } blockChainHookImpl, _ := hooks.NewBlockChainHookImpl(argsHook) diff --git a/errors/missingTrieNodeError.go b/errors/missingTrieNodeError.go deleted file mode 100644 index 7ddfefbfcbc..00000000000 --- a/errors/missingTrieNodeError.go +++ /dev/null @@ -1,44 +0,0 @@ -package errors - -import ( - "encoding/hex" - "fmt" - - "github.com/multiversx/mx-chain-core-go/core" -) - -// GetNodeFromDBErrWithKey defines a custom error for trie get node -type GetNodeFromDBErrWithKey struct { - getErr error - key []byte - dbIdentifier string -} - -// NewGetNodeFromDBErrWithKey will create a new instance of GetNodeFromDBErrWithKey -func NewGetNodeFromDBErrWithKey(key []byte, err error, id string) *GetNodeFromDBErrWithKey { - return &GetNodeFromDBErrWithKey{ - getErr: err, - key: key, - dbIdentifier: id, - } -} - -// Error returns the error as string -func (e *GetNodeFromDBErrWithKey) Error() string { - return fmt.Sprintf( - 
"%s: %s for key %v", - core.GetNodeFromDBErrorString, - e.getErr.Error(), - hex.EncodeToString(e.key), - ) -} - -// GetKey will return the key that generated the error -func (e *GetNodeFromDBErrWithKey) GetKey() []byte { - return e.key -} - -// GetIdentifier will return the db identifier corresponding to the db -func (e *GetNodeFromDBErrWithKey) GetIdentifier() string { - return e.dbIdentifier -} diff --git a/factory/api/apiResolverFactory.go b/factory/api/apiResolverFactory.go index 3c815aecb3e..7033187a800 100644 --- a/factory/api/apiResolverFactory.go +++ b/factory/api/apiResolverFactory.go @@ -33,6 +33,7 @@ import ( "github.com/multiversx/mx-chain-go/process/txstatus" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/state/syncer" storageFactory "github.com/multiversx/mx-chain-go/storage/factory" "github.com/multiversx/mx-chain-go/storage/storageunit" "github.com/multiversx/mx-chain-go/vm" @@ -355,25 +356,26 @@ func createScQueryElement( scStorage := args.generalConfig.SmartContractsStorageForSCQuery scStorage.DB.FilePath += fmt.Sprintf("%d", args.index) argsHook := hooks.ArgBlockChainHook{ - Accounts: args.stateComponents.AccountsAdapterAPI(), - PubkeyConv: args.coreComponents.AddressPubKeyConverter(), - StorageService: args.dataComponents.StorageService(), - BlockChain: args.dataComponents.Blockchain(), - ShardCoordinator: args.processComponents.ShardCoordinator(), - Marshalizer: args.coreComponents.InternalMarshalizer(), - Uint64Converter: args.coreComponents.Uint64ByteSliceConverter(), - BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), - NFTStorageHandler: builtInFuncFactory.NFTStorageHandler(), - GlobalSettingsHandler: builtInFuncFactory.ESDTGlobalSettingsHandler(), - DataPool: args.dataComponents.Datapool(), - ConfigSCStorage: scStorage, - CompiledSCPool: smartContractsCache, - WorkingDir: args.workingDir, - EpochNotifier: args.coreComponents.EpochNotifier(), - 
EnableEpochsHandler: args.coreComponents.EnableEpochsHandler(), - NilCompiledSCStore: true, - GasSchedule: args.gasScheduleNotifier, - Counter: counters.NewDisabledCounter(), + Accounts: args.stateComponents.AccountsAdapterAPI(), + PubkeyConv: args.coreComponents.AddressPubKeyConverter(), + StorageService: args.dataComponents.StorageService(), + BlockChain: args.dataComponents.Blockchain(), + ShardCoordinator: args.processComponents.ShardCoordinator(), + Marshalizer: args.coreComponents.InternalMarshalizer(), + Uint64Converter: args.coreComponents.Uint64ByteSliceConverter(), + BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), + NFTStorageHandler: builtInFuncFactory.NFTStorageHandler(), + GlobalSettingsHandler: builtInFuncFactory.ESDTGlobalSettingsHandler(), + DataPool: args.dataComponents.Datapool(), + ConfigSCStorage: scStorage, + CompiledSCPool: smartContractsCache, + WorkingDir: args.workingDir, + EpochNotifier: args.coreComponents.EpochNotifier(), + EnableEpochsHandler: args.coreComponents.EnableEpochsHandler(), + NilCompiledSCStore: true, + GasSchedule: args.gasScheduleNotifier, + Counter: counters.NewDisabledCounter(), + MissingTrieNodesNotifier: syncer.NewMissingTrieNodesNotifier(), } maxGasForVmQueries := args.generalConfig.VirtualMachine.GasConfig.ShardMaxGasPerVmQuery diff --git a/factory/consensus/consensusComponents.go b/factory/consensus/consensusComponents.go index 50e05ad2a1a..a3fe9931c53 100644 --- a/factory/consensus/consensusComponents.go +++ b/factory/consensus/consensusComponents.go @@ -1,6 +1,7 @@ package consensus import ( + "fmt" "time" "github.com/multiversx/mx-chain-core-go/core" @@ -475,6 +476,12 @@ func (ccf *consensusComponentsFactory) createShardBootstrapper() (process.Bootst return nil, err } + stateNodesNotifierSubscriber, ok := accountsDBSyncer.(common.StateSyncNotifierSubscriber) + if !ok { + return nil, fmt.Errorf("wrong type conversion for accountsDBSyncer, type: %T", accountsDBSyncer) + } + 
ccf.stateComponents.MissingTrieNodesNotifier().RegisterHandler(stateNodesNotifierSubscriber) + argsBaseBootstrapper := sync.ArgBaseBootstrapper{ PoolsHolder: ccf.dataComponents.Datapool(), Store: ccf.dataComponents.StorageService(), diff --git a/factory/interface.go b/factory/interface.go index cd686dafa14..2f7aa233296 100644 --- a/factory/interface.go +++ b/factory/interface.go @@ -326,6 +326,7 @@ type StateComponentsHolder interface { AccountsRepository() state.AccountsRepository TriesContainer() common.TriesHolder TrieStorageManagers() map[string]common.StorageManager + MissingTrieNodesNotifier() common.MissingTrieNodesNotifier IsInterfaceNil() bool } diff --git a/factory/mock/stateComponentsHolderStub.go b/factory/mock/stateComponentsHolderStub.go index 65cf2efdb0d..010eb9e9168 100644 --- a/factory/mock/stateComponentsHolderStub.go +++ b/factory/mock/stateComponentsHolderStub.go @@ -7,12 +7,13 @@ import ( // StateComponentsHolderStub - type StateComponentsHolderStub struct { - PeerAccountsCalled func() state.AccountsAdapter - AccountsAdapterCalled func() state.AccountsAdapter - AccountsAdapterAPICalled func() state.AccountsAdapter - AccountsRepositoryCalled func() state.AccountsRepository - TriesContainerCalled func() common.TriesHolder - TrieStorageManagersCalled func() map[string]common.StorageManager + PeerAccountsCalled func() state.AccountsAdapter + AccountsAdapterCalled func() state.AccountsAdapter + AccountsAdapterAPICalled func() state.AccountsAdapter + AccountsRepositoryCalled func() state.AccountsRepository + TriesContainerCalled func() common.TriesHolder + TrieStorageManagersCalled func() map[string]common.StorageManager + MissingTrieNodesNotifierCalled func() common.MissingTrieNodesNotifier } // PeerAccounts - @@ -69,6 +70,15 @@ func (s *StateComponentsHolderStub) TrieStorageManagers() map[string]common.Stor return nil } +// MissingTrieNodesNotifier - +func (s *StateComponentsHolderStub) MissingTrieNodesNotifier() common.MissingTrieNodesNotifier { + 
if s.MissingTrieNodesNotifierCalled != nil { + return s.MissingTrieNodesNotifierCalled() + } + + return nil +} + // IsInterfaceNil - func (s *StateComponentsHolderStub) IsInterfaceNil() bool { return s == nil diff --git a/factory/processing/blockProcessorCreator.go b/factory/processing/blockProcessorCreator.go index 4f73a39db02..e1cde3353e7 100644 --- a/factory/processing/blockProcessorCreator.go +++ b/factory/processing/blockProcessorCreator.go @@ -37,6 +37,7 @@ import ( "github.com/multiversx/mx-chain-go/process/transaction" "github.com/multiversx/mx-chain-go/process/txsimulator" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/state/syncer" "github.com/multiversx/mx-chain-go/storage/txcache" "github.com/multiversx/mx-chain-go/vm" logger "github.com/multiversx/mx-chain-logger-go" @@ -64,6 +65,7 @@ func (pcf *processComponentsFactory) newBlockProcessor( scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler, processedMiniBlocksTracker process.ProcessedMiniBlocksTracker, receiptsRepository mainFactory.ReceiptsRepository, + missingTrieNodesNotifier common.MissingTrieNodesNotifier, ) (*blockProcessorAndVmFactories, error) { if pcf.bootstrapComponents.ShardCoordinator().SelfId() < pcf.bootstrapComponents.ShardCoordinator().NumberOfShards() { return pcf.newShardBlockProcessor( @@ -79,6 +81,7 @@ func (pcf *processComponentsFactory) newBlockProcessor( scheduledTxsExecutionHandler, processedMiniBlocksTracker, receiptsRepository, + missingTrieNodesNotifier, ) } if pcf.bootstrapComponents.ShardCoordinator().SelfId() == core.MetachainShardId { @@ -117,6 +120,7 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler, processedMiniBlocksTracker process.ProcessedMiniBlocksTracker, receiptsRepository mainFactory.ReceiptsRepository, + missingTrieNodesNotifier common.MissingTrieNodesNotifier, ) (*blockProcessorAndVmFactories, error) { argsParser := 
smartContract.NewArgumentParser() @@ -141,6 +145,7 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( vmFactory, err := pcf.createVMFactoryShard( pcf.state.AccountsAdapter(), + missingTrieNodesNotifier, builtInFuncFactory.BuiltInFunctionContainer(), esdtTransferParser, wasmVMChangeLocker, @@ -974,6 +979,7 @@ func (pcf *processComponentsFactory) createShardTxSimulatorProcessor( smartContractStorageSimulate := pcf.config.SmartContractsStorageSimulate vmFactory, err := pcf.createVMFactoryShard( readOnlyAccountsDB, + syncer.NewMissingTrieNodesNotifier(), builtInFuncFactory.BuiltInFunctionContainer(), esdtTransferParser, wasmVMChangeLocker, @@ -1138,6 +1144,7 @@ func (pcf *processComponentsFactory) createMetaTxSimulatorProcessor( func (pcf *processComponentsFactory) createVMFactoryShard( accounts state.AccountsAdapter, + notifier common.MissingTrieNodesNotifier, builtInFuncs vmcommon.BuiltInFunctionContainer, esdtTransferParser vmcommon.ESDTTransferParser, wasmVMChangeLocker common.Locker, @@ -1151,25 +1158,26 @@ func (pcf *processComponentsFactory) createVMFactoryShard( } argsHook := hooks.ArgBlockChainHook{ - Accounts: accounts, - PubkeyConv: pcf.coreData.AddressPubKeyConverter(), - StorageService: pcf.data.StorageService(), - BlockChain: pcf.data.Blockchain(), - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - Marshalizer: pcf.coreData.InternalMarshalizer(), - Uint64Converter: pcf.coreData.Uint64ByteSliceConverter(), - BuiltInFunctions: builtInFuncs, - DataPool: pcf.data.Datapool(), - CompiledSCPool: pcf.data.Datapool().SmartContracts(), - WorkingDir: pcf.workingDir, - NFTStorageHandler: nftStorageHandler, - GlobalSettingsHandler: globalSettingsHandler, - EpochNotifier: pcf.coreData.EpochNotifier(), - EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), - NilCompiledSCStore: false, - ConfigSCStorage: configSCStorage, - GasSchedule: pcf.gasSchedule, - Counter: counter, + Accounts: accounts, + PubkeyConv: 
pcf.coreData.AddressPubKeyConverter(), + StorageService: pcf.data.StorageService(), + BlockChain: pcf.data.Blockchain(), + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + Uint64Converter: pcf.coreData.Uint64ByteSliceConverter(), + BuiltInFunctions: builtInFuncs, + DataPool: pcf.data.Datapool(), + CompiledSCPool: pcf.data.Datapool().SmartContracts(), + WorkingDir: pcf.workingDir, + NFTStorageHandler: nftStorageHandler, + GlobalSettingsHandler: globalSettingsHandler, + EpochNotifier: pcf.coreData.EpochNotifier(), + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + NilCompiledSCStore: false, + ConfigSCStorage: configSCStorage, + GasSchedule: pcf.gasSchedule, + Counter: counter, + MissingTrieNodesNotifier: notifier, } blockChainHookImpl, err := hooks.NewBlockChainHookImpl(argsHook) @@ -1201,25 +1209,26 @@ func (pcf *processComponentsFactory) createVMFactoryMeta( globalSettingsHandler vmcommon.ESDTGlobalSettingsHandler, ) (process.VirtualMachinesContainerFactory, error) { argsHook := hooks.ArgBlockChainHook{ - Accounts: accounts, - PubkeyConv: pcf.coreData.AddressPubKeyConverter(), - StorageService: pcf.data.StorageService(), - BlockChain: pcf.data.Blockchain(), - ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), - Marshalizer: pcf.coreData.InternalMarshalizer(), - Uint64Converter: pcf.coreData.Uint64ByteSliceConverter(), - BuiltInFunctions: builtInFuncs, - DataPool: pcf.data.Datapool(), - CompiledSCPool: pcf.data.Datapool().SmartContracts(), - ConfigSCStorage: configSCStorage, - WorkingDir: pcf.workingDir, - NFTStorageHandler: nftStorageHandler, - GlobalSettingsHandler: globalSettingsHandler, - EpochNotifier: pcf.coreData.EpochNotifier(), - EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), - NilCompiledSCStore: false, - GasSchedule: pcf.gasSchedule, - Counter: counters.NewDisabledCounter(), + Accounts: accounts, + PubkeyConv: pcf.coreData.AddressPubKeyConverter(), + 
StorageService: pcf.data.StorageService(), + BlockChain: pcf.data.Blockchain(), + ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), + Marshalizer: pcf.coreData.InternalMarshalizer(), + Uint64Converter: pcf.coreData.Uint64ByteSliceConverter(), + BuiltInFunctions: builtInFuncs, + DataPool: pcf.data.Datapool(), + CompiledSCPool: pcf.data.Datapool().SmartContracts(), + ConfigSCStorage: configSCStorage, + WorkingDir: pcf.workingDir, + NFTStorageHandler: nftStorageHandler, + GlobalSettingsHandler: globalSettingsHandler, + EpochNotifier: pcf.coreData.EpochNotifier(), + EnableEpochsHandler: pcf.coreData.EnableEpochsHandler(), + NilCompiledSCStore: false, + GasSchedule: pcf.gasSchedule, + Counter: counters.NewDisabledCounter(), + MissingTrieNodesNotifier: syncer.NewMissingTrieNodesNotifier(), } blockChainHookImpl, err := hooks.NewBlockChainHookImpl(argsHook) diff --git a/factory/processing/blockProcessorCreator_test.go b/factory/processing/blockProcessorCreator_test.go index e31604fe662..774962cf943 100644 --- a/factory/processing/blockProcessorCreator_test.go +++ b/factory/processing/blockProcessorCreator_test.go @@ -59,6 +59,7 @@ func Test_newBlockProcessorCreatorForShard(t *testing.T) { &testscommon.ScheduledTxsExecutionStub{}, &testscommon.ProcessedMiniBlocksTrackerStub{}, &testscommon.ReceiptsRepositoryStub{}, + &testscommon.MissingTrieNodesNotifierStub{}, ) require.NoError(t, err) @@ -182,6 +183,7 @@ func Test_newBlockProcessorCreatorForMeta(t *testing.T) { &testscommon.ScheduledTxsExecutionStub{}, &testscommon.ProcessedMiniBlocksTrackerStub{}, &testscommon.ReceiptsRepositoryStub{}, + &testscommon.MissingTrieNodesNotifierStub{}, ) require.NoError(t, err) diff --git a/factory/processing/export_test.go b/factory/processing/export_test.go index f9cae468a41..94cb332031b 100644 --- a/factory/processing/export_test.go +++ b/factory/processing/export_test.go @@ -26,6 +26,7 @@ func (pcf *processComponentsFactory) NewBlockProcessor( scheduledTxsExecutionHandler 
process.ScheduledTxsExecutionHandler, processedMiniBlocksTracker process.ProcessedMiniBlocksTracker, receiptsRepository factory.ReceiptsRepository, + missingTrieNodesNotifier common.MissingTrieNodesNotifier, ) (process.BlockProcessor, process.VirtualMachinesContainerFactory, error) { blockProcessorComponents, err := pcf.newBlockProcessor( requestHandler, @@ -41,6 +42,7 @@ func (pcf *processComponentsFactory) NewBlockProcessor( scheduledTxsExecutionHandler, processedMiniBlocksTracker, receiptsRepository, + missingTrieNodesNotifier, ) if err != nil { return nil, nil, err diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index 24d35bbd61a..250aadb0621 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -611,6 +611,7 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { scheduledTxsExecutionHandler, processedMiniBlocksTracker, receiptsRepository, + pcf.state.MissingTrieNodesNotifier(), ) if err != nil { return nil, err diff --git a/factory/state/stateComponents.go b/factory/state/stateComponents.go index d0b104248c8..a66990515cd 100644 --- a/factory/state/stateComponents.go +++ b/factory/state/stateComponents.go @@ -15,6 +15,7 @@ import ( factoryState "github.com/multiversx/mx-chain-go/state/factory" "github.com/multiversx/mx-chain-go/state/storagePruningManager" "github.com/multiversx/mx-chain-go/state/storagePruningManager/evictionWaitingList" + "github.com/multiversx/mx-chain-go/state/syncer" trieFactory "github.com/multiversx/mx-chain-go/trie/factory" ) @@ -47,12 +48,13 @@ type stateComponentsFactory struct { // stateComponents struct holds the state components of the MultiversX protocol type stateComponents struct { - peerAccounts state.AccountsAdapter - accountsAdapter state.AccountsAdapter - accountsAdapterAPI state.AccountsAdapter - accountsRepository state.AccountsRepository - triesContainer common.TriesHolder - trieStorageManagers 
map[string]common.StorageManager + peerAccounts state.AccountsAdapter + accountsAdapter state.AccountsAdapter + accountsAdapterAPI state.AccountsAdapter + accountsRepository state.AccountsRepository + triesContainer common.TriesHolder + trieStorageManagers map[string]common.StorageManager + missingTrieNodesNotifier common.MissingTrieNodesNotifier } // NewStateComponentsFactory will return a new instance of stateComponentsFactory @@ -121,12 +123,13 @@ func (scf *stateComponentsFactory) Create() (*stateComponents, error) { } return &stateComponents{ - peerAccounts: peerAdapter, - accountsAdapter: accountsAdapter, - accountsAdapterAPI: accountsAdapterAPI, - accountsRepository: accountsRepository, - triesContainer: triesContainer, - trieStorageManagers: trieStorageManagers, + peerAccounts: peerAdapter, + accountsAdapter: accountsAdapter, + accountsAdapterAPI: accountsAdapterAPI, + accountsRepository: accountsRepository, + triesContainer: triesContainer, + trieStorageManagers: trieStorageManagers, + missingTrieNodesNotifier: syncer.NewMissingTrieNodesNotifier(), }, nil } diff --git a/factory/state/stateComponentsHandler.go b/factory/state/stateComponentsHandler.go index 0fe1465d450..c3af204327a 100644 --- a/factory/state/stateComponentsHandler.go +++ b/factory/state/stateComponentsHandler.go @@ -199,6 +199,17 @@ func (msc *managedStateComponents) SetTriesStorageManagers(managers map[string]c return nil } +func (msc *managedStateComponents) MissingTrieNodesNotifier() common.MissingTrieNodesNotifier { + msc.mutStateComponents.RLock() + defer msc.mutStateComponents.RUnlock() + + if check.IfNil(msc.missingTrieNodesNotifier) { + return nil + } + + return msc.missingTrieNodesNotifier +} + // IsInterfaceNil returns true if the interface is nil func (msc *managedStateComponents) IsInterfaceNil() bool { return msc == nil diff --git a/genesis/process/genesisBlockCreator.go b/genesis/process/genesisBlockCreator.go index f15833913a9..c4724f04ff0 100644 --- 
a/genesis/process/genesisBlockCreator.go +++ b/genesis/process/genesisBlockCreator.go @@ -24,6 +24,7 @@ import ( "github.com/multiversx/mx-chain-go/process/smartContract/hooks/counters" "github.com/multiversx/mx-chain-go/sharding" factoryState "github.com/multiversx/mx-chain-go/state/factory" + "github.com/multiversx/mx-chain-go/state/syncer" "github.com/multiversx/mx-chain-go/statusHandler" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/factory" @@ -428,23 +429,24 @@ func (gbc *genesisBlockCreator) computeDNSAddresses(enableEpochsConfig config.En builtInFuncs := vmcommonBuiltInFunctions.NewBuiltInFunctionContainer() argsHook := hooks.ArgBlockChainHook{ - Accounts: gbc.arg.Accounts, - PubkeyConv: gbc.arg.Core.AddressPubKeyConverter(), - StorageService: gbc.arg.Data.StorageService(), - BlockChain: gbc.arg.Data.Blockchain(), - ShardCoordinator: gbc.arg.ShardCoordinator, - Marshalizer: gbc.arg.Core.InternalMarshalizer(), - Uint64Converter: gbc.arg.Core.Uint64ByteSliceConverter(), - BuiltInFunctions: builtInFuncs, - NFTStorageHandler: &disabled.SimpleNFTStorage{}, - GlobalSettingsHandler: &disabled.ESDTGlobalSettingsHandler{}, - DataPool: gbc.arg.Data.Datapool(), - CompiledSCPool: gbc.arg.Data.Datapool().SmartContracts(), - EpochNotifier: epochNotifier, - EnableEpochsHandler: enableEpochsHandler, - NilCompiledSCStore: true, - GasSchedule: gbc.arg.GasSchedule, - Counter: counters.NewDisabledCounter(), + Accounts: gbc.arg.Accounts, + PubkeyConv: gbc.arg.Core.AddressPubKeyConverter(), + StorageService: gbc.arg.Data.StorageService(), + BlockChain: gbc.arg.Data.Blockchain(), + ShardCoordinator: gbc.arg.ShardCoordinator, + Marshalizer: gbc.arg.Core.InternalMarshalizer(), + Uint64Converter: gbc.arg.Core.Uint64ByteSliceConverter(), + BuiltInFunctions: builtInFuncs, + NFTStorageHandler: &disabled.SimpleNFTStorage{}, + GlobalSettingsHandler: &disabled.ESDTGlobalSettingsHandler{}, + DataPool: gbc.arg.Data.Datapool(), + CompiledSCPool: 
gbc.arg.Data.Datapool().SmartContracts(), + EpochNotifier: epochNotifier, + EnableEpochsHandler: enableEpochsHandler, + NilCompiledSCStore: true, + GasSchedule: gbc.arg.GasSchedule, + Counter: counters.NewDisabledCounter(), + MissingTrieNodesNotifier: syncer.NewMissingTrieNodesNotifier(), } blockChainHook, err := hooks.NewBlockChainHookImpl(argsHook) if err != nil { diff --git a/genesis/process/metaGenesisBlockCreator.go b/genesis/process/metaGenesisBlockCreator.go index bfb4f26cb53..472ae9de959 100644 --- a/genesis/process/metaGenesisBlockCreator.go +++ b/genesis/process/metaGenesisBlockCreator.go @@ -33,6 +33,7 @@ import ( "github.com/multiversx/mx-chain-go/process/smartContract/hooks/counters" syncDisabled "github.com/multiversx/mx-chain-go/process/sync/disabled" processTransaction "github.com/multiversx/mx-chain-go/process/transaction" + "github.com/multiversx/mx-chain-go/state/syncer" "github.com/multiversx/mx-chain-go/storage/txcache" "github.com/multiversx/mx-chain-go/update" hardForkProcess "github.com/multiversx/mx-chain-go/update/process" @@ -302,23 +303,24 @@ func createProcessorsForMetaGenesisBlock(arg ArgsGenesisBlockCreator, enableEpoc builtInFuncs := vmcommonBuiltInFunctions.NewBuiltInFunctionContainer() argsHook := hooks.ArgBlockChainHook{ - Accounts: arg.Accounts, - PubkeyConv: arg.Core.AddressPubKeyConverter(), - StorageService: arg.Data.StorageService(), - BlockChain: arg.Data.Blockchain(), - ShardCoordinator: arg.ShardCoordinator, - Marshalizer: arg.Core.InternalMarshalizer(), - Uint64Converter: arg.Core.Uint64ByteSliceConverter(), - BuiltInFunctions: builtInFuncs, - NFTStorageHandler: &disabled.SimpleNFTStorage{}, - GlobalSettingsHandler: &disabled.ESDTGlobalSettingsHandler{}, - DataPool: arg.Data.Datapool(), - CompiledSCPool: arg.Data.Datapool().SmartContracts(), - EpochNotifier: epochNotifier, - EnableEpochsHandler: enableEpochsHandler, - NilCompiledSCStore: true, - GasSchedule: arg.GasSchedule, - Counter: counters.NewDisabledCounter(), + 
Accounts: arg.Accounts, + PubkeyConv: arg.Core.AddressPubKeyConverter(), + StorageService: arg.Data.StorageService(), + BlockChain: arg.Data.Blockchain(), + ShardCoordinator: arg.ShardCoordinator, + Marshalizer: arg.Core.InternalMarshalizer(), + Uint64Converter: arg.Core.Uint64ByteSliceConverter(), + BuiltInFunctions: builtInFuncs, + NFTStorageHandler: &disabled.SimpleNFTStorage{}, + GlobalSettingsHandler: &disabled.ESDTGlobalSettingsHandler{}, + DataPool: arg.Data.Datapool(), + CompiledSCPool: arg.Data.Datapool().SmartContracts(), + EpochNotifier: epochNotifier, + EnableEpochsHandler: enableEpochsHandler, + NilCompiledSCStore: true, + GasSchedule: arg.GasSchedule, + Counter: counters.NewDisabledCounter(), + MissingTrieNodesNotifier: syncer.NewMissingTrieNodesNotifier(), } pubKeyVerifier, err := disabled.NewMessageSignVerifier(arg.BlockSignKeyGen) diff --git a/genesis/process/shardGenesisBlockCreator.go b/genesis/process/shardGenesisBlockCreator.go index aa140fe629a..0fb9f77a0f0 100644 --- a/genesis/process/shardGenesisBlockCreator.go +++ b/genesis/process/shardGenesisBlockCreator.go @@ -31,6 +31,7 @@ import ( syncDisabled "github.com/multiversx/mx-chain-go/process/sync/disabled" "github.com/multiversx/mx-chain-go/process/transaction" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/state/syncer" "github.com/multiversx/mx-chain-go/storage/txcache" "github.com/multiversx/mx-chain-go/update" hardForkProcess "github.com/multiversx/mx-chain-go/update/process" @@ -405,23 +406,24 @@ func createProcessorsForShardGenesisBlock(arg ArgsGenesisBlockCreator, enableEpo } argsHook := hooks.ArgBlockChainHook{ - Accounts: arg.Accounts, - PubkeyConv: arg.Core.AddressPubKeyConverter(), - StorageService: arg.Data.StorageService(), - BlockChain: arg.Data.Blockchain(), - ShardCoordinator: arg.ShardCoordinator, - Marshalizer: arg.Core.InternalMarshalizer(), - Uint64Converter: arg.Core.Uint64ByteSliceConverter(), - BuiltInFunctions: 
builtInFuncFactory.BuiltInFunctionContainer(), - NFTStorageHandler: builtInFuncFactory.NFTStorageHandler(), - GlobalSettingsHandler: builtInFuncFactory.ESDTGlobalSettingsHandler(), - DataPool: arg.Data.Datapool(), - CompiledSCPool: arg.Data.Datapool().SmartContracts(), - EpochNotifier: epochNotifier, - EnableEpochsHandler: enableEpochsHandler, - NilCompiledSCStore: true, - GasSchedule: arg.GasSchedule, - Counter: counters.NewDisabledCounter(), + Accounts: arg.Accounts, + PubkeyConv: arg.Core.AddressPubKeyConverter(), + StorageService: arg.Data.StorageService(), + BlockChain: arg.Data.Blockchain(), + ShardCoordinator: arg.ShardCoordinator, + Marshalizer: arg.Core.InternalMarshalizer(), + Uint64Converter: arg.Core.Uint64ByteSliceConverter(), + BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), + NFTStorageHandler: builtInFuncFactory.NFTStorageHandler(), + GlobalSettingsHandler: builtInFuncFactory.ESDTGlobalSettingsHandler(), + DataPool: arg.Data.Datapool(), + CompiledSCPool: arg.Data.Datapool().SmartContracts(), + EpochNotifier: epochNotifier, + EnableEpochsHandler: enableEpochsHandler, + NilCompiledSCStore: true, + GasSchedule: arg.GasSchedule, + Counter: counters.NewDisabledCounter(), + MissingTrieNodesNotifier: syncer.NewMissingTrieNodesNotifier(), } esdtTransferParser, err := parsers.NewESDTTransferParser(arg.Core.InternalMarshalizer()) if err != nil { diff --git a/go.mod b/go.mod index 9fc383bb950..c22f4c55a22 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-core-go v1.2.1-0.20230403113932-916b16d18978 + github.com/multiversx/mx-chain-core-go v1.2.1-0.20230504075947-f67a2083a86f github.com/multiversx/mx-chain-crypto-go v1.2.5 github.com/multiversx/mx-chain-es-indexer-go v1.4.0 github.com/multiversx/mx-chain-logger-go v1.0.11 diff --git a/go.sum b/go.sum index 90649986acf..936bffeb71a 100644 --- 
a/go.sum +++ b/go.sum @@ -611,8 +611,11 @@ github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUY github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.31/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.2.1-0.20230403113932-916b16d18978 h1:caHg1OhZmaA8oX3TbimkBaty+eHvhpNSO8rQOicrS7o= +github.com/multiversx/mx-chain-core-go v1.1.34/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= +github.com/multiversx/mx-chain-core-go v1.2.0/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1-0.20230403113932-916b16d18978/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= +github.com/multiversx/mx-chain-core-go v1.2.1-0.20230504075947-f67a2083a86f h1:I1MpzgdJIWovjvJqdifvLepuG1Tg1vbQXchjPKtlqy8= +github.com/multiversx/mx-chain-core-go v1.2.1-0.20230504075947-f67a2083a86f/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-crypto-go v1.2.5 h1:tuq3BUNMhKud5DQbZi9DiVAAHUXypizy8zPH0NpTGZk= github.com/multiversx/mx-chain-crypto-go v1.2.5/go.mod h1:teqhNyWEqfMPgNn8sgWXlgtJ1a36jGCnhs/tRpXW6r4= github.com/multiversx/mx-chain-es-indexer-go v1.4.0 h1:t2UCfbLRbFPBWK1IC1/qOVg+2D6y189xZZ1BoV83gq8= @@ -621,13 +624,12 @@ github.com/multiversx/mx-chain-logger-go v1.0.11 h1:DFsHa+sc5fKwhDR50I8uBM99RTDT github.com/multiversx/mx-chain-logger-go v1.0.11/go.mod h1:1srDkP0DQucWQ+rYfaq0BX2qLnULsUdRPADpYUTM6dA= github.com/multiversx/mx-chain-p2p-go v1.0.15 h1:H7273huZG/zAR6MPvWuXwBEVBsJWH1MeSIDshYV0nh0= github.com/multiversx/mx-chain-p2p-go v1.0.15/go.mod h1:hUE4H8kGJk3u9gTqeetF3uhjJpnfdV/hALKsJ6bMI+8= -github.com/multiversx/mx-chain-storage-go v1.0.7 h1:UqLo/OLTD3IHiE/TB/SEdNRV1GG2f1R6vIP5ehHwCNw= github.com/multiversx/mx-chain-storage-go v1.0.7/go.mod 
h1:gtKoV32Cg2Uy8deHzF8Ud0qAl0zv92FvWgPSYIP0Zmg= github.com/multiversx/mx-chain-storage-go v1.0.8-0.20230403115027-9139fce478e0 h1:jTGuq0IAQdghGLoNx2BgkxWvkcZV9ZmJ0qB8/oU4MNQ= github.com/multiversx/mx-chain-storage-go v1.0.8-0.20230403115027-9139fce478e0/go.mod h1:FGhaeTNIcLZOPqsJZQ1TdcMaPVLhj642OzRNmt6+RQs= -github.com/multiversx/mx-chain-vm-common-go v1.3.34/go.mod h1:sZ2COLCxvf2GxAAJHGmGqWybObLtFuk2tZUyGqnMXE8= github.com/multiversx/mx-chain-vm-common-go v1.3.36/go.mod h1:sZ2COLCxvf2GxAAJHGmGqWybObLtFuk2tZUyGqnMXE8= github.com/multiversx/mx-chain-vm-common-go v1.3.37/go.mod h1:sZ2COLCxvf2GxAAJHGmGqWybObLtFuk2tZUyGqnMXE8= +github.com/multiversx/mx-chain-vm-common-go v1.4.0/go.mod h1:odBJC92ANA8zLtPh/wwajUUGJOaS88F5QYGf0t8Wgzw= github.com/multiversx/mx-chain-vm-common-go v1.4.1-0.20230403123953-7fc57accc0c6 h1:3G8BHyVfz1DkeZcds4iME5vDHzg8Yg2++wet0DDYZ3c= github.com/multiversx/mx-chain-vm-common-go v1.4.1-0.20230403123953-7fc57accc0c6/go.mod h1:rxb8laeh06wayB/dZPpN5LT3qcwv4SgpNHiSvPsNjuw= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.50 h1:ScUq7/wq78vthMTQ6v5Ux1DvSMQMHxQ2Sl7aPP26q1w= diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index e0a1c3d129a..638ebef7018 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -804,23 +804,24 @@ func (tpn *TestProcessorNode) createFullSCQueryService(gasMap map[string]map[str smartContractsCache := testscommon.NewCacherMock() argsHook := hooks.ArgBlockChainHook{ - Accounts: tpn.AccntState, - PubkeyConv: TestAddressPubkeyConverter, - StorageService: tpn.Storage, - BlockChain: tpn.BlockChain, - ShardCoordinator: tpn.ShardCoordinator, - Marshalizer: TestMarshalizer, - Uint64Converter: TestUint64Converter, - BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), - NFTStorageHandler: builtInFuncFactory.NFTStorageHandler(), - GlobalSettingsHandler: builtInFuncFactory.ESDTGlobalSettingsHandler(), - DataPool: tpn.DataPool, - CompiledSCPool: 
smartContractsCache, - EpochNotifier: tpn.EpochNotifier, - EnableEpochsHandler: tpn.EnableEpochsHandler, - NilCompiledSCStore: true, - GasSchedule: gasSchedule, - Counter: counters.NewDisabledCounter(), + Accounts: tpn.AccntState, + PubkeyConv: TestAddressPubkeyConverter, + StorageService: tpn.Storage, + BlockChain: tpn.BlockChain, + ShardCoordinator: tpn.ShardCoordinator, + Marshalizer: TestMarshalizer, + Uint64Converter: TestUint64Converter, + BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), + NFTStorageHandler: builtInFuncFactory.NFTStorageHandler(), + GlobalSettingsHandler: builtInFuncFactory.ESDTGlobalSettingsHandler(), + DataPool: tpn.DataPool, + CompiledSCPool: smartContractsCache, + EpochNotifier: tpn.EpochNotifier, + EnableEpochsHandler: tpn.EnableEpochsHandler, + NilCompiledSCStore: true, + GasSchedule: gasSchedule, + Counter: counters.NewDisabledCounter(), + MissingTrieNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, } if tpn.ShardCoordinator.SelfId() == core.MetachainShardId { @@ -1481,23 +1482,24 @@ func (tpn *TestProcessorNode) initInnerProcessors(gasMap map[string]map[string]u log.LogIfError(err) argsHook := hooks.ArgBlockChainHook{ - Accounts: tpn.AccntState, - PubkeyConv: TestAddressPubkeyConverter, - StorageService: tpn.Storage, - BlockChain: tpn.BlockChain, - ShardCoordinator: tpn.ShardCoordinator, - Marshalizer: TestMarshalizer, - Uint64Converter: TestUint64Converter, - BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), - NFTStorageHandler: builtInFuncFactory.NFTStorageHandler(), - GlobalSettingsHandler: builtInFuncFactory.ESDTGlobalSettingsHandler(), - DataPool: tpn.DataPool, - CompiledSCPool: tpn.DataPool.SmartContracts(), - EpochNotifier: tpn.EpochNotifier, - EnableEpochsHandler: tpn.EnableEpochsHandler, - NilCompiledSCStore: true, - GasSchedule: gasSchedule, - Counter: counter, + Accounts: tpn.AccntState, + PubkeyConv: TestAddressPubkeyConverter, + StorageService: tpn.Storage, + BlockChain: 
tpn.BlockChain, + ShardCoordinator: tpn.ShardCoordinator, + Marshalizer: TestMarshalizer, + Uint64Converter: TestUint64Converter, + BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), + NFTStorageHandler: builtInFuncFactory.NFTStorageHandler(), + GlobalSettingsHandler: builtInFuncFactory.ESDTGlobalSettingsHandler(), + DataPool: tpn.DataPool, + CompiledSCPool: tpn.DataPool.SmartContracts(), + EpochNotifier: tpn.EpochNotifier, + EnableEpochsHandler: tpn.EnableEpochsHandler, + NilCompiledSCStore: true, + GasSchedule: gasSchedule, + Counter: counter, + MissingTrieNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, } maxGasLimitPerBlock := uint64(0xFFFFFFFFFFFFFFFF) @@ -1689,23 +1691,24 @@ func (tpn *TestProcessorNode) initMetaInnerProcessors(gasMap map[string]map[stri argsBuiltIn.AutomaticCrawlerAddresses = GenerateOneAddressPerShard(argsBuiltIn.ShardCoordinator) builtInFuncFactory, _ := builtInFunctions.CreateBuiltInFunctionsFactory(argsBuiltIn) argsHook := hooks.ArgBlockChainHook{ - Accounts: tpn.AccntState, - PubkeyConv: TestAddressPubkeyConverter, - StorageService: tpn.Storage, - BlockChain: tpn.BlockChain, - ShardCoordinator: tpn.ShardCoordinator, - Marshalizer: TestMarshalizer, - Uint64Converter: TestUint64Converter, - BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), - NFTStorageHandler: builtInFuncFactory.NFTStorageHandler(), - GlobalSettingsHandler: builtInFuncFactory.ESDTGlobalSettingsHandler(), - DataPool: tpn.DataPool, - CompiledSCPool: tpn.DataPool.SmartContracts(), - EpochNotifier: tpn.EpochNotifier, - EnableEpochsHandler: tpn.EnableEpochsHandler, - NilCompiledSCStore: true, - GasSchedule: gasSchedule, - Counter: counters.NewDisabledCounter(), + Accounts: tpn.AccntState, + PubkeyConv: TestAddressPubkeyConverter, + StorageService: tpn.Storage, + BlockChain: tpn.BlockChain, + ShardCoordinator: tpn.ShardCoordinator, + Marshalizer: TestMarshalizer, + Uint64Converter: TestUint64Converter, + BuiltInFunctions: 
builtInFuncFactory.BuiltInFunctionContainer(), + NFTStorageHandler: builtInFuncFactory.NFTStorageHandler(), + GlobalSettingsHandler: builtInFuncFactory.ESDTGlobalSettingsHandler(), + DataPool: tpn.DataPool, + CompiledSCPool: tpn.DataPool.SmartContracts(), + EpochNotifier: tpn.EpochNotifier, + EnableEpochsHandler: tpn.EnableEpochsHandler, + NilCompiledSCStore: true, + GasSchedule: gasSchedule, + Counter: counters.NewDisabledCounter(), + MissingTrieNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, } var signVerifier vm.MessageSignVerifier @@ -3188,6 +3191,7 @@ func GetDefaultStateComponents() *testscommon.StateComponentsMock { dataRetriever.UserAccountsUnit.String(): &testscommon.StorageManagerStub{}, dataRetriever.PeerAccountsUnit.String(): &testscommon.StorageManagerStub{}, }, + MissingNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, } } diff --git a/integrationTests/vm/testInitializer.go b/integrationTests/vm/testInitializer.go index 1acb1994d02..0ee90fb4317 100644 --- a/integrationTests/vm/testInitializer.go +++ b/integrationTests/vm/testInitializer.go @@ -366,24 +366,25 @@ func CreateTxProcessorWithOneSCExecutorMockVM( builtInFuncs := vmcommonBuiltInFunctions.NewBuiltInFunctionContainer() datapool := dataRetrieverMock.NewPoolsHolderMock() args := hooks.ArgBlockChainHook{ - Accounts: accnts, - PubkeyConv: pubkeyConv, - StorageService: &storageStubs.ChainStorerStub{}, - BlockChain: &testscommon.ChainHandlerStub{}, - ShardCoordinator: mock.NewMultiShardsCoordinatorMock(2), - Marshalizer: integrationtests.TestMarshalizer, - Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, - BuiltInFunctions: builtInFuncs, - NFTStorageHandler: &testscommon.SimpleNFTStorageHandlerStub{}, - GlobalSettingsHandler: &testscommon.ESDTGlobalSettingsHandlerStub{}, - DataPool: datapool, - CompiledSCPool: datapool.SmartContracts(), - NilCompiledSCStore: true, - ConfigSCStorage: *defaultStorageConfig(), - EpochNotifier: genericEpochNotifier, - EnableEpochsHandler: 
enableEpochsHandler, - GasSchedule: gasScheduleNotifier, - Counter: &testscommon.BlockChainHookCounterStub{}, + Accounts: accnts, + PubkeyConv: pubkeyConv, + StorageService: &storageStubs.ChainStorerStub{}, + BlockChain: &testscommon.ChainHandlerStub{}, + ShardCoordinator: mock.NewMultiShardsCoordinatorMock(2), + Marshalizer: integrationtests.TestMarshalizer, + Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, + BuiltInFunctions: builtInFuncs, + NFTStorageHandler: &testscommon.SimpleNFTStorageHandlerStub{}, + GlobalSettingsHandler: &testscommon.ESDTGlobalSettingsHandlerStub{}, + DataPool: datapool, + CompiledSCPool: datapool.SmartContracts(), + NilCompiledSCStore: true, + ConfigSCStorage: *defaultStorageConfig(), + EpochNotifier: genericEpochNotifier, + EnableEpochsHandler: enableEpochsHandler, + GasSchedule: gasScheduleNotifier, + Counter: &testscommon.BlockChainHookCounterStub{}, + MissingTrieNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, } blockChainHook, _ := hooks.NewBlockChainHookImpl(args) @@ -462,24 +463,25 @@ func CreateTxProcessorWithOneSCExecutorMockVM( func CreateOneSCExecutorMockVM(accnts state.AccountsAdapter) vmcommon.VMExecutionHandler { datapool := dataRetrieverMock.NewPoolsHolderMock() args := hooks.ArgBlockChainHook{ - Accounts: accnts, - PubkeyConv: pubkeyConv, - StorageService: &storageStubs.ChainStorerStub{}, - BlockChain: &testscommon.ChainHandlerStub{}, - ShardCoordinator: mock.NewMultiShardsCoordinatorMock(2), - Marshalizer: integrationtests.TestMarshalizer, - Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, - BuiltInFunctions: vmcommonBuiltInFunctions.NewBuiltInFunctionContainer(), - NFTStorageHandler: &testscommon.SimpleNFTStorageHandlerStub{}, - GlobalSettingsHandler: &testscommon.ESDTGlobalSettingsHandlerStub{}, - DataPool: datapool, - CompiledSCPool: datapool.SmartContracts(), - NilCompiledSCStore: true, - ConfigSCStorage: *defaultStorageConfig(), - EpochNotifier: &epochNotifier.EpochNotifierStub{}, - 
EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{}, - GasSchedule: CreateMockGasScheduleNotifier(), - Counter: &testscommon.BlockChainHookCounterStub{}, + Accounts: accnts, + PubkeyConv: pubkeyConv, + StorageService: &storageStubs.ChainStorerStub{}, + BlockChain: &testscommon.ChainHandlerStub{}, + ShardCoordinator: mock.NewMultiShardsCoordinatorMock(2), + Marshalizer: integrationtests.TestMarshalizer, + Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, + BuiltInFunctions: vmcommonBuiltInFunctions.NewBuiltInFunctionContainer(), + NFTStorageHandler: &testscommon.SimpleNFTStorageHandlerStub{}, + GlobalSettingsHandler: &testscommon.ESDTGlobalSettingsHandlerStub{}, + DataPool: datapool, + CompiledSCPool: datapool.SmartContracts(), + NilCompiledSCStore: true, + ConfigSCStorage: *defaultStorageConfig(), + EpochNotifier: &epochNotifier.EpochNotifierStub{}, + EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{}, + GasSchedule: CreateMockGasScheduleNotifier(), + Counter: &testscommon.BlockChainHookCounterStub{}, + MissingTrieNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, } blockChainHook, _ := hooks.NewBlockChainHookImpl(args) vm, _ := mock.NewOneSCExecutorMockVM(blockChainHook, integrationtests.TestHasher) @@ -524,24 +526,25 @@ func CreateVMAndBlockchainHookAndDataPool( datapool := dataRetrieverMock.NewPoolsHolderMock() args := hooks.ArgBlockChainHook{ - Accounts: accnts, - PubkeyConv: pubkeyConv, - StorageService: &storageStubs.ChainStorerStub{}, - BlockChain: chainHandler, - ShardCoordinator: shardCoordinator, - Marshalizer: integrationtests.TestMarshalizer, - Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, - BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), - NFTStorageHandler: builtInFuncFactory.NFTStorageHandler(), - GlobalSettingsHandler: builtInFuncFactory.ESDTGlobalSettingsHandler(), - DataPool: datapool, - CompiledSCPool: datapool.SmartContracts(), - NilCompiledSCStore: true, - ConfigSCStorage: 
*defaultStorageConfig(), - EpochNotifier: epochNotifierInstance, - EnableEpochsHandler: enableEpochsHandler, - GasSchedule: gasSchedule, - Counter: counter, + Accounts: accnts, + PubkeyConv: pubkeyConv, + StorageService: &storageStubs.ChainStorerStub{}, + BlockChain: chainHandler, + ShardCoordinator: shardCoordinator, + Marshalizer: integrationtests.TestMarshalizer, + Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, + BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), + NFTStorageHandler: builtInFuncFactory.NFTStorageHandler(), + GlobalSettingsHandler: builtInFuncFactory.ESDTGlobalSettingsHandler(), + DataPool: datapool, + CompiledSCPool: datapool.SmartContracts(), + NilCompiledSCStore: true, + ConfigSCStorage: *defaultStorageConfig(), + EpochNotifier: epochNotifierInstance, + EnableEpochsHandler: enableEpochsHandler, + GasSchedule: gasSchedule, + Counter: counter, + MissingTrieNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, } hasher := &hashingMocks.HasherMock{} @@ -606,23 +609,24 @@ func CreateVMAndBlockchainHookMeta( datapool := dataRetrieverMock.NewPoolsHolderMock() args := hooks.ArgBlockChainHook{ - Accounts: accnts, - PubkeyConv: pubkeyConv, - StorageService: &storageStubs.ChainStorerStub{}, - BlockChain: &testscommon.ChainHandlerStub{}, - ShardCoordinator: shardCoordinator, - Marshalizer: integrationtests.TestMarshalizer, - Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, - BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), - NFTStorageHandler: builtInFuncFactory.NFTStorageHandler(), - GlobalSettingsHandler: builtInFuncFactory.ESDTGlobalSettingsHandler(), - DataPool: datapool, - CompiledSCPool: datapool.SmartContracts(), - NilCompiledSCStore: true, - EpochNotifier: globalEpochNotifier, - EnableEpochsHandler: enableEpochsHandler, - GasSchedule: gasSchedule, - Counter: &testscommon.BlockChainHookCounterStub{}, + Accounts: accnts, + PubkeyConv: pubkeyConv, + StorageService: &storageStubs.ChainStorerStub{}, + 
BlockChain: &testscommon.ChainHandlerStub{}, + ShardCoordinator: shardCoordinator, + Marshalizer: integrationtests.TestMarshalizer, + Uint64Converter: &mock.Uint64ByteSliceConverterMock{}, + BuiltInFunctions: builtInFuncFactory.BuiltInFunctionContainer(), + NFTStorageHandler: builtInFuncFactory.NFTStorageHandler(), + GlobalSettingsHandler: builtInFuncFactory.ESDTGlobalSettingsHandler(), + DataPool: datapool, + CompiledSCPool: datapool.SmartContracts(), + NilCompiledSCStore: true, + EpochNotifier: globalEpochNotifier, + EnableEpochsHandler: enableEpochsHandler, + GasSchedule: gasSchedule, + Counter: &testscommon.BlockChainHookCounterStub{}, + MissingTrieNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, } economicsData, err := createEconomicsData(config.EnableEpochs{}) diff --git a/integrationTests/vm/wasm/utils.go b/integrationTests/vm/wasm/utils.go index a5b9fab8f80..85db964ab33 100644 --- a/integrationTests/vm/wasm/utils.go +++ b/integrationTests/vm/wasm/utils.go @@ -285,8 +285,9 @@ func (context *TestContext) initVMAndBlockchainHook() { MaxBatchSize: 100, }, }, - GasSchedule: gasSchedule, - Counter: &testscommon.BlockChainHookCounterStub{}, + GasSchedule: gasSchedule, + Counter: &testscommon.BlockChainHookCounterStub{}, + MissingTrieNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, } vmFactoryConfig := config.VirtualMachineConfig{ diff --git a/process/smartContract/hooks/blockChainHook.go b/process/smartContract/hooks/blockChainHook.go index bbca8334da8..775d4b80e8f 100644 --- a/process/smartContract/hooks/blockChainHook.go +++ b/process/smartContract/hooks/blockChainHook.go @@ -41,26 +41,27 @@ const executeDurationAlarmThreshold = time.Duration(50) * time.Millisecond // ArgBlockChainHook represents the arguments structure for the blockchain hook type ArgBlockChainHook struct { - Accounts state.AccountsAdapter - PubkeyConv core.PubkeyConverter - StorageService dataRetriever.StorageService - DataPool dataRetriever.PoolsHolder - BlockChain 
data.ChainHandler - ShardCoordinator sharding.Coordinator - Marshalizer marshal.Marshalizer - Uint64Converter typeConverters.Uint64ByteSliceConverter - BuiltInFunctions vmcommon.BuiltInFunctionContainer - NFTStorageHandler vmcommon.SimpleESDTNFTStorageHandler - GlobalSettingsHandler vmcommon.ESDTGlobalSettingsHandler - CompiledSCPool storage.Cacher - ConfigSCStorage config.StorageConfig - EnableEpochs config.EnableEpochs - EpochNotifier vmcommon.EpochNotifier - EnableEpochsHandler common.EnableEpochsHandler - WorkingDir string - NilCompiledSCStore bool - GasSchedule core.GasScheduleNotifier - Counter BlockChainHookCounter + Accounts state.AccountsAdapter + PubkeyConv core.PubkeyConverter + StorageService dataRetriever.StorageService + DataPool dataRetriever.PoolsHolder + BlockChain data.ChainHandler + ShardCoordinator sharding.Coordinator + Marshalizer marshal.Marshalizer + Uint64Converter typeConverters.Uint64ByteSliceConverter + BuiltInFunctions vmcommon.BuiltInFunctionContainer + NFTStorageHandler vmcommon.SimpleESDTNFTStorageHandler + GlobalSettingsHandler vmcommon.ESDTGlobalSettingsHandler + CompiledSCPool storage.Cacher + ConfigSCStorage config.StorageConfig + EnableEpochs config.EnableEpochs + EpochNotifier vmcommon.EpochNotifier + EnableEpochsHandler common.EnableEpochsHandler + WorkingDir string + NilCompiledSCStore bool + GasSchedule core.GasScheduleNotifier + Counter BlockChainHookCounter + MissingTrieNodesNotifier common.MissingTrieNodesNotifier } // BlockChainHookImpl is a wrapper over AccountsAdapter that satisfy vmcommon.BlockchainHook interface @@ -89,8 +90,9 @@ type BlockChainHookImpl struct { mapActivationEpochs map[uint32]struct{} - mutGasLock sync.RWMutex - gasSchedule core.GasScheduleNotifier + mutGasLock sync.RWMutex + gasSchedule core.GasScheduleNotifier + missingTrieNodesNotifier common.MissingTrieNodesNotifier } // NewBlockChainHookImpl creates a new BlockChainHookImpl instance @@ -103,23 +105,24 @@ func NewBlockChainHookImpl( } 
blockChainHookImpl := &BlockChainHookImpl{ - accounts: args.Accounts, - pubkeyConv: args.PubkeyConv, - storageService: args.StorageService, - blockChain: args.BlockChain, - shardCoordinator: args.ShardCoordinator, - marshalizer: args.Marshalizer, - uint64Converter: args.Uint64Converter, - builtInFunctions: args.BuiltInFunctions, - compiledScPool: args.CompiledSCPool, - configSCStorage: args.ConfigSCStorage, - workingDir: args.WorkingDir, - nilCompiledSCStore: args.NilCompiledSCStore, - nftStorageHandler: args.NFTStorageHandler, - globalSettingsHandler: args.GlobalSettingsHandler, - enableEpochsHandler: args.EnableEpochsHandler, - gasSchedule: args.GasSchedule, - counter: args.Counter, + accounts: args.Accounts, + pubkeyConv: args.PubkeyConv, + storageService: args.StorageService, + blockChain: args.BlockChain, + shardCoordinator: args.ShardCoordinator, + marshalizer: args.Marshalizer, + uint64Converter: args.Uint64Converter, + builtInFunctions: args.BuiltInFunctions, + compiledScPool: args.CompiledSCPool, + configSCStorage: args.ConfigSCStorage, + workingDir: args.WorkingDir, + nilCompiledSCStore: args.NilCompiledSCStore, + nftStorageHandler: args.NFTStorageHandler, + globalSettingsHandler: args.GlobalSettingsHandler, + enableEpochsHandler: args.EnableEpochsHandler, + gasSchedule: args.GasSchedule, + counter: args.Counter, + missingTrieNodesNotifier: args.MissingTrieNodesNotifier, } err = blockChainHookImpl.makeCompiledSCStorage() @@ -198,7 +201,9 @@ func checkForNil(args ArgBlockChainHook) error { if check.IfNil(args.Counter) { return ErrNilBlockchainHookCounter } - + if check.IfNil(args.MissingTrieNodesNotifier) { + return ErrNilMissingTrieNodesNotifier + } return nil } @@ -264,6 +269,8 @@ func (bh *BlockChainHookImpl) GetStorageData(accountAddress []byte, index []byte if err != nil { messages = append(messages, "error") messages = append(messages, err) + + bh.syncMissingDataTrieNode(err) } log.Trace("GetStorageData ", messages...) 
@@ -272,6 +279,19 @@ func (bh *BlockChainHookImpl) GetStorageData(accountAddress []byte, index []byte return value, trieDepth, nil } +func (bh *BlockChainHookImpl) syncMissingDataTrieNode(err error) { + if !core.IsGetNodeFromDBError(err) { + return + } + + getNodeErr := core.UnwrapGetNodeFromDBErr(err) + if check.IfNil(getNodeErr) { + return + } + + bh.missingTrieNodesNotifier.NotifyMissingTrieNode(getNodeErr.GetKey()) +} + func (bh *BlockChainHookImpl) processMaxReadsCounters() error { if !bh.enableEpochsHandler.IsMaxBlockchainHookCountersFlagEnabled() { return nil diff --git a/process/smartContract/hooks/blockChainHook_test.go b/process/smartContract/hooks/blockChainHook_test.go index ad04afc1f7a..0269db81ca8 100644 --- a/process/smartContract/hooks/blockChainHook_test.go +++ b/process/smartContract/hooks/blockChainHook_test.go @@ -63,8 +63,9 @@ func createMockBlockChainHookArgs() hooks.ArgBlockChainHook { EnableEpochs: config.EnableEpochs{ DoNotReturnOldBlockInBlockchainHookEnableEpoch: math.MaxUint32, }, - GasSchedule: testscommon.NewGasScheduleNotifierMock(make(map[string]map[string]uint64)), - Counter: &testscommon.BlockChainHookCounterStub{}, + GasSchedule: testscommon.NewGasScheduleNotifierMock(make(map[string]map[string]uint64)), + Counter: &testscommon.BlockChainHookCounterStub{}, + MissingTrieNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, } return arguments } @@ -214,6 +215,14 @@ func TestNewBlockChainHookImpl(t *testing.T) { }, expectedErr: storage.ErrCacheSizeIsLowerThanBatchSize, }, + { + args: func() hooks.ArgBlockChainHook { + args := createMockBlockChainHookArgs() + args.MissingTrieNodesNotifier = nil + return args + }, + expectedErr: hooks.ErrNilMissingTrieNodesNotifier, + }, { args: func() hooks.ArgBlockChainHook { return createMockBlockChainHookArgs() @@ -522,6 +531,95 @@ func TestBlockChainHookImpl_GetStorageData(t *testing.T) { assert.Equal(t, variableValue, value) assert.False(t, counterProcessedCalled) }) + t.Run("data trie node 
not found should call missingTrieNodesNotifier", func(t *testing.T) { + t.Parallel() + + missingDataTrieKey := []byte("missingDataTrieKey") + notifyMissingTrieNodeCalled := false + accnt := stateMock.NewAccountWrapMock(nil) + accnt.AccountDataHandlerCalled = func() (handler vmcommon.AccountDataHandler) { + return &trie.DataTrieTrackerStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + trieErr := core.NewGetNodeFromDBErrWithKey(key, errors.New(core.GetNodeFromDBErrorString), "") + return nil, 0, fmt.Errorf("error: %w", trieErr) + }, + } + } + + args := createMockBlockChainHookArgs() + args.Accounts = &stateMock.AccountsStub{ + GetExistingAccountCalled: func(address []byte) (handler vmcommon.AccountHandler, e error) { + return accnt, nil + }, + } + args.MissingTrieNodesNotifier = &testscommon.MissingTrieNodesNotifierStub{ + NotifyMissingTrieNodeCalled: func(hash []byte) { + assert.Equal(t, missingDataTrieKey, hash) + notifyMissingTrieNodeCalled = true + }, + } + bh, _ := hooks.NewBlockChainHookImpl(args) + + _, _, _ = bh.GetStorageData([]byte("address"), missingDataTrieKey) + assert.True(t, notifyMissingTrieNodeCalled) + }) + t.Run("random retrieve err should not call missingTrieNodesNotifier", func(t *testing.T) { + t.Parallel() + + missingDataTrieKey := []byte("missingDataTrieKey") + accnt := stateMock.NewAccountWrapMock(nil) + accnt.AccountDataHandlerCalled = func() (handler vmcommon.AccountDataHandler) { + return &trie.DataTrieTrackerStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + return nil, 0, errors.New("random error") + }, + } + } + + args := createMockBlockChainHookArgs() + args.Accounts = &stateMock.AccountsStub{ + GetExistingAccountCalled: func(address []byte) (handler vmcommon.AccountHandler, e error) { + return accnt, nil + }, + } + args.MissingTrieNodesNotifier = &testscommon.MissingTrieNodesNotifierStub{ + NotifyMissingTrieNodeCalled: func(hash []byte) { + assert.Fail(t, "should not have been called") + 
}, + } + bh, _ := hooks.NewBlockChainHookImpl(args) + + _, _, _ = bh.GetStorageData([]byte("address"), missingDataTrieKey) + }) + t.Run("unwrapped err is not of wanted type", func(t *testing.T) { + t.Parallel() + + missingDataTrieKey := []byte("missingDataTrieKey") + accnt := stateMock.NewAccountWrapMock(nil) + accnt.AccountDataHandlerCalled = func() (handler vmcommon.AccountDataHandler) { + return &trie.DataTrieTrackerStub{ + RetrieveValueCalled: func(key []byte) ([]byte, uint32, error) { + baseErr := errors.New(core.GetNodeFromDBErrorString) + return nil, 0, fmt.Errorf("error: %w", baseErr) + }, + } + } + + args := createMockBlockChainHookArgs() + args.Accounts = &stateMock.AccountsStub{ + GetExistingAccountCalled: func(address []byte) (handler vmcommon.AccountHandler, e error) { + return accnt, nil + }, + } + args.MissingTrieNodesNotifier = &testscommon.MissingTrieNodesNotifierStub{ + NotifyMissingTrieNodeCalled: func(hash []byte) { + assert.Fail(t, "should not have been called") + }, + } + bh, _ := hooks.NewBlockChainHookImpl(args) + + _, _, _ = bh.GetStorageData([]byte("address"), missingDataTrieKey) + }) } func TestBlockChainHookImpl_NewAddressLengthNoGood(t *testing.T) { diff --git a/process/smartContract/hooks/errors.go b/process/smartContract/hooks/errors.go index 402e2c5311e..43894f455e0 100644 --- a/process/smartContract/hooks/errors.go +++ b/process/smartContract/hooks/errors.go @@ -16,3 +16,6 @@ var ErrVMTypeLengthIsNotCorrect = errors.New("vm type length is not correct") // ErrNilBlockchainHookCounter signals that a nil blockchain hook counter was provided var ErrNilBlockchainHookCounter = errors.New("nil blockchain hook counter") + +// ErrNilMissingTrieNodesNotifier signals that a nil missing trie nodes notifier was provided +var ErrNilMissingTrieNodesNotifier = errors.New("nil missing trie nodes notifier") diff --git a/process/sync/export_test.go b/process/sync/export_test.go index dae5be09c68..719e7599f9f 100644 --- a/process/sync/export_test.go 
+++ b/process/sync/export_test.go @@ -288,8 +288,3 @@ func (boot *baseBootstrap) IsInImportMode() bool { func (boot *baseBootstrap) ProcessWaitTime() time.Duration { return boot.processWaitTime } - -// UnwrapGetNodeFromDBErr - -func UnwrapGetNodeFromDBErr(wrappedErr error) getKeyHandler { - return unwrapGetNodeFromDBErr(wrappedErr) -} diff --git a/process/sync/interface.go b/process/sync/interface.go index fb7e11e3b5f..9c9e00fc899 100644 --- a/process/sync/interface.go +++ b/process/sync/interface.go @@ -30,13 +30,6 @@ type forkDetector interface { computeFinalCheckpoint() } -// getKeyHandler defines the behaviour of a component that can provide a trie node key and identifier -type getKeyHandler interface { - Error() string - GetKey() []byte - GetIdentifier() string -} - type dbStorerWithIdentifier interface { GetIdentifier() string } diff --git a/process/sync/metablock.go b/process/sync/metablock.go index 6b820235074..ed61906768c 100644 --- a/process/sync/metablock.go +++ b/process/sync/metablock.go @@ -180,7 +180,7 @@ func (boot *MetaBootstrap) setLastEpochStartRound() { func (boot *MetaBootstrap) SyncBlock(ctx context.Context) error { err := boot.syncBlock() if core.IsGetNodeFromDBError(err) { - getNodeErr := unwrapGetNodeFromDBErr(err) + getNodeErr := core.UnwrapGetNodeFromDBErr(err) if getNodeErr == nil { return err } diff --git a/process/sync/metablock_test.go b/process/sync/metablock_test.go index 019076d66b9..3d80b2f32c7 100644 --- a/process/sync/metablock_test.go +++ b/process/sync/metablock_test.go @@ -19,7 +19,6 @@ import ( "github.com/multiversx/mx-chain-go/consensus/round" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" - commonErrors "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/process/sync" @@ -1626,7 +1625,7 @@ func 
TestMetaBootstrap_SyncBlockErrGetNodeDBShouldSyncAccounts(t *testing.T) { } args.ChainHandler = blkc - errGetNodeFromDB := commonErrors.NewGetNodeFromDBErrWithKey([]byte("key"), errors.New("get error"), "userAccountsUnit") + errGetNodeFromDB := core.NewGetNodeFromDBErrWithKey([]byte("key"), errors.New("get error"), "userAccountsUnit") blockProcessor := createMetaBlockProcessor(args.ChainHandler) blockProcessor.ProcessBlockCalled = func(header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { return errGetNodeFromDB diff --git a/process/sync/shardblock.go b/process/sync/shardblock.go index 55f6c7f6d84..8bfae8bc72f 100644 --- a/process/sync/shardblock.go +++ b/process/sync/shardblock.go @@ -2,7 +2,6 @@ package sync import ( "context" - "errors" "math" "github.com/multiversx/mx-chain-core-go/core" @@ -144,7 +143,7 @@ func (boot *ShardBootstrap) StartSyncingBlocks() { func (boot *ShardBootstrap) SyncBlock(ctx context.Context) error { err := boot.syncBlock() if core.IsGetNodeFromDBError(err) { - getNodeErr := unwrapGetNodeFromDBErr(err) + getNodeErr := core.UnwrapGetNodeFromDBErr(err) if getNodeErr == nil { return err } @@ -156,21 +155,6 @@ func (boot *ShardBootstrap) SyncBlock(ctx context.Context) error { return err } -func unwrapGetNodeFromDBErr(wrappedErr error) getKeyHandler { - errWithKeyHandler, ok := wrappedErr.(getKeyHandler) - for !ok { - if wrappedErr == nil { - return nil - } - - err := errors.Unwrap(wrappedErr) - errWithKeyHandler, ok = err.(getKeyHandler) - wrappedErr = err - } - - return errWithKeyHandler -} - // Close closes the synchronization loop func (boot *ShardBootstrap) Close() error { if check.IfNil(boot.baseBootstrap) { diff --git a/process/sync/shardblock_test.go b/process/sync/shardblock_test.go index b0175890e43..eb3c1b1edd7 100644 --- a/process/sync/shardblock_test.go +++ b/process/sync/shardblock_test.go @@ -12,6 +12,7 @@ import ( "testing" "time" + "github.com/multiversx/mx-chain-core-go/core" 
"github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/block" @@ -20,7 +21,6 @@ import ( "github.com/multiversx/mx-chain-go/consensus/round" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dataRetriever/blockchain" - commonErrors "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/mock" "github.com/multiversx/mx-chain-go/process/sync" @@ -2065,7 +2065,7 @@ func TestShardBootstrap_SyncBlockGetNodeDBErrorShouldSync(t *testing.T) { } args.ChainHandler = blkc - errGetNodeFromDB := commonErrors.NewGetNodeFromDBErrWithKey([]byte("key"), errors.New("get error"), "") + errGetNodeFromDB := core.NewGetNodeFromDBErrWithKey([]byte("key"), errors.New("get error"), "") blockProcessor := createBlockProcessor(args.ChainHandler) blockProcessor.ProcessBlockCalled = func(header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { return errGetNodeFromDB @@ -2156,23 +2156,3 @@ func TestShardBootstrap_NilInnerBootstrapperClose(t *testing.T) { bootstrapper := &sync.ShardBootstrap{} assert.Nil(t, bootstrapper.Close()) } - -func TestUnwrapGetNodeFromDBErr(t *testing.T) { - t.Parallel() - - key := []byte("key") - identifier := "identifier" - err := fmt.Errorf("key not found") - - getNodeFromDbErr := commonErrors.NewGetNodeFromDBErrWithKey(key, err, identifier) - wrappedErr1 := fmt.Errorf("wrapped error 1: %w", getNodeFromDbErr) - wrappedErr2 := fmt.Errorf("wrapped error 2: %w", wrappedErr1) - wrappedErr3 := fmt.Errorf("wrapped error 3: %w", wrappedErr2) - - assert.Nil(t, sync.UnwrapGetNodeFromDBErr(nil)) - assert.Nil(t, sync.UnwrapGetNodeFromDBErr(err)) - assert.Equal(t, getNodeFromDbErr, sync.UnwrapGetNodeFromDBErr(getNodeFromDbErr)) - assert.Equal(t, getNodeFromDbErr, sync.UnwrapGetNodeFromDBErr(wrappedErr1)) - assert.Equal(t, getNodeFromDbErr, 
sync.UnwrapGetNodeFromDBErr(wrappedErr2)) - assert.Equal(t, getNodeFromDbErr, sync.UnwrapGetNodeFromDBErr(wrappedErr3)) -} diff --git a/state/syncer/export_test.go b/state/syncer/export_test.go new file mode 100644 index 00000000000..1cfbb0aa96e --- /dev/null +++ b/state/syncer/export_test.go @@ -0,0 +1,6 @@ +package syncer + +// GetNumHandlers - +func (mtnn *missingTrieNodesNotifier) GetNumHandlers() int { + return len(mtnn.handlers) +} diff --git a/state/syncer/missingTrieNodesNotifier.go b/state/syncer/missingTrieNodesNotifier.go new file mode 100644 index 00000000000..d718b12ab6d --- /dev/null +++ b/state/syncer/missingTrieNodesNotifier.go @@ -0,0 +1,53 @@ +package syncer + +import ( + "sync" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common" +) + +type missingTrieNodesNotifier struct { + handlers []common.StateSyncNotifierSubscriber + mutex sync.RWMutex +} + +// NewMissingTrieNodesNotifier creates a new missing trie nodes notifier +func NewMissingTrieNodesNotifier() *missingTrieNodesNotifier { + return &missingTrieNodesNotifier{ + handlers: make([]common.StateSyncNotifierSubscriber, 0), + mutex: sync.RWMutex{}, + } +} + +// RegisterHandler registers a new handler for the missing trie nodes notifier +func (mtnn *missingTrieNodesNotifier) RegisterHandler(handler common.StateSyncNotifierSubscriber) { + if check.IfNil(handler) { + log.Warn("missingTrieNodesNotifier: nil handler") + return + } + + mtnn.mutex.Lock() + mtnn.handlers = append(mtnn.handlers, handler) + mtnn.mutex.Unlock() +} + +// NotifyMissingTrieNode notifies all the registered handlers that a trie node is missing +func (mtnn *missingTrieNodesNotifier) NotifyMissingTrieNode(hash []byte) { + if common.IsEmptyTrie(hash) { + log.Warn("missingTrieNodesNotifier: empty trie hash") + return + } + + mtnn.mutex.RLock() + defer mtnn.mutex.RUnlock() + + for _, handler := range mtnn.handlers { + go handler.MissingDataTrieNodeFound(hash) + } +} + +// 
IsInterfaceNil returns true if there is no value under the interface +func (mtnn *missingTrieNodesNotifier) IsInterfaceNil() bool { + return mtnn == nil +} diff --git a/state/syncer/missingTrieNodesNotifier_test.go b/state/syncer/missingTrieNodesNotifier_test.go new file mode 100644 index 00000000000..4ddbf3b8cbc --- /dev/null +++ b/state/syncer/missingTrieNodesNotifier_test.go @@ -0,0 +1,55 @@ +package syncer + +import ( + "sync" + "testing" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/stretchr/testify/assert" +) + +func TestNewMissingTrieNodesNotifier(t *testing.T) { + t.Parallel() + + assert.False(t, check.IfNil(NewMissingTrieNodesNotifier())) +} + +func TestMissingTrieNodesNotifier_RegisterHandler(t *testing.T) { + t.Parallel() + + notifier := NewMissingTrieNodesNotifier() + + notifier.RegisterHandler(nil) + notifier.RegisterHandler(&testscommon.StateSyncNotifierSubscriberStub{}) + notifier.RegisterHandler(nil) + + assert.Equal(t, 1, notifier.GetNumHandlers()) +} + +func TestMissingTrieNodesNotifier_NotifyMissingTrieNode(t *testing.T) { + t.Parallel() + + numMissingDataTrieNodeFoundCalled := 0 + notifier := NewMissingTrieNodesNotifier() + notifier.NotifyMissingTrieNode([]byte("hash1")) + + wg := sync.WaitGroup{} + wg.Add(2) + + notifier.RegisterHandler(&testscommon.StateSyncNotifierSubscriberStub{ + MissingDataTrieNodeFoundCalled: func(_ []byte) { + numMissingDataTrieNodeFoundCalled++ + wg.Done() + }, + }) + + notifier.NotifyMissingTrieNode(nil) + notifier.NotifyMissingTrieNode([]byte("hash2")) + notifier.NotifyMissingTrieNode([]byte("hash3")) + + wg.Wait() + + assert.Equal(t, 1, notifier.GetNumHandlers()) + assert.Equal(t, 2, numMissingDataTrieNodeFoundCalled) +} diff --git a/state/syncer/userAccountsSyncer.go b/state/syncer/userAccountsSyncer.go index 0170af9d17e..c5f3b36d1a3 100644 --- a/state/syncer/userAccountsSyncer.go +++ b/state/syncer/userAccountsSyncer.go @@ -150,6 +150,17 @@ 
func (u *userAccountsSyncer) syncDataTrie(rootHash []byte, address []byte, ctx c u.dataTries[string(rootHash)] = struct{}{} u.syncerMutex.Unlock() + trieSyncer, err := u.createAndStartSyncer(ctx, rootHash) + if err != nil { + return err + } + + u.updateDataTrieStatistics(trieSyncer, address) + + return nil +} + +func (u *userAccountsSyncer) createAndStartSyncer(ctx context.Context, hash []byte) (trie.TrieSyncer, error) { arg := trie.ArgTrieSyncer{ RequestHandler: u.requestHandler, InterceptedNodes: u.cacher, @@ -165,18 +176,15 @@ func (u *userAccountsSyncer) syncDataTrie(rootHash []byte, address []byte, ctx c } trieSyncer, err := trie.CreateTrieSyncer(arg, u.trieSyncerVersion) if err != nil { - - return err + return nil, err } - err = trieSyncer.StartSyncing(rootHash, ctx) + err = trieSyncer.StartSyncing(hash, ctx) if err != nil { - return err + return nil, err } - u.updateDataTrieStatistics(trieSyncer, address) - - return nil + return trieSyncer, nil } func (u *userAccountsSyncer) updateDataTrieStatistics(trieSyncer trie.TrieSyncer, address []byte) { @@ -323,6 +331,27 @@ func (u *userAccountsSyncer) resetTimeoutHandlerWatchdog() { u.timeoutHandler.ResetWatchdog() } +// MissingDataTrieNodeFound is called whenever a missing data trie node is found. +// This will trigger the sync process for the whole sub trie, starting from the given hash. 
+func (u *userAccountsSyncer) MissingDataTrieNodeFound(hash []byte) { + u.mutex.Lock() + defer u.mutex.Unlock() + + ctx, cancel := context.WithCancel(context.Background()) + defer func() { + u.cacher.Clear() + cancel() + }() + + _, err := u.createAndStartSyncer(ctx, hash) + if err != nil { + log.Error("cannot sync trie", "err", err, "hash", hash) + return + } + + log.Debug("finished sync data trie", "hash", hash) +} + // IsInterfaceNil returns true if there is no value under the interface func (u *userAccountsSyncer) IsInterfaceNil() bool { return u == nil diff --git a/testscommon/components/default.go b/testscommon/components/default.go index 72c0f58778f..6f742aeff8e 100644 --- a/testscommon/components/default.go +++ b/testscommon/components/default.go @@ -95,6 +95,7 @@ func GetDefaultStateComponents() *testscommon.StateComponentsMock { dataRetriever.UserAccountsUnit.String(): &testscommon.StorageManagerStub{}, dataRetriever.PeerAccountsUnit.String(): &testscommon.StorageManagerStub{}, }, + MissingNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, } } diff --git a/testscommon/missingTrieNodesNotifierStub.go b/testscommon/missingTrieNodesNotifierStub.go new file mode 100644 index 00000000000..7c5a2e38736 --- /dev/null +++ b/testscommon/missingTrieNodesNotifierStub.go @@ -0,0 +1,25 @@ +package testscommon + +import "github.com/multiversx/mx-chain-go/common" + +// MissingTrieNodesNotifierStub - +type MissingTrieNodesNotifierStub struct { + RegisterHandlerCalled func(handler common.StateSyncNotifierSubscriber) + NotifyMissingTrieNodeCalled func(hash []byte) +} + +func (mtnns *MissingTrieNodesNotifierStub) RegisterHandler(handler common.StateSyncNotifierSubscriber) { + if mtnns.RegisterHandlerCalled != nil { + mtnns.RegisterHandlerCalled(handler) + } +} + +func (mtnns *MissingTrieNodesNotifierStub) NotifyMissingTrieNode(hash []byte) { + if mtnns.NotifyMissingTrieNodeCalled != nil { + mtnns.NotifyMissingTrieNodeCalled(hash) + } +} + +func (mtnns 
*MissingTrieNodesNotifierStub) IsInterfaceNil() bool { + return mtnns == nil +} diff --git a/testscommon/stateComponentsMock.go b/testscommon/stateComponentsMock.go index 15b11bb4ad0..52a429d1da1 100644 --- a/testscommon/stateComponentsMock.go +++ b/testscommon/stateComponentsMock.go @@ -7,12 +7,13 @@ import ( // StateComponentsMock - type StateComponentsMock struct { - PeersAcc state.AccountsAdapter - Accounts state.AccountsAdapter - AccountsAPI state.AccountsAdapter - AccountsRepo state.AccountsRepository - Tries common.TriesHolder - StorageManagers map[string]common.StorageManager + PeersAcc state.AccountsAdapter + Accounts state.AccountsAdapter + AccountsAPI state.AccountsAdapter + AccountsRepo state.AccountsRepository + Tries common.TriesHolder + StorageManagers map[string]common.StorageManager + MissingNodesNotifier common.MissingTrieNodesNotifier } // Create - @@ -65,6 +66,10 @@ func (scm *StateComponentsMock) String() string { return "StateComponentsMock" } +func (scm *StateComponentsMock) MissingTrieNodesNotifier() common.MissingTrieNodesNotifier { + return scm.MissingNodesNotifier +} + // IsInterfaceNil - func (scm *StateComponentsMock) IsInterfaceNil() bool { return scm == nil diff --git a/testscommon/stateSyncNotifierSubscriberStub.go b/testscommon/stateSyncNotifierSubscriberStub.go new file mode 100644 index 00000000000..d6dfc3df276 --- /dev/null +++ b/testscommon/stateSyncNotifierSubscriberStub.go @@ -0,0 +1,18 @@ +package testscommon + +// StateSyncNotifierSubscriberStub - +type StateSyncNotifierSubscriberStub struct { + MissingDataTrieNodeFoundCalled func(hash []byte) +} + +// MissingDataTrieNodeFound - +func (ssns *StateSyncNotifierSubscriberStub) MissingDataTrieNodeFound(hash []byte) { + if ssns.MissingDataTrieNodeFoundCalled != nil { + ssns.MissingDataTrieNodeFoundCalled(hash) + } +} + +// IsInterfaceNil - +func (ssns *StateSyncNotifierSubscriberStub) IsInterfaceNil() bool { + return ssns == nil +} diff --git a/trie/node.go b/trie/node.go index 
9a127a7ecf7..52ff22bacc4 100644 --- a/trie/node.go +++ b/trie/node.go @@ -11,7 +11,6 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/trie/keyBuilder" ) @@ -124,11 +123,11 @@ func getNodeFromDBAndDecode(n []byte, db common.DBWriteCacher, marshalizer marsh dbWithID, ok := db.(dbWriteCacherWithIdentifier) if !ok { - getNodeFromDbErr := errors.NewGetNodeFromDBErrWithKey(n, err, "") + getNodeFromDbErr := core.NewGetNodeFromDBErrWithKey(n, err, "") return nil, fmt.Errorf("db does not have an identifier, db type: %T, error: %w", db, getNodeFromDbErr) } - return nil, errors.NewGetNodeFromDBErrWithKey(n, err, dbWithID.GetIdentifier()) + return nil, core.NewGetNodeFromDBErrWithKey(n, err, dbWithID.GetIdentifier()) } return decodeNode(encChild, marshalizer, hasher) From ce1eafd95c70d81be172366c552e4d7fcd36f843 Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Thu, 4 May 2023 15:51:56 +0300 Subject: [PATCH 125/221] bugfix + add custom rounds per epoch in local testnet --- factory/consensus/consensusComponents.go | 5 ++++- process/sync/baseSync.go | 2 +- process/sync/trieIterators/trieAccountsIterator.go | 7 ++++--- scripts/testnet/include/config.sh | 7 +++++++ scripts/testnet/variables.sh | 3 +++ 5 files changed, 19 insertions(+), 5 deletions(-) diff --git a/factory/consensus/consensusComponents.go b/factory/consensus/consensusComponents.go index 44aaaa0bcfa..790824a4743 100644 --- a/factory/consensus/consensusComponents.go +++ b/factory/consensus/consensusComponents.go @@ -136,7 +136,10 @@ func (ccf *consensusComponentsFactory) Create() (*consensusComponents, error) { return nil, err } - cc.bootstrapper.StartSyncingBlocks() + err = cc.bootstrapper.StartSyncingBlocks() + if err != nil { + return nil, err + } epoch := ccf.getEpoch() consensusState, err := ccf.createConsensusState(epoch, 
cc.consensusGroupSize) diff --git a/process/sync/baseSync.go b/process/sync/baseSync.go index 38f2f8eb6ef..030d45bc8f4 100644 --- a/process/sync/baseSync.go +++ b/process/sync/baseSync.go @@ -1195,7 +1195,7 @@ func (boot *baseBootstrap) GetNodeState() common.NodeState { } func (boot *baseBootstrap) handleAccountsTrieIteration() error { - if !boot.repopulateTokensSupplies { + if boot.repopulateTokensSupplies { return boot.handleTokensSuppliesRepopulation() } diff --git a/process/sync/trieIterators/trieAccountsIterator.go b/process/sync/trieIterators/trieAccountsIterator.go index 49cbc98547d..e936d723e3e 100644 --- a/process/sync/trieIterators/trieAccountsIterator.go +++ b/process/sync/trieIterators/trieAccountsIterator.go @@ -14,7 +14,8 @@ import ( var log = logger.GetOrCreate("trieIterators") -type trieAccountIteratorHandler func(account state.UserAccountHandler) error +// TrieAccountIteratorHandler represents a type that maps a handler for the trie's accounts iterator +type TrieAccountIteratorHandler func(account state.UserAccountHandler) error type trieAccountsIterator struct { marshaller marshal.Marshalizer @@ -43,7 +44,7 @@ func NewTrieAccountsIterator(args ArgsTrieAccountsIterator) (*trieAccountsIterat } // Process will iterate over the entire trie and iterate over the Accounts while calling the received handlers -func (t *trieAccountsIterator) Process(handlers ...trieAccountIteratorHandler) error { +func (t *trieAccountsIterator) Process(handlers ...TrieAccountIteratorHandler) error { if len(handlers) == 0 { return nil } @@ -65,7 +66,7 @@ func (t *trieAccountsIterator) Process(handlers ...trieAccountIteratorHandler) e return t.iterateOverHandlers(iteratorChannels, handlers) } -func (t *trieAccountsIterator) iterateOverHandlers(iteratorChannels *common.TrieIteratorChannels, handlers []trieAccountIteratorHandler) error { +func (t *trieAccountsIterator) iterateOverHandlers(iteratorChannels *common.TrieIteratorChannels, handlers []TrieAccountIteratorHandler) 
error { log.Debug("starting the trie's accounts iteration with calling the handlers") for leaf := range iteratorChannels.LeavesChan { userAddress, isAccount := t.getAddress(leaf) diff --git a/scripts/testnet/include/config.sh b/scripts/testnet/include/config.sh index 425339f99c6..9d8c9490a86 100644 --- a/scripts/testnet/include/config.sh +++ b/scripts/testnet/include/config.sh @@ -112,6 +112,13 @@ updateNodeConfig() { updateTOMLValue config_observer.toml "ChainID" "\"local-testnet"\" fi + if [ $ROUNDS_PER_EPOCH -ne 0 ]; then + sed -i "s,RoundsPerEpoch.*$,RoundsPerEpoch = $ROUNDS_PER_EPOCH," config_observer.toml + sed -i "s,MinRoundsBetweenEpochs.*$,MinRoundsBetweenEpochs = $ROUNDS_PER_EPOCH," config_observer.toml + sed -i "s,RoundsPerEpoch.*$,RoundsPerEpoch = $ROUNDS_PER_EPOCH," config_validator.toml + sed -i "s,MinRoundsBetweenEpochs.*$,MinRoundsBetweenEpochs = $ROUNDS_PER_EPOCH," config_validator.toml + fi + cp nodesSetup_edit.json nodesSetup.json rm nodesSetup_edit.json diff --git a/scripts/testnet/variables.sh b/scripts/testnet/variables.sh index 14eff94e7e9..135a29b8478 100644 --- a/scripts/testnet/variables.sh +++ b/scripts/testnet/variables.sh @@ -68,6 +68,9 @@ export MULTI_KEY_NODES=0 # ALWAYS_NEW_CHAINID will generate a fresh new chain ID each time start.sh/config.sh is called export ALWAYS_NEW_CHAINID=1 +# ROUNDS_PER_EPOCH represents the number of rounds per epoch. 
If set to 0, it won't override the node's config +export ROUNDS_PER_EPOCH=0 + # HYSTERESIS defines the hysteresis value for number of nodes in shard export HYSTERESIS=0.0 From 0881d9e4e6927b6e3174ff76692b4e3c3eb977f8 Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Thu, 4 May 2023 16:11:16 +0300 Subject: [PATCH 126/221] add unit test for MissingDataTrieNodeFound method --- state/syncer/userAccountSyncer_test.go | 59 ++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) diff --git a/state/syncer/userAccountSyncer_test.go b/state/syncer/userAccountSyncer_test.go index a5168e2cbeb..c1ff8bf7462 100644 --- a/state/syncer/userAccountSyncer_test.go +++ b/state/syncer/userAccountSyncer_test.go @@ -9,6 +9,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" "github.com/multiversx/mx-chain-go/testscommon/storageManager" + "github.com/multiversx/mx-chain-go/trie" "github.com/stretchr/testify/assert" ) @@ -47,3 +48,61 @@ func TestUserAccountsSyncer_SyncAccounts(t *testing.T) { err = syncer.SyncAccounts([]byte("rootHash"), nil) assert.Equal(t, ErrNilStorageMarker, err) } + +func TestUserAccountsSyncer_MissingDataTrieNodeFound(t *testing.T) { + t.Parallel() + + numNodesSynced := 0 + numProcessedCalled := 0 + setNumMissingCalled := 0 + args := ArgsNewUserAccountsSyncer{ + ArgsNewBaseAccountsSyncer: getDefaultBaseAccSyncerArgs(), + ShardId: 0, + Throttler: &mock.ThrottlerStub{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterStub{}, + } + args.TrieStorageManager = &storageManager.StorageManagerStub{ + PutInEpochCalled: func(_ []byte, _ []byte, _ uint32) error { + numNodesSynced++ + return nil + }, + } + args.UserAccountsSyncStatisticsHandler = &testscommon.SizeSyncStatisticsHandlerStub{ + AddNumProcessedCalled: func(value int) { + numProcessedCalled++ + }, + SetNumMissingCalled: func(rootHash []byte, value int) { + setNumMissingCalled++ + assert.Equal(t, 0, value) + }, + } + + 
var serializedLeafNode []byte + tsm := &storageManager.StorageManagerStub{ + PutCalled: func(key []byte, val []byte) error { + serializedLeafNode = val + return nil + }, + } + + tr, _ := trie.NewTrie(tsm, args.Marshalizer, args.Hasher, 5) + key := []byte("key") + value := []byte("value") + _ = tr.Update(key, value) + rootHash, _ := tr.RootHash() + _ = tr.Commit() + + args.Cacher = &testscommon.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + interceptedNode, _ := trie.NewInterceptedTrieNode(serializedLeafNode, args.Hasher) + return interceptedNode, true + }, + } + + syncer, _ := NewUserAccountsSyncer(args) + syncer.MissingDataTrieNodeFound(rootHash) + + assert.Equal(t, 1, numNodesSynced) + assert.Equal(t, 1, numProcessedCalled) + assert.Equal(t, 1, setNumMissingCalled) +} From 455e0f2cd7c076043a33b555b595cf97c942a20d Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Thu, 4 May 2023 16:29:16 +0300 Subject: [PATCH 127/221] fix race condition --- state/syncer/missingTrieNodesNotifier_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/state/syncer/missingTrieNodesNotifier_test.go b/state/syncer/missingTrieNodesNotifier_test.go index 4ddbf3b8cbc..470abbc764f 100644 --- a/state/syncer/missingTrieNodesNotifier_test.go +++ b/state/syncer/missingTrieNodesNotifier_test.go @@ -36,11 +36,14 @@ func TestMissingTrieNodesNotifier_NotifyMissingTrieNode(t *testing.T) { wg := sync.WaitGroup{} wg.Add(2) + mutex := sync.Mutex{} notifier.RegisterHandler(&testscommon.StateSyncNotifierSubscriberStub{ MissingDataTrieNodeFoundCalled: func(_ []byte) { + mutex.Lock() numMissingDataTrieNodeFoundCalled++ wg.Done() + mutex.Unlock() }, }) From 13d01bd90bc3b5ccf90a9d54208a9653b494ef3d Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Thu, 4 May 2023 16:44:15 +0300 Subject: [PATCH 128/221] fix after review --- factory/processing/blockProcessorCreator.go | 4 ++-- factory/processing/processComponents.go | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) 
diff --git a/factory/processing/blockProcessorCreator.go b/factory/processing/blockProcessorCreator.go index 2c154c682db..b4faff2ae1f 100644 --- a/factory/processing/blockProcessorCreator.go +++ b/factory/processing/blockProcessorCreator.go @@ -401,7 +401,7 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( StatusComponents: pcf.statusComponents, StatusCoreComponents: pcf.statusCoreComponents, Config: pcf.config, - Version: pcf.version, + Version: pcf.flagsConfig.Version, AccountsDB: accountsDb, ForkDetector: forkDetector, NodesCoordinator: pcf.nodesCoordinator, @@ -821,7 +821,7 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( StatusComponents: pcf.statusComponents, StatusCoreComponents: pcf.statusCoreComponents, Config: pcf.config, - Version: pcf.version, + Version: pcf.flagsConfig.Version, AccountsDB: accountsDb, ForkDetector: forkDetector, NodesCoordinator: pcf.nodesCoordinator, diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index c1cd90af2a7..aa9515eef1c 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -169,7 +169,6 @@ type processComponentsFactory struct { maxRating uint32 systemSCConfig *config.SystemSmartContractsConfig txLogsProcessor process.TransactionLogProcessor - version string importStartHandler update.ImportStartHandler historyRepo dblookupext.HistoryRepository epochNotifier process.EpochNotifier From 0d49956d531cffe4a8da3366d5f0b77c9250f8a0 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Fri, 5 May 2023 10:42:34 +0300 Subject: [PATCH 129/221] fixes after review --- factory/processing/processComponents.go | 12 +- factory/processing/processComponents_test.go | 525 ------------------- genesis/errors.go | 2 +- 3 files changed, 9 insertions(+), 530 deletions(-) diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index 13840946633..be35bf32a99 100644 --- 
a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -827,6 +827,7 @@ func (pcf *processComponentsFactory) newEpochStartTrigger(requestHandler epochSt AppStatusHandler: pcf.statusCoreComponents.AppStatusHandler(), DataPool: pcf.data.Datapool(), } + return metachain.NewEpochStartTrigger(argEpochStart) } @@ -1383,6 +1384,7 @@ func (pcf *processComponentsFactory) newMetaResolverContainerFactory( PreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), PayloadValidator: payloadValidator, } + return resolverscontainer.NewMetaResolversContainerFactory(resolversContainerFactoryArgs) } @@ -1395,10 +1397,10 @@ func (pcf *processComponentsFactory) newRequestersContainerFactory( return pcf.newStorageRequesters() } - shardC := pcf.bootstrapComponents.ShardCoordinator() + shardCoordinator := pcf.bootstrapComponents.ShardCoordinator() requestersContainerFactoryArgs := requesterscontainer.FactoryArgs{ RequesterConfig: pcf.config.Requesters, - ShardCoordinator: shardC, + ShardCoordinator: shardCoordinator, Messenger: pcf.network.NetworkMessenger(), Marshaller: pcf.coreData.InternalMarshalizer(), Uint64ByteSliceConverter: pcf.coreData.Uint64ByteSliceConverter(), @@ -1409,10 +1411,10 @@ func (pcf *processComponentsFactory) newRequestersContainerFactory( SizeCheckDelta: pcf.config.Marshalizer.SizeCheckDelta, } - if shardC.SelfId() < shardC.NumberOfShards() { + if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { return requesterscontainer.NewShardRequestersContainerFactory(requestersContainerFactoryArgs) } - if shardC.SelfId() == core.MetachainShardId { + if shardCoordinator.SelfId() == core.MetachainShardId { return requesterscontainer.NewMetaRequestersContainerFactory(requestersContainerFactoryArgs) } @@ -1542,6 +1544,7 @@ func (pcf *processComponentsFactory) createStorageRequestersForMeta( ChanGracefullyClose: pcf.coreData.ChanStopNodeProcess(), SnapshotsEnabled: pcf.snapshotsEnabled, } + return 
storagerequesterscontainer.NewMetaRequestersContainerFactory(requestersContainerFactoryArgs) } @@ -1570,6 +1573,7 @@ func (pcf *processComponentsFactory) createStorageRequestersForShard( ChanGracefullyClose: pcf.coreData.ChanStopNodeProcess(), SnapshotsEnabled: pcf.snapshotsEnabled, } + return storagerequesterscontainer.NewShardRequestersContainerFactory(requestersContainerFactoryArgs) } diff --git a/factory/processing/processComponents_test.go b/factory/processing/processComponents_test.go index 701828e7ddb..92642f808e0 100644 --- a/factory/processing/processComponents_test.go +++ b/factory/processing/processComponents_test.go @@ -9,7 +9,6 @@ import ( "sync" "testing" - "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/keyValStorage" coreData "github.com/multiversx/mx-chain-core-go/data" dataBlock "github.com/multiversx/mx-chain-core-go/data/block" @@ -21,7 +20,6 @@ import ( "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/factory" "github.com/multiversx/mx-chain-go/config" - retriever "github.com/multiversx/mx-chain-go/dataRetriever" errorsMx "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/factory/mock" processComp "github.com/multiversx/mx-chain-go/factory/processing" @@ -33,7 +31,6 @@ import ( "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" mxState "github.com/multiversx/mx-chain-go/state" - "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/bootstrapMocks" "github.com/multiversx/mx-chain-go/testscommon/components" @@ -52,14 +49,11 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" - storageStubs "github.com/multiversx/mx-chain-go/testscommon/storage" updateMocks 
"github.com/multiversx/mx-chain-go/update/mock" "github.com/stretchr/testify/require" ) const ( - unreachableStep = 10000 - blockProcessorOnMetaStep = 31 testingProtocolSustainabilityAddress = "erd1932eft30w753xyvme8d49qejgkjc09n5e49w4mwdjtm0neld797su0dlxp" ) @@ -588,8 +582,6 @@ func TestProcessComponentsFactory_Create(t *testing.T) { args.PrefConfigs.FullArchive = true testCreateWithArgs(t, args, "rounds per epoch") }) - t.Run("NewFallbackHeaderValidator fails should error", testWithNilMarshaller(1, "Marshalizer", unreachableStep)) - t.Run("NewHeaderSigVerifier fails should error", testWithNilMarshaller(2, "Marshalizer", unreachableStep)) t.Run("createNetworkShardingCollector fails due to invalid PublicKeyPeerId config should error", func(t *testing.T) { t.Parallel() @@ -645,52 +637,6 @@ func TestProcessComponentsFactory_Create(t *testing.T) { args.Config.StoragePruning.NumActivePersisters = 0 testCreateWithArgs(t, args, "active persisters") }) - t.Run("newStorageRequester fails due to NewSimpleDataPacker failure on createStorageRequestersForMeta should error", func(t *testing.T) { - t.Parallel() - - args := createMockProcessComponentsFactoryArgs() - args.ImportDBConfig.IsImportDBMode = true - - coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) - step := 0 - coreCompStub.InternalMarshalizerCalled = func() marshal.Marshalizer { - step++ - if step > 3 { - return nil - } - return &testscommon.MarshalizerStub{} - } - args.CoreData = coreCompStub - updateShardCoordinatorForMetaAtStep(t, args, 3) - testCreateWithArgs(t, args, "marshalizer") - }) - t.Run("newStorageRequester fails due to NewSimpleDataPacker failure on createStorageRequestersForShard should error", func(t *testing.T) { - t.Parallel() - - args := createMockProcessComponentsFactoryArgs() - args.ImportDBConfig.IsImportDBMode = true - - coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) - step := 0 - 
coreCompStub.InternalMarshalizerCalled = func() marshal.Marshalizer { - step++ - if step > 3 { - return nil - } - return &testscommon.MarshalizerStub{} - } - args.CoreData = coreCompStub - testCreateWithArgs(t, args, "marshalizer") - }) - t.Run("newStorageRequester fails due to CreateForMeta failure should error", func(t *testing.T) { - t.Parallel() - - args := createMockProcessComponentsFactoryArgs() - args.ImportDBConfig.IsImportDBMode = true - args.Config.ShardHdrNonceHashStorage.Cache.Type = "invalid" - updateShardCoordinatorForMetaAtStep(t, args, 0) - testCreateWithArgs(t, args, "ShardHdrNonceHashStorage") - }) t.Run("newResolverContainerFactory fails due to NewPeerAuthenticationPayloadValidator failure should error", func(t *testing.T) { t.Parallel() @@ -698,51 +644,6 @@ func TestProcessComponentsFactory_Create(t *testing.T) { args.Config.HeartbeatV2.HeartbeatExpiryTimespanInSec = 0 testCreateWithArgs(t, args, "expiry timespan") }) - t.Run("newResolverContainerFactory fails due to invalid shard should error", - testWithInvalidShard(0, "could not create interceptor and resolver container factory")) - t.Run("newRequesterContainerFactory fails due to invalid shard should error", - testWithInvalidShard(5, "could not create requester container factory")) - t.Run("newMetaResolverContainerFactory fails due to NewSimpleDataPacker failure should error", func(t *testing.T) { - t.Parallel() - - args := createMockProcessComponentsFactoryArgs() - updateShardCoordinatorForMetaAtStep(t, args, 0) - coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) - cnt := 0 - coreCompStub.InternalMarshalizerCalled = func() marshal.Marshalizer { - cnt++ - if cnt > 3 { - return nil - } - return &testscommon.MarshalizerStub{} - } - args.CoreData = coreCompStub - testCreateWithArgs(t, args, "marshalizer") - }) - t.Run("newShardResolverContainerFactory fails due to NewSimpleDataPacker failure should error", - testWithNilMarshaller(3, "marshalizer", 
unreachableStep)) - t.Run("NewRequestersFinder fails should error", func(t *testing.T) { - t.Parallel() - - args := createMockProcessComponentsFactoryArgs() - args.ImportDBConfig.IsImportDBMode = true // coverage - bootstrapCompStub, ok := args.BootstrapComponents.(*mainFactoryMocks.BootstrapComponentsStub) - require.True(t, ok) - cnt := 0 - bootstrapCompStub.ShardCoordinatorCalled = func() sharding.Coordinator { - cnt++ - if cnt > 5 { - return nil - } - return &testscommon.ShardsCoordinatorMock{ - NoShards: 2, - CurrentShard: common.MetachainShardId, // coverage - } - } - testCreateWithArgs(t, args, "shard coordinator") - }) - t.Run("GetStorer TxLogsUnit fails should error", testWithMissingStorer(0, retriever.TxLogsUnit, unreachableStep)) - t.Run("NewRequestersFinder fails should error", testWithNilMarshaller(5, "Marshalizer", unreachableStep)) t.Run("generateGenesisHeadersAndApplyInitialBalances fails due to invalid GenesisNodePrice should error", func(t *testing.T) { t.Parallel() @@ -752,10 +653,6 @@ func TestProcessComponentsFactory_Create(t *testing.T) { args.SystemSCConfig.StakingSystemSCConfig.GenesisNodePrice = "invalid" testCreateWithArgs(t, args, "invalid genesis node price") }) - t.Run("generateGenesisHeadersAndApplyInitialBalances fails due to NewGenesisBlockCreator failure should error", - testWithNilMarshaller(7, "Marshalizer", unreachableStep)) - t.Run("setGenesisHeader fails due to invalid shard should error", - testWithInvalidShard(8, "genesis block does not exist")) t.Run("newValidatorStatisticsProcessor fails due to nil genesis header should error", func(t *testing.T) { t.Parallel() @@ -770,8 +667,6 @@ func TestProcessComponentsFactory_Create(t *testing.T) { } testCreateWithArgs(t, args, errorsMx.ErrGenesisBlockNotInitialized.Error()) }) - t.Run("indexGenesisBlocks fails due to CalculateHash failure should error", - testWithNilMarshaller(42, "marshalizer", unreachableStep)) t.Run("indexGenesisBlocks fails due to GenerateInitialTransactions 
failure should error", func(t *testing.T) { t.Parallel() @@ -783,102 +678,6 @@ func TestProcessComponentsFactory_Create(t *testing.T) { } testCreateWithArgs(t, args, expectedErr.Error()) }) - t.Run("NewValidatorsProvider fails should error", - testWithNilPubKeyConv(2, "pubkey converter", unreachableStep)) - t.Run("newEpochStartTrigger fails due to invalid shard should error", - testWithInvalidShard(16, "error creating new start of epoch trigger because of invalid shard id")) - t.Run("newEpochStartTrigger fails due to NewHeaderValidator failure should error", - testWithNilMarshaller(47, "Marshalizer", unreachableStep)) - t.Run("newEpochStartTrigger fails due to NewPeerMiniBlockSyncer failure should error", func(t *testing.T) { - t.Parallel() - - args := createMockProcessComponentsFactoryArgs() - dataCompStub, ok := args.Data.(*testsMocks.DataComponentsStub) - require.True(t, ok) - dataPool := dataCompStub.DataPool - cnt := 0 - dataCompStub.DataPool = &dataRetriever.PoolsHolderStub{ - HeadersCalled: dataPool.Headers, - TransactionsCalled: dataPool.Transactions, - MiniBlocksCalled: dataPool.MiniBlocks, - CurrBlockTxsCalled: dataPool.CurrentBlockTxs, - TrieNodesCalled: dataPool.TrieNodes, - ValidatorsInfoCalled: func() retriever.ShardedDataCacherNotifier { - cnt++ - if cnt > 3 { - return nil - } - return dataPool.ValidatorsInfo() - }, - CloseCalled: dataPool.Close, - } - testCreateWithArgs(t, args, "validators info pool") - }) - t.Run("newEpochStartTrigger fails due to NewPeerMiniBlockSyncer failure should error", func(t *testing.T) { - t.Parallel() - - args := createMockProcessComponentsFactoryArgs() - updateShardCoordinatorForMetaAtStep(t, args, 16) - dataCompStub, ok := args.Data.(*testsMocks.DataComponentsStub) - require.True(t, ok) - blockChainStub, ok := dataCompStub.BlockChain.(*testscommon.ChainHandlerStub) - require.True(t, ok) - cnt := 0 - blockChainStub.GetGenesisHeaderCalled = func() coreData.HeaderHandler { - cnt++ - if cnt > 1 { - return nil - } - return 
&testscommon.HeaderHandlerStub{} - } - testCreateWithArgs(t, args, errorsMx.ErrGenesisBlockNotInitialized.Error()) - }) - t.Run("newEpochStartTrigger fails due to invalid shard should error", - testWithInvalidShard(17, "error creating new start of epoch trigger because of invalid shard id")) - t.Run("NewHeaderValidator fails should error", testWithNilMarshaller(48, "marshalizer", unreachableStep)) - t.Run("prepareGenesisBlock fails due to CalculateHash failure should error", func(t *testing.T) { - t.Parallel() - - args := createMockProcessComponentsFactoryArgs() - dataCompStub, ok := args.Data.(*testsMocks.DataComponentsStub) - require.True(t, ok) - blockChainStub, ok := dataCompStub.BlockChain.(*testscommon.ChainHandlerStub) - require.True(t, ok) - cnt := 0 - blockChainStub.SetGenesisHeaderCalled = func(handler coreData.HeaderHandler) error { - cnt++ - if cnt > 1 { - return expectedErr - } - return nil - } - testCreateWithArgs(t, args, expectedErr.Error()) - }) - t.Run("saveGenesisHeaderToStorage fails due to Marshal failure should error", func(t *testing.T) { - t.Parallel() - - args := createMockProcessComponentsFactoryArgs() - coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) - cnt := 0 - coreCompStub.InternalMarshalizerCalled = func() marshal.Marshalizer { - return &testscommon.MarshalizerStub{ - MarshalCalled: func(obj interface{}) ([]byte, error) { - cnt++ - if cnt > 38 { - return nil, expectedErr - } - return []byte(""), nil - }, - } - } - args.CoreData = coreCompStub - testCreateWithArgs(t, args, expectedErr.Error()) - }) - t.Run("GetStorer TxLogsUnit fails should error", testWithMissingStorer(2, retriever.BootstrapUnit, unreachableStep)) - t.Run("NewBootstrapStorer fails should error", testWithNilMarshaller(51, "Marshalizer", unreachableStep)) - t.Run("NewHeaderValidator fails should error", testWithNilMarshaller(52, "Marshalizer", unreachableStep)) - t.Run("newBlockTracker fails due to invalid shard should error", - 
testWithInvalidShard(20, "could not create block tracker")) t.Run("NewMiniBlocksPoolsCleaner fails should error", func(t *testing.T) { t.Parallel() @@ -893,22 +692,6 @@ func TestProcessComponentsFactory_Create(t *testing.T) { args.Config.PoolsCleanersConfig.MaxRoundsToKeepUnprocessedTransactions = 0 testCreateWithArgs(t, args, "MaxRoundsToKeepUnprocessedData") }) - t.Run("NewMiniBlockTrack fails should error", func(t *testing.T) { - t.Parallel() - - args := createMockProcessComponentsFactoryArgs() - bootstrapCompStub, ok := args.BootstrapComponents.(*mainFactoryMocks.BootstrapComponentsStub) - require.True(t, ok) - cnt := 0 - bootstrapCompStub.ShardCoordinatorCalled = func() sharding.Coordinator { - cnt++ - if cnt > 25 { - return nil - } - return mock.NewMultiShardsCoordinatorMock(2) - } - testCreateWithArgs(t, args, "shard coordinator") - }) t.Run("createHardforkTrigger fails due to Decode failure should error", func(t *testing.T) { t.Parallel() @@ -916,26 +699,6 @@ func TestProcessComponentsFactory_Create(t *testing.T) { args.Config.Hardfork.PublicKeyToListenFrom = "invalid key" testCreateWithArgs(t, args, "PublicKeyToListenFrom") }) - t.Run("newInterceptorContainerFactory fails due to invalid shard should error", - testWithInvalidShard(24, "could not create interceptor container factory")) - t.Run("createExportFactoryHandler fails", func(t *testing.T) { - t.Parallel() - - args := createMockProcessComponentsFactoryArgs() - bootstrapCompStub, ok := args.BootstrapComponents.(*mainFactoryMocks.BootstrapComponentsStub) - require.True(t, ok) - cnt := 0 - bootstrapCompStub.ShardCoordinatorCalled = func() sharding.Coordinator { - cnt++ - if cnt > 26 { - return nil - } - return mock.NewMultiShardsCoordinatorMock(2) - } - testCreateWithArgs(t, args, "shard coordinator") - }) - t.Run("newForkDetector fails due to invalid shard should error", - testWithInvalidShard(28, "could not create fork detector")) t.Run("NewCache fails for vmOutput should error", func(t *testing.T) { 
t.Parallel() @@ -943,50 +706,6 @@ func TestProcessComponentsFactory_Create(t *testing.T) { args.Config.VMOutputCacher.Type = "invalid" testCreateWithArgs(t, args, "cache type") }) - t.Run("GetStorer TxLogsUnit fails should error", - testWithMissingStorer(0, retriever.ScheduledSCRsUnit, unreachableStep)) - t.Run("NewScheduledTxsExecution fails should error", - testWithNilMarshaller(104, "Marshalizer", unreachableStep)) - t.Run("NewESDTDataStorage fails should error", - testWithNilMarshaller(106, "Marshalizer", unreachableStep)) - t.Run("NewReceiptsRepository fails should error", - testWithNilMarshaller(107, "marshalizer", unreachableStep)) - t.Run("newBlockProcessor fails due to invalid shard should error", - testWithInvalidShard(32, "could not create block processor")) - - // newShardBlockProcessor - t.Run("newShardBlockProcessor: NewESDTTransferParser fails should error", - testWithNilMarshaller(108, "marshaller", unreachableStep)) - t.Run("newShardBlockProcessor: createBuiltInFunctionContainer fails should error", - testWithNilAddressPubKeyConv(46, "public key converter", unreachableStep)) - t.Run("newShardBlockProcessor: createVMFactoryShard fails due to NewBlockChainHookImpl failure should error", - testWithNilAddressPubKeyConv(47, "pubkey converter", unreachableStep)) - t.Run("newShardBlockProcessor: NewIntermediateProcessorsContainerFactory fails should error", - testWithNilMarshaller(111, "Marshalizer", unreachableStep)) - t.Run("newShardBlockProcessor: NewTxTypeHandler fails should error", - testWithNilAddressPubKeyConv(49, "pubkey converter", unreachableStep)) - t.Run("newShardBlockProcessor: NewGasComputation fails should error", - testWithNilEnableEpochsHandler(13, "enable epochs handler", unreachableStep)) - t.Run("newShardBlockProcessor: NewSmartContractProcessor fails should error", - testWithNilAddressPubKeyConv(50, "pubkey converter", unreachableStep)) - t.Run("newShardBlockProcessor: NewRewardTxProcessor fails should error", - 
testWithNilAddressPubKeyConv(51, "pubkey converter", unreachableStep)) - t.Run("newShardBlockProcessor: NewTxProcessor fails should error", - testWithNilAddressPubKeyConv(52, "pubkey converter", unreachableStep)) - t.Run("newShardBlockProcessor: createShardTxSimulatorProcessor fails due to NewReadOnlyAccountsDB failure should error", - testWithNilAccountsAdapterAPI(1, "accounts adapter", unreachableStep)) - t.Run("newShardBlockProcessor: createShardTxSimulatorProcessor fails due to NewIntermediateProcessorsContainerFactory failure should error", - testWithNilAddressPubKeyConv(53, "pubkey converter", unreachableStep)) - t.Run("newShardBlockProcessor: createShardTxSimulatorProcessor fails due to createBuiltInFunctionContainer failure should error", - testWithNilAddressPubKeyConv(54, "public key converter", unreachableStep)) - t.Run("newShardBlockProcessor: createShardTxSimulatorProcessor fails due to createVMFactoryShard failure should error", - testWithNilAddressPubKeyConv(55, "pubkey converter", unreachableStep)) - t.Run("newShardBlockProcessor: createOutportDataProvider fails due to missing TransactionUnit should error", - testWithMissingStorer(3, retriever.TransactionUnit, unreachableStep)) - t.Run("newShardBlockProcessor: createOutportDataProvider fails due to missing MiniBlockUnit should error", - testWithMissingStorer(4, retriever.MiniBlockUnit, unreachableStep)) - t.Run("newShardBlockProcessor: NewShardProcessor fails should error", - testWithNilEnableEpochsHandler(23, "enable epochs handler", unreachableStep)) t.Run("newShardBlockProcessor: attachProcessDebugger fails should error", func(t *testing.T) { t.Parallel() @@ -995,64 +714,6 @@ func TestProcessComponentsFactory_Create(t *testing.T) { args.Config.Debug.Process.PollingTimeInSeconds = 0 testCreateWithArgs(t, args, "PollingTimeInSeconds") }) - t.Run("newShardBlockProcessor: NewBlockSizeComputation fails should error", - testWithNilMarshaller(117, "Marshalizer", unreachableStep)) - 
t.Run("newShardBlockProcessor: NewPreProcessorsContainerFactory fails should error", - testWithNilMarshaller(118, "Marshalizer", unreachableStep)) - t.Run("newShardBlockProcessor: NewPrintDoubleTransactionsDetector fails should error", - testWithNilMarshaller(119, "Marshalizer", unreachableStep)) - t.Run("newShardBlockProcessor: NewTransactionCoordinator fails should error", - testWithNilMarshaller(120, "Marshalizer", unreachableStep)) - - // newMetaBlockProcessor, step for meta is 31 inside newBlockProcessor - t.Run("newMetaBlockProcessor: createBuiltInFunctionContainer fails should error", - testWithNilAddressPubKeyConv(46, "public key converter", blockProcessorOnMetaStep)) - t.Run("newMetaBlockProcessor: createVMFactoryMeta fails due to NewBlockChainHookImpl failure should error", - testWithNilAddressPubKeyConv(47, "pubkey converter", blockProcessorOnMetaStep)) - t.Run("newMetaBlockProcessor: NewIntermediateProcessorsContainerFactory fails should error", - testWithNilMarshaller(111, "Marshalizer", blockProcessorOnMetaStep)) - t.Run("newMetaBlockProcessor: NewESDTTransferParser fails should error", - testWithNilMarshaller(112, "marshaller", blockProcessorOnMetaStep)) - t.Run("newMetaBlockProcessor: NewTxTypeHandler fails should error", - testWithNilAddressPubKeyConv(49, "pubkey converter", blockProcessorOnMetaStep)) - t.Run("newMetaBlockProcessor: NewGasComputation fails should error", - testWithNilEnableEpochsHandler(13, "enable epochs handler", blockProcessorOnMetaStep)) - t.Run("newMetaBlockProcessor: NewSmartContractProcessor fails should error", - testWithNilAddressPubKeyConv(50, "pubkey converter", blockProcessorOnMetaStep)) - t.Run("newMetaBlockProcessor: NewMetaTxProcessor fails should error", - testWithNilAddressPubKeyConv(51, "pubkey converter", blockProcessorOnMetaStep)) - t.Run("newMetaBlockProcessor: createMetaTxSimulatorProcessor fails due to NewIntermediateProcessorsContainerFactory failure should error", - testWithNilAddressPubKeyConv(52, "pubkey 
converter", blockProcessorOnMetaStep)) - t.Run("newMetaBlockProcessor: createMetaTxSimulatorProcessor fails due to NewReadOnlyAccountsDB failure should error", - testWithNilAccountsAdapterAPI(1, "accounts adapter", blockProcessorOnMetaStep)) - t.Run("newMetaBlockProcessor: createMetaTxSimulatorProcessor fails due to createBuiltInFunctionContainer failure should error", - testWithNilAddressPubKeyConv(53, "public key converter", blockProcessorOnMetaStep)) - t.Run("newMetaBlockProcessor: createMetaTxSimulatorProcessor fails due to createVMFactoryMeta failure should error", - testWithNilAddressPubKeyConv(54, "pubkey converter", blockProcessorOnMetaStep)) - t.Run("newMetaBlockProcessor: createMetaTxSimulatorProcessor fails due to NewMetaTxProcessor failure second time should error", - testWithNilAddressPubKeyConv(55, "pubkey converter", blockProcessorOnMetaStep)) - t.Run("newMetaBlockProcessor: NewBlockSizeComputation fails should error", - testWithNilMarshaller(120, "Marshalizer", blockProcessorOnMetaStep)) - t.Run("newMetaBlockProcessor: NewPreProcessorsContainerFactory fails should error", - testWithNilMarshaller(121, "Marshalizer", blockProcessorOnMetaStep)) - t.Run("newMetaBlockProcessor: NewPrintDoubleTransactionsDetector fails should error", - testWithNilMarshaller(122, "Marshalizer", blockProcessorOnMetaStep)) - t.Run("newMetaBlockProcessor: NewTransactionCoordinator fails should error", - testWithNilMarshaller(123, "Marshalizer", blockProcessorOnMetaStep)) - t.Run("newMetaBlockProcessor: NewStakingToPeer fails should error", - testWithNilMarshaller(124, "Marshalizer", blockProcessorOnMetaStep)) - t.Run("newMetaBlockProcessor: NewEpochStartData fails should error", - testWithNilMarshaller(125, "Marshalizer", blockProcessorOnMetaStep)) - t.Run("newMetaBlockProcessor: NewEndOfEpochEconomicsDataCreator fails should error", - testWithNilMarshaller(126, "marshalizer", blockProcessorOnMetaStep)) - t.Run("newMetaBlockProcessor: GetStorer RewardTransactionUnit fails 
should error", - testWithMissingStorer(1, retriever.RewardTransactionUnit, blockProcessorOnMetaStep)) - t.Run("newMetaBlockProcessor: GetStorer MiniBlockUnit fails should error", - testWithMissingStorer(4, retriever.MiniBlockUnit, blockProcessorOnMetaStep)) - t.Run("newMetaBlockProcessor: NewRewardsCreatorProxy fails should error", - testWithNilMarshaller(127, "marshalizer", blockProcessorOnMetaStep)) - - t.Run("NewNodesSetupChecker fails should error", testWithNilPubKeyConv(5, "pubkey converter", unreachableStep)) t.Run("nodesSetupChecker.Check fails should error", func(t *testing.T) { t.Parallel() @@ -1078,24 +739,6 @@ func TestProcessComponentsFactory_Create(t *testing.T) { args.CoreData = coreCompStub testCreateWithArgs(t, args, "no one staked") }) - t.Run("NewNodeRedundancy fails should error", func(t *testing.T) { - t.Parallel() - - args := createMockProcessComponentsFactoryArgs() - netwCompStub, ok := args.Network.(*testsMocks.NetworkComponentsStub) - require.True(t, ok) - cnt := 0 - netwCompStub.MessengerCalled = func() p2p.Messenger { - cnt++ - if cnt > 8 { - return nil - } - return &p2pmocks.MessengerStub{} - } - testCreateWithArgs(t, args, "messenger") - }) - t.Run("NewReceiptsRepository fails should error", testWithNilMarshaller(124, "marshalizer", unreachableStep)) - t.Run("NewTxsSenderWithAccumulator fails should error", testWithNilMarshaller(125, "Marshalizer", unreachableStep)) t.Run("should work with indexAndReturnGenesisAccounts failing due to RootHash failure", func(t *testing.T) { t.Parallel() @@ -1367,174 +1010,6 @@ func fundGenesisWallets(t *testing.T, args processComp.ProcessComponentsFactoryA } } -func testWithNilMarshaller(nilStep int, expectedErrSubstr string, metaStep int) func(t *testing.T) { - return func(t *testing.T) { - t.Parallel() - - args := createMockProcessComponentsFactoryArgs() - coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) - step := 0 - coreCompStub.InternalMarshalizerCalled = 
func() marshal.Marshalizer { - step++ - if step > nilStep { - return nil - } - return &testscommon.MarshalizerStub{} - } - args.CoreData = coreCompStub - updateShardCoordinatorForMetaAtStep(t, args, metaStep) - testCreateWithArgs(t, args, expectedErrSubstr) - } -} - -func testWithNilPubKeyConv(nilStep int, expectedErrSubstr string, metaStep int) func(t *testing.T) { - return func(t *testing.T) { - t.Parallel() - - args := createMockProcessComponentsFactoryArgs() - coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) - pubKeyConv := args.CoreData.ValidatorPubKeyConverter() - step := 0 - coreCompStub.ValidatorPubKeyConverterCalled = func() core.PubkeyConverter { - step++ - if step > nilStep { - return nil - } - return pubKeyConv - } - args.CoreData = coreCompStub - updateShardCoordinatorForMetaAtStep(t, args, metaStep) - testCreateWithArgs(t, args, expectedErrSubstr) - } -} - -func testWithNilAddressPubKeyConv(nilStep int, expectedErrSubstr string, metaStep int) func(t *testing.T) { - return func(t *testing.T) { - t.Parallel() - - args := createMockProcessComponentsFactoryArgs() - coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) - pubKeyConv := args.CoreData.AddressPubKeyConverter() - step := 0 - coreCompStub.AddressPubKeyConverterCalled = func() core.PubkeyConverter { - step++ - if step > nilStep { - return nil - } - return pubKeyConv - } - args.CoreData = coreCompStub - updateShardCoordinatorForMetaAtStep(t, args, metaStep) - testCreateWithArgs(t, args, expectedErrSubstr) - } -} - -func testWithNilEnableEpochsHandler(nilStep int, expectedErrSubstr string, metaStep int) func(t *testing.T) { - return func(t *testing.T) { - t.Parallel() - - args := createMockProcessComponentsFactoryArgs() - coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) - enableEpochsHandler := coreCompStub.EnableEpochsHandler() - step := 0 - coreCompStub.EnableEpochsHandlerCalled = 
func() common.EnableEpochsHandler { - step++ - if step > nilStep { - return nil - } - return enableEpochsHandler - } - args.CoreData = coreCompStub - updateShardCoordinatorForMetaAtStep(t, args, metaStep) - testCreateWithArgs(t, args, expectedErrSubstr) - } -} - -func testWithNilAccountsAdapterAPI(nilStep int, expectedErrSubstr string, metaStep int) func(t *testing.T) { - return func(t *testing.T) { - t.Parallel() - - args := createMockProcessComponentsFactoryArgs() - stateCompMock := factoryMocks.NewStateComponentsMockFromRealComponent(args.State) - accountsAdapterAPI := stateCompMock.AccountsAdapterAPI() - step := 0 - stateCompMock.AccountsAdapterAPICalled = func() mxState.AccountsAdapter { - step++ - if step > nilStep { - return nil - } - return accountsAdapterAPI - } - args.State = stateCompMock - updateShardCoordinatorForMetaAtStep(t, args, metaStep) - testCreateWithArgs(t, args, expectedErrSubstr) - } -} - -func testWithMissingStorer(failStep int, missingUnitType retriever.UnitType, metaStep int) func(t *testing.T) { - return func(t *testing.T) { - t.Parallel() - - expectedErr := errors.New("expected error") - args := createMockProcessComponentsFactoryArgs() - dataCompStub, ok := args.Data.(*testsMocks.DataComponentsStub) - require.True(t, ok) - store := args.Data.StorageService() - cnt := 0 - dataCompStub.Store = &storageStubs.ChainStorerStub{ - GetStorerCalled: func(unitType retriever.UnitType) (storage.Storer, error) { - if unitType == missingUnitType { - cnt++ - if cnt > failStep { - return nil, expectedErr - } - } - return store.GetStorer(unitType) - }, - } - updateShardCoordinatorForMetaAtStep(t, args, metaStep) - testCreateWithArgs(t, args, expectedErr.Error()) - } -} - -func updateShardCoordinatorForMetaAtStep(t *testing.T, args processComp.ProcessComponentsFactoryArgs, metaStep int) { - bootstrapCompStub, ok := args.BootstrapComponents.(*mainFactoryMocks.BootstrapComponentsStub) - require.True(t, ok) - step := 0 - 
bootstrapCompStub.ShardCoordinatorCalled = func() sharding.Coordinator { - step++ - shardC := mock.NewMultiShardsCoordinatorMock(2) - if step > metaStep { - shardC.CurrentShard = common.MetachainShardId - } - return shardC - } -} - -func testWithInvalidShard(failingStep int, expectedErrSubstr string) func(t *testing.T) { - return func(t *testing.T) { - t.Parallel() - - args := createMockProcessComponentsFactoryArgs() - bootstrapCompStub, ok := args.BootstrapComponents.(*mainFactoryMocks.BootstrapComponentsStub) - require.True(t, ok) - - x := bootstrapCompStub.ShardCoordinator() - cnt := 0 - bootstrapCompStub.ShardCoordinatorCalled = func() sharding.Coordinator { - cnt++ - if cnt > failingStep { - return &testscommon.ShardsCoordinatorMock{ - NoShards: 2, - CurrentShard: 3, - } - } - return x - } - testCreateWithArgs(t, args, expectedErrSubstr) - } -} - func testCreateWithArgs(t *testing.T, args processComp.ProcessComponentsFactoryArgs, expectedErrSubstr string) { pcf, _ := processComp.NewProcessComponentsFactory(args) require.NotNil(t, pcf) diff --git a/genesis/errors.go b/genesis/errors.go index 77fee48171b..1c0330e4cad 100644 --- a/genesis/errors.go +++ b/genesis/errors.go @@ -171,4 +171,4 @@ var ErrMissingDeployedSC = errors.New("missing deployed SC") var ErrNilEpochConfig = errors.New("nil epoch config") // ErrNilGasSchedule signals that an operation has been attempted with a nil gas schedule -var ErrNilGasSchedule = errors.New("nil GasSchedule") +var ErrNilGasSchedule = errors.New("nil gas schedule") From 98fe8ce657860be6afa5f1fcf12dbe0b77c08450 Mon Sep 17 00:00:00 2001 From: robertsasu Date: Fri, 5 May 2023 14:10:29 +0300 Subject: [PATCH 130/221] adding lost proposal fee --- cmd/node/config/systemSmartContractsConfig.toml | 1 + config/systemSmartContractsConfig.go | 1 + epochStart/metachain/systemSCs_test.go | 3 ++- genesis/process/genesisBlockCreator_test.go | 3 ++- integrationTests/multiShard/hardFork/hardFork_test.go | 1 + 
integrationTests/testInitializer.go | 2 ++ integrationTests/testProcessorNode.go | 2 ++ integrationTests/vm/testInitializer.go | 1 + process/factory/metachain/vmContainerFactory_test.go | 2 ++ testscommon/components/components.go | 1 + vm/factory/systemSCFactory_test.go | 1 + vm/systemSmartContracts/governance.go | 6 ++++++ vm/systemSmartContracts/governance.pb.go | 1 + vm/systemSmartContracts/governance.proto | 3 ++- vm/systemSmartContracts/governance_test.go | 1 + 15 files changed, 26 insertions(+), 3 deletions(-) diff --git a/cmd/node/config/systemSmartContractsConfig.toml b/cmd/node/config/systemSmartContractsConfig.toml index 256dc292171..33d7713f3d7 100644 --- a/cmd/node/config/systemSmartContractsConfig.toml +++ b/cmd/node/config/systemSmartContractsConfig.toml @@ -26,6 +26,7 @@ MinVetoThreshold = 50 [GovernanceSystemSCConfig.Active] ProposalCost = "1000000000000000000000" #1000 eGLD + LostProposalFee = "10000000000000000000" #10 eGLD MinQuorum = 0.5 #fraction of value 0.5 - 50% MinPassThreshold = 0.5 #fraction of value 0.5 - 50% MinVetoThreshold = 0.33 #fraction of value 0.33 - 33% diff --git a/config/systemSmartContractsConfig.go b/config/systemSmartContractsConfig.go index 895aea97c8b..0d991492ddf 100644 --- a/config/systemSmartContractsConfig.go +++ b/config/systemSmartContractsConfig.go @@ -45,6 +45,7 @@ type GovernanceSystemSCConfigV1 struct { // system smart contract once it activates type GovernanceSystemSCConfigActive struct { ProposalCost string + LostProposalFee string MinQuorum float64 MinPassThreshold float64 MinVetoThreshold float64 diff --git a/epochStart/metachain/systemSCs_test.go b/epochStart/metachain/systemSCs_test.go index bfa00d3c444..3388d7fb48b 100644 --- a/epochStart/metachain/systemSCs_test.go +++ b/epochStart/metachain/systemSCs_test.go @@ -996,6 +996,7 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp MinQuorum: 0.5, MinPassThreshold: 0.5, MinVetoThreshold: 0.5, + LostProposalFee: "1", }, 
ChangeConfigAddress: "3132333435363738393031323334353637383930313233343536373839303234", }, @@ -1113,7 +1114,7 @@ func createEconomicsData() process.EconomicsDataHandler { EpochNotifier: &epochNotifier.EpochNotifierStub{}, EnableEpochsHandler: &testscommon.EnableEpochsHandlerStub{}, BuiltInFunctionsCostHandler: &mock.BuiltInCostHandlerStub{}, - TxVersionChecker: &testscommon.TxVersionCheckerStub{}, + TxVersionChecker: &testscommon.TxVersionCheckerStub{}, } economicsData, _ := economicsHandler.NewEconomicsData(argsNewEconomicsData) return economicsData diff --git a/genesis/process/genesisBlockCreator_test.go b/genesis/process/genesisBlockCreator_test.go index 07ea7f986db..0ef9aff4e14 100644 --- a/genesis/process/genesisBlockCreator_test.go +++ b/genesis/process/genesisBlockCreator_test.go @@ -70,7 +70,7 @@ func createMockArgument( UInt64ByteSliceConv: &mock.Uint64ByteSliceConverterMock{}, AddrPubKeyConv: testscommon.NewPubkeyConverterMock(32), Chain: "chainID", - TxVersionCheck: &testscommon.TxVersionCheckerStub{}, + TxVersionCheck: &testscommon.TxVersionCheckerStub{}, MinTxVersion: 1, EnableEpochsHandlerField: &testscommon.EnableEpochsHandlerStub{}, }, @@ -105,6 +105,7 @@ func createMockArgument( MinQuorum: 0.5, MinPassThreshold: 0.5, MinVetoThreshold: 0.5, + LostProposalFee: "1", }, ChangeConfigAddress: "3132333435363738393031323334353637383930313233343536373839303234", }, diff --git a/integrationTests/multiShard/hardFork/hardFork_test.go b/integrationTests/multiShard/hardFork/hardFork_test.go index 929d6afc1b9..1d6980f4132 100644 --- a/integrationTests/multiShard/hardFork/hardFork_test.go +++ b/integrationTests/multiShard/hardFork/hardFork_test.go @@ -443,6 +443,7 @@ func hardForkImport( MinQuorum: 0.5, MinPassThreshold: 0.5, MinVetoThreshold: 0.5, + LostProposalFee: "1", }, ChangeConfigAddress: integrationTests.DelegationManagerConfigChangeAddress, }, diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 
dac26a1b4be..567be527904 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -676,6 +676,7 @@ func CreateFullGenesisBlocks( MinQuorum: 0.5, MinPassThreshold: 0.5, MinVetoThreshold: 0.5, + LostProposalFee: "1", }, }, StakingSystemSCConfig: config.StakingSystemSCConfig{ @@ -783,6 +784,7 @@ func CreateGenesisMetaBlock( MinQuorum: 0.5, MinPassThreshold: 0.5, MinVetoThreshold: 0.5, + LostProposalFee: "1", }, ChangeConfigAddress: DelegationManagerConfigChangeAddress, }, diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index fb55a30cb98..a37fc24d45d 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -861,6 +861,7 @@ func (tpn *TestProcessorNode) createFullSCQueryService(gasMap map[string]map[str MinQuorum: 0.5, MinPassThreshold: 0.5, MinVetoThreshold: 0.5, + LostProposalFee: "1", }, ChangeConfigAddress: DelegationManagerConfigChangeAddress, }, @@ -1762,6 +1763,7 @@ func (tpn *TestProcessorNode) initMetaInnerProcessors(gasMap map[string]map[stri MinQuorum: 0.5, MinPassThreshold: 0.5, MinVetoThreshold: 0.5, + LostProposalFee: "1", }, ChangeConfigAddress: DelegationManagerConfigChangeAddress, }, diff --git a/integrationTests/vm/testInitializer.go b/integrationTests/vm/testInitializer.go index 0633bb3e546..cb2d1d37a42 100644 --- a/integrationTests/vm/testInitializer.go +++ b/integrationTests/vm/testInitializer.go @@ -708,6 +708,7 @@ func createSystemSCConfig() *config.SystemSmartContractsConfig { MinQuorum: 0.5, MinPassThreshold: 0.5, MinVetoThreshold: 0.5, + LostProposalFee: "1", }, ChangeConfigAddress: "3132333435363738393031323334353637383930313233343536373839303234", }, diff --git a/process/factory/metachain/vmContainerFactory_test.go b/process/factory/metachain/vmContainerFactory_test.go index cc98654e8f3..1032ea2604e 100644 --- a/process/factory/metachain/vmContainerFactory_test.go +++ b/process/factory/metachain/vmContainerFactory_test.go 
@@ -47,6 +47,7 @@ func createVmContainerMockArgument(gasSchedule core.GasScheduleNotifier) ArgsNew MinQuorum: 0.5, MinPassThreshold: 0.5, MinVetoThreshold: 0.5, + LostProposalFee: "1", }, }, StakingSystemSCConfig: config.StakingSystemSCConfig{ @@ -313,6 +314,7 @@ func TestVmContainerFactory_Create(t *testing.T) { MinQuorum: 0.5, MinPassThreshold: 0.5, MinVetoThreshold: 0.5, + LostProposalFee: "1", }, ChangeConfigAddress: "3132333435363738393031323334353637383930313233343536373839303234", }, diff --git a/testscommon/components/components.go b/testscommon/components/components.go index a2344fce33a..83f44cb8cbf 100644 --- a/testscommon/components/components.go +++ b/testscommon/components/components.go @@ -538,6 +538,7 @@ func GetProcessArgs( MinQuorum: 0.5, MinPassThreshold: 0.5, MinVetoThreshold: 0.5, + LostProposalFee: "1", }, ChangeConfigAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", }, diff --git a/vm/factory/systemSCFactory_test.go b/vm/factory/systemSCFactory_test.go index 2d52a260a18..7f78491d429 100644 --- a/vm/factory/systemSCFactory_test.go +++ b/vm/factory/systemSCFactory_test.go @@ -48,6 +48,7 @@ func createMockNewSystemScFactoryArgs() ArgsNewSystemSCFactory { MinQuorum: 0.5, MinPassThreshold: 0.5, MinVetoThreshold: 0.5, + LostProposalFee: "1", }, ChangeConfigAddress: "3132333435363738393031323334353637383930313233343536373839303234", }, diff --git a/vm/systemSmartContracts/governance.go b/vm/systemSmartContracts/governance.go index 2e97f2ccb97..8f690c6283b 100644 --- a/vm/systemSmartContracts/governance.go +++ b/vm/systemSmartContracts/governance.go @@ -1117,11 +1117,17 @@ func (g *governanceContract) convertV2Config(config config.GovernanceSystemSCCon return nil, vm.ErrIncorrectConfig } + lostProposalFee, success := big.NewInt(0).SetString(config.Active.LostProposalFee, conversionBase) + if !success { + return nil, vm.ErrIncorrectConfig + } + return &GovernanceConfigV2{ MinQuorum: float32(config.Active.MinQuorum), 
MinPassThreshold: float32(config.Active.MinPassThreshold), MinVetoThreshold: float32(config.Active.MinVetoThreshold), ProposalFee: proposalFee, + LostProposalFee: lostProposalFee, }, nil } diff --git a/vm/systemSmartContracts/governance.pb.go b/vm/systemSmartContracts/governance.pb.go index 2f7e55df5c3..818034b53b7 100644 --- a/vm/systemSmartContracts/governance.pb.go +++ b/vm/systemSmartContracts/governance.pb.go @@ -198,6 +198,7 @@ type GovernanceConfig struct { MinPassThreshold int32 `protobuf:"varint,3,opt,name=MinPassThreshold,proto3" json:"MinPassThreshold"` MinVetoThreshold int32 `protobuf:"varint,4,opt,name=MinVetoThreshold,proto3" json:"MinVetoThreshold"` ProposalFee *math_big.Int `protobuf:"bytes,5,opt,name=ProposalFee,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"ProposalFee"` + LostProposalFee *math_big.Int `protobuf:"bytes,5,opt,name=LostProposalFee,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"LostProposalFee"` } func (m *GovernanceConfig) Reset() { *m = GovernanceConfig{} } diff --git a/vm/systemSmartContracts/governance.proto b/vm/systemSmartContracts/governance.proto index a60fee5b126..22adc951121 100644 --- a/vm/systemSmartContracts/governance.proto +++ b/vm/systemSmartContracts/governance.proto @@ -43,7 +43,8 @@ message GovernanceConfigV2 { float MinPassThreshold = 2 [(gogoproto.jsontag) = "MinPassThreshold"]; float MinVetoThreshold = 3 [(gogoproto.jsontag) = "MinVetoThreshold"]; bytes ProposalFee = 4 [(gogoproto.jsontag) = "ProposalFee", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; - uint64 LastProposalNonce = 5 [(gogoproto.jsontag) = "LastProposalNonce"]; + bytes LostProposalFee = 5 [(gogoproto.jsontag) = "ProposalFee", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; + uint64 LastProposalNonce = 6 [(gogoproto.jsontag) = 
"LastProposalNonce"]; } message OngoingVotedList { diff --git a/vm/systemSmartContracts/governance_test.go b/vm/systemSmartContracts/governance_test.go index 3faf8489503..dc7a4b0eaa5 100644 --- a/vm/systemSmartContracts/governance_test.go +++ b/vm/systemSmartContracts/governance_test.go @@ -46,6 +46,7 @@ func createArgsWithEEI(eei vm.SystemEI) ArgsNewGovernanceContract { MinQuorum: 0.5, MinPassThreshold: 0.5, MinVetoThreshold: 0.5, + LostProposalFee: "1", }, ChangeConfigAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", }, From 591300e4a1f555c1fc498565b1b7490b201c9ae7 Mon Sep 17 00:00:00 2001 From: jules01 Date: Fri, 5 May 2023 14:15:17 +0300 Subject: [PATCH 131/221] - regenerated governance.pb.go --- vm/systemSmartContracts/governance.pb.go | 191 +++++++++++++++-------- 1 file changed, 130 insertions(+), 61 deletions(-) diff --git a/vm/systemSmartContracts/governance.pb.go b/vm/systemSmartContracts/governance.pb.go index 818034b53b7..d7cbb16ac1f 100644 --- a/vm/systemSmartContracts/governance.pb.go +++ b/vm/systemSmartContracts/governance.pb.go @@ -198,7 +198,6 @@ type GovernanceConfig struct { MinPassThreshold int32 `protobuf:"varint,3,opt,name=MinPassThreshold,proto3" json:"MinPassThreshold"` MinVetoThreshold int32 `protobuf:"varint,4,opt,name=MinVetoThreshold,proto3" json:"MinVetoThreshold"` ProposalFee *math_big.Int `protobuf:"bytes,5,opt,name=ProposalFee,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"ProposalFee"` - LostProposalFee *math_big.Int `protobuf:"bytes,5,opt,name=LostProposalFee,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"LostProposalFee"` } func (m *GovernanceConfig) Reset() { *m = GovernanceConfig{} } @@ -269,7 +268,8 @@ type GovernanceConfigV2 struct { MinPassThreshold float32 `protobuf:"fixed32,2,opt,name=MinPassThreshold,proto3" json:"MinPassThreshold"` MinVetoThreshold float32 
`protobuf:"fixed32,3,opt,name=MinVetoThreshold,proto3" json:"MinVetoThreshold"` ProposalFee *math_big.Int `protobuf:"bytes,4,opt,name=ProposalFee,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"ProposalFee"` - LastProposalNonce uint64 `protobuf:"varint,5,opt,name=LastProposalNonce,proto3" json:"LastProposalNonce"` + LostProposalFee *math_big.Int `protobuf:"bytes,5,opt,name=LostProposalFee,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"ProposalFee"` + LastProposalNonce uint64 `protobuf:"varint,6,opt,name=LastProposalNonce,proto3" json:"LastProposalNonce"` } func (m *GovernanceConfigV2) Reset() { *m = GovernanceConfigV2{} } @@ -328,6 +328,13 @@ func (m *GovernanceConfigV2) GetProposalFee() *math_big.Int { return nil } +func (m *GovernanceConfigV2) GetLostProposalFee() *math_big.Int { + if m != nil { + return m.LostProposalFee + } + return nil +} + func (m *GovernanceConfigV2) GetLastProposalNonce() uint64 { if m != nil { return m.LastProposalNonce @@ -457,63 +464,63 @@ func init() { func init() { proto.RegisterFile("governance.proto", fileDescriptor_e18a03da5266c714) } var fileDescriptor_e18a03da5266c714 = []byte{ - // 883 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4d, 0x8f, 0xe3, 0x34, - 0x18, 0x6e, 0x9a, 0x76, 0x3e, 0x3c, 0x9d, 0x25, 0x6b, 0x16, 0x29, 0xe2, 0x10, 0x8f, 0x7a, 0xaa, - 0x40, 0xd3, 0x4a, 0x80, 0xb4, 0x12, 0x5c, 0xd8, 0x74, 0x87, 0x65, 0xa4, 0xdd, 0xee, 0x6c, 0x66, - 0x28, 0x2c, 0x17, 0xe4, 0x26, 0x9e, 0x34, 0xa2, 0xb1, 0x2b, 0xdb, 0xdd, 0x0f, 0x24, 0x24, 0x4e, - 0x9c, 0xf9, 0x19, 0x88, 0x5f, 0xc2, 0x71, 0x8e, 0x73, 0x0a, 0x4c, 0xe7, 0x02, 0x11, 0x87, 0x95, - 0xf8, 0x03, 0xc8, 0x4e, 0x9b, 0x8f, 0xe9, 0x69, 0x44, 0xb4, 0x27, 0xdb, 0x8f, 0xed, 0xe7, 0xc9, - 0xfb, 0xd8, 0xaf, 0xdf, 0x00, 0x2b, 0x64, 0x2f, 0x08, 0xa7, 0x98, 0xfa, 0xa4, 0x3f, 0xe7, 0x4c, - 0x32, 0xd8, 0xd6, 0xcd, 0xfb, 
0x87, 0x61, 0x24, 0xa7, 0x8b, 0x49, 0xdf, 0x67, 0xf1, 0x20, 0x64, - 0x21, 0x1b, 0x68, 0x78, 0xb2, 0x38, 0xd7, 0x23, 0x3d, 0xd0, 0xbd, 0x6c, 0x57, 0xf7, 0x9f, 0x6d, - 0xf0, 0xce, 0x23, 0x42, 0x09, 0xc7, 0xb3, 0x13, 0xce, 0xe6, 0x4c, 0xe0, 0x19, 0x44, 0xa0, 0x3d, - 0x62, 0xd4, 0x27, 0xb6, 0x71, 0x60, 0xf4, 0x5a, 0xee, 0x6e, 0x9a, 0xa0, 0x0c, 0xf0, 0xb2, 0x06, - 0xf6, 0x01, 0x18, 0xb2, 0x38, 0x8e, 0xe4, 0x97, 0x58, 0x4c, 0xed, 0xe6, 0x81, 0xd1, 0xeb, 0xb8, - 0x77, 0xd2, 0x04, 0x95, 0x50, 0xaf, 0xd4, 0x87, 0x9f, 0x82, 0x3b, 0xa7, 0x12, 0x73, 0x39, 0x66, - 0x92, 0x1c, 0xcd, 0x99, 0x3f, 0xb5, 0x4d, 0xcd, 0x0c, 0xd3, 0x04, 0x15, 0x33, 0x99, 0xc4, 0x8d, - 0x95, 0xf0, 0x13, 0xd0, 0x39, 0xa2, 0x41, 0xb1, 0xb3, 0xa5, 0x77, 0x5a, 0x69, 0x82, 0xd6, 0x78, - 0xb6, 0xaf, 0xb2, 0x0a, 0x4e, 0x80, 0xf9, 0x9c, 0x08, 0xbb, 0xad, 0x3f, 0xed, 0x24, 0x4d, 0x90, - 0x1a, 0xfe, 0xf6, 0x07, 0x3a, 0x8a, 0xb1, 0x9c, 0x0e, 0x26, 0x51, 0xd8, 0x3f, 0xa6, 0xf2, 0xb3, - 0x92, 0x55, 0xf1, 0x62, 0x26, 0xa3, 0x17, 0x84, 0x8b, 0x57, 0x83, 0xf8, 0xd5, 0xa1, 0x3f, 0xc5, - 0x11, 0x3d, 0xf4, 0x19, 0x27, 0x87, 0x21, 0x1b, 0x04, 0x58, 0xe2, 0xbe, 0x1b, 0x85, 0xc7, 0x54, - 0x0e, 0xb1, 0x90, 0x84, 0x7b, 0x8a, 0x0d, 0x7e, 0x07, 0x9a, 0x23, 0x66, 0x6f, 0x69, 0x89, 0xa7, - 0x69, 0x82, 0x9a, 0x23, 0x56, 0x9f, 0x42, 0x73, 0xc4, 0x20, 0x01, 0xad, 0x31, 0x91, 0xcc, 0xde, - 0xd6, 0x12, 0xcf, 0xd2, 0x04, 0xe9, 0x71, 0x7d, 0x22, 0x9a, 0x0e, 0x52, 0xb0, 0xfd, 0x60, 0x22, - 0x24, 0x8e, 0xa8, 0xbd, 0xa3, 0x95, 0xce, 0xd2, 0x04, 0xad, 0xa1, 0xfa, 0xc4, 0xd6, 0x8c, 0xf0, - 0x07, 0xb0, 0xf7, 0x6c, 0xc1, 0xf8, 0x22, 0x3e, 0x95, 0xf8, 0x7b, 0x62, 0xef, 0x6a, 0xcd, 0x6f, - 0xd2, 0x04, 0x95, 0xe1, 0xfa, 0x74, 0xcb, 0xac, 0xb0, 0x0b, 0xb6, 0x4e, 0xb0, 0x10, 0x24, 0xb0, - 0xc1, 0x81, 0xd1, 0xdb, 0x71, 0x41, 0x9a, 0xa0, 0x15, 0xe2, 0xad, 0x5a, 0xb5, 0x66, 0x38, 0x63, - 0x6a, 0xcd, 0x5e, 0xb1, 0x26, 0x43, 0xbc, 0x55, 0x0b, 0xef, 0x83, 0xfd, 0x63, 0x21, 0x16, 0x84, - 0x3f, 0x08, 0x02, 0x4e, 0x84, 0xb0, 0x3b, 0x3a, 0x8a, 0xbb, 0x69, 
0x82, 0xaa, 0x13, 0x5e, 0x75, - 0x08, 0x7f, 0x04, 0x9d, 0x75, 0x9e, 0x0d, 0x99, 0x90, 0xf6, 0xbe, 0xde, 0xf7, 0x5c, 0x5d, 0xe7, - 0x32, 0x5e, 0x5f, 0xf8, 0x15, 0xda, 0xee, 0xdf, 0x4d, 0x60, 0x3d, 0xca, 0x5f, 0x8e, 0x21, 0xa3, - 0xe7, 0x51, 0x08, 0x7b, 0x60, 0x67, 0xb4, 0x88, 0x47, 0x2c, 0x20, 0x42, 0xa7, 0xbc, 0xe9, 0x76, - 0xd2, 0x04, 0xe5, 0x98, 0x97, 0xf7, 0xe0, 0x87, 0x60, 0xf7, 0x49, 0x44, 0x33, 0x43, 0x75, 0xde, - 0xb7, 0xdd, 0xfd, 0x34, 0x41, 0x05, 0xe8, 0x15, 0x5d, 0xf8, 0x39, 0xb0, 0x9e, 0x44, 0x54, 0x99, - 0x7a, 0x36, 0xe5, 0x44, 0x4c, 0xd9, 0x2c, 0xd0, 0x79, 0xdf, 0x76, 0xef, 0xa5, 0x09, 0xda, 0x98, - 0xf3, 0x36, 0x90, 0x15, 0x83, 0xba, 0xa4, 0x05, 0x43, 0xab, 0xc2, 0x50, 0x99, 0xf3, 0x36, 0x10, - 0x75, 0xd7, 0xd6, 0xf1, 0x7f, 0x41, 0xc8, 0xea, 0x3d, 0xd0, 0x77, 0xad, 0x04, 0xd7, 0x78, 0xd7, - 0x4a, 0xac, 0xdd, 0x9f, 0x4d, 0x00, 0x6f, 0x7a, 0x3d, 0xfe, 0xa8, 0xea, 0xa1, 0xb2, 0xbb, 0x79, - 0x4b, 0x0f, 0x9b, 0x7a, 0xcf, 0xff, 0xf1, 0xd0, 0xac, 0x30, 0xdc, 0xd2, 0xc3, 0xd6, 0x5b, 0xf4, - 0x10, 0x0e, 0xc1, 0xdd, 0xc7, 0x58, 0xc8, 0x35, 0x94, 0x95, 0xa5, 0xb6, 0x2e, 0x01, 0xef, 0xa5, - 0x09, 0xda, 0x9c, 0xf4, 0x36, 0xa1, 0xae, 0x0f, 0xac, 0xa7, 0x34, 0x64, 0x11, 0x0d, 0x55, 0x81, - 0x08, 0x1e, 0x47, 0x42, 0xaa, 0x24, 0x7f, 0x18, 0x71, 0xe2, 0x4b, 0xdb, 0x38, 0x30, 0x7b, 0xad, - 0x2c, 0xc9, 0x33, 0xc4, 0x5b, 0xb5, 0xea, 0xa4, 0x1e, 0x92, 0x19, 0x09, 0xb1, 0x24, 0xca, 0x75, - 0xb5, 0x4c, 0x9f, 0x54, 0x0e, 0x7a, 0x45, 0xb7, 0xfb, 0xaf, 0x09, 0xde, 0xcd, 0x47, 0xa7, 0x43, - 0xa5, 0x74, 0x4c, 0xcf, 0x19, 0x7c, 0x09, 0xc0, 0x19, 0x93, 0x78, 0x76, 0xc2, 0x5e, 0x12, 0xae, - 0xcf, 0xbb, 0xe3, 0x7e, 0xad, 0x6a, 0x65, 0x81, 0xd6, 0xe7, 0x5d, 0x89, 0x14, 0x4a, 0xb0, 0xfb, - 0x95, 0x20, 0x41, 0xa6, 0x9b, 0xd5, 0xe8, 0xb1, 0xfa, 0xfa, 0x1c, 0xac, 0x4f, 0xb6, 0xe0, 0xcc, - 0xc3, 0xcd, 0xde, 0x76, 0xf3, 0x46, 0xb8, 0x35, 0x3f, 0xed, 0x25, 0xd2, 0x75, 0xb8, 0x99, 0x6e, - 0xab, 0x1a, 0x6e, 0xcd, 0xb2, 0x05, 0xe7, 0x07, 0xf7, 0xc1, 0xbe, 0x3a, 0xe9, 0x31, 0x9e, 0x2d, - 0xc8, 
0xd9, 0xeb, 0x39, 0x81, 0xdb, 0xfa, 0xc7, 0xc3, 0x6a, 0xc0, 0x2d, 0xf5, 0x77, 0x60, 0x19, - 0x70, 0x27, 0x2b, 0xe2, 0x56, 0x13, 0xee, 0xe5, 0x75, 0xd6, 0x32, 0xdd, 0xd1, 0xc5, 0x95, 0xd3, - 0xb8, 0xbc, 0x72, 0x1a, 0x6f, 0xae, 0x1c, 0xe3, 0xa7, 0xa5, 0x63, 0xfc, 0xba, 0x74, 0x8c, 0xdf, - 0x97, 0x8e, 0x71, 0xb1, 0x74, 0x8c, 0xcb, 0xa5, 0x63, 0xfc, 0xb9, 0x74, 0x8c, 0xbf, 0x96, 0x4e, - 0xe3, 0xcd, 0xd2, 0x31, 0x7e, 0xb9, 0x76, 0x1a, 0x17, 0xd7, 0x4e, 0xe3, 0xf2, 0xda, 0x69, 0x7c, - 0x7b, 0x4f, 0xbc, 0x16, 0x92, 0xc4, 0xa7, 0x31, 0xe6, 0x72, 0xc8, 0xa8, 0xe4, 0xd8, 0x97, 0x62, - 0xb2, 0xa5, 0x7f, 0xe7, 0x3e, 0xfe, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x52, 0x6e, 0xfa, 0xb5, 0x18, - 0x0a, 0x00, 0x00, + // 894 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4f, 0x6f, 0x23, 0x35, + 0x14, 0xcf, 0xe4, 0x5f, 0x5b, 0x37, 0xdd, 0x9d, 0x35, 0x8b, 0x34, 0xe2, 0x30, 0xae, 0x72, 0x8a, + 0x40, 0x4d, 0x24, 0x40, 0x5a, 0x09, 0x2e, 0xec, 0x64, 0xcb, 0x52, 0xa9, 0x9b, 0xed, 0x4e, 0x4b, + 0x60, 0xb9, 0x20, 0x67, 0xc6, 0x9d, 0x8c, 0xc8, 0xd8, 0x95, 0xed, 0xec, 0x1f, 0x24, 0x24, 0xc4, + 0x95, 0x0b, 0x1f, 0x03, 0xf1, 0x49, 0x38, 0xf6, 0xd8, 0xd3, 0x40, 0xd3, 0x0b, 0x8c, 0x38, 0xac, + 0xc4, 0x17, 0x40, 0xf6, 0x24, 0xf3, 0x27, 0x39, 0xad, 0x18, 0xf5, 0x64, 0xbf, 0x9f, 0xed, 0xdf, + 0x6f, 0xde, 0x9b, 0x67, 0xbf, 0x07, 0xcc, 0x80, 0xbd, 0x20, 0x9c, 0x62, 0xea, 0x91, 0xfe, 0x05, + 0x67, 0x92, 0xc1, 0x96, 0x1e, 0xde, 0x3b, 0x08, 0x42, 0x39, 0x9d, 0x4f, 0xfa, 0x1e, 0x8b, 0x06, + 0x01, 0x0b, 0xd8, 0x40, 0xc3, 0x93, 0xf9, 0xb9, 0xb6, 0xb4, 0xa1, 0x67, 0xe9, 0xa9, 0xee, 0x3f, + 0x5b, 0xe0, 0xee, 0x63, 0x42, 0x09, 0xc7, 0xb3, 0x13, 0xce, 0x2e, 0x98, 0xc0, 0x33, 0x88, 0x40, + 0x6b, 0xc4, 0xa8, 0x47, 0x2c, 0x63, 0xdf, 0xe8, 0x35, 0x9d, 0x9d, 0x24, 0x46, 0x29, 0xe0, 0xa6, + 0x03, 0xec, 0x03, 0x30, 0x64, 0x51, 0x14, 0xca, 0x2f, 0xb0, 0x98, 0x5a, 0xf5, 0x7d, 0xa3, 0xd7, + 0x71, 0xee, 0x24, 0x31, 0x2a, 0xa0, 0x6e, 0x61, 0x0e, 0x3f, 0x01, 0x77, 
0x4e, 0x25, 0xe6, 0x72, + 0xcc, 0x24, 0x39, 0xbc, 0x60, 0xde, 0xd4, 0x6a, 0x68, 0x66, 0x98, 0xc4, 0x68, 0x6d, 0xc5, 0x5d, + 0xb3, 0xe1, 0xc7, 0xa0, 0x73, 0x48, 0xfd, 0xfc, 0x64, 0x53, 0x9f, 0x34, 0x93, 0x18, 0x95, 0x70, + 0xb7, 0x64, 0xc1, 0x09, 0x68, 0x3c, 0x27, 0xc2, 0x6a, 0xe9, 0x4f, 0x3b, 0x49, 0x62, 0xa4, 0xcc, + 0xdf, 0xfe, 0x40, 0x87, 0x11, 0x96, 0xd3, 0xc1, 0x24, 0x0c, 0xfa, 0x47, 0x54, 0x7e, 0x5a, 0x08, + 0x55, 0x34, 0x9f, 0xc9, 0xf0, 0x05, 0xe1, 0xe2, 0xd5, 0x20, 0x7a, 0x75, 0xe0, 0x4d, 0x71, 0x48, + 0x0f, 0x3c, 0xc6, 0xc9, 0x41, 0xc0, 0x06, 0x3e, 0x96, 0xb8, 0xef, 0x84, 0xc1, 0x11, 0x95, 0x43, + 0x2c, 0x24, 0xe1, 0xae, 0x62, 0x83, 0xdf, 0x82, 0xfa, 0x88, 0x59, 0x6d, 0x2d, 0xf1, 0x34, 0x89, + 0x51, 0x7d, 0xc4, 0xaa, 0x53, 0xa8, 0x8f, 0x18, 0x24, 0xa0, 0x39, 0x26, 0x92, 0x59, 0x5b, 0x5a, + 0xe2, 0x59, 0x12, 0x23, 0x6d, 0x57, 0x27, 0xa2, 0xe9, 0x20, 0x05, 0x5b, 0x0f, 0x27, 0x42, 0xe2, + 0x90, 0x5a, 0xdb, 0x5a, 0xe9, 0x2c, 0x89, 0xd1, 0x0a, 0xaa, 0x4e, 0x6c, 0xc5, 0x08, 0xbf, 0x07, + 0xbb, 0xcf, 0xe6, 0x8c, 0xcf, 0xa3, 0x53, 0x89, 0xbf, 0x23, 0xd6, 0x8e, 0xd6, 0xfc, 0x3a, 0x89, + 0x51, 0x11, 0xae, 0x4e, 0xb7, 0xc8, 0x0a, 0xbb, 0xa0, 0x7d, 0x82, 0x85, 0x20, 0xbe, 0x05, 0xf6, + 0x8d, 0xde, 0xb6, 0x03, 0x92, 0x18, 0x2d, 0x11, 0x77, 0x39, 0xaa, 0x3d, 0xc3, 0x19, 0x53, 0x7b, + 0x76, 0xf3, 0x3d, 0x29, 0xe2, 0x2e, 0x47, 0xf8, 0x00, 0xec, 0x1d, 0x09, 0x31, 0x27, 0xfc, 0xa1, + 0xef, 0x73, 0x22, 0x84, 0xd5, 0xd1, 0x5e, 0xdc, 0x4b, 0x62, 0x54, 0x5e, 0x70, 0xcb, 0x26, 0xfc, + 0x01, 0x74, 0x56, 0xf7, 0x6c, 0xc8, 0x84, 0xb4, 0xf6, 0xf4, 0xb9, 0xe7, 0x2a, 0x9d, 0x8b, 0x78, + 0x75, 0xee, 0x97, 0x68, 0xbb, 0x7f, 0xd7, 0x81, 0xf9, 0x38, 0x7b, 0x39, 0x86, 0x8c, 0x9e, 0x87, + 0x01, 0xec, 0x81, 0xed, 0xd1, 0x3c, 0x1a, 0x31, 0x9f, 0x08, 0x7d, 0xe5, 0x1b, 0x4e, 0x27, 0x89, + 0x51, 0x86, 0xb9, 0xd9, 0x0c, 0x7e, 0x00, 0x76, 0x9e, 0x84, 0x34, 0x0d, 0xa8, 0xbe, 0xf7, 0x2d, + 0x67, 0x2f, 0x89, 0x51, 0x0e, 0xba, 0xf9, 0x14, 0x7e, 0x06, 0xcc, 0x27, 0x21, 0x55, 0x41, 0x3d, + 0x9b, 0x72, 
0x22, 0xa6, 0x6c, 0xe6, 0xeb, 0x7b, 0xdf, 0x72, 0xee, 0x27, 0x31, 0xda, 0x58, 0x73, + 0x37, 0x90, 0x25, 0x83, 0x4a, 0xd2, 0x9c, 0xa1, 0x59, 0x62, 0x28, 0xad, 0xb9, 0x1b, 0x88, 0xca, + 0xb5, 0x95, 0xff, 0x9f, 0x13, 0xb2, 0x7c, 0x0f, 0x74, 0xae, 0x15, 0xe0, 0x0a, 0x73, 0xad, 0xc0, + 0xda, 0xfd, 0xb9, 0x09, 0xe0, 0x7a, 0xac, 0xc7, 0x1f, 0x96, 0x63, 0xa8, 0xc2, 0x5d, 0x7f, 0xcb, + 0x18, 0xd6, 0xf5, 0x99, 0xff, 0x13, 0xc3, 0x46, 0x89, 0xe1, 0x2d, 0x63, 0xd8, 0xbc, 0xc5, 0x18, + 0xc2, 0x9f, 0x0c, 0x70, 0xf7, 0x98, 0x09, 0x79, 0x9b, 0x3f, 0x71, 0x5d, 0x10, 0x0e, 0xc1, 0xbd, + 0x63, 0x9c, 0x43, 0x69, 0x6d, 0x6c, 0xeb, 0x3a, 0xf4, 0x6e, 0x12, 0xa3, 0xcd, 0x45, 0x77, 0x13, + 0xea, 0x7a, 0xc0, 0x7c, 0x4a, 0x03, 0x16, 0xd2, 0x40, 0x55, 0x29, 0xff, 0x38, 0x14, 0x52, 0xbd, + 0x34, 0x8f, 0x42, 0x4e, 0x3c, 0x69, 0x19, 0xfb, 0x8d, 0x5e, 0x33, 0x7d, 0x69, 0x52, 0xc4, 0x5d, + 0x8e, 0x2a, 0x5d, 0x1e, 0x91, 0x19, 0x09, 0xb0, 0x24, 0xea, 0xd7, 0xab, 0x6d, 0x3a, 0x5d, 0x32, + 0xd0, 0xcd, 0xa7, 0xdd, 0x7f, 0x1b, 0xe0, 0x9d, 0xcc, 0x3a, 0x1d, 0x2a, 0xa5, 0x23, 0x7a, 0xce, + 0xe0, 0x4b, 0x00, 0xce, 0x98, 0xc4, 0xb3, 0x13, 0xf6, 0x92, 0x70, 0x9d, 0x74, 0x1d, 0xe7, 0x2b, + 0x55, 0xb0, 0x73, 0xb4, 0xba, 0xf8, 0x15, 0x48, 0xa1, 0x04, 0x3b, 0x5f, 0x0a, 0xe2, 0xa7, 0xba, + 0x69, 0xa3, 0x30, 0x56, 0x5f, 0x9f, 0x81, 0xd5, 0xc9, 0xe6, 0x9c, 0x99, 0xbb, 0x69, 0x81, 0x69, + 0xac, 0xb9, 0x5b, 0x71, 0x7d, 0x29, 0x90, 0xae, 0xdc, 0x4d, 0x75, 0x9b, 0x65, 0x77, 0x2b, 0x96, + 0xcd, 0x39, 0xdf, 0x7f, 0x00, 0xf6, 0xd4, 0x9f, 0x1e, 0xe3, 0xd9, 0x9c, 0x9c, 0xbd, 0xbe, 0x20, + 0x70, 0x4b, 0x77, 0x3f, 0x66, 0x0d, 0xb6, 0x55, 0x8b, 0x62, 0x1a, 0x70, 0x3b, 0xed, 0x24, 0xcc, + 0x3a, 0xdc, 0xcd, 0x8a, 0xbd, 0xd9, 0x70, 0x46, 0x97, 0xd7, 0x76, 0xed, 0xea, 0xda, 0xae, 0xbd, + 0xb9, 0xb6, 0x8d, 0x1f, 0x17, 0xb6, 0xf1, 0xeb, 0xc2, 0x36, 0x7e, 0x5f, 0xd8, 0xc6, 0xe5, 0xc2, + 0x36, 0xae, 0x16, 0xb6, 0xf1, 0xe7, 0xc2, 0x36, 0xfe, 0x5a, 0xd8, 0xb5, 0x37, 0x0b, 0xdb, 0xf8, + 0xe5, 0xc6, 0xae, 0x5d, 0xde, 0xd8, 0xb5, 0xab, 
0x1b, 0xbb, 0xf6, 0xcd, 0x7d, 0xf1, 0x5a, 0x48, + 0x12, 0x9d, 0x46, 0x98, 0xcb, 0x21, 0xa3, 0x92, 0x63, 0x4f, 0x8a, 0x49, 0x5b, 0xf7, 0x94, 0x1f, + 0xfd, 0x17, 0x00, 0x00, 0xff, 0xff, 0x4c, 0xbe, 0x97, 0x28, 0x9d, 0x0a, 0x00, 0x00, } func (x VoteValueType) String() string { @@ -674,6 +681,12 @@ func (this *GovernanceConfigV2) Equal(that interface{}) bool { return false } } + { + __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} + if !__caster.Equal(this.LostProposalFee, that1.LostProposalFee) { + return false + } + } if this.LastProposalNonce != that1.LastProposalNonce { return false } @@ -801,12 +814,13 @@ func (this *GovernanceConfigV2) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 9) + s := make([]string, 0, 10) s = append(s, "&systemSmartContracts.GovernanceConfigV2{") s = append(s, "MinQuorum: "+fmt.Sprintf("%#v", this.MinQuorum)+",\n") s = append(s, "MinPassThreshold: "+fmt.Sprintf("%#v", this.MinPassThreshold)+",\n") s = append(s, "MinVetoThreshold: "+fmt.Sprintf("%#v", this.MinVetoThreshold)+",\n") s = append(s, "ProposalFee: "+fmt.Sprintf("%#v", this.ProposalFee)+",\n") + s = append(s, "LostProposalFee: "+fmt.Sprintf("%#v", this.LostProposalFee)+",\n") s = append(s, "LastProposalNonce: "+fmt.Sprintf("%#v", this.LastProposalNonce)+",\n") s = append(s, "}") return strings.Join(s, "") @@ -1058,8 +1072,19 @@ func (m *GovernanceConfigV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { if m.LastProposalNonce != 0 { i = encodeVarintGovernance(dAtA, i, uint64(m.LastProposalNonce)) i-- - dAtA[i] = 0x28 + dAtA[i] = 0x30 + } + { + __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} + size := __caster.Size(m.LostProposalFee) + i -= size + if _, err := __caster.MarshalTo(m.LostProposalFee, dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintGovernance(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2a { __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} size := 
__caster.Size(m.ProposalFee) @@ -1337,6 +1362,11 @@ func (m *GovernanceConfigV2) Size() (n int) { l = __caster.Size(m.ProposalFee) n += 1 + l + sovGovernance(uint64(l)) } + { + __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} + l = __caster.Size(m.LostProposalFee) + n += 1 + l + sovGovernance(uint64(l)) + } if m.LastProposalNonce != 0 { n += 1 + sovGovernance(uint64(m.LastProposalNonce)) } @@ -1446,6 +1476,7 @@ func (this *GovernanceConfigV2) String() string { `MinPassThreshold:` + fmt.Sprintf("%v", this.MinPassThreshold) + `,`, `MinVetoThreshold:` + fmt.Sprintf("%v", this.MinVetoThreshold) + `,`, `ProposalFee:` + fmt.Sprintf("%v", this.ProposalFee) + `,`, + `LostProposalFee:` + fmt.Sprintf("%v", this.LostProposalFee) + `,`, `LastProposalNonce:` + fmt.Sprintf("%v", this.LastProposalNonce) + `,`, `}`, }, "") @@ -2197,6 +2228,44 @@ func (m *GovernanceConfigV2) Unmarshal(dAtA []byte) error { } iNdEx = postIndex case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LostProposalFee", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGovernance + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGovernance + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGovernance + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + { + __caster := &github_com_multiversx_mx_chain_core_go_data.BigIntCaster{} + if tmp, err := __caster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } else { + m.LostProposalFee = tmp + } + } + iNdEx = postIndex + case 6: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field LastProposalNonce", wireType) } From 7aa5793c1ada01b8e59f6d71ea5d6d87444cc7a1 Mon Sep 17 00:00:00 2001 From: Iuga Mihai Date: Fri, 5 May 2023 14:31:13 +0300 
Subject: [PATCH 132/221] integrate new host --- cmd/node/config/external.toml | 5 ++-- config/externalConfig.go | 7 +++--- factory/status/statusComponents.go | 34 +++++++++++++------------- go.mod | 2 +- go.sum | 4 ++-- outport/factory/outportFactory.go | 38 +++++++++++++++++------------- 6 files changed, 49 insertions(+), 41 deletions(-) diff --git a/cmd/node/config/external.toml b/cmd/node/config/external.toml index 2c2c85232cf..f539e707099 100644 --- a/cmd/node/config/external.toml +++ b/cmd/node/config/external.toml @@ -41,10 +41,10 @@ # marshalled structures in block events data MarshallerType = "json" -[WebSocketsConnector] +[WebSocketConnector] # This flag shall only be used for observer nodes Enabled = false - # This flag will start the WebSockets connector as server or client + # This flag will start the WebSocket connector as server or client IsServer = false # The url of the web-sockets client/server URL = "127.0.0.1:22111" @@ -53,3 +53,4 @@ MarshallerType = "json" # The number of seconds when the client will try again to send the data RetryDurationInSec = 5 + BlockingAckOnError = false diff --git a/config/externalConfig.go b/config/externalConfig.go index 55655b4cb0e..2a9aa208ac6 100644 --- a/config/externalConfig.go +++ b/config/externalConfig.go @@ -4,7 +4,7 @@ package config type ExternalConfig struct { ElasticSearchConnector ElasticSearchConfig EventNotifierConnector EventNotifierConfig - WebSocketsConnector WebSocketsDriverConfig + WebSocketConnector WebSocketDriverConfig } // ElasticSearchConfig will hold the configuration for the elastic search @@ -38,11 +38,12 @@ type CovalentConfig struct { RouteAcknowledgeData string } -// WebSocketsDriverConfig will hold the configuration for web socket driver -type WebSocketsDriverConfig struct { +// WebSocketDriverConfig will hold the configuration for WebSocket driver +type WebSocketDriverConfig struct { Enabled bool IsServer bool WithAcknowledge bool + BlockingAckOnError bool URL string MarshallerType 
string RetryDurationInSec int diff --git a/factory/status/statusComponents.go b/factory/status/statusComponents.go index 0a57d70a683..149726cf395 100644 --- a/factory/status/statusComponents.go +++ b/factory/status/statusComponents.go @@ -9,8 +9,8 @@ import ( nodeData "github.com/multiversx/mx-chain-core-go/data" outportCore "github.com/multiversx/mx-chain-core-go/data/outport" factoryMarshalizer "github.com/multiversx/mx-chain-core-go/marshal/factory" - "github.com/multiversx/mx-chain-core-go/webSockets/data" - wsDriverFactory "github.com/multiversx/mx-chain-core-go/webSockets/factory" + "github.com/multiversx/mx-chain-core-go/webSocket/data" + wsDriverFactory "github.com/multiversx/mx-chain-core-go/webSocket/factory" indexerFactory "github.com/multiversx/mx-chain-es-indexer-go/process/factory" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/statistics" @@ -205,7 +205,7 @@ func (pc *statusComponents) Close() error { // createOutportDriver creates a new outport.OutportHandler which is used to register outport drivers // once a driver is subscribed it will receive data through the implemented outport.Driver methods func (scf *statusComponentsFactory) createOutportDriver() (outport.OutportHandler, error) { - webSocketsSenderDriverFactoryArgs, err := scf.makeWebSocketsDriverArgs() + webSocketSenderDriverFactoryArgs, err := scf.makeWebSocketDriverArgs() if err != nil { return nil, err } @@ -219,9 +219,9 @@ func (scf *statusComponentsFactory) createOutportDriver() (outport.OutportHandle RetrialInterval: common.RetrialIntervalForOutportDriver, ElasticIndexerFactoryArgs: scf.makeElasticIndexerArgs(), EventNotifierFactoryArgs: eventNotifierArgs, - WebSocketsSenderDriverFactoryArgs: outportDriverFactory.WrappedOutportDriverWebSocketsSenderFactoryArgs{ - Enabled: scf.externalConfig.WebSocketsConnector.Enabled, - ArgsWebSocketsDriverFactory: webSocketsSenderDriverFactoryArgs, + WebSocketSenderDriverFactoryArgs: 
outportDriverFactory.WrappedOutportDriverWebSocketSenderFactoryArgs{ + Enabled: scf.externalConfig.WebSocketConnector.Enabled, + ArgsWebSocketDriverFactory: webSocketSenderDriverFactoryArgs, }, } @@ -266,26 +266,26 @@ func (scf *statusComponentsFactory) makeEventNotifierArgs() (*outportDriverFacto }, nil } -func (scf *statusComponentsFactory) makeWebSocketsDriverArgs() (wsDriverFactory.ArgsWebSocketsDriverFactory, error) { - if !scf.externalConfig.WebSocketsConnector.Enabled { - return wsDriverFactory.ArgsWebSocketsDriverFactory{}, nil +func (scf *statusComponentsFactory) makeWebSocketDriverArgs() (wsDriverFactory.ArgsWebSocketDriverFactory, error) { + if !scf.externalConfig.WebSocketConnector.Enabled { + return wsDriverFactory.ArgsWebSocketDriverFactory{}, nil } - marshaller, err := factoryMarshalizer.NewMarshalizer(scf.externalConfig.WebSocketsConnector.MarshallerType) + marshaller, err := factoryMarshalizer.NewMarshalizer(scf.externalConfig.WebSocketConnector.MarshallerType) if err != nil { - return wsDriverFactory.ArgsWebSocketsDriverFactory{}, err + return wsDriverFactory.ArgsWebSocketDriverFactory{}, err } - return wsDriverFactory.ArgsWebSocketsDriverFactory{ + return wsDriverFactory.ArgsWebSocketDriverFactory{ Marshaller: marshaller, WebSocketConfig: data.WebSocketConfig{ - URL: scf.externalConfig.WebSocketsConnector.URL, - WithAcknowledge: scf.externalConfig.WebSocketsConnector.WithAcknowledge, - IsServer: scf.externalConfig.WebSocketsConnector.IsServer, - RetryDurationInSec: scf.externalConfig.WebSocketsConnector.RetryDurationInSec, + URL: scf.externalConfig.WebSocketConnector.URL, + WithAcknowledge: scf.externalConfig.WebSocketConnector.WithAcknowledge, + IsServer: scf.externalConfig.WebSocketConnector.IsServer, + RetryDurationInSec: scf.externalConfig.WebSocketConnector.RetryDurationInSec, + BlockingAckOnError: scf.externalConfig.WebSocketConnector.BlockingAckOnError, }, Uint64ByteSliceConverter: scf.coreComponents.Uint64ByteSliceConverter(), Log: log, - 
WithAcknowledge: scf.externalConfig.WebSocketsConnector.WithAcknowledge, }, nil } diff --git a/go.mod b/go.mod index cce8e71fee2..7b3a269c0c0 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-core-go v1.2.1-0.20230412120535-3d7236f4510d + github.com/multiversx/mx-chain-core-go v1.2.1-0.20230505112603-2cb497577ad1 github.com/multiversx/mx-chain-crypto-go v1.2.5 github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96 github.com/multiversx/mx-chain-logger-go v1.0.11 diff --git a/go.sum b/go.sum index 4da83388a6c..9ae60259e7e 100644 --- a/go.sum +++ b/go.sum @@ -614,8 +614,8 @@ github.com/multiversx/mx-chain-core-go v1.1.31/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZ github.com/multiversx/mx-chain-core-go v1.1.34/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.0/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1-0.20230329082847-b78e96c3ad5a/go.mod h1:/lovncjwo+pXQ7IAERwNzwCifeH7SAWk0DGqjorX2bc= -github.com/multiversx/mx-chain-core-go v1.2.1-0.20230412120535-3d7236f4510d h1:iyGcH5HJB83y79qD4SSOOLkL/6F154wrrS2d9GXfHC0= -github.com/multiversx/mx-chain-core-go v1.2.1-0.20230412120535-3d7236f4510d/go.mod h1:EMXipgB7JzH9ozDhGZwjY1t6UQBtaVgxb1aMo/gzfEA= +github.com/multiversx/mx-chain-core-go v1.2.1-0.20230505112603-2cb497577ad1 h1:DnaoDTROvtbjXhV7HmB5969GcjG87U0Jvo/letH1uvE= +github.com/multiversx/mx-chain-core-go v1.2.1-0.20230505112603-2cb497577ad1/go.mod h1:/lovncjwo+pXQ7IAERwNzwCifeH7SAWk0DGqjorX2bc= github.com/multiversx/mx-chain-crypto-go v1.2.5 h1:tuq3BUNMhKud5DQbZi9DiVAAHUXypizy8zPH0NpTGZk= github.com/multiversx/mx-chain-crypto-go v1.2.5/go.mod h1:teqhNyWEqfMPgNn8sgWXlgtJ1a36jGCnhs/tRpXW6r4= github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96 
h1:okIfLr+NqX04eHNp9k97KuLhpYfLJOjmGZaOia9xcGg= diff --git a/outport/factory/outportFactory.go b/outport/factory/outportFactory.go index 15b55771966..4c42f4b4202 100644 --- a/outport/factory/outportFactory.go +++ b/outport/factory/outportFactory.go @@ -3,23 +3,24 @@ package factory import ( "time" - wsDriverFactory "github.com/multiversx/mx-chain-core-go/webSockets/factory" + "github.com/multiversx/mx-chain-core-go/webSocket/data" + wsDriverFactory "github.com/multiversx/mx-chain-core-go/webSocket/factory" indexerFactory "github.com/multiversx/mx-chain-es-indexer-go/process/factory" "github.com/multiversx/mx-chain-go/outport" ) -// WrappedOutportDriverWebSocketsSenderFactoryArgs extends the wsDriverFactory.OutportDriverWebSocketSenderFactoryArgs structure with the Enabled field -type WrappedOutportDriverWebSocketsSenderFactoryArgs struct { +// WrappedOutportDriverWebSocketSenderFactoryArgs extends the wsDriverFactory.OutportDriverWebSocketSenderFactoryArgs structure with the Enabled field +type WrappedOutportDriverWebSocketSenderFactoryArgs struct { Enabled bool - wsDriverFactory.ArgsWebSocketsDriverFactory + wsDriverFactory.ArgsWebSocketDriverFactory } // OutportFactoryArgs holds the factory arguments of different outport drivers type OutportFactoryArgs struct { - RetrialInterval time.Duration - ElasticIndexerFactoryArgs indexerFactory.ArgsIndexerFactory - EventNotifierFactoryArgs *EventNotifierFactoryArgs - WebSocketsSenderDriverFactoryArgs WrappedOutportDriverWebSocketsSenderFactoryArgs + RetrialInterval time.Duration + ElasticIndexerFactoryArgs indexerFactory.ArgsIndexerFactory + EventNotifierFactoryArgs *EventNotifierFactoryArgs + WebSocketSenderDriverFactoryArgs WrappedOutportDriverWebSocketSenderFactoryArgs } // CreateOutport will create a new instance of OutportHandler @@ -53,7 +54,7 @@ func createAndSubscribeDrivers(outport outport.OutportHandler, args *OutportFact return err } - return createAndSubscribeWebSocketDriver(outport, 
args.WebSocketsSenderDriverFactoryArgs) + return createAndSubscribeWebSocketDriver(outport, args.WebSocketSenderDriverFactoryArgs) } func createAndSubscribeElasticDriverIfNeeded( @@ -98,18 +99,23 @@ func checkArguments(args *OutportFactoryArgs) error { func createAndSubscribeWebSocketDriver( outport outport.OutportHandler, - args WrappedOutportDriverWebSocketsSenderFactoryArgs, + args WrappedOutportDriverWebSocketSenderFactoryArgs, ) error { if !args.Enabled { return nil } - wsFactory, err := wsDriverFactory.NewWebSocketsDriverFactory(args.ArgsWebSocketsDriverFactory) - if err != nil { - return err - } - - wsDriver, err := wsFactory.Create() + wsDriver, err := wsDriverFactory.NewWebSocketDriver(wsDriverFactory.ArgsWebSocketDriverFactory{ + WebSocketConfig: data.WebSocketConfig{ + URL: args.WebSocketConfig.URL, + WithAcknowledge: args.WebSocketConfig.WithAcknowledge, + IsServer: args.WebSocketConfig.IsServer, + RetryDurationInSec: args.WebSocketConfig.RetryDurationInSec, + }, + Marshaller: args.Marshaller, + Uint64ByteSliceConverter: args.Uint64ByteSliceConverter, + Log: args.Log, + }) if err != nil { return err } From d141a5451d71ee90917dc798008b9db8e7c94ed4 Mon Sep 17 00:00:00 2001 From: robertsasu Date: Fri, 5 May 2023 14:40:54 +0300 Subject: [PATCH 133/221] adding lost proposal fee --- vm/systemSmartContracts/governance.go | 43 ++++++++++++++-------- vm/systemSmartContracts/governance.proto | 2 +- vm/systemSmartContracts/governance_test.go | 40 ++++++++++++-------- 3 files changed, 52 insertions(+), 33 deletions(-) diff --git a/vm/systemSmartContracts/governance.go b/vm/systemSmartContracts/governance.go index 8f690c6283b..f49b5d50198 100644 --- a/vm/systemSmartContracts/governance.go +++ b/vm/systemSmartContracts/governance.go @@ -18,6 +18,7 @@ import ( ) const governanceConfigKey = "governanceConfig" +const accumulatedFeeKey = "accumulatedFee" const noncePrefix = "n_" const proposalPrefix = "p_" const yesString = "yes" @@ -605,7 +606,13 @@ func (g 
*governanceContract) closeProposal(args *vmcommon.ContractCallInput) vmc } generalProposal.Closed = true - err = g.computeEndResults(generalProposal) + baseConfig, err := g.getConfig() + if err != nil { + g.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + generalProposal.Passed = g.computeEndResults(generalProposal, baseConfig) if err != nil { g.eei.AddReturnMessage("computeEndResults error " + err.Error()) return vmcommon.UserError @@ -617,7 +624,13 @@ func (g *governanceContract) closeProposal(args *vmcommon.ContractCallInput) vmc return vmcommon.UserError } - err = g.eei.Transfer(args.CallerAddr, args.RecipientAddr, generalProposal.ProposalCost, nil, 0) + tokensToReturn := big.NewInt(0).Set(generalProposal.ProposalCost) + if !generalProposal.Passed { + tokensToReturn.Sub(tokensToReturn, baseConfig.LostProposalFee) + g.addToAccumulatedFees(baseConfig.LostProposalFee) + } + + err = g.eei.Transfer(args.CallerAddr, args.RecipientAddr, tokensToReturn, nil, 0) if err != nil { g.eei.AddReturnMessage(err.Error()) return vmcommon.UserError @@ -633,6 +646,13 @@ func (g *governanceContract) closeProposal(args *vmcommon.ContractCallInput) vmc return vmcommon.Ok } +func (g *governanceContract) addToAccumulatedFees(value *big.Int) { + currentData := g.eei.GetStorage([]byte(accumulatedFeeKey)) + currentValue := big.NewInt(0).SetBytes(currentData) + currentValue.Add(currentValue, value) + g.eei.SetStorage([]byte(accumulatedFeeKey), currentValue.Bytes()) +} + // viewVotingPower returns the total voting power func (g *governanceContract) viewVotingPower(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { err := g.checkViewFuncArguments(args, 1) @@ -838,12 +858,7 @@ func (g *governanceContract) getTotalStakeInSystem() *big.Int { } // computeEndResults computes if a proposal has passed or not based on votes accumulated -func (g *governanceContract) computeEndResults(proposal *GeneralProposal) error { - baseConfig, err := g.getConfig() - if err != nil { - 
return err - } - +func (g *governanceContract) computeEndResults(proposal *GeneralProposal, baseConfig *GovernanceConfigV2) bool { totalVotes := big.NewInt(0).Add(proposal.Yes, proposal.No) totalVotes.Add(totalVotes, proposal.Veto) totalVotes.Add(totalVotes, proposal.Abstain) @@ -853,27 +868,23 @@ func (g *governanceContract) computeEndResults(proposal *GeneralProposal) error if totalVotes.Cmp(minQuorumOutOfStake) == -1 { g.eei.Finish([]byte("Proposal did not reach minQuorum")) - proposal.Passed = false - return nil + return false } minVetoOfTotalVotes := core.GetIntTrimmedPercentageOfValue(totalVotes, float64(baseConfig.MinVetoThreshold)) if proposal.Veto.Cmp(minVetoOfTotalVotes) >= 0 { - proposal.Passed = false g.eei.Finish([]byte("Proposal vetoed")) - return nil + return false } minPassOfTotalVotes := core.GetIntTrimmedPercentageOfValue(totalVotes, float64(baseConfig.MinPassThreshold)) if proposal.Yes.Cmp(minPassOfTotalVotes) >= 0 && proposal.Yes.Cmp(proposal.No) > 0 { g.eei.Finish([]byte("Proposal passed")) - proposal.Passed = true - return nil + return true } g.eei.Finish([]byte("Proposal rejected")) - proposal.Passed = false - return nil + return false } func (g *governanceContract) getActiveFundForDelegator(delegationAddress []byte, address []byte) (*big.Int, error) { diff --git a/vm/systemSmartContracts/governance.proto b/vm/systemSmartContracts/governance.proto index 22adc951121..019a0755eae 100644 --- a/vm/systemSmartContracts/governance.proto +++ b/vm/systemSmartContracts/governance.proto @@ -43,7 +43,7 @@ message GovernanceConfigV2 { float MinPassThreshold = 2 [(gogoproto.jsontag) = "MinPassThreshold"]; float MinVetoThreshold = 3 [(gogoproto.jsontag) = "MinVetoThreshold"]; bytes ProposalFee = 4 [(gogoproto.jsontag) = "ProposalFee", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; - bytes LostProposalFee = 5 [(gogoproto.jsontag) = "ProposalFee", (gogoproto.casttypewith) = 
"math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; + bytes LostProposalFee = 5 [(gogoproto.jsontag) = "LostProposalFee", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; uint64 LastProposalNonce = 6 [(gogoproto.jsontag) = "LastProposalNonce"]; } diff --git a/vm/systemSmartContracts/governance_test.go b/vm/systemSmartContracts/governance_test.go index dc7a4b0eaa5..ec8c868e010 100644 --- a/vm/systemSmartContracts/governance_test.go +++ b/vm/systemSmartContracts/governance_test.go @@ -598,6 +598,11 @@ func TestGovernanceContract_ProposalAlreadyExists(t *testing.T) { gsc.eei.SetStorage([]byte(proposalPrefix+string(proposalIdentifier)), []byte("1")) callInput := createVMInput(big.NewInt(500), "proposal", vm.GovernanceSCAddress, []byte("addr1"), callInputArgs) + + baseConfig, err := gsc.getConfig() + require.Nil(t, err) + fmt.Println(baseConfig) + retCode := gsc.Execute(callInput) require.Equal(t, vmcommon.UserError, retCode) require.Equal(t, eei.GetReturnMessage(), "proposal already exists") @@ -1677,17 +1682,20 @@ func TestGovernanceContract_addNewVote(t *testing.T) { func TestComputeEndResults(t *testing.T) { t.Parallel() + baseConfig := &GovernanceConfigV2{ + MinQuorum: 0.4, + MinPassThreshold: 0.5, + MinVetoThreshold: 0.3, + ProposalFee: big.NewInt(10), + LostProposalFee: big.NewInt(1), + } + retMessage := "" args := createMockGovernanceArgs() args.Eei = &mock.SystemEIStub{ GetStorageCalled: func(key []byte) []byte { if bytes.Equal(key, []byte(governanceConfigKey)) { - configBytes, _ := args.Marshalizer.Marshal(&GovernanceConfigV2{ - MinQuorum: 0.4, - MinPassThreshold: 0.5, - MinVetoThreshold: 0.3, - ProposalFee: big.NewInt(10), - }) + configBytes, _ := args.Marshalizer.Marshal(baseConfig) return configBytes } @@ -1708,8 +1716,8 @@ func TestComputeEndResults(t *testing.T) { Veto: big.NewInt(0), Abstain: big.NewInt(10), } - err := gsc.computeEndResults(didNotPassQuorum) - require.Nil(t, err) + 
passed := gsc.computeEndResults(didNotPassQuorum, baseConfig) + require.False(t, passed) require.Equal(t, "Proposal did not reach minQuorum", retMessage) require.False(t, didNotPassQuorum.Passed) @@ -1719,8 +1727,8 @@ func TestComputeEndResults(t *testing.T) { Veto: big.NewInt(0), Abstain: big.NewInt(10), } - err = gsc.computeEndResults(didNotPassVotes) - require.Nil(t, err) + passed = gsc.computeEndResults(didNotPassVotes, baseConfig) + require.False(t, passed) require.Equal(t, "Proposal rejected", retMessage) require.False(t, didNotPassVotes.Passed) @@ -1730,8 +1738,8 @@ func TestComputeEndResults(t *testing.T) { Veto: big.NewInt(0), Abstain: big.NewInt(10), } - err = gsc.computeEndResults(didNotPassVotes2) - require.Nil(t, err) + passed = gsc.computeEndResults(didNotPassVotes2, baseConfig) + require.False(t, passed) require.Equal(t, "Proposal rejected", retMessage) require.False(t, didNotPassVotes2.Passed) @@ -1741,8 +1749,8 @@ func TestComputeEndResults(t *testing.T) { Veto: big.NewInt(70), Abstain: big.NewInt(10), } - err = gsc.computeEndResults(didNotPassVeto) - require.Nil(t, err) + passed = gsc.computeEndResults(didNotPassVeto, baseConfig) + require.False(t, passed) require.Equal(t, "Proposal vetoed", retMessage) require.False(t, didNotPassVeto.Passed) @@ -1752,8 +1760,8 @@ func TestComputeEndResults(t *testing.T) { Veto: big.NewInt(10), Abstain: big.NewInt(10), } - err = gsc.computeEndResults(pass) - require.Nil(t, err) + passed = gsc.computeEndResults(pass, baseConfig) + require.True(t, passed) require.Equal(t, "Proposal passed", retMessage) require.True(t, pass.Passed) } From a672cebd108ae7209b040e95cc5676958851d2ae Mon Sep 17 00:00:00 2001 From: jules01 Date: Fri, 5 May 2023 14:43:24 +0300 Subject: [PATCH 134/221] - regenerated governance.pb.go --- vm/systemSmartContracts/governance.pb.go | 111 ++++++++++++----------- 1 file changed, 56 insertions(+), 55 deletions(-) diff --git a/vm/systemSmartContracts/governance.pb.go 
b/vm/systemSmartContracts/governance.pb.go index d7cbb16ac1f..49e8acfc63e 100644 --- a/vm/systemSmartContracts/governance.pb.go +++ b/vm/systemSmartContracts/governance.pb.go @@ -268,7 +268,7 @@ type GovernanceConfigV2 struct { MinPassThreshold float32 `protobuf:"fixed32,2,opt,name=MinPassThreshold,proto3" json:"MinPassThreshold"` MinVetoThreshold float32 `protobuf:"fixed32,3,opt,name=MinVetoThreshold,proto3" json:"MinVetoThreshold"` ProposalFee *math_big.Int `protobuf:"bytes,4,opt,name=ProposalFee,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"ProposalFee"` - LostProposalFee *math_big.Int `protobuf:"bytes,5,opt,name=LostProposalFee,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"ProposalFee"` + LostProposalFee *math_big.Int `protobuf:"bytes,5,opt,name=LostProposalFee,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"LostProposalFee"` LastProposalNonce uint64 `protobuf:"varint,6,opt,name=LastProposalNonce,proto3" json:"LastProposalNonce"` } @@ -464,63 +464,64 @@ func init() { func init() { proto.RegisterFile("governance.proto", fileDescriptor_e18a03da5266c714) } var fileDescriptor_e18a03da5266c714 = []byte{ - // 894 bytes of a gzipped FileDescriptorProto + // 902 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4f, 0x6f, 0x23, 0x35, 0x14, 0xcf, 0xe4, 0x5f, 0x5b, 0x37, 0xdd, 0x9d, 0x35, 0x8b, 0x34, 0xe2, 0x30, 0xae, 0x72, 0x8a, 0x40, 0x4d, 0x24, 0x40, 0x5a, 0x09, 0x2e, 0xec, 0x64, 0xcb, 0x52, 0xa9, 0x9b, 0xed, 0x4e, 0x4b, - 0x60, 0xb9, 0x20, 0x67, 0xc6, 0x9d, 0x8c, 0xc8, 0xd8, 0x95, 0xed, 0xec, 0x1f, 0x24, 0x24, 0xc4, - 0x95, 0x0b, 0x1f, 0x03, 0xf1, 0x49, 0x38, 0xf6, 0xd8, 0xd3, 0x40, 0xd3, 0x0b, 0x8c, 0x38, 0xac, - 0xc4, 0x17, 0x40, 0xf6, 0x24, 0xf3, 0x27, 0x39, 0xad, 0x18, 0xf5, 0x64, 0xbf, 0x9f, 0xed, 0xdf, - 0x6f, 0xde, 0x9b, 0x67, 0xbf, 0x07, 0xcc, 0x80, 
0xbd, 0x20, 0x9c, 0x62, 0xea, 0x91, 0xfe, 0x05, - 0x67, 0x92, 0xc1, 0x96, 0x1e, 0xde, 0x3b, 0x08, 0x42, 0x39, 0x9d, 0x4f, 0xfa, 0x1e, 0x8b, 0x06, - 0x01, 0x0b, 0xd8, 0x40, 0xc3, 0x93, 0xf9, 0xb9, 0xb6, 0xb4, 0xa1, 0x67, 0xe9, 0xa9, 0xee, 0x3f, - 0x5b, 0xe0, 0xee, 0x63, 0x42, 0x09, 0xc7, 0xb3, 0x13, 0xce, 0x2e, 0x98, 0xc0, 0x33, 0x88, 0x40, - 0x6b, 0xc4, 0xa8, 0x47, 0x2c, 0x63, 0xdf, 0xe8, 0x35, 0x9d, 0x9d, 0x24, 0x46, 0x29, 0xe0, 0xa6, - 0x03, 0xec, 0x03, 0x30, 0x64, 0x51, 0x14, 0xca, 0x2f, 0xb0, 0x98, 0x5a, 0xf5, 0x7d, 0xa3, 0xd7, - 0x71, 0xee, 0x24, 0x31, 0x2a, 0xa0, 0x6e, 0x61, 0x0e, 0x3f, 0x01, 0x77, 0x4e, 0x25, 0xe6, 0x72, - 0xcc, 0x24, 0x39, 0xbc, 0x60, 0xde, 0xd4, 0x6a, 0x68, 0x66, 0x98, 0xc4, 0x68, 0x6d, 0xc5, 0x5d, - 0xb3, 0xe1, 0xc7, 0xa0, 0x73, 0x48, 0xfd, 0xfc, 0x64, 0x53, 0x9f, 0x34, 0x93, 0x18, 0x95, 0x70, - 0xb7, 0x64, 0xc1, 0x09, 0x68, 0x3c, 0x27, 0xc2, 0x6a, 0xe9, 0x4f, 0x3b, 0x49, 0x62, 0xa4, 0xcc, - 0xdf, 0xfe, 0x40, 0x87, 0x11, 0x96, 0xd3, 0xc1, 0x24, 0x0c, 0xfa, 0x47, 0x54, 0x7e, 0x5a, 0x08, - 0x55, 0x34, 0x9f, 0xc9, 0xf0, 0x05, 0xe1, 0xe2, 0xd5, 0x20, 0x7a, 0x75, 0xe0, 0x4d, 0x71, 0x48, - 0x0f, 0x3c, 0xc6, 0xc9, 0x41, 0xc0, 0x06, 0x3e, 0x96, 0xb8, 0xef, 0x84, 0xc1, 0x11, 0x95, 0x43, - 0x2c, 0x24, 0xe1, 0xae, 0x62, 0x83, 0xdf, 0x82, 0xfa, 0x88, 0x59, 0x6d, 0x2d, 0xf1, 0x34, 0x89, - 0x51, 0x7d, 0xc4, 0xaa, 0x53, 0xa8, 0x8f, 0x18, 0x24, 0xa0, 0x39, 0x26, 0x92, 0x59, 0x5b, 0x5a, - 0xe2, 0x59, 0x12, 0x23, 0x6d, 0x57, 0x27, 0xa2, 0xe9, 0x20, 0x05, 0x5b, 0x0f, 0x27, 0x42, 0xe2, - 0x90, 0x5a, 0xdb, 0x5a, 0xe9, 0x2c, 0x89, 0xd1, 0x0a, 0xaa, 0x4e, 0x6c, 0xc5, 0x08, 0xbf, 0x07, - 0xbb, 0xcf, 0xe6, 0x8c, 0xcf, 0xa3, 0x53, 0x89, 0xbf, 0x23, 0xd6, 0x8e, 0xd6, 0xfc, 0x3a, 0x89, - 0x51, 0x11, 0xae, 0x4e, 0xb7, 0xc8, 0x0a, 0xbb, 0xa0, 0x7d, 0x82, 0x85, 0x20, 0xbe, 0x05, 0xf6, - 0x8d, 0xde, 0xb6, 0x03, 0x92, 0x18, 0x2d, 0x11, 0x77, 0x39, 0xaa, 0x3d, 0xc3, 0x19, 0x53, 0x7b, - 0x76, 0xf3, 0x3d, 0x29, 0xe2, 0x2e, 0x47, 0xf8, 0x00, 0xec, 0x1d, 0x09, 0x31, 0x27, 
0xfc, 0xa1, - 0xef, 0x73, 0x22, 0x84, 0xd5, 0xd1, 0x5e, 0xdc, 0x4b, 0x62, 0x54, 0x5e, 0x70, 0xcb, 0x26, 0xfc, - 0x01, 0x74, 0x56, 0xf7, 0x6c, 0xc8, 0x84, 0xb4, 0xf6, 0xf4, 0xb9, 0xe7, 0x2a, 0x9d, 0x8b, 0x78, - 0x75, 0xee, 0x97, 0x68, 0xbb, 0x7f, 0xd7, 0x81, 0xf9, 0x38, 0x7b, 0x39, 0x86, 0x8c, 0x9e, 0x87, - 0x01, 0xec, 0x81, 0xed, 0xd1, 0x3c, 0x1a, 0x31, 0x9f, 0x08, 0x7d, 0xe5, 0x1b, 0x4e, 0x27, 0x89, - 0x51, 0x86, 0xb9, 0xd9, 0x0c, 0x7e, 0x00, 0x76, 0x9e, 0x84, 0x34, 0x0d, 0xa8, 0xbe, 0xf7, 0x2d, - 0x67, 0x2f, 0x89, 0x51, 0x0e, 0xba, 0xf9, 0x14, 0x7e, 0x06, 0xcc, 0x27, 0x21, 0x55, 0x41, 0x3d, - 0x9b, 0x72, 0x22, 0xa6, 0x6c, 0xe6, 0xeb, 0x7b, 0xdf, 0x72, 0xee, 0x27, 0x31, 0xda, 0x58, 0x73, - 0x37, 0x90, 0x25, 0x83, 0x4a, 0xd2, 0x9c, 0xa1, 0x59, 0x62, 0x28, 0xad, 0xb9, 0x1b, 0x88, 0xca, - 0xb5, 0x95, 0xff, 0x9f, 0x13, 0xb2, 0x7c, 0x0f, 0x74, 0xae, 0x15, 0xe0, 0x0a, 0x73, 0xad, 0xc0, - 0xda, 0xfd, 0xb9, 0x09, 0xe0, 0x7a, 0xac, 0xc7, 0x1f, 0x96, 0x63, 0xa8, 0xc2, 0x5d, 0x7f, 0xcb, - 0x18, 0xd6, 0xf5, 0x99, 0xff, 0x13, 0xc3, 0x46, 0x89, 0xe1, 0x2d, 0x63, 0xd8, 0xbc, 0xc5, 0x18, - 0xc2, 0x9f, 0x0c, 0x70, 0xf7, 0x98, 0x09, 0x79, 0x9b, 0x3f, 0x71, 0x5d, 0x10, 0x0e, 0xc1, 0xbd, - 0x63, 0x9c, 0x43, 0x69, 0x6d, 0x6c, 0xeb, 0x3a, 0xf4, 0x6e, 0x12, 0xa3, 0xcd, 0x45, 0x77, 0x13, - 0xea, 0x7a, 0xc0, 0x7c, 0x4a, 0x03, 0x16, 0xd2, 0x40, 0x55, 0x29, 0xff, 0x38, 0x14, 0x52, 0xbd, - 0x34, 0x8f, 0x42, 0x4e, 0x3c, 0x69, 0x19, 0xfb, 0x8d, 0x5e, 0x33, 0x7d, 0x69, 0x52, 0xc4, 0x5d, - 0x8e, 0x2a, 0x5d, 0x1e, 0x91, 0x19, 0x09, 0xb0, 0x24, 0xea, 0xd7, 0xab, 0x6d, 0x3a, 0x5d, 0x32, - 0xd0, 0xcd, 0xa7, 0xdd, 0x7f, 0x1b, 0xe0, 0x9d, 0xcc, 0x3a, 0x1d, 0x2a, 0xa5, 0x23, 0x7a, 0xce, - 0xe0, 0x4b, 0x00, 0xce, 0x98, 0xc4, 0xb3, 0x13, 0xf6, 0x92, 0x70, 0x9d, 0x74, 0x1d, 0xe7, 0x2b, - 0x55, 0xb0, 0x73, 0xb4, 0xba, 0xf8, 0x15, 0x48, 0xa1, 0x04, 0x3b, 0x5f, 0x0a, 0xe2, 0xa7, 0xba, - 0x69, 0xa3, 0x30, 0x56, 0x5f, 0x9f, 0x81, 0xd5, 0xc9, 0xe6, 0x9c, 0x99, 0xbb, 0x69, 0x81, 0x69, - 0xac, 0xb9, 0x5b, 0x71, 
0x7d, 0x29, 0x90, 0xae, 0xdc, 0x4d, 0x75, 0x9b, 0x65, 0x77, 0x2b, 0x96, - 0xcd, 0x39, 0xdf, 0x7f, 0x00, 0xf6, 0xd4, 0x9f, 0x1e, 0xe3, 0xd9, 0x9c, 0x9c, 0xbd, 0xbe, 0x20, - 0x70, 0x4b, 0x77, 0x3f, 0x66, 0x0d, 0xb6, 0x55, 0x8b, 0x62, 0x1a, 0x70, 0x3b, 0xed, 0x24, 0xcc, - 0x3a, 0xdc, 0xcd, 0x8a, 0xbd, 0xd9, 0x70, 0x46, 0x97, 0xd7, 0x76, 0xed, 0xea, 0xda, 0xae, 0xbd, - 0xb9, 0xb6, 0x8d, 0x1f, 0x17, 0xb6, 0xf1, 0xeb, 0xc2, 0x36, 0x7e, 0x5f, 0xd8, 0xc6, 0xe5, 0xc2, - 0x36, 0xae, 0x16, 0xb6, 0xf1, 0xe7, 0xc2, 0x36, 0xfe, 0x5a, 0xd8, 0xb5, 0x37, 0x0b, 0xdb, 0xf8, - 0xe5, 0xc6, 0xae, 0x5d, 0xde, 0xd8, 0xb5, 0xab, 0x1b, 0xbb, 0xf6, 0xcd, 0x7d, 0xf1, 0x5a, 0x48, - 0x12, 0x9d, 0x46, 0x98, 0xcb, 0x21, 0xa3, 0x92, 0x63, 0x4f, 0x8a, 0x49, 0x5b, 0xf7, 0x94, 0x1f, - 0xfd, 0x17, 0x00, 0x00, 0xff, 0xff, 0x4c, 0xbe, 0x97, 0x28, 0x9d, 0x0a, 0x00, 0x00, + 0x60, 0x11, 0x12, 0x72, 0x66, 0xdc, 0xc9, 0x88, 0x8c, 0x5d, 0x8d, 0x9d, 0xfd, 0x83, 0x84, 0xc4, + 0x89, 0x2b, 0x7c, 0x0c, 0xc4, 0x27, 0xe1, 0xd8, 0x63, 0x4f, 0x03, 0x4d, 0x2f, 0x60, 0x71, 0x58, + 0x89, 0x2f, 0x80, 0xec, 0x49, 0xe6, 0x4f, 0x72, 0xaa, 0x18, 0x71, 0xb2, 0xdf, 0xef, 0xd9, 0xbf, + 0xdf, 0xbc, 0x37, 0xcf, 0x7e, 0x06, 0x66, 0xc0, 0x5e, 0x90, 0x98, 0x62, 0xea, 0x91, 0xfe, 0x45, + 0xcc, 0x04, 0x83, 0x2d, 0x3d, 0xbc, 0x73, 0x10, 0x84, 0x62, 0x3a, 0x9f, 0xf4, 0x3d, 0x16, 0x0d, + 0x02, 0x16, 0xb0, 0x81, 0x86, 0x27, 0xf3, 0x73, 0x6d, 0x69, 0x43, 0xcf, 0xd2, 0x5d, 0xdd, 0xbf, + 0xb7, 0xc0, 0xdd, 0xc7, 0x84, 0x92, 0x18, 0xcf, 0x4e, 0x62, 0x76, 0xc1, 0x38, 0x9e, 0x41, 0x04, + 0x5a, 0x23, 0x46, 0x3d, 0x62, 0x19, 0xfb, 0x46, 0xaf, 0xe9, 0xec, 0xc8, 0x04, 0xa5, 0x80, 0x9b, + 0x0e, 0xb0, 0x0f, 0xc0, 0x90, 0x45, 0x51, 0x28, 0x3e, 0xc3, 0x7c, 0x6a, 0xd5, 0xf7, 0x8d, 0x5e, + 0xc7, 0xb9, 0x23, 0x13, 0x54, 0x40, 0xdd, 0xc2, 0x1c, 0x7e, 0x04, 0xee, 0x9c, 0x0a, 0x1c, 0x8b, + 0x31, 0x13, 0xe4, 0xf0, 0x82, 0x79, 0x53, 0xab, 0xa1, 0x99, 0xa1, 0x4c, 0xd0, 0x9a, 0xc7, 0x5d, + 0xb3, 0xe1, 0x87, 0xa0, 0x73, 0x48, 0xfd, 0x7c, 0x67, 0x53, 0xef, 0x34, 
0x65, 0x82, 0x4a, 0xb8, + 0x5b, 0xb2, 0xe0, 0x04, 0x34, 0x9e, 0x13, 0x6e, 0xb5, 0xf4, 0xa7, 0x9d, 0xc8, 0x04, 0x29, 0xf3, + 0xd7, 0xdf, 0xd1, 0x61, 0x84, 0xc5, 0x74, 0x30, 0x09, 0x83, 0xfe, 0x11, 0x15, 0x1f, 0x17, 0x52, + 0x15, 0xcd, 0x67, 0x22, 0x7c, 0x41, 0x62, 0xfe, 0x6a, 0x10, 0xbd, 0x3a, 0xf0, 0xa6, 0x38, 0xa4, + 0x07, 0x1e, 0x8b, 0xc9, 0x41, 0xc0, 0x06, 0x3e, 0x16, 0xb8, 0xef, 0x84, 0xc1, 0x11, 0x15, 0x43, + 0xcc, 0x05, 0x89, 0x5d, 0xc5, 0x06, 0xbf, 0x01, 0xf5, 0x11, 0xb3, 0xda, 0x5a, 0xe2, 0xa9, 0x4c, + 0x50, 0x7d, 0xc4, 0xaa, 0x53, 0xa8, 0x8f, 0x18, 0x24, 0xa0, 0x39, 0x26, 0x82, 0x59, 0x5b, 0x5a, + 0xe2, 0x99, 0x4c, 0x90, 0xb6, 0xab, 0x13, 0xd1, 0x74, 0x90, 0x82, 0xad, 0x87, 0x13, 0x2e, 0x70, + 0x48, 0xad, 0x6d, 0xad, 0x74, 0x26, 0x13, 0xb4, 0x82, 0xaa, 0x13, 0x5b, 0x31, 0xc2, 0xef, 0xc0, + 0xee, 0xb3, 0x39, 0x8b, 0xe7, 0xd1, 0xa9, 0xc0, 0xdf, 0x12, 0x6b, 0x47, 0x6b, 0x7e, 0x29, 0x13, + 0x54, 0x84, 0xab, 0xd3, 0x2d, 0xb2, 0xc2, 0x2e, 0x68, 0x9f, 0x60, 0xce, 0x89, 0x6f, 0x81, 0x7d, + 0xa3, 0xb7, 0xed, 0x00, 0x99, 0xa0, 0x25, 0xe2, 0x2e, 0x47, 0xb5, 0x66, 0x38, 0x63, 0x6a, 0xcd, + 0x6e, 0xbe, 0x26, 0x45, 0xdc, 0xe5, 0x08, 0x1f, 0x80, 0xbd, 0x23, 0xce, 0xe7, 0x24, 0x7e, 0xe8, + 0xfb, 0x31, 0xe1, 0xdc, 0xea, 0xe8, 0x28, 0xee, 0xc9, 0x04, 0x95, 0x1d, 0x6e, 0xd9, 0x84, 0xdf, + 0x83, 0xce, 0xea, 0x9c, 0x0d, 0x19, 0x17, 0xd6, 0x9e, 0xde, 0xf7, 0x5c, 0x95, 0x73, 0x11, 0xaf, + 0x2e, 0xfc, 0x12, 0x6d, 0xf7, 0xaf, 0x3a, 0x30, 0x1f, 0x67, 0x37, 0xc7, 0x90, 0xd1, 0xf3, 0x30, + 0x80, 0x3d, 0xb0, 0x3d, 0x9a, 0x47, 0x23, 0xe6, 0x13, 0xae, 0x8f, 0x7c, 0xc3, 0xe9, 0xc8, 0x04, + 0x65, 0x98, 0x9b, 0xcd, 0xe0, 0x7b, 0x60, 0xe7, 0x49, 0x48, 0xd3, 0x84, 0xea, 0x73, 0xdf, 0x72, + 0xf6, 0x64, 0x82, 0x72, 0xd0, 0xcd, 0xa7, 0xf0, 0x13, 0x60, 0x3e, 0x09, 0xa9, 0x4a, 0xea, 0xd9, + 0x34, 0x26, 0x7c, 0xca, 0x66, 0xbe, 0x3e, 0xf7, 0x2d, 0xe7, 0xbe, 0x4c, 0xd0, 0x86, 0xcf, 0xdd, + 0x40, 0x96, 0x0c, 0xaa, 0x48, 0x73, 0x86, 0x66, 0x89, 0xa1, 0xe4, 0x73, 0x37, 0x10, 0x55, 0x6b, + 0xab, 0xf8, 
0x3f, 0x25, 0x64, 0x79, 0x1f, 0xe8, 0x5a, 0x2b, 0xc0, 0x15, 0xd6, 0x5a, 0x81, 0xb5, + 0xfb, 0x53, 0x13, 0xc0, 0xf5, 0x5c, 0x8f, 0xdf, 0x2f, 0xe7, 0x50, 0xa5, 0xbb, 0x7e, 0xcb, 0x1c, + 0xd6, 0xf5, 0x9e, 0xff, 0x92, 0xc3, 0x46, 0x89, 0xe1, 0x96, 0x39, 0x6c, 0xfe, 0x8f, 0x39, 0x84, + 0x3f, 0x1a, 0xe0, 0xee, 0x31, 0xe3, 0x62, 0xf3, 0x27, 0x7e, 0x2d, 0x13, 0xb4, 0xee, 0xaa, 0xee, + 0x23, 0xd6, 0x99, 0xe1, 0x10, 0xdc, 0x3b, 0xc6, 0x39, 0x94, 0xf6, 0xc7, 0xb6, 0xee, 0x45, 0x6f, + 0xcb, 0x04, 0x6d, 0x3a, 0xdd, 0x4d, 0xa8, 0xeb, 0x01, 0xf3, 0x29, 0x0d, 0x58, 0x48, 0x03, 0xd5, + 0xa9, 0xfc, 0xe3, 0x90, 0x0b, 0x75, 0xdb, 0x3c, 0x0a, 0x63, 0xe2, 0x09, 0xcb, 0xd8, 0x6f, 0xf4, + 0x9a, 0xe9, 0x6d, 0x93, 0x22, 0xee, 0x72, 0x54, 0x25, 0xf3, 0x88, 0xcc, 0x48, 0x80, 0x05, 0x51, + 0xbf, 0x5f, 0x2d, 0xd3, 0x25, 0x93, 0x81, 0x6e, 0x3e, 0xed, 0xfe, 0xd3, 0x00, 0x6f, 0x65, 0xd6, + 0xe9, 0x50, 0x29, 0x1d, 0xd1, 0x73, 0x06, 0x5f, 0x02, 0x70, 0xc6, 0x04, 0x9e, 0x9d, 0xb0, 0x97, + 0x24, 0xd6, 0x85, 0xd7, 0x71, 0xbe, 0x50, 0x4d, 0x3b, 0x47, 0xab, 0xcb, 0x5f, 0x81, 0x14, 0x0a, + 0xb0, 0xf3, 0x39, 0x27, 0x7e, 0xaa, 0x9b, 0x3e, 0x16, 0xc6, 0xea, 0xeb, 0x33, 0xb0, 0x3a, 0xd9, + 0x9c, 0x33, 0x0b, 0x37, 0x6d, 0x32, 0x8d, 0xb5, 0x70, 0x2b, 0xee, 0x31, 0x05, 0xd2, 0x55, 0xb8, + 0xa9, 0x6e, 0xb3, 0x1c, 0x6e, 0xc5, 0xb2, 0x39, 0xe7, 0xbb, 0x0f, 0xc0, 0x9e, 0xfa, 0xd3, 0x63, + 0x3c, 0x9b, 0x93, 0xb3, 0xd7, 0x17, 0x04, 0x6e, 0xe9, 0x17, 0x90, 0x59, 0x83, 0x6d, 0xf5, 0x4c, + 0x31, 0x0d, 0xb8, 0x9d, 0xbe, 0x26, 0xcc, 0x3a, 0xdc, 0xcd, 0x1a, 0xbe, 0xd9, 0x70, 0x46, 0x97, + 0xd7, 0x76, 0xed, 0xea, 0xda, 0xae, 0xbd, 0xb9, 0xb6, 0x8d, 0x1f, 0x16, 0xb6, 0xf1, 0xcb, 0xc2, + 0x36, 0x7e, 0x5b, 0xd8, 0xc6, 0xe5, 0xc2, 0x36, 0xae, 0x16, 0xb6, 0xf1, 0xc7, 0xc2, 0x36, 0xfe, + 0x5c, 0xd8, 0xb5, 0x37, 0x0b, 0xdb, 0xf8, 0xf9, 0xc6, 0xae, 0x5d, 0xde, 0xd8, 0xb5, 0xab, 0x1b, + 0xbb, 0xf6, 0xd5, 0x7d, 0xfe, 0x9a, 0x0b, 0x12, 0x9d, 0x46, 0x38, 0x16, 0x43, 0x46, 0x45, 0x8c, + 0x3d, 0xc1, 0x27, 0x6d, 0xfd, 0xae, 0xfc, 0xe0, 
0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x67, 0xcd, + 0x78, 0x44, 0xa1, 0x0a, 0x00, 0x00, } func (x VoteValueType) String() string { From 841e3086b62573ffe479e889ea373142cf5bd109 Mon Sep 17 00:00:00 2001 From: robertsasu Date: Fri, 5 May 2023 15:00:18 +0300 Subject: [PATCH 135/221] add claim accumulated fees function --- vm/systemSmartContracts/governance.go | 47 ++++++++++++++++++++-- vm/systemSmartContracts/governance_test.go | 17 ++++---- 2 files changed, 54 insertions(+), 10 deletions(-) diff --git a/vm/systemSmartContracts/governance.go b/vm/systemSmartContracts/governance.go index f49b5d50198..61dc5e30802 100644 --- a/vm/systemSmartContracts/governance.go +++ b/vm/systemSmartContracts/governance.go @@ -157,6 +157,8 @@ func (g *governanceContract) Execute(args *vmcommon.ContractCallInput) vmcommon. return g.viewDelegatedVoteInfo(args) case "viewProposal": return g.viewProposal(args) + case "claimAccumulatedFees": + return g.claimAccumulatedFees(args) } g.eei.AddReturnMessage("invalid method to call") @@ -646,11 +648,50 @@ func (g *governanceContract) closeProposal(args *vmcommon.ContractCallInput) vmc return vmcommon.Ok } -func (g *governanceContract) addToAccumulatedFees(value *big.Int) { +func (g *governanceContract) getAccumulatedFees() *big.Int { currentData := g.eei.GetStorage([]byte(accumulatedFeeKey)) - currentValue := big.NewInt(0).SetBytes(currentData) + return big.NewInt(0).SetBytes(currentData) +} + +func (g *governanceContract) setAccumulatedFees(value *big.Int) { + g.eei.SetStorage([]byte(accumulatedFeeKey), value.Bytes()) +} + +func (g *governanceContract) addToAccumulatedFees(value *big.Int) { + currentValue := g.getAccumulatedFees() currentValue.Add(currentValue, value) - g.eei.SetStorage([]byte(accumulatedFeeKey), currentValue.Bytes()) + g.setAccumulatedFees(currentValue) +} + +func (g *governanceContract) claimAccumulatedFees(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { + if args.CallValue.Cmp(zero) != 0 { + 
g.eei.AddReturnMessage("closeProposal callValue expected to be 0") + return vmcommon.UserError + } + if len(args.Arguments) != 0 { + g.eei.AddReturnMessage("invalid number of arguments expected 0") + return vmcommon.UserError + } + if !bytes.Equal(args.CallerAddr, g.changeConfigAddress) { + g.eei.AddReturnMessage("claimAccumulatedFees can be called only by owner") + return vmcommon.UserError + } + err := g.eei.UseGas(g.gasCost.MetaChainSystemSCsCost.CloseProposal) + if err != nil { + g.eei.AddReturnMessage("not enough gas") + return vmcommon.OutOfGas + } + + accumulatedFees := g.getAccumulatedFees() + g.setAccumulatedFees(big.NewInt(0)) + + err = g.eei.Transfer(args.CallerAddr, args.RecipientAddr, accumulatedFees, nil, 0) + if err != nil { + g.eei.AddReturnMessage(err.Error()) + return vmcommon.UserError + } + + return vmcommon.Ok } // viewVotingPower returns the total voting power diff --git a/vm/systemSmartContracts/governance_test.go b/vm/systemSmartContracts/governance_test.go index ec8c868e010..382b4bbbba7 100644 --- a/vm/systemSmartContracts/governance_test.go +++ b/vm/systemSmartContracts/governance_test.go @@ -329,7 +329,10 @@ func TestGovernanceContract_ChangeConfig(t *testing.T) { }, GetStorageCalled: func(key []byte) []byte { if bytes.Equal(key, []byte(governanceConfigKey)) { - configBytes, _ := args.Marshalizer.Marshal(&GovernanceConfigV2{}) + configBytes, _ := args.Marshalizer.Marshal(&GovernanceConfigV2{ + ProposalFee: big.NewInt(10), + LostProposalFee: big.NewInt(1), + }) return configBytes } @@ -599,10 +602,6 @@ func TestGovernanceContract_ProposalAlreadyExists(t *testing.T) { gsc.eei.SetStorage([]byte(proposalPrefix+string(proposalIdentifier)), []byte("1")) callInput := createVMInput(big.NewInt(500), "proposal", vm.GovernanceSCAddress, []byte("addr1"), callInputArgs) - baseConfig, err := gsc.getConfig() - require.Nil(t, err) - fmt.Println(baseConfig) - retCode := gsc.Execute(callInput) require.Equal(t, vmcommon.UserError, retCode) require.Equal(t, 
eei.GetReturnMessage(), "proposal already exists") @@ -910,6 +909,8 @@ func TestGovernanceContract_CloseProposal(t *testing.T) { MinQuorum: 0.1, MinVetoThreshold: 0.1, MinPassThreshold: 0.1, + ProposalFee: big.NewInt(10), + LostProposalFee: big.NewInt(1), }) return configBytes } @@ -918,6 +919,7 @@ func TestGovernanceContract_CloseProposal(t *testing.T) { } if bytes.Equal(key, append([]byte(proposalPrefix), proposalIdentifier...)) { proposalBytes, _ := args.Marshalizer.Marshal(&GeneralProposal{ + ProposalCost: big.NewInt(10), Yes: big.NewInt(10), No: big.NewInt(10), Veto: big.NewInt(10), @@ -1200,7 +1202,7 @@ func TestGovernanceContract_CloseProposalComputeResultsErr(t *testing.T) { t.Parallel() retMessage := "" - errSubstr := "computeEndResults error" + errSubstr := "element was not found" callerAddress := []byte("address") proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) args := createMockGovernanceArgs() @@ -1214,6 +1216,7 @@ func TestGovernanceContract_CloseProposalComputeResultsErr(t *testing.T) { } if bytes.Equal(key, append([]byte(proposalPrefix), proposalIdentifier...)) { proposalBytes, _ := args.Marshalizer.Marshal(&GeneralProposal{ + ProposalCost: big.NewInt(10), Yes: big.NewInt(10), No: big.NewInt(10), Veto: big.NewInt(10), @@ -1372,6 +1375,7 @@ func TestGovernanceContract_ViewConfig(t *testing.T) { mockEEI.GetStorageCalled = func(key []byte) []byte { proposalBytes, _ := args.Marshalizer.Marshal(&GovernanceConfigV2{ ProposalFee: big.NewInt(10), + LostProposalFee: big.NewInt(1), LastProposalNonce: 10, MinQuorum: 0.4, MinPassThreshold: 0.4, @@ -1763,7 +1767,6 @@ func TestComputeEndResults(t *testing.T) { passed = gsc.computeEndResults(pass, baseConfig) require.True(t, passed) require.Equal(t, "Proposal passed", retMessage) - require.True(t, pass.Passed) } func TestGovernanceContract_ProposeVoteClose(t *testing.T) { From f6bbe261cac2e8ac8f3267797177cd88699ecbf2 Mon Sep 17 00:00:00 2001 From: robertsasu Date: Fri, 5 May 2023 15:13:52 +0300 
Subject: [PATCH 136/221] add claim accumulated fees function --- vm/systemSmartContracts/governance.go | 4 +-- vm/systemSmartContracts/governance_test.go | 40 ++++++++++++++++++++++ 2 files changed, 42 insertions(+), 2 deletions(-) diff --git a/vm/systemSmartContracts/governance.go b/vm/systemSmartContracts/governance.go index 61dc5e30802..bd6064e82b8 100644 --- a/vm/systemSmartContracts/governance.go +++ b/vm/systemSmartContracts/governance.go @@ -665,7 +665,7 @@ func (g *governanceContract) addToAccumulatedFees(value *big.Int) { func (g *governanceContract) claimAccumulatedFees(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { if args.CallValue.Cmp(zero) != 0 { - g.eei.AddReturnMessage("closeProposal callValue expected to be 0") + g.eei.AddReturnMessage("callValue expected to be 0") return vmcommon.UserError } if len(args.Arguments) != 0 { @@ -673,7 +673,7 @@ func (g *governanceContract) claimAccumulatedFees(args *vmcommon.ContractCallInp return vmcommon.UserError } if !bytes.Equal(args.CallerAddr, g.changeConfigAddress) { - g.eei.AddReturnMessage("claimAccumulatedFees can be called only by owner") + g.eei.AddReturnMessage("can be called only by owner") return vmcommon.UserError } err := g.eei.UseGas(g.gasCost.MetaChainSystemSCsCost.CloseProposal) diff --git a/vm/systemSmartContracts/governance_test.go b/vm/systemSmartContracts/governance_test.go index 382b4bbbba7..62e5f2b0fea 100644 --- a/vm/systemSmartContracts/governance_test.go +++ b/vm/systemSmartContracts/governance_test.go @@ -1800,3 +1800,43 @@ func TestGovernanceContract_ProposeVoteClose(t *testing.T) { retCode = gsc.Execute(callInput) require.Equal(t, vmcommon.Ok, retCode) } + +func TestGovernanceContract_ClaimAccumulatedFees(t *testing.T) { + t.Parallel() + + gsc, _, eei := createGovernanceBlockChainHookStubContextHandler() + callInput := createVMInput(big.NewInt(500), "claimAccumulatedFees", []byte("addr1"), vm.GovernanceSCAddress, [][]byte{{1}}) + + retCode := gsc.Execute(callInput) + 
require.Equal(t, vmcommon.UserError, retCode) + require.Equal(t, eei.GetReturnMessage(), "callValue expected to be 0") + + callInput.CallValue = big.NewInt(0) + retCode = gsc.Execute(callInput) + require.Equal(t, vmcommon.UserError, retCode) + require.True(t, strings.Contains(eei.GetReturnMessage(), "invalid number of arguments expected 0")) + + callInput.Arguments = [][]byte{} + retCode = gsc.Execute(callInput) + require.Equal(t, vmcommon.UserError, retCode) + require.True(t, strings.Contains(eei.GetReturnMessage(), "can be called only by owner")) + + gsc.gasCost.MetaChainSystemSCsCost.CloseProposal = 100 + callInput.CallerAddr = gsc.changeConfigAddress + retCode = gsc.Execute(callInput) + require.Equal(t, vmcommon.OutOfGas, retCode) + require.True(t, strings.Contains(eei.GetReturnMessage(), "not enough gas")) + + gsc.gasCost.MetaChainSystemSCsCost.CloseProposal = 0 + retCode = gsc.Execute(callInput) + require.Equal(t, vmcommon.Ok, retCode) + require.Equal(t, big.NewInt(0), eei.GetTotalSentToUser(callInput.CallerAddr)) + + gsc.addToAccumulatedFees(big.NewInt(100)) + + retCode = gsc.Execute(callInput) + require.Equal(t, vmcommon.Ok, retCode) + require.Equal(t, big.NewInt(100), eei.GetTotalSentToUser(callInput.CallerAddr)) + + require.Equal(t, big.NewInt(0), gsc.getAccumulatedFees()) +} From 9c8fa9c8c7ff31eb733363cf48830c7c2b5ed95f Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Fri, 5 May 2023 15:14:19 +0300 Subject: [PATCH 137/221] MX-13965: block processing cut-off first implementation --- cmd/node/config/prefs.toml | 12 ++++++-- common/constants.go | 16 ++++++++--- config/prefsConfig.go | 12 ++++++-- factory/processing/blockProcessorCreator.go | 2 ++ factory/processing/processComponents.go | 14 +++++----- node/nodeRunner.go | 2 +- process/block/argProcessor.go | 1 + process/block/baseProcess.go | 31 +++++++++++++++++++++ process/block/metablock.go | 2 ++ process/block/shardblock.go | 2 ++ 10 files changed, 78 insertions(+), 16 deletions(-) diff --git 
a/cmd/node/config/prefs.toml b/cmd/node/config/prefs.toml index d6339fca6ab..6a52f8620b2 100644 --- a/cmd/node/config/prefs.toml +++ b/cmd/node/config/prefs.toml @@ -8,7 +8,7 @@ # In multikey mode, all bls keys not mentioned in NamedIdentity section will use this one as default NodeDisplayName = "" - # Identity represents the keybase's identity when the node does not run in multikey mode + # Identity represents the keybase/GitHub identity when the node does not run in multikey mode # In multikey mode, all bls keys not mentioned in NamedIdentity section will use this one as default Identity = "" @@ -50,10 +50,18 @@ # { File = "external.toml", Path = "ElasticSearchConnector.Enabled", Value = "true" } #] +# BlockProcessingCutoff can be used to stop processing blocks at a certain round, nonce or epoch. +# The Rest API endpoints will return values for the cutoff time. +# This can be useful for snapshotting different stuff. +[[BlockProcessingCutoff]] + Enabled = false + Type = "round" # possible values: "round", "nonce", or "epoch". For epoch, it resembles the start of the specified epoch + Value = 0 # the value of the cutoff. 
For example, if Type is "round", then Value is the round number to stop processing at + # NamedIdentity represents an identity that runs nodes on the multikey # There can be multiple identities set on the same node, each one of them having different bls keys, just by duplicating the NamedIdentity [[NamedIdentity]] - # Identity represents the keybase identity for the current NamedIdentity + # Identity represents the keybase/GitHub identity for the current NamedIdentity Identity = "" # NodeName represents the name that will be given to the names of the current identity NodeName = "" diff --git a/common/constants.go b/common/constants.go index 7dc897076e9..445ef4b7a47 100644 --- a/common/constants.go +++ b/common/constants.go @@ -730,10 +730,6 @@ const InvalidMessageBlacklistDuration = time.Second * 3600 // rating to a minimum threshold due to improper messages const PublicKeyBlacklistDuration = time.Second * 7200 -// WrongP2PMessageBlacklistDuration represents the time to keep a peer id in the blacklist if it sends a message that -// do not follow this protocol -const WrongP2PMessageBlacklistDuration = time.Second * 7200 - // InvalidSigningBlacklistDuration defines the time to keep a peer id in blacklist if it signs a message with invalid signature const InvalidSigningBlacklistDuration = time.Second * 7200 @@ -827,6 +823,18 @@ const ( ApiOutputFormatProto ApiOutputFormat = 1 ) +// BlockProcessingCutoffType represents the type of cutoff potentially used in block processing +type BlockProcessingCutoffType string + +const ( + // BlockProcessingCutoffByNonce represents the cutoff by nonce + BlockProcessingCutoffByNonce BlockProcessingCutoffType = "nonce" + // BlockProcessingCutoffByRound represents the cutoff by round + BlockProcessingCutoffByRound BlockProcessingCutoffType = "round" + // BlockProcessingCutoffByEpoch represents the cutoff by epoch + BlockProcessingCutoffByEpoch BlockProcessingCutoffType = "epoch" +) + // MaxIndexOfTxInMiniBlock defines the maximum index 
of a tx inside one mini block const MaxIndexOfTxInMiniBlock = int32(29999) diff --git a/config/prefsConfig.go b/config/prefsConfig.go index 4a6df0c9a73..e558dd6b485 100644 --- a/config/prefsConfig.go +++ b/config/prefsConfig.go @@ -2,8 +2,9 @@ package config // Preferences will hold the configuration related to node's preferences type Preferences struct { - Preferences PreferencesConfig - NamedIdentity []NamedIdentity + Preferences PreferencesConfig + BlockProcessingCutoff BlockProcessingCutoffConfig + NamedIdentity []NamedIdentity } // PreferencesConfig will hold the fields which are node specific such as the display name @@ -25,6 +26,13 @@ type OverridableConfig struct { Value string } +// BlockProcessingCutoffConfig holds the configuration for the block processing cutoff +type BlockProcessingCutoffConfig struct { + Enabled bool + Type string + Value uint64 +} + // NamedIdentity will hold the fields which are node named identities type NamedIdentity struct { Identity string diff --git a/factory/processing/blockProcessorCreator.go b/factory/processing/blockProcessorCreator.go index 73d79a87b6f..923f87a971d 100644 --- a/factory/processing/blockProcessorCreator.go +++ b/factory/processing/blockProcessorCreator.go @@ -401,6 +401,7 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( StatusComponents: pcf.statusComponents, StatusCoreComponents: pcf.statusCoreComponents, Config: pcf.config, + PrefsConfig: pcf.prefConfigs, Version: pcf.version, AccountsDB: accountsDb, ForkDetector: forkDetector, @@ -821,6 +822,7 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( StatusComponents: pcf.statusComponents, StatusCoreComponents: pcf.statusCoreComponents, Config: pcf.config, + PrefsConfig: pcf.prefConfigs, Version: pcf.version, AccountsDB: accountsDb, ForkDetector: forkDetector, diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index 205e528c1f1..8d9ce554e8a 100644 --- a/factory/processing/processComponents.go 
+++ b/factory/processing/processComponents.go @@ -129,7 +129,7 @@ type processComponents struct { type ProcessComponentsFactoryArgs struct { Config config.Config EpochConfig config.EpochConfig - PrefConfigs config.PreferencesConfig + PrefConfigs config.Preferences ImportDBConfig config.ImportDbConfig AccountsParser genesis.AccountsParser SmartContractParser genesis.InitialSmartContractParser @@ -159,7 +159,7 @@ type ProcessComponentsFactoryArgs struct { type processComponentsFactory struct { config config.Config epochConfig config.EpochConfig - prefConfigs config.PreferencesConfig + prefConfigs config.Preferences importDBConfig config.ImportDbConfig accountsParser genesis.AccountsParser smartContractParser genesis.InitialSmartContractParser @@ -236,7 +236,7 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { pcf.config, pcf.coreData.GenesisNodesSetup().GetRoundDuration(), pcf.coreData.GenesisTime().Unix(), - pcf.prefConfigs.FullArchive, + pcf.prefConfigs.Preferences.FullArchive, ) if err != nil { return nil, err @@ -653,7 +653,7 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { } nodeRedundancyArg := redundancy.ArgNodeRedundancy{ - RedundancyLevel: pcf.prefConfigs.RedundancyLevel, + RedundancyLevel: pcf.prefConfigs.Preferences.RedundancyLevel, Messenger: pcf.network.NetworkMessenger(), ObserverPrivateKey: observerBLSPrivateKey, } @@ -1362,7 +1362,7 @@ func (pcf *processComponentsFactory) newShardResolverContainerFactory( InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), NumConcurrentResolvingJobs: pcf.config.Antiflood.NumConcurrentResolverJobs, - IsFullHistoryNode: pcf.prefConfigs.FullArchive, + IsFullHistoryNode: pcf.prefConfigs.Preferences.FullArchive, PreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), PayloadValidator: payloadValidator, } @@ -1396,7 +1396,7 @@ func (pcf *processComponentsFactory) 
newMetaResolverContainerFactory( InputAntifloodHandler: pcf.network.InputAntiFloodHandler(), OutputAntifloodHandler: pcf.network.OutputAntiFloodHandler(), NumConcurrentResolvingJobs: pcf.config.Antiflood.NumConcurrentResolverJobs, - IsFullHistoryNode: pcf.prefConfigs.FullArchive, + IsFullHistoryNode: pcf.prefConfigs.Preferences.FullArchive, PreferredPeersHolder: pcf.network.PreferredPeersHolderHandler(), PayloadValidator: payloadValidator, } @@ -1496,7 +1496,7 @@ func (pcf *processComponentsFactory) newStorageRequesters() (dataRetriever.Reque storageServiceCreator, err := storageFactory.NewStorageServiceFactory( storageFactory.StorageServiceFactoryArgs{ Config: pcf.config, - PrefsConfig: pcf.prefConfigs, + PrefsConfig: pcf.prefConfigs.Preferences, ShardCoordinator: pcf.bootstrapComponents.ShardCoordinator(), PathManager: pathManager, EpochStartNotifier: manualEpochStartNotifier, diff --git a/node/nodeRunner.go b/node/nodeRunner.go index acad2c629ad..af0e17c3388 100644 --- a/node/nodeRunner.go +++ b/node/nodeRunner.go @@ -1209,7 +1209,7 @@ func (nr *nodeRunner) CreateManagedProcessComponents( processArgs := processComp.ProcessComponentsFactoryArgs{ Config: *configs.GeneralConfig, EpochConfig: *configs.EpochConfig, - PrefConfigs: configs.PreferencesConfig.Preferences, + PrefConfigs: *configs.PreferencesConfig, ImportDBConfig: *configs.ImportDbConfig, AccountsParser: accountsParser, SmartContractParser: smartContractParser, diff --git a/process/block/argProcessor.go b/process/block/argProcessor.go index ccacd4aa0c5..f8de4920c02 100644 --- a/process/block/argProcessor.go +++ b/process/block/argProcessor.go @@ -65,6 +65,7 @@ type ArgBaseProcessor struct { StatusCoreComponents statusCoreComponentsHolder Config config.Config + PrefsConfig config.Preferences AccountsDB map[state.AccountsDbIdentifier]state.AccountsAdapter ForkDetector process.ForkDetector NodesCoordinator nodesCoordinator.NodesCoordinator diff --git a/process/block/baseProcess.go 
b/process/block/baseProcess.go index c37f8fa65eb..f8e8e4f8dea 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -114,6 +114,8 @@ type baseProcessor struct { mutNonceOfFirstCommittedBlock sync.RWMutex nonceOfFirstCommittedBlock core.OptionalUint64 + + blockProcessingCutoffConfig config.BlockProcessingCutoffConfig } type bootStorerDataArgs struct { @@ -2075,3 +2077,32 @@ func (bp *baseProcessor) setNonceOfFirstCommittedBlock(nonce uint64) { bp.nonceOfFirstCommittedBlock.HasValue = true bp.nonceOfFirstCommittedBlock.Value = nonce } + +func (bp *baseProcessor) handleBlockProcessingCutoff(header data.HeaderHandler) { + if !bp.blockProcessingCutoffConfig.Enabled { + return + } + + cutOffFunction := func(printArgs ...interface{}) { + log.Info("cutting off the block processing. The node will not advance", printArgs) + neverEndingChannel := make(chan struct{}) + <-neverEndingChannel + } + + value := bp.blockProcessingCutoffConfig.Value + + switch common.BlockProcessingCutoffType(bp.blockProcessingCutoffConfig.Type) { + case common.BlockProcessingCutoffByRound: + if header.GetRound() == value { + cutOffFunction("round", header.GetRound()) + } + case common.BlockProcessingCutoffByNonce: + if header.GetNonce() == value { + cutOffFunction("nonce", header.GetNonce()) + } + case common.BlockProcessingCutoffByEpoch: + if header.IsStartOfEpochBlock() && header.GetEpoch() == uint32(value) { + cutOffFunction("epoch", header.GetEpoch()) + } + } +} diff --git a/process/block/metablock.go b/process/block/metablock.go index 768970cb44b..3f16f81d3d0 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -1166,6 +1166,8 @@ func (mp *metaProcessor) CommitBlock( return err } + mp.handleBlockProcessingCutoff(headerHandler) + mp.store.SetEpochForPutOperation(headerHandler.GetEpoch()) header, ok := headerHandler.(*block.MetaBlock) diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 14faf2a8507..c68b57e912e 100644 --- 
a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -888,6 +888,8 @@ func (sp *shardProcessor) CommitBlock( return err } + sp.handleBlockProcessingCutoff(headerHandler) + sp.store.SetEpochForPutOperation(headerHandler.GetEpoch()) log.Debug("started committing block", From 0e2ebb42c9a87f7a72fe8d2da8c8ca8f073b8666 Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Fri, 5 May 2023 15:36:27 +0300 Subject: [PATCH 138/221] fixes after review --- dblookupext/esdtSupply/proto/supplyESDT.proto | 2 +- dblookupext/esdtSupply/supplyESDT.pb.go | 42 +++++----- process/sync/shardblock.go | 1 + ...Computer.go => tokensSuppliesProcessor.go} | 5 +- ...est.go => tokensSuppliesProcessor_test.go} | 72 +++++++++++++++- .../trieAccountsIterator_test.go | 83 ++++++++++++++++++- 6 files changed, 177 insertions(+), 28 deletions(-) rename process/sync/trieIterators/{tokensSuppliesComputer.go => tokensSuppliesProcessor.go} (98%) rename process/sync/trieIterators/{tokensSuppliesComputer_test.go => tokensSuppliesProcessor_test.go} (74%) diff --git a/dblookupext/esdtSupply/proto/supplyESDT.proto b/dblookupext/esdtSupply/proto/supplyESDT.proto index a78c4f15b81..91417272e7b 100644 --- a/dblookupext/esdtSupply/proto/supplyESDT.proto +++ b/dblookupext/esdtSupply/proto/supplyESDT.proto @@ -12,5 +12,5 @@ message SupplyESDT { bytes Supply = 1 [(gogoproto.jsontag) = "value", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; bytes Burned = 2 [(gogoproto.jsontag) = "burned", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; bytes Minted = 3 [(gogoproto.jsontag) = "minted", (gogoproto.casttypewith) = "math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster"]; - bool RecomputedSupply = 4 [(gogoproto.jsontag) = "RecomputedSupply"]; + bool RecomputedSupply = 4 [(gogoproto.jsontag) = "recomputedSupply"]; } diff --git a/dblookupext/esdtSupply/supplyESDT.pb.go 
b/dblookupext/esdtSupply/supplyESDT.pb.go index 7a7f5c68971..342c1ec11a3 100644 --- a/dblookupext/esdtSupply/supplyESDT.pb.go +++ b/dblookupext/esdtSupply/supplyESDT.pb.go @@ -32,7 +32,7 @@ type SupplyESDT struct { Supply *math_big.Int `protobuf:"bytes,1,opt,name=Supply,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"value"` Burned *math_big.Int `protobuf:"bytes,2,opt,name=Burned,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"burned"` Minted *math_big.Int `protobuf:"bytes,3,opt,name=Minted,proto3,casttypewith=math/big.Int;github.com/multiversx/mx-chain-core-go/data.BigIntCaster" json:"minted"` - RecomputedSupply bool `protobuf:"varint,4,opt,name=RecomputedSupply,proto3" json:"RecomputedSupply"` + RecomputedSupply bool `protobuf:"varint,4,opt,name=RecomputedSupply,proto3" json:"recomputedSupply"` } func (m *SupplyESDT) Reset() { *m = SupplyESDT{} } @@ -98,28 +98,28 @@ func init() { func init() { proto.RegisterFile("supplyESDT.proto", fileDescriptor_173c6d56cc05b222) } var fileDescriptor_173c6d56cc05b222 = []byte{ - // 324 bytes of a gzipped FileDescriptorProto + // 326 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0xd1, 0x3f, 0x4e, 0xc3, 0x30, 0x14, 0x06, 0xf0, 0x98, 0xd2, 0x08, 0x59, 0x0c, 0x55, 0xc4, 0x50, 0x31, 0xbc, 0x54, 0x4c, 0x5d, - 0x92, 0x0c, 0x8c, 0x2c, 0x28, 0xb4, 0x43, 0x07, 0x96, 0x94, 0x89, 0x2d, 0x7f, 0x8c, 0x6b, 0xa8, - 0xe3, 0x28, 0xb1, 0xab, 0xb2, 0x71, 0x04, 0x06, 0x0e, 0x81, 0x38, 0x09, 0x63, 0xc7, 0x4e, 0x85, - 0xba, 0x0b, 0xea, 0xd4, 0x23, 0x20, 0x9c, 0x0a, 0x90, 0xba, 0x76, 0xb2, 0xbf, 0xcf, 0xf2, 0xfb, - 0x49, 0x36, 0x6e, 0x55, 0xaa, 0x28, 0xc6, 0x8f, 0xfd, 0x61, 0xef, 0xc6, 0x2f, 0x4a, 0x21, 0x85, - 0xd3, 0x34, 0xcb, 0xa9, 0x47, 0x99, 0x1c, 0xa9, 0xc4, 0x4f, 0x05, 0x0f, 0xa8, 0xa0, 0x22, 0x30, - 0x75, 0xa2, 0xee, 0x4c, 0x32, 0xc1, 0xec, 0xea, 0x5b, 0x67, 0x2f, 0x0d, 0x8c, 0x87, 0xbf, 
0xa3, - 0x9c, 0x7b, 0x6c, 0xd7, 0xa9, 0x8d, 0x3a, 0xa8, 0x7b, 0x1c, 0x46, 0xeb, 0x85, 0xdb, 0x9c, 0xc4, - 0x63, 0x45, 0xde, 0x3e, 0xdc, 0x3e, 0x8f, 0xe5, 0x28, 0x48, 0x18, 0xf5, 0x07, 0xb9, 0xbc, 0xf8, - 0xe7, 0x70, 0x35, 0x96, 0x6c, 0x42, 0xca, 0x6a, 0x1a, 0xf0, 0xa9, 0x97, 0x8e, 0x62, 0x96, 0x7b, - 0xa9, 0x28, 0x89, 0x47, 0x45, 0x90, 0xc5, 0x32, 0xf6, 0x43, 0x46, 0x07, 0xb9, 0xbc, 0x8a, 0x2b, - 0x49, 0xca, 0x68, 0x2b, 0x38, 0x0f, 0xd8, 0x0e, 0x55, 0x99, 0x93, 0xac, 0x7d, 0x60, 0xac, 0xe1, - 0x7a, 0xe1, 0xda, 0x89, 0x69, 0xf6, 0x88, 0xd5, 0xc4, 0x0f, 0x76, 0xcd, 0x72, 0x49, 0xb2, 0x76, - 0xe3, 0x0f, 0xe3, 0xa6, 0xd9, 0x23, 0x56, 0x13, 0xce, 0x25, 0x6e, 0x45, 0x24, 0x15, 0xbc, 0x50, - 0x92, 0x64, 0xdb, 0xf7, 0x3c, 0xec, 0xa0, 0xee, 0x51, 0x78, 0xb2, 0x5e, 0xb8, 0x3b, 0x67, 0xd1, - 0x4e, 0x13, 0xf6, 0x66, 0x4b, 0xb0, 0xe6, 0x4b, 0xb0, 0x36, 0x4b, 0x40, 0x4f, 0x1a, 0xd0, 0xab, - 0x06, 0xf4, 0xae, 0x01, 0xcd, 0x34, 0xa0, 0xb9, 0x06, 0xf4, 0xa9, 0x01, 0x7d, 0x69, 0xb0, 0x36, - 0x1a, 0xd0, 0xf3, 0x0a, 0xac, 0xd9, 0x0a, 0xac, 0xf9, 0x0a, 0xac, 0x5b, 0x4c, 0xaa, 0x4c, 0xd6, - 0x53, 0x12, 0xdb, 0xfc, 0xf1, 0xf9, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8c, 0x06, 0x34, 0x97, - 0x2d, 0x02, 0x00, 0x00, + 0x92, 0x0c, 0x8c, 0x2c, 0x28, 0xb4, 0x43, 0x07, 0x96, 0x94, 0x89, 0x2d, 0x7f, 0x4c, 0x6a, 0xa8, + 0xe3, 0xc8, 0xb1, 0xab, 0xb2, 0x71, 0x04, 0x06, 0x0e, 0x81, 0x38, 0x09, 0x63, 0xc7, 0x4e, 0x85, + 0xba, 0x0b, 0xea, 0xd4, 0x23, 0x20, 0x9c, 0x0a, 0x10, 0xac, 0x9d, 0x92, 0xef, 0xb3, 0xfd, 0x7e, + 0x92, 0x8d, 0x5b, 0x95, 0x2a, 0xcb, 0xf1, 0x7d, 0x7f, 0xd8, 0xbb, 0xf2, 0x4b, 0xc1, 0x25, 0x77, + 0x9a, 0xe6, 0x73, 0xec, 0xe5, 0x54, 0x8e, 0x54, 0xe2, 0xa7, 0x9c, 0x05, 0x39, 0xcf, 0x79, 0x60, + 0xea, 0x44, 0xdd, 0x98, 0x64, 0x82, 0xf9, 0xab, 0x4f, 0x9d, 0x3c, 0x35, 0x30, 0x1e, 0x7e, 0x8f, + 0x72, 0x6e, 0xb1, 0x5d, 0xa7, 0x36, 0xea, 0xa0, 0xee, 0x61, 0x18, 0xad, 0x17, 0x6e, 0x73, 0x12, + 0x8f, 0x15, 0x79, 0x79, 0x73, 0xfb, 0x2c, 0x96, 0xa3, 0x20, 0xa1, 0xb9, 0x3f, 0x28, 0xe4, 0xd9, + 0x2f, 
0x87, 0xa9, 0xb1, 0xa4, 0x13, 0x22, 0xaa, 0x69, 0xc0, 0xa6, 0x5e, 0x3a, 0x8a, 0x69, 0xe1, + 0xa5, 0x5c, 0x10, 0x2f, 0xe7, 0x41, 0x16, 0xcb, 0xd8, 0x0f, 0x69, 0x3e, 0x28, 0xe4, 0x45, 0x5c, + 0x49, 0x22, 0xa2, 0xad, 0xe0, 0xdc, 0x61, 0x3b, 0x54, 0xa2, 0x20, 0x59, 0x7b, 0xcf, 0x58, 0xc3, + 0xf5, 0xc2, 0xb5, 0x13, 0xd3, 0xec, 0x10, 0xab, 0x89, 0x2f, 0xec, 0x92, 0x16, 0x92, 0x64, 0xed, + 0xc6, 0x0f, 0xc6, 0x4c, 0xb3, 0x43, 0xac, 0x26, 0x9c, 0x73, 0xdc, 0x8a, 0x48, 0xca, 0x59, 0xa9, + 0x24, 0xc9, 0xb6, 0xf7, 0xb9, 0xdf, 0x41, 0xdd, 0x83, 0xf0, 0x68, 0xbd, 0x70, 0x5b, 0xe2, 0xcf, + 0x5a, 0xf4, 0x6f, 0x77, 0xd8, 0x9b, 0x2d, 0xc1, 0x9a, 0x2f, 0xc1, 0xda, 0x2c, 0x01, 0x3d, 0x68, + 0x40, 0xcf, 0x1a, 0xd0, 0xab, 0x06, 0x34, 0xd3, 0x80, 0xe6, 0x1a, 0xd0, 0xbb, 0x06, 0xf4, 0xa1, + 0xc1, 0xda, 0x68, 0x40, 0x8f, 0x2b, 0xb0, 0x66, 0x2b, 0xb0, 0xe6, 0x2b, 0xb0, 0xae, 0x31, 0xa9, + 0x32, 0x59, 0x4f, 0x49, 0x6c, 0xf3, 0xc6, 0xa7, 0x9f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x29, 0x1e, + 0x87, 0x1e, 0x2d, 0x02, 0x00, 0x00, } func (this *SupplyESDT) Equal(that interface{}) bool { diff --git a/process/sync/shardblock.go b/process/sync/shardblock.go index a315358b6c0..12530a7cdfc 100644 --- a/process/sync/shardblock.go +++ b/process/sync/shardblock.go @@ -143,6 +143,7 @@ func (boot *ShardBootstrap) StartSyncingBlocks() error { } go boot.syncBlocks(ctx) + return nil } diff --git a/process/sync/trieIterators/tokensSuppliesComputer.go b/process/sync/trieIterators/tokensSuppliesProcessor.go similarity index 98% rename from process/sync/trieIterators/tokensSuppliesComputer.go rename to process/sync/trieIterators/tokensSuppliesProcessor.go index 25ad5ecd0d1..632115eb214 100644 --- a/process/sync/trieIterators/tokensSuppliesComputer.go +++ b/process/sync/trieIterators/tokensSuppliesProcessor.go @@ -111,10 +111,7 @@ func (t *tokensSuppliesProcessor) addToBalance(tokenID []byte, nonce uint64, val tokenIDStr := string(tokenID) if nonce > 0 { t.putInSuppliesMap(string(tokenID), value) // put for collection 
as well - nonceStr := fmt.Sprintf("%d", nonce) - if len(nonceStr)%2 != 0 { - nonceStr = "0" + nonceStr - } + nonceStr := hex.EncodeToString(big.NewInt(int64(nonce)).Bytes()) tokenIDStr += fmt.Sprintf("-%s", nonceStr) } diff --git a/process/sync/trieIterators/tokensSuppliesComputer_test.go b/process/sync/trieIterators/tokensSuppliesProcessor_test.go similarity index 74% rename from process/sync/trieIterators/tokensSuppliesComputer_test.go rename to process/sync/trieIterators/tokensSuppliesProcessor_test.go index ff46c82b79f..21eaf09f919 100644 --- a/process/sync/trieIterators/tokensSuppliesComputer_test.go +++ b/process/sync/trieIterators/tokensSuppliesProcessor_test.go @@ -108,6 +108,76 @@ func TestTokensSuppliesProcessor_HandleTrieAccountIteration(t *testing.T) { require.NoError(t, err) }) + t.Run("cannot get all leaves on channel", func(t *testing.T) { + t.Parallel() + + args := getTokensSuppliesProcessorArgs() + tsp, _ := NewTokensSuppliesProcessor(args) + + expectedErr := errors.New("error") + userAcc, _ := state.NewUserAccount([]byte("addr")) + userAcc.SetRootHash([]byte("rootHash")) + userAcc.SetDataTrie(&trie.TrieStub{ + GetAllLeavesOnChannelCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte, keyBuilder common.KeyBuilder) error { + return expectedErr + }, + }) + + err := tsp.HandleTrieAccountIteration(userAcc) + require.ErrorIs(t, err, expectedErr) + require.Empty(t, tsp.tokensSupplies) + }) + + t.Run("should ignore non-token keys", func(t *testing.T) { + t.Parallel() + + args := getTokensSuppliesProcessorArgs() + tsp, _ := NewTokensSuppliesProcessor(args) + + userAcc, _ := state.NewUserAccount([]byte("addr")) + userAcc.SetRootHash([]byte("rootHash")) + userAcc.SetDataTrie(&trie.TrieStub{ + GetAllLeavesOnChannelCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte, keyBuilder common.KeyBuilder) error { + leavesChannels.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("not 
a token key"), []byte("not a token value")) + + close(leavesChannels.LeavesChan) + return nil + }, + }) + + err := tsp.HandleTrieAccountIteration(userAcc) + require.NoError(t, err) + require.Empty(t, tsp.tokensSupplies) + }) + + t.Run("should return error if trie value cannot be extracted", func(t *testing.T) { + t.Parallel() + + args := getTokensSuppliesProcessorArgs() + tsp, _ := NewTokensSuppliesProcessor(args) + + userAcc, _ := state.NewUserAccount([]byte("addr")) + userAcc.SetRootHash([]byte("rootHash")) + userAcc.SetDataTrie(&trie.TrieStub{ + GetAllLeavesOnChannelCalled: func(leavesChannels *common.TrieIteratorChannels, ctx context.Context, rootHash []byte, keyBuilder common.KeyBuilder) error { + esToken := &esdt.ESDigitalToken{ + Value: big.NewInt(37), + } + esBytes, _ := args.Marshaller.Marshal(esToken) + tknKey := []byte("ELRONDesdtTKN-00aacc") + leavesChannels.LeavesChan <- keyValStorage.NewKeyValStorage(tknKey, esBytes) + + close(leavesChannels.LeavesChan) + return nil + }, + }) + + err := tsp.HandleTrieAccountIteration(userAcc) + require.Error(t, err) + require.Contains(t, err.Error(), "suffix is not present or the position is incorrect") + require.Empty(t, tsp.tokensSupplies) + }) + t.Run("should not save tokens from the system account", func(t *testing.T) { t.Parallel() @@ -178,7 +248,7 @@ func TestTokensSuppliesProcessor_HandleTrieAccountIteration(t *testing.T) { require.NoError(t, err) expectedSupplies := map[string]*big.Int{ - "SFT-00aabb-37": big.NewInt(2), + "SFT-00aabb-25": big.NewInt(2), "SFT-00aabb": big.NewInt(2), "TKN-00aacc": big.NewInt(74), } diff --git a/process/sync/trieIterators/trieAccountsIterator_test.go b/process/sync/trieIterators/trieAccountsIterator_test.go index 3ab76944ee7..8eb00d7a7f3 100644 --- a/process/sync/trieIterators/trieAccountsIterator_test.go +++ b/process/sync/trieIterators/trieAccountsIterator_test.go @@ -139,7 +139,88 @@ func TestTrieAccountsIterator_Process(t *testing.T) { require.Equal(t, expectedErr, err) }) - 
t.Run("should work without handlers", func(t *testing.T) { + t.Run("should ignore non-accounts leaves", func(t *testing.T) { + t.Parallel() + + args := getTrieAccountsIteratorArgs() + args.Accounts = &stateMock.AccountsStub{ + RootHashCalled: func() ([]byte, error) { + return []byte("rootHash"), nil + }, + GetAllLeavesCalled: func(iter *common.TrieIteratorChannels, _ context.Context, _ []byte) error { + userAcc := &stateMock.AccountWrapMock{ + RootHash: []byte("rootHash"), + } + userAccBytes, _ := args.Marshaller.Marshal(userAcc) + iter.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("addr"), userAccBytes) + iter.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("non-addr"), []byte("not an account")) + close(iter.LeavesChan) + return nil + }, + GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { + return &stateMock.AccountWrapMock{}, nil + }, + } + tai, _ := NewTrieAccountsIterator(args) + + err := tai.Process(dummyIterator) + require.NoError(t, err) + }) + + t.Run("should ignore user account without root hash", func(t *testing.T) { + t.Parallel() + + args := getTrieAccountsIteratorArgs() + args.Accounts = &stateMock.AccountsStub{ + RootHashCalled: func() ([]byte, error) { + return []byte("rootHash"), nil + }, + GetAllLeavesCalled: func(iter *common.TrieIteratorChannels, _ context.Context, _ []byte) error { + userAcc := &stateMock.AccountWrapMock{ + RootHash: nil, + } + userAccBytes, _ := args.Marshaller.Marshal(userAcc) + iter.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("addr"), userAccBytes) + close(iter.LeavesChan) + return nil + }, + GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { + return &stateMock.AccountWrapMock{}, nil + }, + } + tai, _ := NewTrieAccountsIterator(args) + + err := tai.Process(dummyIterator) + require.NoError(t, err) + }) + + t.Run("should ignore accounts that cannot be casted", func(t *testing.T) { + t.Parallel() + + args := 
getTrieAccountsIteratorArgs() + args.Accounts = &stateMock.AccountsStub{ + RootHashCalled: func() ([]byte, error) { + return []byte("rootHash"), nil + }, + GetAllLeavesCalled: func(iter *common.TrieIteratorChannels, _ context.Context, _ []byte) error { + userAcc := state.NewEmptyUserAccount() + userAcc.SetRootHash([]byte("root")) + userAccBytes, _ := args.Marshaller.Marshal(userAcc) + iter.LeavesChan <- keyValStorage.NewKeyValStorage([]byte("addr"), userAccBytes) + close(iter.LeavesChan) + return nil + }, + GetExistingAccountCalled: func(addressContainer []byte) (vmcommon.AccountHandler, error) { + return state.NewEmptyPeerAccount(), nil + }, + } + tai, _ := NewTrieAccountsIterator(args) + + err := tai.Process(dummyIterator) + require.NoError(t, err) + }) + + t.Run("should work with dummy handler", func(t *testing.T) { t.Parallel() args := getTrieAccountsIteratorArgs() From d588d341f0bfa7e1fd9b579a8e761a53dcc347c3 Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Fri, 5 May 2023 17:39:47 +0300 Subject: [PATCH 139/221] MX-13965: fixes --- cmd/node/config/prefs.toml | 8 ++++---- config/tomlConfig_test.go | 10 ++++++++++ factory/core/coreComponents.go | 1 + process/block/baseProcess.go | 8 +++++++- process/block/metablock.go | 1 + process/block/shardblock.go | 1 + 6 files changed, 24 insertions(+), 5 deletions(-) diff --git a/cmd/node/config/prefs.toml b/cmd/node/config/prefs.toml index 6a52f8620b2..093ff4c652d 100644 --- a/cmd/node/config/prefs.toml +++ b/cmd/node/config/prefs.toml @@ -53,10 +53,10 @@ # BlockProcessingCutoff can be used to stop processing blocks at a certain round, nonce or epoch. # The Rest API endpoints will return values for the cutoff time. # This can be useful for snapshotting different stuff. -[[BlockProcessingCutoff]] - Enabled = false - Type = "round" # possible values: "round", "nonce", or "epoch". For epoch, it resembles the start of the specified epoch - Value = 0 # the value of the cutoff. 
For example, if Type is "round", then Value is the round number to stop processing at +[BlockProcessingCutoff] + Enabled = false + Type = "round" # possible values: "round", "nonce", or "epoch". For epoch, it resembles the start of the specified epoch + Value = 20 # the value of the cutoff. For example, if Type is "round", then Value is the round number to stop processing at # NamedIdentity represents an identity that runs nodes on the multikey # There can be multiple identities set on the same node, each one of them having different bls keys, just by duplicating the NamedIdentity diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go index d7a3b1c7170..11cbb903993 100644 --- a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -324,6 +324,11 @@ func TestTomlPreferencesParser(t *testing.T) { RedundancyLevel: redundancyLevel, PreferredConnections: []string{prefPubKey0, prefPubKey1}, }, + BlockProcessingCutoff: BlockProcessingCutoffConfig{ + Enabled: true, + Type: "round", + Value: 55, + }, } testString := ` @@ -336,6 +341,11 @@ func TestTomlPreferencesParser(t *testing.T) { "` + prefPubKey0 + `", "` + prefPubKey1 + `" ] + +[BlockProcessingCutoff] + Enabled = true + Type = "round" + Value = 55 ` cfg := Preferences{} diff --git a/factory/core/coreComponents.go b/factory/core/coreComponents.go index 33310be51b4..1bf74e428d4 100644 --- a/factory/core/coreComponents.go +++ b/factory/core/coreComponents.go @@ -224,6 +224,7 @@ func (ccf *coreComponentsFactory) Create() (*coreComponents, error) { } alarmScheduler := alarm.NewAlarmScheduler() + // TODO: disable watchdog if block processing cutoff is enabled watchdogTimer, err := watchdog.NewWatchdog(alarmScheduler, ccf.chanStopNodeProcess, log) if err != nil { return nil, err diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index f8e8e4f8dea..275dd202155 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -2084,7 +2084,13 @@ func (bp *baseProcessor) 
handleBlockProcessingCutoff(header data.HeaderHandler) } cutOffFunction := func(printArgs ...interface{}) { - log.Info("cutting off the block processing. The node will not advance", printArgs) + log.Info("cutting off the block processing. The node will not advance", printArgs...) + go func() { + for { + time.Sleep(time.Minute) + log.Info("node is in block processing cut-off mode", printArgs...) + } + }() neverEndingChannel := make(chan struct{}) <-neverEndingChannel } diff --git a/process/block/metablock.go b/process/block/metablock.go index 3f16f81d3d0..b02ac8618f3 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -133,6 +133,7 @@ func NewMetaProcessor(arguments ArgMetaProcessor) (*metaProcessor, error) { processDebugger: processDebugger, outportDataProvider: arguments.OutportDataProvider, processStatusHandler: arguments.CoreComponents.ProcessStatusHandler(), + blockProcessingCutoffConfig: arguments.PrefsConfig.BlockProcessingCutoff, } mp := metaProcessor{ diff --git a/process/block/shardblock.go b/process/block/shardblock.go index c68b57e912e..44cf3102026 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -118,6 +118,7 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { processDebugger: processDebugger, outportDataProvider: arguments.OutportDataProvider, processStatusHandler: arguments.CoreComponents.ProcessStatusHandler(), + blockProcessingCutoffConfig: arguments.PrefsConfig.BlockProcessingCutoff, } sp := shardProcessor{ From e5409573d53bb796b3a57521a966b44fc0bfc1bb Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Mon, 8 May 2023 12:31:53 +0300 Subject: [PATCH 140/221] reset watchdog timeout on MissingDataTrieNodeFound() call --- state/syncer/userAccountSyncer_test.go | 2 ++ state/syncer/userAccountsSyncer.go | 2 ++ 2 files changed, 4 insertions(+) diff --git a/state/syncer/userAccountSyncer_test.go b/state/syncer/userAccountSyncer_test.go index c1ff8bf7462..904ce00b326 
100644 --- a/state/syncer/userAccountSyncer_test.go +++ b/state/syncer/userAccountSyncer_test.go @@ -100,6 +100,8 @@ func TestUserAccountsSyncer_MissingDataTrieNodeFound(t *testing.T) { } syncer, _ := NewUserAccountsSyncer(args) + // test that timeout watchdog is reset + time.Sleep(args.Timeout * 2) syncer.MissingDataTrieNodeFound(rootHash) assert.Equal(t, 1, numNodesSynced) diff --git a/state/syncer/userAccountsSyncer.go b/state/syncer/userAccountsSyncer.go index a5890c24e97..9249ce21ce5 100644 --- a/state/syncer/userAccountsSyncer.go +++ b/state/syncer/userAccountsSyncer.go @@ -339,6 +339,8 @@ func (u *userAccountsSyncer) MissingDataTrieNodeFound(hash []byte) { u.mutex.Lock() defer u.mutex.Unlock() + u.timeoutHandler.ResetWatchdog() + ctx, cancel := context.WithCancel(context.Background()) defer func() { u.cacher.Clear() From 618f1f30a9de936c02da6eea5bd0b4bc0a37ab95 Mon Sep 17 00:00:00 2001 From: robertsasu Date: Mon, 8 May 2023 15:01:16 +0300 Subject: [PATCH 141/221] fixes after review --- .../config/systemSmartContractsConfig.toml | 2 +- config/systemSmartContractsConfig.go | 6 ++--- epochStart/metachain/systemSCs_test.go | 2 +- genesis/process/genesisBlockCreator_test.go | 2 +- .../multiShard/hardFork/hardFork_test.go | 2 +- integrationTests/testInitializer.go | 4 +-- integrationTests/testProcessorNode.go | 4 +-- integrationTests/vm/testInitializer.go | 2 +- .../metachain/vmContainerFactory_test.go | 2 +- testscommon/components/components.go | 2 +- vm/factory/systemSCFactory.go | 6 ++--- vm/factory/systemSCFactory_test.go | 2 +- vm/systemSmartContracts/governance.go | 18 +++++-------- vm/systemSmartContracts/governance_test.go | 26 +++++++++---------- 14 files changed, 36 insertions(+), 44 deletions(-) diff --git a/cmd/node/config/systemSmartContractsConfig.toml b/cmd/node/config/systemSmartContractsConfig.toml index 33d7713f3d7..512596dff05 100644 --- a/cmd/node/config/systemSmartContractsConfig.toml +++ b/cmd/node/config/systemSmartContractsConfig.toml @@ 
-17,7 +17,7 @@ OwnerAddress = "erd1fpkcgel4gcmh8zqqdt043yfcn5tyx8373kg6q2qmkxzu4dqamc0swts65c" [GovernanceSystemSCConfig] - ChangeConfigAddress = "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80" #should use a multisign contract instead of a wallet address + OwnerAddress = "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80" #should use a multisign contract instead of a wallet address [GovernanceSystemSCConfig.V1] NumNodes = 500 ProposalCost = "5000000000000000000" #5 eGLD diff --git a/config/systemSmartContractsConfig.go b/config/systemSmartContractsConfig.go index 0d991492ddf..d48027574eb 100644 --- a/config/systemSmartContractsConfig.go +++ b/config/systemSmartContractsConfig.go @@ -53,9 +53,9 @@ type GovernanceSystemSCConfigActive struct { // GovernanceSystemSCConfig defines the set of constants to initialize the governance system smart contract type GovernanceSystemSCConfig struct { - V1 GovernanceSystemSCConfigV1 - Active GovernanceSystemSCConfigActive - ChangeConfigAddress string + V1 GovernanceSystemSCConfigV1 + Active GovernanceSystemSCConfigActive + OwnerAddress string } // DelegationManagerSystemSCConfig defines a set of constants to initialize the delegation manager system smart contract diff --git a/epochStart/metachain/systemSCs_test.go b/epochStart/metachain/systemSCs_test.go index 3388d7fb48b..73687c7f6a9 100644 --- a/epochStart/metachain/systemSCs_test.go +++ b/epochStart/metachain/systemSCs_test.go @@ -998,7 +998,7 @@ func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEp MinVetoThreshold: 0.5, LostProposalFee: "1", }, - ChangeConfigAddress: "3132333435363738393031323334353637383930313233343536373839303234", + OwnerAddress: "3132333435363738393031323334353637383930313233343536373839303234", }, StakingSystemSCConfig: config.StakingSystemSCConfig{ GenesisNodePrice: "1000", diff --git a/genesis/process/genesisBlockCreator_test.go b/genesis/process/genesisBlockCreator_test.go index 
0ef9aff4e14..030308bd007 100644 --- a/genesis/process/genesisBlockCreator_test.go +++ b/genesis/process/genesisBlockCreator_test.go @@ -107,7 +107,7 @@ func createMockArgument( MinVetoThreshold: 0.5, LostProposalFee: "1", }, - ChangeConfigAddress: "3132333435363738393031323334353637383930313233343536373839303234", + OwnerAddress: "3132333435363738393031323334353637383930313233343536373839303234", }, StakingSystemSCConfig: config.StakingSystemSCConfig{ GenesisNodePrice: nodePrice.Text(10), diff --git a/integrationTests/multiShard/hardFork/hardFork_test.go b/integrationTests/multiShard/hardFork/hardFork_test.go index 1d6980f4132..69a5ccfbdcf 100644 --- a/integrationTests/multiShard/hardFork/hardFork_test.go +++ b/integrationTests/multiShard/hardFork/hardFork_test.go @@ -445,7 +445,7 @@ func hardForkImport( MinVetoThreshold: 0.5, LostProposalFee: "1", }, - ChangeConfigAddress: integrationTests.DelegationManagerConfigChangeAddress, + OwnerAddress: integrationTests.DelegationManagerConfigChangeAddress, }, StakingSystemSCConfig: config.StakingSystemSCConfig{ GenesisNodePrice: "1000", diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 567be527904..8b495d9ab87 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -667,7 +667,7 @@ func CreateFullGenesisBlocks( OwnerAddress: "aaaaaa", }, GovernanceSystemSCConfig: config.GovernanceSystemSCConfig{ - ChangeConfigAddress: DelegationManagerConfigChangeAddress, + OwnerAddress: DelegationManagerConfigChangeAddress, V1: config.GovernanceSystemSCConfigV1{ ProposalCost: "500", }, @@ -786,7 +786,7 @@ func CreateGenesisMetaBlock( MinVetoThreshold: 0.5, LostProposalFee: "1", }, - ChangeConfigAddress: DelegationManagerConfigChangeAddress, + OwnerAddress: DelegationManagerConfigChangeAddress, }, StakingSystemSCConfig: config.StakingSystemSCConfig{ GenesisNodePrice: "1000", diff --git a/integrationTests/testProcessorNode.go 
b/integrationTests/testProcessorNode.go index a37fc24d45d..b9f992cf42b 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -863,7 +863,7 @@ func (tpn *TestProcessorNode) createFullSCQueryService(gasMap map[string]map[str MinVetoThreshold: 0.5, LostProposalFee: "1", }, - ChangeConfigAddress: DelegationManagerConfigChangeAddress, + OwnerAddress: DelegationManagerConfigChangeAddress, }, StakingSystemSCConfig: config.StakingSystemSCConfig{ GenesisNodePrice: "1000", @@ -1765,7 +1765,7 @@ func (tpn *TestProcessorNode) initMetaInnerProcessors(gasMap map[string]map[stri MinVetoThreshold: 0.5, LostProposalFee: "1", }, - ChangeConfigAddress: DelegationManagerConfigChangeAddress, + OwnerAddress: DelegationManagerConfigChangeAddress, }, StakingSystemSCConfig: config.StakingSystemSCConfig{ GenesisNodePrice: "1000", diff --git a/integrationTests/vm/testInitializer.go b/integrationTests/vm/testInitializer.go index cb2d1d37a42..54d179ef161 100644 --- a/integrationTests/vm/testInitializer.go +++ b/integrationTests/vm/testInitializer.go @@ -710,7 +710,7 @@ func createSystemSCConfig() *config.SystemSmartContractsConfig { MinVetoThreshold: 0.5, LostProposalFee: "1", }, - ChangeConfigAddress: "3132333435363738393031323334353637383930313233343536373839303234", + OwnerAddress: "3132333435363738393031323334353637383930313233343536373839303234", }, StakingSystemSCConfig: config.StakingSystemSCConfig{ GenesisNodePrice: "2500000000000000000000", diff --git a/process/factory/metachain/vmContainerFactory_test.go b/process/factory/metachain/vmContainerFactory_test.go index 1032ea2604e..7b182547752 100644 --- a/process/factory/metachain/vmContainerFactory_test.go +++ b/process/factory/metachain/vmContainerFactory_test.go @@ -316,7 +316,7 @@ func TestVmContainerFactory_Create(t *testing.T) { MinVetoThreshold: 0.5, LostProposalFee: "1", }, - ChangeConfigAddress: "3132333435363738393031323334353637383930313233343536373839303234", + OwnerAddress: 
"3132333435363738393031323334353637383930313233343536373839303234", }, StakingSystemSCConfig: config.StakingSystemSCConfig{ GenesisNodePrice: "1000", diff --git a/testscommon/components/components.go b/testscommon/components/components.go index 83f44cb8cbf..9eea0414995 100644 --- a/testscommon/components/components.go +++ b/testscommon/components/components.go @@ -540,7 +540,7 @@ func GetProcessArgs( MinVetoThreshold: 0.5, LostProposalFee: "1", }, - ChangeConfigAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", + OwnerAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", }, StakingSystemSCConfig: config.StakingSystemSCConfig{ GenesisNodePrice: "2500000000000000000000", diff --git a/vm/factory/systemSCFactory.go b/vm/factory/systemSCFactory.go index d5272aed81a..0cccff2ce4b 100644 --- a/vm/factory/systemSCFactory.go +++ b/vm/factory/systemSCFactory.go @@ -219,9 +219,9 @@ func (scf *systemSCFactory) createESDTContract() (vm.SystemSmartContract, error) } func (scf *systemSCFactory) createGovernanceContract() (vm.SystemSmartContract, error) { - configChangeAddress, err := scf.addressPubKeyConverter.Decode(scf.systemSCConfig.GovernanceSystemSCConfig.ChangeConfigAddress) + ownerAddress, err := scf.addressPubKeyConverter.Decode(scf.systemSCConfig.GovernanceSystemSCConfig.OwnerAddress) if err != nil { - return nil, fmt.Errorf("%w for GovernanceSystemSCConfig.ChangeConfigAddress in systemSCFactory", vm.ErrInvalidAddress) + return nil, fmt.Errorf("%w for GovernanceSystemSCConfig.OwnerAddress in systemSCFactory", vm.ErrInvalidAddress) } argsGovernance := systemSmartContracts.ArgsNewGovernanceContract{ @@ -235,7 +235,7 @@ func (scf *systemSCFactory) createGovernanceContract() (vm.SystemSmartContract, ValidatorSCAddress: vm.ValidatorSCAddress, EnableEpochsHandler: scf.enableEpochsHandler, UnBondPeriodInEpochs: scf.systemSCConfig.StakingSystemSCConfig.UnBondPeriodInEpochs, - ConfigChangeAddress: configChangeAddress, + OwnerAddress: 
ownerAddress, } governance, err := systemSmartContracts.NewGovernanceContract(argsGovernance) return governance, err diff --git a/vm/factory/systemSCFactory_test.go b/vm/factory/systemSCFactory_test.go index 7f78491d429..5ea4e2b777e 100644 --- a/vm/factory/systemSCFactory_test.go +++ b/vm/factory/systemSCFactory_test.go @@ -50,7 +50,7 @@ func createMockNewSystemScFactoryArgs() ArgsNewSystemSCFactory { MinVetoThreshold: 0.5, LostProposalFee: "1", }, - ChangeConfigAddress: "3132333435363738393031323334353637383930313233343536373839303234", + OwnerAddress: "3132333435363738393031323334353637383930313233343536373839303234", }, StakingSystemSCConfig: config.StakingSystemSCConfig{ GenesisNodePrice: "1000", diff --git a/vm/systemSmartContracts/governance.go b/vm/systemSmartContracts/governance.go index bd6064e82b8..3e1dd4bafb5 100644 --- a/vm/systemSmartContracts/governance.go +++ b/vm/systemSmartContracts/governance.go @@ -38,7 +38,7 @@ type ArgsNewGovernanceContract struct { GovernanceSCAddress []byte DelegationMgrSCAddress []byte ValidatorSCAddress []byte - ConfigChangeAddress []byte + OwnerAddress []byte UnBondPeriodInEpochs uint32 EnableEpochsHandler common.EnableEpochsHandler } @@ -51,7 +51,6 @@ type governanceContract struct { governanceSCAddress []byte delegationMgrSCAddress []byte validatorSCAddress []byte - changeConfigAddress []byte marshalizer marshal.Marshalizer hasher hashing.Hasher governanceConfig config.GovernanceSystemSCConfig @@ -89,7 +88,7 @@ func NewGovernanceContract(args ArgsNewGovernanceContract) (*governanceContract, if len(args.GovernanceSCAddress) < 1 { return nil, fmt.Errorf("%w for governance sc address", vm.ErrInvalidAddress) } - if len(args.ConfigChangeAddress) < 1 { + if len(args.OwnerAddress) < 1 { return nil, fmt.Errorf("%w for change config address", vm.ErrInvalidAddress) } @@ -97,7 +96,7 @@ func NewGovernanceContract(args ArgsNewGovernanceContract) (*governanceContract, eei: args.Eei, gasCost: args.GasCost, baseProposalCost: 
baseProposalCost, - ownerAddress: nil, + ownerAddress: args.OwnerAddress, governanceSCAddress: args.GovernanceSCAddress, delegationMgrSCAddress: args.DelegationMgrSCAddress, validatorSCAddress: args.ValidatorSCAddress, @@ -106,7 +105,6 @@ func NewGovernanceContract(args ArgsNewGovernanceContract) (*governanceContract, governanceConfig: args.GovernanceConfig, enableEpochsHandler: args.EnableEpochsHandler, unBondPeriodInEpochs: args.UnBondPeriodInEpochs, - changeConfigAddress: args.ConfigChangeAddress, } return g, nil @@ -178,8 +176,6 @@ func (g *governanceContract) init(args *vmcommon.ContractCallInput) vmcommon.Ret g.eei.SetStorage([]byte(governanceConfigKey), marshaledData) g.eei.SetStorage([]byte(ownerKey), args.CallerAddr) - g.ownerAddress = make([]byte, 0, len(args.CallerAddr)) - g.ownerAddress = append(g.ownerAddress, args.CallerAddr...) return vmcommon.Ok } @@ -201,8 +197,6 @@ func (g *governanceContract) initV2(args *vmcommon.ContractCallInput) vmcommon.R } g.eei.SetStorage([]byte(ownerKey), args.CallerAddr) - g.ownerAddress = make([]byte, 0, len(args.CallerAddr)) - g.ownerAddress = append(g.ownerAddress, args.CallerAddr...) 
return vmcommon.Ok } @@ -213,7 +207,7 @@ func (g *governanceContract) initV2(args *vmcommon.ContractCallInput) vmcommon.R // args.Arguments[2] - minVeto - 0-10000 - represents percentage // args.Arguments[3] - minPass - 0-10000 - represents percentage func (g *governanceContract) changeConfig(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { - if !bytes.Equal(g.changeConfigAddress, args.CallerAddr) { + if !bytes.Equal(g.ownerAddress, args.CallerAddr) { g.eei.AddReturnMessage("changeConfig can be called only by owner") return vmcommon.UserError } @@ -669,10 +663,10 @@ func (g *governanceContract) claimAccumulatedFees(args *vmcommon.ContractCallInp return vmcommon.UserError } if len(args.Arguments) != 0 { - g.eei.AddReturnMessage("invalid number of arguments expected 0") + g.eei.AddReturnMessage("invalid number of arguments, expected 0") return vmcommon.UserError } - if !bytes.Equal(args.CallerAddr, g.changeConfigAddress) { + if !bytes.Equal(args.CallerAddr, g.ownerAddress) { g.eei.AddReturnMessage("can be called only by owner") return vmcommon.UserError } diff --git a/vm/systemSmartContracts/governance_test.go b/vm/systemSmartContracts/governance_test.go index 62e5f2b0fea..9375625c01e 100644 --- a/vm/systemSmartContracts/governance_test.go +++ b/vm/systemSmartContracts/governance_test.go @@ -48,14 +48,14 @@ func createArgsWithEEI(eei vm.SystemEI) ArgsNewGovernanceContract { MinVetoThreshold: 0.5, LostProposalFee: "1", }, - ChangeConfigAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", + OwnerAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", }, Marshalizer: &mock.MarshalizerMock{}, Hasher: &hashingMocks.HasherMock{}, GovernanceSCAddress: vm.GovernanceSCAddress, DelegationMgrSCAddress: vm.DelegationManagerSCAddress, ValidatorSCAddress: vm.ValidatorSCAddress, - ConfigChangeAddress: bytes.Repeat([]byte{1}, 32), + OwnerAddress: bytes.Repeat([]byte{1}, 32), UnBondPeriodInEpochs: 10, EnableEpochsHandler: 
&testscommon.EnableEpochsHandlerStub{ IsGovernanceFlagEnabledField: true, @@ -257,7 +257,6 @@ func TestGovernanceContract_ExecuteInit(t *testing.T) { retCode := gsc.Execute(callInput) require.Equal(t, vmcommon.Ok, retCode) - require.Equal(t, gsc.ownerAddress, callerAddr) } func TestGovernanceContract_ExecuteInitV2InvalidCaller(t *testing.T) { @@ -312,7 +311,6 @@ func TestGovernanceContract_ExecuteInitV2(t *testing.T) { retCode = gsc.Execute(callInput) require.Equal(t, vmcommon.Ok, retCode) - require.Equal(t, gsc.ownerAddress, vm.GovernanceSCAddress) } func TestGovernanceContract_ChangeConfig(t *testing.T) { @@ -350,7 +348,7 @@ func TestGovernanceContract_ChangeConfig(t *testing.T) { } initInput := createVMInput(zero, "initV2", vm.GovernanceSCAddress, vm.GovernanceSCAddress, nil) _ = gsc.Execute(initInput) - callInput := createVMInput(zero, "changeConfig", args.ConfigChangeAddress, vm.GovernanceSCAddress, callInputArgs) + callInput := createVMInput(zero, "changeConfig", args.OwnerAddress, vm.GovernanceSCAddress, callInputArgs) retCode := gsc.Execute(callInput) require.Equal(t, vmcommon.Ok, retCode) @@ -394,7 +392,7 @@ func TestGovernanceContract_ChangeConfigWrongCallValue(t *testing.T) { initInput := createVMInput(zero, "initV2", vm.GovernanceSCAddress, vm.GovernanceSCAddress, nil) _ = gsc.Execute(initInput) - callInput := createVMInput(big.NewInt(10), "changeConfig", args.ConfigChangeAddress, vm.GovernanceSCAddress, nil) + callInput := createVMInput(big.NewInt(10), "changeConfig", args.OwnerAddress, vm.GovernanceSCAddress, nil) retCode := gsc.Execute(callInput) require.Equal(t, vmcommon.UserError, retCode) @@ -417,7 +415,7 @@ func TestGovernanceContract_ChangeConfigWrongArgumentsLength(t *testing.T) { initInput := createVMInput(zero, "initV2", vm.GovernanceSCAddress, vm.GovernanceSCAddress, nil) _ = gsc.Execute(initInput) - callInput := createVMInput(zero, "changeConfig", args.ConfigChangeAddress, vm.GovernanceSCAddress, nil) + callInput := createVMInput(zero, 
"changeConfig", args.OwnerAddress, vm.GovernanceSCAddress, nil) retCode := gsc.Execute(callInput) require.Equal(t, vmcommon.UserError, retCode) @@ -447,7 +445,7 @@ func TestGovernanceContract_ChangeConfigInvalidParams(t *testing.T) { []byte("10"), []byte("5"), } - callInput := createVMInput(zero, "changeConfig", args.ConfigChangeAddress, vm.GovernanceSCAddress, callInputArgs) + callInput := createVMInput(zero, "changeConfig", args.OwnerAddress, vm.GovernanceSCAddress, callInputArgs) retCode := gsc.Execute(callInput) require.Equal(t, vmcommon.UserError, retCode) @@ -460,7 +458,7 @@ func TestGovernanceContract_ChangeConfigInvalidParams(t *testing.T) { []byte("10"), []byte("5"), } - callInput = createVMInput(zero, "changeConfig", args.ConfigChangeAddress, vm.GovernanceSCAddress, callInputArgs) + callInput = createVMInput(zero, "changeConfig", args.OwnerAddress, vm.GovernanceSCAddress, callInputArgs) retCode = gsc.Execute(callInput) require.Equal(t, vmcommon.UserError, retCode) @@ -473,7 +471,7 @@ func TestGovernanceContract_ChangeConfigInvalidParams(t *testing.T) { []byte("invalid"), []byte("5"), } - callInput = createVMInput(zero, "changeConfig", args.ConfigChangeAddress, vm.GovernanceSCAddress, callInputArgs) + callInput = createVMInput(zero, "changeConfig", args.OwnerAddress, vm.GovernanceSCAddress, callInputArgs) retCode = gsc.Execute(callInput) require.Equal(t, vmcommon.UserError, retCode) @@ -486,7 +484,7 @@ func TestGovernanceContract_ChangeConfigInvalidParams(t *testing.T) { []byte("10"), []byte("invalid"), } - callInput = createVMInput(zero, "changeConfig", args.ConfigChangeAddress, vm.GovernanceSCAddress, callInputArgs) + callInput = createVMInput(zero, "changeConfig", args.OwnerAddress, vm.GovernanceSCAddress, callInputArgs) retCode = gsc.Execute(callInput) require.Equal(t, vmcommon.UserError, retCode) @@ -523,7 +521,7 @@ func TestGovernanceContract_ChangeConfigGetConfigErr(t *testing.T) { []byte("10"), []byte("10"), } - callInput := createVMInput(zero, 
"changeConfig", args.ConfigChangeAddress, vm.GovernanceSCAddress, callInputArgs) + callInput := createVMInput(zero, "changeConfig", args.OwnerAddress, vm.GovernanceSCAddress, callInputArgs) retCode := gsc.Execute(callInput) require.Equal(t, vmcommon.UserError, retCode) @@ -1814,7 +1812,7 @@ func TestGovernanceContract_ClaimAccumulatedFees(t *testing.T) { callInput.CallValue = big.NewInt(0) retCode = gsc.Execute(callInput) require.Equal(t, vmcommon.UserError, retCode) - require.True(t, strings.Contains(eei.GetReturnMessage(), "invalid number of arguments expected 0")) + require.True(t, strings.Contains(eei.GetReturnMessage(), "invalid number of arguments, expected 0")) callInput.Arguments = [][]byte{} retCode = gsc.Execute(callInput) @@ -1822,7 +1820,7 @@ func TestGovernanceContract_ClaimAccumulatedFees(t *testing.T) { require.True(t, strings.Contains(eei.GetReturnMessage(), "can be called only by owner")) gsc.gasCost.MetaChainSystemSCsCost.CloseProposal = 100 - callInput.CallerAddr = gsc.changeConfigAddress + callInput.CallerAddr = gsc.ownerAddress retCode = gsc.Execute(callInput) require.Equal(t, vmcommon.OutOfGas, retCode) require.True(t, strings.Contains(eei.GetReturnMessage(), "not enough gas")) From 3748c5413e57b352c89b3220478d7c038e65a3a1 Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Mon, 8 May 2023 16:21:22 +0300 Subject: [PATCH 142/221] fixes and additions --- cmd/node/config/prefs.toml | 18 +++-- cmd/node/flags.go | 11 +++ common/constants.go | 10 +++ config/prefsConfig.go | 7 +- config/tomlConfig_test.go | 10 +-- process/block/baseProcess.go | 95 ++++++++++++++++++++------ process/block/baseProcess_test.go | 107 ++++++++++++++++++++++++++++++ process/block/export_test.go | 13 +++- process/block/metablock.go | 12 +++- process/block/shardblock.go | 12 +++- process/errors.go | 6 ++ 11 files changed, 263 insertions(+), 38 deletions(-) diff --git a/cmd/node/config/prefs.toml b/cmd/node/config/prefs.toml index 093ff4c652d..ed8cd075f97 100644 --- 
a/cmd/node/config/prefs.toml +++ b/cmd/node/config/prefs.toml @@ -51,12 +51,20 @@ #] # BlockProcessingCutoff can be used to stop processing blocks at a certain round, nonce or epoch. -# The Rest API endpoints will return values for the cutoff time. -# This can be useful for snapshotting different stuff. +# This can be useful for snapshotting different stuff and also for debugging purposes. +# It can only be enabled by using the `--block-processing-cutoff` CLI flag when starting the node [BlockProcessingCutoff] - Enabled = false - Type = "round" # possible values: "round", "nonce", or "epoch". For epoch, it resembles the start of the specified epoch - Value = 20 # the value of the cutoff. For example, if Type is "round", then Value is the round number to stop processing at + # Mode represents the cutoff mode. possible values: "pause" or "processError". + # "pause" mode will halt the processing at the block with the given coordinates. Useful for snapshots/analytics + # "process-error" will return an error when processing the block with the given coordinates. Useful for debugging + Mode = "pause" + + # CutoffType represents the kind of coordinate to look after when cutting off the processing. + # Possible values: "round", "nonce", or "epoch" + CutoffType = "round" + + # The value of the cutoff. For example, if CutoffType is set to "round", and Value to 20, then the node will stop processing at round 20 + Value = 20 # NamedIdentity represents an identity that runs nodes on the multikey # There can be multiple identities set on the same node, each one of them having different bls keys, just by duplicating the NamedIdentity diff --git a/cmd/node/flags.go b/cmd/node/flags.go index 0cb32cb937e..31d2dbfcb3b 100644 --- a/cmd/node/flags.go +++ b/cmd/node/flags.go @@ -384,6 +384,12 @@ var ( Usage: "String flag for specifying the desired `operation mode`(s) of the node, resulting in altering some configuration values accordingly. 
Possible values are: snapshotless-observer, full-archive, db-lookup-extension, historical-balances or `\"\"` (empty). Multiple values can be separated via ,", Value: "", } + + // blockProcessingCutoff defines if the node should be started with the block processing cutoff feature + blockProcessingCutoff = cli.BoolFlag{ + Name: "block-processing-cutoff", + Usage: "Boolean option for enabling the block processing cutoff feature that is able to pause the processing at a given time. The configuration should be filled inside the `prefs.toml` file.", + } ) func getFlags() []cli.Flag { @@ -443,6 +449,7 @@ func getFlags() []cli.Flag { dbDirectory, logsDirectory, operationMode, + blockProcessingCutoff, } } @@ -503,6 +510,10 @@ func applyFlags(ctx *cli.Context, cfgs *config.Configs, flagsConfig *config.Cont if ctx.IsSet(fullArchive.Name) { cfgs.PreferencesConfig.Preferences.FullArchive = ctx.GlobalBool(fullArchive.Name) } + if ctx.IsSet(blockProcessingCutoff.Name) { + cfgs.PreferencesConfig.BlockProcessingCutoff.Enabled = true + cfgs.FlagsConfig.DisableConsensusWatchdog = true + } if ctx.IsSet(memoryUsageToCreateProfiles.Name) { cfgs.GeneralConfig.Health.MemoryUsageToCreateProfiles = int(ctx.GlobalUint64(memoryUsageToCreateProfiles.Name)) log.Info("setting a new value for the memoryUsageToCreateProfiles option", diff --git a/common/constants.go b/common/constants.go index 445ef4b7a47..4b8d5e2ec8d 100644 --- a/common/constants.go +++ b/common/constants.go @@ -823,6 +823,16 @@ const ( ApiOutputFormatProto ApiOutputFormat = 1 ) +// BlockProcessingCutoffMode represents the type to be used to identify the mode of the block processing cutoff +type BlockProcessingCutoffMode string + +const ( + // BlockProcessingCutoffModePause represents the mode where the node will pause the processing at the given coordinates + BlockProcessingCutoffModePause = "pause" + // BlockProcessingCutoffModeProcessError represents the mode where the node will reprocess with error the block at the given 
coordinates + BlockProcessingCutoffModeProcessError = "process-error" +) + // BlockProcessingCutoffType represents the type of cutoff potentially used in block processing type BlockProcessingCutoffType string diff --git a/config/prefsConfig.go b/config/prefsConfig.go index e558dd6b485..49c45b4158d 100644 --- a/config/prefsConfig.go +++ b/config/prefsConfig.go @@ -28,9 +28,10 @@ type OverridableConfig struct { // BlockProcessingCutoffConfig holds the configuration for the block processing cutoff type BlockProcessingCutoffConfig struct { - Enabled bool - Type string - Value uint64 + Enabled bool `toml:"-"` + Mode string + CutoffType string + Value uint64 } // NamedIdentity will hold the fields which are node named identities diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go index 11cbb903993..059c3b44644 100644 --- a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -325,9 +325,10 @@ func TestTomlPreferencesParser(t *testing.T) { PreferredConnections: []string{prefPubKey0, prefPubKey1}, }, BlockProcessingCutoff: BlockProcessingCutoffConfig{ - Enabled: true, - Type: "round", - Value: 55, + Enabled: false, // even though the TOML value is set to true, it should be ignored because of the "-" toml tag + Mode: "pause", + CutoffType: "round", + Value: 55, }, } @@ -344,7 +345,8 @@ func TestTomlPreferencesParser(t *testing.T) { [BlockProcessingCutoff] Enabled = true - Type = "round" + Mode = "pause" + CutoffType = "round" Value = 55 ` cfg := Preferences{} diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index 275dd202155..c90bfd83eab 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -419,8 +419,8 @@ func displayHeader(headerHandler data.HeaderHandler) []*display.LineData { } } -// checkProcessorNilParameters will check the input parameters for nil values -func checkProcessorNilParameters(arguments ArgBaseProcessor) error { +// checkProcessorParameters will check the input parameters values 
+func checkProcessorParameters(arguments ArgBaseProcessor) error { for key := range arguments.AccountsDB { if check.IfNil(arguments.AccountsDB[key]) { @@ -538,6 +538,33 @@ func checkProcessorNilParameters(arguments ArgBaseProcessor) error { if check.IfNil(arguments.ReceiptsRepository) { return process.ErrNilReceiptsRepository } + err := checkBlockProcessingCutoffConfig(arguments.PrefsConfig.BlockProcessingCutoff) + if err != nil { + return err + } + + return nil +} + +func checkBlockProcessingCutoffConfig(cutOffConfig config.BlockProcessingCutoffConfig) error { + if !cutOffConfig.Enabled { + // don't even check the configs if the feature is disabled. Useful when a node doesn't update `prefs.toml` with + // the new configuration + return nil + } + mode := common.BlockProcessingCutoffMode(cutOffConfig.Mode) + isValidMode := mode == common.BlockProcessingCutoffModePause || mode == common.BlockProcessingCutoffModeProcessError + if !isValidMode { + return fmt.Errorf("%w. provided value=%s", process.ErrInvalidBlockProcessingCutOffMode, mode) + } + + cutOffType := common.BlockProcessingCutoffType(cutOffConfig.CutoffType) + isValidCutOffType := cutOffType == common.BlockProcessingCutoffByRound || + cutOffType == common.BlockProcessingCutoffByNonce || + cutOffType == common.BlockProcessingCutoffByEpoch + if !isValidCutOffType { + return fmt.Errorf("%w. provided value=%s", process.ErrInvalidBlockProcessingCutOffType, cutOffType) + } return nil } @@ -2078,37 +2105,63 @@ func (bp *baseProcessor) setNonceOfFirstCommittedBlock(nonce uint64) { bp.nonceOfFirstCommittedBlock.Value = nonce } -func (bp *baseProcessor) handleBlockProcessingCutoff(header data.HeaderHandler) { - if !bp.blockProcessingCutoffConfig.Enabled { - return - } - - cutOffFunction := func(printArgs ...interface{}) { - log.Info("cutting off the block processing. The node will not advance", printArgs...) 
- go func() { - for { - time.Sleep(time.Minute) - log.Info("node is in block processing cut-off mode", printArgs...) - } - }() - neverEndingChannel := make(chan struct{}) - <-neverEndingChannel +func (bp *baseProcessor) handleBlockProcessingCutoff(header data.HeaderHandler) error { + if !bp.blockProcessingCutoffConfig.Enabled || check.IfNil(header) { + return nil } + cutOffFunction := getCutoffFunction(bp.blockProcessingCutoffConfig) value := bp.blockProcessingCutoffConfig.Value - switch common.BlockProcessingCutoffType(bp.blockProcessingCutoffConfig.Type) { + switch common.BlockProcessingCutoffType(bp.blockProcessingCutoffConfig.CutoffType) { case common.BlockProcessingCutoffByRound: if header.GetRound() == value { - cutOffFunction("round", header.GetRound()) + err := cutOffFunction("round", header.GetRound()) + if err != nil { + return err + } } case common.BlockProcessingCutoffByNonce: if header.GetNonce() == value { - cutOffFunction("nonce", header.GetNonce()) + err := cutOffFunction("nonce", header.GetNonce()) + if err != nil { + return err + } } case common.BlockProcessingCutoffByEpoch: if header.IsStartOfEpochBlock() && header.GetEpoch() == uint32(value) { - cutOffFunction("epoch", header.GetEpoch()) + err := cutOffFunction("epoch", header.GetEpoch()) + if err != nil { + return err + } + } + } + + return nil +} + +func getCutoffFunction(cfg config.BlockProcessingCutoffConfig) func(printArgs ...interface{}) error { + processErr := fmt.Errorf("block processing cuttoff - error") + if cfg.Mode == common.BlockProcessingCutoffModeProcessError { + return func(printArgs ...interface{}) error { + log.Info("block processing cutoff - return err", printArgs...) + return processErr } } + + blockingCutoffFunction := func(printArgs ...interface{}) error { + log.Info("cutting off the block processing. The node will not advance", printArgs...) + go func() { + for { + time.Sleep(time.Minute) + log.Info("node is in block processing cut-off mode", printArgs...) 
+ } + }() + neverEndingChannel := make(chan struct{}) + <-neverEndingChannel + + return nil // should not reach this point + } + + return blockingCutoffFunction } diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go index b86791c52ef..cd0acceff98 100644 --- a/process/block/baseProcess_test.go +++ b/process/block/baseProcess_test.go @@ -3104,3 +3104,110 @@ func TestBaseProcessor_ConcurrentCallsNonceOfFirstCommittedBlock(t *testing.T) { assert.True(t, len(values) <= 1) // we can have the situation when all reads are done before the first set assert.Equal(t, numCalls/2, values[lastValRead]+noValues) } + +func TestBaseProcessor_HandleBlockProcessingBackoff(t *testing.T) { + t.Parallel() + + t.Run("disabled or nil header - should exit", func(t *testing.T) { + t.Parallel() + + cfg := config.BlockProcessingCutoffConfig{ + Enabled: false, + } + bp := blproc.NewBaseProcessorWithBlockProcessingCutoffConfig(cfg) + + err := bp.HandleBlockProcessingCutoff(nil) + require.NoError(t, err) + + err = bp.HandleBlockProcessingCutoff(&block.MetaBlock{}) + require.NoError(t, err) + }) + + t.Run("process error via round", func(t *testing.T) { + t.Parallel() + + cfg := config.BlockProcessingCutoffConfig{ + Enabled: true, + Mode: common.BlockProcessingCutoffModeProcessError, + CutoffType: string(common.BlockProcessingCutoffByRound), + Value: 20, + } + bp := blproc.NewBaseProcessorWithBlockProcessingCutoffConfig(cfg) + + err := bp.HandleBlockProcessingCutoff(&block.MetaBlock{Round: 19}) // not the desired round + require.NoError(t, err) + + err = bp.HandleBlockProcessingCutoff(&block.MetaBlock{Round: 20}) + require.Equal(t, errors.New("block processing cuttoff - error"), err) + }) + + t.Run("process error via nonce", func(t *testing.T) { + t.Parallel() + + cfg := config.BlockProcessingCutoffConfig{ + Enabled: true, + Mode: common.BlockProcessingCutoffModeProcessError, + CutoffType: string(common.BlockProcessingCutoffByNonce), + Value: 20, + } + bp := 
blproc.NewBaseProcessorWithBlockProcessingCutoffConfig(cfg) + + err := bp.HandleBlockProcessingCutoff(&block.MetaBlock{Nonce: 19}) // not the desired nonce + require.NoError(t, err) + + err = bp.HandleBlockProcessingCutoff(&block.MetaBlock{Nonce: 20}) + require.Equal(t, errors.New("block processing cuttoff - error"), err) + }) + + t.Run("process error via epoch", func(t *testing.T) { + t.Parallel() + + cfg := config.BlockProcessingCutoffConfig{ + Enabled: true, + Mode: common.BlockProcessingCutoffModeProcessError, + CutoffType: string(common.BlockProcessingCutoffByEpoch), + Value: 20, + } + bp := blproc.NewBaseProcessorWithBlockProcessingCutoffConfig(cfg) + + dummyEpochStartData := block.EpochStart{ + LastFinalizedHeaders: []block.EpochStartShardData{ + { + ShardID: 0, + }, + }, + } + err := bp.HandleBlockProcessingCutoff(&block.MetaBlock{Epoch: 19, EpochStart: dummyEpochStartData}) // not the desired nonce + require.NoError(t, err) + + err = bp.HandleBlockProcessingCutoff(&block.MetaBlock{Epoch: 20, EpochStart: dummyEpochStartData}) + require.Equal(t, errors.New("block processing cuttoff - error"), err) + }) + + t.Run("pause - should block the processing", func(t *testing.T) { + t.Parallel() + + cfg := config.BlockProcessingCutoffConfig{ + Enabled: true, + Mode: common.BlockProcessingCutoffModePause, + CutoffType: string(common.BlockProcessingCutoffByRound), + Value: 20, + } + bp := blproc.NewBaseProcessorWithBlockProcessingCutoffConfig(cfg) + + err := bp.HandleBlockProcessingCutoff(&block.MetaBlock{Round: 19}) // not the desired round + require.NoError(t, err) + + done := make(chan struct{}) + go func() { + _ = bp.HandleBlockProcessingCutoff(&block.MetaBlock{Round: 20}) + done <- struct{}{} + }() + + select { + case <-done: + require.Fail(t, "should have not advanced") + case <-time.After(time.Second): + } + }) +} diff --git a/process/block/export_test.go b/process/block/export_test.go index a47d9851500..8febbcbe600 100644 --- a/process/block/export_test.go +++ 
b/process/block/export_test.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/scheduled" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" @@ -64,6 +65,16 @@ func (bp *baseProcessor) CommitTrieEpochRootHashIfNeeded(metaBlock *block.MetaBl return bp.commitTrieEpochRootHashIfNeeded(metaBlock, rootHash) } +func NewBaseProcessorWithBlockProcessingCutoffConfig(cfg config.BlockProcessingCutoffConfig) *baseProcessor { + return &baseProcessor{ + blockProcessingCutoffConfig: cfg, + } +} + +func (bp *baseProcessor) HandleBlockProcessingCutoff(hdr data.HeaderHandler) error { + return bp.handleBlockProcessingCutoff(hdr) +} + func (sp *shardProcessor) ReceivedMetaBlock(header data.HeaderHandler, metaBlockHash []byte) { sp.receivedMetaBlock(header, metaBlockHash) } @@ -495,7 +506,7 @@ func (mp *metaProcessor) GetFinalMiniBlockHeaders(miniBlockHeaderHandlers []data } func CheckProcessorNilParameters(arguments ArgBaseProcessor) error { - return checkProcessorNilParameters(arguments) + return checkProcessorParameters(arguments) } func (bp *baseProcessor) SetIndexOfFirstTxProcessed(miniBlockHeaderHandler data.MiniBlockHeaderHandler) error { diff --git a/process/block/metablock.go b/process/block/metablock.go index b02ac8618f3..d4a09877e92 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -46,7 +46,7 @@ type metaProcessor struct { // NewMetaProcessor creates a new metaProcessor object func NewMetaProcessor(arguments ArgMetaProcessor) (*metaProcessor, error) { - err := checkProcessorNilParameters(arguments.ArgBaseProcessor) + err := checkProcessorParameters(arguments.ArgBaseProcessor) if err != nil { return nil, err } @@ -263,6 +263,11 @@ func (mp *metaProcessor) ProcessBlock( } }() + 
err = mp.handleBlockProcessingCutoff(header) + if err != nil { + return err + } + err = mp.createBlockStarted() if err != nil { return err @@ -1167,7 +1172,10 @@ func (mp *metaProcessor) CommitBlock( return err } - mp.handleBlockProcessingCutoff(headerHandler) + err = mp.handleBlockProcessingCutoff(headerHandler) + if err != nil { + return err + } mp.store.SetEpochForPutOperation(headerHandler.GetEpoch()) diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 44cf3102026..fcccd776493 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -51,7 +51,7 @@ type shardProcessor struct { // NewShardProcessor creates a new shardProcessor object func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { - err := checkProcessorNilParameters(arguments.ArgBaseProcessor) + err := checkProcessorParameters(arguments.ArgBaseProcessor) if err != nil { return nil, err } @@ -314,6 +314,11 @@ func (sp *shardProcessor) ProcessBlock( } }() + err = sp.handleBlockProcessingCutoff(header) + if err != nil { + return err + } + mbIndex := sp.getIndexOfFirstMiniBlockToBeExecuted(header) miniBlocks := body.MiniBlocks[mbIndex:] @@ -889,7 +894,10 @@ func (sp *shardProcessor) CommitBlock( return err } - sp.handleBlockProcessingCutoff(headerHandler) + err = sp.handleBlockProcessingCutoff(headerHandler) + if err != nil { + return err + } sp.store.SetEpochForPutOperation(headerHandler.GetEpoch()) diff --git a/process/errors.go b/process/errors.go index b00039aaa91..c4add797993 100644 --- a/process/errors.go +++ b/process/errors.go @@ -1196,3 +1196,9 @@ var ErrGasPriceTooHigh = errors.New("gas price is too high for the transaction") // ErrGuardedTransactionNotExpected signals that a guarded transaction was received for processing but the account is not guarded var ErrGuardedTransactionNotExpected = errors.New("guarded transaction not expected") + +// ErrInvalidBlockProcessingCutOffMode signals that an invalid block processing cutoff mode 
has been provided +var ErrInvalidBlockProcessingCutOffMode = errors.New("invalid block processing cutoff mode") + +// ErrInvalidBlockProcessingCutOffType signals that an invalid block processing cutoff type has been provided +var ErrInvalidBlockProcessingCutOffType = errors.New("invalid block processing cutoff type") From 5651ee360cb35b6fc2b3d2b0e09fd894c6e97fef Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Mon, 8 May 2023 16:24:43 +0300 Subject: [PATCH 143/221] rename cutoff type to cutoff trigger --- cmd/node/config/prefs.toml | 4 ++-- common/constants.go | 10 +++++----- config/prefsConfig.go | 8 ++++---- config/tomlConfig_test.go | 10 +++++----- process/block/baseProcess.go | 14 +++++++------- process/block/baseProcess_test.go | 32 +++++++++++++++---------------- process/errors.go | 4 ++-- 7 files changed, 41 insertions(+), 41 deletions(-) diff --git a/cmd/node/config/prefs.toml b/cmd/node/config/prefs.toml index ed8cd075f97..1653dee21c9 100644 --- a/cmd/node/config/prefs.toml +++ b/cmd/node/config/prefs.toml @@ -59,9 +59,9 @@ # "process-error" will return an error when processing the block with the given coordinates. Useful for debugging Mode = "pause" - # CutoffType represents the kind of coordinate to look after when cutting off the processing. + # CutoffTrigger represents the kind of coordinate to look after when cutting off the processing. # Possible values: "round", "nonce", or "epoch" - CutoffType = "round" + CutoffTrigger = "round" # The value of the cutoff. 
For example, if CutoffType is set to "round", and Value to 20, then the node will stop processing at round 20 Value = 20 diff --git a/common/constants.go b/common/constants.go index 4b8d5e2ec8d..552164fb144 100644 --- a/common/constants.go +++ b/common/constants.go @@ -833,16 +833,16 @@ const ( BlockProcessingCutoffModeProcessError = "process-error" ) -// BlockProcessingCutoffType represents the type of cutoff potentially used in block processing -type BlockProcessingCutoffType string +// BlockProcessingCutoffTrigger represents the trigger of the cutoff potentially used in block processing +type BlockProcessingCutoffTrigger string const ( // BlockProcessingCutoffByNonce represents the cutoff by nonce - BlockProcessingCutoffByNonce BlockProcessingCutoffType = "nonce" + BlockProcessingCutoffByNonce BlockProcessingCutoffTrigger = "nonce" // BlockProcessingCutoffByRound represents the cutoff by round - BlockProcessingCutoffByRound BlockProcessingCutoffType = "round" + BlockProcessingCutoffByRound BlockProcessingCutoffTrigger = "round" // BlockProcessingCutoffByEpoch represents the cutoff by epoch - BlockProcessingCutoffByEpoch BlockProcessingCutoffType = "epoch" + BlockProcessingCutoffByEpoch BlockProcessingCutoffTrigger = "epoch" ) // MaxIndexOfTxInMiniBlock defines the maximum index of a tx inside one mini block diff --git a/config/prefsConfig.go b/config/prefsConfig.go index 49c45b4158d..063d89c1b59 100644 --- a/config/prefsConfig.go +++ b/config/prefsConfig.go @@ -28,10 +28,10 @@ type OverridableConfig struct { // BlockProcessingCutoffConfig holds the configuration for the block processing cutoff type BlockProcessingCutoffConfig struct { - Enabled bool `toml:"-"` - Mode string - CutoffType string - Value uint64 + Enabled bool `toml:"-"` + Mode string + CutoffTrigger string + Value uint64 } // NamedIdentity will hold the fields which are node named identities diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go index 059c3b44644..9582657ec49 100644 --- 
a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -325,10 +325,10 @@ func TestTomlPreferencesParser(t *testing.T) { PreferredConnections: []string{prefPubKey0, prefPubKey1}, }, BlockProcessingCutoff: BlockProcessingCutoffConfig{ - Enabled: false, // even though the TOML value is set to true, it should be ignored because of the "-" toml tag - Mode: "pause", - CutoffType: "round", - Value: 55, + Enabled: false, // even though the TOML value is set to true, it should be ignored because of the "-" toml tag + Mode: "pause", + CutoffTrigger: "round", + Value: 55, }, } @@ -346,7 +346,7 @@ func TestTomlPreferencesParser(t *testing.T) { [BlockProcessingCutoff] Enabled = true Mode = "pause" - CutoffType = "round" + CutoffTrigger = "round" Value = 55 ` cfg := Preferences{} diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index c90bfd83eab..25f5fee43ee 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -558,12 +558,12 @@ func checkBlockProcessingCutoffConfig(cutOffConfig config.BlockProcessingCutoffC return fmt.Errorf("%w. provided value=%s", process.ErrInvalidBlockProcessingCutOffMode, mode) } - cutOffType := common.BlockProcessingCutoffType(cutOffConfig.CutoffType) - isValidCutOffType := cutOffType == common.BlockProcessingCutoffByRound || - cutOffType == common.BlockProcessingCutoffByNonce || - cutOffType == common.BlockProcessingCutoffByEpoch - if !isValidCutOffType { - return fmt.Errorf("%w. provided value=%s", process.ErrInvalidBlockProcessingCutOffType, cutOffType) + cutOffTrigger := common.BlockProcessingCutoffTrigger(cutOffConfig.CutoffTrigger) + isValidCutOffTrigger := cutOffTrigger == common.BlockProcessingCutoffByRound || + cutOffTrigger == common.BlockProcessingCutoffByNonce || + cutOffTrigger == common.BlockProcessingCutoffByEpoch + if !isValidCutOffTrigger { + return fmt.Errorf("%w. 
provided value=%s", process.ErrInvalidBlockProcessingCutOffTrigger, cutOffTrigger) } return nil @@ -2113,7 +2113,7 @@ func (bp *baseProcessor) handleBlockProcessingCutoff(header data.HeaderHandler) cutOffFunction := getCutoffFunction(bp.blockProcessingCutoffConfig) value := bp.blockProcessingCutoffConfig.Value - switch common.BlockProcessingCutoffType(bp.blockProcessingCutoffConfig.CutoffType) { + switch common.BlockProcessingCutoffTrigger(bp.blockProcessingCutoffConfig.CutoffTrigger) { case common.BlockProcessingCutoffByRound: if header.GetRound() == value { err := cutOffFunction("round", header.GetRound()) diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go index cd0acceff98..31427f3eee3 100644 --- a/process/block/baseProcess_test.go +++ b/process/block/baseProcess_test.go @@ -3127,10 +3127,10 @@ func TestBaseProcessor_HandleBlockProcessingBackoff(t *testing.T) { t.Parallel() cfg := config.BlockProcessingCutoffConfig{ - Enabled: true, - Mode: common.BlockProcessingCutoffModeProcessError, - CutoffType: string(common.BlockProcessingCutoffByRound), - Value: 20, + Enabled: true, + Mode: common.BlockProcessingCutoffModeProcessError, + CutoffTrigger: string(common.BlockProcessingCutoffByRound), + Value: 20, } bp := blproc.NewBaseProcessorWithBlockProcessingCutoffConfig(cfg) @@ -3145,10 +3145,10 @@ func TestBaseProcessor_HandleBlockProcessingBackoff(t *testing.T) { t.Parallel() cfg := config.BlockProcessingCutoffConfig{ - Enabled: true, - Mode: common.BlockProcessingCutoffModeProcessError, - CutoffType: string(common.BlockProcessingCutoffByNonce), - Value: 20, + Enabled: true, + Mode: common.BlockProcessingCutoffModeProcessError, + CutoffTrigger: string(common.BlockProcessingCutoffByNonce), + Value: 20, } bp := blproc.NewBaseProcessorWithBlockProcessingCutoffConfig(cfg) @@ -3163,10 +3163,10 @@ func TestBaseProcessor_HandleBlockProcessingBackoff(t *testing.T) { t.Parallel() cfg := config.BlockProcessingCutoffConfig{ - Enabled: true, - Mode: 
common.BlockProcessingCutoffModeProcessError, - CutoffType: string(common.BlockProcessingCutoffByEpoch), - Value: 20, + Enabled: true, + Mode: common.BlockProcessingCutoffModeProcessError, + CutoffTrigger: string(common.BlockProcessingCutoffByEpoch), + Value: 20, } bp := blproc.NewBaseProcessorWithBlockProcessingCutoffConfig(cfg) @@ -3188,10 +3188,10 @@ func TestBaseProcessor_HandleBlockProcessingBackoff(t *testing.T) { t.Parallel() cfg := config.BlockProcessingCutoffConfig{ - Enabled: true, - Mode: common.BlockProcessingCutoffModePause, - CutoffType: string(common.BlockProcessingCutoffByRound), - Value: 20, + Enabled: true, + Mode: common.BlockProcessingCutoffModePause, + CutoffTrigger: string(common.BlockProcessingCutoffByRound), + Value: 20, } bp := blproc.NewBaseProcessorWithBlockProcessingCutoffConfig(cfg) diff --git a/process/errors.go b/process/errors.go index c4add797993..45692d93f31 100644 --- a/process/errors.go +++ b/process/errors.go @@ -1200,5 +1200,5 @@ var ErrGuardedTransactionNotExpected = errors.New("guarded transaction not expec // ErrInvalidBlockProcessingCutOffMode signals that an invalid block processing cutoff mode has been provided var ErrInvalidBlockProcessingCutOffMode = errors.New("invalid block processing cutoff mode") -// ErrInvalidBlockProcessingCutOffType signals that an invalid block processing cutoff type has been provided -var ErrInvalidBlockProcessingCutOffType = errors.New("invalid block processing cutoff type") +// ErrInvalidBlockProcessingCutOffTrigger signals that an invalid block processing cutoff trigger has been provided +var ErrInvalidBlockProcessingCutOffTrigger = errors.New("invalid block processing cutoff trigger") From a50871bc44d921f9b214b0cd89d3e17309802850 Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Mon, 8 May 2023 16:53:27 +0300 Subject: [PATCH 144/221] bugfix --- cmd/node/flags.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/node/flags.go b/cmd/node/flags.go index 
31d2dbfcb3b..b41354be29d 100644 --- a/cmd/node/flags.go +++ b/cmd/node/flags.go @@ -512,7 +512,7 @@ func applyFlags(ctx *cli.Context, cfgs *config.Configs, flagsConfig *config.Cont } if ctx.IsSet(blockProcessingCutoff.Name) { cfgs.PreferencesConfig.BlockProcessingCutoff.Enabled = true - cfgs.FlagsConfig.DisableConsensusWatchdog = true + flagsConfig.DisableConsensusWatchdog = true } if ctx.IsSet(memoryUsageToCreateProfiles.Name) { cfgs.GeneralConfig.Health.MemoryUsageToCreateProfiles = int(ctx.GlobalUint64(memoryUsageToCreateProfiles.Name)) From c54692e65ea7b68cb343b450b88b913c629c50c8 Mon Sep 17 00:00:00 2001 From: robertsasu Date: Tue, 9 May 2023 12:27:26 +0300 Subject: [PATCH 145/221] fixes after review --- factory/processing/processComponents_test.go | 2 +- vm/systemSmartContracts/governance.go | 33 ++++++--- vm/systemSmartContracts/governance_test.go | 76 +++++++++++++++++++- 3 files changed, 100 insertions(+), 11 deletions(-) diff --git a/factory/processing/processComponents_test.go b/factory/processing/processComponents_test.go index 92642f808e0..6c656fb8934 100644 --- a/factory/processing/processComponents_test.go +++ b/factory/processing/processComponents_test.go @@ -117,7 +117,7 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto MinPassThreshold: 0.5, MinVetoThreshold: 0.5, }, - ChangeConfigAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", + OwnerAddress: "erd1vxy22x0fj4zv6hktmydg8vpfh6euv02cz4yg0aaws6rrad5a5awqgqky80", }, StakingSystemSCConfig: config.StakingSystemSCConfig{ GenesisNodePrice: "2500000000000000000000", diff --git a/vm/systemSmartContracts/governance.go b/vm/systemSmartContracts/governance.go index 3e1dd4bafb5..74763ffed1d 100644 --- a/vm/systemSmartContracts/governance.go +++ b/vm/systemSmartContracts/governance.go @@ -203,9 +203,10 @@ func (g *governanceContract) initV2(args *vmcommon.ContractCallInput) vmcommon.R // changeConfig allows the owner to change the configuration for 
requesting proposals // args.Arguments[0] - proposalFee - as string -// args.Arguments[1] - minQuorum - 0-10000 - represents percentage -// args.Arguments[2] - minVeto - 0-10000 - represents percentage -// args.Arguments[3] - minPass - 0-10000 - represents percentage +// args.Arguments[1] - lostProposalFee - as string +// args.Arguments[2] - minQuorum - 0-10000 - represents percentage +// args.Arguments[3] - minVeto - 0-10000 - represents percentage +// args.Arguments[4] - minPass - 0-10000 - represents percentage func (g *governanceContract) changeConfig(args *vmcommon.ContractCallInput) vmcommon.ReturnCode { if !bytes.Equal(g.ownerAddress, args.CallerAddr) { g.eei.AddReturnMessage("changeConfig can be called only by owner") @@ -215,8 +216,8 @@ func (g *governanceContract) changeConfig(args *vmcommon.ContractCallInput) vmco g.eei.AddReturnMessage("changeConfig can be called only without callValue") return vmcommon.UserError } - if len(args.Arguments) != 4 { - g.eei.AddReturnMessage("changeConfig needs 4 arguments") + if len(args.Arguments) != 5 { + g.eei.AddReturnMessage("changeConfig needs 5 arguments") return vmcommon.UserError } @@ -225,17 +226,28 @@ func (g *governanceContract) changeConfig(args *vmcommon.ContractCallInput) vmco g.eei.AddReturnMessage("changeConfig first argument is incorrectly formatted") return vmcommon.UserError } - minQuorum, err := convertDecimalToPercentage(args.Arguments[1]) + lostProposalFee, okConvert := big.NewInt(0).SetString(string(args.Arguments[1]), conversionBase) + if !okConvert || proposalFee.Cmp(zero) <= 0 { + g.eei.AddReturnMessage("changeConfig second argument is incorrectly formatted") + return vmcommon.UserError + } + if proposalFee.Cmp(lostProposalFee) < 0 { + errLocal := fmt.Errorf("%w proposal fee is smaller than lost proposal fee ", vm.ErrIncorrectConfig) + g.eei.AddReturnMessage(errLocal.Error()) + return vmcommon.UserError + } + + minQuorum, err := convertDecimalToPercentage(args.Arguments[2]) if err != nil { 
g.eei.AddReturnMessage(err.Error() + " minQuorum") return vmcommon.UserError } - minVeto, err := convertDecimalToPercentage(args.Arguments[2]) + minVeto, err := convertDecimalToPercentage(args.Arguments[3]) if err != nil { g.eei.AddReturnMessage(err.Error() + " minVeto") return vmcommon.UserError } - minPass, err := convertDecimalToPercentage(args.Arguments[3]) + minPass, err := convertDecimalToPercentage(args.Arguments[4]) if err != nil { g.eei.AddReturnMessage(err.Error() + " minPass") return vmcommon.UserError @@ -251,6 +263,7 @@ func (g *governanceContract) changeConfig(args *vmcommon.ContractCallInput) vmco scConfig.MinVetoThreshold = minVeto scConfig.MinPassThreshold = minPass scConfig.ProposalFee = proposalFee + scConfig.LostProposalFee = lostProposalFee g.baseProposalCost.Set(proposalFee) err = g.saveConfig(scConfig) @@ -1168,6 +1181,10 @@ func (g *governanceContract) convertV2Config(config config.GovernanceSystemSCCon return nil, vm.ErrIncorrectConfig } + if proposalFee.Cmp(lostProposalFee) < 0 { + return nil, fmt.Errorf("%w proposal fee is smaller than lost proposal fee ", vm.ErrIncorrectConfig) + } + return &GovernanceConfigV2{ MinQuorum: float32(config.Active.MinQuorum), MinPassThreshold: float32(config.Active.MinPassThreshold), diff --git a/vm/systemSmartContracts/governance_test.go b/vm/systemSmartContracts/governance_test.go index 9375625c01e..82143331b06 100644 --- a/vm/systemSmartContracts/governance_test.go +++ b/vm/systemSmartContracts/governance_test.go @@ -341,6 +341,7 @@ func TestGovernanceContract_ChangeConfig(t *testing.T) { gsc, _ := NewGovernanceContract(args) callInputArgs := [][]byte{ + []byte("1"), []byte("1"), []byte("10"), []byte("10"), @@ -403,7 +404,7 @@ func TestGovernanceContract_ChangeConfigWrongArgumentsLength(t *testing.T) { t.Parallel() retMessage := "" - errSubstr := "changeConfig needs 4 arguments" + errSubstr := "changeConfig needs 5 arguments" args := createMockGovernanceArgs() args.Eei = &mock.SystemEIStub{ 
AddReturnMessageCalled: func(msg string) { @@ -440,6 +441,7 @@ func TestGovernanceContract_ChangeConfigInvalidParams(t *testing.T) { _ = gsc.Execute(initInput) callInputArgs := [][]byte{ + []byte("invalid"), []byte("invalid"), []byte("10"), []byte("10"), @@ -451,8 +453,37 @@ func TestGovernanceContract_ChangeConfigInvalidParams(t *testing.T) { require.Equal(t, vmcommon.UserError, retCode) require.Contains(t, retMessage, errSubstr) + errSubstr = "changeConfig second argument is incorrectly formatted" + callInputArgs = [][]byte{ + []byte("1"), + []byte("invalid"), + []byte("10"), + []byte("10"), + []byte("5"), + } + callInput = createVMInput(zero, "changeConfig", args.OwnerAddress, vm.GovernanceSCAddress, callInputArgs) + retCode = gsc.Execute(callInput) + + require.Equal(t, vmcommon.UserError, retCode) + require.Contains(t, retMessage, errSubstr) + + errSubstr = vm.ErrIncorrectConfig.Error() + " proposal fee is smaller than lost proposal fee " + callInputArgs = [][]byte{ + []byte("1"), + []byte("10"), + []byte("10"), + []byte("10"), + []byte("5"), + } + callInput = createVMInput(zero, "changeConfig", args.OwnerAddress, vm.GovernanceSCAddress, callInputArgs) + retCode = gsc.Execute(callInput) + + require.Equal(t, vmcommon.UserError, retCode) + require.Contains(t, retMessage, errSubstr) + errSubstr = "config incorrect minQuorum" callInputArgs = [][]byte{ + []byte("1"), []byte("1"), []byte("invalid"), []byte("10"), @@ -466,6 +497,7 @@ func TestGovernanceContract_ChangeConfigInvalidParams(t *testing.T) { errSubstr = "config incorrect minVeto" callInputArgs = [][]byte{ + []byte("1"), []byte("1"), []byte("10"), []byte("invalid"), @@ -479,6 +511,7 @@ func TestGovernanceContract_ChangeConfigInvalidParams(t *testing.T) { errSubstr = "config incorrect minPass" callInputArgs = [][]byte{ + []byte("1"), []byte("1"), []byte("10"), []byte("10"), @@ -516,6 +549,7 @@ func TestGovernanceContract_ChangeConfigGetConfigErr(t *testing.T) { _ = gsc.Execute(initInput) callInputArgs := 
[][]byte{ + []byte("1"), []byte("1"), []byte("10"), []byte("10"), @@ -1773,7 +1807,7 @@ func TestGovernanceContract_ProposeVoteClose(t *testing.T) { callerAddress := bytes.Repeat([]byte{2}, 32) proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) - gsc, blockchainHook, _ := createGovernanceBlockChainHookStubContextHandler() + gsc, blockchainHook, eei := createGovernanceBlockChainHookStubContextHandler() callInputArgs := [][]byte{ proposalIdentifier, @@ -1797,6 +1831,44 @@ func TestGovernanceContract_ProposeVoteClose(t *testing.T) { callInput = createVMInput(big.NewInt(0), "closeProposal", callerAddress, vm.GovernanceSCAddress, [][]byte{big.NewInt(1).Bytes()}) retCode = gsc.Execute(callInput) require.Equal(t, vmcommon.Ok, retCode) + + proposal, _ := gsc.getProposalFromNonce(big.NewInt(1)) + require.True(t, proposal.Closed) + require.True(t, proposal.Passed) + require.Equal(t, big.NewInt(500), eei.GetTotalSentToUser(callInput.CallerAddr)) +} + +func TestGovernanceContract_ProposeClosePayFee(t *testing.T) { + t.Parallel() + + callerAddress := bytes.Repeat([]byte{2}, 32) + proposalIdentifier := bytes.Repeat([]byte("a"), commitHashLength) + + gsc, blockchainHook, eei := createGovernanceBlockChainHookStubContextHandler() + + callInputArgs := [][]byte{ + proposalIdentifier, + big.NewInt(50).Bytes(), + big.NewInt(55).Bytes(), + } + callInput := createVMInput(big.NewInt(500), "proposal", callerAddress, vm.GovernanceSCAddress, callInputArgs) + retCode := gsc.Execute(callInput) + require.Equal(t, vmcommon.Ok, retCode) + + currentEpoch := uint32(52) + blockchainHook.CurrentEpochCalled = func() uint32 { + return currentEpoch + } + + currentEpoch = 56 + callInput = createVMInput(big.NewInt(0), "closeProposal", callerAddress, vm.GovernanceSCAddress, [][]byte{big.NewInt(1).Bytes()}) + retCode = gsc.Execute(callInput) + require.Equal(t, vmcommon.Ok, retCode) + + proposal, _ := gsc.getProposalFromNonce(big.NewInt(1)) + require.True(t, proposal.Closed) + require.False(t, 
proposal.Passed) + require.Equal(t, big.NewInt(499), eei.GetTotalSentToUser(callInput.CallerAddr)) } func TestGovernanceContract_ClaimAccumulatedFees(t *testing.T) { From a7601a2f8c6672822201547331f40df6cb09f824 Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Tue, 9 May 2023 14:09:41 +0300 Subject: [PATCH 146/221] fixes --- cmd/node/config/prefs.toml | 2 +- process/block/baseProcess.go | 6 +++--- process/block/metablock.go | 10 +++++----- process/block/shardblock.go | 10 +++++----- 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/cmd/node/config/prefs.toml b/cmd/node/config/prefs.toml index 1653dee21c9..c9ec0bebe17 100644 --- a/cmd/node/config/prefs.toml +++ b/cmd/node/config/prefs.toml @@ -63,7 +63,7 @@ # Possible values: "round", "nonce", or "epoch" CutoffTrigger = "round" - # The value of the cutoff. For example, if CutoffType is set to "round", and Value to 20, then the node will stop processing at round 20 + # The minimum value of the cutoff. For example, if CutoffType is set to "round", and Value to 20, then the node will stop processing at round 20+ Value = 20 # NamedIdentity represents an identity that runs nodes on the multikey diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index 25f5fee43ee..d974aa85038 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -2115,21 +2115,21 @@ func (bp *baseProcessor) handleBlockProcessingCutoff(header data.HeaderHandler) switch common.BlockProcessingCutoffTrigger(bp.blockProcessingCutoffConfig.CutoffTrigger) { case common.BlockProcessingCutoffByRound: - if header.GetRound() == value { + if header.GetRound() >= value { err := cutOffFunction("round", header.GetRound()) if err != nil { return err } } case common.BlockProcessingCutoffByNonce: - if header.GetNonce() == value { + if header.GetNonce() >= value { err := cutOffFunction("nonce", header.GetNonce()) if err != nil { return err } } case common.BlockProcessingCutoffByEpoch: - if 
header.IsStartOfEpochBlock() && header.GetEpoch() == uint32(value) { + if header.GetEpoch() >= uint32(value) { err := cutOffFunction("epoch", header.GetEpoch()) if err != nil { return err diff --git a/process/block/metablock.go b/process/block/metablock.go index d4a09877e92..76fcc137743 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -263,11 +263,6 @@ func (mp *metaProcessor) ProcessBlock( } }() - err = mp.handleBlockProcessingCutoff(header) - if err != nil { - return err - } - err = mp.createBlockStarted() if err != nil { return err @@ -402,6 +397,11 @@ func (mp *metaProcessor) ProcessBlock( return err } + err = mp.handleBlockProcessingCutoff(header) + if err != nil { + return err + } + return nil } diff --git a/process/block/shardblock.go b/process/block/shardblock.go index fcccd776493..022998fa860 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -314,11 +314,6 @@ func (sp *shardProcessor) ProcessBlock( } }() - err = sp.handleBlockProcessingCutoff(header) - if err != nil { - return err - } - mbIndex := sp.getIndexOfFirstMiniBlockToBeExecuted(header) miniBlocks := body.MiniBlocks[mbIndex:] @@ -352,6 +347,11 @@ func (sp *shardProcessor) ProcessBlock( return err } + err = sp.handleBlockProcessingCutoff(header) + if err != nil { + return err + } + return nil } From 4a8523eee2871e2e4202c6e278d1cca91e9ff292 Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Tue, 9 May 2023 15:06:56 +0300 Subject: [PATCH 147/221] fixes and additions --- factory/processing/processComponents_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/factory/processing/processComponents_test.go b/factory/processing/processComponents_test.go index 92642f808e0..cc04371142c 100644 --- a/factory/processing/processComponents_test.go +++ b/factory/processing/processComponents_test.go @@ -77,7 +77,7 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto args := 
processComp.ProcessComponentsFactoryArgs{ Config: testscommon.GetGeneralConfig(), EpochConfig: config.EpochConfig{}, - PrefConfigs: config.PreferencesConfig{}, + PrefConfigs: config.Preferences{}, ImportDBConfig: config.ImportDbConfig{}, AccountsParser: &mock.AccountsParserStub{ GenerateInitialTransactionsCalled: func(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*dataBlock.MiniBlock, map[uint32]*outportCore.Pool, error) { @@ -579,7 +579,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { args := createMockProcessComponentsFactoryArgs() args.Config.EpochStartConfig.RoundsPerEpoch = 0 - args.PrefConfigs.FullArchive = true + args.PrefConfigs.Preferences.FullArchive = true testCreateWithArgs(t, args, "rounds per epoch") }) t.Run("createNetworkShardingCollector fails due to invalid PublicKeyPeerId config should error", func(t *testing.T) { From 8ace67f064664b8c2cbdb4301aed4a11a54eeb49 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Tue, 9 May 2023 16:55:34 +0300 Subject: [PATCH 148/221] fixes after review --- .../factory/containers/resolversContainer_test.go | 6 +++--- .../epochProviders/currentEpochProvidersFactory_test.go | 2 +- dataRetriever/interface.go | 2 +- dataRetriever/mock/marshalizerMock.go | 2 +- dataRetriever/resolvers/validatorInfoResolver_test.go | 2 +- dataRetriever/shardedData/shardedData_test.go | 4 ++-- dataRetriever/storageRequesters/headerRequester_test.go | 7 +------ dataRetriever/topicSender/diffPeerListCreator_test.go | 2 +- 8 files changed, 11 insertions(+), 16 deletions(-) diff --git a/dataRetriever/factory/containers/resolversContainer_test.go b/dataRetriever/factory/containers/resolversContainer_test.go index 969a93d6034..cf3dcfbe8d8 100644 --- a/dataRetriever/factory/containers/resolversContainer_test.go +++ b/dataRetriever/factory/containers/resolversContainer_test.go @@ -251,7 +251,7 @@ func TestResolversContainer_IterateNilHandlerShouldNotPanic(t *testing.T) { defer 
func() { r := recover() if r != nil { - assert.Fail(t, "should not have paniced") + assert.Fail(t, "should not have panicked") } }() @@ -269,7 +269,7 @@ func TestResolversContainer_IterateNotAValidKeyShouldWorkAndNotPanic(t *testing. defer func() { r := recover() if r != nil { - assert.Fail(t, "should not have paniced") + assert.Fail(t, "should not have panicked") } }() @@ -292,7 +292,7 @@ func TestResolversContainer_IterateNotAValidValueShouldWorkAndNotPanic(t *testin defer func() { r := recover() if r != nil { - assert.Fail(t, "should not have paniced") + assert.Fail(t, "should not have panicked") } }() diff --git a/dataRetriever/factory/epochProviders/currentEpochProvidersFactory_test.go b/dataRetriever/factory/epochProviders/currentEpochProvidersFactory_test.go index 236c47e4c35..7335f591826 100644 --- a/dataRetriever/factory/epochProviders/currentEpochProvidersFactory_test.go +++ b/dataRetriever/factory/epochProviders/currentEpochProvidersFactory_test.go @@ -25,7 +25,7 @@ func TestCreateCurrentEpochProvider_NilCurrentEpochProvider(t *testing.T) { assert.IsType(t, disabled.NewEpochProvider(), cnep) } -func TestCreateCurrentEpochProvider_ArithemticEpochProvider(t *testing.T) { +func TestCreateCurrentEpochProvider_ArithmeticEpochProvider(t *testing.T) { t.Parallel() cnep, err := CreateCurrentEpochProvider( diff --git a/dataRetriever/interface.go b/dataRetriever/interface.go index 88ba134aa6c..77f59710677 100644 --- a/dataRetriever/interface.go +++ b/dataRetriever/interface.go @@ -316,7 +316,7 @@ type WhiteListHandler interface { IsInterfaceNil() bool } -// DebugHandler defines an interface for debugging the reqested-resolved data +// DebugHandler defines an interface for debugging the requested-resolved data type DebugHandler interface { LogRequestedData(topic string, hashes [][]byte, numReqIntra int, numReqCross int) LogFailedToResolveData(topic string, hash []byte, err error) diff --git a/dataRetriever/mock/marshalizerMock.go 
b/dataRetriever/mock/marshalizerMock.go index 5299a5bb257..550359b01ca 100644 --- a/dataRetriever/mock/marshalizerMock.go +++ b/dataRetriever/mock/marshalizerMock.go @@ -32,7 +32,7 @@ func (mm *MarshalizerMock) Unmarshal(obj interface{}, buff []byte) error { } if obj == nil { - return errors.New("nil object to serilize to") + return errors.New("nil object to serialize to") } if buff == nil { diff --git a/dataRetriever/resolvers/validatorInfoResolver_test.go b/dataRetriever/resolvers/validatorInfoResolver_test.go index 92a9420cb54..0d5916c710e 100644 --- a/dataRetriever/resolvers/validatorInfoResolver_test.go +++ b/dataRetriever/resolvers/validatorInfoResolver_test.go @@ -379,7 +379,7 @@ func TestValidatorInfoResolver_ProcessReceivedMessage(t *testing.T) { require.NotNil(t, err) assert.True(t, strings.Contains(err.Error(), dataRetriever.ErrValidatorInfoNotFound.Error())) }) - t.Run("pack data in chuncks returns error", func(t *testing.T) { + t.Run("pack data in chunks returns error", func(t *testing.T) { t.Parallel() args := createMockArgValidatorInfoResolver() diff --git a/dataRetriever/shardedData/shardedData_test.go b/dataRetriever/shardedData/shardedData_test.go index 7ca7c379c17..d9ab827df10 100644 --- a/dataRetriever/shardedData/shardedData_test.go +++ b/dataRetriever/shardedData/shardedData_test.go @@ -78,7 +78,7 @@ func TestShardedData_StorageEvictsData(t *testing.T) { } assert.Less(t, sd.ShardDataStore("1").Len(), int(defaultTestConfig.Capacity), - "Transaction pool entries excedes the maximum configured number") + "Transaction pool entries exceeds the maximum configured number") } func TestShardedData_NoDuplicates(t *testing.T) { @@ -228,7 +228,7 @@ func TestShardedData_RegisterAddedDataHandlerShouldWork(t *testing.T) { } } -func TestShardedData_RegisterAddedDataHandlerReallyAddsAhandler(t *testing.T) { +func TestShardedData_RegisterAddedDataHandlerReallyAddsHandler(t *testing.T) { t.Parallel() f := func(key []byte, value interface{}) { diff --git 
a/dataRetriever/storageRequesters/headerRequester_test.go b/dataRetriever/storageRequesters/headerRequester_test.go index f73f81f6b74..73e54a96e4c 100644 --- a/dataRetriever/storageRequesters/headerRequester_test.go +++ b/dataRetriever/storageRequesters/headerRequester_test.go @@ -1,7 +1,6 @@ package storagerequesters import ( - "errors" "math" "testing" "time" @@ -140,7 +139,6 @@ func TestHeaderRequester_SetEpochHandlerShouldWork(t *testing.T) { func TestHeaderRequester_RequestDataFromHashNotFoundNotBufferedChannelShouldErr(t *testing.T) { t.Parallel() - expectedErr := errors.New("expected error") newEpochCalled := false sendCalled := false arg := createMockHeaderRequesterArg() @@ -173,7 +171,6 @@ func TestHeaderRequester_RequestDataFromHashNotFoundNotBufferedChannelShouldErr( func TestHeaderRequester_RequestDataFromHashNotFoundShouldErr(t *testing.T) { t.Parallel() - expectedErr := errors.New("expected error") newEpochCalled := false sendCalled := false arg := createMockHeaderRequesterArg() @@ -248,7 +245,6 @@ func TestHeaderRequester_RequestDataFromHashShouldWork(t *testing.T) { func TestHeaderRequester_RequestDataFromNonceNotFoundShouldErr(t *testing.T) { t.Parallel() - expectedErr := errors.New("expected error") newEpochCalled := false sendCalled := false arg := createMockHeaderRequesterArg() @@ -343,10 +339,9 @@ func TestHeaderRequester_RequestDataFromEpoch(t *testing.T) { err := hdReq.RequestDataFromEpoch(epochIdentifier) assert.Equal(t, core.ErrInvalidIdentifierForEpochStartBlockRequest, err) }) - t.Run("identifier not found should error should error", func(t *testing.T) { + t.Run("identifier not found should error", func(t *testing.T) { t.Parallel() - expectedErr := errors.New("expected error") epochIdentifier := []byte(core.EpochStartIdentifier(100)) arg := createMockHeaderRequesterArg() arg.HdrStorage = &storageStubs.StorerStub{ diff --git a/dataRetriever/topicSender/diffPeerListCreator_test.go b/dataRetriever/topicSender/diffPeerListCreator_test.go 
index 73b1a63b418..4b63b757608 100644 --- a/dataRetriever/topicSender/diffPeerListCreator_test.go +++ b/dataRetriever/topicSender/diffPeerListCreator_test.go @@ -75,7 +75,7 @@ func TestNewDiffPeerListCreator_ShouldWork(t *testing.T) { assert.Equal(t, excludedTopic, dplc.ExcludedPeersOnTopic()) } -func TestMakeDiffList_EmptyExcludedShoudRetAllPeersList(t *testing.T) { +func TestMakeDiffList_EmptyExcludedShouldRetAllPeersList(t *testing.T) { t.Parallel() allPeers := []core.PeerID{core.PeerID("peer1"), core.PeerID("peer2")} From b778829d6fdd39c7440627e1a03568ea432da07f Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Tue, 9 May 2023 17:38:50 +0300 Subject: [PATCH 149/221] added some trie package tests --- testscommon/trie/snapshotPruningStorerStub.go | 9 + trie/snapshotTrieStorageManager_test.go | 143 ++++-- trie/syncTrieStorageManager_test.go | 17 + trie/sync_test.go | 49 ++ trie/trieNodesHandler_test.go | 14 + trie/trieStorageManagerInEpoch_test.go | 77 ++- ...ieStorageManagerWithoutCheckpoints_test.go | 22 +- .../trieStorageManagerWithoutSnapshot_test.go | 19 +- trie/trieStorageManager_test.go | 458 ++++++++++++++++-- update/container/accountDBSyncers_test.go | 188 ++++--- update/container/export_test.go | 15 + update/container/trieSyncers_test.go | 177 +++++-- 12 files changed, 984 insertions(+), 204 deletions(-) create mode 100644 update/container/export_test.go diff --git a/testscommon/trie/snapshotPruningStorerStub.go b/testscommon/trie/snapshotPruningStorerStub.go index 59c113eae9b..e1a03119734 100644 --- a/testscommon/trie/snapshotPruningStorerStub.go +++ b/testscommon/trie/snapshotPruningStorerStub.go @@ -16,6 +16,7 @@ type SnapshotPruningStorerStub struct { PutInEpochWithoutCacheCalled func(key []byte, data []byte, epoch uint32) error GetLatestStorageEpochCalled func() (uint32, error) RemoveFromCurrentEpochCalled func(key []byte) error + CloseCalled func() error } // GetFromOldEpochsWithoutAddingToCache - @@ -88,3 +89,11 @@ func (spss 
*SnapshotPruningStorerStub) RemoveFromCurrentEpoch(key []byte) error } return spss.Remove(key) } + +// Close - +func (spss *SnapshotPruningStorerStub) Close() error { + if spss.CloseCalled != nil { + return spss.CloseCalled() + } + return nil +} diff --git a/trie/snapshotTrieStorageManager_test.go b/trie/snapshotTrieStorageManager_test.go index 9db4a24a9e3..96b5c2144df 100644 --- a/trie/snapshotTrieStorageManager_test.go +++ b/trie/snapshotTrieStorageManager_test.go @@ -1,12 +1,15 @@ package trie import ( + "errors" "strings" "testing" "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" + errorsMx "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/stretchr/testify/assert" ) @@ -31,55 +34,117 @@ func TestNewSnapshotTrieStorageManager(t *testing.T) { assert.False(t, check.IfNil(stsm)) } -func TestNewSnapshotTrieStorageManager_GetFromOldEpochsWithoutCache(t *testing.T) { +func TestNewSnapshotTrieStorageManager_Get(t *testing.T) { t.Parallel() - _, trieStorage := newEmptyTrie() - getFromOldEpochsWithoutCacheCalled := false - trieStorage.mainStorer = &trie.SnapshotPruningStorerStub{ - GetFromOldEpochsWithoutAddingToCacheCalled: func(_ []byte) ([]byte, core.OptionalUint32, error) { - getFromOldEpochsWithoutCacheCalled = true - return nil, core.OptionalUint32{}, nil - }, - } - stsm, _ := newSnapshotTrieStorageManager(trieStorage, 0) - - _, _ = stsm.Get([]byte("key")) - assert.True(t, getFromOldEpochsWithoutCacheCalled) + t.Run("closed storage manager should error", func(t *testing.T) { + t.Parallel() + + _, trieStorage := newEmptyTrie() + trieStorage.mainStorer = &trie.SnapshotPruningStorerStub{} + stsm, _ := newSnapshotTrieStorageManager(trieStorage, 0) + _ = stsm.Close() + + val, err := stsm.Get([]byte("key")) + assert.Equal(t, errorsMx.ErrContextClosing, err) + assert.Nil(t, val) 
+ }) + t.Run("GetFromOldEpochsWithoutAddingToCache returns db closed should error", func(t *testing.T) { + t.Parallel() + + _, trieStorage := newEmptyTrie() + trieStorage.mainStorer = &trie.SnapshotPruningStorerStub{ + GetFromOldEpochsWithoutAddingToCacheCalled: func(_ []byte) ([]byte, core.OptionalUint32, error) { + return nil, core.OptionalUint32{}, storage.ErrDBIsClosed + }, + } + stsm, _ := newSnapshotTrieStorageManager(trieStorage, 0) + + val, err := stsm.Get([]byte("key")) + assert.Equal(t, storage.ErrDBIsClosed, err) + assert.Nil(t, val) + }) + t.Run("should work from old epochs without cache", func(t *testing.T) { + t.Parallel() + + _, trieStorage := newEmptyTrie() + getFromOldEpochsWithoutCacheCalled := false + trieStorage.mainStorer = &trie.SnapshotPruningStorerStub{ + GetFromOldEpochsWithoutAddingToCacheCalled: func(_ []byte) ([]byte, core.OptionalUint32, error) { + getFromOldEpochsWithoutCacheCalled = true + return nil, core.OptionalUint32{}, nil + }, + } + stsm, _ := newSnapshotTrieStorageManager(trieStorage, 0) + + _, _ = stsm.Get([]byte("key")) + assert.True(t, getFromOldEpochsWithoutCacheCalled) + }) } -func TestNewSnapshotTrieStorageManager_PutWithoutCache(t *testing.T) { +func TestNewSnapshotTrieStorageManager_Put(t *testing.T) { t.Parallel() - _, trieStorage := newEmptyTrie() - putWithoutCacheCalled := false - trieStorage.mainStorer = &trie.SnapshotPruningStorerStub{ - PutInEpochWithoutCacheCalled: func(_ []byte, _ []byte, _ uint32) error { - putWithoutCacheCalled = true - return nil - }, - } - stsm, _ := newSnapshotTrieStorageManager(trieStorage, 0) - - _ = stsm.Put([]byte("key"), []byte("data")) - assert.True(t, putWithoutCacheCalled) + t.Run("closed storage manager should error", func(t *testing.T) { + t.Parallel() + + _, trieStorage := newEmptyTrie() + trieStorage.mainStorer = &trie.SnapshotPruningStorerStub{} + stsm, _ := newSnapshotTrieStorageManager(trieStorage, 0) + _ = stsm.Close() + + err := stsm.Put([]byte("key"), []byte("data")) + 
assert.Equal(t, errorsMx.ErrContextClosing, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + _, trieStorage := newEmptyTrie() + putWithoutCacheCalled := false + trieStorage.mainStorer = &trie.SnapshotPruningStorerStub{ + PutInEpochWithoutCacheCalled: func(_ []byte, _ []byte, _ uint32) error { + putWithoutCacheCalled = true + return nil + }, + } + stsm, _ := newSnapshotTrieStorageManager(trieStorage, 0) + + _ = stsm.Put([]byte("key"), []byte("data")) + assert.True(t, putWithoutCacheCalled) + }) } func TestNewSnapshotTrieStorageManager_GetFromLastEpoch(t *testing.T) { t.Parallel() - _, trieStorage := newEmptyTrie() - getFromLastEpochCalled := false - trieStorage.mainStorer = &trie.SnapshotPruningStorerStub{ - GetFromLastEpochCalled: func(_ []byte) ([]byte, error) { - getFromLastEpochCalled = true - return nil, nil - }, - } - stsm, _ := newSnapshotTrieStorageManager(trieStorage, 0) - - _, _ = stsm.GetFromLastEpoch([]byte("key")) - assert.True(t, getFromLastEpochCalled) + t.Run("closed storage manager should error", func(t *testing.T) { + t.Parallel() + + _, trieStorage := newEmptyTrie() + trieStorage.mainStorer = &trie.SnapshotPruningStorerStub{} + stsm, _ := newSnapshotTrieStorageManager(trieStorage, 0) + _ = stsm.Close() + + val, err := stsm.GetFromLastEpoch([]byte("key")) + assert.Equal(t, errorsMx.ErrContextClosing, err) + assert.Nil(t, val) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + _, trieStorage := newEmptyTrie() + getFromLastEpochCalled := false + trieStorage.mainStorer = &trie.SnapshotPruningStorerStub{ + GetFromLastEpochCalled: func(_ []byte) ([]byte, error) { + getFromLastEpochCalled = true + return nil, nil + }, + } + stsm, _ := newSnapshotTrieStorageManager(trieStorage, 0) + + _, _ = stsm.GetFromLastEpoch([]byte("key")) + assert.True(t, getFromLastEpochCalled) + }) } func TestSnapshotTrieStorageManager_AlsoAddInPreviousEpoch(t *testing.T) { @@ -200,7 +265,7 @@ func 
TestSnapshotTrieStorageManager_AlsoAddInPreviousEpoch(t *testing.T) { }, PutInEpochCalled: func(_ []byte, _ []byte, _ uint32) error { putInEpochCalled = true - return nil + return errors.New("error for coverage only") }, } stsm, _ := newSnapshotTrieStorageManager(trieStorage, 5) diff --git a/trie/syncTrieStorageManager_test.go b/trie/syncTrieStorageManager_test.go index afebd9fd918..c4818388fdc 100644 --- a/trie/syncTrieStorageManager_test.go +++ b/trie/syncTrieStorageManager_test.go @@ -1,6 +1,7 @@ package trie import ( + "errors" "strings" "testing" @@ -57,6 +58,22 @@ func TestNewSyncTrieStorageManager_PutInFirstEpoch(t *testing.T) { assert.Equal(t, 1, putInEpochCalled) } +func TestNewSyncTrieStorageManager_PutInEpochError(t *testing.T) { + t.Parallel() + + expectedErr := errors.New("expected error") + _, trieStorage := newEmptyTrie() + trieStorage.mainStorer = &trie.SnapshotPruningStorerStub{ + PutInEpochCalled: func(_ []byte, _ []byte, _ uint32) error { + return expectedErr + }, + } + stsm, _ := NewSyncTrieStorageManager(trieStorage) + + err := stsm.Put([]byte("key"), []byte("val")) + assert.Equal(t, expectedErr, err) +} + func TestNewSyncTrieStorageManager_PutInEpoch(t *testing.T) { t.Parallel() diff --git a/trie/sync_test.go b/trie/sync_test.go index cf56628be2c..97fafe653f5 100644 --- a/trie/sync_test.go +++ b/trie/sync_test.go @@ -8,6 +8,8 @@ import ( "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/common" + errorsMx "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" @@ -233,3 +235,50 @@ func TestTrieSync_FoundInStorageShouldNotRequest(t *testing.T) { err = ts.StartSyncing(rootHash, context.Background()) assert.Nil(t, err) } + +func TestTrieSync_StartSyncing(t *testing.T) { + t.Parallel() + + t.Run("nil hash 
should return nil", func(t *testing.T) { + t.Parallel() + + timeout := time.Second * 2 + arg := createMockArgument(timeout) + ts, _ := NewTrieSyncer(arg) + + err := ts.StartSyncing(nil, context.Background()) + assert.NoError(t, err) + }) + t.Run("empty trie hash should return nil", func(t *testing.T) { + t.Parallel() + + timeout := time.Second * 2 + arg := createMockArgument(timeout) + ts, _ := NewTrieSyncer(arg) + + err := ts.StartSyncing(common.EmptyTrieHash, context.Background()) + assert.NoError(t, err) + }) + t.Run("nil context should error", func(t *testing.T) { + t.Parallel() + + timeout := time.Second * 2 + arg := createMockArgument(timeout) + ts, _ := NewTrieSyncer(arg) + + err := ts.StartSyncing([]byte("roothash"), nil) + assert.Equal(t, ErrNilContext, err) + }) + t.Run("closed context should error", func(t *testing.T) { + t.Parallel() + + timeout := time.Second * 2 + arg := createMockArgument(timeout) + ts, _ := NewTrieSyncer(arg) + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + err := ts.StartSyncing([]byte("roothash"), ctx) + assert.Equal(t, errorsMx.ErrContextClosing, err) + }) +} diff --git a/trie/trieNodesHandler_test.go b/trie/trieNodesHandler_test.go index 86b4d6431ab..146200ac3ee 100644 --- a/trie/trieNodesHandler_test.go +++ b/trie/trieNodesHandler_test.go @@ -63,6 +63,20 @@ func TestTrieNodesHandler_jobDone(t *testing.T) { assert.True(t, handler.jobDone()) } +func TestTrieNodesHandler_noMissingHashes(t *testing.T) { + t.Parallel() + + roothash := "roothash" + handler := newTrieNodesHandler() + assert.True(t, handler.noMissingHashes()) + + handler.addInitialRootHash(roothash) + assert.False(t, handler.noMissingHashes()) + + handler.processMissingHashWasFound(&leafNode{}, roothash) + assert.True(t, handler.noMissingHashes()) +} + func TestTrieNodesHandler_replaceParentWithChildren(t *testing.T) { t.Parallel() diff --git a/trie/trieStorageManagerInEpoch_test.go b/trie/trieStorageManagerInEpoch_test.go index 
9ba92d45549..45fd78df3ec 100644 --- a/trie/trieStorageManagerInEpoch_test.go +++ b/trie/trieStorageManagerInEpoch_test.go @@ -1,21 +1,24 @@ package trie import ( + "errors" "strings" "testing" - "github.com/multiversx/mx-chain-core-go/core/check" + errorsMx "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/database" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestNewTrieStorageManagerInEpochNilStorageManager(t *testing.T) { t.Parallel() tsmie, err := newTrieStorageManagerInEpoch(nil, 0) - assert.True(t, check.IfNil(tsmie)) + assert.Nil(t, tsmie) assert.Equal(t, ErrNilTrieStorage, err) } @@ -25,7 +28,7 @@ func TestNewTrieStorageManagerInEpochInvalidStorageManagerType(t *testing.T) { trieStorage := &testscommon.StorageManagerStub{} tsmie, err := newTrieStorageManagerInEpoch(trieStorage, 0) - assert.True(t, check.IfNil(tsmie)) + assert.Nil(t, tsmie) assert.NotNil(t, err) assert.True(t, strings.Contains(err.Error(), "invalid storage manager, type is")) } @@ -37,7 +40,7 @@ func TestNewTrieStorageManagerInEpochInvalidStorerType(t *testing.T) { trieStorage.mainStorer = database.NewMemDB() tsmie, err := newTrieStorageManagerInEpoch(trieStorage, 0) - assert.True(t, check.IfNil(tsmie)) + assert.Nil(t, tsmie) assert.NotNil(t, err) assert.True(t, strings.Contains(err.Error(), "invalid storer, type is")) } @@ -48,13 +51,41 @@ func TestNewTrieStorageManagerInEpoch(t *testing.T) { _, trieStorage := newEmptyTrie() tsmie, err := newTrieStorageManagerInEpoch(trieStorage, 0) - assert.False(t, check.IfNil(tsmie)) + assert.NotNil(t, tsmie) assert.Nil(t, err) } +func TestTrieStorageManagerInEpoch_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var tsmie *trieStorageManagerInEpoch + assert.True(t, tsmie.IsInterfaceNil()) + + _, trieStorage := newEmptyTrie() + tsmie, 
_ = newTrieStorageManagerInEpoch(trieStorage, 0) + assert.False(t, tsmie.IsInterfaceNil()) +} + func TestTrieStorageManagerInEpoch_GetFromEpoch(t *testing.T) { t.Parallel() + t.Run("closed storage manager should error", func(t *testing.T) { + t.Parallel() + + _, trieStorage := newEmptyTrie() + trieStorage.mainStorer = &trie.SnapshotPruningStorerStub{ + GetFromEpochCalled: func(_ []byte, _ uint32) ([]byte, error) { + require.Fail(t, "should have not been called") + return nil, nil + }, + } + tsmie, _ := newTrieStorageManagerInEpoch(trieStorage, 0) + _ = tsmie.Close() + + _, err := tsmie.Get([]byte("key")) + require.Equal(t, errorsMx.ErrContextClosing, err) + }) + t.Run("epoch 0 does not panic", func(t *testing.T) { t.Parallel() @@ -72,6 +103,42 @@ func TestTrieStorageManagerInEpoch_GetFromEpoch(t *testing.T) { assert.True(t, getFromEpochCalled) }) + t.Run("closing error should work", func(t *testing.T) { + t.Parallel() + + _, trieStorage := newEmptyTrie() + getFromEpochCalled := false + trieStorage.mainStorer = &trie.SnapshotPruningStorerStub{ + GetFromEpochCalled: func(_ []byte, _ uint32) ([]byte, error) { + getFromEpochCalled = true + return nil, storage.ErrDBIsClosed + }, + } + tsmie, _ := newTrieStorageManagerInEpoch(trieStorage, 0) + + _, err := tsmie.Get([]byte("key")) + assert.Equal(t, ErrKeyNotFound, err) + assert.True(t, getFromEpochCalled) + }) + + t.Run("other error should work", func(t *testing.T) { + t.Parallel() + + _, trieStorage := newEmptyTrie() + getFromEpochCalled := false + trieStorage.mainStorer = &trie.SnapshotPruningStorerStub{ + GetFromEpochCalled: func(_ []byte, _ uint32) ([]byte, error) { + getFromEpochCalled = true + return nil, errors.New("not closing error") + }, + } + tsmie, _ := newTrieStorageManagerInEpoch(trieStorage, 0) + + _, err := tsmie.Get([]byte("key")) + assert.Equal(t, ErrKeyNotFound, err) + assert.True(t, getFromEpochCalled) + }) + t.Run("getFromEpoch searches more storers", func(t *testing.T) { t.Parallel() diff --git 
a/trie/trieStorageManagerWithoutCheckpoints_test.go b/trie/trieStorageManagerWithoutCheckpoints_test.go index 891a14a392e..7775037b289 100644 --- a/trie/trieStorageManagerWithoutCheckpoints_test.go +++ b/trie/trieStorageManagerWithoutCheckpoints_test.go @@ -9,15 +9,27 @@ import ( trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -func TestNewTrieStorageManagerWithoutCheckpointsOkVals(t *testing.T) { +func TestNewTrieStorageManagerWithoutCheckpoints(t *testing.T) { t.Parallel() - tsm, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) - ts, err := trie.NewTrieStorageManagerWithoutCheckpoints(tsm) - assert.Nil(t, err) - assert.NotNil(t, ts) + t.Run("nil storage manager should error", func(t *testing.T) { + t.Parallel() + + ts, err := trie.NewTrieStorageManagerWithoutCheckpoints(nil) + require.Equal(t, trie.ErrNilTrieStorage, err) + require.Nil(t, ts) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + tsm, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) + ts, err := trie.NewTrieStorageManagerWithoutCheckpoints(tsm) + assert.Nil(t, err) + assert.NotNil(t, ts) + }) } func TestTrieStorageManagerWithoutCheckpoints_SetCheckpoint(t *testing.T) { diff --git a/trie/trieStorageManagerWithoutSnapshot_test.go b/trie/trieStorageManagerWithoutSnapshot_test.go index 309e328433f..0dd15d21b68 100644 --- a/trie/trieStorageManagerWithoutSnapshot_test.go +++ b/trie/trieStorageManagerWithoutSnapshot_test.go @@ -15,10 +15,21 @@ import ( func TestNewTrieStorageManagerWithoutSnapshot(t *testing.T) { t.Parallel() - tsm, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) - ts, err := trie.NewTrieStorageManagerWithoutSnapshot(tsm) - assert.Nil(t, err) - assert.NotNil(t, ts) + t.Run("nil trie storage manager should error", func(t *testing.T) { + t.Parallel() + + ts, err := 
trie.NewTrieStorageManagerWithoutSnapshot(nil) + assert.Equal(t, trie.ErrNilTrieStorage, err) + assert.Nil(t, ts) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + tsm, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) + ts, err := trie.NewTrieStorageManagerWithoutSnapshot(tsm) + assert.Nil(t, err) + assert.NotNil(t, ts) + }) } func TestTrieStorageManagerWithoutSnapshot_GetFromCurrentEpoch(t *testing.T) { diff --git a/trie/trieStorageManager_test.go b/trie/trieStorageManager_test.go index a0b5a88ce63..cca48a65e92 100644 --- a/trie/trieStorageManager_test.go +++ b/trie/trieStorageManager_test.go @@ -12,8 +12,10 @@ import ( "github.com/multiversx/mx-chain-go/common/errChan" "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/errors" + storageMx "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/storage" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" "github.com/multiversx/mx-chain-go/trie/hashesHolder" @@ -25,6 +27,12 @@ const ( hashSize = 32 ) +var ( + providedKey = []byte("key") + providedVal = []byte("value") + expectedErr = errorsGo.New("expected error") +) + func getNewTrieStorageManagerArgs() trie.NewTrieStorageManagerArgs { return trie.NewTrieStorageManagerArgs{ MainStorer: testscommon.CreateMemUnit(), @@ -46,6 +54,24 @@ type errChanWithLen interface { func TestNewTrieStorageManager(t *testing.T) { t.Parallel() + t.Run("nil main storer", func(t *testing.T) { + t.Parallel() + + args := getNewTrieStorageManagerArgs() + args.MainStorer = nil + ts, err := trie.NewTrieStorageManager(args) + assert.Nil(t, ts) + assert.True(t, strings.Contains(err.Error(), trie.ErrNilStorer.Error())) + }) + t.Run("nil checkpoints storer", func(t *testing.T) { + t.Parallel() + + args := getNewTrieStorageManagerArgs() + 
args.CheckpointsStorer = nil + ts, err := trie.NewTrieStorageManager(args) + assert.Nil(t, ts) + assert.True(t, strings.Contains(err.Error(), trie.ErrNilStorer.Error())) + }) t.Run("nil marshaller", func(t *testing.T) { t.Parallel() @@ -73,6 +99,24 @@ func TestNewTrieStorageManager(t *testing.T) { assert.Nil(t, ts) assert.Equal(t, trie.ErrNilCheckpointHashesHolder, err) }) + t.Run("nil idle provider", func(t *testing.T) { + t.Parallel() + + args := getNewTrieStorageManagerArgs() + args.IdleProvider = nil + ts, err := trie.NewTrieStorageManager(args) + assert.Nil(t, ts) + assert.Equal(t, trie.ErrNilIdleNodeProvider, err) + }) + t.Run("invalid config should error", func(t *testing.T) { + t.Parallel() + + args := getNewTrieStorageManagerArgs() + args.GeneralConfig.SnapshotsGoroutineNum = 0 + ts, err := trie.NewTrieStorageManager(args) + assert.Nil(t, ts) + assert.Error(t, err) + }) t.Run("should work", func(t *testing.T) { t.Parallel() @@ -211,6 +255,7 @@ func TestTrieStorageManager_IsPruningBlocked(t *testing.T) { args := getNewTrieStorageManagerArgs() ts, _ := trie.NewTrieStorageManager(args) + ts.ExitPruningBufferingMode() // early exit assert.False(t, ts.IsPruningBlocked()) @@ -224,34 +269,99 @@ func TestTrieStorageManager_IsPruningBlocked(t *testing.T) { func TestTrieStorageManager_Remove(t *testing.T) { t.Parallel() + t.Run("main storer not snapshotPruningStorer should call remove", func(t *testing.T) { + t.Parallel() + + wasCalled := false + args := getNewTrieStorageManagerArgs() + args.MainStorer = &storage.StorerStub{ + RemoveCalled: func(key []byte) error { + wasCalled = true + return nil + }, + } + ts, _ := trie.NewTrieStorageManager(args) + + err := ts.Remove(providedKey) + assert.Nil(t, err) + assert.True(t, wasCalled) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + args := getNewTrieStorageManagerArgs() + args.MainStorer = testscommon.NewSnapshotPruningStorerMock() + args.CheckpointsStorer = testscommon.NewSnapshotPruningStorerMock() 
+ ts, _ := trie.NewTrieStorageManager(args) + + _ = args.MainStorer.Put(providedKey, providedVal) + hashes := make(common.ModifiedHashes) + hashes[string(providedVal)] = struct{}{} + hashes[string(providedKey)] = struct{}{} + _ = args.CheckpointHashesHolder.Put(providedKey, hashes) + + val, err := args.MainStorer.Get(providedKey) + assert.Nil(t, err) + assert.NotNil(t, val) + ok := args.CheckpointHashesHolder.ShouldCommit(providedKey) + assert.True(t, ok) + + err = ts.Remove(providedKey) + assert.Nil(t, err) + + val, err = args.MainStorer.Get(providedKey) + assert.Nil(t, val) + assert.NotNil(t, err) + ok = args.CheckpointHashesHolder.ShouldCommit(providedKey) + assert.False(t, ok) + }) +} + +func TestTrieStorageManager_RemoveFromCheckpointHashesHolder(t *testing.T) { + t.Parallel() + + wasCalled := false args := getNewTrieStorageManagerArgs() - args.MainStorer = testscommon.NewSnapshotPruningStorerMock() - args.CheckpointsStorer = testscommon.NewSnapshotPruningStorerMock() + args.CheckpointHashesHolder = &trieMock.CheckpointHashesHolderStub{ + RemoveCalled: func(bytes []byte) { + wasCalled = true + }, + } ts, _ := trie.NewTrieStorageManager(args) - key := []byte("key") - value := []byte("value") + ts.RemoveFromCheckpointHashesHolder(providedKey) + assert.True(t, wasCalled) +} - _ = args.MainStorer.Put(key, value) - hashes := make(common.ModifiedHashes) - hashes[string(value)] = struct{}{} - hashes[string(key)] = struct{}{} - _ = args.CheckpointHashesHolder.Put(key, hashes) +func TestTrieStorageManager_SetEpochForPutOperation(t *testing.T) { + t.Parallel() - val, err := args.MainStorer.Get(key) - assert.Nil(t, err) - assert.NotNil(t, val) - ok := args.CheckpointHashesHolder.ShouldCommit(key) - assert.True(t, ok) + t.Run("main storer not epochStorer should early exit", func(t *testing.T) { + t.Parallel() - err = ts.Remove(key) - assert.Nil(t, err) + args := getNewTrieStorageManagerArgs() + args.MainStorer = &storage.StorerStub{} + ts, _ := 
trie.NewTrieStorageManager(args) - val, err = args.MainStorer.Get(key) - assert.Nil(t, val) - assert.NotNil(t, err) - ok = args.CheckpointHashesHolder.ShouldCommit(key) - assert.False(t, ok) + ts.SetEpochForPutOperation(0) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + providedEpoch := uint32(100) + wasCalled := false + args := getNewTrieStorageManagerArgs() + args.MainStorer = &testscommon.StorageManagerStub{ + SetEpochForPutOperationCalled: func(u uint32) { + assert.Equal(t, providedEpoch, u) + wasCalled = true + }, + } + ts, _ := trie.NewTrieStorageManager(args) + + ts.SetEpochForPutOperation(providedEpoch) + assert.True(t, wasCalled) + }) } func TestTrieStorageManager_PutInEpochClosedDb(t *testing.T) { @@ -261,9 +371,7 @@ func TestTrieStorageManager_PutInEpochClosedDb(t *testing.T) { ts, _ := trie.NewTrieStorageManager(args) _ = ts.Close() - key := []byte("key") - value := []byte("value") - err := ts.PutInEpoch(key, value, 0) + err := ts.PutInEpoch(providedKey, providedVal, 0) assert.Equal(t, errors.ErrContextClosing, err) } @@ -273,9 +381,7 @@ func TestTrieStorageManager_PutInEpochInvalidStorer(t *testing.T) { args := getNewTrieStorageManagerArgs() ts, _ := trie.NewTrieStorageManager(args) - key := []byte("key") - value := []byte("value") - err := ts.PutInEpoch(key, value, 0) + err := ts.PutInEpoch(providedKey, providedVal, 0) assert.True(t, strings.Contains(err.Error(), "invalid storer type")) } @@ -293,9 +399,7 @@ func TestTrieStorageManager_PutInEpoch(t *testing.T) { } ts, _ := trie.NewTrieStorageManager(args) - key := []byte("key") - value := []byte("value") - err := ts.PutInEpoch(key, value, 0) + err := ts.PutInEpoch(providedKey, providedVal, 0) assert.Nil(t, err) assert.True(t, putInEpochCalled) } @@ -415,32 +519,292 @@ func TestTrieStorageManager_TakeSnapshotWithGetNodeFromDBError(t *testing.T) { assert.True(t, strings.Contains(errRecovered.Error(), common.GetNodeFromDBErrorString)) } -func 
TestTrieStorageManager_ShouldTakeSnapshotInvalidStorer(t *testing.T) { +func TestTrieStorageManager_ShouldTakeSnapshot(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() - ts, _ := trie.NewTrieStorageManager(args) + t.Run("invalid storer should return false", func(t *testing.T) { + t.Parallel() + + args := getNewTrieStorageManagerArgs() + ts, _ := trie.NewTrieStorageManager(args) + + assert.False(t, ts.ShouldTakeSnapshot()) + }) + t.Run("trie synced should return false", func(t *testing.T) { + t.Parallel() + + args := getNewTrieStorageManagerArgs() + args.MainStorer = &trieMock.SnapshotPruningStorerStub{ + GetFromCurrentEpochCalled: func(key []byte) ([]byte, error) { + return []byte(common.TrieSyncedVal), nil + }, + MemDbMock: testscommon.NewMemDbMock(), + } + ts, _ := trie.NewTrieStorageManager(args) + + assert.False(t, ts.ShouldTakeSnapshot()) + }) + t.Run("GetFromOldEpochsWithoutAddingToCacheCalled error should return false", func(t *testing.T) { + t.Parallel() + + args := getNewTrieStorageManagerArgs() + args.MainStorer = &trieMock.SnapshotPruningStorerStub{ + GetFromCurrentEpochCalled: func(key []byte) ([]byte, error) { + return nil, expectedErr // isTrieSynced returns false + }, + GetFromOldEpochsWithoutAddingToCacheCalled: func(key []byte) ([]byte, core.OptionalUint32, error) { + return nil, core.OptionalUint32{}, storageMx.ErrDBIsClosed + }, + MemDbMock: testscommon.NewMemDbMock(), + } + ts, _ := trie.NewTrieStorageManager(args) + + assert.False(t, ts.ShouldTakeSnapshot()) + }) + t.Run("GetFromOldEpochsWithoutAddingToCacheCalled returns non ActiveDBVal should return false", func(t *testing.T) { + t.Parallel() + + args := getNewTrieStorageManagerArgs() + args.MainStorer = &trieMock.SnapshotPruningStorerStub{ + GetFromCurrentEpochCalled: func(key []byte) ([]byte, error) { + return []byte("response"), nil + }, + GetFromOldEpochsWithoutAddingToCacheCalled: func(key []byte) ([]byte, core.OptionalUint32, error) { + return []byte("response"), 
core.OptionalUint32{}, nil + }, + MemDbMock: testscommon.NewMemDbMock(), + } + ts, _ := trie.NewTrieStorageManager(args) + + assert.False(t, ts.ShouldTakeSnapshot()) + }) + t.Run("GetFromOldEpochsWithoutAddingToCacheCalled returns ActiveDBVal should return true", func(t *testing.T) { + t.Parallel() + + args := getNewTrieStorageManagerArgs() + args.MainStorer = &trieMock.SnapshotPruningStorerStub{ + GetFromCurrentEpochCalled: func(key []byte) ([]byte, error) { + return nil, expectedErr // isTrieSynced returns false + }, + GetFromOldEpochsWithoutAddingToCacheCalled: func(key []byte) ([]byte, core.OptionalUint32, error) { + return []byte(common.ActiveDBVal), core.OptionalUint32{}, nil + }, + MemDbMock: testscommon.NewMemDbMock(), + } + ts, _ := trie.NewTrieStorageManager(args) + + assert.True(t, ts.ShouldTakeSnapshot()) + }) +} + +func TestTrieStorageManager_Get(t *testing.T) { + t.Parallel() + + t.Run("closed storage manager should error", func(t *testing.T) { + t.Parallel() + + ts, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) + _ = ts.Close() + + val, err := ts.Get(providedKey) + assert.Equal(t, errors.ErrContextClosing, err) + assert.Nil(t, val) + }) + t.Run("main storer closing should error", func(t *testing.T) { + t.Parallel() + + args := getNewTrieStorageManagerArgs() + args.MainStorer = &storage.StorerStub{ + GetCalled: func(key []byte) ([]byte, error) { + return nil, storageMx.ErrDBIsClosed + }, + } + ts, _ := trie.NewTrieStorageManager(args) + + val, err := ts.Get(providedKey) + assert.Equal(t, storageMx.ErrDBIsClosed, err) + assert.Nil(t, val) + }) + t.Run("checkpoints storer closing should error", func(t *testing.T) { + t.Parallel() + + args := getNewTrieStorageManagerArgs() + args.CheckpointsStorer = &storage.StorerStub{ + GetCalled: func(key []byte) ([]byte, error) { + return nil, storageMx.ErrDBIsClosed + }, + } + ts, _ := trie.NewTrieStorageManager(args) - assert.False(t, ts.ShouldTakeSnapshot()) + val, err := ts.Get(providedKey) + 
assert.Equal(t, storageMx.ErrDBIsClosed, err) + assert.Nil(t, val) + }) + t.Run("should return from main storer", func(t *testing.T) { + t.Parallel() + + args := getNewTrieStorageManagerArgs() + _ = args.MainStorer.Put(providedKey, providedVal) + ts, _ := trie.NewTrieStorageManager(args) + + val, err := ts.Get(providedKey) + assert.Nil(t, err) + assert.Equal(t, providedVal, val) + }) + t.Run("should return from checkpoints storer", func(t *testing.T) { + t.Parallel() + + args := getNewTrieStorageManagerArgs() + _ = args.CheckpointsStorer.Put(providedKey, providedVal) + ts, _ := trie.NewTrieStorageManager(args) + + val, err := ts.Get(providedKey) + assert.Nil(t, err) + assert.Equal(t, providedVal, val) + }) } func TestNewSnapshotTrieStorageManager_GetFromCurrentEpoch(t *testing.T) { t.Parallel() - getFromCurrentEpochCalled := false - args := getNewTrieStorageManagerArgs() - args.MainStorer = &trieMock.SnapshotPruningStorerStub{ - MemDbMock: testscommon.NewMemDbMock(), - GetFromCurrentEpochCalled: func(_ []byte) ([]byte, error) { - getFromCurrentEpochCalled = true - return nil, nil - }, - } - ts, _ := trie.NewTrieStorageManager(args) + t.Run("closed storage manager should error", func(t *testing.T) { + t.Parallel() - _, err := ts.GetFromCurrentEpoch([]byte("key")) - assert.Nil(t, err) - assert.True(t, getFromCurrentEpochCalled) + ts, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) + _ = ts.Close() + + val, err := ts.GetFromCurrentEpoch(providedKey) + assert.Equal(t, errors.ErrContextClosing, err) + assert.Nil(t, val) + }) + t.Run("main storer not snapshotPruningStorer should error", func(t *testing.T) { + t.Parallel() + + args := getNewTrieStorageManagerArgs() + args.MainStorer = &storage.StorerStub{} + ts, _ := trie.NewTrieStorageManager(args) + + val, err := ts.GetFromCurrentEpoch(providedKey) + assert.True(t, strings.Contains(err.Error(), "invalid storer")) + assert.Nil(t, val) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + 
getFromCurrentEpochCalled := false + args := getNewTrieStorageManagerArgs() + args.MainStorer = &trieMock.SnapshotPruningStorerStub{ + MemDbMock: testscommon.NewMemDbMock(), + GetFromCurrentEpochCalled: func(_ []byte) ([]byte, error) { + getFromCurrentEpochCalled = true + return nil, nil + }, + } + ts, _ := trie.NewTrieStorageManager(args) + + _, err := ts.GetFromCurrentEpoch(providedKey) + assert.Nil(t, err) + assert.True(t, getFromCurrentEpochCalled) + }) +} + +func TestTrieStorageManager_Put(t *testing.T) { + t.Parallel() + + t.Run("closed storage manager should error", func(t *testing.T) { + t.Parallel() + + ts, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) + _ = ts.Close() + + err := ts.Put(providedKey, providedVal) + assert.Equal(t, errors.ErrContextClosing, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + ts, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) + + _ = ts.Put(providedKey, providedVal) + val, err := ts.Get(providedKey) + assert.Nil(t, err) + assert.Equal(t, providedVal, val) + }) +} + +func TestTrieStorageManager_PutInEpochWithoutCache(t *testing.T) { + t.Parallel() + + t.Run("closed storage manager should error", func(t *testing.T) { + t.Parallel() + + ts, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) + _ = ts.Close() + + err := ts.PutInEpochWithoutCache(providedKey, providedVal, 0) + assert.Equal(t, errors.ErrContextClosing, err) + }) + t.Run("main storer not snapshotPruningStorer should error", func(t *testing.T) { + t.Parallel() + + args := getNewTrieStorageManagerArgs() + args.MainStorer = &storage.StorerStub{} + ts, _ := trie.NewTrieStorageManager(args) + + err := ts.PutInEpochWithoutCache(providedKey, providedVal, 0) + assert.True(t, strings.Contains(err.Error(), "invalid storer")) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + args := getNewTrieStorageManagerArgs() + args.MainStorer = testscommon.NewSnapshotPruningStorerMock() + ts, _ := 
trie.NewTrieStorageManager(args) + + err := ts.PutInEpochWithoutCache(providedKey, providedVal, 0) + assert.Nil(t, err) + }) +} + +func TestTrieStorageManager_Close(t *testing.T) { + t.Parallel() + + t.Run("error on main storer close", func(t *testing.T) { + t.Parallel() + + args := getNewTrieStorageManagerArgs() + args.MainStorer = &storage.StorerStub{ + CloseCalled: func() error { + return expectedErr + }, + } + ts, _ := trie.NewTrieStorageManager(args) + + err := ts.Close() + assert.True(t, errorsGo.Is(err, expectedErr)) + }) + t.Run("error on checkpoints storer close", func(t *testing.T) { + t.Parallel() + + args := getNewTrieStorageManagerArgs() + args.CheckpointsStorer = &storage.StorerStub{ + CloseCalled: func() error { + return expectedErr + }, + } + ts, _ := trie.NewTrieStorageManager(args) + + err := ts.Close() + assert.True(t, errorsGo.Is(err, expectedErr)) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + ts, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) + + err := ts.Close() + assert.NoError(t, err) + }) } func TestWriteInChanNonBlocking(t *testing.T) { diff --git a/update/container/accountDBSyncers_test.go b/update/container/accountDBSyncers_test.go index 0a63210e6c9..92baf0218f2 100644 --- a/update/container/accountDBSyncers_test.go +++ b/update/container/accountDBSyncers_test.go @@ -10,6 +10,11 @@ import ( "github.com/stretchr/testify/require" ) +var ( + testKey = "key" + testAccountsDBSyncersVal = &mock.AccountsDBSyncerStub{} +) + func TestNewAccountsDBSyncersContainer(t *testing.T) { t.Parallel() @@ -17,82 +22,157 @@ func TestNewAccountsDBSyncersContainer(t *testing.T) { require.False(t, check.IfNil(adsc)) } -func TestAccountDBSyncers_AddGetShouldWork(t *testing.T) { +func TestAccountDBSyncers_Get(t *testing.T) { t.Parallel() - adsc := NewAccountsDBSyncersContainer() - testKey := "key" - testVal := &mock.AccountsDBSyncerStub{} - err := adsc.Add(testKey, testVal) - require.NoError(t, err) + t.Run("missing key 
should error", func(t *testing.T) { + t.Parallel() + + adsc := NewAccountsDBSyncersContainer() + val, err := adsc.Get(testKey) + require.Equal(t, update.ErrInvalidContainerKey, err) + require.Nil(t, val) + }) + t.Run("invalid data should error", func(t *testing.T) { + t.Parallel() + + adsc := NewAccountsDBSyncersContainer() + + _ = adsc.AddInterface(testKey, "not an account db syncer") + val, err := adsc.Get(testKey) + require.Equal(t, update.ErrWrongTypeInContainer, err) + require.Nil(t, val) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + adsc := NewAccountsDBSyncersContainer() + + err := adsc.Add(testKey, testAccountsDBSyncersVal) + require.Nil(t, err) + val, err := adsc.Get(testKey) + require.NoError(t, err) + require.Equal(t, testAccountsDBSyncersVal, val) + }) +} - res, err := adsc.Get(testKey) - require.NoError(t, err) - require.Equal(t, testVal, res) +func TestAccountDBSyncers_Add(t *testing.T) { + t.Parallel() + + t.Run("nil value should error", func(t *testing.T) { + t.Parallel() + + adsc := NewAccountsDBSyncersContainer() + err := adsc.Add(testKey, nil) + require.Equal(t, update.ErrNilContainerElement, err) + }) + t.Run("duplicated key should error", func(t *testing.T) { + t.Parallel() + + adsc := NewAccountsDBSyncersContainer() + err := adsc.Add(testKey, testAccountsDBSyncersVal) + require.NoError(t, err) + + err = adsc.Add(testKey, testAccountsDBSyncersVal) + require.Equal(t, update.ErrContainerKeyAlreadyExists, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + adsc := NewAccountsDBSyncersContainer() + err := adsc.Add(testKey, testAccountsDBSyncersVal) + require.NoError(t, err) + }) } -func TestAccountDBSyncers_AddMultipleShouldWork(t *testing.T) { +func TestAccountDBSyncers_AddMultiple(t *testing.T) { t.Parallel() - adsc := NewAccountsDBSyncersContainer() testKey0 := "key0" - testVal0 := &mock.AccountsDBSyncerStub{} + testAccountsDBSyncersVal0 := &mock.AccountsDBSyncerStub{} testKey1 := "key1" - testVal1 := 
&mock.AccountsDBSyncerStub{} - - err := adsc.AddMultiple([]string{testKey0, testKey1}, []update.AccountsDBSyncer{testVal0, testVal1}) - require.NoError(t, err) - - res0, err := adsc.Get(testKey0) - require.NoError(t, err) - require.Equal(t, testVal0, res0) - - res1, err := adsc.Get(testKey1) - require.NoError(t, err) - require.Equal(t, testVal1, res1) - - require.Equal(t, 2, adsc.Len()) + testAccountsDBSyncersVal1 := &mock.AccountsDBSyncerStub{} + + t.Run("different lengths should error", func(t *testing.T) { + t.Parallel() + + adsc := NewAccountsDBSyncersContainer() + err := adsc.AddMultiple([]string{testKey0}, nil) + require.Equal(t, update.ErrLenMismatch, err) + }) + t.Run("duplicated keys should should error on Add", func(t *testing.T) { + t.Parallel() + + adsc := NewAccountsDBSyncersContainer() + err := adsc.AddMultiple([]string{testKey0, testKey1, testKey1}, []update.AccountsDBSyncer{testAccountsDBSyncersVal0, testAccountsDBSyncersVal1, testAccountsDBSyncersVal1}) + require.Equal(t, update.ErrContainerKeyAlreadyExists, err) + }) + t.Run("should work"+ + "", func(t *testing.T) { + t.Parallel() + + adsc := NewAccountsDBSyncersContainer() + err := adsc.AddMultiple([]string{testKey0, testKey1}, []update.AccountsDBSyncer{testAccountsDBSyncersVal0, testAccountsDBSyncersVal1}) + require.NoError(t, err) + + res0, err := adsc.Get(testKey0) + require.NoError(t, err) + require.Equal(t, testAccountsDBSyncersVal0, res0) + + res1, err := adsc.Get(testKey1) + require.NoError(t, err) + require.Equal(t, testAccountsDBSyncersVal1, res1) + + require.Equal(t, 2, adsc.Len()) + }) } -func TestAccountDBSyncers_ReplaceShouldWork(t *testing.T) { +func TestAccountDBSyncers_Replace(t *testing.T) { t.Parallel() - adsc := NewAccountsDBSyncersContainer() - testKey := "key" - testVal := &mock.AccountsDBSyncerStub{} - err := adsc.Add(testKey, testVal) - require.NoError(t, err) - - res, err := adsc.Get(testKey) - require.NoError(t, err) - require.Equal(t, testVal, res) - - // update - 
newTestVal := &mock.AccountsDBSyncerStub{ - SyncAccountsCalled: func(_ []byte) error { - return errors.New("local error") - }, - } - err = adsc.Replace(testKey, newTestVal) - require.NoError(t, err) - - res, err = adsc.Get(testKey) - require.NoError(t, err) - require.Equal(t, newTestVal, res) + t.Run("nil val should error", func(t *testing.T) { + t.Parallel() + + adsc := NewAccountsDBSyncersContainer() + err := adsc.Replace(testKey, nil) + require.Equal(t, update.ErrNilContainerElement, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + adsc := NewAccountsDBSyncersContainer() + err := adsc.Add(testKey, testAccountsDBSyncersVal) + require.NoError(t, err) + + res, err := adsc.Get(testKey) + require.NoError(t, err) + require.Equal(t, testAccountsDBSyncersVal, res) + + // update + newtestAccountsDBSyncersVal := &mock.AccountsDBSyncerStub{ + SyncAccountsCalled: func(_ []byte) error { + return errors.New("local error") + }, + } + err = adsc.Replace(testKey, newtestAccountsDBSyncersVal) + require.NoError(t, err) + + res, err = adsc.Get(testKey) + require.NoError(t, err) + require.Equal(t, newtestAccountsDBSyncersVal, res) + }) } -func TestAccountDBSyncers_DeleteShouldWork(t *testing.T) { +func TestAccountDBSyncers_RemoveShouldWork(t *testing.T) { t.Parallel() adsc := NewAccountsDBSyncersContainer() - testKey := "key" - testVal := &mock.AccountsDBSyncerStub{} - err := adsc.Add(testKey, testVal) + err := adsc.Add(testKey, testAccountsDBSyncersVal) require.NoError(t, err) res, err := adsc.Get(testKey) require.NoError(t, err) - require.Equal(t, testVal, res) + require.Equal(t, testAccountsDBSyncersVal, res) adsc.Remove(testKey) diff --git a/update/container/export_test.go b/update/container/export_test.go new file mode 100644 index 00000000000..2611cf2805f --- /dev/null +++ b/update/container/export_test.go @@ -0,0 +1,15 @@ +package containers + +// AddInterface - +func (a *accountDBSyncers) AddInterface(key string, val interface{}) error { + 
a.objects.Insert(key, val) + + return nil +} + +// AddInterface - +func (t *trieSyncers) AddInterface(key string, val interface{}) error { + t.objects.Insert(key, val) + + return nil +} diff --git a/update/container/trieSyncers_test.go b/update/container/trieSyncers_test.go index d3362e6592d..f26556a55df 100644 --- a/update/container/trieSyncers_test.go +++ b/update/container/trieSyncers_test.go @@ -11,6 +11,8 @@ import ( "github.com/stretchr/testify/require" ) +var testTrieSyncersVal = &mock.TrieSyncersStub{} + func TestNewTrieSyncersContainer(t *testing.T) { t.Parallel() @@ -18,82 +20,157 @@ func TestNewTrieSyncersContainer(t *testing.T) { require.False(t, check.IfNil(tsc)) } -func TestTrieSyncers_AddGetShouldWork(t *testing.T) { +func TestTrieSyncers_Get(t *testing.T) { t.Parallel() - tsc := NewTrieSyncersContainer() - testKey := "key" - testVal := &mock.TrieSyncersStub{} - err := tsc.Add(testKey, testVal) - require.NoError(t, err) + t.Run("missing key should error", func(t *testing.T) { + t.Parallel() + + tsc := NewTrieSyncersContainer() + val, err := tsc.Get(testKey) + require.Equal(t, update.ErrInvalidContainerKey, err) + require.Nil(t, val) + }) + t.Run("invalid data should error", func(t *testing.T) { + t.Parallel() + + tsc := NewTrieSyncersContainer() + + _ = tsc.AddInterface(testKey, "not an account db syncer") + val, err := tsc.Get(testKey) + require.Equal(t, update.ErrWrongTypeInContainer, err) + require.Nil(t, val) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + tsc := NewTrieSyncersContainer() + err := tsc.Add(testKey, testTrieSyncersVal) + require.NoError(t, err) + + res, err := tsc.Get(testKey) + require.NoError(t, err) + require.Equal(t, testTrieSyncersVal, res) + }) +} - res, err := tsc.Get(testKey) - require.NoError(t, err) - require.Equal(t, testVal, res) +func TestTrieSyncers_Add(t *testing.T) { + t.Parallel() + + t.Run("nil value should error", func(t *testing.T) { + t.Parallel() + + tsc := NewTrieSyncersContainer() + err 
:= tsc.Add(testKey, nil) + require.Equal(t, update.ErrNilContainerElement, err) + }) + t.Run("duplicated key should error", func(t *testing.T) { + t.Parallel() + + tsc := NewTrieSyncersContainer() + err := tsc.Add(testKey, testTrieSyncersVal) + require.NoError(t, err) + + err = tsc.Add(testKey, testTrieSyncersVal) + require.Equal(t, update.ErrContainerKeyAlreadyExists, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + tsc := NewTrieSyncersContainer() + err := tsc.Add(testKey, testTrieSyncersVal) + require.NoError(t, err) + }) } -func TestTrieSyncers_AddMultipleShouldWork(t *testing.T) { +func TestTrieSyncers_AddMultiple(t *testing.T) { t.Parallel() - tsc := NewTrieSyncersContainer() testKey0 := "key0" - testVal0 := &mock.TrieSyncersStub{} + testTrieSyncersVal0 := &mock.TrieSyncersStub{} testKey1 := "key1" - testVal1 := &mock.TrieSyncersStub{} + testTrieSyncersVal1 := &mock.TrieSyncersStub{} - err := tsc.AddMultiple([]string{testKey0, testKey1}, []update.TrieSyncer{testVal0, testVal1}) - require.NoError(t, err) + t.Run("different lengths should error", func(t *testing.T) { + t.Parallel() - res0, err := tsc.Get(testKey0) - require.NoError(t, err) - require.Equal(t, testVal0, res0) + tsc := NewTrieSyncersContainer() + err := tsc.AddMultiple([]string{testKey0}, nil) + require.Equal(t, update.ErrLenMismatch, err) + }) + t.Run("duplicated keys should should error on Add", func(t *testing.T) { + t.Parallel() - res1, err := tsc.Get(testKey1) - require.NoError(t, err) - require.Equal(t, testVal1, res1) + tsc := NewTrieSyncersContainer() + err := tsc.AddMultiple([]string{testKey0, testKey1, testKey1}, []update.TrieSyncer{testTrieSyncersVal0, testTrieSyncersVal1, testTrieSyncersVal1}) + require.Equal(t, update.ErrContainerKeyAlreadyExists, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() - require.Equal(t, 2, tsc.Len()) -} + tsc := NewTrieSyncersContainer() -func TestTrieSyncers_ReplaceShouldWork(t *testing.T) { - t.Parallel() + err 
:= tsc.AddMultiple([]string{testKey0, testKey1}, []update.TrieSyncer{testTrieSyncersVal0, testTrieSyncersVal1}) + require.NoError(t, err) - tsc := NewTrieSyncersContainer() - testKey := "key" - testVal := &mock.TrieSyncersStub{} - err := tsc.Add(testKey, testVal) - require.NoError(t, err) + res0, err := tsc.Get(testKey0) + require.NoError(t, err) + require.Equal(t, testTrieSyncersVal0, res0) - res, err := tsc.Get(testKey) - require.NoError(t, err) - require.Equal(t, testVal, res) - - // update - newTestVal := &mock.TrieSyncersStub{ - StartSyncingCalled: func(_ []byte, _ context.Context) error { - return errors.New("local err") - }, - } - err = tsc.Replace(testKey, newTestVal) - require.NoError(t, err) + res1, err := tsc.Get(testKey1) + require.NoError(t, err) + require.Equal(t, testTrieSyncersVal1, res1) - res, err = tsc.Get(testKey) - require.NoError(t, err) - require.Equal(t, newTestVal, res) + require.Equal(t, 2, tsc.Len()) + }) +} + +func TestTrieSyncers_Replace(t *testing.T) { + t.Parallel() + + t.Run("nil val should error", func(t *testing.T) { + t.Parallel() + + tsc := NewTrieSyncersContainer() + err := tsc.Replace(testKey, nil) + require.Equal(t, update.ErrNilContainerElement, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + tsc := NewTrieSyncersContainer() + err := tsc.Add(testKey, testTrieSyncersVal) + require.NoError(t, err) + + res, err := tsc.Get(testKey) + require.NoError(t, err) + require.Equal(t, testTrieSyncersVal, res) + + // update + newtestTrieSyncersVal := &mock.TrieSyncersStub{ + StartSyncingCalled: func(_ []byte, _ context.Context) error { + return errors.New("local err") + }, + } + err = tsc.Replace(testKey, newtestTrieSyncersVal) + require.NoError(t, err) + + res, err = tsc.Get(testKey) + require.NoError(t, err) + require.Equal(t, newtestTrieSyncersVal, res) + }) } -func TestTrieSyncers_DeleteShouldWork(t *testing.T) { +func TestTrieSyncers_RemoveShouldWork(t *testing.T) { t.Parallel() tsc := 
NewTrieSyncersContainer() - testKey := "key" - testVal := &mock.TrieSyncersStub{} - err := tsc.Add(testKey, testVal) + err := tsc.Add(testKey, testTrieSyncersVal) require.NoError(t, err) res, err := tsc.Get(testKey) require.NoError(t, err) - require.Equal(t, testVal, res) + require.Equal(t, testTrieSyncersVal, res) tsc.Remove(testKey) From 898a8c08af38347a323326c70910dd861d461d82 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Wed, 10 May 2023 12:03:12 +0300 Subject: [PATCH 150/221] added more tests --- trie/dfsIterator_test.go | 21 ++++++++++ trie/factory/trieCreator_test.go | 38 +++++++++++++++++- trie/keyBuilder/disabledKeyBuilder_test.go | 31 ++++++++++++++ .../trieStatisticsCollector_test.go | 6 ++- trie/statistics/trieStatistics_test.go | 40 +++++++++++++++++++ trie/storageMarker/trieStorageMarker_test.go | 31 ++++++++++++++ 6 files changed, 165 insertions(+), 2 deletions(-) create mode 100644 trie/keyBuilder/disabledKeyBuilder_test.go diff --git a/trie/dfsIterator_test.go b/trie/dfsIterator_test.go index 476889b6a2c..5e9f653db9b 100644 --- a/trie/dfsIterator_test.go +++ b/trie/dfsIterator_test.go @@ -7,6 +7,27 @@ import ( "github.com/stretchr/testify/assert" ) +func TestNewDFSIterator(t *testing.T) { + t.Parallel() + + t.Run("nil trie should error", func(t *testing.T) { + t.Parallel() + + it, err := trie.NewDFSIterator(nil) + assert.Equal(t, trie.ErrNilTrie, err) + assert.Nil(t, it) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + tr := initTrie() + + it, err := trie.NewDFSIterator(tr) + assert.Nil(t, err) + assert.NotNil(t, it) + }) +} + func TestDFSIterator_Next(t *testing.T) { t.Parallel() diff --git a/trie/factory/trieCreator_test.go b/trie/factory/trieCreator_test.go index 375969eb070..dcde970d85b 100644 --- a/trie/factory/trieCreator_test.go +++ b/trie/factory/trieCreator_test.go @@ -165,13 +165,49 @@ func TestTrieCreator_CreateWithNilCheckpointsStorerShouldErr(t *testing.T) { require.True(t, strings.Contains(err.Error(), 
trie.ErrNilStorer.Error())) } -func TestTrieCreator_CreateTriesComponentsForShardIdMissingStorer(t *testing.T) { +func TestTrieCreator_CreateWithInvalidMaxTrieLevelInMemShouldErr(t *testing.T) { + t.Parallel() + + args := getArgs() + tf, _ := factory.NewTrieFactory(args) + + createArgs := getCreateArgs() + createArgs.MaxTrieLevelInMem = 0 + _, tr, err := tf.Create(createArgs) + require.Nil(t, tr) + require.NotNil(t, err) + require.True(t, strings.Contains(err.Error(), trie.ErrInvalidLevelValue.Error())) +} + +func TestTrieCreator_CreateTriesComponentsForShardId(t *testing.T) { t.Parallel() t.Run("missing UserAccountsUnit", testWithMissingStorer(dataRetriever.UserAccountsUnit)) t.Run("missing UserAccountsCheckpointsUnit", testWithMissingStorer(dataRetriever.UserAccountsCheckpointsUnit)) t.Run("missing PeerAccountsUnit", testWithMissingStorer(dataRetriever.PeerAccountsUnit)) t.Run("missing PeerAccountsCheckpointsUnit", testWithMissingStorer(dataRetriever.PeerAccountsCheckpointsUnit)) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + holder, storageManager, err := factory.CreateTriesComponentsForShardId( + false, + testscommon.GetGeneralConfig(), + &mock.CoreComponentsStub{ + InternalMarshalizerField: &testscommon.MarshalizerMock{}, + HasherField: &hashingMocks.HasherMock{}, + PathHandlerField: &testscommon.PathManagerStub{}, + ProcessStatusHandlerInternal: &testscommon.ProcessStatusHandlerStub{}, + }, + &storageStubs.ChainStorerStub{ + GetStorerCalled: func(unitType dataRetriever.UnitType) (storage.Storer, error) { + return &storageStubs.StorerStub{}, nil + }, + }, + ) + require.NotNil(t, holder) + require.NotNil(t, storageManager) + require.Nil(t, err) + }) } func testWithMissingStorer(missingUnit dataRetriever.UnitType) func(t *testing.T) { diff --git a/trie/keyBuilder/disabledKeyBuilder_test.go b/trie/keyBuilder/disabledKeyBuilder_test.go new file mode 100644 index 00000000000..cdd63acfa1f --- /dev/null +++ b/trie/keyBuilder/disabledKeyBuilder_test.go 
@@ -0,0 +1,31 @@ +package keyBuilder + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestDisabledKeyBuilder(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + if r != nil { + require.Fail(t, "should have not panicked") + } + }() + + builder := NewDisabledKeyBuilder() + require.NotNil(t, builder) + + builder.BuildKey([]byte("key")) + + key, err := builder.GetKey() + require.Nil(t, err) + require.True(t, bytes.Equal(key, []byte{})) + + clonedBuilder := builder.Clone() + require.Equal(t, &disabledKeyBuilder{}, clonedBuilder) +} diff --git a/trie/statistics/trieStatisticsCollector_test.go b/trie/statistics/trieStatisticsCollector_test.go index c2d6a9ab75d..e63af29fd9f 100644 --- a/trie/statistics/trieStatisticsCollector_test.go +++ b/trie/statistics/trieStatisticsCollector_test.go @@ -8,11 +8,13 @@ import ( "github.com/stretchr/testify/assert" ) -func TestSnapshotStatistics_AddTrieStats(t *testing.T) { +func TestSnapshotStatistics_Add(t *testing.T) { t.Parallel() tsc := NewTrieStatisticsCollector() + tsc.Add(nil) // coverage, early exit + numInserts := 100 for i := 0; i < numInserts; i++ { tsc.Add(getTrieStatsDTO(rand.Intn(numInserts), uint64(rand.Intn(numInserts)))) @@ -43,6 +45,8 @@ func TestSnapshotStatistics_AddTrieStats(t *testing.T) { assert.Equal(t, numTriesToPrint, len(tsc.triesBySize)) assert.Equal(t, numTriesToPrint, len(tsc.triesByDepth)) assert.Equal(t, uint64(i+1), tsc.GetNumNodes()) + + tsc.Print() // coverage } } diff --git a/trie/statistics/trieStatistics_test.go b/trie/statistics/trieStatistics_test.go index 31773b33fa7..d0870ced3dd 100644 --- a/trie/statistics/trieStatistics_test.go +++ b/trie/statistics/trieStatistics_test.go @@ -1,9 +1,13 @@ package statistics import ( + "encoding/hex" + "fmt" "testing" + "github.com/multiversx/mx-chain-core-go/core" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestTrieStatistics_AddBranchNode(t *testing.T) { @@ -107,3 
+111,39 @@ func TestTrieStatistics_GetTrieStats(t *testing.T) { assert.Equal(t, uint64(numExtensions), stats.NumExtensionNodes) assert.Equal(t, uint64(numLeaves), stats.NumLeafNodes) } + +func TestTrieStatsDTO_ToString(t *testing.T) { + t.Parallel() + + tsd := TrieStatsDTO{ + Address: "address", + RootHash: []byte("root hash"), + TotalNodesSize: 1, + TotalNumNodes: 1, + MaxTrieDepth: 1, + BranchNodesSize: 1, + NumBranchNodes: 1, + ExtensionNodesSize: 1, + NumExtensionNodes: 1, + LeafNodesSize: 1, + NumLeafNodes: 1, + } + + expectedLines := []string{ + fmt.Sprintf("address %v,", tsd.Address), + fmt.Sprintf("rootHash %v,", hex.EncodeToString(tsd.RootHash)), + fmt.Sprintf("total trie size = %v,", core.ConvertBytes(tsd.TotalNodesSize)), + fmt.Sprintf("num trie nodes = %v,", tsd.TotalNumNodes), + fmt.Sprintf("max trie depth = %v,", tsd.MaxTrieDepth), + fmt.Sprintf("branch nodes size %v,", core.ConvertBytes(tsd.BranchNodesSize)), + fmt.Sprintf("extension nodes size %v,", core.ConvertBytes(tsd.ExtensionNodesSize)), + fmt.Sprintf("leaf nodes size %v,", core.ConvertBytes(tsd.LeafNodesSize)), + fmt.Sprintf("num branches %v,", tsd.NumBranchNodes), + fmt.Sprintf("num extensions %v,", tsd.NumExtensionNodes), + fmt.Sprintf("num leaves %v", tsd.NumLeafNodes), + } + stringDTO := tsd.ToString() + for i, line := range stringDTO { + require.Equal(t, expectedLines[i], line) + } +} diff --git a/trie/storageMarker/trieStorageMarker_test.go b/trie/storageMarker/trieStorageMarker_test.go index ae6699801cb..d30ab296c55 100644 --- a/trie/storageMarker/trieStorageMarker_test.go +++ b/trie/storageMarker/trieStorageMarker_test.go @@ -1,6 +1,7 @@ package storageMarker import ( + "errors" "testing" "github.com/multiversx/mx-chain-go/common" @@ -11,6 +12,36 @@ import ( func TestTrieStorageMarker_MarkStorerAsSyncedAndActive(t *testing.T) { t.Parallel() + t.Run("all operations error should work", func(t *testing.T) { + t.Parallel() + + sm := NewTrieStorageMarker() + assert.NotNil(t, sm) + + 
expectedErr := errors.New("expected err") + + getLatestStorageEpochCalled := false + putCalled := false + putInEpochWithoutCacheCalled := false + storer := &testscommon.StorageManagerStub{ + GetLatestStorageEpochCalled: func() (uint32, error) { + getLatestStorageEpochCalled = true + return 0, expectedErr + }, + PutCalled: func(key []byte, val []byte) error { + putCalled = true + return expectedErr + }, + PutInEpochWithoutCacheCalled: func(key []byte, val []byte, epoch uint32) error { + putInEpochWithoutCacheCalled = true + return expectedErr + }, + } + sm.MarkStorerAsSyncedAndActive(storer) + assert.True(t, getLatestStorageEpochCalled) + assert.True(t, putCalled) + assert.True(t, putInEpochWithoutCacheCalled) + }) t.Run("mark storer as synced and active epoch 5", func(t *testing.T) { sm := NewTrieStorageMarker() assert.NotNil(t, sm) From 606b6a06f9063d94d9bdb570b52c9e7258f23b14 Mon Sep 17 00:00:00 2001 From: robertsasu Date: Wed, 10 May 2023 13:53:49 +0300 Subject: [PATCH 151/221] new address --- cmd/node/config/config.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/node/config/config.toml b/cmd/node/config/config.toml index 1485024d509..68e5c80c6fa 100644 --- a/cmd/node/config/config.toml +++ b/cmd/node/config/config.toml @@ -716,7 +716,7 @@ ] MaxNumAddressesInTransferRole = 100 DNSV2Addresses =[ - "erd1qqqqqqqqqqqqqpgqcy67yanvwpepqmerkq6m8pgav0tlvgwxjmdq4hukxw", + "erd1qqqqqqqqqqqqqpgqr46jrxr6r2unaqh75ugd308dwx5vgnhwh47qtvepe3", ] [Hardfork] From 42f28c78f82ff2aa113b33227f0a8eaf97ed2ddf Mon Sep 17 00:00:00 2001 From: Iuga Mihai Date: Wed, 10 May 2023 16:23:56 +0300 Subject: [PATCH 152/221] move host driver --- cmd/node/config/external.toml | 4 +- config/externalConfig.go | 6 +- factory/status/statusComponents.go | 31 +- go.mod | 5 +- go.sum | 9 +- outport/factory/hostDriverFactory.go | 41 +++ outport/factory/hostDriverFactory_test.go | 1 + outport/factory/outportFactory.go | 38 +-- outport/host/driver.go | 114 ++++++++ 
outport/host/driver_test.go | 334 ++++++++++++++++++++++ outport/host/errors.go | 9 + outport/host/interface.go | 8 + testscommon/outport/senderHostStub.go | 28 ++ 13 files changed, 568 insertions(+), 60 deletions(-) create mode 100644 outport/factory/hostDriverFactory.go create mode 100644 outport/factory/hostDriverFactory_test.go create mode 100644 outport/host/driver.go create mode 100644 outport/host/driver_test.go create mode 100644 outport/host/errors.go create mode 100644 outport/host/interface.go create mode 100644 testscommon/outport/senderHostStub.go diff --git a/cmd/node/config/external.toml b/cmd/node/config/external.toml index f539e707099..7de429cf6cc 100644 --- a/cmd/node/config/external.toml +++ b/cmd/node/config/external.toml @@ -41,12 +41,12 @@ # marshalled structures in block events data MarshallerType = "json" -[WebSocketConnector] +[HostDriverConfig] # This flag shall only be used for observer nodes Enabled = false # This flag will start the WebSocket connector as server or client IsServer = false - # The url of the web-sockets client/server + # The url of the WebSocket client/server URL = "127.0.0.1:22111" WithAcknowledge = true # Currently, only "json" is supported. 
In the future, "gogo protobuf" could also be supported diff --git a/config/externalConfig.go b/config/externalConfig.go index 2a9aa208ac6..72f59443fcf 100644 --- a/config/externalConfig.go +++ b/config/externalConfig.go @@ -4,7 +4,7 @@ package config type ExternalConfig struct { ElasticSearchConnector ElasticSearchConfig EventNotifierConnector EventNotifierConfig - WebSocketConnector WebSocketDriverConfig + HostDriverConnector HostDriverConfig } // ElasticSearchConfig will hold the configuration for the elastic search @@ -38,8 +38,8 @@ type CovalentConfig struct { RouteAcknowledgeData string } -// WebSocketDriverConfig will hold the configuration for WebSocket driver -type WebSocketDriverConfig struct { +// HostDriverConfig will hold the configuration for WebSocket driver +type HostDriverConfig struct { Enabled bool IsServer bool WithAcknowledge bool diff --git a/factory/status/statusComponents.go b/factory/status/statusComponents.go index 149726cf395..838a21580ec 100644 --- a/factory/status/statusComponents.go +++ b/factory/status/statusComponents.go @@ -9,8 +9,6 @@ import ( nodeData "github.com/multiversx/mx-chain-core-go/data" outportCore "github.com/multiversx/mx-chain-core-go/data/outport" factoryMarshalizer "github.com/multiversx/mx-chain-core-go/marshal/factory" - "github.com/multiversx/mx-chain-core-go/webSocket/data" - wsDriverFactory "github.com/multiversx/mx-chain-core-go/webSocket/factory" indexerFactory "github.com/multiversx/mx-chain-es-indexer-go/process/factory" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/statistics" @@ -205,7 +203,7 @@ func (pc *statusComponents) Close() error { // createOutportDriver creates a new outport.OutportHandler which is used to register outport drivers // once a driver is subscribed it will receive data through the implemented outport.Driver methods func (scf *statusComponentsFactory) createOutportDriver() (outport.OutportHandler, error) { - webSocketSenderDriverFactoryArgs, err := 
scf.makeWebSocketDriverArgs() + hostDriverArgs, err := scf.makeHostDriverArgs() if err != nil { return nil, err } @@ -219,10 +217,7 @@ func (scf *statusComponentsFactory) createOutportDriver() (outport.OutportHandle RetrialInterval: common.RetrialIntervalForOutportDriver, ElasticIndexerFactoryArgs: scf.makeElasticIndexerArgs(), EventNotifierFactoryArgs: eventNotifierArgs, - WebSocketSenderDriverFactoryArgs: outportDriverFactory.WrappedOutportDriverWebSocketSenderFactoryArgs{ - Enabled: scf.externalConfig.WebSocketConnector.Enabled, - ArgsWebSocketDriverFactory: webSocketSenderDriverFactoryArgs, - }, + HostDriverArgs: hostDriverArgs, } return outportDriverFactory.CreateOutport(outportFactoryArgs) @@ -266,26 +261,18 @@ func (scf *statusComponentsFactory) makeEventNotifierArgs() (*outportDriverFacto }, nil } -func (scf *statusComponentsFactory) makeWebSocketDriverArgs() (wsDriverFactory.ArgsWebSocketDriverFactory, error) { - if !scf.externalConfig.WebSocketConnector.Enabled { - return wsDriverFactory.ArgsWebSocketDriverFactory{}, nil +func (scf *statusComponentsFactory) makeHostDriverArgs() (outportDriverFactory.ArgsHostDriverFactory, error) { + if !scf.externalConfig.HostDriverConnector.Enabled { + return outportDriverFactory.ArgsHostDriverFactory{}, nil } - marshaller, err := factoryMarshalizer.NewMarshalizer(scf.externalConfig.WebSocketConnector.MarshallerType) + marshaller, err := factoryMarshalizer.NewMarshalizer(scf.externalConfig.HostDriverConnector.MarshallerType) if err != nil { - return wsDriverFactory.ArgsWebSocketDriverFactory{}, err + return outportDriverFactory.ArgsHostDriverFactory{}, err } - return wsDriverFactory.ArgsWebSocketDriverFactory{ + return outportDriverFactory.ArgsHostDriverFactory{ Marshaller: marshaller, - WebSocketConfig: data.WebSocketConfig{ - URL: scf.externalConfig.WebSocketConnector.URL, - WithAcknowledge: scf.externalConfig.WebSocketConnector.WithAcknowledge, - IsServer: scf.externalConfig.WebSocketConnector.IsServer, - 
RetryDurationInSec: scf.externalConfig.WebSocketConnector.RetryDurationInSec, - BlockingAckOnError: scf.externalConfig.WebSocketConnector.BlockingAckOnError, - }, - Uint64ByteSliceConverter: scf.coreComponents.Uint64ByteSliceConverter(), - Log: log, + HostConfig: scf.externalConfig.HostDriverConnector, }, nil } diff --git a/go.mod b/go.mod index 7b3a269c0c0..2b49742e37e 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,8 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-core-go v1.2.1-0.20230505112603-2cb497577ad1 + github.com/multiversx/mx-chain-communication-go v0.0.0-20230510122527-a68a0a8118b3 + github.com/multiversx/mx-chain-core-go v1.2.1-0.20230509110712-7a0e0a14ffa7 github.com/multiversx/mx-chain-crypto-go v1.2.5 github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96 github.com/multiversx/mx-chain-logger-go v1.0.11 @@ -26,7 +27,7 @@ require ( github.com/pelletier/go-toml v1.9.3 github.com/pkg/errors v0.9.1 github.com/shirou/gopsutil v3.21.11+incompatible - github.com/stretchr/testify v1.8.1 + github.com/stretchr/testify v1.8.2 github.com/urfave/cli v1.22.10 golang.org/x/crypto v0.5.0 gopkg.in/go-playground/validator.v8 v8.18.2 diff --git a/go.sum b/go.sum index 9ae60259e7e..9e43bb7a0bd 100644 --- a/go.sum +++ b/go.sum @@ -609,13 +609,15 @@ github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2 github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= +github.com/multiversx/mx-chain-communication-go v0.0.0-20230510122527-a68a0a8118b3 h1:sfjcPi2ELYsx80RRmYZ4t8v3SIyqH0sNzlMBUm7g7Cw= +github.com/multiversx/mx-chain-communication-go v0.0.0-20230510122527-a68a0a8118b3/go.mod 
h1:wdDzW6BgXd6hm3X8RzaSN+/CJ/CZYqerGgliBmVVOII= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.31/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.34/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.0/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1-0.20230329082847-b78e96c3ad5a/go.mod h1:/lovncjwo+pXQ7IAERwNzwCifeH7SAWk0DGqjorX2bc= -github.com/multiversx/mx-chain-core-go v1.2.1-0.20230505112603-2cb497577ad1 h1:DnaoDTROvtbjXhV7HmB5969GcjG87U0Jvo/letH1uvE= -github.com/multiversx/mx-chain-core-go v1.2.1-0.20230505112603-2cb497577ad1/go.mod h1:/lovncjwo+pXQ7IAERwNzwCifeH7SAWk0DGqjorX2bc= +github.com/multiversx/mx-chain-core-go v1.2.1-0.20230509110712-7a0e0a14ffa7 h1:vrTUro90oBtC1sSWCCBSL4MWRE2p/3UnN0853Ew7Gbs= +github.com/multiversx/mx-chain-core-go v1.2.1-0.20230509110712-7a0e0a14ffa7/go.mod h1:/lovncjwo+pXQ7IAERwNzwCifeH7SAWk0DGqjorX2bc= github.com/multiversx/mx-chain-crypto-go v1.2.5 h1:tuq3BUNMhKud5DQbZi9DiVAAHUXypizy8zPH0NpTGZk= github.com/multiversx/mx-chain-crypto-go v1.2.5/go.mod h1:teqhNyWEqfMPgNn8sgWXlgtJ1a36jGCnhs/tRpXW6r4= github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96 h1:okIfLr+NqX04eHNp9k97KuLhpYfLJOjmGZaOia9xcGg= @@ -788,8 +790,9 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= 
+github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= diff --git a/outport/factory/hostDriverFactory.go b/outport/factory/hostDriverFactory.go new file mode 100644 index 00000000000..a9cfce25eda --- /dev/null +++ b/outport/factory/hostDriverFactory.go @@ -0,0 +1,41 @@ +package factory + +import ( + "github.com/multiversx/mx-chain-communication-go/websocket/data" + "github.com/multiversx/mx-chain-communication-go/websocket/factory" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/outport" + "github.com/multiversx/mx-chain-go/outport/host" + logger "github.com/multiversx/mx-chain-logger-go" +) + +type ArgsHostDriverFactory struct { + HostConfig config.HostDriverConfig + Marshaller marshal.Marshalizer +} + +var log = logger.GetOrCreate("outport/factory/hostdriver") + +func CreateHostDriver(args ArgsHostDriverFactory) (outport.Driver, error) { + wsHost, err := factory.CreateWebSocketHost(factory.ArgsWebSocketHost{ + WebSocketConfig: data.WebSocketConfig{ + URL: args.HostConfig.URL, + WithAcknowledge: args.HostConfig.WithAcknowledge, + IsServer: args.HostConfig.IsServer, + RetryDurationInSec: args.HostConfig.RetryDurationInSec, + BlockingAckOnError: args.HostConfig.BlockingAckOnError, + }, + Marshaller: args.Marshaller, + Log: log, + }) + if err != nil { + return nil, err + } + + return host.NewHostDriver(host.ArgsHostDriver{ + Marshaller: args.Marshaller, + SenderHost: wsHost, + Log: log, + }) +} diff --git a/outport/factory/hostDriverFactory_test.go 
b/outport/factory/hostDriverFactory_test.go new file mode 100644 index 00000000000..7312cd2e2d8 --- /dev/null +++ b/outport/factory/hostDriverFactory_test.go @@ -0,0 +1 @@ +package factory diff --git a/outport/factory/outportFactory.go b/outport/factory/outportFactory.go index 4c42f4b4202..d7414ec1662 100644 --- a/outport/factory/outportFactory.go +++ b/outport/factory/outportFactory.go @@ -3,24 +3,16 @@ package factory import ( "time" - "github.com/multiversx/mx-chain-core-go/webSocket/data" - wsDriverFactory "github.com/multiversx/mx-chain-core-go/webSocket/factory" indexerFactory "github.com/multiversx/mx-chain-es-indexer-go/process/factory" "github.com/multiversx/mx-chain-go/outport" ) -// WrappedOutportDriverWebSocketSenderFactoryArgs extends the wsDriverFactory.OutportDriverWebSocketSenderFactoryArgs structure with the Enabled field -type WrappedOutportDriverWebSocketSenderFactoryArgs struct { - Enabled bool - wsDriverFactory.ArgsWebSocketDriverFactory -} - // OutportFactoryArgs holds the factory arguments of different outport drivers type OutportFactoryArgs struct { - RetrialInterval time.Duration - ElasticIndexerFactoryArgs indexerFactory.ArgsIndexerFactory - EventNotifierFactoryArgs *EventNotifierFactoryArgs - WebSocketSenderDriverFactoryArgs WrappedOutportDriverWebSocketSenderFactoryArgs + RetrialInterval time.Duration + ElasticIndexerFactoryArgs indexerFactory.ArgsIndexerFactory + EventNotifierFactoryArgs *EventNotifierFactoryArgs + HostDriverArgs ArgsHostDriverFactory } // CreateOutport will create a new instance of OutportHandler @@ -54,7 +46,7 @@ func createAndSubscribeDrivers(outport outport.OutportHandler, args *OutportFact return err } - return createAndSubscribeWebSocketDriver(outport, args.WebSocketSenderDriverFactoryArgs) + return createAndSubscribeHostDriverIfNeeded(outport, args.HostDriverArgs) } func createAndSubscribeElasticDriverIfNeeded( @@ -97,28 +89,18 @@ func checkArguments(args *OutportFactoryArgs) error { return nil } -func 
createAndSubscribeWebSocketDriver( +func createAndSubscribeHostDriverIfNeeded( outport outport.OutportHandler, - args WrappedOutportDriverWebSocketSenderFactoryArgs, + args ArgsHostDriverFactory, ) error { - if !args.Enabled { + if !args.HostConfig.Enabled { return nil } - wsDriver, err := wsDriverFactory.NewWebSocketDriver(wsDriverFactory.ArgsWebSocketDriverFactory{ - WebSocketConfig: data.WebSocketConfig{ - URL: args.WebSocketConfig.URL, - WithAcknowledge: args.WebSocketConfig.WithAcknowledge, - IsServer: args.WebSocketConfig.IsServer, - RetryDurationInSec: args.WebSocketConfig.RetryDurationInSec, - }, - Marshaller: args.Marshaller, - Uint64ByteSliceConverter: args.Uint64ByteSliceConverter, - Log: args.Log, - }) + hostDriver, err := CreateHostDriver(args) if err != nil { return err } - return outport.SubscribeDriver(wsDriver) + return outport.SubscribeDriver(hostDriver) } diff --git a/outport/host/driver.go b/outport/host/driver.go new file mode 100644 index 00000000000..2b12afd2612 --- /dev/null +++ b/outport/host/driver.go @@ -0,0 +1,114 @@ +package host + +import ( + "fmt" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/core/atomic" + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data/outport" + "github.com/multiversx/mx-chain-core-go/marshal" +) + +// ArgsHostDriver holds the arguments needed for creating a new hostDriver +type ArgsHostDriver struct { + Marshaller marshal.Marshalizer + SenderHost SenderHost + Log core.Logger +} + +type hostDriver struct { + marshaller marshal.Marshalizer + senderHost SenderHost + isClosed atomic.Flag + log core.Logger +} + +func NewHostDriver(args ArgsHostDriver) (*hostDriver, error) { + if check.IfNil(args.SenderHost) { + return nil, ErrNilHost + } + if check.IfNil(args.Marshaller) { + return nil, core.ErrNilMarshalizer + } + if check.IfNil(args.Log) { + return nil, core.ErrNilLogger + } + + return &hostDriver{ + marshaller: 
args.Marshaller, + senderHost: args.SenderHost, + log: args.Log, + isClosed: atomic.Flag{}, + }, nil +} + +func (o *hostDriver) SaveBlock(outportBlock *outport.OutportBlock) error { + return o.handleAction(outportBlock, outport.TopicSaveBlock) +} + +// RevertIndexedBlock will handle the action of reverting the indexed block +func (o *hostDriver) RevertIndexedBlock(blockData *outport.BlockData) error { + return o.handleAction(blockData, outport.TopicRevertIndexedBlock) +} + +// SaveRoundsInfo will handle the saving of rounds +func (o *hostDriver) SaveRoundsInfo(roundsInfos *outport.RoundsInfo) error { + return o.handleAction(roundsInfos, outport.TopicSaveRoundsInfo) +} + +// SaveValidatorsPubKeys will handle the saving of the validators' public keys +func (o *hostDriver) SaveValidatorsPubKeys(validatorsPubKeys *outport.ValidatorsPubKeys) error { + return o.handleAction(validatorsPubKeys, outport.TopicSaveValidatorsPubKeys) +} + +// SaveValidatorsRating will handle the saving of the validators' rating +func (o *hostDriver) SaveValidatorsRating(validatorsRating *outport.ValidatorsRating) error { + return o.handleAction(validatorsRating, outport.TopicSaveValidatorsRating) +} + +// SaveAccounts will handle the accounts' saving +func (o *hostDriver) SaveAccounts(accounts *outport.Accounts) error { + return o.handleAction(accounts, outport.TopicSaveAccounts) +} + +// FinalizedBlock will handle the finalized block +func (o *hostDriver) FinalizedBlock(finalizedBlock *outport.FinalizedBlock) error { + return o.handleAction(finalizedBlock, outport.TopicFinalizedBlock) +} + +// GetMarshaller returns the internal marshaller +func (o *hostDriver) GetMarshaller() marshal.Marshalizer { + return o.marshaller +} + +func (o *hostDriver) handleAction(args interface{}, topic string) error { + if o.isClosed.IsSet() { + return ErrHostIsClosed + } + + marshalledPayload, err := o.marshaller.Marshal(args) + if err != nil { + o.log.Error("cannot marshal block", "topic", topic, "error", err) 
+ return fmt.Errorf("%w while marshaling block for topic %s", err, topic) + } + + err = o.senderHost.Send(marshalledPayload, topic) + if err != nil { + o.log.Error("cannot send on route", "topic", topic, "error", err) + return fmt.Errorf("%w while sending data on route for topic %s", err, topic) + } + + return nil +} + +// Close will handle the closing of the outport driver web socket sender +func (o *hostDriver) Close() error { + o.isClosed.SetValue(true) + return o.senderHost.Close() +} + +// IsInterfaceNil returns true if there is no value under the interface +func (o *hostDriver) IsInterfaceNil() bool { + return o == nil +} diff --git a/outport/host/driver_test.go b/outport/host/driver_test.go new file mode 100644 index 00000000000..d07dae2c41e --- /dev/null +++ b/outport/host/driver_test.go @@ -0,0 +1,334 @@ +package host + +import ( + "errors" + "testing" + + "github.com/multiversx/mx-chain-core-go/core" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-core-go/data/outport" + "github.com/multiversx/mx-chain-core-go/marshal" + "github.com/multiversx/mx-chain-core-go/webSocket/data" + outportStubs "github.com/multiversx/mx-chain-go/testscommon/outport" + logger "github.com/multiversx/mx-chain-logger-go" + "github.com/stretchr/testify/require" +) + +var cannotSendOnRouteErr = errors.New("cannot send on route") + +var log = logger.GetOrCreate("test") + +func getMockArgs() ArgsHostDriver { + return ArgsHostDriver{ + Marshaller: &marshal.JsonMarshalizer{}, + SenderHost: &outportStubs.SenderHostStub{}, + Log: log, + } +} + +func TestNewWebsocketOutportDriverNodePart(t *testing.T) { + t.Parallel() + + t.Run("nil marshaller", func(t *testing.T) { + t.Parallel() + + args := getMockArgs() + args.Marshaller = nil + + o, err := NewHostDriver(args) + require.Nil(t, o) + require.Equal(t, core.ErrNilMarshalizer, err) + }) + + t.Run("nil logger", func(t *testing.T) { + t.Parallel() + + args := getMockArgs() + args.Log = nil + + o, err := 
NewHostDriver(args) + require.Nil(t, o) + require.Equal(t, data.ErrNilLogger, err) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + args := getMockArgs() + + o, err := NewHostDriver(args) + require.NotNil(t, o) + require.NoError(t, err) + require.False(t, o.IsInterfaceNil()) + }) +} + +func TestWebsocketOutportDriverNodePart_SaveBlock(t *testing.T) { + t.Parallel() + + t.Run("SaveBlock - should error", func(t *testing.T) { + t.Parallel() + + args := getMockArgs() + args.SenderHost = &outportStubs.SenderHostStub{ + SendCalled: func(_ []byte, _ string) error { + return cannotSendOnRouteErr + }, + } + o, err := NewHostDriver(args) + require.NoError(t, err) + + err = o.SaveBlock(&outport.OutportBlock{}) + require.True(t, errors.Is(err, cannotSendOnRouteErr)) + }) + + t.Run("SaveBlock - should work", func(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + require.Nil(t, r) + }() + args := getMockArgs() + o, err := NewHostDriver(args) + require.NoError(t, err) + + err = o.SaveBlock(&outport.OutportBlock{}) + require.NoError(t, err) + }) +} + +func TestWebsocketOutportDriverNodePart_FinalizedBlock(t *testing.T) { + t.Parallel() + + t.Run("Finalized block - should error", func(t *testing.T) { + args := getMockArgs() + args.SenderHost = &outportStubs.SenderHostStub{ + SendCalled: func(_ []byte, _ string) error { + return cannotSendOnRouteErr + }, + } + o, err := NewHostDriver(args) + require.NoError(t, err) + + err = o.FinalizedBlock(&outport.FinalizedBlock{HeaderHash: []byte("header hash")}) + require.True(t, errors.Is(err, cannotSendOnRouteErr)) + }) + + t.Run("Finalized block - should work", func(t *testing.T) { + args := getMockArgs() + args.SenderHost = &outportStubs.SenderHostStub{ + SendCalled: func(_ []byte, _ string) error { + return nil + }, + } + o, err := NewHostDriver(args) + require.NoError(t, err) + + err = o.FinalizedBlock(&outport.FinalizedBlock{HeaderHash: []byte("header hash")}) + require.NoError(t, err) + }) +} + 
+func TestWebsocketOutportDriverNodePart_RevertIndexedBlock(t *testing.T) { + t.Parallel() + + t.Run("RevertIndexedBlock - should error", func(t *testing.T) { + args := getMockArgs() + args.SenderHost = &outportStubs.SenderHostStub{ + SendCalled: func(_ []byte, _ string) error { + return cannotSendOnRouteErr + }, + } + o, err := NewHostDriver(args) + require.NoError(t, err) + + err = o.RevertIndexedBlock(nil) + require.True(t, errors.Is(err, cannotSendOnRouteErr)) + }) + + t.Run("RevertIndexedBlock block - should work", func(t *testing.T) { + args := getMockArgs() + args.SenderHost = &outportStubs.SenderHostStub{ + SendCalled: func(_ []byte, _ string) error { + return nil + }, + } + o, err := NewHostDriver(args) + require.NoError(t, err) + + err = o.RevertIndexedBlock(nil) + require.NoError(t, err) + }) +} + +func TestWebsocketOutportDriverNodePart_SaveAccounts(t *testing.T) { + t.Parallel() + + t.Run("SaveAccounts - should error", func(t *testing.T) { + args := getMockArgs() + args.SenderHost = &outportStubs.SenderHostStub{ + SendCalled: func(_ []byte, _ string) error { + return cannotSendOnRouteErr + }, + } + o, err := NewHostDriver(args) + require.NoError(t, err) + + err = o.SaveAccounts(nil) + require.True(t, errors.Is(err, cannotSendOnRouteErr)) + }) + + t.Run("SaveAccounts block - should work", func(t *testing.T) { + args := getMockArgs() + args.SenderHost = &outportStubs.SenderHostStub{ + SendCalled: func(_ []byte, _ string) error { + return nil + }, + } + o, err := NewHostDriver(args) + require.NoError(t, err) + + err = o.SaveAccounts(nil) + require.NoError(t, err) + }) +} + +func TestWebsocketOutportDriverNodePart_SaveRoundsInfo(t *testing.T) { + t.Parallel() + + t.Run("SaveRoundsInfo - should error", func(t *testing.T) { + args := getMockArgs() + args.SenderHost = &outportStubs.SenderHostStub{ + SendCalled: func(_ []byte, _ string) error { + return cannotSendOnRouteErr + }, + } + o, err := NewHostDriver(args) + require.NoError(t, err) + + err = 
o.SaveRoundsInfo(nil) + require.True(t, errors.Is(err, cannotSendOnRouteErr)) + }) + + t.Run("SaveRoundsInfo block - should work", func(t *testing.T) { + args := getMockArgs() + args.SenderHost = &outportStubs.SenderHostStub{ + SendCalled: func(_ []byte, _ string) error { + return nil + }, + } + o, err := NewHostDriver(args) + require.NoError(t, err) + + err = o.SaveRoundsInfo(nil) + require.NoError(t, err) + }) +} + +func TestWebsocketOutportDriverNodePart_SaveValidatorsPubKeys(t *testing.T) { + t.Parallel() + + t.Run("SaveValidatorsPubKeys - should error", func(t *testing.T) { + args := getMockArgs() + args.SenderHost = &outportStubs.SenderHostStub{ + SendCalled: func(_ []byte, _ string) error { + return cannotSendOnRouteErr + }, + } + o, err := NewHostDriver(args) + require.NoError(t, err) + + err = o.SaveValidatorsPubKeys(nil) + require.True(t, errors.Is(err, cannotSendOnRouteErr)) + }) + + t.Run("SaveValidatorsPubKeys block - should work", func(t *testing.T) { + args := getMockArgs() + args.SenderHost = &outportStubs.SenderHostStub{ + SendCalled: func(_ []byte, _ string) error { + return nil + }, + } + o, err := NewHostDriver(args) + require.NoError(t, err) + + err = o.SaveValidatorsPubKeys(nil) + require.NoError(t, err) + }) +} + +func TestWebsocketOutportDriverNodePart_SaveValidatorsRating(t *testing.T) { + t.Parallel() + + t.Run("SaveValidatorsRating - should error", func(t *testing.T) { + args := getMockArgs() + args.SenderHost = &outportStubs.SenderHostStub{ + SendCalled: func(_ []byte, _ string) error { + return cannotSendOnRouteErr + }, + } + o, err := NewHostDriver(args) + require.NoError(t, err) + + err = o.SaveValidatorsRating(nil) + require.True(t, errors.Is(err, cannotSendOnRouteErr)) + }) + + t.Run("SaveValidatorsRating block - should work", func(t *testing.T) { + args := getMockArgs() + args.SenderHost = &outportStubs.SenderHostStub{ + SendCalled: func(_ []byte, _ string) error { + return nil + }, + } + o, err := NewHostDriver(args) + 
require.NoError(t, err) + + err = o.SaveValidatorsRating(nil) + require.NoError(t, err) + }) +} + +func TestWebsocketOutportDriverNodePart_SaveBlock_PayloadCheck(t *testing.T) { + t.Parallel() + + mockArgs := getMockArgs() + + outportBlock := &outport.OutportBlock{BlockData: &outport.BlockData{Body: &block.Body{}}} + marshaledData, err := mockArgs.Marshaller.Marshal(outportBlock) + require.Nil(t, err) + + mockArgs.SenderHost = &outportStubs.SenderHostStub{ + SendCalled: func(payload []byte, _ string) error { + require.Equal(t, marshaledData, payload) + + return nil + }, + } + o, err := NewHostDriver(mockArgs) + require.NoError(t, err) + + err = o.SaveBlock(outportBlock) + require.NoError(t, err) +} + +func TestWebsocketOutportDriverNodePart_Close(t *testing.T) { + t.Parallel() + + closedWasCalled := false + args := getMockArgs() + args.SenderHost = &outportStubs.SenderHostStub{ + CloseCalled: func() error { + closedWasCalled = true + return nil + }, + } + + o, err := NewHostDriver(args) + require.NoError(t, err) + + err = o.Close() + require.NoError(t, err) + require.True(t, closedWasCalled) +} diff --git a/outport/host/errors.go b/outport/host/errors.go new file mode 100644 index 00000000000..de45f08acef --- /dev/null +++ b/outport/host/errors.go @@ -0,0 +1,9 @@ +package host + +import "errors" + +// ErrHostIsClosed signals that the host was closed while trying to perform actions +var ErrHostIsClosed = errors.New("server is closed") + +// ErrNilHost signals that a nil host has been provided +var ErrNilHost = errors.New("nil host provided") diff --git a/outport/host/interface.go b/outport/host/interface.go new file mode 100644 index 00000000000..8d0931abd9f --- /dev/null +++ b/outport/host/interface.go @@ -0,0 +1,8 @@ +package host + +// SenderHost defines the actions that a host sender should do +type SenderHost interface { + Send(payload []byte, topic string) error + Close() error + IsInterfaceNil() bool +} diff --git a/testscommon/outport/senderHostStub.go 
b/testscommon/outport/senderHostStub.go new file mode 100644 index 00000000000..ee506100fd4 --- /dev/null +++ b/testscommon/outport/senderHostStub.go @@ -0,0 +1,28 @@ +package outport + +// SenderHostStub - +type SenderHostStub struct { + SendCalled func(payload []byte, topic string) error + CloseCalled func() error +} + +// Send - +func (s *SenderHostStub) Send(payload []byte, topic string) error { + if s.SendCalled != nil { + return s.SendCalled(payload, topic) + } + return nil +} + +// Close - +func (s *SenderHostStub) Close() error { + if s.CloseCalled != nil { + return s.CloseCalled() + } + return nil +} + +// IsInterfaceNil - +func (s *SenderHostStub) IsInterfaceNil() bool { + return s == nil +} From d42c502eef5f27551e5f9292b3553d8f3d664b2d Mon Sep 17 00:00:00 2001 From: Iuga Mihai Date: Wed, 10 May 2023 17:28:24 +0300 Subject: [PATCH 153/221] latest version of communication go --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 2b49742e37e..6717a1a9e69 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-communication-go v0.0.0-20230510122527-a68a0a8118b3 + github.com/multiversx/mx-chain-communication-go v0.0.0-20230510142707-7db6d08558fe github.com/multiversx/mx-chain-core-go v1.2.1-0.20230509110712-7a0e0a14ffa7 github.com/multiversx/mx-chain-crypto-go v1.2.5 github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96 diff --git a/go.sum b/go.sum index 9e43bb7a0bd..8ea40286f19 100644 --- a/go.sum +++ b/go.sum @@ -609,8 +609,8 @@ github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2 github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI=
github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= -github.com/multiversx/mx-chain-communication-go v0.0.0-20230510122527-a68a0a8118b3 h1:sfjcPi2ELYsx80RRmYZ4t8v3SIyqH0sNzlMBUm7g7Cw= -github.com/multiversx/mx-chain-communication-go v0.0.0-20230510122527-a68a0a8118b3/go.mod h1:wdDzW6BgXd6hm3X8RzaSN+/CJ/CZYqerGgliBmVVOII= +github.com/multiversx/mx-chain-communication-go v0.0.0-20230510142707-7db6d08558fe h1:hSS9VhLBKKhOFTUS5mclHC4ENMnkzey505oCGk2Mbqc= +github.com/multiversx/mx-chain-communication-go v0.0.0-20230510142707-7db6d08558fe/go.mod h1:wdDzW6BgXd6hm3X8RzaSN+/CJ/CZYqerGgliBmVVOII= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.31/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.34/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= From 386a99e038a2f33880b254283c7166fa94b8efc6 Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Wed, 10 May 2023 17:34:26 +0300 Subject: [PATCH 154/221] fixes after review --- cmd/node/config/prefs.toml | 6 +- cmd/node/flags.go | 16 +- config/prefsConfig.go | 2 +- config/tomlConfig_test.go | 2 +- factory/processing/blockProcessorCreator.go | 8 + factory/processing/processComponents.go | 7 + integrationTests/testProcessorNode.go | 1 + integrationTests/testSyncNode.go | 1 + process/block/argProcessor.go | 2 + process/block/baseProcess.go | 93 +------ process/block/baseProcess_test.go | 108 +------- .../cutoff/blockProcessingCutoffFactory.go | 12 + .../blockProcessingCutoffFactory_test.go | 40 +++ .../cutoff/blockProcessingCutoffHandler.go | 130 +++++++++ .../blockProcessingCutoffHandler_test.go | 253 ++++++++++++++++++ .../cutoff/disabledBlockProcessingCutoff.go | 25 ++ .../disabledBlockProcessingCutoff_test.go | 22 ++ process/block/cutoff/errors.go | 9 + process/block/cutoff/interface.go | 10 + process/block/export_test.go | 12 +- 
process/block/metablock.go | 11 +- process/block/metablock_test.go | 12 + process/block/shardblock.go | 11 +- process/errors.go | 9 +- testscommon/blockProcessingCutoffStub.go | 31 +++ 25 files changed, 591 insertions(+), 242 deletions(-) create mode 100644 process/block/cutoff/blockProcessingCutoffFactory.go create mode 100644 process/block/cutoff/blockProcessingCutoffFactory_test.go create mode 100644 process/block/cutoff/blockProcessingCutoffHandler.go create mode 100644 process/block/cutoff/blockProcessingCutoffHandler_test.go create mode 100644 process/block/cutoff/disabledBlockProcessingCutoff.go create mode 100644 process/block/cutoff/disabledBlockProcessingCutoff_test.go create mode 100644 process/block/cutoff/errors.go create mode 100644 process/block/cutoff/interface.go create mode 100644 testscommon/blockProcessingCutoffStub.go diff --git a/cmd/node/config/prefs.toml b/cmd/node/config/prefs.toml index c9ec0bebe17..33cc150dee4 100644 --- a/cmd/node/config/prefs.toml +++ b/cmd/node/config/prefs.toml @@ -52,9 +52,11 @@ # BlockProcessingCutoff can be used to stop processing blocks at a certain round, nonce or epoch. # This can be useful for snapshotting different stuff and also for debugging purposes. -# It can only be enabled by using the `--block-processing-cutoff` CLI flag when starting the node [BlockProcessingCutoff] - # Mode represents the cutoff mode. possible values: "pause" or "processError". + # If set to true, the node will stop at the given coordinate + Enabled = false + + # Mode represents the cutoff mode. possible values: "pause" or "process-error". # "pause" mode will halt the processing at the block with the given coordinates. Useful for snapshots/analytics # "process-error" will return an error when processing the block with the given coordinates. 
Useful for debugging Mode = "pause" diff --git a/cmd/node/flags.go b/cmd/node/flags.go index b41354be29d..44c722da89a 100644 --- a/cmd/node/flags.go +++ b/cmd/node/flags.go @@ -384,12 +384,6 @@ var ( Usage: "String flag for specifying the desired `operation mode`(s) of the node, resulting in altering some configuration values accordingly. Possible values are: snapshotless-observer, full-archive, db-lookup-extension, historical-balances or `\"\"` (empty). Multiple values can be separated via ,", Value: "", } - - // blockProcessingCutoff defines if the node should be started with the block processing cutoff feature - blockProcessingCutoff = cli.BoolFlag{ - Name: "block-processing-cutoff", - Usage: "Boolean option for enabling the block processing cutoff feature that is able to pause the processing at a given time. The configuration should be filled inside the `prefs.toml` file.", - } ) func getFlags() []cli.Flag { @@ -449,7 +443,6 @@ func getFlags() []cli.Flag { dbDirectory, logsDirectory, operationMode, - blockProcessingCutoff, } } @@ -510,10 +503,6 @@ func applyFlags(ctx *cli.Context, cfgs *config.Configs, flagsConfig *config.Cont if ctx.IsSet(fullArchive.Name) { cfgs.PreferencesConfig.Preferences.FullArchive = ctx.GlobalBool(fullArchive.Name) } - if ctx.IsSet(blockProcessingCutoff.Name) { - cfgs.PreferencesConfig.BlockProcessingCutoff.Enabled = true - flagsConfig.DisableConsensusWatchdog = true - } if ctx.IsSet(memoryUsageToCreateProfiles.Name) { cfgs.GeneralConfig.Health.MemoryUsageToCreateProfiles = int(ctx.GlobalUint64(memoryUsageToCreateProfiles.Name)) log.Info("setting a new value for the memoryUsageToCreateProfiles option", @@ -589,6 +578,11 @@ func applyCompatibleConfigs(log logger.Logger, configs *config.Configs) error { return fmt.Errorf("import-db-no-sig-check can only be used with the import-db flag") } + if configs.PreferencesConfig.BlockProcessingCutoff.Enabled { + log.Debug("node is started by using the block processing cut-off - will disable the 
watchdog") + configs.FlagsConfig.DisableConsensusWatchdog = true + } + operationModes, err := operationmodes.ParseOperationModes(configs.FlagsConfig.OperationMode) if err != nil { return err diff --git a/config/prefsConfig.go b/config/prefsConfig.go index 063d89c1b59..34861d647e8 100644 --- a/config/prefsConfig.go +++ b/config/prefsConfig.go @@ -28,7 +28,7 @@ type OverridableConfig struct { // BlockProcessingCutoffConfig holds the configuration for the block processing cutoff type BlockProcessingCutoffConfig struct { - Enabled bool `toml:"-"` + Enabled bool Mode string CutoffTrigger string Value uint64 diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go index 9582657ec49..581459fc72d 100644 --- a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -325,7 +325,7 @@ func TestTomlPreferencesParser(t *testing.T) { PreferredConnections: []string{prefPubKey0, prefPubKey1}, }, BlockProcessingCutoff: BlockProcessingCutoffConfig{ - Enabled: false, // even though the TOML value is set to true, it should be ignored because of the "-" toml tag + Enabled: true, Mode: "pause", CutoffTrigger: "round", Value: 55, diff --git a/factory/processing/blockProcessorCreator.go b/factory/processing/blockProcessorCreator.go index 85944ed5904..02ec1e07a11 100644 --- a/factory/processing/blockProcessorCreator.go +++ b/factory/processing/blockProcessorCreator.go @@ -21,6 +21,7 @@ import ( factoryOutportProvider "github.com/multiversx/mx-chain-go/outport/process/factory" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block" + "github.com/multiversx/mx-chain-go/process/block/cutoff" "github.com/multiversx/mx-chain-go/process/block/postprocess" "github.com/multiversx/mx-chain-go/process/block/preprocess" "github.com/multiversx/mx-chain-go/process/coordinator" @@ -64,6 +65,7 @@ func (pcf *processComponentsFactory) newBlockProcessor( scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler, processedMiniBlocksTracker 
process.ProcessedMiniBlocksTracker, receiptsRepository mainFactory.ReceiptsRepository, + blockCutoffProcessingHandler cutoff.BlockProcessingCutoffHandler, ) (*blockProcessorAndVmFactories, error) { shardCoordinator := pcf.bootstrapComponents.ShardCoordinator() if shardCoordinator.SelfId() < shardCoordinator.NumberOfShards() { @@ -80,6 +82,7 @@ func (pcf *processComponentsFactory) newBlockProcessor( scheduledTxsExecutionHandler, processedMiniBlocksTracker, receiptsRepository, + blockCutoffProcessingHandler, ) } if shardCoordinator.SelfId() == core.MetachainShardId { @@ -97,6 +100,7 @@ func (pcf *processComponentsFactory) newBlockProcessor( scheduledTxsExecutionHandler, processedMiniBlocksTracker, receiptsRepository, + blockCutoffProcessingHandler, ) } @@ -118,6 +122,7 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler, processedMiniBlocksTracker process.ProcessedMiniBlocksTracker, receiptsRepository mainFactory.ReceiptsRepository, + blockProcessingCutoffHandler cutoff.BlockProcessingCutoffHandler, ) (*blockProcessorAndVmFactories, error) { argsParser := smartContract.NewArgumentParser() @@ -425,6 +430,7 @@ func (pcf *processComponentsFactory) newShardBlockProcessor( ProcessedMiniBlocksTracker: processedMiniBlocksTracker, ReceiptsRepository: receiptsRepository, OutportDataProvider: outportDataProvider, + BlockProcessingCutoffHandler: blockProcessingCutoffHandler, } arguments := block.ArgShardProcessor{ ArgBaseProcessor: argumentsBaseProcessor, @@ -461,6 +467,7 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler, processedMiniBlocksTracker process.ProcessedMiniBlocksTracker, receiptsRepository mainFactory.ReceiptsRepository, + blockProcessingCutoffhandler cutoff.BlockProcessingCutoffHandler, ) (*blockProcessorAndVmFactories, error) { builtInFuncFactory, err := 
pcf.createBuiltInFunctionContainer(pcf.state.AccountsAdapter(), make(map[string]struct{})) if err != nil { @@ -844,6 +851,7 @@ func (pcf *processComponentsFactory) newMetaBlockProcessor( ProcessedMiniBlocksTracker: processedMiniBlocksTracker, ReceiptsRepository: receiptsRepository, OutportDataProvider: outportDataProvider, + BlockProcessingCutoffHandler: blockProcessingCutoffhandler, } esdtOwnerAddress, err := pcf.coreData.AddressPubKeyConverter().Decode(pcf.systemSCConfig.ESDTSystemSCConfig.OwnerAddress) diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index b8f61db3d54..656f90396b4 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -43,6 +43,7 @@ import ( "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" + "github.com/multiversx/mx-chain-go/process/block/cutoff" "github.com/multiversx/mx-chain-go/process/block/pendingMb" "github.com/multiversx/mx-chain-go/process/block/poolsCleaner" "github.com/multiversx/mx-chain-go/process/block/preprocess" @@ -598,6 +599,11 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { return nil, err } + blockCutoffProcessingHandler, err := cutoff.CreateBlockProcessingCutoffHandler(pcf.prefConfigs.BlockProcessingCutoff) + if err != nil { + return nil, err + } + blockProcessorComponents, err := pcf.newBlockProcessor( requestHandler, forkDetector, @@ -612,6 +618,7 @@ func (pcf *processComponentsFactory) Create() (*processComponents, error) { scheduledTxsExecutionHandler, processedMiniBlocksTracker, receiptsRepository, + blockCutoffProcessingHandler, ) if err != nil { return nil, err diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index e7bf0a3fc84..eabb4e6b128 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -2089,6 
+2089,7 @@ func (tpn *TestProcessorNode) initBlockProcessor(stateCheckpointModulus uint) { ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, ReceiptsRepository: &testscommon.ReceiptsRepositoryStub{}, OutportDataProvider: &outport.OutportDataProviderStub{}, + BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, } if check.IfNil(tpn.EpochStartNotifier) { diff --git a/integrationTests/testSyncNode.go b/integrationTests/testSyncNode.go index 8b2b72d5419..f4fd1fa2faf 100644 --- a/integrationTests/testSyncNode.go +++ b/integrationTests/testSyncNode.go @@ -100,6 +100,7 @@ func (tpn *TestProcessorNode) initBlockProcessorWithSync() { ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, ReceiptsRepository: &testscommon.ReceiptsRepositoryStub{}, OutportDataProvider: &outport.OutportDataProviderStub{}, + BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, } if tpn.ShardCoordinator.SelfId() == core.MetachainShardId { diff --git a/process/block/argProcessor.go b/process/block/argProcessor.go index f8de4920c02..5e97ac59686 100644 --- a/process/block/argProcessor.go +++ b/process/block/argProcessor.go @@ -14,6 +14,7 @@ import ( "github.com/multiversx/mx-chain-go/dblookupext" "github.com/multiversx/mx-chain-go/outport" "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/block/cutoff" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/state" @@ -89,6 +90,7 @@ type ArgBaseProcessor struct { ScheduledMiniBlocksEnableEpoch uint32 ProcessedMiniBlocksTracker process.ProcessedMiniBlocksTracker ReceiptsRepository receiptsRepository + BlockProcessingCutoffHandler cutoff.BlockProcessingCutoffHandler } // ArgShardProcessor holds all dependencies required by the process data factory in order to create diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index 
d974aa85038..1570df3e346 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -33,6 +33,7 @@ import ( "github.com/multiversx/mx-chain-go/outport" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" + "github.com/multiversx/mx-chain-go/process/block/cutoff" "github.com/multiversx/mx-chain-go/process/block/processedMb" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" @@ -89,6 +90,7 @@ type baseProcessor struct { versionedHeaderFactory nodeFactory.VersionedHeaderFactory headerIntegrityVerifier process.HeaderIntegrityVerifier scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler + blockProcessingCutoffHandler cutoff.BlockProcessingCutoffHandler appStatusHandler core.AppStatusHandler stateCheckpointModulus uint @@ -114,8 +116,6 @@ type baseProcessor struct { mutNonceOfFirstCommittedBlock sync.RWMutex nonceOfFirstCommittedBlock core.OptionalUint64 - - blockProcessingCutoffConfig config.BlockProcessingCutoffConfig } type bootStorerDataArgs struct { @@ -538,32 +538,8 @@ func checkProcessorParameters(arguments ArgBaseProcessor) error { if check.IfNil(arguments.ReceiptsRepository) { return process.ErrNilReceiptsRepository } - err := checkBlockProcessingCutoffConfig(arguments.PrefsConfig.BlockProcessingCutoff) - if err != nil { - return err - } - - return nil -} - -func checkBlockProcessingCutoffConfig(cutOffConfig config.BlockProcessingCutoffConfig) error { - if !cutOffConfig.Enabled { - // don't even check the configs if the feature is disabled. Useful when a node doesn't update `prefs.toml` with - // the new configuration - return nil - } - mode := common.BlockProcessingCutoffMode(cutOffConfig.Mode) - isValidMode := mode == common.BlockProcessingCutoffModePause || mode == common.BlockProcessingCutoffModeProcessError - if !isValidMode { - return fmt.Errorf("%w. 
provided value=%s", process.ErrInvalidBlockProcessingCutOffMode, mode) - } - - cutOffTrigger := common.BlockProcessingCutoffTrigger(cutOffConfig.CutoffTrigger) - isValidCutOffTrigger := cutOffTrigger == common.BlockProcessingCutoffByRound || - cutOffTrigger == common.BlockProcessingCutoffByNonce || - cutOffTrigger == common.BlockProcessingCutoffByEpoch - if !isValidCutOffTrigger { - return fmt.Errorf("%w. provided value=%s", process.ErrInvalidBlockProcessingCutOffTrigger, cutOffTrigger) + if check.IfNil(arguments.BlockProcessingCutoffHandler) { + return process.ErrNilBlockProcessingCutoffHandler } return nil @@ -2104,64 +2080,3 @@ func (bp *baseProcessor) setNonceOfFirstCommittedBlock(nonce uint64) { bp.nonceOfFirstCommittedBlock.HasValue = true bp.nonceOfFirstCommittedBlock.Value = nonce } - -func (bp *baseProcessor) handleBlockProcessingCutoff(header data.HeaderHandler) error { - if !bp.blockProcessingCutoffConfig.Enabled || check.IfNil(header) { - return nil - } - - cutOffFunction := getCutoffFunction(bp.blockProcessingCutoffConfig) - value := bp.blockProcessingCutoffConfig.Value - - switch common.BlockProcessingCutoffTrigger(bp.blockProcessingCutoffConfig.CutoffTrigger) { - case common.BlockProcessingCutoffByRound: - if header.GetRound() >= value { - err := cutOffFunction("round", header.GetRound()) - if err != nil { - return err - } - } - case common.BlockProcessingCutoffByNonce: - if header.GetNonce() >= value { - err := cutOffFunction("nonce", header.GetNonce()) - if err != nil { - return err - } - } - case common.BlockProcessingCutoffByEpoch: - if header.GetEpoch() >= uint32(value) { - err := cutOffFunction("epoch", header.GetEpoch()) - if err != nil { - return err - } - } - } - - return nil -} - -func getCutoffFunction(cfg config.BlockProcessingCutoffConfig) func(printArgs ...interface{}) error { - processErr := fmt.Errorf("block processing cuttoff - error") - if cfg.Mode == common.BlockProcessingCutoffModeProcessError { - return func(printArgs 
...interface{}) error { - log.Info("block processing cutoff - return err", printArgs...) - return processErr - } - } - - blockingCutoffFunction := func(printArgs ...interface{}) error { - log.Info("cutting off the block processing. The node will not advance", printArgs...) - go func() { - for { - time.Sleep(time.Minute) - log.Info("node is in block processing cut-off mode", printArgs...) - } - }() - neverEndingChannel := make(chan struct{}) - <-neverEndingChannel - - return nil // should not reach this point - } - - return blockingCutoffFunction -} diff --git a/process/block/baseProcess_test.go b/process/block/baseProcess_test.go index 31427f3eee3..3b93fb7a465 100644 --- a/process/block/baseProcess_test.go +++ b/process/block/baseProcess_test.go @@ -122,6 +122,7 @@ func createArgBaseProcessor( ScheduledMiniBlocksEnableEpoch: 2, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, ReceiptsRepository: &testscommon.ReceiptsRepositoryStub{}, + BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, } } @@ -3104,110 +3105,3 @@ func TestBaseProcessor_ConcurrentCallsNonceOfFirstCommittedBlock(t *testing.T) { assert.True(t, len(values) <= 1) // we can have the situation when all reads are done before the first set assert.Equal(t, numCalls/2, values[lastValRead]+noValues) } - -func TestBaseProcessor_HandleBlockProcessingBackoff(t *testing.T) { - t.Parallel() - - t.Run("disabled or nil header - should exit", func(t *testing.T) { - t.Parallel() - - cfg := config.BlockProcessingCutoffConfig{ - Enabled: false, - } - bp := blproc.NewBaseProcessorWithBlockProcessingCutoffConfig(cfg) - - err := bp.HandleBlockProcessingCutoff(nil) - require.NoError(t, err) - - err = bp.HandleBlockProcessingCutoff(&block.MetaBlock{}) - require.NoError(t, err) - }) - - t.Run("process error via round", func(t *testing.T) { - t.Parallel() - - cfg := config.BlockProcessingCutoffConfig{ - Enabled: true, - Mode: common.BlockProcessingCutoffModeProcessError, - 
CutoffTrigger: string(common.BlockProcessingCutoffByRound), - Value: 20, - } - bp := blproc.NewBaseProcessorWithBlockProcessingCutoffConfig(cfg) - - err := bp.HandleBlockProcessingCutoff(&block.MetaBlock{Round: 19}) // not the desired round - require.NoError(t, err) - - err = bp.HandleBlockProcessingCutoff(&block.MetaBlock{Round: 20}) - require.Equal(t, errors.New("block processing cuttoff - error"), err) - }) - - t.Run("process error via nonce", func(t *testing.T) { - t.Parallel() - - cfg := config.BlockProcessingCutoffConfig{ - Enabled: true, - Mode: common.BlockProcessingCutoffModeProcessError, - CutoffTrigger: string(common.BlockProcessingCutoffByNonce), - Value: 20, - } - bp := blproc.NewBaseProcessorWithBlockProcessingCutoffConfig(cfg) - - err := bp.HandleBlockProcessingCutoff(&block.MetaBlock{Nonce: 19}) // not the desired nonce - require.NoError(t, err) - - err = bp.HandleBlockProcessingCutoff(&block.MetaBlock{Nonce: 20}) - require.Equal(t, errors.New("block processing cuttoff - error"), err) - }) - - t.Run("process error via epoch", func(t *testing.T) { - t.Parallel() - - cfg := config.BlockProcessingCutoffConfig{ - Enabled: true, - Mode: common.BlockProcessingCutoffModeProcessError, - CutoffTrigger: string(common.BlockProcessingCutoffByEpoch), - Value: 20, - } - bp := blproc.NewBaseProcessorWithBlockProcessingCutoffConfig(cfg) - - dummyEpochStartData := block.EpochStart{ - LastFinalizedHeaders: []block.EpochStartShardData{ - { - ShardID: 0, - }, - }, - } - err := bp.HandleBlockProcessingCutoff(&block.MetaBlock{Epoch: 19, EpochStart: dummyEpochStartData}) // not the desired nonce - require.NoError(t, err) - - err = bp.HandleBlockProcessingCutoff(&block.MetaBlock{Epoch: 20, EpochStart: dummyEpochStartData}) - require.Equal(t, errors.New("block processing cuttoff - error"), err) - }) - - t.Run("pause - should block the processing", func(t *testing.T) { - t.Parallel() - - cfg := config.BlockProcessingCutoffConfig{ - Enabled: true, - Mode: 
common.BlockProcessingCutoffModePause, - CutoffTrigger: string(common.BlockProcessingCutoffByRound), - Value: 20, - } - bp := blproc.NewBaseProcessorWithBlockProcessingCutoffConfig(cfg) - - err := bp.HandleBlockProcessingCutoff(&block.MetaBlock{Round: 19}) // not the desired round - require.NoError(t, err) - - done := make(chan struct{}) - go func() { - _ = bp.HandleBlockProcessingCutoff(&block.MetaBlock{Round: 20}) - done <- struct{}{} - }() - - select { - case <-done: - require.Fail(t, "should have not advanced") - case <-time.After(time.Second): - } - }) -} diff --git a/process/block/cutoff/blockProcessingCutoffFactory.go b/process/block/cutoff/blockProcessingCutoffFactory.go new file mode 100644 index 00000000000..e2a53f02caa --- /dev/null +++ b/process/block/cutoff/blockProcessingCutoffFactory.go @@ -0,0 +1,12 @@ +package cutoff + +import "github.com/multiversx/mx-chain-go/config" + +// CreateBlockProcessingCutoffHandler will create the desired block processing cutoff handler based on configuration +func CreateBlockProcessingCutoffHandler(cfg config.BlockProcessingCutoffConfig) (BlockProcessingCutoffHandler, error) { + if !cfg.Enabled { + return NewDisabledBlockProcessingCutoff(), nil + } + + return NewBlockProcessingCutoffHandler(cfg) +} diff --git a/process/block/cutoff/blockProcessingCutoffFactory_test.go b/process/block/cutoff/blockProcessingCutoffFactory_test.go new file mode 100644 index 00000000000..1e259a182c4 --- /dev/null +++ b/process/block/cutoff/blockProcessingCutoffFactory_test.go @@ -0,0 +1,40 @@ +package cutoff + +import ( + "fmt" + "testing" + + "github.com/multiversx/mx-chain-go/config" + "github.com/stretchr/testify/require" +) + +func TestCreateBlockProcessingCutoffHandler(t *testing.T) { + t.Parallel() + + t.Run("should create disabled instance", func(t *testing.T) { + t.Parallel() + + cfg := config.BlockProcessingCutoffConfig{ + Enabled: false, + } + + instance, err := CreateBlockProcessingCutoffHandler(cfg) + require.NoError(t, err) + 
require.Equal(t, "*cutoff.disabledBlockProcessingCutoff", fmt.Sprintf("%T", instance)) + }) + + t.Run("should create regular instance", func(t *testing.T) { + t.Parallel() + + cfg := config.BlockProcessingCutoffConfig{ + Enabled: true, + Mode: "pause", + CutoffTrigger: "nonce", + Value: 37, + } + + instance, err := CreateBlockProcessingCutoffHandler(cfg) + require.NoError(t, err) + require.Equal(t, "*cutoff.blockProcessingCutoffHandler", fmt.Sprintf("%T", instance)) + }) +} diff --git a/process/block/cutoff/blockProcessingCutoffHandler.go b/process/block/cutoff/blockProcessingCutoffHandler.go new file mode 100644 index 00000000000..da14d1286de --- /dev/null +++ b/process/block/cutoff/blockProcessingCutoffHandler.go @@ -0,0 +1,130 @@ +package cutoff + +import ( + "fmt" + "time" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + logger "github.com/multiversx/mx-chain-logger-go" +) + +var log = logger.GetOrCreate("process/block/cutoff") + +type blockProcessingCutoffHandler struct { + config config.BlockProcessingCutoffConfig +} + +// NewBlockProcessingCutoffHandler will return a new instance of blockProcessingCutoffHandler +func NewBlockProcessingCutoffHandler(cfg config.BlockProcessingCutoffConfig) (*blockProcessingCutoffHandler, error) { + err := checkConfig(cfg) + if err != nil { + return nil, err + } + + return &blockProcessingCutoffHandler{ + config: cfg, + }, nil +} + +// HandlePauseCutoff will pause the processing if the required coordinates are met +func (b *blockProcessingCutoffHandler) HandlePauseCutoff(header data.HeaderHandler) { + shouldSkip := !b.config.Enabled || + check.IfNil(header) || + b.config.Mode != common.BlockProcessingCutoffModePause + if shouldSkip { + return + } + + blockingCutoffFunction := func(printArgs ...interface{}) error { + log.Info("cutting off the block processing. 
The node will not advance", printArgs...) + go func() { + for { + time.Sleep(time.Minute) + log.Info("node is in block processing cut-off mode", printArgs...) + } + }() + neverEndingChannel := make(chan struct{}) + <-neverEndingChannel + + return nil // should not reach this point + } + + _ = b.handleCutoffIfCoordinatesAreMet(header, blockingCutoffFunction) + // should never reach this point +} + +// HandleProcessErrorCutoff will return error if the processing the block at the required coordinates +func (b *blockProcessingCutoffHandler) HandleProcessErrorCutoff(header data.HeaderHandler) error { + shouldSkip := !b.config.Enabled || + check.IfNil(header) || + b.config.Mode != common.BlockProcessingCutoffModeProcessError + if shouldSkip { + return nil + } + + return b.handleCutoffIfCoordinatesAreMet(header, func(printArgs ...interface{}) error { + log.Info("block processing cutoff - return err", printArgs...) + return errProcess + }) +} + +func (b *blockProcessingCutoffHandler) handleCutoffIfCoordinatesAreMet(header data.HeaderHandler, cutOffFunction func(printArgs ...interface{}) error) error { + value := b.config.Value + + switch common.BlockProcessingCutoffTrigger(b.config.CutoffTrigger) { + case common.BlockProcessingCutoffByRound: + if header.GetRound() >= value { + err := cutOffFunction("round", header.GetRound()) + if err != nil { + return err + } + } + case common.BlockProcessingCutoffByNonce: + if header.GetNonce() >= value { + err := cutOffFunction("nonce", header.GetNonce()) + if err != nil { + return err + } + } + case common.BlockProcessingCutoffByEpoch: + if header.GetEpoch() >= uint32(value) { + err := cutOffFunction("epoch", header.GetEpoch()) + if err != nil { + return err + } + } + } + + return nil +} + +// IsInterfaceNil returns true if there is no value under the interface +func (b *blockProcessingCutoffHandler) IsInterfaceNil() bool { + return b == nil +} + +func checkConfig(cutOffConfig config.BlockProcessingCutoffConfig) error { + if 
!cutOffConfig.Enabled { + // don't even check the configs if the feature is disabled. Useful when a node doesn't update `prefs.toml` with + // the new configuration + return nil + } + mode := common.BlockProcessingCutoffMode(cutOffConfig.Mode) + isValidMode := mode == common.BlockProcessingCutoffModePause || mode == common.BlockProcessingCutoffModeProcessError + if !isValidMode { + return fmt.Errorf("%w. provided value=%s", errInvalidBlockProcessingCutOffMode, mode) + } + + cutOffTrigger := common.BlockProcessingCutoffTrigger(cutOffConfig.CutoffTrigger) + isValidCutOffTrigger := cutOffTrigger == common.BlockProcessingCutoffByRound || + cutOffTrigger == common.BlockProcessingCutoffByNonce || + cutOffTrigger == common.BlockProcessingCutoffByEpoch + if !isValidCutOffTrigger { + return fmt.Errorf("%w. provided value=%s", errInvalidBlockProcessingCutOffTrigger, cutOffTrigger) + } + + return nil +} diff --git a/process/block/cutoff/blockProcessingCutoffHandler_test.go b/process/block/cutoff/blockProcessingCutoffHandler_test.go new file mode 100644 index 00000000000..c216f91d3ed --- /dev/null +++ b/process/block/cutoff/blockProcessingCutoffHandler_test.go @@ -0,0 +1,253 @@ +package cutoff + +import ( + "testing" + "time" + + "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/stretchr/testify/require" +) + +func TestNewBlockProcessingCutoffHandler(t *testing.T) { + t.Parallel() + + t.Run("invalid mode - should error", func(t *testing.T) { + t.Parallel() + + cfg := config.BlockProcessingCutoffConfig{ + Enabled: true, + Mode: "invalid", + } + b, err := NewBlockProcessingCutoffHandler(cfg) + require.Equal(t, "invalid block processing cutoff mode. 
provided value=invalid", err.Error()) + require.Nil(t, b) + }) + + t.Run("invalid cutoff trigger - should error", func(t *testing.T) { + t.Parallel() + + cfg := config.BlockProcessingCutoffConfig{ + Enabled: true, + Mode: "pause", + CutoffTrigger: "invalid", + } + b, err := NewBlockProcessingCutoffHandler(cfg) + require.Equal(t, "invalid block processing cutoff trigger. provided value=invalid", err.Error()) + require.Nil(t, b) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + cfg := config.BlockProcessingCutoffConfig{ + Enabled: true, + Mode: "pause", + CutoffTrigger: "epoch", + } + b, err := NewBlockProcessingCutoffHandler(cfg) + require.NoError(t, err) + require.False(t, check.IfNil(b)) + }) +} + +func TestBlockProcessingCutoffHandler_HandlePauseBackoff(t *testing.T) { + t.Parallel() + + t.Run("bad config - should skip", func(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + require.Nil(t, r) + }() + + cfg := config.BlockProcessingCutoffConfig{ + Enabled: false, + } + b, err := NewBlockProcessingCutoffHandler(cfg) + require.NoError(t, err) + + b.HandlePauseCutoff(nil) + b.HandlePauseCutoff(&block.MetaBlock{}) + b.config.CutoffTrigger = common.BlockProcessingCutoffModeProcessError + b.HandlePauseCutoff(&block.MetaBlock{}) + }) + + t.Run("pause via round - should work", func(t *testing.T) { + t.Parallel() + + cfg := config.BlockProcessingCutoffConfig{ + Enabled: true, + Mode: common.BlockProcessingCutoffModePause, + CutoffTrigger: string(common.BlockProcessingCutoffByRound), + Value: 20, + } + b, err := NewBlockProcessingCutoffHandler(cfg) + require.NoError(t, err) + + err = b.HandleProcessErrorCutoff(&block.MetaBlock{Round: 19}) // not the desired round + require.NoError(t, err) + + done := make(chan struct{}) + go func() { + b.HandlePauseCutoff(&block.MetaBlock{Round: 20}) + done <- struct{}{} + }() + + select { + case <-done: + require.Fail(t, "should have not advanced") + case <-time.After(100 * time.Millisecond): + } + }) 
+ + t.Run("pause via nonce - should work", func(t *testing.T) { + t.Parallel() + + cfg := config.BlockProcessingCutoffConfig{ + Enabled: true, + Mode: common.BlockProcessingCutoffModePause, + CutoffTrigger: string(common.BlockProcessingCutoffByNonce), + Value: 20, + } + b, err := NewBlockProcessingCutoffHandler(cfg) + require.NoError(t, err) + + err = b.HandleProcessErrorCutoff(&block.MetaBlock{Nonce: 19}) // not the desired round + require.NoError(t, err) + + done := make(chan struct{}) + go func() { + b.HandlePauseCutoff(&block.MetaBlock{Nonce: 20}) + done <- struct{}{} + }() + + select { + case <-done: + require.Fail(t, "should have not advanced") + case <-time.After(100 * time.Millisecond): + } + }) + + t.Run("pause via epoch - should work", func(t *testing.T) { + t.Parallel() + + cfg := config.BlockProcessingCutoffConfig{ + Enabled: true, + Mode: common.BlockProcessingCutoffModePause, + CutoffTrigger: string(common.BlockProcessingCutoffByEpoch), + Value: 20, + } + b, err := NewBlockProcessingCutoffHandler(cfg) + require.NoError(t, err) + + err = b.HandleProcessErrorCutoff(&block.MetaBlock{Epoch: 19}) // not the desired round + require.NoError(t, err) + + done := make(chan struct{}) + go func() { + b.HandlePauseCutoff(&block.MetaBlock{Epoch: 20}) + done <- struct{}{} + }() + + select { + case <-done: + require.Fail(t, "should have not advanced") + case <-time.After(time.Millisecond): + } + }) +} + +func TestBlockProcessingCutoffHandler_HandleProcessErrorBackoff(t *testing.T) { + t.Parallel() + + t.Run("bad config - should skip", func(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + require.Nil(t, r) + }() + + cfg := config.BlockProcessingCutoffConfig{ + Enabled: false, + } + b, err := NewBlockProcessingCutoffHandler(cfg) + require.NoError(t, err) + + err = b.HandleProcessErrorCutoff(nil) + require.NoError(t, err) + err = b.HandleProcessErrorCutoff(&block.MetaBlock{}) + require.NoError(t, err) + b.config.CutoffTrigger = "pause" + err = 
b.HandleProcessErrorCutoff(&block.MetaBlock{}) + require.NoError(t, err) + }) + + t.Run("process error via round", func(t *testing.T) { + t.Parallel() + + cfg := config.BlockProcessingCutoffConfig{ + Enabled: true, + Mode: common.BlockProcessingCutoffModeProcessError, + CutoffTrigger: string(common.BlockProcessingCutoffByRound), + Value: 20, + } + b, err := NewBlockProcessingCutoffHandler(cfg) + require.NoError(t, err) + + err = b.HandleProcessErrorCutoff(&block.MetaBlock{Round: 19}) // not the desired round + require.NoError(t, err) + + err = b.HandleProcessErrorCutoff(&block.MetaBlock{Round: 20}) + require.Equal(t, errProcess, err) + }) + + t.Run("process error via nonce", func(t *testing.T) { + t.Parallel() + + cfg := config.BlockProcessingCutoffConfig{ + Enabled: true, + Mode: common.BlockProcessingCutoffModeProcessError, + CutoffTrigger: string(common.BlockProcessingCutoffByNonce), + Value: 20, + } + b, err := NewBlockProcessingCutoffHandler(cfg) + require.NoError(t, err) + + err = b.HandleProcessErrorCutoff(&block.MetaBlock{Nonce: 19}) // not the desired nonce + require.NoError(t, err) + + err = b.HandleProcessErrorCutoff(&block.MetaBlock{Nonce: 20}) + require.Equal(t, errProcess, err) + }) + + t.Run("process error via epoch", func(t *testing.T) { + t.Parallel() + + cfg := config.BlockProcessingCutoffConfig{ + Enabled: true, + Mode: common.BlockProcessingCutoffModeProcessError, + CutoffTrigger: string(common.BlockProcessingCutoffByEpoch), + Value: 20, + } + b, err := NewBlockProcessingCutoffHandler(cfg) + require.NoError(t, err) + + dummyEpochStartData := block.EpochStart{ + LastFinalizedHeaders: []block.EpochStartShardData{ + { + ShardID: 0, + }, + }, + } + err = b.HandleProcessErrorCutoff(&block.MetaBlock{Epoch: 19, EpochStart: dummyEpochStartData}) // not the desired nonce + require.NoError(t, err) + + err = b.HandleProcessErrorCutoff(&block.MetaBlock{Epoch: 20, EpochStart: dummyEpochStartData}) + require.Equal(t, errProcess, err) + }) +} diff --git 
a/process/block/cutoff/disabledBlockProcessingCutoff.go b/process/block/cutoff/disabledBlockProcessingCutoff.go new file mode 100644 index 00000000000..2a3f64e66f4 --- /dev/null +++ b/process/block/cutoff/disabledBlockProcessingCutoff.go @@ -0,0 +1,25 @@ +package cutoff + +import "github.com/multiversx/mx-chain-core-go/data" + +type disabledBlockProcessingCutoff struct { +} + +// NewDisabledBlockProcessingCutoff will return a new instance of disabledBlockProcessingCutoff +func NewDisabledBlockProcessingCutoff() *disabledBlockProcessingCutoff { + return &disabledBlockProcessingCutoff{} +} + +// HandleProcessErrorCutoff returns nil +func (d disabledBlockProcessingCutoff) HandleProcessErrorCutoff(_ data.HeaderHandler) error { + return nil +} + +// HandlePauseCutoff does nothing +func (d disabledBlockProcessingCutoff) HandlePauseCutoff(_ data.HeaderHandler) { +} + +// IsInterfaceNil returns true since this structure uses value receivers +func (d disabledBlockProcessingCutoff) IsInterfaceNil() bool { + return false +} diff --git a/process/block/cutoff/disabledBlockProcessingCutoff_test.go b/process/block/cutoff/disabledBlockProcessingCutoff_test.go new file mode 100644 index 00000000000..ebc45795e00 --- /dev/null +++ b/process/block/cutoff/disabledBlockProcessingCutoff_test.go @@ -0,0 +1,22 @@ +package cutoff + +import ( + "testing" + + "github.com/multiversx/mx-chain-core-go/data/block" + "github.com/stretchr/testify/require" +) + +func TestDisabledBlockProcessingCutoff_FunctionsShouldNotPanic(t *testing.T) { + t.Parallel() + + defer func() { + r := recover() + require.Nil(t, r) + }() + d := NewDisabledBlockProcessingCutoff() + + d.HandlePauseCutoff(&block.MetaBlock{Nonce: 37}) + _ = d.HandleProcessErrorCutoff(&block.MetaBlock{Round: 37}) + _ = d.IsInterfaceNil() +} diff --git a/process/block/cutoff/errors.go b/process/block/cutoff/errors.go new file mode 100644 index 00000000000..f91484088e9 --- /dev/null +++ b/process/block/cutoff/errors.go @@ -0,0 +1,9 @@ +package 
cutoff + +import "errors" + +var errProcess = errors.New("block processing cutoff - intended processing error") + +var errInvalidBlockProcessingCutOffMode = errors.New("invalid block processing cutoff mode") + +var errInvalidBlockProcessingCutOffTrigger = errors.New("invalid block processing cutoff trigger") diff --git a/process/block/cutoff/interface.go b/process/block/cutoff/interface.go new file mode 100644 index 00000000000..849c4f88c5d --- /dev/null +++ b/process/block/cutoff/interface.go @@ -0,0 +1,10 @@ +package cutoff + +import "github.com/multiversx/mx-chain-core-go/data" + +// BlockProcessingCutoffHandler defines the actions that a block processing handler has to take care of +type BlockProcessingCutoffHandler interface { + HandleProcessErrorCutoff(header data.HeaderHandler) error + HandlePauseCutoff(header data.HeaderHandler) + IsInterfaceNil() bool +} diff --git a/process/block/export_test.go b/process/block/export_test.go index 8febbcbe600..a382ac21519 100644 --- a/process/block/export_test.go +++ b/process/block/export_test.go @@ -11,7 +11,6 @@ import ( "github.com/multiversx/mx-chain-core-go/data/scheduled" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" - "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/bootstrapStorage" @@ -65,16 +64,6 @@ func (bp *baseProcessor) CommitTrieEpochRootHashIfNeeded(metaBlock *block.MetaBl return bp.commitTrieEpochRootHashIfNeeded(metaBlock, rootHash) } -func NewBaseProcessorWithBlockProcessingCutoffConfig(cfg config.BlockProcessingCutoffConfig) *baseProcessor { - return &baseProcessor{ - blockProcessingCutoffConfig: cfg, - } -} - -func (bp *baseProcessor) HandleBlockProcessingCutoff(hdr data.HeaderHandler) error { - return bp.handleBlockProcessingCutoff(hdr) -} - func (sp *shardProcessor) ReceivedMetaBlock(header data.HeaderHandler, 
metaBlockHash []byte) { sp.receivedMetaBlock(header, metaBlockHash) } @@ -174,6 +163,7 @@ func NewShardProcessorEmptyWith3shards( ScheduledTxsExecutionHandler: &testscommon.ScheduledTxsExecutionStub{}, ProcessedMiniBlocksTracker: &testscommon.ProcessedMiniBlocksTrackerStub{}, ReceiptsRepository: &testscommon.ReceiptsRepositoryStub{}, + BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, }, } shardProc, err := NewShardProcessor(arguments) diff --git a/process/block/metablock.go b/process/block/metablock.go index 76fcc137743..80cc366582c 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -133,7 +133,7 @@ func NewMetaProcessor(arguments ArgMetaProcessor) (*metaProcessor, error) { processDebugger: processDebugger, outportDataProvider: arguments.OutportDataProvider, processStatusHandler: arguments.CoreComponents.ProcessStatusHandler(), - blockProcessingCutoffConfig: arguments.PrefsConfig.BlockProcessingCutoff, + blockProcessingCutoffHandler: arguments.BlockProcessingCutoffHandler, } mp := metaProcessor{ @@ -397,7 +397,7 @@ func (mp *metaProcessor) ProcessBlock( return err } - err = mp.handleBlockProcessingCutoff(header) + err = mp.blockProcessingCutoffHandler.HandleProcessErrorCutoff(header) if err != nil { return err } @@ -1172,11 +1172,6 @@ func (mp *metaProcessor) CommitBlock( return err } - err = mp.handleBlockProcessingCutoff(headerHandler) - if err != nil { - return err - } - mp.store.SetEpochForPutOperation(headerHandler.GetEpoch()) header, ok := headerHandler.(*block.MetaBlock) @@ -1344,6 +1339,8 @@ func (mp *metaProcessor) CommitBlock( mp.cleanupPools(headerHandler) + mp.blockProcessingCutoffHandler.HandlePauseCutoff(header) + return nil } diff --git a/process/block/metablock_test.go b/process/block/metablock_test.go index 02e3f607814..0916ebc80b6 100644 --- a/process/block/metablock_test.go +++ b/process/block/metablock_test.go @@ -145,6 +145,7 @@ func createMockMetaArguments( ProcessedMiniBlocksTracker: 
&testscommon.ProcessedMiniBlocksTrackerStub{}, ReceiptsRepository: &testscommon.ReceiptsRepositoryStub{}, OutportDataProvider: &outport.OutportDataProviderStub{}, + BlockProcessingCutoffHandler: &testscommon.BlockProcessingCutoffStub{}, }, SCToProtocol: &mock.SCToProtocolStub{}, PendingMiniBlocksHandler: &mock.PendingMiniBlocksHandlerStub{}, @@ -520,6 +521,17 @@ func TestNewMetaProcessor_NilScheduledTxsExecutionHandlerShouldErr(t *testing.T) assert.Nil(t, be) } +func TestNewMetaProcessor_NilBlockProcessingCutoffHandlerShouldErr(t *testing.T) { + t.Parallel() + + arguments := createMockMetaArguments(createMockComponentHolders()) + arguments.BlockProcessingCutoffHandler = nil + + be, err := blproc.NewMetaProcessor(arguments) + assert.Equal(t, process.ErrNilBlockProcessingCutoffHandler, err) + assert.Nil(t, be) +} + func TestNewMetaProcessor_OkValsShouldWork(t *testing.T) { t.Parallel() diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 022998fa860..0c53e07653e 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -118,7 +118,7 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { processDebugger: processDebugger, outportDataProvider: arguments.OutportDataProvider, processStatusHandler: arguments.CoreComponents.ProcessStatusHandler(), - blockProcessingCutoffConfig: arguments.PrefsConfig.BlockProcessingCutoff, + blockProcessingCutoffHandler: arguments.BlockProcessingCutoffHandler, } sp := shardProcessor{ @@ -347,7 +347,7 @@ func (sp *shardProcessor) ProcessBlock( return err } - err = sp.handleBlockProcessingCutoff(header) + err = sp.blockProcessingCutoffHandler.HandleProcessErrorCutoff(header) if err != nil { return err } @@ -894,11 +894,6 @@ func (sp *shardProcessor) CommitBlock( return err } - err = sp.handleBlockProcessingCutoff(headerHandler) - if err != nil { - return err - } - sp.store.SetEpochForPutOperation(headerHandler.GetEpoch()) log.Debug("started committing block", @@ -1088,6 
+1083,8 @@ func (sp *shardProcessor) CommitBlock( sp.cleanupPools(headerHandler) + sp.blockProcessingCutoffHandler.HandlePauseCutoff(header) + return nil } diff --git a/process/errors.go b/process/errors.go index 45692d93f31..6303006239a 100644 --- a/process/errors.go +++ b/process/errors.go @@ -1131,6 +1131,9 @@ var ErrNilProcessedMiniBlocksTracker = errors.New("nil processed mini blocks tra // ErrNilReceiptsRepository signals that a nil receipts repository has been provided var ErrNilReceiptsRepository = errors.New("nil receipts repository") +// ErrNilBlockProcessingCutoffHandler signals that a nil block processing cutoff handler has been provided +var ErrNilBlockProcessingCutoffHandler = errors.New("nil block processing cutoff handler") + // ErrNilESDTGlobalSettingsHandler signals that nil global settings handler was provided var ErrNilESDTGlobalSettingsHandler = errors.New("nil esdt global settings handler") @@ -1196,9 +1199,3 @@ var ErrGasPriceTooHigh = errors.New("gas price is too high for the transaction") // ErrGuardedTransactionNotExpected signals that a guarded transaction was received for processing but the account is not guarded var ErrGuardedTransactionNotExpected = errors.New("guarded transaction not expected") - -// ErrInvalidBlockProcessingCutOffMode signals that an invalid block processing cutoff mode has been provided -var ErrInvalidBlockProcessingCutOffMode = errors.New("invalid block processing cutoff mode") - -// ErrInvalidBlockProcessingCutOffTrigger signals that an invalid block processing cutoff trigger has been provided -var ErrInvalidBlockProcessingCutOffTrigger = errors.New("invalid block processing cutoff trigger") diff --git a/testscommon/blockProcessingCutoffStub.go b/testscommon/blockProcessingCutoffStub.go new file mode 100644 index 00000000000..c0705738182 --- /dev/null +++ b/testscommon/blockProcessingCutoffStub.go @@ -0,0 +1,31 @@ +package testscommon + +import ( + "github.com/multiversx/mx-chain-core-go/data" +) + +type 
BlockProcessingCutoffStub struct { + HandleProcessErrorCutoffCalled func(header data.HeaderHandler) error + HandlePauseCutoffCalled func(header data.HeaderHandler) +} + +// HandleProcessErrorCutoff - +func (b *BlockProcessingCutoffStub) HandleProcessErrorCutoff(header data.HeaderHandler) error { + if b.HandleProcessErrorCutoffCalled != nil { + return b.HandleProcessErrorCutoffCalled(header) + } + + return nil +} + +// HandlePauseCutoff - +func (b *BlockProcessingCutoffStub) HandlePauseCutoff(header data.HeaderHandler) { + if b.HandlePauseCutoffCalled != nil { + b.HandlePauseCutoffCalled(header) + } +} + +// IsInterfaceNil - +func (b *BlockProcessingCutoffStub) IsInterfaceNil() bool { + return b == nil +} From 246b3f83004d0ef9659f6dea5320c784ea24edf4 Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Wed, 10 May 2023 17:37:08 +0300 Subject: [PATCH 155/221] fix processing tests --- factory/processing/blockProcessorCreator_test.go | 2 ++ factory/processing/export_test.go | 3 +++ 2 files changed, 5 insertions(+) diff --git a/factory/processing/blockProcessorCreator_test.go b/factory/processing/blockProcessorCreator_test.go index 2842b92221f..e876d6e818c 100644 --- a/factory/processing/blockProcessorCreator_test.go +++ b/factory/processing/blockProcessorCreator_test.go @@ -56,6 +56,7 @@ func Test_newBlockProcessorCreatorForShard(t *testing.T) { &testscommon.ScheduledTxsExecutionStub{}, &testscommon.ProcessedMiniBlocksTrackerStub{}, &testscommon.ReceiptsRepositoryStub{}, + &testscommon.BlockProcessingCutoffStub{}, ) require.NoError(t, err) @@ -176,6 +177,7 @@ func Test_newBlockProcessorCreatorForMeta(t *testing.T) { &testscommon.ScheduledTxsExecutionStub{}, &testscommon.ProcessedMiniBlocksTrackerStub{}, &testscommon.ReceiptsRepositoryStub{}, + &testscommon.BlockProcessingCutoffStub{}, ) require.NoError(t, err) diff --git a/factory/processing/export_test.go b/factory/processing/export_test.go index 1a1c90a383f..327d4512913 100644 --- a/factory/processing/export_test.go 
+++ b/factory/processing/export_test.go @@ -5,6 +5,7 @@ import ( "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/process" + "github.com/multiversx/mx-chain-go/process/block/cutoff" "github.com/multiversx/mx-chain-go/process/txsimulator" ) @@ -23,6 +24,7 @@ func (pcf *processComponentsFactory) NewBlockProcessor( scheduledTxsExecutionHandler process.ScheduledTxsExecutionHandler, processedMiniBlocksTracker process.ProcessedMiniBlocksTracker, receiptsRepository factory.ReceiptsRepository, + blockProcessingCutoff cutoff.BlockProcessingCutoffHandler, ) (process.BlockProcessor, process.VirtualMachinesContainerFactory, error) { blockProcessorComponents, err := pcf.newBlockProcessor( requestHandler, @@ -38,6 +40,7 @@ func (pcf *processComponentsFactory) NewBlockProcessor( scheduledTxsExecutionHandler, processedMiniBlocksTracker, receiptsRepository, + blockProcessingCutoff, ) if err != nil { return nil, nil, err From 908ff496aaa12ee13623f81cbf88d8000bc9a8e2 Mon Sep 17 00:00:00 2001 From: Iuga Mihai Date: Wed, 10 May 2023 18:15:53 +0300 Subject: [PATCH 156/221] update mx-chain-core and communication go --- go.mod | 4 ++-- go.sum | 8 ++++---- outport/host/driver_test.go | 3 +-- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/go.mod b/go.mod index 6717a1a9e69..9b31a0d0a8b 100644 --- a/go.mod +++ b/go.mod @@ -13,8 +13,8 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-communication-go v0.0.0-20230510142707-7db6d08558fe - github.com/multiversx/mx-chain-core-go v1.2.1-0.20230509110712-7a0e0a14ffa7 + github.com/multiversx/mx-chain-communication-go v0.0.0-20230510151427-d0bd41659967 + github.com/multiversx/mx-chain-core-go v1.2.1-0.20230510143029-ab37792342df github.com/multiversx/mx-chain-crypto-go v1.2.5 github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96 
github.com/multiversx/mx-chain-logger-go v1.0.11 diff --git a/go.sum b/go.sum index 8ea40286f19..7d1afb2cb83 100644 --- a/go.sum +++ b/go.sum @@ -609,15 +609,15 @@ github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2 github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= -github.com/multiversx/mx-chain-communication-go v0.0.0-20230510142707-7db6d08558fe h1:hSS9VhLBKKhOFTUS5mclHC4ENMnkzey505oCGk2Mbqc= -github.com/multiversx/mx-chain-communication-go v0.0.0-20230510142707-7db6d08558fe/go.mod h1:wdDzW6BgXd6hm3X8RzaSN+/CJ/CZYqerGgliBmVVOII= +github.com/multiversx/mx-chain-communication-go v0.0.0-20230510151427-d0bd41659967 h1:/Zg28BlD8Xs5Gl8V4ltfJ2n/UkP5Jw4anvTOOMkb4Bg= +github.com/multiversx/mx-chain-communication-go v0.0.0-20230510151427-d0bd41659967/go.mod h1:GPHOm4HSXbvC0IotMziWXQmhtsUe69ScBPYsb+mF9bk= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.31/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.34/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.0/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1-0.20230329082847-b78e96c3ad5a/go.mod h1:/lovncjwo+pXQ7IAERwNzwCifeH7SAWk0DGqjorX2bc= -github.com/multiversx/mx-chain-core-go v1.2.1-0.20230509110712-7a0e0a14ffa7 h1:vrTUro90oBtC1sSWCCBSL4MWRE2p/3UnN0853Ew7Gbs= -github.com/multiversx/mx-chain-core-go v1.2.1-0.20230509110712-7a0e0a14ffa7/go.mod h1:/lovncjwo+pXQ7IAERwNzwCifeH7SAWk0DGqjorX2bc= +github.com/multiversx/mx-chain-core-go v1.2.1-0.20230510143029-ab37792342df h1:ADV4QOB2Tg42SYyVmYNq4FBXCc4bzD5EA66IFhF+fb0= 
+github.com/multiversx/mx-chain-core-go v1.2.1-0.20230510143029-ab37792342df/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= github.com/multiversx/mx-chain-crypto-go v1.2.5 h1:tuq3BUNMhKud5DQbZi9DiVAAHUXypizy8zPH0NpTGZk= github.com/multiversx/mx-chain-crypto-go v1.2.5/go.mod h1:teqhNyWEqfMPgNn8sgWXlgtJ1a36jGCnhs/tRpXW6r4= github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96 h1:okIfLr+NqX04eHNp9k97KuLhpYfLJOjmGZaOia9xcGg= diff --git a/outport/host/driver_test.go b/outport/host/driver_test.go index d07dae2c41e..6e595206c07 100644 --- a/outport/host/driver_test.go +++ b/outport/host/driver_test.go @@ -8,7 +8,6 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/marshal" - "github.com/multiversx/mx-chain-core-go/webSocket/data" outportStubs "github.com/multiversx/mx-chain-go/testscommon/outport" logger "github.com/multiversx/mx-chain-logger-go" "github.com/stretchr/testify/require" @@ -48,7 +47,7 @@ func TestNewWebsocketOutportDriverNodePart(t *testing.T) { o, err := NewHostDriver(args) require.Nil(t, o) - require.Equal(t, data.ErrNilLogger, err) + require.Equal(t, core.ErrNilLogger, err) }) t.Run("should work", func(t *testing.T) { From 48fa297d3a8c3f7a31f663bf53a61b7bd2e16418 Mon Sep 17 00:00:00 2001 From: Iuga Mihai Date: Thu, 11 May 2023 10:57:57 +0300 Subject: [PATCH 157/221] fix --- config/externalConfig.go | 2 +- factory/status/statusComponents.go | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/config/externalConfig.go b/config/externalConfig.go index 72f59443fcf..fba21834846 100644 --- a/config/externalConfig.go +++ b/config/externalConfig.go @@ -4,7 +4,7 @@ package config type ExternalConfig struct { ElasticSearchConnector ElasticSearchConfig EventNotifierConnector EventNotifierConfig - HostDriverConnector HostDriverConfig + HostDriverConfig HostDriverConfig } // ElasticSearchConfig will hold the 
configuration for the elastic search diff --git a/factory/status/statusComponents.go b/factory/status/statusComponents.go index 838a21580ec..198ff1eb77e 100644 --- a/factory/status/statusComponents.go +++ b/factory/status/statusComponents.go @@ -262,17 +262,17 @@ func (scf *statusComponentsFactory) makeEventNotifierArgs() (*outportDriverFacto } func (scf *statusComponentsFactory) makeHostDriverArgs() (outportDriverFactory.ArgsHostDriverFactory, error) { - if !scf.externalConfig.HostDriverConnector.Enabled { + if !scf.externalConfig.HostDriverConfig.Enabled { return outportDriverFactory.ArgsHostDriverFactory{}, nil } - marshaller, err := factoryMarshalizer.NewMarshalizer(scf.externalConfig.HostDriverConnector.MarshallerType) + marshaller, err := factoryMarshalizer.NewMarshalizer(scf.externalConfig.HostDriverConfig.MarshallerType) if err != nil { return outportDriverFactory.ArgsHostDriverFactory{}, err } return outportDriverFactory.ArgsHostDriverFactory{ Marshaller: marshaller, - HostConfig: scf.externalConfig.HostDriverConnector, + HostConfig: scf.externalConfig.HostDriverConfig, }, nil } From c4a331290801eab212d31078a241bb0a2035ece8 Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Thu, 11 May 2023 11:42:28 +0300 Subject: [PATCH 158/221] fixes after review --- cmd/node/config/prefs.toml | 2 +- .../cutoff/blockProcessingCutoffHandler.go | 138 +++++++++--------- .../blockProcessingCutoffHandler_test.go | 21 +-- .../cutoff/disabledBlockProcessingCutoff.go | 8 +- .../disabledBlockProcessingCutoff_test.go | 3 + process/block/metablock.go | 7 +- process/block/shardblock.go | 7 +- 7 files changed, 87 insertions(+), 99 deletions(-) diff --git a/cmd/node/config/prefs.toml b/cmd/node/config/prefs.toml index 33cc150dee4..98d5c02557f 100644 --- a/cmd/node/config/prefs.toml +++ b/cmd/node/config/prefs.toml @@ -66,7 +66,7 @@ CutoffTrigger = "round" # The minimum value of the cutoff. 
For example, if CutoffType is set to "round", and Value to 20, then the node will stop processing at round 20+ - Value = 20 + Value = 0 # NamedIdentity represents an identity that runs nodes on the multikey # There can be multiple identities set on the same node, each one of them having different bls keys, just by duplicating the NamedIdentity diff --git a/process/block/cutoff/blockProcessingCutoffHandler.go b/process/block/cutoff/blockProcessingCutoffHandler.go index da14d1286de..45bd4671f43 100644 --- a/process/block/cutoff/blockProcessingCutoffHandler.go +++ b/process/block/cutoff/blockProcessingCutoffHandler.go @@ -2,6 +2,7 @@ package cutoff import ( "fmt" + "math" "time" "github.com/multiversx/mx-chain-core-go/core/check" @@ -14,19 +15,50 @@ import ( var log = logger.GetOrCreate("process/block/cutoff") type blockProcessingCutoffHandler struct { - config config.BlockProcessingCutoffConfig + config config.BlockProcessingCutoffConfig + stopRound uint64 + stopNonce uint64 + stopEpoch uint32 } // NewBlockProcessingCutoffHandler will return a new instance of blockProcessingCutoffHandler func NewBlockProcessingCutoffHandler(cfg config.BlockProcessingCutoffConfig) (*blockProcessingCutoffHandler, error) { - err := checkConfig(cfg) + b := &blockProcessingCutoffHandler{ + config: cfg, + stopEpoch: math.MaxUint32, + stopNonce: math.MaxUint64, + stopRound: math.MaxUint64, + } + + err := b.applyConfig(cfg) if err != nil { return nil, err } - return &blockProcessingCutoffHandler{ - config: cfg, - }, nil + log.Warn("node is started by using block processing cutoff and will pause/error at the provided coordinate", cfg.CutoffTrigger, cfg.Value) + return b, nil +} + +func (b *blockProcessingCutoffHandler) applyConfig(cfg config.BlockProcessingCutoffConfig) error { + switch common.BlockProcessingCutoffMode(cfg.Mode) { + case common.BlockProcessingCutoffModeProcessError: + case common.BlockProcessingCutoffModePause: + default: + return fmt.Errorf("%w, provided value=%s", 
errInvalidBlockProcessingCutOffMode, cfg.Mode) + } + + switch common.BlockProcessingCutoffTrigger(cfg.CutoffTrigger) { + case common.BlockProcessingCutoffByRound: + b.stopRound = cfg.Value + case common.BlockProcessingCutoffByNonce: + b.stopNonce = cfg.Value + case common.BlockProcessingCutoffByEpoch: + b.stopEpoch = uint32(cfg.Value) + default: + return fmt.Errorf("%w, provided value=%s", errInvalidBlockProcessingCutOffTrigger, cfg.CutoffTrigger) + } + + return nil } // HandlePauseCutoff will pause the processing if the required coordinates are met @@ -38,22 +70,20 @@ func (b *blockProcessingCutoffHandler) HandlePauseCutoff(header data.HeaderHandl return } - blockingCutoffFunction := func(printArgs ...interface{}) error { - log.Info("cutting off the block processing. The node will not advance", printArgs...) - go func() { - for { - time.Sleep(time.Minute) - log.Info("node is in block processing cut-off mode", printArgs...) - } - }() - neverEndingChannel := make(chan struct{}) - <-neverEndingChannel - - return nil // should not reach this point + trigger, value, isTriggered := b.isTriggered(header) + if !isTriggered { + return } - _ = b.handleCutoffIfCoordinatesAreMet(header, blockingCutoffFunction) - // should never reach this point + log.Info("cutting off the block processing. The node will not advance", trigger, value) + go func() { + for { + time.Sleep(time.Minute) + log.Info("node is in block processing cut-off mode", trigger, value) + } + }() + neverEndingChannel := make(chan struct{}) + <-neverEndingChannel } // HandleProcessErrorCutoff will return error if the processing the block at the required coordinates @@ -65,66 +95,30 @@ func (b *blockProcessingCutoffHandler) HandleProcessErrorCutoff(header data.Head return nil } - return b.handleCutoffIfCoordinatesAreMet(header, func(printArgs ...interface{}) error { - log.Info("block processing cutoff - return err", printArgs...) 
- return errProcess - }) -} + trigger, value, isTriggered := b.isTriggered(header) + if !isTriggered { + return nil + } -func (b *blockProcessingCutoffHandler) handleCutoffIfCoordinatesAreMet(header data.HeaderHandler, cutOffFunction func(printArgs ...interface{}) error) error { - value := b.config.Value + log.Info("block processing cutoff - return err", trigger, value) + return errProcess +} - switch common.BlockProcessingCutoffTrigger(b.config.CutoffTrigger) { - case common.BlockProcessingCutoffByRound: - if header.GetRound() >= value { - err := cutOffFunction("round", header.GetRound()) - if err != nil { - return err - } - } - case common.BlockProcessingCutoffByNonce: - if header.GetNonce() >= value { - err := cutOffFunction("nonce", header.GetNonce()) - if err != nil { - return err - } - } - case common.BlockProcessingCutoffByEpoch: - if header.GetEpoch() >= uint32(value) { - err := cutOffFunction("epoch", header.GetEpoch()) - if err != nil { - return err - } - } +func (b *blockProcessingCutoffHandler) isTriggered(header data.HeaderHandler) (common.BlockProcessingCutoffTrigger, uint64, bool) { + if header.GetRound() >= b.stopRound { + return common.BlockProcessingCutoffByRound, header.GetRound(), true + } + if header.GetNonce() >= b.stopNonce { + return common.BlockProcessingCutoffByNonce, header.GetNonce(), true + } + if header.GetEpoch() >= b.stopEpoch { + return common.BlockProcessingCutoffByEpoch, uint64(header.GetEpoch()), true } - return nil + return "", 0, false } // IsInterfaceNil returns true if there is no value under the interface func (b *blockProcessingCutoffHandler) IsInterfaceNil() bool { return b == nil } - -func checkConfig(cutOffConfig config.BlockProcessingCutoffConfig) error { - if !cutOffConfig.Enabled { - // don't even check the configs if the feature is disabled. 
Useful when a node doesn't update `prefs.toml` with - // the new configuration - return nil - } - mode := common.BlockProcessingCutoffMode(cutOffConfig.Mode) - isValidMode := mode == common.BlockProcessingCutoffModePause || mode == common.BlockProcessingCutoffModeProcessError - if !isValidMode { - return fmt.Errorf("%w. provided value=%s", errInvalidBlockProcessingCutOffMode, mode) - } - - cutOffTrigger := common.BlockProcessingCutoffTrigger(cutOffConfig.CutoffTrigger) - isValidCutOffTrigger := cutOffTrigger == common.BlockProcessingCutoffByRound || - cutOffTrigger == common.BlockProcessingCutoffByNonce || - cutOffTrigger == common.BlockProcessingCutoffByEpoch - if !isValidCutOffTrigger { - return fmt.Errorf("%w. provided value=%s", errInvalidBlockProcessingCutOffTrigger, cutOffTrigger) - } - - return nil -} diff --git a/process/block/cutoff/blockProcessingCutoffHandler_test.go b/process/block/cutoff/blockProcessingCutoffHandler_test.go index c216f91d3ed..961a86d706c 100644 --- a/process/block/cutoff/blockProcessingCutoffHandler_test.go +++ b/process/block/cutoff/blockProcessingCutoffHandler_test.go @@ -22,7 +22,7 @@ func TestNewBlockProcessingCutoffHandler(t *testing.T) { Mode: "invalid", } b, err := NewBlockProcessingCutoffHandler(cfg) - require.Equal(t, "invalid block processing cutoff mode. provided value=invalid", err.Error()) + require.Equal(t, "invalid block processing cutoff mode, provided value=invalid", err.Error()) require.Nil(t, b) }) @@ -35,7 +35,7 @@ func TestNewBlockProcessingCutoffHandler(t *testing.T) { CutoffTrigger: "invalid", } b, err := NewBlockProcessingCutoffHandler(cfg) - require.Equal(t, "invalid block processing cutoff trigger. 
provided value=invalid", err.Error()) + require.Equal(t, "invalid block processing cutoff trigger, provided value=invalid", err.Error()) require.Nil(t, b) }) @@ -65,7 +65,9 @@ func TestBlockProcessingCutoffHandler_HandlePauseBackoff(t *testing.T) { }() cfg := config.BlockProcessingCutoffConfig{ - Enabled: false, + Enabled: false, + Mode: "pause", + CutoffTrigger: "nonce", } b, err := NewBlockProcessingCutoffHandler(cfg) require.NoError(t, err) @@ -88,8 +90,7 @@ func TestBlockProcessingCutoffHandler_HandlePauseBackoff(t *testing.T) { b, err := NewBlockProcessingCutoffHandler(cfg) require.NoError(t, err) - err = b.HandleProcessErrorCutoff(&block.MetaBlock{Round: 19}) // not the desired round - require.NoError(t, err) + b.HandlePauseCutoff(&block.MetaBlock{Round: 19}) // not the desired round done := make(chan struct{}) go func() { @@ -116,8 +117,7 @@ func TestBlockProcessingCutoffHandler_HandlePauseBackoff(t *testing.T) { b, err := NewBlockProcessingCutoffHandler(cfg) require.NoError(t, err) - err = b.HandleProcessErrorCutoff(&block.MetaBlock{Nonce: 19}) // not the desired round - require.NoError(t, err) + b.HandlePauseCutoff(&block.MetaBlock{Nonce: 19}) // not the desired round done := make(chan struct{}) go func() { @@ -144,8 +144,7 @@ func TestBlockProcessingCutoffHandler_HandlePauseBackoff(t *testing.T) { b, err := NewBlockProcessingCutoffHandler(cfg) require.NoError(t, err) - err = b.HandleProcessErrorCutoff(&block.MetaBlock{Epoch: 19}) // not the desired round - require.NoError(t, err) + b.HandlePauseCutoff(&block.MetaBlock{Epoch: 19}) // not the desired round done := make(chan struct{}) go func() { @@ -173,7 +172,9 @@ func TestBlockProcessingCutoffHandler_HandleProcessErrorBackoff(t *testing.T) { }() cfg := config.BlockProcessingCutoffConfig{ - Enabled: false, + Enabled: false, + Mode: "pause", + CutoffTrigger: "nonce", } b, err := NewBlockProcessingCutoffHandler(cfg) require.NoError(t, err) diff --git a/process/block/cutoff/disabledBlockProcessingCutoff.go 
b/process/block/cutoff/disabledBlockProcessingCutoff.go index 2a3f64e66f4..b909e88b964 100644 --- a/process/block/cutoff/disabledBlockProcessingCutoff.go +++ b/process/block/cutoff/disabledBlockProcessingCutoff.go @@ -11,15 +11,15 @@ func NewDisabledBlockProcessingCutoff() *disabledBlockProcessingCutoff { } // HandleProcessErrorCutoff returns nil -func (d disabledBlockProcessingCutoff) HandleProcessErrorCutoff(_ data.HeaderHandler) error { +func (d *disabledBlockProcessingCutoff) HandleProcessErrorCutoff(_ data.HeaderHandler) error { return nil } // HandlePauseCutoff does nothing -func (d disabledBlockProcessingCutoff) HandlePauseCutoff(_ data.HeaderHandler) { +func (d *disabledBlockProcessingCutoff) HandlePauseCutoff(_ data.HeaderHandler) { } // IsInterfaceNil returns true since this structure uses value receivers -func (d disabledBlockProcessingCutoff) IsInterfaceNil() bool { - return false +func (d *disabledBlockProcessingCutoff) IsInterfaceNil() bool { + return d == nil } diff --git a/process/block/cutoff/disabledBlockProcessingCutoff_test.go b/process/block/cutoff/disabledBlockProcessingCutoff_test.go index ebc45795e00..e213348ca20 100644 --- a/process/block/cutoff/disabledBlockProcessingCutoff_test.go +++ b/process/block/cutoff/disabledBlockProcessingCutoff_test.go @@ -19,4 +19,7 @@ func TestDisabledBlockProcessingCutoff_FunctionsShouldNotPanic(t *testing.T) { d.HandlePauseCutoff(&block.MetaBlock{Nonce: 37}) _ = d.HandleProcessErrorCutoff(&block.MetaBlock{Round: 37}) _ = d.IsInterfaceNil() + + var nilObj *disabledBlockProcessingCutoff + _ = nilObj.IsInterfaceNil() } diff --git a/process/block/metablock.go b/process/block/metablock.go index 80cc366582c..dc6e2b1303e 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -397,12 +397,7 @@ func (mp *metaProcessor) ProcessBlock( return err } - err = mp.blockProcessingCutoffHandler.HandleProcessErrorCutoff(header) - if err != nil { - return err - } - - return nil + return 
mp.blockProcessingCutoffHandler.HandleProcessErrorCutoff(header) } func (mp *metaProcessor) processEpochStartMetaBlock( diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 0c53e07653e..8f630e7ae91 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -347,12 +347,7 @@ func (sp *shardProcessor) ProcessBlock( return err } - err = sp.blockProcessingCutoffHandler.HandleProcessErrorCutoff(header) - if err != nil { - return err - } - - return nil + return sp.blockProcessingCutoffHandler.HandleProcessErrorCutoff(header) } func (sp *shardProcessor) requestEpochStartInfo(header data.ShardHeaderHandler, haveTime func() time.Duration) error { From 72ffd89cb94f779b7127e9e3ff7fb95d05a243a7 Mon Sep 17 00:00:00 2001 From: Iuga Mihai Date: Thu, 11 May 2023 12:20:40 +0300 Subject: [PATCH 159/221] change comm go --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 9b31a0d0a8b..ca3142578dd 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-communication-go v0.0.0-20230510151427-d0bd41659967 + github.com/multiversx/mx-chain-communication-go v0.0.0-20230511091919-0a98658ebc95 github.com/multiversx/mx-chain-core-go v1.2.1-0.20230510143029-ab37792342df github.com/multiversx/mx-chain-crypto-go v1.2.5 github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96 diff --git a/go.sum b/go.sum index 7d1afb2cb83..ecff4ce1121 100644 --- a/go.sum +++ b/go.sum @@ -609,8 +609,8 @@ github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2 github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod 
h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= -github.com/multiversx/mx-chain-communication-go v0.0.0-20230510151427-d0bd41659967 h1:/Zg28BlD8Xs5Gl8V4ltfJ2n/UkP5Jw4anvTOOMkb4Bg= -github.com/multiversx/mx-chain-communication-go v0.0.0-20230510151427-d0bd41659967/go.mod h1:GPHOm4HSXbvC0IotMziWXQmhtsUe69ScBPYsb+mF9bk= +github.com/multiversx/mx-chain-communication-go v0.0.0-20230511091919-0a98658ebc95 h1:AhlMavTJgWfRKETuvRem+Mj8HY06DnzPpwq+0Uamzdw= +github.com/multiversx/mx-chain-communication-go v0.0.0-20230511091919-0a98658ebc95/go.mod h1:GPHOm4HSXbvC0IotMziWXQmhtsUe69ScBPYsb+mF9bk= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.31/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.34/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= From 1c66c1e5ae7ffa4c852c3de0775a1b32810e6bdf Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Thu, 11 May 2023 12:36:27 +0300 Subject: [PATCH 160/221] fix after review --- common/errors.go | 3 +++ common/interface.go | 4 ++-- errors/errors.go | 3 +++ factory/consensus/consensusComponents.go | 5 ++++- factory/state/stateComponentsHandler.go | 8 +++++-- factory/state/stateComponentsHandler_test.go | 2 ++ process/smartContract/hooks/blockChainHook.go | 6 ++--- .../hooks/blockChainHook_test.go | 8 +++---- state/syncer/missingTrieNodesNotifier.go | 11 +++++----- state/syncer/missingTrieNodesNotifier_test.go | 22 +++++++++++-------- testscommon/missingTrieNodesNotifierStub.go | 19 ++++++++++------ testscommon/stateComponentsMock.go | 1 + 12 files changed, 59 insertions(+), 33 deletions(-) diff --git a/common/errors.go b/common/errors.go index 88be5aad958..47b976de9a8 100644 --- a/common/errors.go +++ b/common/errors.go @@ -7,3 +7,6 @@ var ErrInvalidTimeout = errors.New("invalid timeout value") // ErrNilWasmChangeLocker signals that a nil wasm change locker has been provided var 
ErrNilWasmChangeLocker = errors.New("nil wasm change locker") + +// ErrNilStateSyncNotifierSubscriber signals that a nil state sync notifier subscriber has been provided +var ErrNilStateSyncNotifierSubscriber = errors.New("nil state sync notifier subscriber") diff --git a/common/interface.go b/common/interface.go index b5ead49cbeb..91da2d7a8f1 100644 --- a/common/interface.go +++ b/common/interface.go @@ -362,8 +362,8 @@ type ManagedPeersHolder interface { // MissingTrieNodesNotifier defines the operations of an entity that notifies about missing trie nodes type MissingTrieNodesNotifier interface { - RegisterHandler(handler StateSyncNotifierSubscriber) - NotifyMissingTrieNode(hash []byte) + RegisterHandler(handler StateSyncNotifierSubscriber) error + AsyncNotifyMissingTrieNode(hash []byte) IsInterfaceNil() bool } diff --git a/errors/errors.go b/errors/errors.go index b897f2f1a6f..0f548b9cd60 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -552,3 +552,6 @@ var ErrNilPeersRatingHandler = errors.New("nil peers rating handler") // ErrNilPeersRatingMonitor signals that a nil peers rating monitor implementation has been provided var ErrNilPeersRatingMonitor = errors.New("nil peers rating monitor") + +// ErrNilMissingTrieNodesNotifier signals that a nil missing trie nodes notifier was provided +var ErrNilMissingTrieNodesNotifier = errors.New("nil missing trie nodes notifier") diff --git a/factory/consensus/consensusComponents.go b/factory/consensus/consensusComponents.go index 54e6dc24ca9..418868d4108 100644 --- a/factory/consensus/consensusComponents.go +++ b/factory/consensus/consensusComponents.go @@ -479,7 +479,10 @@ func (ccf *consensusComponentsFactory) createShardBootstrapper() (process.Bootst if !ok { return nil, fmt.Errorf("wrong type conversion for accountsDBSyncer, type: %T", accountsDBSyncer) } - ccf.stateComponents.MissingTrieNodesNotifier().RegisterHandler(stateNodesNotifierSubscriber) + err = 
ccf.stateComponents.MissingTrieNodesNotifier().RegisterHandler(stateNodesNotifierSubscriber) + if err != nil { + return nil, err + } argsBaseBootstrapper := sync.ArgBaseBootstrapper{ PoolsHolder: ccf.dataComponents.Datapool(), diff --git a/factory/state/stateComponentsHandler.go b/factory/state/stateComponentsHandler.go index c3af204327a..6adb9edc259 100644 --- a/factory/state/stateComponentsHandler.go +++ b/factory/state/stateComponentsHandler.go @@ -90,6 +90,9 @@ func (msc *managedStateComponents) CheckSubcomponents() error { return errors.ErrNilTrieStorageManager } } + if check.IfNil(msc.missingTrieNodesNotifier) { + return errors.ErrNilMissingTrieNodesNotifier + } return nil } @@ -199,14 +202,15 @@ func (msc *managedStateComponents) SetTriesStorageManagers(managers map[string]c return nil } +// MissingTrieNodesNotifier returns the missing trie nodes notifier func (msc *managedStateComponents) MissingTrieNodesNotifier() common.MissingTrieNodesNotifier { msc.mutStateComponents.RLock() defer msc.mutStateComponents.RUnlock() - if check.IfNil(msc.missingTrieNodesNotifier) { + if msc.stateComponents == nil { return nil } - + return msc.missingTrieNodesNotifier } diff --git a/factory/state/stateComponentsHandler_test.go b/factory/state/stateComponentsHandler_test.go index b303b873940..2abaecdfba0 100644 --- a/factory/state/stateComponentsHandler_test.go +++ b/factory/state/stateComponentsHandler_test.go @@ -47,6 +47,7 @@ func TestManagedStateComponents_CreateShouldWork(t *testing.T) { require.Nil(t, managedStateComponents.PeerAccounts()) require.Nil(t, managedStateComponents.TriesContainer()) require.Nil(t, managedStateComponents.TrieStorageManagers()) + require.Nil(t, managedStateComponents.MissingTrieNodesNotifier()) err = managedStateComponents.Create() require.NoError(t, err) @@ -54,6 +55,7 @@ func TestManagedStateComponents_CreateShouldWork(t *testing.T) { require.NotNil(t, managedStateComponents.PeerAccounts()) require.NotNil(t, 
managedStateComponents.TriesContainer()) require.NotNil(t, managedStateComponents.TrieStorageManagers()) + require.NotNil(t, managedStateComponents.MissingTrieNodesNotifier()) } func TestManagedStateComponents_Close(t *testing.T) { diff --git a/process/smartContract/hooks/blockChainHook.go b/process/smartContract/hooks/blockChainHook.go index 775d4b80e8f..5a8904f5b8b 100644 --- a/process/smartContract/hooks/blockChainHook.go +++ b/process/smartContract/hooks/blockChainHook.go @@ -270,7 +270,7 @@ func (bh *BlockChainHookImpl) GetStorageData(accountAddress []byte, index []byte messages = append(messages, "error") messages = append(messages, err) - bh.syncMissingDataTrieNode(err) + bh.syncIfMissingDataTrieNode(err) } log.Trace("GetStorageData ", messages...) @@ -279,7 +279,7 @@ func (bh *BlockChainHookImpl) GetStorageData(accountAddress []byte, index []byte return value, trieDepth, nil } -func (bh *BlockChainHookImpl) syncMissingDataTrieNode(err error) { +func (bh *BlockChainHookImpl) syncIfMissingDataTrieNode(err error) { if !core.IsGetNodeFromDBError(err) { return } @@ -289,7 +289,7 @@ func (bh *BlockChainHookImpl) syncMissingDataTrieNode(err error) { return } - bh.missingTrieNodesNotifier.NotifyMissingTrieNode(getNodeErr.GetKey()) + bh.missingTrieNodesNotifier.AsyncNotifyMissingTrieNode(getNodeErr.GetKey()) } func (bh *BlockChainHookImpl) processMaxReadsCounters() error { diff --git a/process/smartContract/hooks/blockChainHook_test.go b/process/smartContract/hooks/blockChainHook_test.go index 0269db81ca8..6a0420bde68 100644 --- a/process/smartContract/hooks/blockChainHook_test.go +++ b/process/smartContract/hooks/blockChainHook_test.go @@ -553,7 +553,7 @@ func TestBlockChainHookImpl_GetStorageData(t *testing.T) { }, } args.MissingTrieNodesNotifier = &testscommon.MissingTrieNodesNotifierStub{ - NotifyMissingTrieNodeCalled: func(hash []byte) { + AsyncNotifyMissingTrieNodeCalled: func(hash []byte) { assert.Equal(t, missingDataTrieKey, hash) notifyMissingTrieNodeCalled 
= true }, @@ -583,7 +583,7 @@ func TestBlockChainHookImpl_GetStorageData(t *testing.T) { }, } args.MissingTrieNodesNotifier = &testscommon.MissingTrieNodesNotifierStub{ - NotifyMissingTrieNodeCalled: func(hash []byte) { + AsyncNotifyMissingTrieNodeCalled: func(hash []byte) { assert.Fail(t, "should not have been called") }, } @@ -591,7 +591,7 @@ func TestBlockChainHookImpl_GetStorageData(t *testing.T) { _, _, _ = bh.GetStorageData([]byte("address"), missingDataTrieKey) }) - t.Run("unwrapped err is not of wanted type", func(t *testing.T) { + t.Run("unwrapped err is not of wanted type, should not call missingTrieNodesNotifier", func(t *testing.T) { t.Parallel() missingDataTrieKey := []byte("missingDataTrieKey") @@ -612,7 +612,7 @@ func TestBlockChainHookImpl_GetStorageData(t *testing.T) { }, } args.MissingTrieNodesNotifier = &testscommon.MissingTrieNodesNotifierStub{ - NotifyMissingTrieNodeCalled: func(hash []byte) { + AsyncNotifyMissingTrieNodeCalled: func(hash []byte) { assert.Fail(t, "should not have been called") }, } diff --git a/state/syncer/missingTrieNodesNotifier.go b/state/syncer/missingTrieNodesNotifier.go index d718b12ab6d..545fab32609 100644 --- a/state/syncer/missingTrieNodesNotifier.go +++ b/state/syncer/missingTrieNodesNotifier.go @@ -21,19 +21,20 @@ func NewMissingTrieNodesNotifier() *missingTrieNodesNotifier { } // RegisterHandler registers a new handler for the missing trie nodes notifier -func (mtnn *missingTrieNodesNotifier) RegisterHandler(handler common.StateSyncNotifierSubscriber) { +func (mtnn *missingTrieNodesNotifier) RegisterHandler(handler common.StateSyncNotifierSubscriber) error { if check.IfNil(handler) { - log.Warn("missingTrieNodesNotifier: nil handler") - return + return common.ErrNilStateSyncNotifierSubscriber } mtnn.mutex.Lock() mtnn.handlers = append(mtnn.handlers, handler) mtnn.mutex.Unlock() + + return nil } -// NotifyMissingTrieNode notifies all the registered handlers that a trie node is missing -func (mtnn 
*missingTrieNodesNotifier) NotifyMissingTrieNode(hash []byte) { +// AsyncNotifyMissingTrieNode asynchronously notifies all the registered handlers that a trie node is missing +func (mtnn *missingTrieNodesNotifier) AsyncNotifyMissingTrieNode(hash []byte) { if common.IsEmptyTrie(hash) { log.Warn("missingTrieNodesNotifier: empty trie hash") return diff --git a/state/syncer/missingTrieNodesNotifier_test.go b/state/syncer/missingTrieNodesNotifier_test.go index 470abbc764f..3609fac3636 100644 --- a/state/syncer/missingTrieNodesNotifier_test.go +++ b/state/syncer/missingTrieNodesNotifier_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core/check" + "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/testscommon" "github.com/stretchr/testify/assert" ) @@ -20,25 +21,27 @@ func TestMissingTrieNodesNotifier_RegisterHandler(t *testing.T) { notifier := NewMissingTrieNodesNotifier() - notifier.RegisterHandler(nil) - notifier.RegisterHandler(&testscommon.StateSyncNotifierSubscriberStub{}) - notifier.RegisterHandler(nil) + err := notifier.RegisterHandler(nil) + assert.Equal(t, common.ErrNilStateSyncNotifierSubscriber, err) + + err = notifier.RegisterHandler(&testscommon.StateSyncNotifierSubscriberStub{}) + assert.Nil(t, err) assert.Equal(t, 1, notifier.GetNumHandlers()) } -func TestMissingTrieNodesNotifier_NotifyMissingTrieNode(t *testing.T) { +func TestMissingTrieNodesNotifier_AsyncNotifyMissingTrieNode(t *testing.T) { t.Parallel() numMissingDataTrieNodeFoundCalled := 0 notifier := NewMissingTrieNodesNotifier() - notifier.NotifyMissingTrieNode([]byte("hash1")) + notifier.AsyncNotifyMissingTrieNode([]byte("hash1")) wg := sync.WaitGroup{} wg.Add(2) mutex := sync.Mutex{} - notifier.RegisterHandler(&testscommon.StateSyncNotifierSubscriberStub{ + err := notifier.RegisterHandler(&testscommon.StateSyncNotifierSubscriberStub{ MissingDataTrieNodeFoundCalled: func(_ []byte) { mutex.Lock() numMissingDataTrieNodeFoundCalled++ @@ 
-46,10 +49,11 @@ func TestMissingTrieNodesNotifier_NotifyMissingTrieNode(t *testing.T) { mutex.Unlock() }, }) + assert.Nil(t, err) - notifier.NotifyMissingTrieNode(nil) - notifier.NotifyMissingTrieNode([]byte("hash2")) - notifier.NotifyMissingTrieNode([]byte("hash3")) + notifier.AsyncNotifyMissingTrieNode(nil) + notifier.AsyncNotifyMissingTrieNode([]byte("hash2")) + notifier.AsyncNotifyMissingTrieNode([]byte("hash3")) wg.Wait() diff --git a/testscommon/missingTrieNodesNotifierStub.go b/testscommon/missingTrieNodesNotifierStub.go index 7c5a2e38736..484cd48d797 100644 --- a/testscommon/missingTrieNodesNotifierStub.go +++ b/testscommon/missingTrieNodesNotifierStub.go @@ -4,22 +4,27 @@ import "github.com/multiversx/mx-chain-go/common" // MissingTrieNodesNotifierStub - type MissingTrieNodesNotifierStub struct { - RegisterHandlerCalled func(handler common.StateSyncNotifierSubscriber) - NotifyMissingTrieNodeCalled func(hash []byte) + RegisterHandlerCalled func(handler common.StateSyncNotifierSubscriber) error + AsyncNotifyMissingTrieNodeCalled func(hash []byte) } -func (mtnns *MissingTrieNodesNotifierStub) RegisterHandler(handler common.StateSyncNotifierSubscriber) { +// RegisterHandler - +func (mtnns *MissingTrieNodesNotifierStub) RegisterHandler(handler common.StateSyncNotifierSubscriber) error { if mtnns.RegisterHandlerCalled != nil { - mtnns.RegisterHandlerCalled(handler) + return mtnns.RegisterHandlerCalled(handler) } + + return nil } -func (mtnns *MissingTrieNodesNotifierStub) NotifyMissingTrieNode(hash []byte) { - if mtnns.NotifyMissingTrieNodeCalled != nil { - mtnns.NotifyMissingTrieNodeCalled(hash) +// AsyncNotifyMissingTrieNode - +func (mtnns *MissingTrieNodesNotifierStub) AsyncNotifyMissingTrieNode(hash []byte) { + if mtnns.AsyncNotifyMissingTrieNodeCalled != nil { + mtnns.AsyncNotifyMissingTrieNodeCalled(hash) } } +// IsInterfaceNil returns true if there is no value under the interface func (mtnns *MissingTrieNodesNotifierStub) IsInterfaceNil() bool { return 
mtnns == nil } diff --git a/testscommon/stateComponentsMock.go b/testscommon/stateComponentsMock.go index 52a429d1da1..a77f4a96b95 100644 --- a/testscommon/stateComponentsMock.go +++ b/testscommon/stateComponentsMock.go @@ -66,6 +66,7 @@ func (scm *StateComponentsMock) String() string { return "StateComponentsMock" } +// MissingTrieNodesNotifier - func (scm *StateComponentsMock) MissingTrieNodesNotifier() common.MissingTrieNodesNotifier { return scm.MissingNodesNotifier } From 8271e8fb99956ef7f35f13d61b9c553b62f84924 Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Thu, 11 May 2023 13:28:26 +0300 Subject: [PATCH 161/221] fixes after review --- .../cutoff/blockProcessingCutoffHandler.go | 2 +- .../blockProcessingCutoffHandler_test.go | 143 +++++------------- .../disabledBlockProcessingCutoff_test.go | 7 +- testscommon/blockProcessingCutoffStub.go | 1 + 4 files changed, 42 insertions(+), 111 deletions(-) diff --git a/process/block/cutoff/blockProcessingCutoffHandler.go b/process/block/cutoff/blockProcessingCutoffHandler.go index 45bd4671f43..26044eec4e9 100644 --- a/process/block/cutoff/blockProcessingCutoffHandler.go +++ b/process/block/cutoff/blockProcessingCutoffHandler.go @@ -86,7 +86,7 @@ func (b *blockProcessingCutoffHandler) HandlePauseCutoff(header data.HeaderHandl <-neverEndingChannel } -// HandleProcessErrorCutoff will return error if the processing the block at the required coordinates +// HandleProcessErrorCutoff will return error if the processing block matches the required coordinates func (b *blockProcessingCutoffHandler) HandleProcessErrorCutoff(header data.HeaderHandler) error { shouldSkip := !b.config.Enabled || check.IfNil(header) || diff --git a/process/block/cutoff/blockProcessingCutoffHandler_test.go b/process/block/cutoff/blockProcessingCutoffHandler_test.go index 961a86d706c..84cd4403ecc 100644 --- a/process/block/cutoff/blockProcessingCutoffHandler_test.go +++ b/process/block/cutoff/blockProcessingCutoffHandler_test.go @@ -78,50 +78,37 @@ 
func TestBlockProcessingCutoffHandler_HandlePauseBackoff(t *testing.T) { b.HandlePauseCutoff(&block.MetaBlock{}) }) - t.Run("pause via round - should work", func(t *testing.T) { - t.Parallel() - - cfg := config.BlockProcessingCutoffConfig{ - Enabled: true, - Mode: common.BlockProcessingCutoffModePause, - CutoffTrigger: string(common.BlockProcessingCutoffByRound), - Value: 20, - } - b, err := NewBlockProcessingCutoffHandler(cfg) - require.NoError(t, err) - - b.HandlePauseCutoff(&block.MetaBlock{Round: 19}) // not the desired round - - done := make(chan struct{}) - go func() { - b.HandlePauseCutoff(&block.MetaBlock{Round: 20}) - done <- struct{}{} - }() - - select { - case <-done: - require.Fail(t, "should have not advanced") - case <-time.After(100 * time.Millisecond): - } - }) + t.Run("pause via round - should work", testHandlePauseCutoff(string(common.BlockProcessingCutoffByRound))) + t.Run("pause via nonce - should work", testHandlePauseCutoff(string(common.BlockProcessingCutoffByNonce))) + t.Run("pause via epoch - should work", testHandlePauseCutoff(string(common.BlockProcessingCutoffByEpoch))) +} - t.Run("pause via nonce - should work", func(t *testing.T) { +func testHandlePauseCutoff(trigger string) func(t *testing.T) { + return func(t *testing.T) { t.Parallel() cfg := config.BlockProcessingCutoffConfig{ Enabled: true, Mode: common.BlockProcessingCutoffModePause, - CutoffTrigger: string(common.BlockProcessingCutoffByNonce), + CutoffTrigger: trigger, Value: 20, } b, err := NewBlockProcessingCutoffHandler(cfg) require.NoError(t, err) - b.HandlePauseCutoff(&block.MetaBlock{Nonce: 19}) // not the desired round + b.HandlePauseCutoff(&block.MetaBlock{ + Epoch: 19, // not the desired epoch + Nonce: 19, // not the desired nonce + Round: 19, // not the desired round + }) done := make(chan struct{}) go func() { - b.HandlePauseCutoff(&block.MetaBlock{Nonce: 20}) + b.HandlePauseCutoff(&block.MetaBlock{ + Epoch: 20, + Nonce: 20, + Round: 20, + }) done <- struct{}{} }() @@ 
-130,34 +117,7 @@ func TestBlockProcessingCutoffHandler_HandlePauseBackoff(t *testing.T) { require.Fail(t, "should have not advanced") case <-time.After(100 * time.Millisecond): } - }) - - t.Run("pause via epoch - should work", func(t *testing.T) { - t.Parallel() - - cfg := config.BlockProcessingCutoffConfig{ - Enabled: true, - Mode: common.BlockProcessingCutoffModePause, - CutoffTrigger: string(common.BlockProcessingCutoffByEpoch), - Value: 20, - } - b, err := NewBlockProcessingCutoffHandler(cfg) - require.NoError(t, err) - - b.HandlePauseCutoff(&block.MetaBlock{Epoch: 19}) // not the desired round - - done := make(chan struct{}) - go func() { - b.HandlePauseCutoff(&block.MetaBlock{Epoch: 20}) - done <- struct{}{} - }() - - select { - case <-done: - require.Fail(t, "should have not advanced") - case <-time.After(time.Millisecond): - } - }) + } } func TestBlockProcessingCutoffHandler_HandleProcessErrorBackoff(t *testing.T) { @@ -188,67 +148,36 @@ func TestBlockProcessingCutoffHandler_HandleProcessErrorBackoff(t *testing.T) { require.NoError(t, err) }) - t.Run("process error via round", func(t *testing.T) { - t.Parallel() - - cfg := config.BlockProcessingCutoffConfig{ - Enabled: true, - Mode: common.BlockProcessingCutoffModeProcessError, - CutoffTrigger: string(common.BlockProcessingCutoffByRound), - Value: 20, - } - b, err := NewBlockProcessingCutoffHandler(cfg) - require.NoError(t, err) - - err = b.HandleProcessErrorCutoff(&block.MetaBlock{Round: 19}) // not the desired round - require.NoError(t, err) - - err = b.HandleProcessErrorCutoff(&block.MetaBlock{Round: 20}) - require.Equal(t, errProcess, err) - }) - - t.Run("process error via nonce", func(t *testing.T) { - t.Parallel() - - cfg := config.BlockProcessingCutoffConfig{ - Enabled: true, - Mode: common.BlockProcessingCutoffModeProcessError, - CutoffTrigger: string(common.BlockProcessingCutoffByNonce), - Value: 20, - } - b, err := NewBlockProcessingCutoffHandler(cfg) - require.NoError(t, err) - - err = 
b.HandleProcessErrorCutoff(&block.MetaBlock{Nonce: 19}) // not the desired nonce - require.NoError(t, err) - - err = b.HandleProcessErrorCutoff(&block.MetaBlock{Nonce: 20}) - require.Equal(t, errProcess, err) - }) + t.Run("process error via round - should work", testHandleProcessErrorCutoff(string(common.BlockProcessingCutoffByRound))) + t.Run("process error via nonce - should work", testHandleProcessErrorCutoff(string(common.BlockProcessingCutoffByNonce))) + t.Run("process error via epoch - should work", testHandleProcessErrorCutoff(string(common.BlockProcessingCutoffByEpoch))) +} - t.Run("process error via epoch", func(t *testing.T) { +func testHandleProcessErrorCutoff(trigger string) func(t *testing.T) { + return func(t *testing.T) { t.Parallel() cfg := config.BlockProcessingCutoffConfig{ Enabled: true, Mode: common.BlockProcessingCutoffModeProcessError, - CutoffTrigger: string(common.BlockProcessingCutoffByEpoch), + CutoffTrigger: trigger, Value: 20, } b, err := NewBlockProcessingCutoffHandler(cfg) require.NoError(t, err) - dummyEpochStartData := block.EpochStart{ - LastFinalizedHeaders: []block.EpochStartShardData{ - { - ShardID: 0, - }, - }, - } - err = b.HandleProcessErrorCutoff(&block.MetaBlock{Epoch: 19, EpochStart: dummyEpochStartData}) // not the desired nonce + err = b.HandleProcessErrorCutoff(&block.MetaBlock{ + Epoch: 19, // not the desired epoch + Nonce: 19, // not the desired nonce + Round: 19, // not the desired round + }) require.NoError(t, err) - err = b.HandleProcessErrorCutoff(&block.MetaBlock{Epoch: 20, EpochStart: dummyEpochStartData}) + err = b.HandleProcessErrorCutoff(&block.MetaBlock{ + Epoch: 20, + Nonce: 20, + Round: 20, + }) require.Equal(t, errProcess, err) - }) + } } diff --git a/process/block/cutoff/disabledBlockProcessingCutoff_test.go b/process/block/cutoff/disabledBlockProcessingCutoff_test.go index e213348ca20..47bbc422062 100644 --- a/process/block/cutoff/disabledBlockProcessingCutoff_test.go +++ 
b/process/block/cutoff/disabledBlockProcessingCutoff_test.go @@ -17,9 +17,10 @@ func TestDisabledBlockProcessingCutoff_FunctionsShouldNotPanic(t *testing.T) { d := NewDisabledBlockProcessingCutoff() d.HandlePauseCutoff(&block.MetaBlock{Nonce: 37}) - _ = d.HandleProcessErrorCutoff(&block.MetaBlock{Round: 37}) - _ = d.IsInterfaceNil() + err := d.HandleProcessErrorCutoff(&block.MetaBlock{Round: 37}) + require.NoError(t, err) + require.False(t, d.IsInterfaceNil()) var nilObj *disabledBlockProcessingCutoff - _ = nilObj.IsInterfaceNil() + require.True(t, nilObj.IsInterfaceNil()) } diff --git a/testscommon/blockProcessingCutoffStub.go b/testscommon/blockProcessingCutoffStub.go index c0705738182..4082d484871 100644 --- a/testscommon/blockProcessingCutoffStub.go +++ b/testscommon/blockProcessingCutoffStub.go @@ -4,6 +4,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data" ) +// BlockProcessingCutoffStub - type BlockProcessingCutoffStub struct { HandleProcessErrorCutoffCalled func(header data.HeaderHandler) error HandlePauseCutoffCalled func(header data.HeaderHandler) From b50438ff825ba50f47f0551a39c583778941f024 Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Thu, 11 May 2023 13:52:49 +0300 Subject: [PATCH 162/221] fix test formatting --- config/tomlConfig_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go index 581459fc72d..c5ebc82f204 100644 --- a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -344,10 +344,10 @@ func TestTomlPreferencesParser(t *testing.T) { ] [BlockProcessingCutoff] - Enabled = true - Mode = "pause" - CutoffTrigger = "round" - Value = 55 + Enabled = true + Mode = "pause" + CutoffTrigger = "round" + Value = 55 ` cfg := Preferences{} From 6ccd5739dd326a263b22f1b95ccf3322cd214f15 Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Thu, 11 May 2023 13:55:13 +0300 Subject: [PATCH 163/221] fix formatting try 2 --- config/tomlConfig_test.go | 8 ++++---- 
1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go index c5ebc82f204..e1379eef716 100644 --- a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -344,10 +344,10 @@ func TestTomlPreferencesParser(t *testing.T) { ] [BlockProcessingCutoff] - Enabled = true - Mode = "pause" - CutoffTrigger = "round" - Value = 55 + Enabled = true + Mode = "pause" + CutoffTrigger = "round" + Value = 55 ` cfg := Preferences{} From 4d5cc99ad3c474f95ff3a065958bf7401c2632a4 Mon Sep 17 00:00:00 2001 From: Iuga Mihai Date: Thu, 11 May 2023 14:14:03 +0300 Subject: [PATCH 164/221] change comm go --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ca3142578dd..64728f6e5e8 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-communication-go v0.0.0-20230511091919-0a98658ebc95 + github.com/multiversx/mx-chain-communication-go v0.0.0-20230511105730-3400290e42c0 github.com/multiversx/mx-chain-core-go v1.2.1-0.20230510143029-ab37792342df github.com/multiversx/mx-chain-crypto-go v1.2.5 github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96 diff --git a/go.sum b/go.sum index ecff4ce1121..c48acfd3bda 100644 --- a/go.sum +++ b/go.sum @@ -609,8 +609,8 @@ github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2 github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= -github.com/multiversx/mx-chain-communication-go v0.0.0-20230511091919-0a98658ebc95 h1:AhlMavTJgWfRKETuvRem+Mj8HY06DnzPpwq+0Uamzdw= -github.com/multiversx/mx-chain-communication-go 
v0.0.0-20230511091919-0a98658ebc95/go.mod h1:GPHOm4HSXbvC0IotMziWXQmhtsUe69ScBPYsb+mF9bk= +github.com/multiversx/mx-chain-communication-go v0.0.0-20230511105730-3400290e42c0 h1:dZa9ZfN9R605VZYJNhC36eSXJumADO6bHNZMhMdMLfg= +github.com/multiversx/mx-chain-communication-go v0.0.0-20230511105730-3400290e42c0/go.mod h1:GPHOm4HSXbvC0IotMziWXQmhtsUe69ScBPYsb+mF9bk= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.31/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.34/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= From 0d004e59efdd74027d3e1bf84125745387c8ff52 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Thu, 11 May 2023 16:40:44 +0300 Subject: [PATCH 165/221] added facade tests --- facade/initial/initialNodeFacade_test.go | 96 ++- facade/mock/nodeStub.go | 6 +- facade/nodeFacade_test.go | 868 +++++++++++++++++------ 3 files changed, 740 insertions(+), 230 deletions(-) diff --git a/facade/initial/initialNodeFacade_test.go b/facade/initial/initialNodeFacade_test.go index f157571da48..bba4b57eaa7 100644 --- a/facade/initial/initialNodeFacade_test.go +++ b/facade/initial/initialNodeFacade_test.go @@ -4,7 +4,6 @@ import ( "fmt" "testing" - "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-go/facade" "github.com/multiversx/mx-chain-go/node/external" @@ -20,14 +19,14 @@ func TestInitialNodeFacade(t *testing.T) { inf, err := NewInitialNodeFacade("127.0.0.1:8080", true, nil) assert.Equal(t, facade.ErrNilStatusMetrics, err) - assert.True(t, check.IfNil(inf)) + assert.Nil(t, inf) }) t.Run("should work", func(t *testing.T) { t.Parallel() inf, err := NewInitialNodeFacade("127.0.0.1:8080", true, &testscommon.StatusMetricsStub{}) assert.Nil(t, err) - assert.False(t, check.IfNil(inf)) + assert.NotNil(t, inf) }) } @@ -233,5 +232,94 @@ func 
TestInitialNodeFacade_AllMethodsShouldNotPanic(t *testing.T) { assert.Equal(t, api.GuardianData{}, guardianData) assert.Equal(t, errNodeStarting, err) - assert.False(t, check.IfNil(inf)) + mainTrieResponse, dataTrieResponse, err := inf.GetProofDataTrie("", "", "") + assert.Nil(t, mainTrieResponse) + assert.Nil(t, dataTrieResponse) + assert.Equal(t, errNodeStarting, err) + + codeHash, blockInfo, err := inf.GetCodeHash("", api.AccountQueryOptions{}) + assert.Nil(t, codeHash) + assert.Equal(t, api.BlockInfo{}, blockInfo) + assert.Equal(t, errNodeStarting, err) + + accountsResponse, blockInfo, err := inf.GetAccounts([]string{}, api.AccountQueryOptions{}) + assert.Nil(t, accountsResponse) + assert.Equal(t, api.BlockInfo{}, blockInfo) + assert.Equal(t, errNodeStarting, err) + + stakeValue, err := inf.GetTotalStakedValue() + assert.Nil(t, stakeValue) + assert.Equal(t, errNodeStarting, err) + + ratings := inf.GetConnectedPeersRatings() + assert.Equal(t, "", ratings) + + epochStartData, err := inf.GetEpochStartDataAPI(0) + assert.Nil(t, epochStartData) + assert.Equal(t, errNodeStarting, err) + + alteredAcc, err := inf.GetAlteredAccountsForBlock(api.GetAlteredAccountsForBlockOptions{}) + assert.Nil(t, alteredAcc) + assert.Equal(t, errNodeStarting, err) + + block, err := inf.GetInternalMetaBlockByHash(0, "") + assert.Nil(t, block) + assert.Equal(t, errNodeStarting, err) + + block, err = inf.GetInternalMetaBlockByNonce(0, 0) + assert.Nil(t, block) + assert.Equal(t, errNodeStarting, err) + + block, err = inf.GetInternalMetaBlockByRound(0, 0) + assert.Nil(t, block) + assert.Equal(t, errNodeStarting, err) + + block, err = inf.GetInternalStartOfEpochMetaBlock(0, 0) + assert.Nil(t, block) + assert.Equal(t, errNodeStarting, err) + + validatorsInfo, err := inf.GetInternalStartOfEpochValidatorsInfo(0) + assert.Nil(t, validatorsInfo) + assert.Equal(t, errNodeStarting, err) + + block, err = inf.GetInternalShardBlockByHash(0, "") + assert.Nil(t, block) + assert.Equal(t, errNodeStarting, 
err) + + block, err = inf.GetInternalShardBlockByNonce(0, 0) + assert.Nil(t, block) + assert.Equal(t, errNodeStarting, err) + + block, err = inf.GetInternalShardBlockByRound(0, 0) + assert.Nil(t, block) + assert.Equal(t, errNodeStarting, err) + + block, err = inf.GetInternalMiniBlockByHash(0, "", 0) + assert.Nil(t, block) + assert.Equal(t, errNodeStarting, err) + + esdtData, blockInfo, err := inf.GetESDTData("", "", 0, api.AccountQueryOptions{}) + assert.Nil(t, esdtData) + assert.Equal(t, api.BlockInfo{}, blockInfo) + assert.Equal(t, errNodeStarting, err) + + genesisBalances, err := inf.GetGenesisBalances() + assert.Nil(t, genesisBalances) + assert.Equal(t, errNodeStarting, err) + + txPoolGaps, err := inf.GetTransactionsPoolNonceGapsForSender("") + assert.Nil(t, txPoolGaps) + assert.Equal(t, errNodeStarting, err) + + assert.NotNil(t, inf) +} + +func TestInitialNodeFacade_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var inf *initialNodeFacade + assert.True(t, inf.IsInterfaceNil()) + + inf, _ = NewInitialNodeFacade("127.0.0.1:7799", true, &testscommon.StatusMetricsStub{}) + assert.False(t, inf.IsInterfaceNil()) } diff --git a/facade/mock/nodeStub.go b/facade/mock/nodeStub.go index 729e2d568d4..4c69d0e2790 100644 --- a/facade/mock/nodeStub.go +++ b/facade/mock/nodeStub.go @@ -52,6 +52,7 @@ type NodeStub struct { GetProofCalled func(rootHash string, key string) (*common.GetProofResponse, error) GetProofDataTrieCalled func(rootHash string, address string, key string) (*common.GetProofResponse, *common.GetProofResponse, error) VerifyProofCalled func(rootHash string, address string, proof [][]byte) (bool, error) + GetTokenSupplyCalled func(token string) (*api.ESDTSupply, error) } // GetProof - @@ -268,7 +269,10 @@ func (ns *NodeStub) GetAllESDTTokens(address string, options api.AccountQueryOpt } // GetTokenSupply - -func (ns *NodeStub) GetTokenSupply(_ string) (*api.ESDTSupply, error) { +func (ns *NodeStub) GetTokenSupply(token string) (*api.ESDTSupply, error) { + if 
ns.GetTokenSupplyCalled != nil { + return ns.GetTokenSupplyCalled(token) + } return nil, nil } diff --git a/facade/nodeFacade_test.go b/facade/nodeFacade_test.go index a1a39380fbc..bced65b3788 100644 --- a/facade/nodeFacade_test.go +++ b/facade/nodeFacade_test.go @@ -12,11 +12,11 @@ import ( "github.com/multiversx/mx-chain-core-go/core" atomicCore "github.com/multiversx/mx-chain-core-go/core/atomic" - "github.com/multiversx/mx-chain-core-go/core/check" nodeData "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/esdt" + "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-core-go/data/vm" "github.com/multiversx/mx-chain-go/common" @@ -26,15 +26,15 @@ import ( "github.com/multiversx/mx-chain-go/heartbeat/data" "github.com/multiversx/mx-chain-go/node/external" "github.com/multiversx/mx-chain-go/process" + txSimData "github.com/multiversx/mx-chain-go/process/txsimulator/data" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/testscommon" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" vmcommon "github.com/multiversx/mx-chain-vm-common-go" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -// TODO increase code coverage +var expectedErr = errors.New("expected error") func createMockArguments() ArgNodeFacade { return ArgNodeFacade{ @@ -72,88 +72,144 @@ func createMockArguments() ArgNodeFacade { } } -// ------- NewNodeFacade - -func TestNewNodeFacade_WithNilNodeShouldErr(t *testing.T) { +func TestNewNodeFacade(t *testing.T) { t.Parallel() - arg := createMockArguments() - arg.Node = nil - nf, err := NewNodeFacade(arg) + t.Run("nil Node should error", func(t *testing.T) { + t.Parallel() - assert.True(t, check.IfNil(nf)) - assert.Equal(t, ErrNilNode, err) -} + arg := 
createMockArguments() + arg.Node = nil + nf, err := NewNodeFacade(arg) -func TestNewNodeFacade_WithNilApiResolverShouldErr(t *testing.T) { - t.Parallel() + require.Nil(t, nf) + require.Equal(t, ErrNilNode, err) + }) + t.Run("nil ApiResolver should error", func(t *testing.T) { + t.Parallel() - arg := createMockArguments() - arg.ApiResolver = nil - nf, err := NewNodeFacade(arg) + arg := createMockArguments() + arg.ApiResolver = nil + nf, err := NewNodeFacade(arg) - assert.True(t, check.IfNil(nf)) - assert.Equal(t, ErrNilApiResolver, err) -} + require.Nil(t, nf) + require.Equal(t, ErrNilApiResolver, err) + }) + t.Run("nil TxSimulatorProcessor should error", func(t *testing.T) { + t.Parallel() -func TestNewNodeFacade_WithInvalidSimultaneousRequestsShouldErr(t *testing.T) { - t.Parallel() + arg := createMockArguments() + arg.TxSimulatorProcessor = nil + nf, err := NewNodeFacade(arg) - arg := createMockArguments() - arg.WsAntifloodConfig.WebServerAntifloodEnabled = true - arg.WsAntifloodConfig.SimultaneousRequests = 0 - nf, err := NewNodeFacade(arg) + require.Nil(t, nf) + require.Equal(t, ErrNilTransactionSimulatorProcessor, err) + }) + t.Run("invalid ApiRoutesConfig should error", func(t *testing.T) { + t.Parallel() - assert.True(t, check.IfNil(nf)) - assert.True(t, errors.Is(err, ErrInvalidValue)) -} + arg := createMockArguments() + arg.ApiRoutesConfig = config.ApiRoutesConfig{} + nf, err := NewNodeFacade(arg) -func TestNewNodeFacade_WithInvalidSameSourceResetIntervalInSecShouldErr(t *testing.T) { - t.Parallel() + require.Nil(t, nf) + require.True(t, errors.Is(err, ErrNoApiRoutesConfig)) + }) + t.Run("invalid SimultaneousRequests should error", func(t *testing.T) { + t.Parallel() - arg := createMockArguments() - arg.WsAntifloodConfig.WebServerAntifloodEnabled = true - arg.WsAntifloodConfig.SameSourceResetIntervalInSec = 0 - nf, err := NewNodeFacade(arg) + arg := createMockArguments() + arg.WsAntifloodConfig.WebServerAntifloodEnabled = true + 
arg.WsAntifloodConfig.SimultaneousRequests = 0 + nf, err := NewNodeFacade(arg) - assert.True(t, check.IfNil(nf)) - assert.True(t, errors.Is(err, ErrInvalidValue)) -} + require.Nil(t, nf) + require.True(t, errors.Is(err, ErrInvalidValue)) + }) + t.Run("invalid SameSourceRequests should error", func(t *testing.T) { + t.Parallel() -func TestNewNodeFacade_WithInvalidSameSourceRequestsShouldErr(t *testing.T) { - t.Parallel() + arg := createMockArguments() + arg.WsAntifloodConfig.WebServerAntifloodEnabled = true + arg.WsAntifloodConfig.SameSourceRequests = 0 + nf, err := NewNodeFacade(arg) - arg := createMockArguments() - arg.WsAntifloodConfig.WebServerAntifloodEnabled = true - arg.WsAntifloodConfig.SameSourceRequests = 0 - nf, err := NewNodeFacade(arg) + require.Nil(t, nf) + require.True(t, errors.Is(err, ErrInvalidValue)) + }) + t.Run("invalid SameSourceResetIntervalInSec should error", func(t *testing.T) { + t.Parallel() - assert.True(t, check.IfNil(nf)) - assert.True(t, errors.Is(err, ErrInvalidValue)) -} + arg := createMockArguments() + arg.WsAntifloodConfig.WebServerAntifloodEnabled = true + arg.WsAntifloodConfig.SameSourceResetIntervalInSec = 0 + nf, err := NewNodeFacade(arg) -func TestNewNodeFacade_WithInvalidApiRoutesConfigShouldErr(t *testing.T) { - t.Parallel() + require.Nil(t, nf) + require.True(t, errors.Is(err, ErrInvalidValue)) + }) + t.Run("invalid TrieOperationsDeadlineMilliseconds should error", func(t *testing.T) { + t.Parallel() - arg := createMockArguments() - arg.ApiRoutesConfig = config.ApiRoutesConfig{} - nf, err := NewNodeFacade(arg) + arg := createMockArguments() + arg.WsAntifloodConfig.WebServerAntifloodEnabled = true + arg.WsAntifloodConfig.TrieOperationsDeadlineMilliseconds = 0 + nf, err := NewNodeFacade(arg) - assert.True(t, check.IfNil(nf)) - assert.True(t, errors.Is(err, ErrNoApiRoutesConfig)) -} + require.Nil(t, nf) + require.True(t, errors.Is(err, ErrInvalidValue)) + }) + t.Run("nil AccountsState should error", func(t *testing.T) { + 
t.Parallel() -func TestNewNodeFacade_WithValidNodeShouldReturnNotNil(t *testing.T) { - t.Parallel() + arg := createMockArguments() + arg.WsAntifloodConfig.WebServerAntifloodEnabled = true // coverage + arg.AccountsState = nil + nf, err := NewNodeFacade(arg) - arg := createMockArguments() - nf, err := NewNodeFacade(arg) + require.Nil(t, nf) + require.Equal(t, ErrNilAccountState, err) + }) + t.Run("nil PeerState should error", func(t *testing.T) { + t.Parallel() - assert.False(t, check.IfNil(nf)) - assert.Nil(t, err) -} + arg := createMockArguments() + arg.PeerState = nil + nf, err := NewNodeFacade(arg) + + require.Nil(t, nf) + require.Equal(t, ErrNilPeerState, err) + }) + t.Run("nil Blockchain should error", func(t *testing.T) { + t.Parallel() + + arg := createMockArguments() + arg.Blockchain = nil + nf, err := NewNodeFacade(arg) + + require.Nil(t, nf) + require.Equal(t, ErrNilBlockchain, err) + }) + + t.Run("should work", func(t *testing.T) { + t.Parallel() + + arg := createMockArguments() + arg.WsAntifloodConfig.EndpointsThrottlers = []config.EndpointsThrottlersConfig{ + { + Endpoint: "endpoint_1", + MaxNumGoRoutines: 10, + }, { + Endpoint: "endpoint_2", + MaxNumGoRoutines: 0, // NewNumGoRoutinesThrottler fails for coverage + }, + } + nf, err := NewNodeFacade(arg) -// ------- Methods + require.NotNil(t, nf) + require.NoError(t, err) + }) +} func TestNodeFacade_GetBalanceWithValidAddressShouldReturnBalance(t *testing.T) { t.Parallel() @@ -175,8 +231,8 @@ func TestNodeFacade_GetBalanceWithValidAddressShouldReturnBalance(t *testing.T) amount, _, err := nf.GetBalance(addr, api.AccountQueryOptions{}) - assert.Nil(t, err) - assert.Equal(t, balance, amount) + require.NoError(t, err) + require.Equal(t, balance, amount) } func TestNodeFacade_GetBalanceWithUnknownAddressShouldReturnZeroBalance(t *testing.T) { @@ -201,8 +257,8 @@ func TestNodeFacade_GetBalanceWithUnknownAddressShouldReturnZeroBalance(t *testi nf, _ := NewNodeFacade(arg) amount, _, err := 
nf.GetBalance(unknownAddr, api.AccountQueryOptions{}) - assert.Nil(t, err) - assert.Equal(t, zeroBalance, amount) + require.NoError(t, err) + require.Equal(t, zeroBalance, amount) } func TestNodeFacade_GetBalanceWithErrorOnNodeShouldReturnZeroBalanceAndError(t *testing.T) { @@ -222,8 +278,8 @@ func TestNodeFacade_GetBalanceWithErrorOnNodeShouldReturnZeroBalanceAndError(t * nf, _ := NewNodeFacade(arg) amount, _, err := nf.GetBalance(addr, api.AccountQueryOptions{}) - assert.NotNil(t, err) - assert.Equal(t, zeroBalance, amount) + require.NotNil(t, err) + require.Equal(t, zeroBalance, amount) } func TestNodeFacade_GetTransactionWithValidInputsShouldNotReturnError(t *testing.T) { @@ -246,8 +302,8 @@ func TestNodeFacade_GetTransactionWithValidInputsShouldNotReturnError(t *testing nf, _ := NewNodeFacade(arg) tx, err := nf.GetTransaction(testHash, false) - assert.Nil(t, err) - assert.Equal(t, testTx, tx) + require.NoError(t, err) + require.Equal(t, testTx, tx) } func TestNodeFacade_GetTransactionWithUnknowHashShouldReturnNilAndNoError(t *testing.T) { @@ -267,8 +323,8 @@ func TestNodeFacade_GetTransactionWithUnknowHashShouldReturnNilAndNoError(t *tes nf, _ := NewNodeFacade(arg) tx, err := nf.GetTransaction("unknownHash", false) - assert.Nil(t, err) - assert.Nil(t, tx) + require.NoError(t, err) + require.Nil(t, tx) } func TestNodeFacade_SetSyncer(t *testing.T) { @@ -279,25 +335,43 @@ func TestNodeFacade_SetSyncer(t *testing.T) { sync := &mock.SyncTimerMock{} nf.SetSyncer(sync) - assert.Equal(t, sync, nf.GetSyncer()) + require.Equal(t, sync, nf.GetSyncer()) } func TestNodeFacade_GetAccount(t *testing.T) { t.Parallel() - getAccountCalled := false - node := &mock.NodeStub{} - node.GetAccountCalled = func(address string, _ api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) { - getAccountCalled = true - return api.AccountResponse{}, api.BlockInfo{}, nil - } + t.Run("should error", func(t *testing.T) { + t.Parallel() - arg := createMockArguments() - arg.Node = 
node - nf, _ := NewNodeFacade(arg) + arg := createMockArguments() + arg.Node = &mock.NodeStub{ + GetAccountCalled: func(address string, _ api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) { + return api.AccountResponse{}, api.BlockInfo{}, expectedErr + }, + } + nf, _ := NewNodeFacade(arg) - _, _, _ = nf.GetAccount("test", api.AccountQueryOptions{}) - assert.True(t, getAccountCalled) + _, _, err := nf.GetAccount("test", api.AccountQueryOptions{}) + require.Equal(t, expectedErr, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + getAccountCalled := false + node := &mock.NodeStub{} + node.GetAccountCalled = func(address string, _ api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) { + getAccountCalled = true + return api.AccountResponse{}, api.BlockInfo{}, nil + } + + arg := createMockArguments() + arg.Node = node + nf, _ := NewNodeFacade(arg) + + _, _, _ = nf.GetAccount("test", api.AccountQueryOptions{}) + require.True(t, getAccountCalled) + }) } func TestNodeFacade_GetAccounts(t *testing.T) { @@ -311,16 +385,15 @@ func TestNodeFacade_GetAccounts(t *testing.T) { nf, _ := NewNodeFacade(arg) resp, blockInfo, err := nf.GetAccounts([]string{"test1", "test2"}, api.AccountQueryOptions{}) - assert.Nil(t, resp) - assert.Empty(t, blockInfo) - assert.Error(t, err) - assert.Equal(t, "too many addresses in the bulk request (provided: 2, maximum: 1)", err.Error()) + require.Nil(t, resp) + require.Empty(t, blockInfo) + require.Error(t, err) + require.Equal(t, "too many addresses in the bulk request (provided: 2, maximum: 1)", err.Error()) }) t.Run("node responds with error, should err", func(t *testing.T) { t.Parallel() - expectedErr := errors.New("expected error") node := &mock.NodeStub{} node.GetAccountCalled = func(address string, _ api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) { return api.AccountResponse{}, api.BlockInfo{}, expectedErr @@ -332,9 +405,9 @@ func TestNodeFacade_GetAccounts(t 
*testing.T) { nf, _ := NewNodeFacade(arg) resp, blockInfo, err := nf.GetAccounts([]string{"test"}, api.AccountQueryOptions{}) - assert.Nil(t, resp) - assert.Empty(t, blockInfo) - assert.Equal(t, expectedErr, err) + require.Nil(t, resp) + require.Empty(t, blockInfo) + require.Equal(t, expectedErr, err) }) t.Run("should work", func(t *testing.T) { @@ -352,9 +425,9 @@ func TestNodeFacade_GetAccounts(t *testing.T) { nf, _ := NewNodeFacade(arg) resp, blockInfo, err := nf.GetAccounts([]string{"test"}, api.AccountQueryOptions{}) - assert.NoError(t, err) - assert.Empty(t, blockInfo) - assert.Equal(t, &expectedAcount, resp["test"]) + require.NoError(t, err) + require.Empty(t, blockInfo) + require.Equal(t, &expectedAcount, resp["test"]) }) } @@ -372,8 +445,8 @@ func TestNodeFacade_GetUsername(t *testing.T) { nf, _ := NewNodeFacade(arg) username, _, err := nf.GetUsername("test", api.AccountQueryOptions{}) - assert.NoError(t, err) - assert.Equal(t, expectedUsername, username) + require.NoError(t, err) + require.Equal(t, expectedUsername, username) } func TestNodeFacade_GetCodeHash(t *testing.T) { @@ -390,8 +463,8 @@ func TestNodeFacade_GetCodeHash(t *testing.T) { nf, _ := NewNodeFacade(arg) codeHash, _, err := nf.GetCodeHash("test", api.AccountQueryOptions{}) - assert.NoError(t, err) - assert.Equal(t, expectedCodeHash, codeHash) + require.NoError(t, err) + require.Equal(t, expectedCodeHash, codeHash) } func TestNodeFacade_GetHeartbeatsReturnsNilShouldErr(t *testing.T) { @@ -408,8 +481,8 @@ func TestNodeFacade_GetHeartbeatsReturnsNilShouldErr(t *testing.T) { result, err := nf.GetHeartbeats() - assert.Nil(t, result) - assert.Equal(t, ErrHeartbeatsNotActive, err) + require.Nil(t, result) + require.Equal(t, ErrHeartbeatsNotActive, err) } func TestNodeFacade_GetHeartbeats(t *testing.T) { @@ -439,7 +512,7 @@ func TestNodeFacade_GetHeartbeats(t *testing.T) { result, err := nf.GetHeartbeats() - assert.Nil(t, err) + require.NoError(t, err) fmt.Println(result) } @@ -458,7 +531,7 @@ func 
TestNodeFacade_GetDataValue(t *testing.T) { require.NoError(t, err) _, _ = nf.ExecuteSCQuery(nil) - assert.True(t, wasCalled) + require.True(t, wasCalled) } func TestNodeFacade_EmptyRestInterface(t *testing.T) { @@ -468,7 +541,7 @@ func TestNodeFacade_EmptyRestInterface(t *testing.T) { arg.FacadeConfig.RestApiInterface = "" nf, _ := NewNodeFacade(arg) - assert.Equal(t, DefaultRestInterface, nf.RestApiInterface()) + require.Equal(t, DefaultRestInterface, nf.RestApiInterface()) } func TestNodeFacade_RestInterface(t *testing.T) { @@ -479,7 +552,7 @@ func TestNodeFacade_RestInterface(t *testing.T) { arg.FacadeConfig.RestApiInterface = intf nf, _ := NewNodeFacade(arg) - assert.Equal(t, intf, nf.RestApiInterface()) + require.Equal(t, intf, nf.RestApiInterface()) } func TestNodeFacade_ValidatorStatisticsApi(t *testing.T) { @@ -497,8 +570,8 @@ func TestNodeFacade_ValidatorStatisticsApi(t *testing.T) { nf, _ := NewNodeFacade(arg) res, err := nf.ValidatorStatisticsApi() - assert.Nil(t, err) - assert.Equal(t, mapToRet, res) + require.NoError(t, err) + require.Equal(t, mapToRet, res) } func TestNodeFacade_SendBulkTransactions(t *testing.T) { @@ -521,9 +594,9 @@ func TestNodeFacade_SendBulkTransactions(t *testing.T) { txs = append(txs, &transaction.Transaction{Nonce: 1}) res, err := nf.SendBulkTransactions(txs) - assert.Nil(t, err) - assert.Equal(t, expectedNumOfSuccessfulTxs, res) - assert.True(t, sendBulkTxsWasCalled) + require.NoError(t, err) + require.Equal(t, expectedNumOfSuccessfulTxs, res) + require.True(t, sendBulkTxsWasCalled) } func TestNodeFacade_StatusMetrics(t *testing.T) { @@ -543,7 +616,7 @@ func TestNodeFacade_StatusMetrics(t *testing.T) { _ = nf.StatusMetrics() - assert.True(t, apiResolverMetricsRequested) + require.True(t, apiResolverMetricsRequested) } func TestNodeFacade_PprofEnabled(t *testing.T) { @@ -553,7 +626,7 @@ func TestNodeFacade_PprofEnabled(t *testing.T) { arg.FacadeConfig.PprofEnabled = true nf, _ := NewNodeFacade(arg) - assert.True(t, 
nf.PprofEnabled()) + require.True(t, nf.PprofEnabled()) } func TestNodeFacade_RestAPIServerDebugMode(t *testing.T) { @@ -563,7 +636,7 @@ func TestNodeFacade_RestAPIServerDebugMode(t *testing.T) { arg.RestAPIServerDebugMode = true nf, _ := NewNodeFacade(arg) - assert.True(t, nf.RestAPIServerDebugMode()) + require.True(t, nf.RestAPIServerDebugMode()) } func TestNodeFacade_CreateTransaction(t *testing.T) { @@ -582,14 +655,13 @@ func TestNodeFacade_CreateTransaction(t *testing.T) { _, _, _ = nf.CreateTransaction(&external.ArgsCreateTransaction{}) - assert.True(t, nodeCreateTxWasCalled) + require.True(t, nodeCreateTxWasCalled) } func TestNodeFacade_Trigger(t *testing.T) { t.Parallel() wasCalled := false - expectedErr := errors.New("expected err") arg := createMockArguments() epoch := uint32(4638) recoveredEpoch := uint32(0) @@ -607,10 +679,10 @@ func TestNodeFacade_Trigger(t *testing.T) { err := nf.Trigger(epoch, true) - assert.True(t, wasCalled) - assert.Equal(t, expectedErr, err) - assert.Equal(t, epoch, atomic.LoadUint32(&recoveredEpoch)) - assert.True(t, recoveredWithEarlyEndOfEpoch.IsSet()) + require.True(t, wasCalled) + require.Equal(t, expectedErr, err) + require.Equal(t, epoch, atomic.LoadUint32(&recoveredEpoch)) + require.True(t, recoveredWithEarlyEndOfEpoch.IsSet()) } func TestNodeFacade_IsSelfTrigger(t *testing.T) { @@ -628,8 +700,8 @@ func TestNodeFacade_IsSelfTrigger(t *testing.T) { isSelf := nf.IsSelfTrigger() - assert.True(t, wasCalled) - assert.True(t, isSelf) + require.True(t, wasCalled) + require.True(t, isSelf) } func TestNodeFacade_EncodeDecodeAddressPubkey(t *testing.T) { @@ -639,12 +711,12 @@ func TestNodeFacade_EncodeDecodeAddressPubkey(t *testing.T) { arg := createMockArguments() nf, _ := NewNodeFacade(arg) encoded, err := nf.EncodeAddressPubkey(buff) - assert.Nil(t, err) + require.NoError(t, err) recoveredBytes, err := nf.DecodeAddressPubkey(encoded) - assert.Nil(t, err) - assert.Equal(t, buff, recoveredBytes) + require.NoError(t, err) + 
require.Equal(t, buff, recoveredBytes) } func TestNodeFacade_GetQueryHandler(t *testing.T) { @@ -663,9 +735,9 @@ func TestNodeFacade_GetQueryHandler(t *testing.T) { qh, err := nf.GetQueryHandler("") - assert.Nil(t, qh) - assert.Nil(t, err) - assert.True(t, wasCalled) + require.Nil(t, qh) + require.NoError(t, err) + require.True(t, wasCalled) } func TestNodeFacade_GetPeerInfo(t *testing.T) { @@ -684,8 +756,20 @@ func TestNodeFacade_GetPeerInfo(t *testing.T) { val, err := nf.GetPeerInfo("") - assert.Nil(t, err) - assert.Equal(t, []core.QueryP2PPeerInfo{pinfo}, val) + require.NoError(t, err) + require.Equal(t, []core.QueryP2PPeerInfo{pinfo}, val) +} + +func TestNodeFacade_GetThrottlerForEndpointAntifloodDisabledShouldReturnDisabled(t *testing.T) { + t.Parallel() + + arg := createMockArguments() + nf, _ := NewNodeFacade(arg) + + thr, ok := nf.GetThrottlerForEndpoint("any-endpoint") + require.NotNil(t, thr) + require.True(t, ok) + require.Equal(t, "*disabled.disabledThrottler", fmt.Sprintf("%T", thr)) } func TestNodeFacade_GetThrottlerForEndpointNoConfigShouldReturnNilAndFalse(t *testing.T) { @@ -698,8 +782,8 @@ func TestNodeFacade_GetThrottlerForEndpointNoConfigShouldReturnNilAndFalse(t *te thr, ok := nf.GetThrottlerForEndpoint("any-endpoint") - assert.Nil(t, thr) - assert.False(t, ok) + require.Nil(t, thr) + require.False(t, ok) } func TestNodeFacade_GetThrottlerForEndpointNotFoundShouldReturnNilAndFalse(t *testing.T) { @@ -717,8 +801,8 @@ func TestNodeFacade_GetThrottlerForEndpointNotFoundShouldReturnNilAndFalse(t *te thr, ok := nf.GetThrottlerForEndpoint("different-endpoint") - assert.Nil(t, thr) - assert.False(t, ok) + require.Nil(t, thr) + require.False(t, ok) } func TestNodeFacade_GetThrottlerForEndpointShouldFindAndReturn(t *testing.T) { @@ -736,8 +820,8 @@ func TestNodeFacade_GetThrottlerForEndpointShouldFindAndReturn(t *testing.T) { thr, ok := nf.GetThrottlerForEndpoint("endpoint") - assert.NotNil(t, thr) - assert.True(t, ok) + require.NotNil(t, thr) + 
require.True(t, ok) } func TestNodeFacade_GetKeyValuePairs(t *testing.T) { @@ -754,8 +838,8 @@ func TestNodeFacade_GetKeyValuePairs(t *testing.T) { nf, _ := NewNodeFacade(arg) res, _, err := nf.GetKeyValuePairs("addr", api.AccountQueryOptions{}) - assert.NoError(t, err) - assert.Equal(t, expectedPairs, res) + require.NoError(t, err) + require.Equal(t, expectedPairs, res) } func TestNodeFacade_GetGuardianData(t *testing.T) { @@ -764,7 +848,6 @@ func TestNodeFacade_GetGuardianData(t *testing.T) { emptyGuardianData := api.GuardianData{} testAddress := "test address" - expectedErr := errors.New("expected error") expectedGuardianData := api.GuardianData{ ActiveGuardian: &api.Guardian{ @@ -789,14 +872,14 @@ func TestNodeFacade_GetGuardianData(t *testing.T) { t.Run("with error", func(t *testing.T) { nf, _ := NewNodeFacade(arg) res, _, err := nf.GetGuardianData("", api.AccountQueryOptions{}) - assert.Equal(t, expectedErr, err) - assert.Equal(t, emptyGuardianData, res) + require.Equal(t, expectedErr, err) + require.Equal(t, emptyGuardianData, res) }) t.Run("ok", func(t *testing.T) { nf, _ := NewNodeFacade(arg) res, _, err := nf.GetGuardianData(testAddress, api.AccountQueryOptions{}) - assert.NoError(t, err) - assert.Equal(t, expectedGuardianData, res) + require.NoError(t, err) + require.Equal(t, expectedGuardianData, res) }) } @@ -817,8 +900,8 @@ func TestNodeFacade_GetAllESDTTokens(t *testing.T) { nf, _ := NewNodeFacade(arg) res, _, err := nf.GetAllESDTTokens("addr", api.AccountQueryOptions{}) - assert.NoError(t, err) - assert.Equal(t, expectedTokens, res) + require.NoError(t, err) + require.Equal(t, expectedTokens, res) } func TestNodeFacade_GetESDTData(t *testing.T) { @@ -837,8 +920,8 @@ func TestNodeFacade_GetESDTData(t *testing.T) { nf, _ := NewNodeFacade(arg) res, _, err := nf.GetESDTData("addr", "tkn", 0, api.AccountQueryOptions{}) - assert.NoError(t, err) - assert.Equal(t, expectedData, res) + require.NoError(t, err) + require.Equal(t, expectedData, res) } func 
TestNodeFacade_GetValueForKey(t *testing.T) { @@ -855,8 +938,8 @@ func TestNodeFacade_GetValueForKey(t *testing.T) { nf, _ := NewNodeFacade(arg) res, _, err := nf.GetValueForKey("addr", "key", api.AccountQueryOptions{}) - assert.NoError(t, err) - assert.Equal(t, expectedValue, res) + require.NoError(t, err) + require.Equal(t, expectedValue, res) } func TestNodeFacade_GetAllIssuedESDTs(t *testing.T) { @@ -873,8 +956,8 @@ func TestNodeFacade_GetAllIssuedESDTs(t *testing.T) { nf, _ := NewNodeFacade(arg) res, err := nf.GetAllIssuedESDTs("") - assert.NoError(t, err) - assert.Equal(t, expectedValue, res) + require.NoError(t, err) + require.Equal(t, expectedValue, res) } func TestNodeFacade_GetESDTsWithRole(t *testing.T) { @@ -929,7 +1012,7 @@ func TestNodeFacade_GetAllIssuedESDTsWithError(t *testing.T) { nf, _ := NewNodeFacade(arg) _, err := nf.GetAllIssuedESDTs("") - assert.Equal(t, err, localErr) + require.Equal(t, err, localErr) } func TestNodeFacade_ValidateTransactionForSimulation(t *testing.T) { @@ -946,8 +1029,8 @@ func TestNodeFacade_ValidateTransactionForSimulation(t *testing.T) { nf, _ := NewNodeFacade(arg) err := nf.ValidateTransactionForSimulation(&transaction.Transaction{}, false) - assert.Nil(t, err) - assert.True(t, called) + require.NoError(t, err) + require.True(t, called) } func TestNodeFacade_GetTotalStakedValue(t *testing.T) { @@ -964,8 +1047,8 @@ func TestNodeFacade_GetTotalStakedValue(t *testing.T) { nf, _ := NewNodeFacade(arg) _, err := nf.GetTotalStakedValue() - assert.Nil(t, err) - assert.True(t, called) + require.NoError(t, err) + require.True(t, called) } func TestNodeFacade_GetDelegatorsList(t *testing.T) { @@ -982,8 +1065,8 @@ func TestNodeFacade_GetDelegatorsList(t *testing.T) { nf, _ := NewNodeFacade(arg) _, err := nf.GetDelegatorsList() - assert.Nil(t, err) - assert.True(t, called) + require.NoError(t, err) + require.True(t, called) } func TestNodeFacade_GetDirectStakedList(t *testing.T) { @@ -1000,8 +1083,8 @@ func 
TestNodeFacade_GetDirectStakedList(t *testing.T) { nf, _ := NewNodeFacade(arg) _, err := nf.GetDirectStakedList() - assert.Nil(t, err) - assert.True(t, called) + require.NoError(t, err) + require.True(t, called) } func TestNodeFacade_GetProofCurrentRootHashIsEmptyShouldErr(t *testing.T) { @@ -1016,8 +1099,8 @@ func TestNodeFacade_GetProofCurrentRootHashIsEmptyShouldErr(t *testing.T) { nf, _ := NewNodeFacade(arg) response, err := nf.GetProofCurrentRootHash("addr") - assert.Nil(t, response) - assert.Equal(t, ErrEmptyRootHash, err) + require.Nil(t, response) + require.Equal(t, ErrEmptyRootHash, err) } func TestNodeFacade_GetProof(t *testing.T) { @@ -1037,8 +1120,8 @@ func TestNodeFacade_GetProof(t *testing.T) { nf, _ := NewNodeFacade(arg) response, err := nf.GetProof("hash", "addr") - assert.Nil(t, err) - assert.Equal(t, expectedResponse, response) + require.NoError(t, err) + require.Equal(t, expectedResponse, response) } func TestNodeFacade_GetProofCurrentRootHash(t *testing.T) { @@ -1058,8 +1141,8 @@ func TestNodeFacade_GetProofCurrentRootHash(t *testing.T) { nf, _ := NewNodeFacade(arg) response, err := nf.GetProofCurrentRootHash("addr") - assert.Nil(t, err) - assert.Equal(t, expectedResponse, response) + require.NoError(t, err) + require.Equal(t, expectedResponse, response) } func TestNodeFacade_GetProofDataTrie(t *testing.T) { @@ -1084,9 +1167,9 @@ func TestNodeFacade_GetProofDataTrie(t *testing.T) { nf, _ := NewNodeFacade(arg) mainTrieResponse, dataTrieResponse, err := nf.GetProofDataTrie("hash", "addr", "key") - assert.Nil(t, err) - assert.Equal(t, expectedResponseMainTrie, mainTrieResponse) - assert.Equal(t, expectedResponseDataTrie, dataTrieResponse) + require.NoError(t, err) + require.Equal(t, expectedResponseMainTrie, mainTrieResponse) + require.Equal(t, expectedResponseDataTrie, dataTrieResponse) } func TestNodeFacade_VerifyProof(t *testing.T) { @@ -1101,47 +1184,66 @@ func TestNodeFacade_VerifyProof(t *testing.T) { nf, _ := NewNodeFacade(arg) response, err 
:= nf.VerifyProof("hash", "addr", [][]byte{[]byte("proof")}) - assert.Nil(t, err) - assert.True(t, response) + require.NoError(t, err) + require.True(t, response) } func TestNodeFacade_ExecuteSCQuery(t *testing.T) { t.Parallel() - executeScQueryHandlerWasCalled := false - arg := createMockArguments() + t.Run("should error", func(t *testing.T) { + t.Parallel() - expectedAddress := []byte("addr") - expectedBalance := big.NewInt(37) - expectedVmOutput := &vmcommon.VMOutput{ - ReturnData: [][]byte{[]byte("test return data")}, - ReturnCode: vmcommon.AccountCollision, - OutputAccounts: map[string]*vmcommon.OutputAccount{ - "key0": { - Address: expectedAddress, - Balance: expectedBalance, + arg := createMockArguments() + arg.ApiResolver = &mock.ApiResolverStub{ + ExecuteSCQueryHandler: func(_ *process.SCQuery) (*vmcommon.VMOutput, error) { + return nil, expectedErr }, - }, - } - arg.ApiResolver = &mock.ApiResolverStub{ - ExecuteSCQueryHandler: func(_ *process.SCQuery) (*vmcommon.VMOutput, error) { - executeScQueryHandlerWasCalled = true - return expectedVmOutput, nil - }, - } + } - nf, _ := NewNodeFacade(arg) + nf, _ := NewNodeFacade(arg) - apiVmOutput, err := nf.ExecuteSCQuery(&process.SCQuery{}) - require.NoError(t, err) - require.True(t, executeScQueryHandlerWasCalled) - require.Equal(t, expectedVmOutput.ReturnData, apiVmOutput.ReturnData) - require.Equal(t, expectedVmOutput.ReturnCode.String(), apiVmOutput.ReturnCode) - require.Equal(t, 1, len(apiVmOutput.OutputAccounts)) + _, err := nf.ExecuteSCQuery(&process.SCQuery{}) + require.Equal(t, expectedErr, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + executeScQueryHandlerWasCalled := false + arg := createMockArguments() + + expectedAddress := []byte("addr") + expectedBalance := big.NewInt(37) + expectedVmOutput := &vmcommon.VMOutput{ + ReturnData: [][]byte{[]byte("test return data")}, + ReturnCode: vmcommon.AccountCollision, + OutputAccounts: map[string]*vmcommon.OutputAccount{ + "key0": { + 
Address: expectedAddress, + Balance: expectedBalance, + }, + }, + } + arg.ApiResolver = &mock.ApiResolverStub{ + ExecuteSCQueryHandler: func(_ *process.SCQuery) (*vmcommon.VMOutput, error) { + executeScQueryHandlerWasCalled = true + return expectedVmOutput, nil + }, + } + + nf, _ := NewNodeFacade(arg) - outputAccount := apiVmOutput.OutputAccounts[hex.EncodeToString([]byte("key0"))] - require.Equal(t, expectedBalance, outputAccount.Balance) - require.Equal(t, hex.EncodeToString(expectedAddress), outputAccount.Address) + apiVmOutput, err := nf.ExecuteSCQuery(&process.SCQuery{}) + require.NoError(t, err) + require.True(t, executeScQueryHandlerWasCalled) + require.Equal(t, expectedVmOutput.ReturnData, apiVmOutput.ReturnData) + require.Equal(t, expectedVmOutput.ReturnCode.String(), apiVmOutput.ReturnCode) + require.Equal(t, 1, len(apiVmOutput.OutputAccounts)) + + outputAccount := apiVmOutput.OutputAccounts[hex.EncodeToString([]byte("key0"))] + require.Equal(t, expectedBalance, outputAccount.Balance) + require.Equal(t, hex.EncodeToString(expectedAddress), outputAccount.Address) + }) } func TestNodeFacade_GetBlockByRoundShouldWork(t *testing.T) { @@ -1162,8 +1264,8 @@ func TestNodeFacade_GetBlockByRoundShouldWork(t *testing.T) { nf, _ := NewNodeFacade(arg) ret, err := nf.GetBlockByRound(0, api.BlockQueryOptions{}) - assert.Nil(t, err) - assert.Equal(t, ret, blk) + require.NoError(t, err) + require.Equal(t, ret, blk) } // ---- MetaBlock @@ -1186,8 +1288,8 @@ func TestNodeFacade_GetInternalMetaBlockByNonceShouldWork(t *testing.T) { nf, _ := NewNodeFacade(arg) ret, err := nf.GetInternalMetaBlockByNonce(common.ApiOutputFormatProto, 0) - assert.Nil(t, err) - assert.Equal(t, ret, blk) + require.NoError(t, err) + require.Equal(t, ret, blk) } func TestNodeFacade_GetInternalMetaBlockByRoundShouldWork(t *testing.T) { @@ -1208,8 +1310,8 @@ func TestNodeFacade_GetInternalMetaBlockByRoundShouldWork(t *testing.T) { nf, _ := NewNodeFacade(arg) ret, err := 
nf.GetInternalMetaBlockByRound(common.ApiOutputFormatProto, 0) - assert.Nil(t, err) - assert.Equal(t, ret, blk) + require.NoError(t, err) + require.Equal(t, ret, blk) } func TestNodeFacade_GetInternalMetaBlockByHashShouldWork(t *testing.T) { @@ -1230,8 +1332,8 @@ func TestNodeFacade_GetInternalMetaBlockByHashShouldWork(t *testing.T) { nf, _ := NewNodeFacade(arg) ret, err := nf.GetInternalMetaBlockByHash(common.ApiOutputFormatProto, "dummyhash") - assert.Nil(t, err) - assert.Equal(t, ret, blk) + require.NoError(t, err) + require.Equal(t, ret, blk) } // ---- ShardBlock @@ -1254,8 +1356,8 @@ func TestNodeFacade_GetInternalShardBlockByNonceShouldWork(t *testing.T) { nf, _ := NewNodeFacade(arg) ret, err := nf.GetInternalShardBlockByNonce(common.ApiOutputFormatProto, 0) - assert.Nil(t, err) - assert.Equal(t, ret, blk) + require.NoError(t, err) + require.Equal(t, ret, blk) } func TestNodeFacade_GetInternalShardBlockByRoundShouldWork(t *testing.T) { @@ -1276,8 +1378,8 @@ func TestNodeFacade_GetInternalShardBlockByRoundShouldWork(t *testing.T) { nf, _ := NewNodeFacade(arg) ret, err := nf.GetInternalShardBlockByRound(common.ApiOutputFormatProto, 0) - assert.Nil(t, err) - assert.Equal(t, ret, blk) + require.NoError(t, err) + require.Equal(t, ret, blk) } func TestNodeFacade_GetInternalShardBlockByHashShouldWork(t *testing.T) { @@ -1298,8 +1400,8 @@ func TestNodeFacade_GetInternalShardBlockByHashShouldWork(t *testing.T) { nf, _ := NewNodeFacade(arg) ret, err := nf.GetInternalShardBlockByHash(common.ApiOutputFormatProto, "dummyhash") - assert.Nil(t, err) - assert.Equal(t, ret, blk) + require.NoError(t, err) + require.Equal(t, ret, blk) } func TestNodeFacade_GetInternalMiniBlockByHashShouldWork(t *testing.T) { @@ -1320,8 +1422,8 @@ func TestNodeFacade_GetInternalMiniBlockByHashShouldWork(t *testing.T) { nf, _ := NewNodeFacade(arg) ret, err := nf.GetInternalMiniBlockByHash(common.ApiOutputFormatProto, "dummyhash", 1) - assert.Nil(t, err) - assert.Equal(t, ret, blk) + 
require.NoError(t, err) + require.Equal(t, ret, blk) } func TestFacade_convertVmOutputToApiResponse(t *testing.T) { @@ -1402,7 +1504,6 @@ func TestNodeFacade_GetTransactionsPool(t *testing.T) { t.Parallel() arg := createMockArguments() - expectedErr := errors.New("expected error") arg.ApiResolver = &mock.ApiResolverStub{ GetTransactionsPoolCalled: func(fields string) (*common.TransactionsPoolAPIResponse, error) { return nil, expectedErr @@ -1465,14 +1566,57 @@ func TestNodeFacade_GetTransactionsPool(t *testing.T) { }) } -func TestNodeFacade_GetGenesisBalances(t *testing.T) { +func TestNodeFacade_GetGenesisNodesPubKeys(t *testing.T) { t.Parallel() t.Run("should return error", func(t *testing.T) { t.Parallel() arg := createMockArguments() - expectedErr := errors.New("expected error") + arg.ApiResolver = &mock.ApiResolverStub{ + GetGenesisNodesPubKeysCalled: func() (map[uint32][]string, map[uint32][]string) { + return nil, nil + }, + } + + nf, _ := NewNodeFacade(arg) + eligible, waiting, err := nf.GetGenesisNodesPubKeys() + require.Nil(t, eligible) + require.Nil(t, waiting) + require.Equal(t, ErrNilGenesisNodes, err) + }) + t.Run("should work", func(t *testing.T) { + t.Parallel() + + providedEligible := map[uint32][]string{ + 0: {"pk1", "pk2"}, + } + providedWaiting := map[uint32][]string{ + 1: {"pk3", "pk4"}, + } + + arg := createMockArguments() + arg.ApiResolver = &mock.ApiResolverStub{ + GetGenesisNodesPubKeysCalled: func() (map[uint32][]string, map[uint32][]string) { + return providedEligible, providedWaiting + }, + } + + nf, _ := NewNodeFacade(arg) + eligible, waiting, err := nf.GetGenesisNodesPubKeys() + require.NoError(t, err) + require.Equal(t, providedEligible, eligible) + require.Equal(t, providedWaiting, waiting) + }) +} + +func TestNodeFacade_GetGenesisBalances(t *testing.T) { + t.Parallel() + + t.Run("GetGenesisBalances error should return error", func(t *testing.T) { + t.Parallel() + + arg := createMockArguments() arg.ApiResolver = &mock.ApiResolverStub{ 
GetGenesisBalancesCalled: func() ([]*common.InitialAccountAPI, error) { return nil, expectedErr @@ -1484,6 +1628,21 @@ func TestNodeFacade_GetGenesisBalances(t *testing.T) { require.Nil(t, res) require.Equal(t, expectedErr, err) }) + t.Run("GetGenesisBalances returns empty initial accounts should return error", func(t *testing.T) { + t.Parallel() + + arg := createMockArguments() + arg.ApiResolver = &mock.ApiResolverStub{ + GetGenesisBalancesCalled: func() ([]*common.InitialAccountAPI, error) { + return nil, nil + }, + } + + nf, _ := NewNodeFacade(arg) + res, err := nf.GetGenesisBalances() + require.Nil(t, res) + require.Equal(t, ErrNilGenesisBalances, err) + }) t.Run("should work", func(t *testing.T) { t.Parallel() @@ -1561,7 +1720,6 @@ func TestNodeFacade_GetTransactionsPoolForSender(t *testing.T) { t.Parallel() arg := createMockArguments() - expectedErr := errors.New("expected error") arg.ApiResolver = &mock.ApiResolverStub{ GetTransactionsPoolForSenderCalled: func(sender, fields string) (*common.TransactionsPoolForSenderApiResponse, error) { return nil, expectedErr @@ -1620,7 +1778,6 @@ func TestNodeFacade_GetLastPoolNonceForSender(t *testing.T) { t.Parallel() arg := createMockArguments() - expectedErr := errors.New("expected error") arg.ApiResolver = &mock.ApiResolverStub{ GetLastPoolNonceForSenderCalled: func(sender string) (uint64, error) { return 0, expectedErr @@ -1655,11 +1812,32 @@ func TestNodeFacade_GetLastPoolNonceForSender(t *testing.T) { func TestNodeFacade_GetTransactionsPoolNonceGapsForSender(t *testing.T) { t.Parallel() + t.Run("GetAccount error should error", func(t *testing.T) { + t.Parallel() + + arg := createMockArguments() + arg.Node = &mock.NodeStub{ + GetAccountCalled: func(address string, options api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) { + return api.AccountResponse{}, api.BlockInfo{}, expectedErr + }, + } + arg.ApiResolver = &mock.ApiResolverStub{ + GetTransactionsPoolNonceGapsForSenderCalled: func(sender 
string, senderAccountNonce uint64) (*common.TransactionsPoolNonceGapsForSenderApiResponse, error) { + require.Fail(t, "should have not been called") + return nil, nil + }, + } + + nf, _ := NewNodeFacade(arg) + res, err := nf.GetTransactionsPoolNonceGapsForSender("") + require.Equal(t, &common.TransactionsPoolNonceGapsForSenderApiResponse{}, res) + require.Equal(t, expectedErr, err) + }) + t.Run("should error", func(t *testing.T) { t.Parallel() arg := createMockArguments() - expectedErr := errors.New("expected error") arg.Node = &mock.NodeStub{ GetAccountCalled: func(address string, options api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) { return api.AccountResponse{}, api.BlockInfo{}, nil @@ -1699,7 +1877,7 @@ func TestNodeFacade_GetTransactionsPoolNonceGapsForSender(t *testing.T) { } arg.ApiResolver = &mock.ApiResolverStub{ GetTransactionsPoolNonceGapsForSenderCalled: func(sender string, senderAccountNonce uint64) (*common.TransactionsPoolNonceGapsForSenderApiResponse, error) { - assert.Equal(t, providedNonce, senderAccountNonce) + require.Equal(t, providedNonce, senderAccountNonce) return expectedNonceGaps, nil }, } @@ -1718,7 +1896,6 @@ func TestNodeFacade_InternalValidatorsInfo(t *testing.T) { t.Parallel() arg := createMockArguments() - expectedErr := errors.New("expected error") arg.ApiResolver = &mock.ApiResolverStub{ GetInternalStartOfEpochValidatorsInfoCalled: func(epoch uint32) ([]*state.ShardValidatorInfo, error) { return nil, expectedErr @@ -1747,7 +1924,248 @@ func TestNodeFacade_InternalValidatorsInfo(t *testing.T) { nf, _ := NewNodeFacade(arg) res, err := nf.GetInternalStartOfEpochValidatorsInfo(0) require.NotNil(t, res) - require.Nil(t, err) + require.NoError(t, err) require.True(t, wasCalled) }) } + +func TestNodeFacade_GetESDTsRoles(t *testing.T) { + t.Parallel() + + expectedResponse := map[string][]string{ + "key": {"val1", "val2"}, + } + args := createMockArguments() + args.WsAntifloodConfig.WebServerAntifloodEnabled = true 
// coverage + + args.Node = &mock.NodeStub{ + GetESDTsRolesCalled: func(address string, options api.AccountQueryOptions, ctx context.Context) (map[string][]string, api.BlockInfo, error) { + return expectedResponse, api.BlockInfo{}, nil + }, + } + + nf, _ := NewNodeFacade(args) + + res, _, err := nf.GetESDTsRoles("address", api.AccountQueryOptions{}) + require.NoError(t, err) + require.Equal(t, expectedResponse, res) +} + +func TestNodeFacade_GetTokenSupply(t *testing.T) { + t.Parallel() + + providedResponse := &api.ESDTSupply{ + Supply: "1000", + Burned: "500", + Minted: "1500", + } + args := createMockArguments() + args.Node = &mock.NodeStub{ + GetTokenSupplyCalled: func(token string) (*api.ESDTSupply, error) { + return providedResponse, nil + }, + } + + nf, _ := NewNodeFacade(args) + + response, err := nf.GetTokenSupply("token") + require.NoError(t, err) + require.Equal(t, providedResponse, response) +} + +func TestNodeFacade_ValidateTransaction(t *testing.T) { + t.Parallel() + + args := createMockArguments() + wasCalled := false + args.Node = &mock.NodeStub{ + ValidateTransactionHandler: func(tx *transaction.Transaction) error { + wasCalled = true + return nil + }, + } + + nf, _ := NewNodeFacade(args) + + err := nf.ValidateTransaction(&transaction.Transaction{}) + require.NoError(t, err) + require.True(t, wasCalled) +} + +func TestNodeFacade_SimulateTransactionExecution(t *testing.T) { + t.Parallel() + + providedResponse := &txSimData.SimulationResults{ + Status: "ok", + FailReason: "no reason", + ScResults: nil, + Receipts: nil, + Hash: "hash", + } + args := createMockArguments() + args.TxSimulatorProcessor = &mock.TxExecutionSimulatorStub{ + ProcessTxCalled: func(tx *transaction.Transaction) (*txSimData.SimulationResults, error) { + return providedResponse, nil + }, + } + + nf, _ := NewNodeFacade(args) + + response, err := nf.SimulateTransactionExecution(&transaction.Transaction{}) + require.NoError(t, err) + require.Equal(t, providedResponse, response) +} + 
+func TestNodeFacade_ComputeTransactionGasLimit(t *testing.T) { + t.Parallel() + + providedResponse := &transaction.CostResponse{ + GasUnits: 10, + } + args := createMockArguments() + args.ApiResolver = &mock.ApiResolverStub{ + ComputeTransactionGasLimitHandler: func(tx *transaction.Transaction) (*transaction.CostResponse, error) { + return providedResponse, nil + }, + } + + nf, _ := NewNodeFacade(args) + + response, err := nf.ComputeTransactionGasLimit(&transaction.Transaction{}) + require.NoError(t, err) + require.Equal(t, providedResponse, response) +} + +func TestNodeFacade_GetEpochStartDataAPI(t *testing.T) { + t.Parallel() + + providedResponse := &common.EpochStartDataAPI{ + Nonce: 1, + Round: 2, + Shard: 3, + Timestamp: 4, + } + args := createMockArguments() + args.Node = &mock.NodeStub{ + GetEpochStartDataAPICalled: func(epoch uint32) (*common.EpochStartDataAPI, error) { + return providedResponse, nil + }, + } + nf, _ := NewNodeFacade(args) + + response, err := nf.GetEpochStartDataAPI(0) + require.NoError(t, err) + require.Equal(t, providedResponse, response) +} + +func TestNodeFacade_GetConnectedPeersRatings(t *testing.T) { + t.Parallel() + + providedResponse := "ratings" + args := createMockArguments() + args.Node = &mock.NodeStub{ + GetConnectedPeersRatingsCalled: func() string { + return providedResponse + }, + } + nf, _ := NewNodeFacade(args) + + response := nf.GetConnectedPeersRatings() + require.Equal(t, providedResponse, response) +} + +func TestNodeFacade_GetBlockByHash(t *testing.T) { + t.Parallel() + + providedResponse := &api.Block{ + Nonce: 123, + Round: 321, + } + args := createMockArguments() + args.ApiResolver = &mock.ApiResolverStub{ + GetBlockByHashCalled: func(hash string, options api.BlockQueryOptions) (*api.Block, error) { + return providedResponse, nil + }, + } + nf, _ := NewNodeFacade(args) + + response, err := nf.GetBlockByHash("hash", api.BlockQueryOptions{}) + require.NoError(t, err) + require.Equal(t, providedResponse, response) 
+} + +func TestNodeFacade_GetBlockByNonce(t *testing.T) { + t.Parallel() + + providedResponse := &api.Block{ + Nonce: 123, + Round: 321, + } + args := createMockArguments() + args.ApiResolver = &mock.ApiResolverStub{ + GetBlockByNonceCalled: func(nonce uint64, options api.BlockQueryOptions) (*api.Block, error) { + return providedResponse, nil + }, + } + nf, _ := NewNodeFacade(args) + + response, err := nf.GetBlockByNonce(0, api.BlockQueryOptions{}) + require.NoError(t, err) + require.Equal(t, providedResponse, response) +} + +func TestNodeFacade_GetAlteredAccountsForBlock(t *testing.T) { + t.Parallel() + + providedResponse := []*outport.AlteredAccount{ + { + Nonce: 123, + Address: "address", + }, + } + args := createMockArguments() + args.ApiResolver = &mock.ApiResolverStub{ + GetAlteredAccountsForBlockCalled: func(options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) { + return providedResponse, nil + }, + } + nf, _ := NewNodeFacade(args) + + response, err := nf.GetAlteredAccountsForBlock(api.GetAlteredAccountsForBlockOptions{}) + require.NoError(t, err) + require.Equal(t, providedResponse, response) +} + +func TestNodeFacade_GetInternalStartOfEpochMetaBlock(t *testing.T) { + t.Parallel() + + providedResponse := "meta block" + args := createMockArguments() + args.ApiResolver = &mock.ApiResolverStub{ + GetInternalStartOfEpochMetaBlockCalled: func(format common.ApiOutputFormat, epoch uint32) (interface{}, error) { + return providedResponse, nil + }, + } + nf, _ := NewNodeFacade(args) + + response, err := nf.GetInternalStartOfEpochMetaBlock(0, 0) + require.NoError(t, err) + require.Equal(t, providedResponse, response) +} + +func TestNodeFacade_Close(t *testing.T) { + t.Parallel() + + nf, _ := NewNodeFacade(createMockArguments()) + require.NoError(t, nf.Close()) +} + +func TestNodeFacade_IsInterfaceNil(t *testing.T) { + t.Parallel() + + var nf *nodeFacade + require.True(t, nf.IsInterfaceNil()) + + nf, _ = 
NewNodeFacade(createMockArguments()) + require.False(t, nf.IsInterfaceNil()) +} From dd4c89a494d853e433785ba8879d214fbb8ac952 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Thu, 11 May 2023 17:26:10 +0300 Subject: [PATCH 166/221] fixes after review --- facade/nodeFacade.go | 5 ----- facade/nodeFacade_test.go | 2 +- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/facade/nodeFacade.go b/facade/nodeFacade.go index eb2523e08a9..edaf057567b 100644 --- a/facade/nodeFacade.go +++ b/facade/nodeFacade.go @@ -68,8 +68,6 @@ type nodeFacade struct { accountsState state.AccountsAdapter peerState state.AccountsAdapter blockchain chainData.ChainHandler - ctx context.Context - cancelFunc func() } // NewNodeFacade creates a new Facade with a NodeWrapper @@ -115,7 +113,6 @@ func NewNodeFacade(arg ArgNodeFacade) (*nodeFacade, error) { peerState: arg.PeerState, blockchain: arg.Blockchain, } - nf.ctx, nf.cancelFunc = context.WithCancel(context.Background()) return nf, nil } @@ -557,8 +554,6 @@ func (nf *nodeFacade) GetInternalMiniBlockByHash(format common.ApiOutputFormat, func (nf *nodeFacade) Close() error { log.LogIfError(nf.apiResolver.Close()) - nf.cancelFunc() - return nil } diff --git a/facade/nodeFacade_test.go b/facade/nodeFacade_test.go index bced65b3788..f0a378a92ec 100644 --- a/facade/nodeFacade_test.go +++ b/facade/nodeFacade_test.go @@ -346,7 +346,7 @@ func TestNodeFacade_GetAccount(t *testing.T) { arg := createMockArguments() arg.Node = &mock.NodeStub{ - GetAccountCalled: func(address string, _ api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) { + GetAccountCalled: func(_ string, _ api.AccountQueryOptions) (api.AccountResponse, api.BlockInfo, error) { return api.AccountResponse{}, api.BlockInfo{}, expectedErr }, } From ed65313c51f3c48d65eca19482f3fdcfdd3985cb Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Fri, 12 May 2023 12:59:49 +0300 Subject: [PATCH 167/221] fix after review --- factory/state/stateComponentsHandler.go | 2 
+- state/syncer/missingTrieNodesNotifier_test.go | 1 + state/syncer/userAccountsSyncer.go | 17 +++++++++++------ 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/factory/state/stateComponentsHandler.go b/factory/state/stateComponentsHandler.go index 6adb9edc259..78271a28ffe 100644 --- a/factory/state/stateComponentsHandler.go +++ b/factory/state/stateComponentsHandler.go @@ -211,7 +211,7 @@ func (msc *managedStateComponents) MissingTrieNodesNotifier() common.MissingTrie return nil } - return msc.missingTrieNodesNotifier + return msc.stateComponents.missingTrieNodesNotifier } // IsInterfaceNil returns true if the interface is nil diff --git a/state/syncer/missingTrieNodesNotifier_test.go b/state/syncer/missingTrieNodesNotifier_test.go index 3609fac3636..64682333ff4 100644 --- a/state/syncer/missingTrieNodesNotifier_test.go +++ b/state/syncer/missingTrieNodesNotifier_test.go @@ -23,6 +23,7 @@ func TestMissingTrieNodesNotifier_RegisterHandler(t *testing.T) { err := notifier.RegisterHandler(nil) assert.Equal(t, common.ErrNilStateSyncNotifierSubscriber, err) + assert.Equal(t, 0, notifier.GetNumHandlers()) err = notifier.RegisterHandler(&testscommon.StateSyncNotifierSubscriberStub{}) assert.Nil(t, err) diff --git a/state/syncer/userAccountsSyncer.go b/state/syncer/userAccountsSyncer.go index 9249ce21ce5..75dabb6f319 100644 --- a/state/syncer/userAccountsSyncer.go +++ b/state/syncer/userAccountsSyncer.go @@ -152,7 +152,7 @@ func (u *userAccountsSyncer) syncDataTrie(rootHash []byte, address []byte, ctx c u.dataTries[string(rootHash)] = struct{}{} u.syncerMutex.Unlock() - trieSyncer, err := u.createAndStartSyncer(ctx, rootHash) + trieSyncer, err := u.createAndStartSyncer(ctx, rootHash, u.checkNodesOnDisk) if err != nil { return err } @@ -162,7 +162,11 @@ func (u *userAccountsSyncer) syncDataTrie(rootHash []byte, address []byte, ctx c return nil } -func (u *userAccountsSyncer) createAndStartSyncer(ctx context.Context, hash []byte) (trie.TrieSyncer, error) { 
+func (u *userAccountsSyncer) createAndStartSyncer( + ctx context.Context, + hash []byte, + checkNodesOnDisk bool, +) (trie.TrieSyncer, error) { arg := trie.ArgTrieSyncer{ RequestHandler: u.requestHandler, InterceptedNodes: u.cacher, @@ -174,7 +178,7 @@ func (u *userAccountsSyncer) createAndStartSyncer(ctx context.Context, hash []by TrieSyncStatistics: u.userAccountsSyncStatisticsHandler, TimeoutHandler: u.timeoutHandler, MaxHardCapForMissingNodes: u.maxHardCapForMissingNodes, - CheckNodesOnDisk: u.checkNodesOnDisk, + CheckNodesOnDisk: checkNodesOnDisk, } trieSyncer, err := trie.CreateTrieSyncer(arg, u.trieSyncerVersion) if err != nil { @@ -336,8 +340,7 @@ func (u *userAccountsSyncer) resetTimeoutHandlerWatchdog() { // MissingDataTrieNodeFound is called whenever a missing data trie node is found. // This will trigger the sync process for the whole sub trie, starting from the given hash. func (u *userAccountsSyncer) MissingDataTrieNodeFound(hash []byte) { - u.mutex.Lock() - defer u.mutex.Unlock() + defer u.printDataTrieStatistics() u.timeoutHandler.ResetWatchdog() @@ -347,12 +350,14 @@ func (u *userAccountsSyncer) MissingDataTrieNodeFound(hash []byte) { cancel() }() - _, err := u.createAndStartSyncer(ctx, hash) + trieSyncer, err := u.createAndStartSyncer(ctx, hash, true) if err != nil { log.Error("cannot sync trie", "err", err, "hash", hash) return } + u.updateDataTrieStatistics(trieSyncer, hash) + log.Debug("finished sync data trie", "hash", hash) } From ae1775f4ac30a24c290ab3685c61bb472ba122cb Mon Sep 17 00:00:00 2001 From: Iuga Mihai Date: Fri, 12 May 2023 13:00:35 +0300 Subject: [PATCH 168/221] fixes --- cmd/node/config/external.toml | 9 ++++--- config/externalConfig.go | 2 +- go.mod | 2 +- go.sum | 4 ++-- outport/factory/hostDriverFactory.go | 3 ++- outport/factory/hostDriverFactory_test.go | 29 +++++++++++++++++++++++ outport/host/driver.go | 6 ++--- 7 files changed, 44 insertions(+), 11 deletions(-) diff --git a/cmd/node/config/external.toml 
b/cmd/node/config/external.toml index 7de429cf6cc..03a13a61ed0 100644 --- a/cmd/node/config/external.toml +++ b/cmd/node/config/external.toml @@ -44,13 +44,16 @@ [HostDriverConfig] # This flag shall only be used for observer nodes Enabled = false - # This flag will start the WebSocket connector as server or client - IsServer = false - # The url of the WebSocket client/server + # This flag will start the WebSocket connector as server or client( can be "client" or "server") + Mode = "client" + # URL for the WebSocket client/server connection + # This value represents the IP address and port number that the WebSocket client or server will use to establish a connection. URL = "127.0.0.1:22111" + # After a message will be sent it will wait for an ack message if this flag is enabled WithAcknowledge = true # Currently, only "json" is supported. In the future, "gogo protobuf" could also be supported MarshallerType = "json" # The number of seconds when the client will try again to send the data RetryDurationInSec = 5 + # Signals if in case of data payload processing error, we should send the ack signal or not BlockingAckOnError = false diff --git a/config/externalConfig.go b/config/externalConfig.go index fba21834846..f5609ed3a6c 100644 --- a/config/externalConfig.go +++ b/config/externalConfig.go @@ -41,10 +41,10 @@ type CovalentConfig struct { // HostDriverConfig will hold the configuration for WebSocket driver type HostDriverConfig struct { Enabled bool - IsServer bool WithAcknowledge bool BlockingAckOnError bool URL string MarshallerType string + Mode string RetryDurationInSec int } diff --git a/go.mod b/go.mod index 64728f6e5e8..642937773a7 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-communication-go v0.0.0-20230511105730-3400290e42c0 + github.com/multiversx/mx-chain-communication-go 
v0.0.0-20230512095548-5bc637293104 github.com/multiversx/mx-chain-core-go v1.2.1-0.20230510143029-ab37792342df github.com/multiversx/mx-chain-crypto-go v1.2.5 github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96 diff --git a/go.sum b/go.sum index c48acfd3bda..b3eca275b43 100644 --- a/go.sum +++ b/go.sum @@ -609,8 +609,8 @@ github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2 github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= -github.com/multiversx/mx-chain-communication-go v0.0.0-20230511105730-3400290e42c0 h1:dZa9ZfN9R605VZYJNhC36eSXJumADO6bHNZMhMdMLfg= -github.com/multiversx/mx-chain-communication-go v0.0.0-20230511105730-3400290e42c0/go.mod h1:GPHOm4HSXbvC0IotMziWXQmhtsUe69ScBPYsb+mF9bk= +github.com/multiversx/mx-chain-communication-go v0.0.0-20230512095548-5bc637293104 h1:oFsYNkebv7TQygdEjN4aGgQ8ICLPmS9bDJmzlOHtU2Y= +github.com/multiversx/mx-chain-communication-go v0.0.0-20230512095548-5bc637293104/go.mod h1:GPHOm4HSXbvC0IotMziWXQmhtsUe69ScBPYsb+mF9bk= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.31/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.1.34/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= diff --git a/outport/factory/hostDriverFactory.go b/outport/factory/hostDriverFactory.go index a9cfce25eda..78dbac7db18 100644 --- a/outport/factory/hostDriverFactory.go +++ b/outport/factory/hostDriverFactory.go @@ -17,12 +17,13 @@ type ArgsHostDriverFactory struct { var log = logger.GetOrCreate("outport/factory/hostdriver") +// CreateHostDriver will create a new instance of outport.Driver func CreateHostDriver(args 
ArgsHostDriverFactory) (outport.Driver, error) { wsHost, err := factory.CreateWebSocketHost(factory.ArgsWebSocketHost{ WebSocketConfig: data.WebSocketConfig{ URL: args.HostConfig.URL, WithAcknowledge: args.HostConfig.WithAcknowledge, - IsServer: args.HostConfig.IsServer, + Mode: args.HostConfig.Mode, RetryDurationInSec: args.HostConfig.RetryDurationInSec, BlockingAckOnError: args.HostConfig.BlockingAckOnError, }, diff --git a/outport/factory/hostDriverFactory_test.go b/outport/factory/hostDriverFactory_test.go index 7312cd2e2d8..834fa793b6c 100644 --- a/outport/factory/hostDriverFactory_test.go +++ b/outport/factory/hostDriverFactory_test.go @@ -1 +1,30 @@ package factory + +import ( + "fmt" + "testing" + + "github.com/multiversx/mx-chain-communication-go/websocket/data" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/testscommon" + "github.com/stretchr/testify/require" +) + +func TestCreateHostDriver(t *testing.T) { + t.Parallel() + + args := ArgsHostDriverFactory{ + HostConfig: config.HostDriverConfig{ + URL: "localhost", + RetryDurationInSec: 1, + MarshallerType: "json", + Mode: data.ModeClient, + }, + Marshaller: &testscommon.MarshalizerMock{}, + } + + driver, err := CreateHostDriver(args) + require.Nil(t, err) + require.NotNil(t, driver) + require.Equal(t, "*host.hostDriver", fmt.Sprintf("%T", driver)) +} diff --git a/outport/host/driver.go b/outport/host/driver.go index 2b12afd2612..017cdfecefb 100644 --- a/outport/host/driver.go +++ b/outport/host/driver.go @@ -24,6 +24,7 @@ type hostDriver struct { log core.Logger } +// NewHostDriver will create a new instance of hostDriver func NewHostDriver(args ArgsHostDriver) (*hostDriver, error) { if check.IfNil(args.SenderHost) { return nil, ErrNilHost @@ -43,6 +44,7 @@ func NewHostDriver(args ArgsHostDriver) (*hostDriver, error) { }, nil } +// SaveBlock will handle the saving of block func (o *hostDriver) SaveBlock(outportBlock *outport.OutportBlock) error { return 
o.handleAction(outportBlock, outport.TopicSaveBlock) } @@ -89,14 +91,12 @@ func (o *hostDriver) handleAction(args interface{}, topic string) error { marshalledPayload, err := o.marshaller.Marshal(args) if err != nil { - o.log.Error("cannot marshal block", "topic", topic, "error", err) return fmt.Errorf("%w while marshaling block for topic %s", err, topic) } err = o.senderHost.Send(marshalledPayload, topic) if err != nil { - o.log.Error("cannot send on route", "topic", topic, "error", err) - return fmt.Errorf("%w while sending data on route for topic %s", err, topic) + return fmt.Errorf("%w while sendcing data on route for topic %s", err, topic) } return nil From 8a053b21fe517b13c07759b0e56ed5b262afe12b Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Fri, 12 May 2023 14:58:48 +0300 Subject: [PATCH 169/221] fixes after review --- trie/factory/trieCreator_test.go | 2 +- trie/snapshotTrieStorageManager_test.go | 6 +++--- update/container/accountDBSyncers_test.go | 5 ++--- update/container/trieSyncers_test.go | 2 +- 4 files changed, 7 insertions(+), 8 deletions(-) diff --git a/trie/factory/trieCreator_test.go b/trie/factory/trieCreator_test.go index dcde970d85b..2dba2e68981 100644 --- a/trie/factory/trieCreator_test.go +++ b/trie/factory/trieCreator_test.go @@ -176,7 +176,7 @@ func TestTrieCreator_CreateWithInvalidMaxTrieLevelInMemShouldErr(t *testing.T) { _, tr, err := tf.Create(createArgs) require.Nil(t, tr) require.NotNil(t, err) - require.True(t, strings.Contains(err.Error(), trie.ErrInvalidLevelValue.Error())) + require.Contains(t, err.Error(), trie.ErrInvalidLevelValue.Error()) } func TestTrieCreator_CreateTriesComponentsForShardId(t *testing.T) { diff --git a/trie/snapshotTrieStorageManager_test.go b/trie/snapshotTrieStorageManager_test.go index 96b5c2144df..28735e4875b 100644 --- a/trie/snapshotTrieStorageManager_test.go +++ b/trie/snapshotTrieStorageManager_test.go @@ -34,7 +34,7 @@ func TestNewSnapshotTrieStorageManager(t *testing.T) { assert.False(t, 
check.IfNil(stsm)) } -func TestNewSnapshotTrieStorageManager_Get(t *testing.T) { +func TestSnapshotTrieStorageManager_Get(t *testing.T) { t.Parallel() t.Run("closed storage manager should error", func(t *testing.T) { @@ -82,7 +82,7 @@ func TestNewSnapshotTrieStorageManager_Get(t *testing.T) { }) } -func TestNewSnapshotTrieStorageManager_Put(t *testing.T) { +func TestSnapshotTrieStorageManager_Put(t *testing.T) { t.Parallel() t.Run("closed storage manager should error", func(t *testing.T) { @@ -114,7 +114,7 @@ func TestNewSnapshotTrieStorageManager_Put(t *testing.T) { }) } -func TestNewSnapshotTrieStorageManager_GetFromLastEpoch(t *testing.T) { +func TestSnapshotTrieStorageManager_GetFromLastEpoch(t *testing.T) { t.Parallel() t.Run("closed storage manager should error", func(t *testing.T) { diff --git a/update/container/accountDBSyncers_test.go b/update/container/accountDBSyncers_test.go index 92baf0218f2..212efe90f6d 100644 --- a/update/container/accountDBSyncers_test.go +++ b/update/container/accountDBSyncers_test.go @@ -100,15 +100,14 @@ func TestAccountDBSyncers_AddMultiple(t *testing.T) { err := adsc.AddMultiple([]string{testKey0}, nil) require.Equal(t, update.ErrLenMismatch, err) }) - t.Run("duplicated keys should should error on Add", func(t *testing.T) { + t.Run("duplicated keys should error on Add", func(t *testing.T) { t.Parallel() adsc := NewAccountsDBSyncersContainer() err := adsc.AddMultiple([]string{testKey0, testKey1, testKey1}, []update.AccountsDBSyncer{testAccountsDBSyncersVal0, testAccountsDBSyncersVal1, testAccountsDBSyncersVal1}) require.Equal(t, update.ErrContainerKeyAlreadyExists, err) }) - t.Run("should work"+ - "", func(t *testing.T) { + t.Run("should work", func(t *testing.T) { t.Parallel() adsc := NewAccountsDBSyncersContainer() diff --git a/update/container/trieSyncers_test.go b/update/container/trieSyncers_test.go index f26556a55df..1778f7f931a 100644 --- a/update/container/trieSyncers_test.go +++ b/update/container/trieSyncers_test.go @@ 
-98,7 +98,7 @@ func TestTrieSyncers_AddMultiple(t *testing.T) { err := tsc.AddMultiple([]string{testKey0}, nil) require.Equal(t, update.ErrLenMismatch, err) }) - t.Run("duplicated keys should should error on Add", func(t *testing.T) { + t.Run("duplicated keys should error on Add", func(t *testing.T) { t.Parallel() tsc := NewTrieSyncersContainer() From 5ce0b7459ab81d976840a1cda7b3b2eef44b37f8 Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Fri, 12 May 2023 16:46:17 +0300 Subject: [PATCH 170/221] fix after merge --- factory/consensus/consensusComponents_test.go | 13 +-- factory/processing/processComponents_test.go | 18 ++-- go.mod | 6 +- go.sum | 10 +- integrationTests/mock/storageManagerStub.go | 91 ------------------- testscommon/factory/stateComponentsMock.go | 15 +-- trie/node.go | 10 +- trie/node_test.go | 2 +- 8 files changed, 45 insertions(+), 120 deletions(-) delete mode 100644 integrationTests/mock/storageManagerStub.go diff --git a/factory/consensus/consensusComponents_test.go b/factory/consensus/consensusComponents_test.go index 4002e886e99..184cb8d3d11 100644 --- a/factory/consensus/consensusComponents_test.go +++ b/factory/consensus/consensusComponents_test.go @@ -33,7 +33,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" stateMocks "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" - trieFactory "github.com/multiversx/mx-chain-go/trie/factory" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" "github.com/multiversx/mx-chain-go/update" "github.com/stretchr/testify/require" ) @@ -138,11 +138,12 @@ func createMockConsensusComponentsFactoryArgs() consensusComp.ConsensusComponent }, StateComponents: &factoryMocks.StateComponentsMock{ StorageManagers: map[string]common.StorageManager{ - trieFactory.UserAccountTrie: &testscommon.StorageManagerStub{}, - trieFactory.PeerAccountTrie: &testscommon.StorageManagerStub{}, + 
retriever.UserAccountsUnit.String(): &storageManager.StorageManagerStub{}, + retriever.PeerAccountsUnit.String(): &storageManager.StorageManagerStub{}, }, - Accounts: &stateMocks.AccountsStub{}, - PeersAcc: &stateMocks.AccountsStub{}, + Accounts: &stateMocks.AccountsStub{}, + PeersAcc: &stateMocks.AccountsStub{}, + MissingNodesNotifier: &testscommon.MissingTrieNodesNotifierStub{}, }, StatusComponents: &testsMocks.StatusComponentsStub{ Outport: &outportMocks.OutportStub{}, @@ -628,7 +629,7 @@ func TestConsensusComponentsFactory_Create(t *testing.T) { stateCompStub, ok := args.StateComponents.(*factoryMocks.StateComponentsMock) require.True(t, ok) stateCompStub.StorageManagers = map[string]common.StorageManager{ - trieFactory.UserAccountTrie: &testscommon.StorageManagerStub{}, + retriever.UserAccountsUnit.String(): &storageManager.StorageManagerStub{}, } // missing PeerAccountTrie processCompStub, ok := args.ProcessComponents.(*testsMocks.ProcessComponentsStub) require.True(t, ok) diff --git a/factory/processing/processComponents_test.go b/factory/processing/processComponents_test.go index 92642f808e0..391e164712c 100644 --- a/factory/processing/processComponents_test.go +++ b/factory/processing/processComponents_test.go @@ -883,10 +883,11 @@ func TestProcessComponentsFactory_Create(t *testing.T) { CommitCalled: realStateComp.AccountsAdapter().Commit, RootHashCalled: realStateComp.AccountsAdapter().RootHash, }, - PeersAcc: realStateComp.PeerAccounts(), - Tries: realStateComp.TriesContainer(), - AccountsAPI: realStateComp.AccountsAdapterAPI(), - StorageManagers: realStateComp.TrieStorageManagers(), + PeersAcc: realStateComp.PeerAccounts(), + Tries: realStateComp.TriesContainer(), + AccountsAPI: realStateComp.AccountsAdapterAPI(), + StorageManagers: realStateComp.TrieStorageManagers(), + MissingNodesNotifier: realStateComp.MissingTrieNodesNotifier(), } pcf, _ := processComp.NewProcessComponentsFactory(args) @@ -923,10 +924,11 @@ func 
TestProcessComponentsFactory_Create(t *testing.T) { CommitCalled: realStateComp.AccountsAdapter().Commit, RootHashCalled: realStateComp.AccountsAdapter().RootHash, }, - PeersAcc: realStateComp.PeerAccounts(), - Tries: realStateComp.TriesContainer(), - AccountsAPI: realStateComp.AccountsAdapterAPI(), - StorageManagers: realStateComp.TrieStorageManagers(), + PeersAcc: realStateComp.PeerAccounts(), + Tries: realStateComp.TriesContainer(), + AccountsAPI: realStateComp.AccountsAdapterAPI(), + StorageManagers: realStateComp.TrieStorageManagers(), + MissingNodesNotifier: realStateComp.MissingTrieNodesNotifier(), } coreCompStub := factoryMocks.NewCoreComponentsHolderStubFromRealComponent(args.CoreData) coreCompStub.InternalMarshalizerCalled = func() marshal.Marshalizer { diff --git a/go.mod b/go.mod index bba09a4e3c7..57dd47bbefc 100644 --- a/go.mod +++ b/go.mod @@ -13,13 +13,13 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-core-go v1.2.1 + github.com/multiversx/mx-chain-core-go v1.2.3-0.20230512130104-2a2e00c016b2 github.com/multiversx/mx-chain-crypto-go v1.2.6 github.com/multiversx/mx-chain-es-indexer-go v1.4.1 github.com/multiversx/mx-chain-logger-go v1.0.11 github.com/multiversx/mx-chain-p2p-go v1.0.16 - github.com/multiversx/mx-chain-storage-go v1.0.8 - github.com/multiversx/mx-chain-vm-common-go v1.4.1 + github.com/multiversx/mx-chain-storage-go v1.0.9-0.20230512130346-04e711f3d064 + github.com/multiversx/mx-chain-vm-common-go v1.4.2-0.20230512130259-7b26a55bcd8a github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53 github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54 github.com/multiversx/mx-chain-vm-v1_4-go v1.4.80 diff --git a/go.sum b/go.sum index 3b5e4647240..e7e38c73066 100644 --- a/go.sum +++ b/go.sum @@ -617,8 +617,10 @@ github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXS github.com/multiversx/concurrent-map v0.1.4 
h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.2.1 h1:kmDfK7Znl3S0IJlDEE4sFuBOmA2rZkBudxlGhI1bvQc= +github.com/multiversx/mx-chain-core-go v1.2.1-0.20230403113932-916b16d18978/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= +github.com/multiversx/mx-chain-core-go v1.2.3-0.20230512130104-2a2e00c016b2 h1:j0kxDUOtqUPey78uWW39ScDT8S0V7G/L6kcl+JpGmq4= +github.com/multiversx/mx-chain-core-go v1.2.3-0.20230512130104-2a2e00c016b2/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-crypto-go v1.2.6 h1:yxsjAQGh62los+iYmORMfh3w9qen0xbYlmwU0juNSeg= github.com/multiversx/mx-chain-crypto-go v1.2.6/go.mod h1:rOj0Rr19HTOYt9YTeym7RKxlHt91NXln3LVKjHKVmA0= github.com/multiversx/mx-chain-es-indexer-go v1.4.1 h1:gD/D7xZP7OL8L/ZZ3SoOfKjVHrU0iUxIG2AbidHFTUc= @@ -627,10 +629,12 @@ github.com/multiversx/mx-chain-logger-go v1.0.11 h1:DFsHa+sc5fKwhDR50I8uBM99RTDT github.com/multiversx/mx-chain-logger-go v1.0.11/go.mod h1:1srDkP0DQucWQ+rYfaq0BX2qLnULsUdRPADpYUTM6dA= github.com/multiversx/mx-chain-p2p-go v1.0.16 h1:iMK8KUi006/avVcmecnk7lqbDCRL0BN04vgepoVLlyY= github.com/multiversx/mx-chain-p2p-go v1.0.16/go.mod h1:7piVUb5Z7UHK6n3JW8yIc32RdDFZu7GivRY29q0r2gI= -github.com/multiversx/mx-chain-storage-go v1.0.8 h1:PB9OAwZs3rWz7nybBOxVCxgrd785FUUUAsVc5JWXYCw= github.com/multiversx/mx-chain-storage-go v1.0.8/go.mod h1:lEkFYFe6taKYxqO1einNgT1esY3K9Pj6vPnoCwV9C3U= -github.com/multiversx/mx-chain-vm-common-go v1.4.1 h1:HHZF9zU4WsMbfLrCarx3ESM95caWUrPBleGHKdsbzgc= +github.com/multiversx/mx-chain-storage-go v1.0.9-0.20230512130346-04e711f3d064 h1:by2niUwKPvCONvTLUrhONwo+yl3Lin770A7uJAfEsaU= 
+github.com/multiversx/mx-chain-storage-go v1.0.9-0.20230512130346-04e711f3d064/go.mod h1:FGhaeTNIcLZOPqsJZQ1TdcMaPVLhj642OzRNmt6+RQs= github.com/multiversx/mx-chain-vm-common-go v1.4.1/go.mod h1:K6yCdro8VohzYI6GwjGzTO+fJiPgO5coo2sgQb+zA24= +github.com/multiversx/mx-chain-vm-common-go v1.4.2-0.20230512130259-7b26a55bcd8a h1:m0cQrhe2zet657jWjrE2nvba6DqM5I5bNSqbJcpwfEM= +github.com/multiversx/mx-chain-vm-common-go v1.4.2-0.20230512130259-7b26a55bcd8a/go.mod h1:eBUoLYceIutumF+MZYrHhI+Fq/piUueuaR3vH8Pia8A= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53 h1:HGbatzgIhVPJASN3ADnygH4MrKypAidOVtZkHkY/jKw= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53/go.mod h1:STVJW9m/TUJ9Q64W/T4P/KuhP5fPb+cCb6Q4gSehWJg= github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54 h1:bl2essObOEDwVWci71hJ2QO5AYTsKk6IlzRK0i8y63w= diff --git a/integrationTests/mock/storageManagerStub.go b/integrationTests/mock/storageManagerStub.go deleted file mode 100644 index 83b88c88abb..00000000000 --- a/integrationTests/mock/storageManagerStub.go +++ /dev/null @@ -1,91 +0,0 @@ -package mock - -import ( - "github.com/multiversx/mx-chain-go/common" -) - -// StorageManagerStub - -type StorageManagerStub struct { - DatabaseCalled func() common.DBWriteCacher - TakeSnapshotCalled func([]byte) - SetCheckpointCalled func([]byte) - GetDbThatContainsHashCalled func([]byte) common.DBWriteCacher - GetSnapshotThatContainsHashCalled func(rootHash []byte) common.SnapshotDbHandler - IsPruningEnabledCalled func() bool - IsPruningBlockedCalled func() bool - EnterPruningBufferingModeCalled func() - ExitPruningBufferingModeCalled func() - IsInterfaceNilCalled func() bool -} - -// Database - -func (sms *StorageManagerStub) Database() common.DBWriteCacher { - if sms.DatabaseCalled != nil { - return sms.DatabaseCalled() - } - return nil -} - -// TakeSnapshot - -func (sms *StorageManagerStub) TakeSnapshot([]byte) { - -} - -// SetCheckpoint - -func (sms *StorageManagerStub) SetCheckpoint([]byte) { - -} - -// 
GetSnapshotThatContainsHash - -func (sms *StorageManagerStub) GetSnapshotThatContainsHash(d []byte) common.SnapshotDbHandler { - if sms.GetSnapshotThatContainsHashCalled != nil { - return sms.GetSnapshotThatContainsHashCalled(d) - } - - return nil -} - -// IsPruningEnabled - -func (sms *StorageManagerStub) IsPruningEnabled() bool { - if sms.IsPruningEnabledCalled != nil { - return sms.IsPruningEnabledCalled() - } - return false -} - -// IsPruningBlocked - -func (sms *StorageManagerStub) IsPruningBlocked() bool { - if sms.IsPruningBlockedCalled != nil { - return sms.IsPruningBlockedCalled() - } - return false -} - -// GetSnapshotDbBatchDelay - -func (sms *StorageManagerStub) GetSnapshotDbBatchDelay() int { - return 0 -} - -// Close - -func (sms *StorageManagerStub) Close() error { - return nil -} - -// EnterPruningBufferingMode - -func (sms *StorageManagerStub) EnterPruningBufferingMode() { - if sms.EnterPruningBufferingModeCalled != nil { - sms.EnterPruningBufferingModeCalled() - } -} - -// ExitPruningBufferingMode - -func (sms *StorageManagerStub) ExitPruningBufferingMode() { - if sms.ExitPruningBufferingModeCalled != nil { - sms.ExitPruningBufferingModeCalled() - } -} - -// IsInterfaceNil - -func (sms *StorageManagerStub) IsInterfaceNil() bool { - return sms == nil -} diff --git a/testscommon/factory/stateComponentsMock.go b/testscommon/factory/stateComponentsMock.go index 711181c003f..5aa541dffa0 100644 --- a/testscommon/factory/stateComponentsMock.go +++ b/testscommon/factory/stateComponentsMock.go @@ -15,18 +15,19 @@ type StateComponentsMock struct { AccountsRepo state.AccountsRepository Tries common.TriesHolder StorageManagers map[string]common.StorageManager - MissingNodesNotifier common.MissingTrieNodesNotifier + MissingNodesNotifier common.MissingTrieNodesNotifier } // NewStateComponentsMockFromRealComponent - func NewStateComponentsMockFromRealComponent(stateComponents factory.StateComponentsHolder) *StateComponentsMock { return &StateComponentsMock{ - 
PeersAcc: stateComponents.PeerAccounts(), - Accounts: stateComponents.AccountsAdapter(), - AccountsAPI: stateComponents.AccountsAdapterAPI(), - AccountsRepo: stateComponents.AccountsRepository(), - Tries: stateComponents.TriesContainer(), - StorageManagers: stateComponents.TrieStorageManagers(), + PeersAcc: stateComponents.PeerAccounts(), + Accounts: stateComponents.AccountsAdapter(), + AccountsAPI: stateComponents.AccountsAdapterAPI(), + AccountsRepo: stateComponents.AccountsRepository(), + Tries: stateComponents.TriesContainer(), + StorageManagers: stateComponents.TrieStorageManagers(), + MissingNodesNotifier: stateComponents.MissingTrieNodesNotifier(), } } diff --git a/trie/node.go b/trie/node.go index 98305fb47df..67c7f95d8c3 100644 --- a/trie/node.go +++ b/trie/node.go @@ -120,7 +120,7 @@ func computeAndSetNodeHash(n node) ([]byte, error) { func getNodeFromDBAndDecode(n []byte, db common.DBWriteCacher, marshalizer marshal.Marshalizer, hasher hashing.Hasher) (node, error) { encChild, err := db.Get(n) if err != nil { - log.Trace(core.GetNodeFromDBErrorString, "error", err, "key", n, "stack trace", string(debug.Stack())) + treatLogError(log, err, n) dbWithID, ok := db.(dbWriteCacherWithIdentifier) if !ok { @@ -134,6 +134,14 @@ func getNodeFromDBAndDecode(n []byte, db common.DBWriteCacher, marshalizer marsh return decodeNode(encChild, marshalizer, hasher) } +func treatLogError(logInstance logger.Logger, err error, key []byte) { + if logInstance.GetLevel() != logger.LogTrace { + return + } + + logInstance.Trace(core.GetNodeFromDBErrorString, "error", err, "key", key, "stack trace", string(debug.Stack())) +} + func resolveIfCollapsed(n node, pos byte, db common.DBWriteCacher) error { err := n.isEmptyOrNil() if err != nil { diff --git a/trie/node_test.go b/trie/node_test.go index f6bfcf165ce..0b6e850ee63 100644 --- a/trie/node_test.go +++ b/trie/node_test.go @@ -667,7 +667,7 @@ func TestTreatLogError(t *testing.T) { }, TraceCalled: func(message string, args 
...interface{}) { wasCalled = true - require.Equal(t, common.GetNodeFromDBErrorString, message) + require.Equal(t, core.GetNodeFromDBErrorString, message) require.Equal(t, 6, len(args)) expectedFirst5Args := []interface{}{"error", err, "key", key, "stack trace"} require.Equal(t, expectedFirst5Args, args[:5]) From fc3828f64c80fcc1b1627d653007183394848efe Mon Sep 17 00:00:00 2001 From: Iuga Mihai Date: Mon, 15 May 2023 10:29:01 +0300 Subject: [PATCH 171/221] fix --- cmd/node/config/external.toml | 2 +- outport/host/driver.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/node/config/external.toml b/cmd/node/config/external.toml index 03a13a61ed0..a7a46adf77f 100644 --- a/cmd/node/config/external.toml +++ b/cmd/node/config/external.toml @@ -55,5 +55,5 @@ MarshallerType = "json" # The number of seconds when the client will try again to send the data RetryDurationInSec = 5 - # Signals if in case of data payload processing error, we should send the ack signal or not + # Sets if, in case of data payload processing error, we should block or not the advancement to the next processing event. Set this to true if you wish the node to stop processing blocks if the client/server encounters errors while processing requests. 
BlockingAckOnError = false diff --git a/outport/host/driver.go b/outport/host/driver.go index 017cdfecefb..c8f428fd331 100644 --- a/outport/host/driver.go +++ b/outport/host/driver.go @@ -96,7 +96,7 @@ func (o *hostDriver) handleAction(args interface{}, topic string) error { err = o.senderHost.Send(marshalledPayload, topic) if err != nil { - return fmt.Errorf("%w while sendcing data on route for topic %s", err, topic) + return fmt.Errorf("%w while sending data on route for topic %s", err, topic) } return nil From ba3e5e3105eda84084e500077f6d80f241c967aa Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Mon, 15 May 2023 16:39:04 +0300 Subject: [PATCH 172/221] MX-13962: consistent tokens values length checks --- cmd/node/config/enableEpochs.toml | 29 ++++--- common/enablers/enableEpochsHandler.go | 1 + common/enablers/epochFlags.go | 6 ++ config/epochConfig.go | 1 + config/tomlConfig_test.go | 112 +++++++++++++------------ go.mod | 2 +- go.sum | 3 +- 7 files changed, 85 insertions(+), 69 deletions(-) diff --git a/cmd/node/config/enableEpochs.toml b/cmd/node/config/enableEpochs.toml index 12741cd03aa..4d088612f70 100644 --- a/cmd/node/config/enableEpochs.toml +++ b/cmd/node/config/enableEpochs.toml @@ -246,22 +246,25 @@ KeepExecOrderOnCreatedSCRsEnableEpoch = 2 # MultiClaimOnDelegationEnableEpoch represents the epoch when the multi claim on delegation is enabled - MultiClaimOnDelegationEnableEpoch = 3 +MultiClaimOnDelegationEnableEpoch = 3 - # BLSMultiSignerEnableEpoch represents the activation epoch for different types of BLS multi-signers - BLSMultiSignerEnableEpoch = [ - { EnableEpoch = 0, Type = "no-KOSK"}, - { EnableEpoch = 1, Type = "KOSK"} - ] +# BLSMultiSignerEnableEpoch represents the activation epoch for different types of BLS multi-signers +BLSMultiSignerEnableEpoch = [ + { EnableEpoch = 0, Type = "no-KOSK" }, + { EnableEpoch = 1, Type = "KOSK" } +] - # SetGuardianEnableEpoch represents the epoch when the guard account feature is enabled in the protocol - 
SetGuardianEnableEpoch = 2 +# SetGuardianEnableEpoch represents the epoch when the guard account feature is enabled in the protocol +SetGuardianEnableEpoch = 2 - # MaxNodesChangeEnableEpoch holds configuration for changing the maximum number of nodes and the enabling epoch - MaxNodesChangeEnableEpoch = [ - { EpochEnable = 0, MaxNumNodes = 36, NodesToShufflePerShard = 4 }, - { EpochEnable = 1, MaxNumNodes = 56, NodesToShufflePerShard = 2 } - ] +# ConsistentTokensValuesLengthCheckEnableEpoch represents the epoch when the consistent tokens values length check is enabled +ConsistentTokensValuesLengthCheckEnableEpoch = 2 + +# MaxNodesChangeEnableEpoch holds configuration for changing the maximum number of nodes and the enabling epoch +MaxNodesChangeEnableEpoch = [ + { EpochEnable = 0, MaxNumNodes = 36, NodesToShufflePerShard = 4 }, + { EpochEnable = 1, MaxNumNodes = 56, NodesToShufflePerShard = 2 } +] [GasSchedule] # GasScheduleByEpochs holds the configuration for the gas schedule that will be applied from specific epochs diff --git a/common/enablers/enableEpochsHandler.go b/common/enablers/enableEpochsHandler.go index 59cee759e8a..d5561ad444a 100644 --- a/common/enablers/enableEpochsHandler.go +++ b/common/enablers/enableEpochsHandler.go @@ -121,6 +121,7 @@ func (handler *enableEpochsHandler) EpochConfirmed(epoch uint32, _ uint64) { handler.setFlagValue(epoch >= handler.enableEpochsConfig.MultiClaimOnDelegationEnableEpoch, handler.multiClaimOnDelegationFlag, "multiClaimOnDelegationFlag", epoch, handler.enableEpochsConfig.MultiClaimOnDelegationEnableEpoch) handler.setFlagValue(epoch >= handler.enableEpochsConfig.SetGuardianEnableEpoch, handler.setGuardianFlag, "setGuardianFlag", epoch, handler.enableEpochsConfig.SetGuardianEnableEpoch) handler.setFlagValue(epoch >= handler.enableEpochsConfig.KeepExecOrderOnCreatedSCRsEnableEpoch, handler.keepExecOrderOnCreatedSCRsFlag, "keepExecOrderOnCreatedSCRsFlag", epoch, 
handler.enableEpochsConfig.KeepExecOrderOnCreatedSCRsEnableEpoch) + handler.setFlagValue(epoch >= handler.enableEpochsConfig.ConsistentTokensValuesLengthCheckEnableEpoch, handler.consistentTokensValuesCheckFlag, "consistentTokensValuesCheckFlag", epoch, handler.enableEpochsConfig.ConsistentTokensValuesLengthCheckEnableEpoch) } func (handler *enableEpochsHandler) setFlagValue(value bool, flag *atomic.Flag, flagName string, epoch uint32, flagEpoch uint32) { diff --git a/common/enablers/epochFlags.go b/common/enablers/epochFlags.go index e77279928cb..b278b066063 100644 --- a/common/enablers/epochFlags.go +++ b/common/enablers/epochFlags.go @@ -93,6 +93,7 @@ type epochFlagsHolder struct { setGuardianFlag *atomic.Flag keepExecOrderOnCreatedSCRsFlag *atomic.Flag multiClaimOnDelegationFlag *atomic.Flag + consistentTokensValuesCheckFlag *atomic.Flag } func newEpochFlagsHolder() *epochFlagsHolder { @@ -671,6 +672,11 @@ func (holder *epochFlagsHolder) IsSetGuardianEnabled() bool { return holder.setGuardianFlag.IsSet() } +// IsConsistentTokensValuesLengthCheckEnabled returns true if consistentTokensValuesCheckFlag is enabled +func (holder *epochFlagsHolder) IsConsistentTokensValuesLengthCheckEnabled() bool { + return holder.consistentTokensValuesCheckFlag.IsSet() +} + // IsKeepExecOrderOnCreatedSCRsEnabled returns true if keepExecOrderOnCreatedSCRsFlag is enabled func (holder *epochFlagsHolder) IsKeepExecOrderOnCreatedSCRsEnabled() bool { return holder.keepExecOrderOnCreatedSCRsFlag.IsSet() diff --git a/config/epochConfig.go b/config/epochConfig.go index deadeb79d11..34b44d6707b 100644 --- a/config/epochConfig.go +++ b/config/epochConfig.go @@ -97,6 +97,7 @@ type EnableEpochs struct { MultiClaimOnDelegationEnableEpoch uint32 BLSMultiSignerEnableEpoch []MultiSignerConfig SetGuardianEnableEpoch uint32 + ConsistentTokensValuesLengthCheckEnableEpoch uint32 } // GasScheduleByEpochs represents a gas schedule toml entry that will be applied from the provided epoch diff --git 
a/config/tomlConfig_test.go b/config/tomlConfig_test.go index d7a3b1c7170..bd3e91bfbc5 100644 --- a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -698,6 +698,9 @@ func TestEnableEpochConfig(t *testing.T) { # MultiClaimOnDelegationEnableEpoch represents the epoch when the multi claim on delegation function is enabled MultiClaimOnDelegationEnableEpoch = 65 + # ConsistentTokensValuesLengthCheckEnableEpoch represents the epoch when the consistent tokens values length check is enabled + ConsistentTokensValuesLengthCheckEnableEpoch = 66 + # MaxNodesChangeEnableEpoch holds configuration for changing the maximum number of nodes and the enabling epoch MaxNodesChangeEnableEpoch = [ { EpochEnable = 44, MaxNumNodes = 2169, NodesToShufflePerShard = 80 }, @@ -742,60 +745,61 @@ func TestEnableEpochConfig(t *testing.T) { NodesToShufflePerShard: 80, }, }, - BlockGasAndFeesReCheckEnableEpoch: 13, - StakingV2EnableEpoch: 18, - StakeEnableEpoch: 17, - DoubleKeyProtectionEnableEpoch: 19, - ESDTEnableEpoch: 20, - GovernanceEnableEpoch: 21, - DelegationManagerEnableEpoch: 22, - DelegationSmartContractEnableEpoch: 23, - CorrectLastUnjailedEnableEpoch: 24, - BalanceWaitingListsEnableEpoch: 14, - ReturnDataToLastTransferEnableEpoch: 15, - SenderInOutTransferEnableEpoch: 16, - RelayedTransactionsV2EnableEpoch: 25, - UnbondTokensV2EnableEpoch: 26, - SaveJailedAlwaysEnableEpoch: 27, - ValidatorToDelegationEnableEpoch: 29, - ReDelegateBelowMinCheckEnableEpoch: 28, - WaitingListFixEnableEpoch: 30, - IncrementSCRNonceInMultiTransferEnableEpoch: 31, - ESDTMultiTransferEnableEpoch: 32, - GlobalMintBurnDisableEpoch: 33, - ESDTTransferRoleEnableEpoch: 34, - BuiltInFunctionOnMetaEnableEpoch: 35, - ComputeRewardCheckpointEnableEpoch: 36, - SCRSizeInvariantCheckEnableEpoch: 37, - BackwardCompSaveKeyValueEnableEpoch: 38, - ESDTNFTCreateOnMultiShardEnableEpoch: 39, - MetaESDTSetEnableEpoch: 40, - AddTokensToDelegationEnableEpoch: 41, - MultiESDTTransferFixOnCallBackOnEnableEpoch: 42, - 
OptimizeGasUsedInCrossMiniBlocksEnableEpoch: 43, - FixOOGReturnCodeEnableEpoch: 44, - RemoveNonUpdatedStorageEnableEpoch: 45, - OptimizeNFTStoreEnableEpoch: 46, - CreateNFTThroughExecByCallerEnableEpoch: 47, - IsPayableBySCEnableEpoch: 48, - CleanUpInformativeSCRsEnableEpoch: 49, - StorageAPICostOptimizationEnableEpoch: 50, - TransformToMultiShardCreateEnableEpoch: 51, - ESDTRegisterAndSetAllRolesEnableEpoch: 52, - FailExecutionOnEveryAPIErrorEnableEpoch: 53, - ManagedCryptoAPIsEnableEpoch: 54, - ESDTMetadataContinuousCleanupEnableEpoch: 55, - FixAsyncCallBackArgsListEnableEpoch: 56, - FixOldTokenLiquidityEnableEpoch: 57, - SetSenderInEeiOutputTransferEnableEpoch: 58, - MaxBlockchainHookCountersEnableEpoch: 59, - WipeSingleNFTLiquidityDecreaseEnableEpoch: 60, - AlwaysSaveTokenMetaDataEnableEpoch: 61, - RuntimeCodeSizeFixEnableEpoch: 62, - RuntimeMemStoreLimitEnableEpoch: 63, - SetGuardianEnableEpoch: 64, - MultiClaimOnDelegationEnableEpoch: 65, - KeepExecOrderOnCreatedSCRsEnableEpoch: 64, + BlockGasAndFeesReCheckEnableEpoch: 13, + StakingV2EnableEpoch: 18, + StakeEnableEpoch: 17, + DoubleKeyProtectionEnableEpoch: 19, + ESDTEnableEpoch: 20, + GovernanceEnableEpoch: 21, + DelegationManagerEnableEpoch: 22, + DelegationSmartContractEnableEpoch: 23, + CorrectLastUnjailedEnableEpoch: 24, + BalanceWaitingListsEnableEpoch: 14, + ReturnDataToLastTransferEnableEpoch: 15, + SenderInOutTransferEnableEpoch: 16, + RelayedTransactionsV2EnableEpoch: 25, + UnbondTokensV2EnableEpoch: 26, + SaveJailedAlwaysEnableEpoch: 27, + ValidatorToDelegationEnableEpoch: 29, + ReDelegateBelowMinCheckEnableEpoch: 28, + WaitingListFixEnableEpoch: 30, + IncrementSCRNonceInMultiTransferEnableEpoch: 31, + ESDTMultiTransferEnableEpoch: 32, + GlobalMintBurnDisableEpoch: 33, + ESDTTransferRoleEnableEpoch: 34, + BuiltInFunctionOnMetaEnableEpoch: 35, + ComputeRewardCheckpointEnableEpoch: 36, + SCRSizeInvariantCheckEnableEpoch: 37, + BackwardCompSaveKeyValueEnableEpoch: 38, + 
ESDTNFTCreateOnMultiShardEnableEpoch: 39, + MetaESDTSetEnableEpoch: 40, + AddTokensToDelegationEnableEpoch: 41, + MultiESDTTransferFixOnCallBackOnEnableEpoch: 42, + OptimizeGasUsedInCrossMiniBlocksEnableEpoch: 43, + FixOOGReturnCodeEnableEpoch: 44, + RemoveNonUpdatedStorageEnableEpoch: 45, + OptimizeNFTStoreEnableEpoch: 46, + CreateNFTThroughExecByCallerEnableEpoch: 47, + IsPayableBySCEnableEpoch: 48, + CleanUpInformativeSCRsEnableEpoch: 49, + StorageAPICostOptimizationEnableEpoch: 50, + TransformToMultiShardCreateEnableEpoch: 51, + ESDTRegisterAndSetAllRolesEnableEpoch: 52, + FailExecutionOnEveryAPIErrorEnableEpoch: 53, + ManagedCryptoAPIsEnableEpoch: 54, + ESDTMetadataContinuousCleanupEnableEpoch: 55, + FixAsyncCallBackArgsListEnableEpoch: 56, + FixOldTokenLiquidityEnableEpoch: 57, + SetSenderInEeiOutputTransferEnableEpoch: 58, + MaxBlockchainHookCountersEnableEpoch: 59, + WipeSingleNFTLiquidityDecreaseEnableEpoch: 60, + AlwaysSaveTokenMetaDataEnableEpoch: 61, + RuntimeCodeSizeFixEnableEpoch: 62, + RuntimeMemStoreLimitEnableEpoch: 63, + SetGuardianEnableEpoch: 64, + MultiClaimOnDelegationEnableEpoch: 65, + KeepExecOrderOnCreatedSCRsEnableEpoch: 64, + ConsistentTokensValuesLengthCheckEnableEpoch: 66, BLSMultiSignerEnableEpoch: []MultiSignerConfig{ { EnableEpoch: 0, diff --git a/go.mod b/go.mod index bba09a4e3c7..d12e5417869 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/multiversx/mx-chain-logger-go v1.0.11 github.com/multiversx/mx-chain-p2p-go v1.0.16 github.com/multiversx/mx-chain-storage-go v1.0.8 - github.com/multiversx/mx-chain-vm-common-go v1.4.1 + github.com/multiversx/mx-chain-vm-common-go v1.4.2-0.20230515133310-3417cce7427e github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53 github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54 github.com/multiversx/mx-chain-vm-v1_4-go v1.4.80 diff --git a/go.sum b/go.sum index 3b5e4647240..1c1b6fe8674 100644 --- a/go.sum +++ b/go.sum @@ -629,8 +629,9 @@ github.com/multiversx/mx-chain-p2p-go v1.0.16 
h1:iMK8KUi006/avVcmecnk7lqbDCRL0BN github.com/multiversx/mx-chain-p2p-go v1.0.16/go.mod h1:7piVUb5Z7UHK6n3JW8yIc32RdDFZu7GivRY29q0r2gI= github.com/multiversx/mx-chain-storage-go v1.0.8 h1:PB9OAwZs3rWz7nybBOxVCxgrd785FUUUAsVc5JWXYCw= github.com/multiversx/mx-chain-storage-go v1.0.8/go.mod h1:lEkFYFe6taKYxqO1einNgT1esY3K9Pj6vPnoCwV9C3U= -github.com/multiversx/mx-chain-vm-common-go v1.4.1 h1:HHZF9zU4WsMbfLrCarx3ESM95caWUrPBleGHKdsbzgc= github.com/multiversx/mx-chain-vm-common-go v1.4.1/go.mod h1:K6yCdro8VohzYI6GwjGzTO+fJiPgO5coo2sgQb+zA24= +github.com/multiversx/mx-chain-vm-common-go v1.4.2-0.20230515133310-3417cce7427e h1:2PkyDANF4IYD1OuUXLQg+eJmoTaxwypYg0VGfbhuKV4= +github.com/multiversx/mx-chain-vm-common-go v1.4.2-0.20230515133310-3417cce7427e/go.mod h1:K6yCdro8VohzYI6GwjGzTO+fJiPgO5coo2sgQb+zA24= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53 h1:HGbatzgIhVPJASN3ADnygH4MrKypAidOVtZkHkY/jKw= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53/go.mod h1:STVJW9m/TUJ9Q64W/T4P/KuhP5fPb+cCb6Q4gSehWJg= github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54 h1:bl2essObOEDwVWci71hJ2QO5AYTsKk6IlzRK0i8y63w= From 5c3e58f0e183f53ed391796d0a48547ed1db59c4 Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Tue, 16 May 2023 11:27:51 +0300 Subject: [PATCH 173/221] revert a change --- process/block/metablock.go | 7 ++++++- process/block/shardblock.go | 7 ++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/process/block/metablock.go b/process/block/metablock.go index dc6e2b1303e..80cc366582c 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -397,7 +397,12 @@ func (mp *metaProcessor) ProcessBlock( return err } - return mp.blockProcessingCutoffHandler.HandleProcessErrorCutoff(header) + err = mp.blockProcessingCutoffHandler.HandleProcessErrorCutoff(header) + if err != nil { + return err + } + + return nil } func (mp *metaProcessor) processEpochStartMetaBlock( diff --git a/process/block/shardblock.go b/process/block/shardblock.go index 
8f630e7ae91..0c53e07653e 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -347,7 +347,12 @@ func (sp *shardProcessor) ProcessBlock( return err } - return sp.blockProcessingCutoffHandler.HandleProcessErrorCutoff(header) + err = sp.blockProcessingCutoffHandler.HandleProcessErrorCutoff(header) + if err != nil { + return err + } + + return nil } func (sp *shardProcessor) requestEpochStartInfo(header data.ShardHeaderHandler, haveTime func() time.Duration) error { From ee6a55dd4d1f3283c78171998571a8d477ee1011 Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Tue, 16 May 2023 12:03:33 +0300 Subject: [PATCH 174/221] extend log --- process/block/cutoff/blockProcessingCutoffHandler.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/process/block/cutoff/blockProcessingCutoffHandler.go b/process/block/cutoff/blockProcessingCutoffHandler.go index 26044eec4e9..90eec668507 100644 --- a/process/block/cutoff/blockProcessingCutoffHandler.go +++ b/process/block/cutoff/blockProcessingCutoffHandler.go @@ -35,7 +35,7 @@ func NewBlockProcessingCutoffHandler(cfg config.BlockProcessingCutoffConfig) (*b return nil, err } - log.Warn("node is started by using block processing cutoff and will pause/error at the provided coordinate", cfg.CutoffTrigger, cfg.Value) + log.Warn("node is started by using block processing cutoff and will pause/error at the provided coordinate", "mode", cfg.Mode, cfg.CutoffTrigger, cfg.Value) return b, nil } From b85c67e2d6b611063fc80a791620209ba297e4a6 Mon Sep 17 00:00:00 2001 From: robertsasu Date: Tue, 16 May 2023 12:44:48 +0300 Subject: [PATCH 175/221] integrate new go mod --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 908a56d70a7..5b9519bcf5a 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/multiversx/mx-chain-logger-go v1.0.11 github.com/multiversx/mx-chain-p2p-go v1.0.16 github.com/multiversx/mx-chain-storage-go v1.0.8 - 
github.com/multiversx/mx-chain-vm-common-go v1.4.2-0.20230426114543-7cbe0054a196 + github.com/multiversx/mx-chain-vm-common-go v1.4.2-0.20230516093409-cabdea058cee github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53 github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54 github.com/multiversx/mx-chain-vm-v1_4-go v1.4.80 diff --git a/go.sum b/go.sum index ae038645078..e92e2a7ca1d 100644 --- a/go.sum +++ b/go.sum @@ -630,8 +630,8 @@ github.com/multiversx/mx-chain-p2p-go v1.0.16/go.mod h1:7piVUb5Z7UHK6n3JW8yIc32R github.com/multiversx/mx-chain-storage-go v1.0.8 h1:PB9OAwZs3rWz7nybBOxVCxgrd785FUUUAsVc5JWXYCw= github.com/multiversx/mx-chain-storage-go v1.0.8/go.mod h1:lEkFYFe6taKYxqO1einNgT1esY3K9Pj6vPnoCwV9C3U= github.com/multiversx/mx-chain-vm-common-go v1.4.1/go.mod h1:K6yCdro8VohzYI6GwjGzTO+fJiPgO5coo2sgQb+zA24= -github.com/multiversx/mx-chain-vm-common-go v1.4.2-0.20230426114543-7cbe0054a196 h1:IiRdjifo4+nDpVJsjS78LMhlFJQ2syJaZid7ISvgfyo= -github.com/multiversx/mx-chain-vm-common-go v1.4.2-0.20230426114543-7cbe0054a196/go.mod h1:K6yCdro8VohzYI6GwjGzTO+fJiPgO5coo2sgQb+zA24= +github.com/multiversx/mx-chain-vm-common-go v1.4.2-0.20230516093409-cabdea058cee h1:maRbES2A2w0oHj2AWCGWYeRmeMjMAfRQtEpzegMGVVg= +github.com/multiversx/mx-chain-vm-common-go v1.4.2-0.20230516093409-cabdea058cee/go.mod h1:K6yCdro8VohzYI6GwjGzTO+fJiPgO5coo2sgQb+zA24= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53 h1:HGbatzgIhVPJASN3ADnygH4MrKypAidOVtZkHkY/jKw= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53/go.mod h1:STVJW9m/TUJ9Q64W/T4P/KuhP5fPb+cCb6Q4gSehWJg= github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54 h1:bl2essObOEDwVWci71hJ2QO5AYTsKk6IlzRK0i8y63w= From 7538fbdfcbdc8eeb65a4026940832d6eae49a630 Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Tue, 16 May 2023 15:26:52 +0300 Subject: [PATCH 176/221] fix interface --- common/interface.go | 1 + sharding/mock/enableEpochsHandlerMock.go | 5 +++++ testscommon/enableEpochsHandlerStub.go | 9 +++++++++ 3 files changed, 15 insertions(+) diff --git 
a/common/interface.go b/common/interface.go index e992d41ec5b..a0cc7db5b3a 100644 --- a/common/interface.go +++ b/common/interface.go @@ -339,6 +339,7 @@ type EnableEpochsHandler interface { IsSetGuardianEnabled() bool IsKeepExecOrderOnCreatedSCRsEnabled() bool IsMultiClaimOnDelegationEnabled() bool + IsConsistentTokensValuesLengthCheckEnabled() bool IsInterfaceNil() bool } diff --git a/sharding/mock/enableEpochsHandlerMock.go b/sharding/mock/enableEpochsHandlerMock.go index cbe6fb10014..56d9edf95a7 100644 --- a/sharding/mock/enableEpochsHandlerMock.go +++ b/sharding/mock/enableEpochsHandlerMock.go @@ -581,6 +581,11 @@ func (mock *EnableEpochsHandlerMock) IsMultiClaimOnDelegationEnabled() bool { return false } +// IsConsistentTokensValuesLengthCheckEnabled - +func (mock *EnableEpochsHandlerMock) IsConsistentTokensValuesLengthCheckEnabled() bool { + return false +} + // IsInterfaceNil returns true if there is no value under the interface func (mock *EnableEpochsHandlerMock) IsInterfaceNil() bool { return mock == nil diff --git a/testscommon/enableEpochsHandlerStub.go b/testscommon/enableEpochsHandlerStub.go index d076a3676d3..207760b936e 100644 --- a/testscommon/enableEpochsHandlerStub.go +++ b/testscommon/enableEpochsHandlerStub.go @@ -120,6 +120,7 @@ type EnableEpochsHandlerStub struct { IsSetGuardianEnabledField bool IsKeepExecOrderOnCreatedSCRsEnabledField bool IsMultiClaimOnDelegationEnabledField bool + IsConsistentTokensValuesLengthCheckEnabledField bool } // ResetPenalizedTooMuchGasFlag - @@ -1041,6 +1042,14 @@ func (stub *EnableEpochsHandlerStub) IsMultiClaimOnDelegationEnabled() bool { return stub.IsMultiClaimOnDelegationEnabledField } +// IsConsistentTokensValuesLengthCheckEnabled - +func (stub *EnableEpochsHandlerStub) IsConsistentTokensValuesLengthCheckEnabled() bool { + stub.RLock() + defer stub.RUnlock() + + return stub.IsConsistentTokensValuesLengthCheckEnabledField +} + // IsInterfaceNil - func (stub *EnableEpochsHandlerStub) IsInterfaceNil() bool { 
return stub == nil From 7029a489689db720934920f95f2116a3daada36e Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Tue, 16 May 2023 15:35:55 +0300 Subject: [PATCH 177/221] fix test are merge --- factory/processing/processComponents_test.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/factory/processing/processComponents_test.go b/factory/processing/processComponents_test.go index 92642f808e0..e26fd2667c1 100644 --- a/factory/processing/processComponents_test.go +++ b/factory/processing/processComponents_test.go @@ -79,6 +79,9 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto EpochConfig: config.EpochConfig{}, PrefConfigs: config.PreferencesConfig{}, ImportDBConfig: config.ImportDbConfig{}, + FlagsConfig: config.ContextFlagsConfig{ + Version: "v1.0.0", + }, AccountsParser: &mock.AccountsParserStub{ GenerateInitialTransactionsCalled: func(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*dataBlock.MiniBlock, map[uint32]*outportCore.Pool, error) { return []*dataBlock.MiniBlock{ @@ -142,7 +145,6 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto MaxServiceFee: 100, }, }, - Version: "v1.0.0", ImportStartHandler: &testscommon.ImportStartHandlerStub{}, HistoryRepo: &dblookupext.HistoryRepositoryStub{}, Data: &testsMocks.DataComponentsStub{ From f2cdeee8c2180f214cc40fe0c51e0509a1a847c9 Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Tue, 16 May 2023 15:51:19 +0300 Subject: [PATCH 178/221] fix after merge --- go.mod | 6 +++--- go.sum | 13 ++++++------- trie/snapshotTrieStorageManager_test.go | 7 +++---- trie/storageMarker/trieStorageMarker_test.go | 2 +- trie/sync_test.go | 4 ++-- trie/trieStorageManagerInEpoch_test.go | 4 ++-- trie/trieStorageManager_test.go | 11 ++++++----- 7 files changed, 23 insertions(+), 24 deletions(-) diff --git a/go.mod b/go.mod index 57dd47bbefc..b181fd64b0f 100644 --- a/go.mod +++ b/go.mod @@ -13,13 
+13,13 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-core-go v1.2.3-0.20230512130104-2a2e00c016b2 + github.com/multiversx/mx-chain-core-go v1.2.3-0.20230516122245-0160e1d4aa6a github.com/multiversx/mx-chain-crypto-go v1.2.6 github.com/multiversx/mx-chain-es-indexer-go v1.4.1 github.com/multiversx/mx-chain-logger-go v1.0.11 github.com/multiversx/mx-chain-p2p-go v1.0.16 - github.com/multiversx/mx-chain-storage-go v1.0.9-0.20230512130346-04e711f3d064 - github.com/multiversx/mx-chain-vm-common-go v1.4.2-0.20230512130259-7b26a55bcd8a + github.com/multiversx/mx-chain-storage-go v1.0.9-0.20230516122642-c47b66209ce1 + github.com/multiversx/mx-chain-vm-common-go v1.4.2-0.20230516122803-af76f55aef6b github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53 github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54 github.com/multiversx/mx-chain-vm-v1_4-go v1.4.80 diff --git a/go.sum b/go.sum index e7e38c73066..43aaf038f56 100644 --- a/go.sum +++ b/go.sum @@ -617,10 +617,9 @@ github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXS github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.2.1-0.20230403113932-916b16d18978/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.2.3-0.20230512130104-2a2e00c016b2 h1:j0kxDUOtqUPey78uWW39ScDT8S0V7G/L6kcl+JpGmq4= -github.com/multiversx/mx-chain-core-go v1.2.3-0.20230512130104-2a2e00c016b2/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= +github.com/multiversx/mx-chain-core-go 
v1.2.3-0.20230516122245-0160e1d4aa6a h1:hkJLco9I1dEfg8GRCtzNgEPBZ1wsEkcnPRVFoE3qX4M= +github.com/multiversx/mx-chain-core-go v1.2.3-0.20230516122245-0160e1d4aa6a/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-crypto-go v1.2.6 h1:yxsjAQGh62los+iYmORMfh3w9qen0xbYlmwU0juNSeg= github.com/multiversx/mx-chain-crypto-go v1.2.6/go.mod h1:rOj0Rr19HTOYt9YTeym7RKxlHt91NXln3LVKjHKVmA0= github.com/multiversx/mx-chain-es-indexer-go v1.4.1 h1:gD/D7xZP7OL8L/ZZ3SoOfKjVHrU0iUxIG2AbidHFTUc= @@ -630,11 +629,11 @@ github.com/multiversx/mx-chain-logger-go v1.0.11/go.mod h1:1srDkP0DQucWQ+rYfaq0B github.com/multiversx/mx-chain-p2p-go v1.0.16 h1:iMK8KUi006/avVcmecnk7lqbDCRL0BN04vgepoVLlyY= github.com/multiversx/mx-chain-p2p-go v1.0.16/go.mod h1:7piVUb5Z7UHK6n3JW8yIc32RdDFZu7GivRY29q0r2gI= github.com/multiversx/mx-chain-storage-go v1.0.8/go.mod h1:lEkFYFe6taKYxqO1einNgT1esY3K9Pj6vPnoCwV9C3U= -github.com/multiversx/mx-chain-storage-go v1.0.9-0.20230512130346-04e711f3d064 h1:by2niUwKPvCONvTLUrhONwo+yl3Lin770A7uJAfEsaU= -github.com/multiversx/mx-chain-storage-go v1.0.9-0.20230512130346-04e711f3d064/go.mod h1:FGhaeTNIcLZOPqsJZQ1TdcMaPVLhj642OzRNmt6+RQs= +github.com/multiversx/mx-chain-storage-go v1.0.9-0.20230516122642-c47b66209ce1 h1:J0RCbY2+LNVfmWs+qP+DC80awSLy/rhWvTLqcOpPgu4= +github.com/multiversx/mx-chain-storage-go v1.0.9-0.20230516122642-c47b66209ce1/go.mod h1:2U92gf9WQEwI+8t6DZj6Y1qWiHrb0Z9jIix7kLLDZ0c= github.com/multiversx/mx-chain-vm-common-go v1.4.1/go.mod h1:K6yCdro8VohzYI6GwjGzTO+fJiPgO5coo2sgQb+zA24= -github.com/multiversx/mx-chain-vm-common-go v1.4.2-0.20230512130259-7b26a55bcd8a h1:m0cQrhe2zet657jWjrE2nvba6DqM5I5bNSqbJcpwfEM= -github.com/multiversx/mx-chain-vm-common-go v1.4.2-0.20230512130259-7b26a55bcd8a/go.mod h1:eBUoLYceIutumF+MZYrHhI+Fq/piUueuaR3vH8Pia8A= +github.com/multiversx/mx-chain-vm-common-go v1.4.2-0.20230516122803-af76f55aef6b h1:YS9XRZuTiJBdUfxtNT2C0bCFlz80Yze2CglLyBwUaHU= +github.com/multiversx/mx-chain-vm-common-go 
v1.4.2-0.20230516122803-af76f55aef6b/go.mod h1:r+ckMMhlG/lpsvEXuakNDy5qqdlbS0/x8r0EZwThn6A= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53 h1:HGbatzgIhVPJASN3ADnygH4MrKypAidOVtZkHkY/jKw= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53/go.mod h1:STVJW9m/TUJ9Q64W/T4P/KuhP5fPb+cCb6Q4gSehWJg= github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54 h1:bl2essObOEDwVWci71hJ2QO5AYTsKk6IlzRK0i8y63w= diff --git a/trie/snapshotTrieStorageManager_test.go b/trie/snapshotTrieStorageManager_test.go index 28735e4875b..6efa5000fa4 100644 --- a/trie/snapshotTrieStorageManager_test.go +++ b/trie/snapshotTrieStorageManager_test.go @@ -8,7 +8,6 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" - errorsMx "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/stretchr/testify/assert" @@ -46,7 +45,7 @@ func TestSnapshotTrieStorageManager_Get(t *testing.T) { _ = stsm.Close() val, err := stsm.Get([]byte("key")) - assert.Equal(t, errorsMx.ErrContextClosing, err) + assert.Equal(t, core.ErrContextClosing, err) assert.Nil(t, val) }) t.Run("GetFromOldEpochsWithoutAddingToCache returns db closed should error", func(t *testing.T) { @@ -94,7 +93,7 @@ func TestSnapshotTrieStorageManager_Put(t *testing.T) { _ = stsm.Close() err := stsm.Put([]byte("key"), []byte("data")) - assert.Equal(t, errorsMx.ErrContextClosing, err) + assert.Equal(t, core.ErrContextClosing, err) }) t.Run("should work", func(t *testing.T) { t.Parallel() @@ -126,7 +125,7 @@ func TestSnapshotTrieStorageManager_GetFromLastEpoch(t *testing.T) { _ = stsm.Close() val, err := stsm.GetFromLastEpoch([]byte("key")) - assert.Equal(t, errorsMx.ErrContextClosing, err) + assert.Equal(t, core.ErrContextClosing, err) assert.Nil(t, val) }) t.Run("should work", func(t *testing.T) { diff --git a/trie/storageMarker/trieStorageMarker_test.go 
b/trie/storageMarker/trieStorageMarker_test.go index 733f9777995..af42a3c439a 100644 --- a/trie/storageMarker/trieStorageMarker_test.go +++ b/trie/storageMarker/trieStorageMarker_test.go @@ -23,7 +23,7 @@ func TestTrieStorageMarker_MarkStorerAsSyncedAndActive(t *testing.T) { getLatestStorageEpochCalled := false putCalled := false putInEpochWithoutCacheCalled := false - storer := &testscommon.StorageManagerStub{ + storer := &storageManager.StorageManagerStub{ GetLatestStorageEpochCalled: func() (uint32, error) { getLatestStorageEpochCalled = true return 0, expectedErr diff --git a/trie/sync_test.go b/trie/sync_test.go index 97fafe653f5..5f65832d701 100644 --- a/trie/sync_test.go +++ b/trie/sync_test.go @@ -6,10 +6,10 @@ import ( "testing" "time" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-core-go/data" "github.com/multiversx/mx-chain-go/common" - errorsMx "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" @@ -279,6 +279,6 @@ func TestTrieSync_StartSyncing(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() err := ts.StartSyncing([]byte("roothash"), ctx) - assert.Equal(t, errorsMx.ErrContextClosing, err) + assert.Equal(t, core.ErrContextClosing, err) }) } diff --git a/trie/trieStorageManagerInEpoch_test.go b/trie/trieStorageManagerInEpoch_test.go index 688310df0ff..29722e645c4 100644 --- a/trie/trieStorageManagerInEpoch_test.go +++ b/trie/trieStorageManagerInEpoch_test.go @@ -5,7 +5,7 @@ import ( "strings" "testing" - errorsMx "github.com/multiversx/mx-chain-go/errors" + "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/database" "github.com/multiversx/mx-chain-go/testscommon/storageManager" @@ -83,7 +83,7 @@ 
func TestTrieStorageManagerInEpoch_GetFromEpoch(t *testing.T) { _ = tsmie.Close() _, err := tsmie.Get([]byte("key")) - require.Equal(t, errorsMx.ErrContextClosing, err) + require.Equal(t, core.ErrContextClosing, err) }) t.Run("epoch 0 does not panic", func(t *testing.T) { diff --git a/trie/trieStorageManager_test.go b/trie/trieStorageManager_test.go index 45005d27b0f..d2e687742f4 100644 --- a/trie/trieStorageManager_test.go +++ b/trie/trieStorageManager_test.go @@ -15,6 +15,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/storage" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" "github.com/multiversx/mx-chain-go/trie/hashesHolder" @@ -350,7 +351,7 @@ func TestTrieStorageManager_SetEpochForPutOperation(t *testing.T) { providedEpoch := uint32(100) wasCalled := false args := getNewTrieStorageManagerArgs() - args.MainStorer = &testscommon.StorageManagerStub{ + args.MainStorer = &storageManager.StorageManagerStub{ SetEpochForPutOperationCalled: func(u uint32) { assert.Equal(t, providedEpoch, u) wasCalled = true @@ -606,7 +607,7 @@ func TestTrieStorageManager_Get(t *testing.T) { _ = ts.Close() val, err := ts.Get(providedKey) - assert.Equal(t, errors.ErrContextClosing, err) + assert.Equal(t, core.ErrContextClosing, err) assert.Nil(t, val) }) t.Run("main storer closing should error", func(t *testing.T) { @@ -673,7 +674,7 @@ func TestNewSnapshotTrieStorageManager_GetFromCurrentEpoch(t *testing.T) { _ = ts.Close() val, err := ts.GetFromCurrentEpoch(providedKey) - assert.Equal(t, errors.ErrContextClosing, err) + assert.Equal(t, core.ErrContextClosing, err) assert.Nil(t, val) }) t.Run("main storer not snapshotPruningStorer should error", func(t *testing.T) { @@ -717,7 +718,7 @@ func TestTrieStorageManager_Put(t *testing.T) { _ = ts.Close() 
err := ts.Put(providedKey, providedVal) - assert.Equal(t, errors.ErrContextClosing, err) + assert.Equal(t, core.ErrContextClosing, err) }) t.Run("should work", func(t *testing.T) { t.Parallel() @@ -741,7 +742,7 @@ func TestTrieStorageManager_PutInEpochWithoutCache(t *testing.T) { _ = ts.Close() err := ts.PutInEpochWithoutCache(providedKey, providedVal, 0) - assert.Equal(t, errors.ErrContextClosing, err) + assert.Equal(t, core.ErrContextClosing, err) }) t.Run("main storer not snapshotPruningStorer should error", func(t *testing.T) { t.Parallel() From 81d49ac0f1d49eef958c13e88afe22bc7336779a Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Tue, 16 May 2023 16:28:29 +0300 Subject: [PATCH 179/221] fix flag initialization --- common/enablers/enableEpochsHandler_test.go | 1 + common/enablers/epochFlags.go | 1 + 2 files changed, 2 insertions(+) diff --git a/common/enablers/enableEpochsHandler_test.go b/common/enablers/enableEpochsHandler_test.go index 28e905aae64..e2c05891428 100644 --- a/common/enablers/enableEpochsHandler_test.go +++ b/common/enablers/enableEpochsHandler_test.go @@ -215,6 +215,7 @@ func TestNewEnableEpochsHandler_EpochConfirmed(t *testing.T) { assert.True(t, handler.IsMaxBlockchainHookCountersFlagEnabled()) assert.True(t, handler.IsAlwaysSaveTokenMetaDataEnabled()) assert.True(t, handler.IsRuntimeCodeSizeFixEnabled()) + assert.True(t, handler.IsConsistentTokensValuesLengthCheckEnabled()) assert.False(t, handler.IsKeepExecOrderOnCreatedSCRsEnabled()) assert.False(t, handler.IsMultiClaimOnDelegationEnabled()) }) diff --git a/common/enablers/epochFlags.go b/common/enablers/epochFlags.go index b278b066063..7280dca883e 100644 --- a/common/enablers/epochFlags.go +++ b/common/enablers/epochFlags.go @@ -185,6 +185,7 @@ func newEpochFlagsHolder() *epochFlagsHolder { alwaysSaveTokenMetaDataFlag: &atomic.Flag{}, setGuardianFlag: &atomic.Flag{}, keepExecOrderOnCreatedSCRsFlag: &atomic.Flag{}, + consistentTokensValuesCheckFlag: &atomic.Flag{}, 
multiClaimOnDelegationFlag: &atomic.Flag{}, } } From 29973e71e02747c95b5c5e011519661f1a662479 Mon Sep 17 00:00:00 2001 From: robertsasu Date: Wed, 17 May 2023 09:25:15 +0300 Subject: [PATCH 180/221] integrate new go mod --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 5b9519bcf5a..64506355d52 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/multiversx/mx-chain-logger-go v1.0.11 github.com/multiversx/mx-chain-p2p-go v1.0.16 github.com/multiversx/mx-chain-storage-go v1.0.8 - github.com/multiversx/mx-chain-vm-common-go v1.4.2-0.20230516093409-cabdea058cee + github.com/multiversx/mx-chain-vm-common-go v1.4.2 github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53 github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54 github.com/multiversx/mx-chain-vm-v1_4-go v1.4.80 diff --git a/go.sum b/go.sum index e92e2a7ca1d..9a52a93de71 100644 --- a/go.sum +++ b/go.sum @@ -630,8 +630,8 @@ github.com/multiversx/mx-chain-p2p-go v1.0.16/go.mod h1:7piVUb5Z7UHK6n3JW8yIc32R github.com/multiversx/mx-chain-storage-go v1.0.8 h1:PB9OAwZs3rWz7nybBOxVCxgrd785FUUUAsVc5JWXYCw= github.com/multiversx/mx-chain-storage-go v1.0.8/go.mod h1:lEkFYFe6taKYxqO1einNgT1esY3K9Pj6vPnoCwV9C3U= github.com/multiversx/mx-chain-vm-common-go v1.4.1/go.mod h1:K6yCdro8VohzYI6GwjGzTO+fJiPgO5coo2sgQb+zA24= -github.com/multiversx/mx-chain-vm-common-go v1.4.2-0.20230516093409-cabdea058cee h1:maRbES2A2w0oHj2AWCGWYeRmeMjMAfRQtEpzegMGVVg= -github.com/multiversx/mx-chain-vm-common-go v1.4.2-0.20230516093409-cabdea058cee/go.mod h1:K6yCdro8VohzYI6GwjGzTO+fJiPgO5coo2sgQb+zA24= +github.com/multiversx/mx-chain-vm-common-go v1.4.2 h1:ApNOwdZV2IGRrZGxQtSL4hjI0PDFrSq4p2TB3gLybxQ= +github.com/multiversx/mx-chain-vm-common-go v1.4.2/go.mod h1:K6yCdro8VohzYI6GwjGzTO+fJiPgO5coo2sgQb+zA24= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53 h1:HGbatzgIhVPJASN3ADnygH4MrKypAidOVtZkHkY/jKw= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53/go.mod 
h1:STVJW9m/TUJ9Q64W/T4P/KuhP5fPb+cCb6Q4gSehWJg= github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54 h1:bl2essObOEDwVWci71hJ2QO5AYTsKk6IlzRK0i8y63w= From 6a56c48710ef06012d86afc6e9ecf5724c0da664 Mon Sep 17 00:00:00 2001 From: robertsasu Date: Wed, 17 May 2023 09:37:00 +0300 Subject: [PATCH 181/221] fix after review --- common/enablers/enableEpochsHandler_test.go | 4 ++-- config/tomlConfig_test.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/common/enablers/enableEpochsHandler_test.go b/common/enablers/enableEpochsHandler_test.go index bad48d6421f..c80bf34cedb 100644 --- a/common/enablers/enableEpochsHandler_test.go +++ b/common/enablers/enableEpochsHandler_test.go @@ -93,7 +93,7 @@ func createEnableEpochsConfig() config.EnableEpochs { RuntimeCodeSizeFixEnableEpoch: 77, MultiClaimOnDelegationEnableEpoch: 78, KeepExecOrderOnCreatedSCRsEnableEpoch: 79, - ChangeUsernameEnableEpoch: 78, + ChangeUsernameEnableEpoch: 80, } } @@ -223,7 +223,7 @@ func TestNewEnableEpochsHandler_EpochConfirmed(t *testing.T) { t.Run("flags with == condition should be set, along with all >=", func(t *testing.T) { t.Parallel() - epoch := uint32(79) + epoch := uint32(80) cfg := createEnableEpochsConfig() cfg.StakingV2EnableEpoch = epoch cfg.ESDTEnableEpoch = epoch diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go index 98049f584ff..6ce10c80e2f 100644 --- a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -689,8 +689,8 @@ func TestEnableEpochConfig(t *testing.T) { # RuntimeMemStoreLimitEnableEpoch represents the epoch when the condition for Runtime MemStore is enabled RuntimeMemStoreLimitEnableEpoch = 63 - # SetGuardianEnableEpoch represents the epoch when guard account feature is enabled - SetGuardianEnableEpoch = 64 + # SetGuardianEnableEpoch represents the epoch when guard account feature is enabled + SetGuardianEnableEpoch = 64 # KeepExecOrderOnCreatedSCRsEnableEpoch represents the epoch when the execution order of created SCRs is ensured 
KeepExecOrderOnCreatedSCRsEnableEpoch = 64 From 20400d41d185b6d887084e8b67f6b0fa04ac8864 Mon Sep 17 00:00:00 2001 From: jules01 Date: Wed, 17 May 2023 10:10:23 +0300 Subject: [PATCH 182/221] - fixes after merge --- .../realcomponents/processorRunner.go | 20 +++++++++++-------- integrationTests/testProcessorNode.go | 4 +--- process/sync/metablock_test.go | 4 ++-- process/sync/shardblock_test.go | 4 ++-- 4 files changed, 17 insertions(+), 15 deletions(-) diff --git a/integrationTests/realcomponents/processorRunner.go b/integrationTests/realcomponents/processorRunner.go index 51b3516356d..73d77765995 100644 --- a/integrationTests/realcomponents/processorRunner.go +++ b/integrationTests/realcomponents/processorRunner.go @@ -233,7 +233,9 @@ func (pr *ProcessorRunner) createDataComponents(tb testing.TB) { CurrentEpoch: 0, CreateTrieEpochRootHashStorer: false, NodeProcessingMode: common.Normal, - SnapshotsEnabled: false, + FlagsConfigs: config.ContextFlagsConfig{ + SnapshotsEnabled: false, + }, } dataFactory, err := factoryData.NewDataComponentsFactory(argsData) @@ -402,10 +404,15 @@ func (pr *ProcessorRunner) createProcessComponents(tb testing.TB) { require.Nil(tb, err) argsProcess := factoryProcessing.ProcessComponentsFactoryArgs{ - Config: *pr.Config.GeneralConfig, - EpochConfig: *pr.Config.EpochConfig, - PrefConfigs: pr.Config.PreferencesConfig.Preferences, - ImportDBConfig: *pr.Config.ImportDbConfig, + Config: *pr.Config.GeneralConfig, + EpochConfig: *pr.Config.EpochConfig, + PrefConfigs: pr.Config.PreferencesConfig.Preferences, + ImportDBConfig: *pr.Config.ImportDbConfig, + FlagsConfig: config.ContextFlagsConfig{ + Version: "test", + WorkingDir: pr.Config.FlagsConfig.WorkingDir, + SnapshotsEnabled: false, + }, AccountsParser: accountsParser, SmartContractParser: smartContractParser, GasSchedule: gasScheduleNotifier, @@ -415,11 +422,8 @@ func (pr *ProcessorRunner) createProcessComponents(tb testing.TB) { WhiteListerVerifiedTxs: whiteListerVerifiedTxs, MaxRating: 
pr.Config.RatingsConfig.General.MaxRating, SystemSCConfig: pr.Config.SystemSCConfig, - Version: "test", ImportStartHandler: importStartHandler, - WorkingDir: pr.Config.FlagsConfig.WorkingDir, HistoryRepo: historyRepository, - SnapshotsEnabled: false, Data: pr.DataComponents, CoreData: pr.CoreComponents, Crypto: pr.CryptoComponents, diff --git a/integrationTests/testProcessorNode.go b/integrationTests/testProcessorNode.go index e7bf0a3fc84..552311f8dcd 100644 --- a/integrationTests/testProcessorNode.go +++ b/integrationTests/testProcessorNode.go @@ -2491,9 +2491,7 @@ func (tpn *TestProcessorNode) StartSync() error { return errors.New("no bootstrapper available") } - tpn.Bootstrapper.StartSyncingBlocks() - - return nil + return tpn.Bootstrapper.StartSyncingBlocks() } // LoadTxSignSkBytes alters the already generated sk/pk pair diff --git a/process/sync/metablock_test.go b/process/sync/metablock_test.go index 7104359c5ce..5281b94b432 100644 --- a/process/sync/metablock_test.go +++ b/process/sync/metablock_test.go @@ -523,7 +523,7 @@ func TestMetaBootstrap_ShouldNotNeedToSync(t *testing.T) { bs, _ := sync.NewMetaBootstrap(args) - bs.StartSyncingBlocks() + _ = bs.StartSyncingBlocks() time.Sleep(200 * time.Millisecond) _ = bs.Close() } @@ -599,7 +599,7 @@ func TestMetaBootstrap_SyncShouldSyncOneBlock(t *testing.T) { ) bs, _ := sync.NewMetaBootstrap(args) - bs.StartSyncingBlocks() + _ = bs.StartSyncingBlocks() time.Sleep(200 * time.Millisecond) diff --git a/process/sync/shardblock_test.go b/process/sync/shardblock_test.go index 768a13a3342..ed5872ebdb1 100644 --- a/process/sync/shardblock_test.go +++ b/process/sync/shardblock_test.go @@ -654,7 +654,7 @@ func TestBootstrap_ShouldNotNeedToSync(t *testing.T) { bs, _ := sync.NewShardBootstrap(args) - bs.StartSyncingBlocks() + _ = bs.StartSyncingBlocks() time.Sleep(200 * time.Millisecond) _ = bs.Close() } @@ -749,7 +749,7 @@ func TestBootstrap_SyncShouldSyncOneBlock(t *testing.T) { ) bs, _ := sync.NewShardBootstrap(args) - 
bs.StartSyncingBlocks() + _ = bs.StartSyncingBlocks() time.Sleep(200 * time.Millisecond) From 6b0650a2211b3064a1f40a486b79871e95d65655 Mon Sep 17 00:00:00 2001 From: robertsasu Date: Wed, 17 May 2023 10:45:39 +0300 Subject: [PATCH 183/221] fix tests --- .../multiShard/smartContract/dns/dns_test.go | 6 +++++- integrationTests/testInitializer.go | 16 +++++++++++++--- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/integrationTests/multiShard/smartContract/dns/dns_test.go b/integrationTests/multiShard/smartContract/dns/dns_test.go index 63d7ce13a60..4fdf6eb52f5 100644 --- a/integrationTests/multiShard/smartContract/dns/dns_test.go +++ b/integrationTests/multiShard/smartContract/dns/dns_test.go @@ -129,11 +129,15 @@ func prepareNodesAndPlayers() ([]*integrationTests.TestProcessorNode, []*integra numMetachainNodes := 1 genesisFile := "smartcontracts.json" - nodes, _ := integrationTests.CreateNodesWithFullGenesis( + enableEpochsConfig := integrationTests.GetDefaultEnableEpochsConfig() + enableEpochsConfig.StakingV2EnableEpoch = integrationTests.UnreachableEpoch + enableEpochsConfig.ChangeUsernameEnableEpoch = integrationTests.UnreachableEpoch + nodes, _ := integrationTests.CreateNodesWithFullGenesisCustomEnableEpochs( numOfShards, nodesPerShard, numMetachainNodes, genesisFile, + enableEpochsConfig, ) for _, node := range nodes { diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index dac26a1b4be..7fa34f57bc5 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -1462,11 +1462,21 @@ func CreateNodesWithFullGenesis( numMetaChainNodes int, genesisFile string, ) ([]*TestProcessorNode, *TestProcessorNode) { - nodes := make([]*TestProcessorNode, numOfShards*nodesPerShard+numMetaChainNodes) - connectableNodes := make([]Connectable, len(nodes)) - enableEpochsConfig := GetDefaultEnableEpochsConfig() enableEpochsConfig.StakingV2EnableEpoch = UnreachableEpoch + return 
CreateNodesWithFullGenesisCustomEnableEpochs(numOfShards, nodesPerShard, numMetaChainNodes, genesisFile, enableEpochsConfig) +} + +// CreateNodesWithFullGenesisCustomEnableEpochs creates multiple nodes in different shards +func CreateNodesWithFullGenesisCustomEnableEpochs( + numOfShards int, + nodesPerShard int, + numMetaChainNodes int, + genesisFile string, + enableEpochsConfig *config.EnableEpochs, +) ([]*TestProcessorNode, *TestProcessorNode) { + nodes := make([]*TestProcessorNode, numOfShards*nodesPerShard+numMetaChainNodes) + connectableNodes := make([]Connectable, len(nodes)) economicsConfig := createDefaultEconomicsConfig() economicsConfig.GlobalSettings.YearSettings = append( From 614ac64be449e334eaf2dadf5042b87706626a21 Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Wed, 17 May 2023 10:53:41 +0300 Subject: [PATCH 184/221] proper core release --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index dba5ede87df..298dd2714d6 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-core-go v1.2.2-0.20230428142157-76e19ecd04ac + github.com/multiversx/mx-chain-core-go v1.2.3 github.com/multiversx/mx-chain-crypto-go v1.2.6 github.com/multiversx/mx-chain-es-indexer-go v1.4.1 github.com/multiversx/mx-chain-logger-go v1.0.11 diff --git a/go.sum b/go.sum index f0c74309ddb..c1094c8e3d7 100644 --- a/go.sum +++ b/go.sum @@ -618,8 +618,8 @@ github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUY github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go 
v1.2.2-0.20230428142157-76e19ecd04ac h1:SYlgvr/2nMupmPvZnroIXjAAVgLSZCFxYgc8XhKHzd0= -github.com/multiversx/mx-chain-core-go v1.2.2-0.20230428142157-76e19ecd04ac/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= +github.com/multiversx/mx-chain-core-go v1.2.3 h1:ArNROsob/2PDdVM4SHXJPHJ1YzIZPWqvrCX/A77Dqec= +github.com/multiversx/mx-chain-core-go v1.2.3/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-crypto-go v1.2.6 h1:yxsjAQGh62los+iYmORMfh3w9qen0xbYlmwU0juNSeg= github.com/multiversx/mx-chain-crypto-go v1.2.6/go.mod h1:rOj0Rr19HTOYt9YTeym7RKxlHt91NXln3LVKjHKVmA0= github.com/multiversx/mx-chain-es-indexer-go v1.4.1 h1:gD/D7xZP7OL8L/ZZ3SoOfKjVHrU0iUxIG2AbidHFTUc= From 0f483a6e00858b962b34c8bc38166aaffdb9f3fe Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Wed, 17 May 2023 11:19:48 +0300 Subject: [PATCH 185/221] fix enable epochs formatting --- cmd/node/config/enableEpochs.toml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/cmd/node/config/enableEpochs.toml b/cmd/node/config/enableEpochs.toml index 4d088612f70..6eacf08cc7a 100644 --- a/cmd/node/config/enableEpochs.toml +++ b/cmd/node/config/enableEpochs.toml @@ -239,13 +239,13 @@ # AlwaysSaveTokenMetaDataEnableEpoch represents the epoch when the token metadata is always saved AlwaysSaveTokenMetaDataEnableEpoch = 1 - # RuntimeCodeSizeFixEnableEpoch represents the epoch when the code size fix in the VM is enabled - RuntimeCodeSizeFixEnableEpoch = 1 +# RuntimeCodeSizeFixEnableEpoch represents the epoch when the code size fix in the VM is enabled +RuntimeCodeSizeFixEnableEpoch = 1 - # KeepExecOrderOnCreatedSCRsEnableEpoch represents the epoch when the execution order of created SCRs is ensured - KeepExecOrderOnCreatedSCRsEnableEpoch = 2 +# KeepExecOrderOnCreatedSCRsEnableEpoch represents the epoch when the execution order of created SCRs is ensured +KeepExecOrderOnCreatedSCRsEnableEpoch = 2 - # MultiClaimOnDelegationEnableEpoch represents the 
epoch when the multi claim on delegation is enabled +# MultiClaimOnDelegationEnableEpoch represents the epoch when the multi claim on delegation is enabled MultiClaimOnDelegationEnableEpoch = 3 # BLSMultiSignerEnableEpoch represents the activation epoch for different types of BLS multi-signers From 850eeba69598d0e63f4bbdc24ffe13fe8ff169da Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Wed, 17 May 2023 11:21:37 +0300 Subject: [PATCH 186/221] fix enable epochs formatting - take 2 --- cmd/node/config/enableEpochs.toml | 40 +++++++++++++++---------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/cmd/node/config/enableEpochs.toml b/cmd/node/config/enableEpochs.toml index 6eacf08cc7a..50d020b43bc 100644 --- a/cmd/node/config/enableEpochs.toml +++ b/cmd/node/config/enableEpochs.toml @@ -239,32 +239,32 @@ # AlwaysSaveTokenMetaDataEnableEpoch represents the epoch when the token metadata is always saved AlwaysSaveTokenMetaDataEnableEpoch = 1 -# RuntimeCodeSizeFixEnableEpoch represents the epoch when the code size fix in the VM is enabled -RuntimeCodeSizeFixEnableEpoch = 1 + # RuntimeCodeSizeFixEnableEpoch represents the epoch when the code size fix in the VM is enabled + RuntimeCodeSizeFixEnableEpoch = 1 -# KeepExecOrderOnCreatedSCRsEnableEpoch represents the epoch when the execution order of created SCRs is ensured -KeepExecOrderOnCreatedSCRsEnableEpoch = 2 + # KeepExecOrderOnCreatedSCRsEnableEpoch represents the epoch when the execution order of created SCRs is ensured + KeepExecOrderOnCreatedSCRsEnableEpoch = 2 -# MultiClaimOnDelegationEnableEpoch represents the epoch when the multi claim on delegation is enabled -MultiClaimOnDelegationEnableEpoch = 3 + # MultiClaimOnDelegationEnableEpoch represents the epoch when the multi claim on delegation is enabled + MultiClaimOnDelegationEnableEpoch = 3 -# BLSMultiSignerEnableEpoch represents the activation epoch for different types of BLS multi-signers -BLSMultiSignerEnableEpoch = [ - { EnableEpoch = 0, 
Type = "no-KOSK" }, - { EnableEpoch = 1, Type = "KOSK" } -] + # BLSMultiSignerEnableEpoch represents the activation epoch for different types of BLS multi-signers + BLSMultiSignerEnableEpoch = [ + { EnableEpoch = 0, Type = "no-KOSK" }, + { EnableEpoch = 1, Type = "KOSK" } + ] -# SetGuardianEnableEpoch represents the epoch when the guard account feature is enabled in the protocol -SetGuardianEnableEpoch = 2 + # SetGuardianEnableEpoch represents the epoch when the guard account feature is enabled in the protocol + SetGuardianEnableEpoch = 2 -# ConsistentTokensValuesLengthCheckEnableEpoch represents the epoch when the consistent tokens values length check is enabled -ConsistentTokensValuesLengthCheckEnableEpoch = 2 + # ConsistentTokensValuesLengthCheckEnableEpoch represents the epoch when the consistent tokens values length check is enabled + ConsistentTokensValuesLengthCheckEnableEpoch = 2 -# MaxNodesChangeEnableEpoch holds configuration for changing the maximum number of nodes and the enabling epoch -MaxNodesChangeEnableEpoch = [ - { EpochEnable = 0, MaxNumNodes = 36, NodesToShufflePerShard = 4 }, - { EpochEnable = 1, MaxNumNodes = 56, NodesToShufflePerShard = 2 } -] + # MaxNodesChangeEnableEpoch holds configuration for changing the maximum number of nodes and the enabling epoch + MaxNodesChangeEnableEpoch = [ + { EpochEnable = 0, MaxNumNodes = 36, NodesToShufflePerShard = 4 }, + { EpochEnable = 1, MaxNumNodes = 56, NodesToShufflePerShard = 2 } + ] [GasSchedule] # GasScheduleByEpochs holds the configuration for the gas schedule that will be applied from specific epochs From fad08d233fef537425fc4392ab1cba3cbbc89b04 Mon Sep 17 00:00:00 2001 From: MariusC Date: Wed, 17 May 2023 17:37:07 +0300 Subject: [PATCH 187/221] FIX: After merge --- api/groups/blockGroup_test.go | 1 - factory/processing/export_test.go | 7 -- factory/processing/processComponents.go | 8 +- factory/processing/processComponents_test.go | 2 +- go.mod | 4 +- go.sum | 81 ++++++++++++-------- 
outport/process/outportDataProvider.go | 1 - testscommon/components/components.go | 3 - 8 files changed, 57 insertions(+), 50 deletions(-) diff --git a/api/groups/blockGroup_test.go b/api/groups/blockGroup_test.go index 6115abc211e..696db92a748 100644 --- a/api/groups/blockGroup_test.go +++ b/api/groups/blockGroup_test.go @@ -10,7 +10,6 @@ import ( "strings" "testing" - "github.com/gin-gonic/gin" "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" apiErrors "github.com/multiversx/mx-chain-go/api/errors" diff --git a/factory/processing/export_test.go b/factory/processing/export_test.go index f1c53fee475..702cebee361 100644 --- a/factory/processing/export_test.go +++ b/factory/processing/export_test.go @@ -1,8 +1,6 @@ package processing import ( - "github.com/multiversx/mx-chain-core-go/data" - "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/factory" @@ -49,8 +47,3 @@ func (pcf *processComponentsFactory) NewBlockProcessor( func (pcf *processComponentsFactory) CreateTxSimulatorProcessor() (factory.TransactionSimulatorProcessor, process.VirtualMachinesContainerFactory, error) { return pcf.createTxSimulatorProcessor() } - -// IndexGenesisBlocks - -func (pcf *processComponentsFactory) IndexGenesisBlocks(genesisBlocks map[uint32]data.HeaderHandler, indexingData map[uint32]*genesis.IndexingData) error { - return pcf.indexGenesisBlocks(genesisBlocks, indexingData, map[string]*alteredAccount.AlteredAccount{}) -} diff --git a/factory/processing/processComponents.go b/factory/processing/processComponents.go index 5e785822291..822a6dc4550 100644 --- a/factory/processing/processComponents.go +++ b/factory/processing/processComponents.go @@ -869,7 +869,7 @@ func (pcf *processComponentsFactory) indexAndReturnGenesisAccounts() (map[string rootHash, err := 
pcf.state.AccountsAdapter().RootHash() if err != nil { - return map[string]*outport.AlteredAccount{}, err + return map[string]*alteredAccount.AlteredAccount{}, err } leavesChannels := &common.TrieIteratorChannels{ @@ -878,7 +878,7 @@ func (pcf *processComponentsFactory) indexAndReturnGenesisAccounts() (map[string } err = pcf.state.AccountsAdapter().GetAllLeaves(leavesChannels, context.Background(), rootHash) if err != nil { - return map[string]*outport.AlteredAccount{}, err + return map[string]*alteredAccount.AlteredAccount{}, err } genesisAccounts := make(map[string]*alteredAccount.AlteredAccount, 0) @@ -891,7 +891,7 @@ func (pcf *processComponentsFactory) indexAndReturnGenesisAccounts() (map[string encodedAddress, errEncode := pcf.coreData.AddressPubKeyConverter().Encode(userAccount.AddressBytes()) if errEncode != nil { - return map[string]*outport.AlteredAccount{}, errEncode + return map[string]*alteredAccount.AlteredAccount{}, errEncode } genesisAccounts[encodedAddress] = &alteredAccount.AlteredAccount{ @@ -907,7 +907,7 @@ func (pcf *processComponentsFactory) indexAndReturnGenesisAccounts() (map[string err = leavesChannels.ErrChan.ReadFromChanNonBlocking() if err != nil { - return map[string]*outport.AlteredAccount{}, err + return map[string]*alteredAccount.AlteredAccount{}, err } shardID := pcf.bootstrapComponents.ShardCoordinator().SelfId() diff --git a/factory/processing/processComponents_test.go b/factory/processing/processComponents_test.go index c6d6671dac3..ebab46319fa 100644 --- a/factory/processing/processComponents_test.go +++ b/factory/processing/processComponents_test.go @@ -87,7 +87,7 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto return []*dataBlock.MiniBlock{ {}, }, - map[uint32]*outportCore.Pool{ + map[uint32]*outportCore.TransactionPool{ 0: {}, }, nil }, diff --git a/go.mod b/go.mod index 661d33d48d6..707ba33b1d1 100644 --- a/go.mod +++ b/go.mod @@ -14,8 +14,8 @@ require ( github.com/gorilla/websocket v1.5.0 
github.com/mitchellh/mapstructure v1.5.0 github.com/multiversx/mx-chain-communication-go v0.0.0-20230512095548-5bc637293104 - github.com/multiversx/mx-chain-core-go v1.2.1-0.20230510143029-ab37792342df - github.com/multiversx/mx-chain-crypto-go v1.2.5 + github.com/multiversx/mx-chain-core-go v1.2.4-0.20230517135533-2e54a17cd912 + github.com/multiversx/mx-chain-crypto-go v1.2.6 github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96 github.com/multiversx/mx-chain-logger-go v1.0.11 github.com/multiversx/mx-chain-p2p-go v1.0.16 diff --git a/go.sum b/go.sum index b3eca275b43..477090db584 100644 --- a/go.sum +++ b/go.sum @@ -37,6 +37,7 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +filippo.io/edwards25519 v1.0.0/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= @@ -167,6 +168,8 @@ github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwU github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.14.3 
h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= @@ -274,8 +277,9 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -337,8 +341,9 @@ github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUP github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= -github.com/ipfs/go-cid v0.2.0 h1:01JTiihFq9en9Vz0lc0VDWvZe/uBonGpzo4THP0vcQ0= github.com/ipfs/go-cid v0.2.0/go.mod h1:P+HXFDF4CVhaVayiEb4wkAy7zBHxBwsJyt0Y5U6MLro= +github.com/ipfs/go-cid v0.3.2 h1:OGgOd+JCFM+y1DjWPmVH+2/4POtpDzwcr7VgnB7mZXc= 
+github.com/ipfs/go-cid v0.3.2/go.mod h1:gQ8pKqT/sUxGY+tIwy1RPpAojYu7jAyCp5Tz1svoupw= github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= @@ -371,8 +376,9 @@ github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Ax github.com/ipfs/go-log/v2 v2.5.0/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= -github.com/ipld/go-ipld-prime v0.9.0 h1:N2OjJMb+fhyFPwPnVvJcWU/NsumP8etal+d2v3G4eww= github.com/ipld/go-ipld-prime v0.9.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= +github.com/ipld/go-ipld-prime v0.19.0 h1:5axC7rJmPc17Emw6TelxGwnzALk0PdupZ2oj2roDj04= +github.com/ipld/go-ipld-prime v0.19.0/go.mod h1:Q9j3BaVXwaA3o5JUDNvptDDr/x8+F7FG6XJ8WI3ILg4= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= @@ -589,8 +595,9 @@ github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPw github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI= github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8= github.com/multiformats/go-multicodec v0.4.1/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= -github.com/multiformats/go-multicodec v0.5.0 h1:EgU6cBe/D7WRwQb1KmnBvU7lrcFGMggZVTPtOW9dDHs= github.com/multiformats/go-multicodec v0.5.0/go.mod h1:DiY2HFaEp5EhEXb/iYzVAunmyX/aSFMxq2KMKfWEues= +github.com/multiformats/go-multicodec v0.6.0 
h1:KhH2kSuCARyuJraYMFxrNO3DqIaYhOdS039kbhgVwpE= +github.com/multiformats/go-multicodec v0.6.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= @@ -612,32 +619,32 @@ github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7 github.com/multiversx/mx-chain-communication-go v0.0.0-20230512095548-5bc637293104 h1:oFsYNkebv7TQygdEjN4aGgQ8ICLPmS9bDJmzlOHtU2Y= github.com/multiversx/mx-chain-communication-go v0.0.0-20230512095548-5bc637293104/go.mod h1:GPHOm4HSXbvC0IotMziWXQmhtsUe69ScBPYsb+mF9bk= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.1.31/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.1.34/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.0/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1-0.20230329082847-b78e96c3ad5a/go.mod h1:/lovncjwo+pXQ7IAERwNzwCifeH7SAWk0DGqjorX2bc= -github.com/multiversx/mx-chain-core-go v1.2.1-0.20230510143029-ab37792342df h1:ADV4QOB2Tg42SYyVmYNq4FBXCc4bzD5EA66IFhF+fb0= github.com/multiversx/mx-chain-core-go v1.2.1-0.20230510143029-ab37792342df/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= -github.com/multiversx/mx-chain-crypto-go v1.2.5 h1:tuq3BUNMhKud5DQbZi9DiVAAHUXypizy8zPH0NpTGZk= -github.com/multiversx/mx-chain-crypto-go v1.2.5/go.mod h1:teqhNyWEqfMPgNn8sgWXlgtJ1a36jGCnhs/tRpXW6r4= +github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= +github.com/multiversx/mx-chain-core-go v1.2.4-0.20230517135533-2e54a17cd912 
h1:7dgFuxG2sUbQEFA4y36NAoRjuN+Z3PlY0znmmgr7ZSo= +github.com/multiversx/mx-chain-core-go v1.2.4-0.20230517135533-2e54a17cd912/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= +github.com/multiversx/mx-chain-crypto-go v1.2.6 h1:yxsjAQGh62los+iYmORMfh3w9qen0xbYlmwU0juNSeg= +github.com/multiversx/mx-chain-crypto-go v1.2.6/go.mod h1:rOj0Rr19HTOYt9YTeym7RKxlHt91NXln3LVKjHKVmA0= github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96 h1:okIfLr+NqX04eHNp9k97KuLhpYfLJOjmGZaOia9xcGg= github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96/go.mod h1:Y6jgeoMBpDCtm7lurtChhgPyhpQ0GF5OruW/tl/++JI= github.com/multiversx/mx-chain-logger-go v1.0.11 h1:DFsHa+sc5fKwhDR50I8uBM99RTDTEW68ESyr5ALRDwE= github.com/multiversx/mx-chain-logger-go v1.0.11/go.mod h1:1srDkP0DQucWQ+rYfaq0BX2qLnULsUdRPADpYUTM6dA= -github.com/multiversx/mx-chain-p2p-go v1.0.15 h1:H7273huZG/zAR6MPvWuXwBEVBsJWH1MeSIDshYV0nh0= -github.com/multiversx/mx-chain-p2p-go v1.0.15/go.mod h1:hUE4H8kGJk3u9gTqeetF3uhjJpnfdV/hALKsJ6bMI+8= -github.com/multiversx/mx-chain-storage-go v1.0.7 h1:UqLo/OLTD3IHiE/TB/SEdNRV1GG2f1R6vIP5ehHwCNw= -github.com/multiversx/mx-chain-storage-go v1.0.7/go.mod h1:gtKoV32Cg2Uy8deHzF8Ud0qAl0zv92FvWgPSYIP0Zmg= -github.com/multiversx/mx-chain-vm-common-go v1.3.36/go.mod h1:sZ2COLCxvf2GxAAJHGmGqWybObLtFuk2tZUyGqnMXE8= -github.com/multiversx/mx-chain-vm-common-go v1.3.37/go.mod h1:sZ2COLCxvf2GxAAJHGmGqWybObLtFuk2tZUyGqnMXE8= -github.com/multiversx/mx-chain-vm-common-go v1.4.0 h1:0i0cJZJOXGzqYzwtKFHSr2yGmnFAdizOuISK8HgsnYo= +github.com/multiversx/mx-chain-p2p-go v1.0.16 h1:iMK8KUi006/avVcmecnk7lqbDCRL0BN04vgepoVLlyY= +github.com/multiversx/mx-chain-p2p-go v1.0.16/go.mod h1:7piVUb5Z7UHK6n3JW8yIc32RdDFZu7GivRY29q0r2gI= +github.com/multiversx/mx-chain-storage-go v1.0.8 h1:PB9OAwZs3rWz7nybBOxVCxgrd785FUUUAsVc5JWXYCw= +github.com/multiversx/mx-chain-storage-go v1.0.8/go.mod h1:lEkFYFe6taKYxqO1einNgT1esY3K9Pj6vPnoCwV9C3U= 
github.com/multiversx/mx-chain-vm-common-go v1.4.0/go.mod h1:odBJC92ANA8zLtPh/wwajUUGJOaS88F5QYGf0t8Wgzw= -github.com/multiversx/mx-chain-vm-v1_2-go v1.2.50 h1:ScUq7/wq78vthMTQ6v5Ux1DvSMQMHxQ2Sl7aPP26q1w= -github.com/multiversx/mx-chain-vm-v1_2-go v1.2.50/go.mod h1:e3uYdgoKzs3puaznbmSjDcRisJc5Do4tpg7VqyYwoek= -github.com/multiversx/mx-chain-vm-v1_3-go v1.3.51 h1:axtp5/mpA+xYJ1cu4KtAGETV4t6v6/tNfQh0HCclBYY= -github.com/multiversx/mx-chain-vm-v1_3-go v1.3.51/go.mod h1:oKj32V2nkd+KGNOL6emnwVkDRPpciwHHDqBmeorcL8k= -github.com/multiversx/mx-chain-vm-v1_4-go v1.4.77 h1:3Yh4brS5/Jye24l5AKy+Q6Yci6Rv55pHyj9/GR3AYos= -github.com/multiversx/mx-chain-vm-v1_4-go v1.4.77/go.mod h1:3IaAOHc1JfxL5ywQZIrcaHQu5+CVdZNDaoY64NGOtUE= +github.com/multiversx/mx-chain-vm-common-go v1.4.1/go.mod h1:K6yCdro8VohzYI6GwjGzTO+fJiPgO5coo2sgQb+zA24= +github.com/multiversx/mx-chain-vm-common-go v1.4.2 h1:ApNOwdZV2IGRrZGxQtSL4hjI0PDFrSq4p2TB3gLybxQ= +github.com/multiversx/mx-chain-vm-common-go v1.4.2/go.mod h1:K6yCdro8VohzYI6GwjGzTO+fJiPgO5coo2sgQb+zA24= +github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53 h1:HGbatzgIhVPJASN3ADnygH4MrKypAidOVtZkHkY/jKw= +github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53/go.mod h1:STVJW9m/TUJ9Q64W/T4P/KuhP5fPb+cCb6Q4gSehWJg= +github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54 h1:bl2essObOEDwVWci71hJ2QO5AYTsKk6IlzRK0i8y63w= +github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54/go.mod h1:1rgU8wXdn76S7rZx+4YS6ObK+M1XiSdPoPmXVq8fuZE= +github.com/multiversx/mx-chain-vm-v1_4-go v1.4.80 h1:iiOXTcwvfjQXlchlVnSdNeqHYKVn/k7s/MsHfk/wrr0= +github.com/multiversx/mx-chain-vm-v1_4-go v1.4.80/go.mod h1:Be8y+QBPSKacW2TJaaQSeKYNGtCenFt4dpBOAnICAcc= github.com/multiversx/mx-components-big-int v0.1.1 h1:695mYPKYOrmGEGgRH4/pZruDoe3CPP1LHrBxKfvj5l4= github.com/multiversx/mx-components-big-int v0.1.1/go.mod h1:0QrcFdfeLgJ/am10HGBeH0G0DNF+0Qx1E4DS/iozQls= github.com/multiversx/protobuf v1.3.2 h1:RaNkxvGTGbA0lMcnHAN24qE1G1i+Xs5yHA6MDvQ4mSM= @@ -687,8 +694,9 @@ github.com/pkg/errors v0.9.1 
h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1 h1:CskT+S6Ay54OwxBGB0R3Rsx4Muto6UnEYTyKJbyRIAI= github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e h1:ZOcivgkkFRnjfoTcGsDq3UQYiBmekwLA+qg0OjyB/ls= +github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -759,8 +767,9 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.13.0 h1:Dx1kYM01xsSqKPno3aqLnrwac2LetPvN23diwyr69Qs= +github.com/smartystreets/assertions v1.13.0/go.mod h1:wDmR7qL282YbGsPy6H/yAsesrxfxaaSlJazyFLYVFx8= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod 
h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= @@ -822,6 +831,7 @@ github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60Nt github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= github.com/wangjia184/sortedset v0.0.0-20160527075905-f5d03557ba30/go.mod h1:YkocrP2K2tcw938x9gCOmT5G5eCD6jsTz0SZuyAqwIE= +github.com/warpfork/go-testmark v0.10.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a h1:G++j5e0OC488te356JvdhaM8YS6nMsjLAYF7JxCv07w= github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= @@ -901,8 +911,9 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= +golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod 
h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -938,8 +949,9 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -996,8 +1008,10 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.0.0-20220812174116-3211cb980234/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 
v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1020,8 +1034,9 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1099,14 +1114,16 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1117,8 +1134,9 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1176,8 +1194,9 @@ golang.org/x/tools v0.1.1/go.mod 
h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= -golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/outport/process/outportDataProvider.go b/outport/process/outportDataProvider.go index b25bb460186..6250bfa0960 100644 --- a/outport/process/outportDataProvider.go +++ b/outport/process/outportDataProvider.go @@ -131,7 +131,6 @@ func (odp *outportDataProvider) PrepareOutportSaveBlockData(arg ArgPrepareOutpor AlteredAccounts: alteredAccounts, NotarizedHeadersHashes: arg.NotarizedHeadersHashes, NumberOfShards: odp.numOfShards, - IsImportDB: odp.isImportDBMode, SignersIndexes: signersIndexes, HighestFinalBlockNonce: arg.HighestFinalBlockNonce, diff --git a/testscommon/components/components.go b/testscommon/components/components.go index 5c4fa618f82..338cd69d34d 100644 --- a/testscommon/components/components.go +++ b/testscommon/components/components.go @@ -661,9 +661,6 @@ func GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator shardin RequestTimeoutSec: 30, MarshallerType: "json", }, - WebSocketConnector: config.WebSocketDriverConfig{ - MarshallerType: "json", 
- }, }, EconomicsConfig: config.EconomicsConfig{}, ShardCoordinator: mock.NewMultiShardsCoordinatorMock(2), From 0b2e12f898afe1efcc2c237fb0ac8951c12f21a0 Mon Sep 17 00:00:00 2001 From: MariusC Date: Wed, 17 May 2023 17:38:54 +0300 Subject: [PATCH 188/221] FIX: After merge 2 --- api/groups/blockGroup_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/api/groups/blockGroup_test.go b/api/groups/blockGroup_test.go index 696db92a748..b190c2f0561 100644 --- a/api/groups/blockGroup_test.go +++ b/api/groups/blockGroup_test.go @@ -251,7 +251,7 @@ func TestBlockGroup_getAlteredAccountsByNonce(t *testing.T) { t.Parallel() facade := &mock.FacadeStub{ - GetAlteredAccountsForBlockCalled: func(options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) { + GetAlteredAccountsForBlockCalled: func(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) { return nil, expectedErr }, } @@ -320,7 +320,7 @@ func TestBlockGroup_getAlteredAccountsByHash(t *testing.T) { providedHash := hex.EncodeToString([]byte("hash")) facade := &mock.FacadeStub{ - GetAlteredAccountsForBlockCalled: func(options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) { + GetAlteredAccountsForBlockCalled: func(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) { return nil, expectedErr }, } @@ -352,7 +352,7 @@ func TestBlockGroup_getAlteredAccountsByHash(t *testing.T) { } facade := &mock.FacadeStub{ - GetAlteredAccountsForBlockCalled: func(options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) { + GetAlteredAccountsForBlockCalled: func(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) { require.Equal(t, providedHash, hex.EncodeToString(options.Hash)) require.Equal(t, expectedOptions, options) return expectedResponse, nil From b84e0422e6653d658441ff0c0e0dd782e64518ff Mon Sep 17 00:00:00 2001 From: 
BeniaminDrasovean Date: Thu, 18 May 2023 12:05:46 +0300 Subject: [PATCH 189/221] fix after merge and update go mod --- factory/processing/txSimulatorProcessComponents.go | 2 ++ go.mod | 6 +++--- go.sum | 11 ++++++----- 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/factory/processing/txSimulatorProcessComponents.go b/factory/processing/txSimulatorProcessComponents.go index d6a5568697e..023495f6cb5 100644 --- a/factory/processing/txSimulatorProcessComponents.go +++ b/factory/processing/txSimulatorProcessComponents.go @@ -16,6 +16,7 @@ import ( "github.com/multiversx/mx-chain-go/process/transactionLog" "github.com/multiversx/mx-chain-go/process/txsimulator" "github.com/multiversx/mx-chain-go/state" + "github.com/multiversx/mx-chain-go/state/syncer" "github.com/multiversx/mx-chain-go/storage" storageFactory "github.com/multiversx/mx-chain-go/storage/factory" "github.com/multiversx/mx-chain-go/storage/storageunit" @@ -261,6 +262,7 @@ func (pcf *processComponentsFactory) createArgsTxSimulatorProcessorShard( vmContainerFactory, err := pcf.createVMFactoryShard( accountsAdapter, + syncer.NewMissingTrieNodesNotifier(), builtInFuncFactory.BuiltInFunctionContainer(), esdtTransferParser, pcf.coreData.WasmVMChangeLocker(), diff --git a/go.mod b/go.mod index dd18bdefbc2..0826e8cdfe6 100644 --- a/go.mod +++ b/go.mod @@ -13,13 +13,13 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-core-go v1.2.3 + github.com/multiversx/mx-chain-core-go v1.2.4-0.20230518082654-bf2789086b0f github.com/multiversx/mx-chain-crypto-go v1.2.6 github.com/multiversx/mx-chain-es-indexer-go v1.4.1 github.com/multiversx/mx-chain-logger-go v1.0.11 github.com/multiversx/mx-chain-p2p-go v1.0.16 - github.com/multiversx/mx-chain-storage-go v1.0.8 - github.com/multiversx/mx-chain-vm-common-go v1.4.2 + github.com/multiversx/mx-chain-storage-go v1.0.9-0.20230518083218-98a7f2c893e5 + 
github.com/multiversx/mx-chain-vm-common-go v1.4.3-0.20230518083542-c2304957adc2 github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53 github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54 github.com/multiversx/mx-chain-vm-v1_4-go v1.4.80 diff --git a/go.sum b/go.sum index 6acd543c0c7..82d8c1352b4 100644 --- a/go.sum +++ b/go.sum @@ -618,8 +618,8 @@ github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUY github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.2.3 h1:ArNROsob/2PDdVM4SHXJPHJ1YzIZPWqvrCX/A77Dqec= -github.com/multiversx/mx-chain-core-go v1.2.3/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= +github.com/multiversx/mx-chain-core-go v1.2.4-0.20230518082654-bf2789086b0f h1:Lpp6qH6GnX8BK+mJ2l/qL5y/n098+3W4h5d/loaro0Y= +github.com/multiversx/mx-chain-core-go v1.2.4-0.20230518082654-bf2789086b0f/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-crypto-go v1.2.6 h1:yxsjAQGh62los+iYmORMfh3w9qen0xbYlmwU0juNSeg= github.com/multiversx/mx-chain-crypto-go v1.2.6/go.mod h1:rOj0Rr19HTOYt9YTeym7RKxlHt91NXln3LVKjHKVmA0= github.com/multiversx/mx-chain-es-indexer-go v1.4.1 h1:gD/D7xZP7OL8L/ZZ3SoOfKjVHrU0iUxIG2AbidHFTUc= @@ -628,11 +628,12 @@ github.com/multiversx/mx-chain-logger-go v1.0.11 h1:DFsHa+sc5fKwhDR50I8uBM99RTDT github.com/multiversx/mx-chain-logger-go v1.0.11/go.mod h1:1srDkP0DQucWQ+rYfaq0BX2qLnULsUdRPADpYUTM6dA= github.com/multiversx/mx-chain-p2p-go v1.0.16 h1:iMK8KUi006/avVcmecnk7lqbDCRL0BN04vgepoVLlyY= github.com/multiversx/mx-chain-p2p-go v1.0.16/go.mod h1:7piVUb5Z7UHK6n3JW8yIc32RdDFZu7GivRY29q0r2gI= -github.com/multiversx/mx-chain-storage-go v1.0.8 h1:PB9OAwZs3rWz7nybBOxVCxgrd785FUUUAsVc5JWXYCw= 
github.com/multiversx/mx-chain-storage-go v1.0.8/go.mod h1:lEkFYFe6taKYxqO1einNgT1esY3K9Pj6vPnoCwV9C3U= +github.com/multiversx/mx-chain-storage-go v1.0.9-0.20230518083218-98a7f2c893e5 h1:rScGG2KmUduMXBqqp5zVo4PyanYfbJlxk35rYLMMvfA= +github.com/multiversx/mx-chain-storage-go v1.0.9-0.20230518083218-98a7f2c893e5/go.mod h1:rtJdopIbIKYLcA2alB6FCnNaYxJI9oziLtqViKsePQs= github.com/multiversx/mx-chain-vm-common-go v1.4.1/go.mod h1:K6yCdro8VohzYI6GwjGzTO+fJiPgO5coo2sgQb+zA24= -github.com/multiversx/mx-chain-vm-common-go v1.4.2 h1:ApNOwdZV2IGRrZGxQtSL4hjI0PDFrSq4p2TB3gLybxQ= -github.com/multiversx/mx-chain-vm-common-go v1.4.2/go.mod h1:K6yCdro8VohzYI6GwjGzTO+fJiPgO5coo2sgQb+zA24= +github.com/multiversx/mx-chain-vm-common-go v1.4.3-0.20230518083542-c2304957adc2 h1:Iv2mCubTRBAUj3AzwUoRauDC7NqS2ENxmbXbRcejHDQ= +github.com/multiversx/mx-chain-vm-common-go v1.4.3-0.20230518083542-c2304957adc2/go.mod h1:TtVyrNticDW82wU9blIwPNZyNVJMcpE7+pgqZj2Efs0= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53 h1:HGbatzgIhVPJASN3ADnygH4MrKypAidOVtZkHkY/jKw= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53/go.mod h1:STVJW9m/TUJ9Q64W/T4P/KuhP5fPb+cCb6Q4gSehWJg= github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54 h1:bl2essObOEDwVWci71hJ2QO5AYTsKk6IlzRK0i8y63w= From a58411d374dee47dc94ef8bc5c1b7df210d0a4a4 Mon Sep 17 00:00:00 2001 From: Iuga Mihai Date: Thu, 18 May 2023 12:45:52 +0300 Subject: [PATCH 190/221] fix comment --- cmd/node/config/external.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/node/config/external.toml b/cmd/node/config/external.toml index a7a46adf77f..8f0bd3e9817 100644 --- a/cmd/node/config/external.toml +++ b/cmd/node/config/external.toml @@ -44,7 +44,7 @@ [HostDriverConfig] # This flag shall only be used for observer nodes Enabled = false - # This flag will start the WebSocket connector as server or client( can be "client" or "server") + # This flag will start the WebSocket connector as server or client (can be "client" or "server") Mode = "client" 
# URL for the WebSocket client/server connection # This value represents the IP address and port number that the WebSocket client or server will use to establish a connection. From 62ab595f4aa7e30e57d59f2b6bd9a5348b9de3ff Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Thu, 18 May 2023 13:08:45 +0300 Subject: [PATCH 191/221] fixes after merge --- config/tomlConfig_test.go | 110 +++++++++++++++++++------------------- go.mod | 2 +- go.sum | 7 +-- 3 files changed, 60 insertions(+), 59 deletions(-) diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go index 40a7fdd0278..473df5cb187 100644 --- a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -748,61 +748,61 @@ func TestEnableEpochConfig(t *testing.T) { NodesToShufflePerShard: 80, }, }, - BlockGasAndFeesReCheckEnableEpoch: 13, - StakingV2EnableEpoch: 18, - StakeEnableEpoch: 17, - DoubleKeyProtectionEnableEpoch: 19, - ESDTEnableEpoch: 20, - GovernanceEnableEpoch: 21, - DelegationManagerEnableEpoch: 22, - DelegationSmartContractEnableEpoch: 23, - CorrectLastUnjailedEnableEpoch: 24, - BalanceWaitingListsEnableEpoch: 14, - ReturnDataToLastTransferEnableEpoch: 15, - SenderInOutTransferEnableEpoch: 16, - RelayedTransactionsV2EnableEpoch: 25, - UnbondTokensV2EnableEpoch: 26, - SaveJailedAlwaysEnableEpoch: 27, - ValidatorToDelegationEnableEpoch: 29, - ReDelegateBelowMinCheckEnableEpoch: 28, - WaitingListFixEnableEpoch: 30, - IncrementSCRNonceInMultiTransferEnableEpoch: 31, - ESDTMultiTransferEnableEpoch: 32, - GlobalMintBurnDisableEpoch: 33, - ESDTTransferRoleEnableEpoch: 34, - BuiltInFunctionOnMetaEnableEpoch: 35, - ComputeRewardCheckpointEnableEpoch: 36, - SCRSizeInvariantCheckEnableEpoch: 37, - BackwardCompSaveKeyValueEnableEpoch: 38, - ESDTNFTCreateOnMultiShardEnableEpoch: 39, - MetaESDTSetEnableEpoch: 40, - AddTokensToDelegationEnableEpoch: 41, - MultiESDTTransferFixOnCallBackOnEnableEpoch: 42, - OptimizeGasUsedInCrossMiniBlocksEnableEpoch: 43, - FixOOGReturnCodeEnableEpoch: 44, - 
RemoveNonUpdatedStorageEnableEpoch: 45, - OptimizeNFTStoreEnableEpoch: 46, - CreateNFTThroughExecByCallerEnableEpoch: 47, - IsPayableBySCEnableEpoch: 48, - CleanUpInformativeSCRsEnableEpoch: 49, - StorageAPICostOptimizationEnableEpoch: 50, - TransformToMultiShardCreateEnableEpoch: 51, - ESDTRegisterAndSetAllRolesEnableEpoch: 52, - FailExecutionOnEveryAPIErrorEnableEpoch: 53, - ManagedCryptoAPIsEnableEpoch: 54, - ESDTMetadataContinuousCleanupEnableEpoch: 55, - FixAsyncCallBackArgsListEnableEpoch: 56, - FixOldTokenLiquidityEnableEpoch: 57, - SetSenderInEeiOutputTransferEnableEpoch: 58, - MaxBlockchainHookCountersEnableEpoch: 59, - WipeSingleNFTLiquidityDecreaseEnableEpoch: 60, - AlwaysSaveTokenMetaDataEnableEpoch: 61, - RuntimeCodeSizeFixEnableEpoch: 62, - RuntimeMemStoreLimitEnableEpoch: 63, - SetGuardianEnableEpoch: 64, - MultiClaimOnDelegationEnableEpoch: 65, - KeepExecOrderOnCreatedSCRsEnableEpoch: 64, - ChangeUsernameEnableEpoch: 64, + BlockGasAndFeesReCheckEnableEpoch: 13, + StakingV2EnableEpoch: 18, + StakeEnableEpoch: 17, + DoubleKeyProtectionEnableEpoch: 19, + ESDTEnableEpoch: 20, + GovernanceEnableEpoch: 21, + DelegationManagerEnableEpoch: 22, + DelegationSmartContractEnableEpoch: 23, + CorrectLastUnjailedEnableEpoch: 24, + BalanceWaitingListsEnableEpoch: 14, + ReturnDataToLastTransferEnableEpoch: 15, + SenderInOutTransferEnableEpoch: 16, + RelayedTransactionsV2EnableEpoch: 25, + UnbondTokensV2EnableEpoch: 26, + SaveJailedAlwaysEnableEpoch: 27, + ValidatorToDelegationEnableEpoch: 29, + ReDelegateBelowMinCheckEnableEpoch: 28, + WaitingListFixEnableEpoch: 30, + IncrementSCRNonceInMultiTransferEnableEpoch: 31, + ESDTMultiTransferEnableEpoch: 32, + GlobalMintBurnDisableEpoch: 33, + ESDTTransferRoleEnableEpoch: 34, + BuiltInFunctionOnMetaEnableEpoch: 35, + ComputeRewardCheckpointEnableEpoch: 36, + SCRSizeInvariantCheckEnableEpoch: 37, + BackwardCompSaveKeyValueEnableEpoch: 38, + ESDTNFTCreateOnMultiShardEnableEpoch: 39, + MetaESDTSetEnableEpoch: 40, + 
AddTokensToDelegationEnableEpoch: 41, + MultiESDTTransferFixOnCallBackOnEnableEpoch: 42, + OptimizeGasUsedInCrossMiniBlocksEnableEpoch: 43, + FixOOGReturnCodeEnableEpoch: 44, + RemoveNonUpdatedStorageEnableEpoch: 45, + OptimizeNFTStoreEnableEpoch: 46, + CreateNFTThroughExecByCallerEnableEpoch: 47, + IsPayableBySCEnableEpoch: 48, + CleanUpInformativeSCRsEnableEpoch: 49, + StorageAPICostOptimizationEnableEpoch: 50, + TransformToMultiShardCreateEnableEpoch: 51, + ESDTRegisterAndSetAllRolesEnableEpoch: 52, + FailExecutionOnEveryAPIErrorEnableEpoch: 53, + ManagedCryptoAPIsEnableEpoch: 54, + ESDTMetadataContinuousCleanupEnableEpoch: 55, + FixAsyncCallBackArgsListEnableEpoch: 56, + FixOldTokenLiquidityEnableEpoch: 57, + SetSenderInEeiOutputTransferEnableEpoch: 58, + MaxBlockchainHookCountersEnableEpoch: 59, + WipeSingleNFTLiquidityDecreaseEnableEpoch: 60, + AlwaysSaveTokenMetaDataEnableEpoch: 61, + RuntimeCodeSizeFixEnableEpoch: 62, + RuntimeMemStoreLimitEnableEpoch: 63, + SetGuardianEnableEpoch: 64, + MultiClaimOnDelegationEnableEpoch: 65, + KeepExecOrderOnCreatedSCRsEnableEpoch: 64, + ChangeUsernameEnableEpoch: 64, ConsistentTokensValuesLengthCheckEnableEpoch: 66, BLSMultiSignerEnableEpoch: []MultiSignerConfig{ { diff --git a/go.mod b/go.mod index f19dc5c18d1..842c5a929b0 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/multiversx/mx-chain-logger-go v1.0.11 github.com/multiversx/mx-chain-p2p-go v1.0.16 github.com/multiversx/mx-chain-storage-go v1.0.8 - github.com/multiversx/mx-chain-vm-common-go v1.4.2-0.20230515133310-3417cce7427e + github.com/multiversx/mx-chain-vm-common-go v1.4.3-0.20230518100603-9784aae99d8a github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53 github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54 github.com/multiversx/mx-chain-vm-v1_4-go v1.4.80 diff --git a/go.sum b/go.sum index 1c1b6fe8674..b10aea686b0 100644 --- a/go.sum +++ b/go.sum @@ -617,8 +617,9 @@ github.com/multiformats/go-varint v0.0.6/go.mod 
h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXS github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.2.1 h1:kmDfK7Znl3S0IJlDEE4sFuBOmA2rZkBudxlGhI1bvQc= github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= +github.com/multiversx/mx-chain-core-go v1.2.3 h1:ArNROsob/2PDdVM4SHXJPHJ1YzIZPWqvrCX/A77Dqec= +github.com/multiversx/mx-chain-core-go v1.2.3/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-crypto-go v1.2.6 h1:yxsjAQGh62los+iYmORMfh3w9qen0xbYlmwU0juNSeg= github.com/multiversx/mx-chain-crypto-go v1.2.6/go.mod h1:rOj0Rr19HTOYt9YTeym7RKxlHt91NXln3LVKjHKVmA0= github.com/multiversx/mx-chain-es-indexer-go v1.4.1 h1:gD/D7xZP7OL8L/ZZ3SoOfKjVHrU0iUxIG2AbidHFTUc= @@ -630,8 +631,8 @@ github.com/multiversx/mx-chain-p2p-go v1.0.16/go.mod h1:7piVUb5Z7UHK6n3JW8yIc32R github.com/multiversx/mx-chain-storage-go v1.0.8 h1:PB9OAwZs3rWz7nybBOxVCxgrd785FUUUAsVc5JWXYCw= github.com/multiversx/mx-chain-storage-go v1.0.8/go.mod h1:lEkFYFe6taKYxqO1einNgT1esY3K9Pj6vPnoCwV9C3U= github.com/multiversx/mx-chain-vm-common-go v1.4.1/go.mod h1:K6yCdro8VohzYI6GwjGzTO+fJiPgO5coo2sgQb+zA24= -github.com/multiversx/mx-chain-vm-common-go v1.4.2-0.20230515133310-3417cce7427e h1:2PkyDANF4IYD1OuUXLQg+eJmoTaxwypYg0VGfbhuKV4= -github.com/multiversx/mx-chain-vm-common-go v1.4.2-0.20230515133310-3417cce7427e/go.mod h1:K6yCdro8VohzYI6GwjGzTO+fJiPgO5coo2sgQb+zA24= +github.com/multiversx/mx-chain-vm-common-go v1.4.3-0.20230518100603-9784aae99d8a h1:zxcm1RXo5EOGycnU1w8BmvqbS+P/wGRC4QmWLrPxQ3Y= +github.com/multiversx/mx-chain-vm-common-go v1.4.3-0.20230518100603-9784aae99d8a/go.mod h1:K6yCdro8VohzYI6GwjGzTO+fJiPgO5coo2sgQb+zA24= 
github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53 h1:HGbatzgIhVPJASN3ADnygH4MrKypAidOVtZkHkY/jKw= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53/go.mod h1:STVJW9m/TUJ9Q64W/T4P/KuhP5fPb+cCb6Q4gSehWJg= github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54 h1:bl2essObOEDwVWci71hJ2QO5AYTsKk6IlzRK0i8y63w= From 3b8b0a78c7fee2e916d3c70d68de9f5acfe9c130 Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Thu, 18 May 2023 13:17:03 +0300 Subject: [PATCH 192/221] move config values --- cmd/node/config/enableEpochs.toml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cmd/node/config/enableEpochs.toml b/cmd/node/config/enableEpochs.toml index 8d83dc2c33d..959b5a361b4 100644 --- a/cmd/node/config/enableEpochs.toml +++ b/cmd/node/config/enableEpochs.toml @@ -251,18 +251,18 @@ # ChangeUsernameEnableEpoch represents the epoch when changing username is enabled ChangeUsernameEnableEpoch = 2 - # BLSMultiSignerEnableEpoch represents the activation epoch for different types of BLS multi-signers - BLSMultiSignerEnableEpoch = [ - { EnableEpoch = 0, Type = "no-KOSK" }, - { EnableEpoch = 1, Type = "KOSK" } - ] - # SetGuardianEnableEpoch represents the epoch when the guard account feature is enabled in the protocol SetGuardianEnableEpoch = 2 # ConsistentTokensValuesLengthCheckEnableEpoch represents the epoch when the consistent tokens values length check is enabled ConsistentTokensValuesLengthCheckEnableEpoch = 2 + # BLSMultiSignerEnableEpoch represents the activation epoch for different types of BLS multi-signers + BLSMultiSignerEnableEpoch = [ + { EnableEpoch = 0, Type = "no-KOSK" }, + { EnableEpoch = 1, Type = "KOSK" } + ] + # MaxNodesChangeEnableEpoch holds configuration for changing the maximum number of nodes and the enabling epoch MaxNodesChangeEnableEpoch = [ { EpochEnable = 0, MaxNumNodes = 36, NodesToShufflePerShard = 4 }, From 57f5f3c1aa737abeae0d417cc452d353a0947396 Mon Sep 17 00:00:00 2001 From: MariusC Date: Thu, 18 May 2023 13:32:11 +0300 Subject: 
[PATCH 193/221] FIX: Indexer --- go.mod | 2 +- go.sum | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 707ba33b1d1..462f24cb00a 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/multiversx/mx-chain-communication-go v0.0.0-20230512095548-5bc637293104 github.com/multiversx/mx-chain-core-go v1.2.4-0.20230517135533-2e54a17cd912 github.com/multiversx/mx-chain-crypto-go v1.2.6 - github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96 + github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230518102916-c7c22e214123 github.com/multiversx/mx-chain-logger-go v1.0.11 github.com/multiversx/mx-chain-p2p-go v1.0.16 github.com/multiversx/mx-chain-storage-go v1.0.8 diff --git a/go.sum b/go.sum index 477090db584..f5edc762bec 100644 --- a/go.sum +++ b/go.sum @@ -620,15 +620,14 @@ github.com/multiversx/mx-chain-communication-go v0.0.0-20230512095548-5bc6372931 github.com/multiversx/mx-chain-communication-go v0.0.0-20230512095548-5bc637293104/go.mod h1:GPHOm4HSXbvC0IotMziWXQmhtsUe69ScBPYsb+mF9bk= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.0/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.2.1-0.20230329082847-b78e96c3ad5a/go.mod h1:/lovncjwo+pXQ7IAERwNzwCifeH7SAWk0DGqjorX2bc= github.com/multiversx/mx-chain-core-go v1.2.1-0.20230510143029-ab37792342df/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.4-0.20230517135533-2e54a17cd912 h1:7dgFuxG2sUbQEFA4y36NAoRjuN+Z3PlY0znmmgr7ZSo= github.com/multiversx/mx-chain-core-go v1.2.4-0.20230517135533-2e54a17cd912/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= github.com/multiversx/mx-chain-crypto-go v1.2.6 
h1:yxsjAQGh62los+iYmORMfh3w9qen0xbYlmwU0juNSeg= github.com/multiversx/mx-chain-crypto-go v1.2.6/go.mod h1:rOj0Rr19HTOYt9YTeym7RKxlHt91NXln3LVKjHKVmA0= -github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96 h1:okIfLr+NqX04eHNp9k97KuLhpYfLJOjmGZaOia9xcGg= -github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230331083741-0fd8a2156e96/go.mod h1:Y6jgeoMBpDCtm7lurtChhgPyhpQ0GF5OruW/tl/++JI= +github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230518102916-c7c22e214123 h1:BpWN9bL/bKp1RKnyYJdL37GvrRpx2wfQ0LNsL1EE/pk= +github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230518102916-c7c22e214123/go.mod h1:Ch2i9rxUE6G2i6izfCUo1ffLWuAOXTbYdKiWNPOVpNE= github.com/multiversx/mx-chain-logger-go v1.0.11 h1:DFsHa+sc5fKwhDR50I8uBM99RTDTEW68ESyr5ALRDwE= github.com/multiversx/mx-chain-logger-go v1.0.11/go.mod h1:1srDkP0DQucWQ+rYfaq0BX2qLnULsUdRPADpYUTM6dA= github.com/multiversx/mx-chain-p2p-go v1.0.16 h1:iMK8KUi006/avVcmecnk7lqbDCRL0BN04vgepoVLlyY= From b70a4003915c2afcdea65ec217736ce39a8ba7fc Mon Sep 17 00:00:00 2001 From: MariusC Date: Thu, 18 May 2023 13:35:13 +0300 Subject: [PATCH 194/221] FIX: Unit test --- factory/processing/processComponents_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/factory/processing/processComponents_test.go b/factory/processing/processComponents_test.go index ebab46319fa..9d272ef68f6 100644 --- a/factory/processing/processComponents_test.go +++ b/factory/processing/processComponents_test.go @@ -83,7 +83,7 @@ func createMockProcessComponentsFactoryArgs() processComp.ProcessComponentsFacto Version: "v1.0.0", }, AccountsParser: &mock.AccountsParserStub{ - GenerateInitialTransactionsCalled: func(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*dataBlock.MiniBlock, map[uint32]*outportCore.Pool, error) { + GenerateInitialTransactionsCalled: func(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) 
([]*dataBlock.MiniBlock, map[uint32]*outportCore.TransactionPool, error) { return []*dataBlock.MiniBlock{ {}, }, @@ -674,7 +674,7 @@ func TestProcessComponentsFactory_Create(t *testing.T) { args := createMockProcessComponentsFactoryArgs() args.AccountsParser = &mock.AccountsParserStub{ - GenerateInitialTransactionsCalled: func(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*dataBlock.MiniBlock, map[uint32]*outportCore.Pool, error) { + GenerateInitialTransactionsCalled: func(shardCoordinator sharding.Coordinator, initialIndexingData map[uint32]*genesis.IndexingData) ([]*dataBlock.MiniBlock, map[uint32]*outportCore.TransactionPool, error) { return nil, nil, expectedErr }, } From f4460b5e06cc20adbfdc32a95f9a29743264fc56 Mon Sep 17 00:00:00 2001 From: Iuga Mihai Date: Thu, 18 May 2023 14:03:20 +0300 Subject: [PATCH 195/221] fixes after merge --- facade/nodeFacade_test.go | 6 +++--- factory/status/statusComponents.go | 1 + factory/status/statusComponents_test.go | 11 +++++++---- go.mod | 2 +- go.sum | 4 ++-- testscommon/components/components.go | 6 ++++++ 6 files changed, 20 insertions(+), 10 deletions(-) diff --git a/facade/nodeFacade_test.go b/facade/nodeFacade_test.go index f0a378a92ec..1f68c7c5108 100644 --- a/facade/nodeFacade_test.go +++ b/facade/nodeFacade_test.go @@ -13,10 +13,10 @@ import ( "github.com/multiversx/mx-chain-core-go/core" atomicCore "github.com/multiversx/mx-chain-core-go/core/atomic" nodeData "github.com/multiversx/mx-chain-core-go/data" + "github.com/multiversx/mx-chain-core-go/data/alteredAccount" "github.com/multiversx/mx-chain-core-go/data/api" "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/data/esdt" - "github.com/multiversx/mx-chain-core-go/data/outport" "github.com/multiversx/mx-chain-core-go/data/transaction" "github.com/multiversx/mx-chain-core-go/data/vm" "github.com/multiversx/mx-chain-go/common" @@ -2117,7 +2117,7 @@ func 
TestNodeFacade_GetBlockByNonce(t *testing.T) { func TestNodeFacade_GetAlteredAccountsForBlock(t *testing.T) { t.Parallel() - providedResponse := []*outport.AlteredAccount{ + providedResponse := []*alteredAccount.AlteredAccount{ { Nonce: 123, Address: "address", @@ -2125,7 +2125,7 @@ func TestNodeFacade_GetAlteredAccountsForBlock(t *testing.T) { } args := createMockArguments() args.ApiResolver = &mock.ApiResolverStub{ - GetAlteredAccountsForBlockCalled: func(options api.GetAlteredAccountsForBlockOptions) ([]*outport.AlteredAccount, error) { + GetAlteredAccountsForBlockCalled: func(options api.GetAlteredAccountsForBlockOptions) ([]*alteredAccount.AlteredAccount, error) { return providedResponse, nil }, } diff --git a/factory/status/statusComponents.go b/factory/status/statusComponents.go index a5d531a7e09..d55702d0e14 100644 --- a/factory/status/statusComponents.go +++ b/factory/status/statusComponents.go @@ -226,6 +226,7 @@ func (scf *statusComponentsFactory) makeElasticIndexerArgs() indexerFactory.Args EnabledIndexes: elasticSearchConfig.EnabledIndexes, Denomination: scf.economicsConfig.GlobalSettings.Denomination, UseKibana: elasticSearchConfig.UseKibana, + ImportDB: scf.isInImportMode, } } diff --git a/factory/status/statusComponents_test.go b/factory/status/statusComponents_test.go index 5240fc11ba7..c27489057e0 100644 --- a/factory/status/statusComponents_test.go +++ b/factory/status/statusComponents_test.go @@ -30,7 +30,10 @@ func createMockStatusComponentsFactoryArgs() statusComp.StatusComponentsFactoryA Password: "pass", EnabledIndexes: []string{"transactions", "blocks"}, }, - WebSocketConnector: config.WebSocketDriverConfig{ + HostDriverConfig: config.HostDriverConfig{ + MarshallerType: "json", + }, + EventNotifierConnector: config.EventNotifierConfig{ MarshallerType: "json", }, }, @@ -184,8 +187,8 @@ func TestStatusComponentsFactory_Create(t *testing.T) { t.Parallel() args := createMockStatusComponentsFactoryArgs() - 
args.ExternalConfig.WebSocketConnector.Enabled = true - args.ExternalConfig.WebSocketConnector.MarshallerType = "invalid type" + args.ExternalConfig.HostDriverConfig.Enabled = true + args.ExternalConfig.HostDriverConfig.MarshallerType = "invalid type" scf, _ := statusComp.NewStatusComponentsFactory(args) require.NotNil(t, scf) @@ -201,7 +204,7 @@ func TestStatusComponentsFactory_Create(t *testing.T) { return core.MetachainShardId // coverage } args, _ := componentsMock.GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator) - args.ExternalConfig.WebSocketConnector.Enabled = true // coverage + args.ExternalConfig.HostDriverConfig.Enabled = true // coverage scf, err := statusComp.NewStatusComponentsFactory(args) require.Nil(t, err) diff --git a/go.mod b/go.mod index 462f24cb00a..3d4d509dc9e 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/multiversx/mx-chain-communication-go v0.0.0-20230512095548-5bc637293104 github.com/multiversx/mx-chain-core-go v1.2.4-0.20230517135533-2e54a17cd912 github.com/multiversx/mx-chain-crypto-go v1.2.6 - github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230518102916-c7c22e214123 + github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230518104314-87b0947855ef github.com/multiversx/mx-chain-logger-go v1.0.11 github.com/multiversx/mx-chain-p2p-go v1.0.16 github.com/multiversx/mx-chain-storage-go v1.0.8 diff --git a/go.sum b/go.sum index f5edc762bec..81f2c5eb61c 100644 --- a/go.sum +++ b/go.sum @@ -626,8 +626,8 @@ github.com/multiversx/mx-chain-core-go v1.2.4-0.20230517135533-2e54a17cd912 h1:7 github.com/multiversx/mx-chain-core-go v1.2.4-0.20230517135533-2e54a17cd912/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= github.com/multiversx/mx-chain-crypto-go v1.2.6 h1:yxsjAQGh62los+iYmORMfh3w9qen0xbYlmwU0juNSeg= github.com/multiversx/mx-chain-crypto-go v1.2.6/go.mod h1:rOj0Rr19HTOYt9YTeym7RKxlHt91NXln3LVKjHKVmA0= -github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230518102916-c7c22e214123 
h1:BpWN9bL/bKp1RKnyYJdL37GvrRpx2wfQ0LNsL1EE/pk= -github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230518102916-c7c22e214123/go.mod h1:Ch2i9rxUE6G2i6izfCUo1ffLWuAOXTbYdKiWNPOVpNE= +github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230518104314-87b0947855ef h1:Y1Zz7rIL4U73ixQpElX3SJjtIdUawUfReI0gALxvrrY= +github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230518104314-87b0947855ef/go.mod h1:Ch2i9rxUE6G2i6izfCUo1ffLWuAOXTbYdKiWNPOVpNE= github.com/multiversx/mx-chain-logger-go v1.0.11 h1:DFsHa+sc5fKwhDR50I8uBM99RTDTEW68ESyr5ALRDwE= github.com/multiversx/mx-chain-logger-go v1.0.11/go.mod h1:1srDkP0DQucWQ+rYfaq0BX2qLnULsUdRPADpYUTM6dA= github.com/multiversx/mx-chain-p2p-go v1.0.16 h1:iMK8KUi006/avVcmecnk7lqbDCRL0BN04vgepoVLlyY= diff --git a/testscommon/components/components.go b/testscommon/components/components.go index 338cd69d34d..e755a49ba44 100644 --- a/testscommon/components/components.go +++ b/testscommon/components/components.go @@ -661,6 +661,12 @@ func GetStatusComponentsFactoryArgsAndProcessComponents(shardCoordinator shardin RequestTimeoutSec: 30, MarshallerType: "json", }, + HostDriverConfig: config.HostDriverConfig{ + MarshallerType: "json", + Mode: "client", + URL: "localhost:12345", + RetryDurationInSec: 1, + }, }, EconomicsConfig: config.EconomicsConfig{}, ShardCoordinator: mock.NewMultiShardsCoordinatorMock(2), From 8fc8f4f962bd20d8a719aee356fddbef284bbe89 Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Thu, 18 May 2023 16:06:09 +0300 Subject: [PATCH 196/221] fix after merge --- factory/processing/export_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/factory/processing/export_test.go b/factory/processing/export_test.go index c7d540d2d60..6b927229331 100644 --- a/factory/processing/export_test.go +++ b/factory/processing/export_test.go @@ -6,7 +6,6 @@ import ( "github.com/multiversx/mx-chain-go/factory" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/block/cutoff" - 
"github.com/multiversx/mx-chain-go/process/txsimulator" ) // NewBlockProcessor calls the unexported method with the same name in order to use it in tests From 9b4dc717485c5ef4af010922cccfb1858bc69829 Mon Sep 17 00:00:00 2001 From: Iuga Mihai Date: Thu, 18 May 2023 16:08:53 +0300 Subject: [PATCH 197/221] latest indexer version --- go.mod | 2 +- go.sum | 6 ++---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 3d4d509dc9e..7f45c62ec9d 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/multiversx/mx-chain-communication-go v0.0.0-20230512095548-5bc637293104 github.com/multiversx/mx-chain-core-go v1.2.4-0.20230517135533-2e54a17cd912 github.com/multiversx/mx-chain-crypto-go v1.2.6 - github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230518104314-87b0947855ef + github.com/multiversx/mx-chain-es-indexer-go v1.4.2-0.20230518130444-97e3775aa0ff github.com/multiversx/mx-chain-logger-go v1.0.11 github.com/multiversx/mx-chain-p2p-go v1.0.16 github.com/multiversx/mx-chain-storage-go v1.0.8 diff --git a/go.sum b/go.sum index 81f2c5eb61c..70fdb6aeaf1 100644 --- a/go.sum +++ b/go.sum @@ -619,22 +619,20 @@ github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7 github.com/multiversx/mx-chain-communication-go v0.0.0-20230512095548-5bc637293104 h1:oFsYNkebv7TQygdEjN4aGgQ8ICLPmS9bDJmzlOHtU2Y= github.com/multiversx/mx-chain-communication-go v0.0.0-20230512095548-5bc637293104/go.mod h1:GPHOm4HSXbvC0IotMziWXQmhtsUe69ScBPYsb+mF9bk= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.2.0/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1-0.20230510143029-ab37792342df/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go 
v1.2.4-0.20230517135533-2e54a17cd912 h1:7dgFuxG2sUbQEFA4y36NAoRjuN+Z3PlY0znmmgr7ZSo= github.com/multiversx/mx-chain-core-go v1.2.4-0.20230517135533-2e54a17cd912/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= github.com/multiversx/mx-chain-crypto-go v1.2.6 h1:yxsjAQGh62los+iYmORMfh3w9qen0xbYlmwU0juNSeg= github.com/multiversx/mx-chain-crypto-go v1.2.6/go.mod h1:rOj0Rr19HTOYt9YTeym7RKxlHt91NXln3LVKjHKVmA0= -github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230518104314-87b0947855ef h1:Y1Zz7rIL4U73ixQpElX3SJjtIdUawUfReI0gALxvrrY= -github.com/multiversx/mx-chain-es-indexer-go v1.4.1-0.20230518104314-87b0947855ef/go.mod h1:Ch2i9rxUE6G2i6izfCUo1ffLWuAOXTbYdKiWNPOVpNE= +github.com/multiversx/mx-chain-es-indexer-go v1.4.2-0.20230518130444-97e3775aa0ff h1:mMm20VJbdQ5YVuiP6T5pur7n34y5nKFP2ar8OVm63zU= +github.com/multiversx/mx-chain-es-indexer-go v1.4.2-0.20230518130444-97e3775aa0ff/go.mod h1:wnKp6mBg26CApCd2350U3tywa2RNFfSQyAVOADe7Xr0= github.com/multiversx/mx-chain-logger-go v1.0.11 h1:DFsHa+sc5fKwhDR50I8uBM99RTDTEW68ESyr5ALRDwE= github.com/multiversx/mx-chain-logger-go v1.0.11/go.mod h1:1srDkP0DQucWQ+rYfaq0BX2qLnULsUdRPADpYUTM6dA= github.com/multiversx/mx-chain-p2p-go v1.0.16 h1:iMK8KUi006/avVcmecnk7lqbDCRL0BN04vgepoVLlyY= github.com/multiversx/mx-chain-p2p-go v1.0.16/go.mod h1:7piVUb5Z7UHK6n3JW8yIc32RdDFZu7GivRY29q0r2gI= github.com/multiversx/mx-chain-storage-go v1.0.8 h1:PB9OAwZs3rWz7nybBOxVCxgrd785FUUUAsVc5JWXYCw= github.com/multiversx/mx-chain-storage-go v1.0.8/go.mod h1:lEkFYFe6taKYxqO1einNgT1esY3K9Pj6vPnoCwV9C3U= -github.com/multiversx/mx-chain-vm-common-go v1.4.0/go.mod h1:odBJC92ANA8zLtPh/wwajUUGJOaS88F5QYGf0t8Wgzw= github.com/multiversx/mx-chain-vm-common-go v1.4.1/go.mod h1:K6yCdro8VohzYI6GwjGzTO+fJiPgO5coo2sgQb+zA24= github.com/multiversx/mx-chain-vm-common-go v1.4.2 h1:ApNOwdZV2IGRrZGxQtSL4hjI0PDFrSq4p2TB3gLybxQ= github.com/multiversx/mx-chain-vm-common-go v1.4.2/go.mod h1:K6yCdro8VohzYI6GwjGzTO+fJiPgO5coo2sgQb+zA24= From 
e9e0b6c493cde5077692ef279c619aeae0380337 Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Thu, 18 May 2023 16:25:30 +0300 Subject: [PATCH 198/221] fix after merge 2 --- integrationTests/realcomponents/processorRunner.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integrationTests/realcomponents/processorRunner.go b/integrationTests/realcomponents/processorRunner.go index 73d77765995..6176a54858e 100644 --- a/integrationTests/realcomponents/processorRunner.go +++ b/integrationTests/realcomponents/processorRunner.go @@ -406,7 +406,7 @@ func (pr *ProcessorRunner) createProcessComponents(tb testing.TB) { argsProcess := factoryProcessing.ProcessComponentsFactoryArgs{ Config: *pr.Config.GeneralConfig, EpochConfig: *pr.Config.EpochConfig, - PrefConfigs: pr.Config.PreferencesConfig.Preferences, + PrefConfigs: *pr.Config.PreferencesConfig, ImportDBConfig: *pr.Config.ImportDbConfig, FlagsConfig: config.ContextFlagsConfig{ Version: "test", From d6315b7f14466f737de9798c216403ba1f9ea924 Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Fri, 19 May 2023 14:01:57 +0300 Subject: [PATCH 199/221] update toml config --- config/tomlConfig_test.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/config/tomlConfig_test.go b/config/tomlConfig_test.go index 473df5cb187..a6fce6921d6 100644 --- a/config/tomlConfig_test.go +++ b/config/tomlConfig_test.go @@ -693,16 +693,16 @@ func TestEnableEpochConfig(t *testing.T) { SetGuardianEnableEpoch = 64 # KeepExecOrderOnCreatedSCRsEnableEpoch represents the epoch when the execution order of created SCRs is ensured - KeepExecOrderOnCreatedSCRsEnableEpoch = 64 + KeepExecOrderOnCreatedSCRsEnableEpoch = 65 # MultiClaimOnDelegationEnableEpoch represents the epoch when the multi claim on delegation function is enabled - MultiClaimOnDelegationEnableEpoch = 65 + MultiClaimOnDelegationEnableEpoch = 66 # ChangeUsernameEnableEpoch represents the epoch when change username is enabled - 
ChangeUsernameEnableEpoch = 64 + ChangeUsernameEnableEpoch = 67 # ConsistentTokensValuesLengthCheckEnableEpoch represents the epoch when the consistent tokens values length check is enabled - ConsistentTokensValuesLengthCheckEnableEpoch = 66 + ConsistentTokensValuesLengthCheckEnableEpoch = 68 # MaxNodesChangeEnableEpoch holds configuration for changing the maximum number of nodes and the enabling epoch MaxNodesChangeEnableEpoch = [ @@ -800,10 +800,10 @@ func TestEnableEpochConfig(t *testing.T) { RuntimeCodeSizeFixEnableEpoch: 62, RuntimeMemStoreLimitEnableEpoch: 63, SetGuardianEnableEpoch: 64, - MultiClaimOnDelegationEnableEpoch: 65, - KeepExecOrderOnCreatedSCRsEnableEpoch: 64, - ChangeUsernameEnableEpoch: 64, - ConsistentTokensValuesLengthCheckEnableEpoch: 66, + KeepExecOrderOnCreatedSCRsEnableEpoch: 65, + MultiClaimOnDelegationEnableEpoch: 66, + ChangeUsernameEnableEpoch: 67, + ConsistentTokensValuesLengthCheckEnableEpoch: 68, BLSMultiSignerEnableEpoch: []MultiSignerConfig{ { EnableEpoch: 0, From 83c48f543b53c6c68b3fa33cb0f3d78ff5d6bb81 Mon Sep 17 00:00:00 2001 From: Iuga Mihai Date: Fri, 19 May 2023 14:10:21 +0300 Subject: [PATCH 200/221] proper tags --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 7f45c62ec9d..ab2ef885c4d 100644 --- a/go.mod +++ b/go.mod @@ -13,10 +13,10 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-communication-go v0.0.0-20230512095548-5bc637293104 - github.com/multiversx/mx-chain-core-go v1.2.4-0.20230517135533-2e54a17cd912 + github.com/multiversx/mx-chain-communication-go v1.0.0 + github.com/multiversx/mx-chain-core-go v1.2.4 github.com/multiversx/mx-chain-crypto-go v1.2.6 - github.com/multiversx/mx-chain-es-indexer-go v1.4.2-0.20230518130444-97e3775aa0ff + github.com/multiversx/mx-chain-es-indexer-go v1.4.3 github.com/multiversx/mx-chain-logger-go 
v1.0.11 github.com/multiversx/mx-chain-p2p-go v1.0.16 github.com/multiversx/mx-chain-storage-go v1.0.8 diff --git a/go.sum b/go.sum index 70fdb6aeaf1..792f954a3bb 100644 --- a/go.sum +++ b/go.sum @@ -616,17 +616,17 @@ github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2 github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= -github.com/multiversx/mx-chain-communication-go v0.0.0-20230512095548-5bc637293104 h1:oFsYNkebv7TQygdEjN4aGgQ8ICLPmS9bDJmzlOHtU2Y= -github.com/multiversx/mx-chain-communication-go v0.0.0-20230512095548-5bc637293104/go.mod h1:GPHOm4HSXbvC0IotMziWXQmhtsUe69ScBPYsb+mF9bk= +github.com/multiversx/mx-chain-communication-go v1.0.0 h1:ZGIIrWIE7RqpF7gvMfshH+CJUehviXzkWlxnpZ02efE= +github.com/multiversx/mx-chain-communication-go v1.0.0/go.mod h1:GPHOm4HSXbvC0IotMziWXQmhtsUe69ScBPYsb+mF9bk= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1-0.20230510143029-ab37792342df/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.2.4-0.20230517135533-2e54a17cd912 h1:7dgFuxG2sUbQEFA4y36NAoRjuN+Z3PlY0znmmgr7ZSo= -github.com/multiversx/mx-chain-core-go v1.2.4-0.20230517135533-2e54a17cd912/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= +github.com/multiversx/mx-chain-core-go v1.2.4 h1:BRXyajUevLU6zHszR8jnp2+7C2bAQBor51YTc4dp3YQ= +github.com/multiversx/mx-chain-core-go v1.2.4/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= github.com/multiversx/mx-chain-crypto-go v1.2.6 h1:yxsjAQGh62los+iYmORMfh3w9qen0xbYlmwU0juNSeg= github.com/multiversx/mx-chain-crypto-go 
v1.2.6/go.mod h1:rOj0Rr19HTOYt9YTeym7RKxlHt91NXln3LVKjHKVmA0= -github.com/multiversx/mx-chain-es-indexer-go v1.4.2-0.20230518130444-97e3775aa0ff h1:mMm20VJbdQ5YVuiP6T5pur7n34y5nKFP2ar8OVm63zU= -github.com/multiversx/mx-chain-es-indexer-go v1.4.2-0.20230518130444-97e3775aa0ff/go.mod h1:wnKp6mBg26CApCd2350U3tywa2RNFfSQyAVOADe7Xr0= +github.com/multiversx/mx-chain-es-indexer-go v1.4.3 h1:s6eX2dJSr/yjbGiF00Q68ar0jcvGkBm+ZzEa8/tpHzM= +github.com/multiversx/mx-chain-es-indexer-go v1.4.3/go.mod h1:b2TVf5kCmmFQUjagI962YaKa2uqOEMn7dbTsiE/0J6U= github.com/multiversx/mx-chain-logger-go v1.0.11 h1:DFsHa+sc5fKwhDR50I8uBM99RTDTEW68ESyr5ALRDwE= github.com/multiversx/mx-chain-logger-go v1.0.11/go.mod h1:1srDkP0DQucWQ+rYfaq0BX2qLnULsUdRPADpYUTM6dA= github.com/multiversx/mx-chain-p2p-go v1.0.16 h1:iMK8KUi006/avVcmecnk7lqbDCRL0BN04vgepoVLlyY= From 7ec6cfe5f0ce0eff327e83d1f6d11cc7a7767d65 Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Mon, 22 May 2023 12:02:06 +0300 Subject: [PATCH 201/221] remove duplicate inits of NewTrieStorageManagerArgs in trie package --- trie/branchNode_test.go | 49 +------ trie/doubleListSync_test.go | 18 +-- trie/export_test.go | 23 ++++ trie/patriciaMerkleTrie_test.go | 28 +--- trie/snapshotTrieStorageManager_test.go | 4 +- trie/syncTrieStorageManager_test.go | 4 +- trie/trieStorageManagerFactory_test.go | 10 +- ...ieStorageManagerWithoutCheckpoints_test.go | 6 +- trie/trieStorageManagerWithoutPruning_test.go | 4 +- .../trieStorageManagerWithoutSnapshot_test.go | 18 +-- trie/trieStorageManager_test.go | 121 ++++++++---------- 11 files changed, 110 insertions(+), 175 deletions(-) diff --git a/trie/branchNode_test.go b/trie/branchNode_test.go index 70665720bb4..2b6bb40a080 100644 --- a/trie/branchNode_test.go +++ b/trie/branchNode_test.go @@ -12,11 +12,9 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/config" 
"github.com/multiversx/mx-chain-go/storage/cache" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" - "github.com/multiversx/mx-chain-go/trie/hashesHolder" "github.com/multiversx/mx-chain-go/trie/statistics" "github.com/stretchr/testify/assert" ) @@ -47,30 +45,12 @@ func getBnAndCollapsedBn(marshalizer marshal.Marshalizer, hasher hashing.Hasher) } func newEmptyTrie() (*patriciaMerkleTrie, *trieStorageManager) { - marsh, hsh := getTestMarshalizerAndHasher() - - // TODO change this initialization of the persister (and everywhere in this package) - // by using a persister factory - generalCfg := config.TrieStorageManagerConfig{ - PruningBufferLen: 1000, - SnapshotsBufferLen: 10, - SnapshotsGoroutineNum: 1, - } - - args := NewTrieStorageManagerArgs{ - MainStorer: createMemUnit(), - CheckpointsStorer: createMemUnit(), - Marshalizer: marsh, - Hasher: hsh, - GeneralConfig: generalCfg, - CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10000000, uint64(hsh.Size())), - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } + args := GetDefaultTrieStorageManagerParameters() trieStorage, _ := NewTrieStorageManager(args) tr := &patriciaMerkleTrie{ trieStorage: trieStorage, - marshalizer: marsh, - hasher: hsh, + marshalizer: args.Marshalizer, + hasher: args.Hasher, oldHashes: make([][]byte, 0), oldRoot: make([]byte, 0), maxTrieLevelInMemory: 5, @@ -185,26 +165,9 @@ func TestBranchNode_setRootHash(t *testing.T) { t.Parallel() marsh, hsh := getTestMarshalizerAndHasher() - args := NewTrieStorageManagerArgs{ - MainStorer: createMemUnit(), - CheckpointsStorer: createMemUnit(), - Marshalizer: marsh, - Hasher: hsh, - GeneralConfig: config.TrieStorageManagerConfig{SnapshotsGoroutineNum: 1}, - CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10, uint64(hsh.Size())), - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } - trieStorage1, _ := NewTrieStorageManager(args) - args = 
NewTrieStorageManagerArgs{ - MainStorer: createMemUnit(), - CheckpointsStorer: createMemUnit(), - Marshalizer: marsh, - Hasher: hsh, - GeneralConfig: config.TrieStorageManagerConfig{SnapshotsGoroutineNum: 1}, - CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10, uint64(hsh.Size())), - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } - trieStorage2, _ := NewTrieStorageManager(args) + + trieStorage1, _ := NewTrieStorageManager(GetDefaultTrieStorageManagerParameters()) + trieStorage2, _ := NewTrieStorageManager(GetDefaultTrieStorageManagerParameters()) maxTrieLevelInMemory := uint(5) tr1, _ := NewTrie(trieStorage1, marsh, hsh, maxTrieLevelInMemory) diff --git a/trie/doubleListSync_test.go b/trie/doubleListSync_test.go index a1d67e102fa..ee188916553 100644 --- a/trie/doubleListSync_test.go +++ b/trie/doubleListSync_test.go @@ -10,13 +10,11 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-core-go/core/check" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/database" "github.com/multiversx/mx-chain-go/storage/storageunit" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" - "github.com/multiversx/mx-chain-go/trie/hashesHolder" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -37,20 +35,8 @@ func createMemUnit() storage.Storer { // CreateTrieStorageManager creates the trie storage manager for the tests func createTrieStorageManager(store storage.Storer) (common.StorageManager, storage.Storer) { - generalCfg := config.TrieStorageManagerConfig{ - PruningBufferLen: 1000, - SnapshotsBufferLen: 10, - SnapshotsGoroutineNum: 1, - } - args := NewTrieStorageManagerArgs{ - MainStorer: store, - CheckpointsStorer: store, - Marshalizer: marshalizer, - Hasher: hasherMock, - GeneralConfig: generalCfg, - 
CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10000000, uint64(hasherMock.Size())), - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } + args := GetDefaultTrieStorageManagerParameters() + args.MainStorer = store tsm, _ := NewTrieStorageManager(args) return tsm, store diff --git a/trie/export_test.go b/trie/export_test.go index e91f2378919..1f50041883e 100644 --- a/trie/export_test.go +++ b/trie/export_test.go @@ -3,8 +3,12 @@ package trie import ( "time" + "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" + "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/storageManager" + "github.com/multiversx/mx-chain-go/trie/hashesHolder" ) func (ts *trieSyncer) trieNodeIntercepted(hash []byte, val interface{}) { @@ -101,3 +105,22 @@ func IsTrieStorageManagerInEpoch(tsm common.StorageManager) bool { func NewBaseIterator(trie common.Trie) (*baseIterator, error) { return newBaseIterator(trie) } + +// GetDefaultTrieStorageManagerParameters - +func GetDefaultTrieStorageManagerParameters() NewTrieStorageManagerArgs { + generalCfg := config.TrieStorageManagerConfig{ + PruningBufferLen: 1000, + SnapshotsBufferLen: 10, + SnapshotsGoroutineNum: 1, + } + + return NewTrieStorageManagerArgs{ + MainStorer: testscommon.NewSnapshotPruningStorerMock(), + CheckpointsStorer: testscommon.NewSnapshotPruningStorerMock(), + Marshalizer: &marshal.GogoProtoMarshalizer{}, + Hasher: &testscommon.KeccakMock{}, + GeneralConfig: generalCfg, + CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10000000, testscommon.HashSize), + IdleProvider: &testscommon.ProcessStatusHandlerStub{}, + } +} diff --git a/trie/patriciaMerkleTrie_test.go b/trie/patriciaMerkleTrie_test.go index ae93bf933e4..384d6891b8f 100644 --- a/trie/patriciaMerkleTrie_test.go +++ b/trie/patriciaMerkleTrie_test.go @@ -18,12 +18,9 @@ import ( 
"github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/errChan" "github.com/multiversx/mx-chain-go/common/holders" - "github.com/multiversx/mx-chain-go/config" - "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/storage" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" - "github.com/multiversx/mx-chain-go/trie/hashesHolder" "github.com/multiversx/mx-chain-go/trie/keyBuilder" "github.com/multiversx/mx-chain-go/trie/mock" "github.com/stretchr/testify/assert" @@ -38,29 +35,8 @@ func emptyTrie() common.Trie { return tr } -func getDefaultTrieStorageManagerParameters() trie.NewTrieStorageManagerArgs { - marshalizer := &testscommon.ProtobufMarshalizerMock{} - hasher := &testscommon.KeccakMock{} - - generalCfg := config.TrieStorageManagerConfig{ - PruningBufferLen: 1000, - SnapshotsBufferLen: 10, - SnapshotsGoroutineNum: 1, - } - - return trie.NewTrieStorageManagerArgs{ - MainStorer: testscommon.NewSnapshotPruningStorerMock(), - CheckpointsStorer: testscommon.NewSnapshotPruningStorerMock(), - Marshalizer: marshalizer, - Hasher: hasher, - GeneralConfig: generalCfg, - CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10000000, testscommon.HashSize), - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } -} - func getDefaultTrieParameters() (common.StorageManager, marshal.Marshalizer, hashing.Hasher, uint) { - args := getDefaultTrieStorageManagerParameters() + args := trie.GetDefaultTrieStorageManagerParameters() trieStorageManager, _ := trie.NewTrieStorageManager(args) maxTrieLevelInMemory := uint(1) @@ -1050,7 +1026,7 @@ func TestPatriciaMerkleTrie_ConcurrentOperations(t *testing.T) { func TestPatriciaMerkleTrie_GetSerializedNodesClose(t *testing.T) { t.Parallel() - args := getDefaultTrieStorageManagerParameters() + args := trie.GetDefaultTrieStorageManagerParameters() args.MainStorer = &storage.StorerStub{ GetCalled: func(key 
[]byte) ([]byte, error) { // gets take a long time diff --git a/trie/snapshotTrieStorageManager_test.go b/trie/snapshotTrieStorageManager_test.go index 6efa5000fa4..a0c401a6eb8 100644 --- a/trie/snapshotTrieStorageManager_test.go +++ b/trie/snapshotTrieStorageManager_test.go @@ -16,7 +16,9 @@ import ( func TestNewSnapshotTrieStorageManagerInvalidStorerType(t *testing.T) { t.Parallel() - _, trieStorage := newEmptyTrie() + args := GetDefaultTrieStorageManagerParameters() + args.MainStorer = createMemUnit() + trieStorage, _ := NewTrieStorageManager(args) stsm, err := newSnapshotTrieStorageManager(trieStorage, 0) assert.True(t, check.IfNil(stsm)) diff --git a/trie/syncTrieStorageManager_test.go b/trie/syncTrieStorageManager_test.go index c4818388fdc..2290c4bf08c 100644 --- a/trie/syncTrieStorageManager_test.go +++ b/trie/syncTrieStorageManager_test.go @@ -20,7 +20,9 @@ func TestNewSyncTrieStorageManagerNilTsm(t *testing.T) { func TestNewSyncTrieStorageManagerInvalidStorerType(t *testing.T) { t.Parallel() - _, trieStorage := newEmptyTrie() + args := GetDefaultTrieStorageManagerParameters() + args.MainStorer = createMemUnit() + trieStorage, _ := NewTrieStorageManager(args) stsm, err := NewSyncTrieStorageManager(trieStorage) assert.Nil(t, stsm) diff --git a/trie/trieStorageManagerFactory_test.go b/trie/trieStorageManagerFactory_test.go index 26d679e157a..fcf2150b645 100644 --- a/trie/trieStorageManagerFactory_test.go +++ b/trie/trieStorageManagerFactory_test.go @@ -26,7 +26,7 @@ func TestTrieFactory_CreateWithoutPruning(t *testing.T) { options := getTrieStorageManagerOptions() options.PruningEnabled = false - tsm, err := trie.CreateTrieStorageManager(getNewTrieStorageManagerArgs(), options) + tsm, err := trie.CreateTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters(), options) assert.Nil(t, err) assert.Equal(t, "*trie.trieStorageManagerWithoutPruning", fmt.Sprintf("%T", tsm)) } @@ -36,7 +36,7 @@ func TestTrieFactory_CreateWithoutSnapshot(t *testing.T) { 
options := getTrieStorageManagerOptions() options.SnapshotsEnabled = false - tsm, err := trie.CreateTrieStorageManager(getNewTrieStorageManagerArgs(), options) + tsm, err := trie.CreateTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters(), options) assert.Nil(t, err) assert.Equal(t, "*trie.trieStorageManagerWithoutSnapshot", fmt.Sprintf("%T", tsm)) } @@ -46,7 +46,7 @@ func TestTrieFactory_CreateWithoutCheckpoints(t *testing.T) { options := getTrieStorageManagerOptions() options.CheckpointsEnabled = false - tsm, err := trie.CreateTrieStorageManager(getNewTrieStorageManagerArgs(), options) + tsm, err := trie.CreateTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters(), options) assert.Nil(t, err) assert.Equal(t, "*trie.trieStorageManagerWithoutCheckpoints", fmt.Sprintf("%T", tsm)) } @@ -54,7 +54,7 @@ func TestTrieFactory_CreateWithoutCheckpoints(t *testing.T) { func TestTrieFactory_CreateNormal(t *testing.T) { t.Parallel() - tsm, err := trie.CreateTrieStorageManager(getNewTrieStorageManagerArgs(), getTrieStorageManagerOptions()) + tsm, err := trie.CreateTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters(), getTrieStorageManagerOptions()) assert.Nil(t, err) assert.Equal(t, "*trie.trieStorageManager", fmt.Sprintf("%T", tsm)) } @@ -98,7 +98,7 @@ func TestTrieStorageManager_SerialFuncShadowingCallsExpectedImpl(t *testing.T) { return true }, GetBaseTrieStorageManagerCalled: func() common.StorageManager { - tsm, _ = trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) + tsm, _ = trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) return tsm }, } diff --git a/trie/trieStorageManagerWithoutCheckpoints_test.go b/trie/trieStorageManagerWithoutCheckpoints_test.go index 7775037b289..251d64f38ed 100644 --- a/trie/trieStorageManagerWithoutCheckpoints_test.go +++ b/trie/trieStorageManagerWithoutCheckpoints_test.go @@ -25,7 +25,7 @@ func TestNewTrieStorageManagerWithoutCheckpoints(t *testing.T) { t.Run("should work", 
func(t *testing.T) { t.Parallel() - tsm, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) + tsm, _ := trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) ts, err := trie.NewTrieStorageManagerWithoutCheckpoints(tsm) assert.Nil(t, err) assert.NotNil(t, ts) @@ -35,7 +35,7 @@ func TestNewTrieStorageManagerWithoutCheckpoints(t *testing.T) { func TestTrieStorageManagerWithoutCheckpoints_SetCheckpoint(t *testing.T) { t.Parallel() - tsm, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) + tsm, _ := trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) ts, _ := trie.NewTrieStorageManagerWithoutCheckpoints(tsm) iteratorChannels := &common.TrieIteratorChannels{ @@ -62,7 +62,7 @@ func TestTrieStorageManagerWithoutCheckpoints_SetCheckpoint(t *testing.T) { func TestTrieStorageManagerWithoutCheckpoints_AddDirtyCheckpointHashes(t *testing.T) { t.Parallel() - tsm, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) + tsm, _ := trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) ts, _ := trie.NewTrieStorageManagerWithoutCheckpoints(tsm) assert.False(t, ts.AddDirtyCheckpointHashes([]byte("rootHash"), nil)) diff --git a/trie/trieStorageManagerWithoutPruning_test.go b/trie/trieStorageManagerWithoutPruning_test.go index f60a1078ebc..4c05108991a 100644 --- a/trie/trieStorageManagerWithoutPruning_test.go +++ b/trie/trieStorageManagerWithoutPruning_test.go @@ -20,7 +20,7 @@ func TestNewTrieStorageManagerWithoutPruningWithNilStorage(t *testing.T) { func TestNewTrieStorageManagerWithoutPruning(t *testing.T) { t.Parallel() - tsm, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) + tsm, _ := trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) ts, err := trie.NewTrieStorageManagerWithoutPruning(tsm) assert.Nil(t, err) assert.NotNil(t, ts) @@ -29,7 +29,7 @@ func TestNewTrieStorageManagerWithoutPruning(t *testing.T) { func 
TestTrieStorageManagerWithoutPruning_IsPruningEnabled(t *testing.T) { t.Parallel() - tsm, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) + tsm, _ := trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) ts, _ := trie.NewTrieStorageManagerWithoutPruning(tsm) assert.False(t, ts.IsPruningEnabled()) } diff --git a/trie/trieStorageManagerWithoutSnapshot_test.go b/trie/trieStorageManagerWithoutSnapshot_test.go index 0dd15d21b68..d3c4073fab7 100644 --- a/trie/trieStorageManagerWithoutSnapshot_test.go +++ b/trie/trieStorageManagerWithoutSnapshot_test.go @@ -25,7 +25,7 @@ func TestNewTrieStorageManagerWithoutSnapshot(t *testing.T) { t.Run("should work", func(t *testing.T) { t.Parallel() - tsm, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) + tsm, _ := trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) ts, err := trie.NewTrieStorageManagerWithoutSnapshot(tsm) assert.Nil(t, err) assert.NotNil(t, ts) @@ -35,7 +35,7 @@ func TestNewTrieStorageManagerWithoutSnapshot(t *testing.T) { func TestTrieStorageManagerWithoutSnapshot_GetFromCurrentEpoch(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() tsm, _ := trie.NewTrieStorageManager(args) ts, _ := trie.NewTrieStorageManagerWithoutSnapshot(tsm) @@ -51,7 +51,7 @@ func TestTrieStorageManagerWithoutSnapshot_GetFromCurrentEpoch(t *testing.T) { func TestTrieStorageManagerWithoutSnapshot_PutInEpoch(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() tsm, _ := trie.NewTrieStorageManager(args) ts, _ := trie.NewTrieStorageManagerWithoutSnapshot(tsm) @@ -68,7 +68,7 @@ func TestTrieStorageManagerWithoutSnapshot_PutInEpoch(t *testing.T) { func TestTrieStorageManagerWithoutSnapshot_PutInEpochWithoutCache(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := 
trie.GetDefaultTrieStorageManagerParameters() tsm, _ := trie.NewTrieStorageManager(args) ts, _ := trie.NewTrieStorageManagerWithoutSnapshot(tsm) @@ -85,7 +85,7 @@ func TestTrieStorageManagerWithoutSnapshot_PutInEpochWithoutCache(t *testing.T) func TestTrieStorageManagerWithoutSnapshot_TakeSnapshot(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() tsm, _ := trie.NewTrieStorageManager(args) ts, _ := trie.NewTrieStorageManagerWithoutSnapshot(tsm) @@ -105,7 +105,7 @@ func TestTrieStorageManagerWithoutSnapshot_TakeSnapshot(t *testing.T) { func TestTrieStorageManagerWithoutSnapshot_GetLatestStorageEpoch(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() tsm, _ := trie.NewTrieStorageManager(args) ts, _ := trie.NewTrieStorageManagerWithoutSnapshot(tsm) @@ -117,7 +117,7 @@ func TestTrieStorageManagerWithoutSnapshot_GetLatestStorageEpoch(t *testing.T) { func TestTrieStorageManagerWithoutSnapshot_SetEpochForPutOperationDoesNotPanic(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() tsm, _ := trie.NewTrieStorageManager(args) ts, _ := trie.NewTrieStorageManagerWithoutSnapshot(tsm) @@ -127,7 +127,7 @@ func TestTrieStorageManagerWithoutSnapshot_SetEpochForPutOperationDoesNotPanic(t func TestTrieStorageManagerWithoutSnapshot_ShouldTakeSnapshot(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() tsm, _ := trie.NewTrieStorageManager(args) ts, _ := trie.NewTrieStorageManagerWithoutSnapshot(tsm) @@ -141,7 +141,7 @@ func TestTrieStorageManagerWithoutSnapshot_IsInterfaceNil(t *testing.T) { var ts common.StorageManager assert.True(t, check.IfNil(ts)) - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() tsm, _ := trie.NewTrieStorageManager(args) ts, _ 
= trie.NewTrieStorageManagerWithoutSnapshot(tsm) assert.False(t, check.IfNil(ts)) diff --git a/trie/trieStorageManager_test.go b/trie/trieStorageManager_test.go index d2e687742f4..32e14ca9655 100644 --- a/trie/trieStorageManager_test.go +++ b/trie/trieStorageManager_test.go @@ -10,41 +10,22 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/errChan" - "github.com/multiversx/mx-chain-go/config" storageMx "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/testscommon" - "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/testscommon/storageManager" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" - "github.com/multiversx/mx-chain-go/trie/hashesHolder" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -const ( - hashSize = 32 -) - var ( providedKey = []byte("key") providedVal = []byte("value") expectedErr = errorsGo.New("expected error") ) -func getNewTrieStorageManagerArgs() trie.NewTrieStorageManagerArgs { - return trie.NewTrieStorageManagerArgs{ - MainStorer: testscommon.CreateMemUnit(), - CheckpointsStorer: testscommon.CreateMemUnit(), - Marshalizer: &testscommon.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - GeneralConfig: config.TrieStorageManagerConfig{SnapshotsGoroutineNum: 1}, - CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10, hashSize), - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } -} - // errChanWithLen extends the BufferedErrChan interface with a Len method type errChanWithLen interface { common.BufferedErrChan @@ -57,7 +38,7 @@ func TestNewTrieStorageManager(t *testing.T) { t.Run("nil main storer", func(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := 
trie.GetDefaultTrieStorageManagerParameters() args.MainStorer = nil ts, err := trie.NewTrieStorageManager(args) assert.Nil(t, ts) @@ -66,7 +47,7 @@ func TestNewTrieStorageManager(t *testing.T) { t.Run("nil checkpoints storer", func(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() args.CheckpointsStorer = nil ts, err := trie.NewTrieStorageManager(args) assert.Nil(t, ts) @@ -75,7 +56,7 @@ func TestNewTrieStorageManager(t *testing.T) { t.Run("nil marshaller", func(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() args.Marshalizer = nil ts, err := trie.NewTrieStorageManager(args) assert.Nil(t, ts) @@ -84,7 +65,7 @@ func TestNewTrieStorageManager(t *testing.T) { t.Run("nil hasher", func(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() args.Hasher = nil ts, err := trie.NewTrieStorageManager(args) assert.Nil(t, ts) @@ -93,7 +74,7 @@ func TestNewTrieStorageManager(t *testing.T) { t.Run("nil checkpoint hashes holder", func(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() args.CheckpointHashesHolder = nil ts, err := trie.NewTrieStorageManager(args) assert.Nil(t, ts) @@ -102,7 +83,7 @@ func TestNewTrieStorageManager(t *testing.T) { t.Run("nil idle provider", func(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() args.IdleProvider = nil ts, err := trie.NewTrieStorageManager(args) assert.Nil(t, ts) @@ -111,7 +92,7 @@ func TestNewTrieStorageManager(t *testing.T) { t.Run("invalid config should error", func(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() args.GeneralConfig.SnapshotsGoroutineNum = 0 ts, err := 
trie.NewTrieStorageManager(args) assert.Nil(t, ts) @@ -120,7 +101,7 @@ func TestNewTrieStorageManager(t *testing.T) { t.Run("should work", func(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() ts, err := trie.NewTrieStorageManager(args) assert.Nil(t, err) assert.NotNil(t, ts) @@ -159,7 +140,7 @@ func TestTrieCheckpoint(t *testing.T) { func TestTrieStorageManager_SetCheckpointNilErrorChan(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() ts, _ := trie.NewTrieStorageManager(args) rootHash := []byte("rootHash") @@ -178,7 +159,7 @@ func TestTrieStorageManager_SetCheckpointNilErrorChan(t *testing.T) { func TestTrieStorageManager_SetCheckpointClosedDb(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() ts, _ := trie.NewTrieStorageManager(args) _ = ts.Close() @@ -199,7 +180,7 @@ func TestTrieStorageManager_SetCheckpointClosedDb(t *testing.T) { func TestTrieStorageManager_SetCheckpointEmptyTrieRootHash(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() ts, _ := trie.NewTrieStorageManager(args) rootHash := make([]byte, 32) @@ -244,7 +225,7 @@ func TestTrieCheckpoint_DoesNotSaveToCheckpointStorageIfNotDirty(t *testing.T) { func TestTrieStorageManager_IsPruningEnabled(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() ts, _ := trie.NewTrieStorageManager(args) assert.True(t, ts.IsPruningEnabled()) @@ -253,7 +234,7 @@ func TestTrieStorageManager_IsPruningEnabled(t *testing.T) { func TestTrieStorageManager_IsPruningBlocked(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() ts, _ := trie.NewTrieStorageManager(args) 
ts.ExitPruningBufferingMode() // early exit @@ -273,7 +254,7 @@ func TestTrieStorageManager_Remove(t *testing.T) { t.Parallel() wasCalled := false - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() args.MainStorer = &storage.StorerStub{ RemoveCalled: func(key []byte) error { wasCalled = true @@ -289,7 +270,7 @@ func TestTrieStorageManager_Remove(t *testing.T) { t.Run("should work", func(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() args.MainStorer = testscommon.NewSnapshotPruningStorerMock() args.CheckpointsStorer = testscommon.NewSnapshotPruningStorerMock() ts, _ := trie.NewTrieStorageManager(args) @@ -321,7 +302,7 @@ func TestTrieStorageManager_RemoveFromCheckpointHashesHolder(t *testing.T) { t.Parallel() wasCalled := false - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() args.CheckpointHashesHolder = &trieMock.CheckpointHashesHolderStub{ RemoveCalled: func(bytes []byte) { wasCalled = true @@ -339,7 +320,7 @@ func TestTrieStorageManager_SetEpochForPutOperation(t *testing.T) { t.Run("main storer not epochStorer should early exit", func(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() args.MainStorer = &storage.StorerStub{} ts, _ := trie.NewTrieStorageManager(args) @@ -350,7 +331,7 @@ func TestTrieStorageManager_SetEpochForPutOperation(t *testing.T) { providedEpoch := uint32(100) wasCalled := false - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() args.MainStorer = &storageManager.StorageManagerStub{ SetEpochForPutOperationCalled: func(u uint32) { assert.Equal(t, providedEpoch, u) @@ -367,7 +348,7 @@ func TestTrieStorageManager_SetEpochForPutOperation(t *testing.T) { func TestTrieStorageManager_PutInEpochClosedDb(t *testing.T) { t.Parallel() - args := 
getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() ts, _ := trie.NewTrieStorageManager(args) _ = ts.Close() @@ -378,7 +359,8 @@ func TestTrieStorageManager_PutInEpochClosedDb(t *testing.T) { func TestTrieStorageManager_PutInEpochInvalidStorer(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() + args.MainStorer = testscommon.CreateMemUnit() ts, _ := trie.NewTrieStorageManager(args) err := ts.PutInEpoch(providedKey, providedVal, 0) @@ -389,7 +371,7 @@ func TestTrieStorageManager_PutInEpoch(t *testing.T) { t.Parallel() putInEpochCalled := false - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() args.MainStorer = &trieMock.SnapshotPruningStorerStub{ MemDbMock: testscommon.NewMemDbMock(), PutInEpochCalled: func(key []byte, data []byte, epoch uint32) error { @@ -407,7 +389,8 @@ func TestTrieStorageManager_PutInEpoch(t *testing.T) { func TestTrieStorageManager_GetLatestStorageEpochInvalidStorer(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() + args.MainStorer = testscommon.CreateMemUnit() ts, _ := trie.NewTrieStorageManager(args) val, err := ts.GetLatestStorageEpoch() @@ -419,7 +402,7 @@ func TestTrieStorageManager_GetLatestStorageEpoch(t *testing.T) { t.Parallel() getLatestSorageCalled := false - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() args.MainStorer = &trieMock.SnapshotPruningStorerStub{ MemDbMock: testscommon.NewMemDbMock(), GetLatestStorageEpochCalled: func() (uint32, error) { @@ -438,7 +421,7 @@ func TestTrieStorageManager_GetLatestStorageEpoch(t *testing.T) { func TestTrieStorageManager_TakeSnapshotNilErrorChan(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() ts, _ := trie.NewTrieStorageManager(args) 
rootHash := []byte("rootHash") @@ -457,7 +440,7 @@ func TestTrieStorageManager_TakeSnapshotNilErrorChan(t *testing.T) { func TestTrieStorageManager_TakeSnapshotClosedDb(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() ts, _ := trie.NewTrieStorageManager(args) _ = ts.Close() @@ -478,7 +461,7 @@ func TestTrieStorageManager_TakeSnapshotClosedDb(t *testing.T) { func TestTrieStorageManager_TakeSnapshotEmptyTrieRootHash(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() ts, _ := trie.NewTrieStorageManager(args) rootHash := make([]byte, 32) @@ -498,7 +481,7 @@ func TestTrieStorageManager_TakeSnapshotEmptyTrieRootHash(t *testing.T) { func TestTrieStorageManager_TakeSnapshotWithGetNodeFromDBError(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() args.MainStorer = testscommon.NewSnapshotPruningStorerMock() ts, _ := trie.NewTrieStorageManager(args) @@ -525,7 +508,7 @@ func TestTrieStorageManager_ShouldTakeSnapshot(t *testing.T) { t.Run("invalid storer should return false", func(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() ts, _ := trie.NewTrieStorageManager(args) assert.False(t, ts.ShouldTakeSnapshot()) @@ -533,7 +516,7 @@ func TestTrieStorageManager_ShouldTakeSnapshot(t *testing.T) { t.Run("trie synced should return false", func(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() args.MainStorer = &trieMock.SnapshotPruningStorerStub{ GetFromCurrentEpochCalled: func(key []byte) ([]byte, error) { return []byte(common.TrieSyncedVal), nil @@ -547,7 +530,7 @@ func TestTrieStorageManager_ShouldTakeSnapshot(t *testing.T) { t.Run("GetFromOldEpochsWithoutAddingToCacheCalled error should return false", func(t 
*testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() args.MainStorer = &trieMock.SnapshotPruningStorerStub{ GetFromCurrentEpochCalled: func(key []byte) ([]byte, error) { return nil, expectedErr // isTrieSynced returns false @@ -564,7 +547,7 @@ func TestTrieStorageManager_ShouldTakeSnapshot(t *testing.T) { t.Run("GetFromOldEpochsWithoutAddingToCacheCalled returns non ActiveDBVal should return false", func(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() args.MainStorer = &trieMock.SnapshotPruningStorerStub{ GetFromCurrentEpochCalled: func(key []byte) ([]byte, error) { return []byte("response"), nil @@ -581,7 +564,7 @@ func TestTrieStorageManager_ShouldTakeSnapshot(t *testing.T) { t.Run("GetFromOldEpochsWithoutAddingToCacheCalled returns ActiveDBVal should return true", func(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() args.MainStorer = &trieMock.SnapshotPruningStorerStub{ GetFromCurrentEpochCalled: func(key []byte) ([]byte, error) { return nil, expectedErr // isTrieSynced returns false @@ -603,7 +586,7 @@ func TestTrieStorageManager_Get(t *testing.T) { t.Run("closed storage manager should error", func(t *testing.T) { t.Parallel() - ts, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) + ts, _ := trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) _ = ts.Close() val, err := ts.Get(providedKey) @@ -613,7 +596,7 @@ func TestTrieStorageManager_Get(t *testing.T) { t.Run("main storer closing should error", func(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() args.MainStorer = &storage.StorerStub{ GetCalled: func(key []byte) ([]byte, error) { return nil, storageMx.ErrDBIsClosed @@ -628,7 +611,7 @@ func TestTrieStorageManager_Get(t 
*testing.T) { t.Run("checkpoints storer closing should error", func(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() args.CheckpointsStorer = &storage.StorerStub{ GetCalled: func(key []byte) ([]byte, error) { return nil, storageMx.ErrDBIsClosed @@ -643,7 +626,7 @@ func TestTrieStorageManager_Get(t *testing.T) { t.Run("should return from main storer", func(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() _ = args.MainStorer.Put(providedKey, providedVal) ts, _ := trie.NewTrieStorageManager(args) @@ -654,7 +637,7 @@ func TestTrieStorageManager_Get(t *testing.T) { t.Run("should return from checkpoints storer", func(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() _ = args.CheckpointsStorer.Put(providedKey, providedVal) ts, _ := trie.NewTrieStorageManager(args) @@ -670,7 +653,7 @@ func TestNewSnapshotTrieStorageManager_GetFromCurrentEpoch(t *testing.T) { t.Run("closed storage manager should error", func(t *testing.T) { t.Parallel() - ts, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) + ts, _ := trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) _ = ts.Close() val, err := ts.GetFromCurrentEpoch(providedKey) @@ -680,7 +663,7 @@ func TestNewSnapshotTrieStorageManager_GetFromCurrentEpoch(t *testing.T) { t.Run("main storer not snapshotPruningStorer should error", func(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() args.MainStorer = &storage.StorerStub{} ts, _ := trie.NewTrieStorageManager(args) @@ -692,7 +675,7 @@ func TestNewSnapshotTrieStorageManager_GetFromCurrentEpoch(t *testing.T) { t.Parallel() getFromCurrentEpochCalled := false - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() 
args.MainStorer = &trieMock.SnapshotPruningStorerStub{ MemDbMock: testscommon.NewMemDbMock(), GetFromCurrentEpochCalled: func(_ []byte) ([]byte, error) { @@ -714,7 +697,7 @@ func TestTrieStorageManager_Put(t *testing.T) { t.Run("closed storage manager should error", func(t *testing.T) { t.Parallel() - ts, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) + ts, _ := trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) _ = ts.Close() err := ts.Put(providedKey, providedVal) @@ -723,7 +706,7 @@ func TestTrieStorageManager_Put(t *testing.T) { t.Run("should work", func(t *testing.T) { t.Parallel() - ts, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) + ts, _ := trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) _ = ts.Put(providedKey, providedVal) val, err := ts.Get(providedKey) @@ -738,7 +721,7 @@ func TestTrieStorageManager_PutInEpochWithoutCache(t *testing.T) { t.Run("closed storage manager should error", func(t *testing.T) { t.Parallel() - ts, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) + ts, _ := trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) _ = ts.Close() err := ts.PutInEpochWithoutCache(providedKey, providedVal, 0) @@ -747,7 +730,7 @@ func TestTrieStorageManager_PutInEpochWithoutCache(t *testing.T) { t.Run("main storer not snapshotPruningStorer should error", func(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() args.MainStorer = &storage.StorerStub{} ts, _ := trie.NewTrieStorageManager(args) @@ -757,7 +740,7 @@ func TestTrieStorageManager_PutInEpochWithoutCache(t *testing.T) { t.Run("should work", func(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() args.MainStorer = testscommon.NewSnapshotPruningStorerMock() ts, _ := trie.NewTrieStorageManager(args) @@ -772,7 +755,7 @@ func 
TestTrieStorageManager_Close(t *testing.T) { t.Run("error on main storer close", func(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() args.MainStorer = &storage.StorerStub{ CloseCalled: func() error { return expectedErr @@ -786,7 +769,7 @@ func TestTrieStorageManager_Close(t *testing.T) { t.Run("error on checkpoints storer close", func(t *testing.T) { t.Parallel() - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() args.CheckpointsStorer = &storage.StorerStub{ CloseCalled: func() error { return expectedErr @@ -800,7 +783,7 @@ func TestTrieStorageManager_Close(t *testing.T) { t.Run("should work", func(t *testing.T) { t.Parallel() - ts, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) + ts, _ := trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) err := ts.Close() assert.NoError(t, err) @@ -912,7 +895,7 @@ func TestTrieStorageManager_GetIdentifier(t *testing.T) { t.Run("db without identifier", func(t *testing.T) { t.Parallel() - ts, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) + ts, _ := trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) id := ts.GetIdentifier() assert.Equal(t, "", id) }) @@ -921,7 +904,7 @@ func TestTrieStorageManager_GetIdentifier(t *testing.T) { t.Parallel() expectedIdentifier := "identifier" - args := getNewTrieStorageManagerArgs() + args := trie.GetDefaultTrieStorageManagerParameters() args.MainStorer = &storage.StorerStub{ GetIdentifierCalled: func() string { return expectedIdentifier From 1094c2df43329f835eabe1bde09668316804a366 Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Mon, 22 May 2023 13:00:20 +0300 Subject: [PATCH 202/221] remove duplicated NewTrieStorageManagerArgs inits --- epochStart/metachain/baseRewards_test.go | 4 +- epochStart/metachain/systemSCs_test.go | 4 +- .../processing/blockProcessorCreator_test.go | 6 +- 
genesis/process/genesisBlockCreator_test.go | 4 +- .../benchmarks/loadFromTrie_test.go | 23 +++---- .../longTests/storage/storage_test.go | 4 +- .../state/stateTrie/stateTrie_test.go | 20 ++---- .../stateTrieClose/stateTrieClose_test.go | 15 +---- integrationTests/testInitializer.go | 63 ++++++------------- state/accountsDB_test.go | 53 +++------------- .../storagePruningManager_test.go | 12 +--- testscommon/components/components.go | 19 +----- testscommon/storage/storageManagerArgs.go | 21 ++++--- 13 files changed, 71 insertions(+), 177 deletions(-) diff --git a/epochStart/metachain/baseRewards_test.go b/epochStart/metachain/baseRewards_test.go index c84e6b1d246..57fd8ad7a9b 100644 --- a/epochStart/metachain/baseRewards_test.go +++ b/epochStart/metachain/baseRewards_test.go @@ -1136,11 +1136,11 @@ func getBaseRewardsArguments() BaseRewardsCreatorArgs { hasher := sha256.NewSha256() marshalizer := &marshal.GogoProtoMarshalizer{} - storageManagerArgs, options := storage.GetStorageManagerArgsAndOptions() + storageManagerArgs := storage.GetStorageManagerArgs() storageManagerArgs.Marshalizer = marshalizer storageManagerArgs.Hasher = hasher - trieFactoryManager, _ := trie.CreateTrieStorageManager(storageManagerArgs, options) + trieFactoryManager, _ := trie.CreateTrieStorageManager(storageManagerArgs, storage.GetStorageManagerOptions()) userAccountsDB := createAccountsDB(hasher, marshalizer, factory.NewAccountCreator(), trieFactoryManager) shardCoordinator := mock.NewMultiShardsCoordinatorMock(2) shardCoordinator.CurrentShard = core.MetachainShardId diff --git a/epochStart/metachain/systemSCs_test.go b/epochStart/metachain/systemSCs_test.go index 28b8cac8b4e..8b48dc948f7 100644 --- a/epochStart/metachain/systemSCs_test.go +++ b/epochStart/metachain/systemSCs_test.go @@ -910,13 +910,13 @@ func createAccountsDB( func createFullArgumentsForSystemSCProcessing(enableEpochsConfig config.EnableEpochs, trieStorer storage.Storer) (ArgsNewEpochStartSystemSCProcessing, 
vm.SystemSCContainer) { hasher := sha256.NewSha256() marshalizer := &marshal.GogoProtoMarshalizer{} - storageManagerArgs, options := stateMock.GetStorageManagerArgsAndOptions() + storageManagerArgs := stateMock.GetStorageManagerArgs() storageManagerArgs.Marshalizer = marshalizer storageManagerArgs.Hasher = hasher storageManagerArgs.MainStorer = trieStorer storageManagerArgs.CheckpointsStorer = trieStorer - trieFactoryManager, _ := trie.CreateTrieStorageManager(storageManagerArgs, options) + trieFactoryManager, _ := trie.CreateTrieStorageManager(storageManagerArgs, stateMock.GetStorageManagerOptions()) userAccountsDB := createAccountsDB(hasher, marshalizer, factory.NewAccountCreator(), trieFactoryManager) peerAccountsDB := createAccountsDB(hasher, marshalizer, factory.NewPeerAccountCreator(), trieFactoryManager) en := forking.NewGenericEpochNotifier() diff --git a/factory/processing/blockProcessorCreator_test.go b/factory/processing/blockProcessorCreator_test.go index def228725ac..088269d62bd 100644 --- a/factory/processing/blockProcessorCreator_test.go +++ b/factory/processing/blockProcessorCreator_test.go @@ -83,14 +83,14 @@ func Test_newBlockProcessorCreatorForMeta(t *testing.T) { cryptoComponents := componentsMock.GetCryptoComponents(coreComponents) networkComponents := componentsMock.GetNetworkComponents(cryptoComponents) - storageManagerArgs, options := storageManager.GetStorageManagerArgsAndOptions() + storageManagerArgs := storageManager.GetStorageManagerArgs() storageManagerArgs.Marshalizer = coreComponents.InternalMarshalizer() storageManagerArgs.Hasher = coreComponents.Hasher() - storageManagerUser, _ := trie.CreateTrieStorageManager(storageManagerArgs, options) + storageManagerUser, _ := trie.CreateTrieStorageManager(storageManagerArgs, storageManager.GetStorageManagerOptions()) storageManagerArgs.MainStorer = mock.NewMemDbMock() storageManagerArgs.CheckpointsStorer = mock.NewMemDbMock() - storageManagerPeer, _ := 
trie.CreateTrieStorageManager(storageManagerArgs, options) + storageManagerPeer, _ := trie.CreateTrieStorageManager(storageManagerArgs, storageManager.GetStorageManagerOptions()) trieStorageManagers := make(map[string]common.StorageManager) trieStorageManagers[dataRetriever.UserAccountsUnit.String()] = storageManagerUser diff --git a/genesis/process/genesisBlockCreator_test.go b/genesis/process/genesisBlockCreator_test.go index c4f3cbc1e9e..db4e29072d8 100644 --- a/genesis/process/genesisBlockCreator_test.go +++ b/genesis/process/genesisBlockCreator_test.go @@ -52,8 +52,8 @@ func createMockArgument( entireSupply *big.Int, ) ArgsGenesisBlockCreator { - storageManagerArgs, options := storageCommon.GetStorageManagerArgsAndOptions() - storageManager, _ := trie.CreateTrieStorageManager(storageManagerArgs, options) + storageManagerArgs := storageCommon.GetStorageManagerArgs() + storageManager, _ := trie.CreateTrieStorageManager(storageManagerArgs, storageCommon.GetStorageManagerOptions()) trieStorageManagers := make(map[string]common.StorageManager) trieStorageManagers[dataRetriever.UserAccountsUnit.String()] = storageManager diff --git a/integrationTests/benchmarks/loadFromTrie_test.go b/integrationTests/benchmarks/loadFromTrie_test.go index 487ced6b447..711ddeba293 100644 --- a/integrationTests/benchmarks/loadFromTrie_test.go +++ b/integrationTests/benchmarks/loadFromTrie_test.go @@ -12,11 +12,10 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing/blake2b" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" - "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/storage" "github.com/multiversx/mx-chain-go/storage/database" "github.com/multiversx/mx-chain-go/storage/storageunit" - "github.com/multiversx/mx-chain-go/testscommon" + testStorage "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/trie" "github.com/multiversx/mx-chain-go/trie/hashesHolder/disabled" 
"github.com/stretchr/testify/require" @@ -163,20 +162,12 @@ func generateRandHexString(size int) string { } func getTrieStorageManager(store storage.Storer, marshaller marshal.Marshalizer, hasher hashing.Hasher) common.StorageManager { - generalCfg := config.TrieStorageManagerConfig{ - PruningBufferLen: 1000, - SnapshotsBufferLen: 10, - SnapshotsGoroutineNum: 1, - } - args := trie.NewTrieStorageManagerArgs{ - MainStorer: store, - CheckpointsStorer: database.NewMemDB(), - Marshalizer: marshaller, - Hasher: hasher, - GeneralConfig: generalCfg, - CheckpointHashesHolder: disabled.NewDisabledCheckpointHashesHolder(), - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } + args := testStorage.GetStorageManagerArgs() + args.MainStorer = store + args.Marshalizer = marshaller + args.Hasher = hasher + args.CheckpointHashesHolder = disabled.NewDisabledCheckpointHashesHolder() + trieStorageManager, _ := trie.NewTrieStorageManager(args) return trieStorageManager diff --git a/integrationTests/longTests/storage/storage_test.go b/integrationTests/longTests/storage/storage_test.go index 88269350c68..56474f26978 100644 --- a/integrationTests/longTests/storage/storage_test.go +++ b/integrationTests/longTests/storage/storage_test.go @@ -105,10 +105,12 @@ func TestWriteContinuouslyInTree(t *testing.T) { nbTxsWrite := 1000000 testStorage := integrationTests.NewTestStorage() store := testStorage.CreateStorageLevelDB() - storageManagerArgs, options := storage.GetStorageManagerArgsAndOptions() + storageManagerArgs := storage.GetStorageManagerArgs() storageManagerArgs.MainStorer = store storageManagerArgs.Marshalizer = &marshal.JsonMarshalizer{} storageManagerArgs.Hasher = blake2b.NewBlake2b() + + options := storage.GetStorageManagerOptions() options.CheckpointsEnabled = false options.PruningEnabled = false diff --git a/integrationTests/state/stateTrie/stateTrie_test.go b/integrationTests/state/stateTrie/stateTrie_test.go index b46575bdbce..f653c917308 100644 --- 
a/integrationTests/state/stateTrie/stateTrie_test.go +++ b/integrationTests/state/stateTrie/stateTrie_test.go @@ -37,7 +37,7 @@ import ( "github.com/multiversx/mx-chain-go/storage/storageunit" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" - trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" + testStorage "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/trie" vmcommon "github.com/multiversx/mx-chain-vm-common-go" "github.com/stretchr/testify/assert" @@ -46,18 +46,6 @@ import ( const denomination = "000000000000000000" -func getNewTrieStorageManagerArgs() trie.NewTrieStorageManagerArgs { - return trie.NewTrieStorageManagerArgs{ - MainStorer: integrationTests.CreateMemUnit(), - CheckpointsStorer: integrationTests.CreateMemUnit(), - Marshalizer: integrationTests.TestMarshalizer, - Hasher: integrationTests.TestHasher, - GeneralConfig: config.TrieStorageManagerConfig{SnapshotsGoroutineNum: 1}, - CheckpointHashesHolder: &trieMock.CheckpointHashesHolderStub{}, - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } -} - func TestAccountsDB_RetrieveDataWithSomeValuesShouldWork(t *testing.T) { // test simulates creation of a new account, data trie retrieval, // adding a (key, value) pair in that data trie, committing changes @@ -269,7 +257,7 @@ func TestAccountsDB_CommitTwoOkAccountsShouldWork(t *testing.T) { func TestTrieDB_RecreateFromStorageShouldWork(t *testing.T) { hasher := integrationTests.TestHasher store := integrationTests.CreateMemUnit() - args := getNewTrieStorageManagerArgs() + args := testStorage.GetStorageManagerArgs() args.MainStorer = store args.Hasher = hasher trieStorage, _ := trie.NewTrieStorageManager(args) @@ -1054,7 +1042,7 @@ func createAccounts( HashesSize: evictionWaitListSize * 100, } ewl, _ := evictionWaitingList.NewMemoryEvictionWaitingList(ewlArgs) - args := getNewTrieStorageManagerArgs() + args := 
testStorage.GetStorageManagerArgs() args.MainStorer = store trieStorage, _ := trie.NewTrieStorageManager(args) maxTrieLevelInMemory := uint(5) @@ -2486,7 +2474,7 @@ func createAccountsDBTestSetup() *state.AccountsDB { HashesSize: evictionWaitListSize * 100, } ewl, _ := evictionWaitingList.NewMemoryEvictionWaitingList(ewlArgs) - args := getNewTrieStorageManagerArgs() + args := testStorage.GetStorageManagerArgs() args.GeneralConfig = generalCfg trieStorage, _ := trie.NewTrieStorageManager(args) maxTrieLevelInMemory := uint(5) diff --git a/integrationTests/state/stateTrieClose/stateTrieClose_test.go b/integrationTests/state/stateTrieClose/stateTrieClose_test.go index 985f49c660a..7b96f2b39b1 100644 --- a/integrationTests/state/stateTrieClose/stateTrieClose_test.go +++ b/integrationTests/state/stateTrieClose/stateTrieClose_test.go @@ -10,13 +10,10 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/common/errChan" - "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/integrationTests" - "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/goroutines" - "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" + "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/trie" - "github.com/multiversx/mx-chain-go/trie/hashesHolder" "github.com/multiversx/mx-chain-go/trie/keyBuilder" "github.com/stretchr/testify/assert" ) @@ -116,15 +113,7 @@ func TestPatriciaMerkleTrie_Close(t *testing.T) { } func TestTrieStorageManager_Close(t *testing.T) { - args := trie.NewTrieStorageManagerArgs{ - MainStorer: testscommon.CreateMemUnit(), - CheckpointsStorer: testscommon.CreateMemUnit(), - Marshalizer: &testscommon.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - GeneralConfig: config.TrieStorageManagerConfig{SnapshotsGoroutineNum: 1}, - CheckpointHashesHolder: 
hashesHolder.NewCheckpointHashesHolder(10, 32), - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } + args := storage.GetStorageManagerArgs() gc := goroutines.NewGoCounter(goroutines.TestsRelevantGoRoutines) idxInitial, _ := gc.Snapshot() diff --git a/integrationTests/testInitializer.go b/integrationTests/testInitializer.go index 14d7367fafe..b1c860ff006 100644 --- a/integrationTests/testInitializer.go +++ b/integrationTests/testInitializer.go @@ -62,6 +62,7 @@ import ( testStorage "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + testcommonStorage "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/testscommon/txDataBuilder" "github.com/multiversx/mx-chain-go/trie" "github.com/multiversx/mx-chain-go/trie/hashesHolder" @@ -396,12 +397,6 @@ func CreateStore(numOfShards uint32) dataRetriever.StorageService { // CreateTrieStorageManagerWithPruningStorer creates the trie storage manager for the tests func CreateTrieStorageManagerWithPruningStorer(coordinator sharding.Coordinator, notifier pruning.EpochStartNotifier) common.StorageManager { - generalCfg := config.TrieStorageManagerConfig{ - PruningBufferLen: 1000, - SnapshotsBufferLen: 10, - SnapshotsGoroutineNum: 1, - } - mainStorer, _, err := testStorage.CreateTestingTriePruningStorer(coordinator, notifier) if err != nil { fmt.Println("err creating main storer" + err.Error()) @@ -410,15 +405,14 @@ func CreateTrieStorageManagerWithPruningStorer(coordinator sharding.Coordinator, if err != nil { fmt.Println("err creating checkpoints storer" + err.Error()) } - args := trie.NewTrieStorageManagerArgs{ - MainStorer: mainStorer, - CheckpointsStorer: checkpointsStorer, - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - GeneralConfig: generalCfg, - CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10000000, 
uint64(TestHasher.Size())), - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } + + args := testcommonStorage.GetStorageManagerArgs() + args.MainStorer = mainStorer + args.CheckpointsStorer = checkpointsStorer + args.Marshalizer = TestMarshalizer + args.Hasher = TestHasher + args.CheckpointHashesHolder = hashesHolder.NewCheckpointHashesHolder(10000000, uint64(TestHasher.Size())) + trieStorageManager, _ := trie.NewTrieStorageManager(args) return trieStorageManager @@ -426,20 +420,12 @@ func CreateTrieStorageManagerWithPruningStorer(coordinator sharding.Coordinator, // CreateTrieStorageManager creates the trie storage manager for the tests func CreateTrieStorageManager(store storage.Storer) (common.StorageManager, storage.Storer) { - generalCfg := config.TrieStorageManagerConfig{ - PruningBufferLen: 1000, - SnapshotsBufferLen: 10, - SnapshotsGoroutineNum: 1, - } - args := trie.NewTrieStorageManagerArgs{ - MainStorer: store, - CheckpointsStorer: CreateMemUnit(), - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - GeneralConfig: generalCfg, - CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10000000, uint64(TestHasher.Size())), - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } + args := testcommonStorage.GetStorageManagerArgs() + args.MainStorer = store + args.Marshalizer = TestMarshalizer + args.Hasher = TestHasher + args.CheckpointHashesHolder = hashesHolder.NewCheckpointHashesHolder(10000000, uint64(TestHasher.Size())) + trieStorageManager, _ := trie.NewTrieStorageManager(args) return trieStorageManager, store @@ -1024,20 +1010,11 @@ func CreateSimpleTxProcessor(accnts state.AccountsAdapter) process.TransactionPr // CreateNewDefaultTrie returns a new trie with test hasher and marsahalizer func CreateNewDefaultTrie() common.Trie { - generalCfg := config.TrieStorageManagerConfig{ - PruningBufferLen: 1000, - SnapshotsBufferLen: 10, - SnapshotsGoroutineNum: 1, - } - args := trie.NewTrieStorageManagerArgs{ - MainStorer: 
CreateMemUnit(), - CheckpointsStorer: CreateMemUnit(), - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - GeneralConfig: generalCfg, - CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10000000, uint64(TestHasher.Size())), - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } + args := testcommonStorage.GetStorageManagerArgs() + args.Marshalizer = TestMarshalizer + args.Hasher = TestHasher + args.CheckpointHashesHolder = hashesHolder.NewCheckpointHashesHolder(10000000, uint64(TestHasher.Size())) + trieStorage, _ := trie.NewTrieStorageManager(args) tr, _ := trie.NewTrie(trieStorage, TestMarshalizer, TestHasher, maxTrieLevelInMemory) diff --git a/state/accountsDB_test.go b/state/accountsDB_test.go index 2bb43b499b1..29352585ed7 100644 --- a/state/accountsDB_test.go +++ b/state/accountsDB_test.go @@ -31,6 +31,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" stateMock "github.com/multiversx/mx-chain-go/testscommon/state" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/testscommon/storageManager" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" @@ -109,15 +110,9 @@ func getDefaultStateComponents( marshaller := &testscommon.MarshalizerMock{} hasher := &hashingMocks.HasherMock{} - args := trie.NewTrieStorageManagerArgs{ - MainStorer: db, - CheckpointsStorer: testscommon.NewSnapshotPruningStorerMock(), - Marshalizer: marshaller, - Hasher: hasher, - GeneralConfig: generalCfg, - CheckpointHashesHolder: hashesHolder, - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } + args := storage.GetStorageManagerArgs() + args.MainStorer = db + args.CheckpointHashesHolder = hashesHolder trieStorage, _ := trie.NewTrieStorageManager(args) tr, _ := trie.NewTrie(trieStorage, marshaller, hasher, 5) ewlArgs := evictionWaitingList.MemoryEvictionWaitingListArgs{ @@ -1736,15 
+1731,7 @@ func TestAccountsDB_MainTrieAutomaticallyMarksCodeUpdatesForEviction(t *testing. marshaller := &testscommon.MarshalizerMock{} hasher := &hashingMocks.HasherMock{} ewl := stateMock.NewEvictionWaitingListMock(100) - args := trie.NewTrieStorageManagerArgs{ - MainStorer: testscommon.CreateMemUnit(), - CheckpointsStorer: testscommon.CreateMemUnit(), - Marshalizer: marshaller, - Hasher: hasher, - GeneralConfig: config.TrieStorageManagerConfig{SnapshotsGoroutineNum: 1}, - CheckpointHashesHolder: &trieMock.CheckpointHashesHolderStub{}, - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } + args := storage.GetStorageManagerArgs() storageManager, _ := trie.NewTrieStorageManager(args) maxTrieLevelInMemory := uint(5) tr, _ := trie.NewTrie(storageManager, marshaller, hasher, maxTrieLevelInMemory) @@ -1821,15 +1808,7 @@ func TestAccountsDB_RemoveAccountMarksObsoleteHashesForEviction(t *testing.T) { hasher := &hashingMocks.HasherMock{} ewl := stateMock.NewEvictionWaitingListMock(100) - args := trie.NewTrieStorageManagerArgs{ - MainStorer: testscommon.CreateMemUnit(), - CheckpointsStorer: testscommon.CreateMemUnit(), - Marshalizer: marshaller, - Hasher: hasher, - GeneralConfig: config.TrieStorageManagerConfig{SnapshotsGoroutineNum: 1}, - CheckpointHashesHolder: &trieMock.CheckpointHashesHolderStub{}, - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } + args := storage.GetStorageManagerArgs() storageManager, _ := trie.NewTrieStorageManager(args) tr, _ := trie.NewTrie(storageManager, marshaller, hasher, maxTrieLevelInMemory) spm, _ := storagePruningManager.NewStoragePruningManager(ewl, 5) @@ -2249,15 +2228,7 @@ func TestAccountsDB_GetCode(t *testing.T) { marshaller := &testscommon.MarshalizerMock{} hasher := &hashingMocks.HasherMock{} - args := trie.NewTrieStorageManagerArgs{ - MainStorer: testscommon.CreateMemUnit(), - CheckpointsStorer: testscommon.CreateMemUnit(), - Marshalizer: marshaller, - Hasher: hasher, - GeneralConfig: 
config.TrieStorageManagerConfig{SnapshotsGoroutineNum: 1}, - CheckpointHashesHolder: &trieMock.CheckpointHashesHolderStub{}, - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } + args := storage.GetStorageManagerArgs() storageManager, _ := trie.NewTrieStorageManager(args) tr, _ := trie.NewTrie(storageManager, marshaller, hasher, maxTrieLevelInMemory) spm := disabled.NewDisabledStoragePruningManager() @@ -2639,15 +2610,7 @@ func BenchmarkAccountsDb_GetCodeEntry(b *testing.B) { marshaller := &testscommon.MarshalizerMock{} hasher := &hashingMocks.HasherMock{} - args := trie.NewTrieStorageManagerArgs{ - MainStorer: testscommon.CreateMemUnit(), - CheckpointsStorer: testscommon.CreateMemUnit(), - Marshalizer: marshaller, - Hasher: hasher, - GeneralConfig: config.TrieStorageManagerConfig{SnapshotsGoroutineNum: 1}, - CheckpointHashesHolder: &trieMock.CheckpointHashesHolderStub{}, - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } + args := storage.GetStorageManagerArgs() storageManager, _ := trie.NewTrieStorageManager(args) tr, _ := trie.NewTrie(storageManager, marshaller, hasher, maxTrieLevelInMemory) spm := disabled.NewDisabledStoragePruningManager() diff --git a/state/storagePruningManager/storagePruningManager_test.go b/state/storagePruningManager/storagePruningManager_test.go index 28b2473ff2c..1a1a8ace76e 100644 --- a/state/storagePruningManager/storagePruningManager_test.go +++ b/state/storagePruningManager/storagePruningManager_test.go @@ -11,6 +11,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/trie" "github.com/multiversx/mx-chain-go/trie/hashesHolder" "github.com/stretchr/testify/assert" @@ -24,15 +25,8 @@ func getDefaultTrieAndAccountsDbAndStoragePruningManager() (common.Trie, *state. 
} marshaller := &testscommon.MarshalizerMock{} hasher := &hashingMocks.HasherMock{} - args := trie.NewTrieStorageManagerArgs{ - MainStorer: testscommon.CreateMemUnit(), - CheckpointsStorer: testscommon.CreateMemUnit(), - Marshalizer: marshaller, - Hasher: hasher, - GeneralConfig: generalCfg, - CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10000000, testscommon.HashSize), - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } + args := storage.GetStorageManagerArgs() + args.CheckpointHashesHolder = hashesHolder.NewCheckpointHashesHolder(10000000, testscommon.HashSize) trieStorage, _ := trie.NewTrieStorageManager(args) tr, _ := trie.NewTrie(trieStorage, marshaller, hasher, 5) ewlArgs := evictionWaitingList.MemoryEvictionWaitingListArgs{ diff --git a/testscommon/components/components.go b/testscommon/components/components.go index f6748a3c210..2d19d06d1bf 100644 --- a/testscommon/components/components.go +++ b/testscommon/components/components.go @@ -2,6 +2,7 @@ package components import ( "fmt" + "github.com/multiversx/mx-chain-go/testscommon/storage" "math/big" "testing" @@ -36,11 +37,9 @@ import ( "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/dblookupext" - "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" "github.com/multiversx/mx-chain-go/trie" - "github.com/multiversx/mx-chain-go/trie/hashesHolder" logger "github.com/multiversx/mx-chain-logger-go" wasmConfig "github.com/multiversx/mx-chain-vm-v1_4-go/config" "github.com/stretchr/testify/require" @@ -319,23 +318,11 @@ func GetNetworkFactoryArgs() networkComp.NetworkComponentsFactoryArgs { } } -func getNewTrieStorageManagerArgs() trie.NewTrieStorageManagerArgs { - return trie.NewTrieStorageManagerArgs{ - MainStorer: testscommon.CreateMemUnit(), - 
CheckpointsStorer: testscommon.CreateMemUnit(), - Marshalizer: &mock.MarshalizerMock{}, - Hasher: &hashingMocks.HasherMock{}, - GeneralConfig: config.TrieStorageManagerConfig{SnapshotsGoroutineNum: 1}, - CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10, 32), - IdleProvider: &testscommon.ProcessStatusHandlerStub{}, - } -} - // GetStateFactoryArgs - func GetStateFactoryArgs(coreComponents factory.CoreComponentsHolder) stateComp.StateComponentsFactoryArgs { - tsm, _ := trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) + tsm, _ := trie.NewTrieStorageManager(storage.GetStorageManagerArgs()) storageManagerUser, _ := trie.NewTrieStorageManagerWithoutPruning(tsm) - tsm, _ = trie.NewTrieStorageManager(getNewTrieStorageManagerArgs()) + tsm, _ = trie.NewTrieStorageManager(storage.GetStorageManagerArgs()) storageManagerPeer, _ := trie.NewTrieStorageManagerWithoutPruning(tsm) trieStorageManagers := make(map[string]common.StorageManager) diff --git a/testscommon/storage/storageManagerArgs.go b/testscommon/storage/storageManagerArgs.go index b69d19c6b99..74ecc2dd94d 100644 --- a/testscommon/storage/storageManagerArgs.go +++ b/testscommon/storage/storageManagerArgs.go @@ -4,30 +4,33 @@ import ( "github.com/multiversx/mx-chain-go/config" "github.com/multiversx/mx-chain-go/genesis/mock" "github.com/multiversx/mx-chain-go/testscommon" - "github.com/multiversx/mx-chain-go/testscommon/genericMocks" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" trieMock "github.com/multiversx/mx-chain-go/testscommon/trie" "github.com/multiversx/mx-chain-go/trie" ) -// GetStorageManagerArgsAndOptions returns mock args for trie storage manager creation -func GetStorageManagerArgsAndOptions() (trie.NewTrieStorageManagerArgs, trie.StorageManagerOptions) { - storageManagerArgs := trie.NewTrieStorageManagerArgs{ - MainStorer: genericMocks.NewStorerMock(), - CheckpointsStorer: genericMocks.NewStorerMock(), +// GetStorageManagerArgs returns mock args for trie storage 
manager creation +func GetStorageManagerArgs() trie.NewTrieStorageManagerArgs { + return trie.NewTrieStorageManagerArgs{ + MainStorer: testscommon.NewSnapshotPruningStorerMock(), + CheckpointsStorer: testscommon.NewSnapshotPruningStorerMock(), Marshalizer: &mock.MarshalizerMock{}, Hasher: &hashingMocks.HasherMock{}, GeneralConfig: config.TrieStorageManagerConfig{ + PruningBufferLen: 1000, + SnapshotsBufferLen: 10, SnapshotsGoroutineNum: 2, }, CheckpointHashesHolder: &trieMock.CheckpointHashesHolderStub{}, IdleProvider: &testscommon.ProcessStatusHandlerStub{}, } - options := trie.StorageManagerOptions{ +} + +// GetStorageManagerOptions returns default options for trie storage manager creation +func GetStorageManagerOptions() trie.StorageManagerOptions { + return trie.StorageManagerOptions{ PruningEnabled: true, SnapshotsEnabled: true, CheckpointsEnabled: true, } - - return storageManagerArgs, options } From cb6d084667a458ec06344a15812e1a1435f453df Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Mon, 22 May 2023 15:11:52 +0300 Subject: [PATCH 203/221] add identifier to trie storage manager --- .../baseRequestersContainerFactory.go | 2 ++ .../metaRequestersContainerFactory.go | 2 ++ .../shardRequestersContainerFactory.go | 1 + testscommon/integrationtests/factory.go | 22 ++++++------------- testscommon/storage/storageManagerArgs.go | 2 ++ trie/errors.go | 3 +++ trie/export_test.go | 2 ++ trie/factory/trieCreator.go | 4 ++++ trie/factory/trieCreator_test.go | 1 + trie/trieStorageManager.go | 6 +++++ update/factory/dataTrieFactory.go | 2 ++ 11 files changed, 32 insertions(+), 15 deletions(-) diff --git a/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go b/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go index 0157ca5c634..d7407bdb1ba 100644 --- a/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go +++ 
b/dataRetriever/factory/storageRequestersContainer/baseRequestersContainerFactory.go @@ -231,6 +231,7 @@ func (brcf *baseRequestersContainerFactory) createMiniBlocksRequester(responseTo func (brcf *baseRequestersContainerFactory) newImportDBTrieStorage( mainStorer storage.Storer, checkpointsStorer storage.Storer, + storageIdentifier dataRetriever.UnitType, ) (common.StorageManager, dataRetriever.TrieDataGetter, error) { pathManager, err := storageFactory.CreatePathManager( storageFactory.ArgCreatePathManager{ @@ -261,6 +262,7 @@ func (brcf *baseRequestersContainerFactory) newImportDBTrieStorage( MaxTrieLevelInMem: brcf.generalConfig.StateTriesConfig.MaxStateTrieLevelInMemory, SnapshotsEnabled: brcf.snapshotsEnabled, IdleProvider: disabled.NewProcessStatusHandler(), + Identifier: storageIdentifier.String(), } return trieFactoryInstance.Create(args) } diff --git a/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory.go b/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory.go index 498d02cc1b3..092ef541a5c 100644 --- a/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory.go +++ b/dataRetriever/factory/storageRequestersContainer/metaRequestersContainerFactory.go @@ -194,6 +194,7 @@ func (mrcf *metaRequestersContainerFactory) generateTrieNodesRequesters() error storageManager, userAccountsDataTrie, err := mrcf.newImportDBTrieStorage( userAccountsStorer, userAccountsCheckpointStorer, + dataRetriever.UserAccountsUnit, ) if err != nil { return fmt.Errorf("%w while creating user accounts data trie storage getter", err) @@ -230,6 +231,7 @@ func (mrcf *metaRequestersContainerFactory) generateTrieNodesRequesters() error storageManager, peerAccountsDataTrie, err := mrcf.newImportDBTrieStorage( peerAccountsStorer, peerAccountsCheckpointStorer, + dataRetriever.PeerAccountsUnit, ) if err != nil { return fmt.Errorf("%w while creating peer accounts data trie storage getter", err) diff --git 
a/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory.go b/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory.go index f1298ae1391..dcf0acf6583 100644 --- a/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory.go +++ b/dataRetriever/factory/storageRequestersContainer/shardRequestersContainerFactory.go @@ -169,6 +169,7 @@ func (srcf *shardRequestersContainerFactory) generateTrieNodesRequesters() error storageManager, userAccountsDataTrie, err := srcf.newImportDBTrieStorage( userAccountsStorer, userAccountsCheckpointStorer, + dataRetriever.UserAccountsUnit, ) if err != nil { return fmt.Errorf("%w while creating user accounts data trie storage getter", err) diff --git a/testscommon/integrationtests/factory.go b/testscommon/integrationtests/factory.go index 97ab0e858a0..3a1302d43b5 100644 --- a/testscommon/integrationtests/factory.go +++ b/testscommon/integrationtests/factory.go @@ -14,8 +14,8 @@ import ( "github.com/multiversx/mx-chain-go/storage/storageunit" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + testcommonStorage "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/trie" - "github.com/multiversx/mx-chain-go/trie/hashesHolder" ) // TestMarshalizer - @@ -84,20 +84,12 @@ func CreateAccountsDB(db storage.Storer) *state.AccountsDB { HashesSize: 10000, } ewl, _ := evictionWaitingList.NewMemoryEvictionWaitingList(ewlArgs) - generalCfg := config.TrieStorageManagerConfig{ - PruningBufferLen: 1000, - SnapshotsBufferLen: 10, - SnapshotsGoroutineNum: 1, - } - args := trie.NewTrieStorageManagerArgs{ - MainStorer: db, - CheckpointsStorer: CreateMemUnit(), - Marshalizer: TestMarshalizer, - Hasher: TestHasher, - GeneralConfig: generalCfg, - CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10000000, uint64(TestHasher.Size())), - IdleProvider: 
&testscommon.ProcessStatusHandlerStub{}, - } + + args := testcommonStorage.GetStorageManagerArgs() + args.MainStorer = db + args.Marshalizer = TestMarshalizer + args.Hasher = TestHasher + trieStorage, _ := trie.NewTrieStorageManager(args) tr, _ := trie.NewTrie(trieStorage, TestMarshalizer, TestHasher, MaxTrieLevelInMemory) diff --git a/testscommon/storage/storageManagerArgs.go b/testscommon/storage/storageManagerArgs.go index 74ecc2dd94d..a69e795a9d2 100644 --- a/testscommon/storage/storageManagerArgs.go +++ b/testscommon/storage/storageManagerArgs.go @@ -2,6 +2,7 @@ package storage import ( "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/genesis/mock" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" @@ -23,6 +24,7 @@ func GetStorageManagerArgs() trie.NewTrieStorageManagerArgs { }, CheckpointHashesHolder: &trieMock.CheckpointHashesHolderStub{}, IdleProvider: &testscommon.ProcessStatusHandlerStub{}, + Identifier: dataRetriever.UserAccountsUnit.String(), } } diff --git a/trie/errors.go b/trie/errors.go index a225a84c00c..dc229f1c1b0 100644 --- a/trie/errors.go +++ b/trie/errors.go @@ -114,3 +114,6 @@ var ErrNilTrieIteratorLeavesChannel = errors.New("nil trie iterator leaves chann // ErrNilTrieIteratorErrChannel signals that a nil trie iterator error channel has been provided var ErrNilTrieIteratorErrChannel = errors.New("nil trie iterator error channel") + +// ErrInvalidIdentifier signals that an invalid identifier was provided +var ErrInvalidIdentifier = errors.New("invalid identifier") diff --git a/trie/export_test.go b/trie/export_test.go index 1f50041883e..c227b8bf81b 100644 --- a/trie/export_test.go +++ b/trie/export_test.go @@ -6,6 +6,7 @@ import ( "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" + 
"github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/storageManager" "github.com/multiversx/mx-chain-go/trie/hashesHolder" @@ -122,5 +123,6 @@ func GetDefaultTrieStorageManagerParameters() NewTrieStorageManagerArgs { GeneralConfig: generalCfg, CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10000000, testscommon.HashSize), IdleProvider: &testscommon.ProcessStatusHandlerStub{}, + Identifier: dataRetriever.UserAccountsUnit.String(), } } diff --git a/trie/factory/trieCreator.go b/trie/factory/trieCreator.go index e8236f1c116..2958e9edccf 100644 --- a/trie/factory/trieCreator.go +++ b/trie/factory/trieCreator.go @@ -23,6 +23,7 @@ type TrieCreateArgs struct { SnapshotsEnabled bool MaxTrieLevelInMem uint IdleProvider trie.IdleNodeProvider + Identifier string } type trieCreator struct { @@ -64,6 +65,7 @@ func (tc *trieCreator) Create(args TrieCreateArgs) (common.StorageManager, commo GeneralConfig: tc.trieStorageManagerConfig, CheckpointHashesHolder: tc.getCheckpointHashesHolder(args.CheckpointsEnabled), IdleProvider: args.IdleProvider, + Identifier: args.Identifier, } options := trie.StorageManagerOptions{ @@ -140,6 +142,7 @@ func CreateTriesComponentsForShardId( MaxTrieLevelInMem: generalConfig.StateTriesConfig.MaxStateTrieLevelInMemory, SnapshotsEnabled: snapshotsEnabled, IdleProvider: coreComponentsHolder.ProcessStatusHandler(), + Identifier: dataRetriever.UserAccountsUnit.String(), } userStorageManager, userAccountTrie, err := trFactory.Create(args) if err != nil { @@ -170,6 +173,7 @@ func CreateTriesComponentsForShardId( MaxTrieLevelInMem: generalConfig.StateTriesConfig.MaxPeerTrieLevelInMemory, SnapshotsEnabled: snapshotsEnabled, IdleProvider: coreComponentsHolder.ProcessStatusHandler(), + Identifier: dataRetriever.PeerAccountsUnit.String(), } peerStorageManager, peerAccountsTrie, err := trFactory.Create(args) if err != nil { diff --git 
a/trie/factory/trieCreator_test.go b/trie/factory/trieCreator_test.go index 2dba2e68981..55bba27cea4 100644 --- a/trie/factory/trieCreator_test.go +++ b/trie/factory/trieCreator_test.go @@ -37,6 +37,7 @@ func getCreateArgs() factory.TrieCreateArgs { SnapshotsEnabled: true, MaxTrieLevelInMem: 5, IdleProvider: &testscommon.ProcessStatusHandlerStub{}, + Identifier: dataRetriever.UserAccountsUnit.String(), } } diff --git a/trie/trieStorageManager.go b/trie/trieStorageManager.go index dfbce363d38..54948d4e953 100644 --- a/trie/trieStorageManager.go +++ b/trie/trieStorageManager.go @@ -32,6 +32,7 @@ type trieStorageManager struct { closer core.SafeCloser closed bool idleProvider IdleNodeProvider + identifier string } type snapshotsQueueEntry struct { @@ -53,6 +54,7 @@ type NewTrieStorageManagerArgs struct { GeneralConfig config.TrieStorageManagerConfig CheckpointHashesHolder CheckpointHashesHolder IdleProvider IdleNodeProvider + Identifier string } // NewTrieStorageManager creates a new instance of trieStorageManager @@ -75,6 +77,9 @@ func NewTrieStorageManager(args NewTrieStorageManagerArgs) (*trieStorageManager, if check.IfNil(args.IdleProvider) { return nil, ErrNilIdleNodeProvider } + if len(args.Identifier) == 0 { + return nil, ErrInvalidIdentifier + } ctx, cancelFunc := context.WithCancel(context.Background()) @@ -88,6 +93,7 @@ func NewTrieStorageManager(args NewTrieStorageManagerArgs) (*trieStorageManager, checkpointHashesHolder: args.CheckpointHashesHolder, closer: closing.NewSafeChanCloser(), idleProvider: args.IdleProvider, + identifier: args.Identifier, } goRoutinesThrottler, err := throttler.NewNumGoRoutinesThrottler(int32(args.GeneralConfig.SnapshotsGoroutineNum)) if err != nil { diff --git a/update/factory/dataTrieFactory.go b/update/factory/dataTrieFactory.go index 215c90fbf9a..db3fa4ea71b 100644 --- a/update/factory/dataTrieFactory.go +++ b/update/factory/dataTrieFactory.go @@ -10,6 +10,7 @@ import ( "github.com/multiversx/mx-chain-go/common" 
commonDisabled "github.com/multiversx/mx-chain-go/common/disabled" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/storage/database" @@ -73,6 +74,7 @@ func NewDataTrieFactory(args ArgsNewDataTrieFactory) (*dataTrieFactory, error) { }, CheckpointHashesHolder: disabled.NewDisabledCheckpointHashesHolder(), IdleProvider: commonDisabled.NewProcessStatusHandler(), + Identifier: dataRetriever.UserAccountsUnit.String(), } options := trie.StorageManagerOptions{ PruningEnabled: false, From b992485fc070587883eef9b6dc273d5443ebc0e7 Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Mon, 22 May 2023 16:22:05 +0300 Subject: [PATCH 204/221] use identifier from tsm instead of id from storage --- common/interface.go | 17 +++++---- epochStart/mock/storageManagerStub.go | 6 +-- genesis/mock/storageManagerStub.go | 6 +-- process/sync/baseSync.go | 14 ------- process/sync/interface.go | 4 -- process/sync/metablock.go | 14 +------ process/sync/metablock_test.go | 6 +-- state/accountsDB_test.go | 4 +- storage/pruning/pruningStorer.go | 5 --- testscommon/memDbMock.go | 17 +++++++-- .../storageManager/storageManagerStub.go | 2 +- trie/baseIterator.go | 2 +- trie/branchNode.go | 32 ++++++++-------- trie/branchNode_test.go | 9 ++--- trie/depthFirstSync.go | 2 +- trie/doubleListSync.go | 2 +- trie/extensionNode.go | 32 ++++++++-------- trie/extensionNode_test.go | 9 ++--- trie/interface.go | 36 ++++++++---------- trie/leafNode.go | 28 +++++++------- trie/node.go | 15 ++------ trie/sync.go | 4 +- trie/trieStorageManager.go | 18 +++------ trie/trieStorageManager_test.go | 37 ++++++++----------- 24 files changed, 137 insertions(+), 184 deletions(-) diff --git a/common/interface.go b/common/interface.go index b2f48b736a9..04c909fc9e0 100644 --- a/common/interface.go +++ b/common/interface.go @@ -76,9 +76,8 @@ type 
DataTrieHandler interface { // StorageManager manages all trie storage operations type StorageManager interface { - Get(key []byte) ([]byte, error) + TrieStorageInteractor GetFromCurrentEpoch(key []byte) ([]byte, error) - Put(key []byte, val []byte) error PutInEpoch(key []byte, val []byte, epoch uint32) error PutInEpochWithoutCache(key []byte, val []byte, epoch uint32) error TakeSnapshot(address string, rootHash []byte, mainTrieRootHash []byte, iteratorChannels *TrieIteratorChannels, missingNodesChan chan []byte, stats SnapshotStatisticsHandler, epoch uint32) @@ -89,18 +88,22 @@ type StorageManager interface { EnterPruningBufferingMode() ExitPruningBufferingMode() AddDirtyCheckpointHashes([]byte, ModifiedHashes) bool - Remove(hash []byte) error SetEpochForPutOperation(uint32) ShouldTakeSnapshot() bool GetBaseTrieStorageManager() StorageManager - GetIdentifier() string IsClosed() bool Close() error IsInterfaceNil() bool } -// DBWriteCacher is used to cache changes made to the trie, and only write to the database when it's needed -type DBWriteCacher interface { +// TrieStorageInteractor defines the methods used for interacting with the trie storage +type TrieStorageInteractor interface { + BaseStorer + GetIdentifier() string +} + +// BaseStorer defines the base methods needed for a storer +type BaseStorer interface { Put(key, val []byte) error Get(key []byte) ([]byte, error) Remove(key []byte) error @@ -110,7 +113,7 @@ type DBWriteCacher interface { // SnapshotDbHandler is used to keep track of how many references a snapshot db has type SnapshotDbHandler interface { - DBWriteCacher + BaseStorer IsInUse() bool DecreaseNumReferences() IncreaseNumReferences() diff --git a/epochStart/mock/storageManagerStub.go b/epochStart/mock/storageManagerStub.go index 21e28a13065..da4d434ed8d 100644 --- a/epochStart/mock/storageManagerStub.go +++ b/epochStart/mock/storageManagerStub.go @@ -7,13 +7,13 @@ import ( // StorageManagerStub -- type StorageManagerStub struct { -
DatabaseCalled func() common.DBWriteCacher + DatabaseCalled func() common.BaseStorer TakeSnapshotCalled func([]byte) SetCheckpointCalled func([]byte) PruneCalled func([]byte) CancelPruneCalled func([]byte) MarkForEvictionCalled func([]byte, common.ModifiedHashes) error - GetDbThatContainsHashCalled func([]byte) common.DBWriteCacher + GetDbThatContainsHashCalled func([]byte) common.BaseStorer GetSnapshotThatContainsHashCalled func(rootHash []byte) common.SnapshotDbHandler IsPruningEnabledCalled func() bool EnterSnapshotModeCalled func() @@ -22,7 +22,7 @@ type StorageManagerStub struct { } // Database -- -func (sms *StorageManagerStub) Database() common.DBWriteCacher { +func (sms *StorageManagerStub) Database() common.BaseStorer { if sms.DatabaseCalled != nil { return sms.DatabaseCalled() } diff --git a/genesis/mock/storageManagerStub.go b/genesis/mock/storageManagerStub.go index e6b686b726c..d881d8e3b2f 100644 --- a/genesis/mock/storageManagerStub.go +++ b/genesis/mock/storageManagerStub.go @@ -7,13 +7,13 @@ import ( // StorageManagerStub - type StorageManagerStub struct { - DatabaseCalled func() common.DBWriteCacher + DatabaseCalled func() common.BaseStorer TakeSnapshotCalled func([]byte) SetCheckpointCalled func([]byte) PruneCalled func([]byte) CancelPruneCalled func([]byte) MarkForEvictionCalled func([]byte, common.ModifiedHashes) error - GetDbThatContainsHashCalled func([]byte) common.DBWriteCacher + GetDbThatContainsHashCalled func([]byte) common.BaseStorer GetSnapshotThatContainsHashCalled func(rootHash []byte) common.SnapshotDbHandler IsPruningEnabledCalled func() bool EnterSnapshotModeCalled func() @@ -22,7 +22,7 @@ type StorageManagerStub struct { } // Database - -func (sms *StorageManagerStub) Database() common.DBWriteCacher { +func (sms *StorageManagerStub) Database() common.BaseStorer { if sms.DatabaseCalled != nil { return sms.DatabaseCalled() } diff --git a/process/sync/baseSync.go b/process/sync/baseSync.go index 9fa60f73fa1..8b8e8e97717 100644 --- 
a/process/sync/baseSync.go +++ b/process/sync/baseSync.go @@ -20,7 +20,6 @@ import ( "github.com/multiversx/mx-chain-go/consensus" "github.com/multiversx/mx-chain-go/dataRetriever" "github.com/multiversx/mx-chain-go/dblookupext" - "github.com/multiversx/mx-chain-go/errors" "github.com/multiversx/mx-chain-go/outport" "github.com/multiversx/mx-chain-go/process" "github.com/multiversx/mx-chain-go/process/sync/storageBootstrap/metricsLoader" @@ -1128,19 +1127,6 @@ func (boot *baseBootstrap) waitForMiniBlocks() error { } } -func (boot *baseBootstrap) getStorerIdentifier(unitType dataRetriever.UnitType) (string, error) { - storer, err := boot.store.GetStorer(unitType) - if err != nil { - return "", err - } - dbWithID, ok := storer.(dbStorerWithIdentifier) - if !ok { - return "", errors.ErrWrongTypeAssertion - } - - return dbWithID.GetIdentifier(), nil -} - func (boot *baseBootstrap) init() { boot.forkInfo = process.NewForkInfo() diff --git a/process/sync/interface.go b/process/sync/interface.go index 9c9e00fc899..88f644df160 100644 --- a/process/sync/interface.go +++ b/process/sync/interface.go @@ -29,7 +29,3 @@ type syncStarter interface { type forkDetector interface { computeFinalCheckpoint() } - -type dbStorerWithIdentifier interface { - GetIdentifier() string -} diff --git a/process/sync/metablock.go b/process/sync/metablock.go index 0f0d39aa0d2..1b3c69c7386 100644 --- a/process/sync/metablock.go +++ b/process/sync/metablock.go @@ -196,21 +196,11 @@ func (boot *MetaBootstrap) SyncBlock(ctx context.Context) error { } func (boot *MetaBootstrap) syncAccountsDBs(key []byte, id string) error { - userAccountsStorerIdentifier, err := boot.getStorerIdentifier(dataRetriever.UserAccountsUnit) - if err != nil { - return err - } - - peerAccountsStorerIdentifier, err := boot.getStorerIdentifier(dataRetriever.PeerAccountsUnit) - if err != nil { - return err - } - // TODO: refactor this in order to avoid treatment based on identifier switch id { - case userAccountsStorerIdentifier: 
+ case dataRetriever.UserAccountsUnit.String(): return boot.syncUserAccountsState(key) - case peerAccountsStorerIdentifier: + case dataRetriever.PeerAccountsUnit.String(): return boot.syncValidatorAccountsState(key) default: return fmt.Errorf("invalid trie identifier, id: %s", id) diff --git a/process/sync/metablock_test.go b/process/sync/metablock_test.go index 155cf71397a..fff94e55389 100644 --- a/process/sync/metablock_test.go +++ b/process/sync/metablock_test.go @@ -1626,7 +1626,7 @@ func TestMetaBootstrap_SyncBlockErrGetNodeDBShouldSyncAccounts(t *testing.T) { } args.ChainHandler = blkc - errGetNodeFromDB := core.NewGetNodeFromDBErrWithKey([]byte("key"), errors.New("get error"), "userAccountsUnit") + errGetNodeFromDB := core.NewGetNodeFromDBErrWithKey([]byte("key"), errors.New("get error"), dataRetriever.UserAccountsUnit.String()) blockProcessor := createMetaBlockProcessor(args.ChainHandler) blockProcessor.ProcessBlockCalled = func(header data.HeaderHandler, body data.BodyHandler, haveTime func() time.Duration) error { return errGetNodeFromDB @@ -1742,7 +1742,7 @@ func TestMetaBootstrap_SyncAccountsDBs(t *testing.T) { }, } - dbIdentifier := "userAccountsTrie" + dbIdentifier := dataRetriever.UserAccountsUnit.String() args.Store = &storageStubs.ChainStorerStub{ GetStorerCalled: func(unitType dataRetriever.UnitType) (storage.Storer, error) { if unitType != dataRetriever.UserAccountsUnit { @@ -1782,7 +1782,7 @@ func TestMetaBootstrap_SyncAccountsDBs(t *testing.T) { }, } - dbIdentifier := "peerAccountsTrie" + dbIdentifier := dataRetriever.PeerAccountsUnit.String() args.Store = &storageStubs.ChainStorerStub{ GetStorerCalled: func(unitType dataRetriever.UnitType) (storage.Storer, error) { if unitType != dataRetriever.PeerAccountsUnit { diff --git a/state/accountsDB_test.go b/state/accountsDB_test.go index 29352585ed7..12995a1bca7 100644 --- a/state/accountsDB_test.go +++ b/state/accountsDB_test.go @@ -92,7 +92,7 @@ func getDefaultTrieAndAccountsDb() (common.Trie, 
*state.AccountsDB) { return tr, adb } -func getDefaultTrieAndAccountsDbWithCustomDB(db common.DBWriteCacher) (common.Trie, *state.AccountsDB) { +func getDefaultTrieAndAccountsDbWithCustomDB(db common.BaseStorer) (common.Trie, *state.AccountsDB) { checkpointHashesHolder := hashesHolder.NewCheckpointHashesHolder(10000000, testscommon.HashSize) adb, tr, _ := getDefaultStateComponents(checkpointHashesHolder, db) return tr, adb @@ -100,7 +100,7 @@ func getDefaultTrieAndAccountsDbWithCustomDB(db common.DBWriteCacher) (common.Tr func getDefaultStateComponents( hashesHolder trie.CheckpointHashesHolder, - db common.DBWriteCacher, + db common.BaseStorer, ) (*state.AccountsDB, common.Trie, common.StorageManager) { generalCfg := config.TrieStorageManagerConfig{ PruningBufferLen: 1000, diff --git a/storage/pruning/pruningStorer.go b/storage/pruning/pruningStorer.go index b0dff395ddc..174ecf254b2 100644 --- a/storage/pruning/pruningStorer.go +++ b/storage/pruning/pruningStorer.go @@ -1033,11 +1033,6 @@ func (ps *PruningStorer) RangeKeys(_ func(key []byte, val []byte) bool) { debug.PrintStack() } -// GetIdentifier returns the identifier for storer -func (ps *PruningStorer) GetIdentifier() string { - return ps.identifier -} - // IsInterfaceNil returns true if there is no value under the interface func (ps *PruningStorer) IsInterfaceNil() bool { return ps == nil diff --git a/testscommon/memDbMock.go b/testscommon/memDbMock.go index 365ffffba8b..6891b0ff6dd 100644 --- a/testscommon/memDbMock.go +++ b/testscommon/memDbMock.go @@ -10,10 +10,11 @@ import ( // MemDbMock represents the memory database storage. 
It holds a map of key value pairs // and a mutex to handle concurrent accesses to the map type MemDbMock struct { - db map[string][]byte - mutx sync.RWMutex - PutCalled func(key, val []byte) error - GetCalled func(key []byte) ([]byte, error) + db map[string][]byte + mutx sync.RWMutex + PutCalled func(key, val []byte) error + GetCalled func(key []byte) ([]byte, error) + GetIdentifierCalled func() string } // NewMemDbMock creates a new memorydb object @@ -117,6 +118,14 @@ func (s *MemDbMock) RangeKeys(handler func(key []byte, value []byte) bool) { } } +func (s *MemDbMock) GetIdentifier() string { + if s.GetIdentifierCalled != nil { + return s.GetIdentifierCalled() + } + + return "" +} + // IsInterfaceNil returns true if there is no value under the interface func (s *MemDbMock) IsInterfaceNil() bool { return s == nil diff --git a/testscommon/storageManager/storageManagerStub.go b/testscommon/storageManager/storageManagerStub.go index caaca56576b..75d9dee333f 100644 --- a/testscommon/storageManager/storageManagerStub.go +++ b/testscommon/storageManager/storageManagerStub.go @@ -13,7 +13,7 @@ type StorageManagerStub struct { GetFromCurrentEpochCalled func([]byte) ([]byte, error) TakeSnapshotCalled func(string, []byte, []byte, *common.TrieIteratorChannels, chan []byte, common.SnapshotStatisticsHandler, uint32) SetCheckpointCalled func([]byte, []byte, *common.TrieIteratorChannels, chan []byte, common.SnapshotStatisticsHandler) - GetDbThatContainsHashCalled func([]byte) common.DBWriteCacher + GetDbThatContainsHashCalled func([]byte) common.BaseStorer IsPruningEnabledCalled func() bool IsPruningBlockedCalled func() bool EnterPruningBufferingModeCalled func() diff --git a/trie/baseIterator.go b/trie/baseIterator.go index 9f1d6d1f52b..8ff558790d8 100644 --- a/trie/baseIterator.go +++ b/trie/baseIterator.go @@ -8,7 +8,7 @@ import ( type baseIterator struct { currentNode node nextNodes []node - db common.DBWriteCacher + db common.TrieStorageInteractor } // newBaseIterator 
creates a new instance of trie iterator diff --git a/trie/branchNode.go b/trie/branchNode.go index 4b2591d0982..66fa48e8d9e 100644 --- a/trie/branchNode.go +++ b/trie/branchNode.go @@ -247,7 +247,7 @@ func (bn *branchNode) hashNode() ([]byte, error) { return encodeNodeAndGetHash(bn) } -func (bn *branchNode) commitDirty(level byte, maxTrieLevelInMemory uint, originDb common.DBWriteCacher, targetDb common.DBWriteCacher) error { +func (bn *branchNode) commitDirty(level byte, maxTrieLevelInMemory uint, originDb common.TrieStorageInteractor, targetDb common.BaseStorer) error { level++ err := bn.isEmptyOrNil() if err != nil { @@ -288,8 +288,8 @@ func (bn *branchNode) commitDirty(level byte, maxTrieLevelInMemory uint, originD } func (bn *branchNode) commitCheckpoint( - originDb common.DBWriteCacher, - targetDb common.DBWriteCacher, + originDb common.TrieStorageInteractor, + targetDb common.BaseStorer, checkpointHashes CheckpointHashesHolder, leavesChan chan core.KeyValueHolder, ctx context.Context, @@ -337,7 +337,7 @@ func (bn *branchNode) commitCheckpoint( } func (bn *branchNode) commitSnapshot( - db common.DBWriteCacher, + db common.TrieStorageInteractor, leavesChan chan core.KeyValueHolder, missingNodesChan chan []byte, ctx context.Context, @@ -377,7 +377,7 @@ func (bn *branchNode) commitSnapshot( return bn.saveToStorage(db, stats, depthLevel) } -func (bn *branchNode) saveToStorage(targetDb common.DBWriteCacher, stats common.TrieStatisticsHandler, depthLevel int) error { +func (bn *branchNode) saveToStorage(targetDb common.BaseStorer, stats common.TrieStatisticsHandler, depthLevel int) error { nodeSize, err := encodeNodeAndCommitToDB(bn, targetDb) if err != nil { return err @@ -408,7 +408,7 @@ func (bn *branchNode) getEncodedNode() ([]byte, error) { return marshaledNode, nil } -func (bn *branchNode) resolveCollapsed(pos byte, db common.DBWriteCacher) error { +func (bn *branchNode) resolveCollapsed(pos byte, db common.TrieStorageInteractor) error { err := 
bn.isEmptyOrNil() if err != nil { return fmt.Errorf("resolveCollapsed error %w", err) @@ -441,7 +441,7 @@ func (bn *branchNode) isPosCollapsed(pos int) bool { return bn.children[pos] == nil && len(bn.EncodedChildren[pos]) != 0 } -func (bn *branchNode) tryGet(key []byte, currentDepth uint32, db common.DBWriteCacher) (value []byte, maxDepth uint32, err error) { +func (bn *branchNode) tryGet(key []byte, currentDepth uint32, db common.TrieStorageInteractor) (value []byte, maxDepth uint32, err error) { err = bn.isEmptyOrNil() if err != nil { return nil, currentDepth, fmt.Errorf("tryGet error %w", err) @@ -465,7 +465,7 @@ func (bn *branchNode) tryGet(key []byte, currentDepth uint32, db common.DBWriteC return bn.children[childPos].tryGet(key, currentDepth+1, db) } -func (bn *branchNode) getNext(key []byte, db common.DBWriteCacher) (node, []byte, error) { +func (bn *branchNode) getNext(key []byte, db common.TrieStorageInteractor) (node, []byte, error) { err := bn.isEmptyOrNil() if err != nil { return nil, nil, fmt.Errorf("getNext error %w", err) @@ -489,7 +489,7 @@ func (bn *branchNode) getNext(key []byte, db common.DBWriteCacher) (node, []byte return bn.children[childPos], key, nil } -func (bn *branchNode) insert(n *leafNode, db common.DBWriteCacher) (node, [][]byte, error) { +func (bn *branchNode) insert(n *leafNode, db common.TrieStorageInteractor) (node, [][]byte, error) { emptyHashes := make([][]byte, 0) err := bn.isEmptyOrNil() if err != nil { @@ -530,7 +530,7 @@ func (bn *branchNode) insertOnNilChild(n *leafNode, childPos byte) (node, [][]by return bn, modifiedHashes, nil } -func (bn *branchNode) insertOnExistingChild(n *leafNode, childPos byte, db common.DBWriteCacher) (node, [][]byte, error) { +func (bn *branchNode) insertOnExistingChild(n *leafNode, childPos byte, db common.TrieStorageInteractor) (node, [][]byte, error) { newNode, modifiedHashes, err := bn.children[childPos].insert(n, db) if check.IfNil(newNode) || err != nil { return nil, [][]byte{}, err @@ 
-553,7 +553,7 @@ func (bn *branchNode) modifyNodeAfterInsert(modifiedHashes [][]byte, childPos by return modifiedHashes } -func (bn *branchNode) delete(key []byte, db common.DBWriteCacher) (bool, node, [][]byte, error) { +func (bn *branchNode) delete(key []byte, db common.TrieStorageInteractor) (bool, node, [][]byte, error) { emptyHashes := make([][]byte, 0) err := bn.isEmptyOrNil() if err != nil { @@ -658,7 +658,7 @@ func (bn *branchNode) isEmptyOrNil() error { return ErrEmptyBranchNode } -func (bn *branchNode) print(writer io.Writer, index int, db common.DBWriteCacher) { +func (bn *branchNode) print(writer io.Writer, index int, db common.TrieStorageInteractor) { if bn == nil { return } @@ -711,7 +711,7 @@ func (bn *branchNode) getDirtyHashes(hashes common.ModifiedHashes) error { return nil } -func (bn *branchNode) getChildren(db common.DBWriteCacher) ([]node, error) { +func (bn *branchNode) getChildren(db common.TrieStorageInteractor) ([]node, error) { err := bn.isEmptyOrNil() if err != nil { return nil, fmt.Errorf("getChildren error %w", err) @@ -781,7 +781,7 @@ func (bn *branchNode) loadChildren(getNode func([]byte) (node, error)) ([][]byte func (bn *branchNode) getAllLeavesOnChannel( leavesChannel chan core.KeyValueHolder, keyBuilder common.KeyBuilder, - db common.DBWriteCacher, + db common.TrieStorageInteractor, marshalizer marshal.Marshalizer, chanClose chan struct{}, ctx context.Context, @@ -823,7 +823,7 @@ func (bn *branchNode) getAllLeavesOnChannel( return nil } -func (bn *branchNode) getAllHashes(db common.DBWriteCacher) ([][]byte, error) { +func (bn *branchNode) getAllHashes(db common.TrieStorageInteractor) ([][]byte, error) { err := bn.isEmptyOrNil() if err != nil { return nil, fmt.Errorf("getAllHashes error: %w", err) @@ -884,7 +884,7 @@ func (bn *branchNode) getValue() []byte { return []byte{} } -func (bn *branchNode) collectStats(ts common.TrieStatisticsHandler, depthLevel int, db common.DBWriteCacher) error { +func (bn *branchNode) collectStats(ts 
common.TrieStatisticsHandler, depthLevel int, db common.TrieStorageInteractor) error { err := bn.isEmptyOrNil() if err != nil { return fmt.Errorf("collectStats error %w", err) diff --git a/trie/branchNode_test.go b/trie/branchNode_test.go index 2b6bb40a080..e3f1118c61a 100644 --- a/trie/branchNode_test.go +++ b/trie/branchNode_test.go @@ -8,7 +8,6 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/data/mock" "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/common" @@ -1324,11 +1323,11 @@ func TestBranchNode_commitContextDone(t *testing.T) { func TestBranchNode_commitSnapshotDbIsClosing(t *testing.T) { t.Parallel() - db := &mock.StorerStub{ - GetCalled: func(key []byte) ([]byte, error) { - return nil, core.ErrContextClosing - }, + db := testscommon.NewMemDbMock() + db.GetCalled = func(key []byte) ([]byte, error) { + return nil, core.ErrContextClosing } + _, collapsedBn := getBnAndCollapsedBn(getTestMarshalizerAndHasher()) missingNodesChan := make(chan []byte, 10) err := collapsedBn.commitSnapshot(db, nil, missingNodesChan, context.Background(), statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) diff --git a/trie/depthFirstSync.go b/trie/depthFirstSync.go index a26e0bdfe2a..f51388ed5dc 100644 --- a/trie/depthFirstSync.go +++ b/trie/depthFirstSync.go @@ -21,7 +21,7 @@ type depthFirstTrieSyncer struct { waitTimeBetweenChecks time.Duration marshaller marshal.Marshalizer hasher hashing.Hasher - db common.DBWriteCacher + db common.TrieStorageInteractor requestHandler RequestHandler interceptedNodesCacher storage.Cacher mutOperation sync.RWMutex diff --git a/trie/doubleListSync.go b/trie/doubleListSync.go index b855e942d28..b5f68c7bf0d 100644 --- a/trie/doubleListSync.go +++ b/trie/doubleListSync.go @@ -31,7 +31,7 @@ type doubleListTrieSyncer struct { waitTimeBetweenChecks time.Duration marshalizer 
marshal.Marshalizer hasher hashing.Hasher - db common.DBWriteCacher + db common.TrieStorageInteractor requestHandler RequestHandler interceptedNodesCacher storage.Cacher mutOperation sync.RWMutex diff --git a/trie/extensionNode.go b/trie/extensionNode.go index 79d8e16cdad..04871193be8 100644 --- a/trie/extensionNode.go +++ b/trie/extensionNode.go @@ -162,7 +162,7 @@ func (en *extensionNode) hashNode() ([]byte, error) { return encodeNodeAndGetHash(en) } -func (en *extensionNode) commitDirty(level byte, maxTrieLevelInMemory uint, originDb common.DBWriteCacher, targetDb common.DBWriteCacher) error { +func (en *extensionNode) commitDirty(level byte, maxTrieLevelInMemory uint, originDb common.TrieStorageInteractor, targetDb common.BaseStorer) error { level++ err := en.isEmptyOrNil() if err != nil { @@ -200,8 +200,8 @@ func (en *extensionNode) commitDirty(level byte, maxTrieLevelInMemory uint, orig } func (en *extensionNode) commitCheckpoint( - originDb common.DBWriteCacher, - targetDb common.DBWriteCacher, + originDb common.TrieStorageInteractor, + targetDb common.BaseStorer, checkpointHashes CheckpointHashesHolder, leavesChan chan core.KeyValueHolder, ctx context.Context, @@ -243,7 +243,7 @@ func (en *extensionNode) commitCheckpoint( } func (en *extensionNode) commitSnapshot( - db common.DBWriteCacher, + db common.TrieStorageInteractor, leavesChan chan core.KeyValueHolder, missingNodesChan chan []byte, ctx context.Context, @@ -281,7 +281,7 @@ func (en *extensionNode) commitSnapshot( return en.saveToStorage(db, stats, depthLevel) } -func (en *extensionNode) saveToStorage(targetDb common.DBWriteCacher, stats common.TrieStatisticsHandler, depthLevel int) error { +func (en *extensionNode) saveToStorage(targetDb common.BaseStorer, stats common.TrieStatisticsHandler, depthLevel int) error { nodeSize, err := encodeNodeAndCommitToDB(en, targetDb) if err != nil { return err @@ -306,7 +306,7 @@ func (en *extensionNode) getEncodedNode() ([]byte, error) { return marshaledNode, nil 
} -func (en *extensionNode) resolveCollapsed(_ byte, db common.DBWriteCacher) error { +func (en *extensionNode) resolveCollapsed(_ byte, db common.TrieStorageInteractor) error { err := en.isEmptyOrNil() if err != nil { return fmt.Errorf("resolveCollapsed error %w", err) @@ -328,7 +328,7 @@ func (en *extensionNode) isPosCollapsed(_ int) bool { return en.isCollapsed() } -func (en *extensionNode) tryGet(key []byte, currentDepth uint32, db common.DBWriteCacher) (value []byte, maxDepth uint32, err error) { +func (en *extensionNode) tryGet(key []byte, currentDepth uint32, db common.TrieStorageInteractor) (value []byte, maxDepth uint32, err error) { err = en.isEmptyOrNil() if err != nil { return nil, currentDepth, fmt.Errorf("tryGet error %w", err) @@ -350,7 +350,7 @@ func (en *extensionNode) tryGet(key []byte, currentDepth uint32, db common.DBWri return en.child.tryGet(key, currentDepth+1, db) } -func (en *extensionNode) getNext(key []byte, db common.DBWriteCacher) (node, []byte, error) { +func (en *extensionNode) getNext(key []byte, db common.TrieStorageInteractor) (node, []byte, error) { err := en.isEmptyOrNil() if err != nil { return nil, nil, fmt.Errorf("getNext error %w", err) @@ -372,7 +372,7 @@ func (en *extensionNode) getNext(key []byte, db common.DBWriteCacher) (node, []b return en.child, key, nil } -func (en *extensionNode) insert(n *leafNode, db common.DBWriteCacher) (node, [][]byte, error) { +func (en *extensionNode) insert(n *leafNode, db common.TrieStorageInteractor) (node, [][]byte, error) { emptyHashes := make([][]byte, 0) err := en.isEmptyOrNil() if err != nil { @@ -395,7 +395,7 @@ func (en *extensionNode) insert(n *leafNode, db common.DBWriteCacher) (node, [][ return en.insertInNewBn(n, keyMatchLen) } -func (en *extensionNode) insertInSameEn(n *leafNode, keyMatchLen int, db common.DBWriteCacher) (node, [][]byte, error) { +func (en *extensionNode) insertInSameEn(n *leafNode, keyMatchLen int, db common.TrieStorageInteractor) (node, [][]byte, error) { 
n.Key = n.Key[keyMatchLen:] newNode, oldHashes, err := en.child.insert(n, db) if check.IfNil(newNode) || err != nil { @@ -456,7 +456,7 @@ func (en *extensionNode) insertInNewBn(n *leafNode, keyMatchLen int) (node, [][] return newEn, oldHash, nil } -func (en *extensionNode) delete(key []byte, db common.DBWriteCacher) (bool, node, [][]byte, error) { +func (en *extensionNode) delete(key []byte, db common.TrieStorageInteractor) (bool, node, [][]byte, error) { emptyHashes := make([][]byte, 0) err := en.isEmptyOrNil() if err != nil { @@ -535,7 +535,7 @@ func (en *extensionNode) isEmptyOrNil() error { return nil } -func (en *extensionNode) print(writer io.Writer, index int, db common.DBWriteCacher) { +func (en *extensionNode) print(writer io.Writer, index int, db common.TrieStorageInteractor) { if en == nil { return } @@ -582,7 +582,7 @@ func (en *extensionNode) getDirtyHashes(hashes common.ModifiedHashes) error { return nil } -func (en *extensionNode) getChildren(db common.DBWriteCacher) ([]node, error) { +func (en *extensionNode) getChildren(db common.TrieStorageInteractor) ([]node, error) { err := en.isEmptyOrNil() if err != nil { return nil, fmt.Errorf("getChildren error %w", err) @@ -639,7 +639,7 @@ func (en *extensionNode) loadChildren(getNode func([]byte) (node, error)) ([][]b func (en *extensionNode) getAllLeavesOnChannel( leavesChannel chan core.KeyValueHolder, keyBuilder common.KeyBuilder, - db common.DBWriteCacher, + db common.TrieStorageInteractor, marshalizer marshal.Marshalizer, chanClose chan struct{}, ctx context.Context, @@ -674,7 +674,7 @@ func (en *extensionNode) getAllLeavesOnChannel( return nil } -func (en *extensionNode) getAllHashes(db common.DBWriteCacher) ([][]byte, error) { +func (en *extensionNode) getAllHashes(db common.TrieStorageInteractor) ([][]byte, error) { err := en.isEmptyOrNil() if err != nil { return nil, fmt.Errorf("getAllHashes error: %w", err) @@ -722,7 +722,7 @@ func (en *extensionNode) getValue() []byte { return []byte{} } -func 
(en *extensionNode) collectStats(ts common.TrieStatisticsHandler, depthLevel int, db common.DBWriteCacher) error { +func (en *extensionNode) collectStats(ts common.TrieStatisticsHandler, depthLevel int, db common.TrieStorageInteractor) error { err := en.isEmptyOrNil() if err != nil { return fmt.Errorf("collectStats error %w", err) diff --git a/trie/extensionNode_test.go b/trie/extensionNode_test.go index 34b87199e99..f24f8edbf14 100644 --- a/trie/extensionNode_test.go +++ b/trie/extensionNode_test.go @@ -7,7 +7,6 @@ import ( "testing" "github.com/multiversx/mx-chain-core-go/core" - "github.com/multiversx/mx-chain-core-go/data/mock" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/storage/cache" "github.com/multiversx/mx-chain-go/testscommon" @@ -1037,11 +1036,11 @@ func TestExtensionNode_getValueReturnsEmptyByteSlice(t *testing.T) { func TestExtensionNode_commitSnapshotDbIsClosing(t *testing.T) { t.Parallel() - db := &mock.StorerStub{ - GetCalled: func(key []byte) ([]byte, error) { - return nil, core.ErrContextClosing - }, + db := testscommon.NewMemDbMock() + db.GetCalled = func(key []byte) ([]byte, error) { + return nil, core.ErrContextClosing } + _, collapsedEn := getEnAndCollapsedEn() missingNodesChan := make(chan []byte, 10) err := collapsedEn.commitSnapshot(db, nil, missingNodesChan, context.Background(), statistics.NewTrieStatistics(), &testscommon.ProcessStatusHandlerStub{}, 0) diff --git a/trie/interface.go b/trie/interface.go index 9cd5502743e..50c17b33a1f 100644 --- a/trie/interface.go +++ b/trie/interface.go @@ -24,36 +24,36 @@ type node interface { isPosCollapsed(pos int) bool isDirty() bool getEncodedNode() ([]byte, error) - resolveCollapsed(pos byte, db common.DBWriteCacher) error + resolveCollapsed(pos byte, db common.TrieStorageInteractor) error hashNode() ([]byte, error) hashChildren() error - tryGet(key []byte, depth uint32, db common.DBWriteCacher) ([]byte, uint32, error) - getNext(key []byte, db common.DBWriteCacher) 
(node, []byte, error) - insert(n *leafNode, db common.DBWriteCacher) (node, [][]byte, error) - delete(key []byte, db common.DBWriteCacher) (bool, node, [][]byte, error) + tryGet(key []byte, depth uint32, db common.TrieStorageInteractor) ([]byte, uint32, error) + getNext(key []byte, db common.TrieStorageInteractor) (node, []byte, error) + insert(n *leafNode, db common.TrieStorageInteractor) (node, [][]byte, error) + delete(key []byte, db common.TrieStorageInteractor) (bool, node, [][]byte, error) reduceNode(pos int) (node, bool, error) isEmptyOrNil() error - print(writer io.Writer, index int, db common.DBWriteCacher) + print(writer io.Writer, index int, db common.TrieStorageInteractor) getDirtyHashes(common.ModifiedHashes) error - getChildren(db common.DBWriteCacher) ([]node, error) + getChildren(db common.TrieStorageInteractor) ([]node, error) isValid() bool setDirty(bool) loadChildren(func([]byte) (node, error)) ([][]byte, []node, error) - getAllLeavesOnChannel(chan core.KeyValueHolder, common.KeyBuilder, common.DBWriteCacher, marshal.Marshalizer, chan struct{}, context.Context) error - getAllHashes(db common.DBWriteCacher) ([][]byte, error) + getAllLeavesOnChannel(chan core.KeyValueHolder, common.KeyBuilder, common.TrieStorageInteractor, marshal.Marshalizer, chan struct{}, context.Context) error + getAllHashes(db common.TrieStorageInteractor) ([][]byte, error) getNextHashAndKey([]byte) (bool, []byte, []byte) getValue() []byte - commitDirty(level byte, maxTrieLevelInMemory uint, originDb common.DBWriteCacher, targetDb common.DBWriteCacher) error - commitCheckpoint(originDb common.DBWriteCacher, targetDb common.DBWriteCacher, checkpointHashes CheckpointHashesHolder, leavesChan chan core.KeyValueHolder, ctx context.Context, stats common.TrieStatisticsHandler, idleProvider IdleNodeProvider, depthLevel int) error - commitSnapshot(originDb common.DBWriteCacher, leavesChan chan core.KeyValueHolder, missingNodesChan chan []byte, ctx context.Context, stats 
common.TrieStatisticsHandler, idleProvider IdleNodeProvider, depthLevel int) error + commitDirty(level byte, maxTrieLevelInMemory uint, originDb common.TrieStorageInteractor, targetDb common.BaseStorer) error + commitCheckpoint(originDb common.TrieStorageInteractor, targetDb common.BaseStorer, checkpointHashes CheckpointHashesHolder, leavesChan chan core.KeyValueHolder, ctx context.Context, stats common.TrieStatisticsHandler, idleProvider IdleNodeProvider, depthLevel int) error + commitSnapshot(originDb common.TrieStorageInteractor, leavesChan chan core.KeyValueHolder, missingNodesChan chan []byte, ctx context.Context, stats common.TrieStatisticsHandler, idleProvider IdleNodeProvider, depthLevel int) error getMarshalizer() marshal.Marshalizer setMarshalizer(marshal.Marshalizer) getHasher() hashing.Hasher setHasher(hashing.Hasher) sizeInBytes() int - collectStats(handler common.TrieStatisticsHandler, depthLevel int, db common.DBWriteCacher) error + collectStats(handler common.TrieStatisticsHandler, depthLevel int, db common.TrieStorageInteractor) error IsInterfaceNil() bool } @@ -63,8 +63,8 @@ type dbWithGetFromEpoch interface { } type snapshotNode interface { - commitCheckpoint(originDb common.DBWriteCacher, targetDb common.DBWriteCacher, checkpointHashes CheckpointHashesHolder, leavesChan chan core.KeyValueHolder, ctx context.Context, stats common.TrieStatisticsHandler, idleProvider IdleNodeProvider, depthLevel int) error - commitSnapshot(originDb common.DBWriteCacher, leavesChan chan core.KeyValueHolder, missingNodesChan chan []byte, ctx context.Context, stats common.TrieStatisticsHandler, idleProvider IdleNodeProvider, depthLevel int) error + commitCheckpoint(originDb common.TrieStorageInteractor, targetDb common.BaseStorer, checkpointHashes CheckpointHashesHolder, leavesChan chan core.KeyValueHolder, ctx context.Context, stats common.TrieStatisticsHandler, idleProvider IdleNodeProvider, depthLevel int) error + commitSnapshot(originDb 
common.TrieStorageInteractor, leavesChan chan core.KeyValueHolder, missingNodesChan chan []byte, ctx context.Context, stats common.TrieStatisticsHandler, idleProvider IdleNodeProvider, depthLevel int) error } // RequestHandler defines the methods through which request to data can be made @@ -96,7 +96,7 @@ type epochStorer interface { } type snapshotPruningStorer interface { - common.DBWriteCacher + common.BaseStorer GetFromOldEpochsWithoutAddingToCache(key []byte) ([]byte, core.OptionalUint32, error) GetFromLastEpoch(key []byte) ([]byte, error) PutInEpoch(key []byte, data []byte, epoch uint32) error @@ -122,7 +122,3 @@ type IdleNodeProvider interface { type storageManagerExtension interface { RemoveFromCheckpointHashesHolder(hash []byte) } - -type dbWriteCacherWithIdentifier interface { - GetIdentifier() string -} diff --git a/trie/leafNode.go b/trie/leafNode.go index ed037c7f0e0..e20a38d4afd 100644 --- a/trie/leafNode.go +++ b/trie/leafNode.go @@ -111,7 +111,7 @@ func (ln *leafNode) hashNode() ([]byte, error) { return encodeNodeAndGetHash(ln) } -func (ln *leafNode) commitDirty(_ byte, _ uint, _ common.DBWriteCacher, targetDb common.DBWriteCacher) error { +func (ln *leafNode) commitDirty(_ byte, _ uint, _ common.TrieStorageInteractor, targetDb common.BaseStorer) error { err := ln.isEmptyOrNil() if err != nil { return fmt.Errorf("commit error %w", err) @@ -128,8 +128,8 @@ func (ln *leafNode) commitDirty(_ byte, _ uint, _ common.DBWriteCacher, targetDb } func (ln *leafNode) commitCheckpoint( - _ common.DBWriteCacher, - targetDb common.DBWriteCacher, + _ common.TrieStorageInteractor, + targetDb common.BaseStorer, checkpointHashes CheckpointHashesHolder, leavesChan chan core.KeyValueHolder, ctx context.Context, @@ -174,7 +174,7 @@ func (ln *leafNode) commitCheckpoint( } func (ln *leafNode) commitSnapshot( - db common.DBWriteCacher, + db common.TrieStorageInteractor, leavesChan chan core.KeyValueHolder, _ chan []byte, ctx context.Context, @@ -235,7 +235,7 @@ func (ln 
*leafNode) getEncodedNode() ([]byte, error) { return marshaledNode, nil } -func (ln *leafNode) resolveCollapsed(_ byte, _ common.DBWriteCacher) error { +func (ln *leafNode) resolveCollapsed(_ byte, _ common.TrieStorageInteractor) error { return nil } @@ -247,7 +247,7 @@ func (ln *leafNode) isPosCollapsed(_ int) bool { return false } -func (ln *leafNode) tryGet(key []byte, currentDepth uint32, _ common.DBWriteCacher) (value []byte, maxDepth uint32, err error) { +func (ln *leafNode) tryGet(key []byte, currentDepth uint32, _ common.TrieStorageInteractor) (value []byte, maxDepth uint32, err error) { err = ln.isEmptyOrNil() if err != nil { return nil, currentDepth, fmt.Errorf("tryGet error %w", err) @@ -259,7 +259,7 @@ func (ln *leafNode) tryGet(key []byte, currentDepth uint32, _ common.DBWriteCach return nil, currentDepth, nil } -func (ln *leafNode) getNext(key []byte, _ common.DBWriteCacher) (node, []byte, error) { +func (ln *leafNode) getNext(key []byte, _ common.TrieStorageInteractor) (node, []byte, error) { err := ln.isEmptyOrNil() if err != nil { return nil, nil, fmt.Errorf("getNext error %w", err) @@ -269,7 +269,7 @@ func (ln *leafNode) getNext(key []byte, _ common.DBWriteCacher) (node, []byte, e } return nil, nil, ErrNodeNotFound } -func (ln *leafNode) insert(n *leafNode, _ common.DBWriteCacher) (node, [][]byte, error) { +func (ln *leafNode) insert(n *leafNode, _ common.TrieStorageInteractor) (node, [][]byte, error) { err := ln.isEmptyOrNil() if err != nil { return nil, [][]byte{}, fmt.Errorf("insert error %w", err) @@ -343,7 +343,7 @@ func (ln *leafNode) insertInNewBn(n *leafNode, keyMatchLen int) (node, error) { return bn, nil } -func (ln *leafNode) delete(key []byte, _ common.DBWriteCacher) (bool, node, [][]byte, error) { +func (ln *leafNode) delete(key []byte, _ common.TrieStorageInteractor) (bool, node, [][]byte, error) { if bytes.Equal(key, ln.Key) { oldHash := make([][]byte, 0) if !ln.dirty { @@ -376,7 +376,7 @@ func (ln *leafNode) isEmptyOrNil() error { 
return nil } -func (ln *leafNode) print(writer io.Writer, _ int, _ common.DBWriteCacher) { +func (ln *leafNode) print(writer io.Writer, _ int, _ common.TrieStorageInteractor) { if ln == nil { return } @@ -408,7 +408,7 @@ func (ln *leafNode) getDirtyHashes(hashes common.ModifiedHashes) error { return nil } -func (ln *leafNode) getChildren(_ common.DBWriteCacher) ([]node, error) { +func (ln *leafNode) getChildren(_ common.TrieStorageInteractor) ([]node, error) { return nil, nil } @@ -427,7 +427,7 @@ func (ln *leafNode) loadChildren(_ func([]byte) (node, error)) ([][]byte, []node func (ln *leafNode) getAllLeavesOnChannel( leavesChannel chan core.KeyValueHolder, keyBuilder common.KeyBuilder, - _ common.DBWriteCacher, + _ common.TrieStorageInteractor, _ marshal.Marshalizer, chanClose chan struct{}, ctx context.Context, @@ -458,7 +458,7 @@ func (ln *leafNode) getAllLeavesOnChannel( } } -func (ln *leafNode) getAllHashes(_ common.DBWriteCacher) ([][]byte, error) { +func (ln *leafNode) getAllHashes(_ common.TrieStorageInteractor) ([][]byte, error) { err := ln.isEmptyOrNil() if err != nil { return nil, fmt.Errorf("getAllHashes error: %w", err) @@ -494,7 +494,7 @@ func (ln *leafNode) getValue() []byte { return ln.Value } -func (ln *leafNode) collectStats(ts common.TrieStatisticsHandler, depthLevel int, _ common.DBWriteCacher) error { +func (ln *leafNode) collectStats(ts common.TrieStatisticsHandler, depthLevel int, _ common.TrieStorageInteractor) error { err := ln.isEmptyOrNil() if err != nil { return fmt.Errorf("collectStats error %w", err) diff --git a/trie/node.go b/trie/node.go index 67c7f95d8c3..617aab8b528 100644 --- a/trie/node.go +++ b/trie/node.go @@ -3,7 +3,6 @@ package trie import ( "context" - "fmt" "runtime/debug" "time" @@ -75,7 +74,7 @@ func encodeNodeAndGetHash(n node) ([]byte, error) { } // encodeNodeAndCommitToDB will encode and save provided node. 
It returns the node's value in bytes -func encodeNodeAndCommitToDB(n node, db common.DBWriteCacher) (int, error) { +func encodeNodeAndCommitToDB(n node, db common.BaseStorer) (int, error) { key, err := computeAndSetNodeHash(n) if err != nil { return 0, err @@ -117,18 +116,12 @@ func computeAndSetNodeHash(n node) ([]byte, error) { return key, nil } -func getNodeFromDBAndDecode(n []byte, db common.DBWriteCacher, marshalizer marshal.Marshalizer, hasher hashing.Hasher) (node, error) { +func getNodeFromDBAndDecode(n []byte, db common.TrieStorageInteractor, marshalizer marshal.Marshalizer, hasher hashing.Hasher) (node, error) { encChild, err := db.Get(n) if err != nil { treatLogError(log, err, n) - dbWithID, ok := db.(dbWriteCacherWithIdentifier) - if !ok { - getNodeFromDbErr := core.NewGetNodeFromDBErrWithKey(n, err, "") - return nil, fmt.Errorf("db does not have an identifier, db type: %T, error: %w", db, getNodeFromDbErr) - } - - return nil, core.NewGetNodeFromDBErrWithKey(n, err, dbWithID.GetIdentifier()) + return nil, core.NewGetNodeFromDBErrWithKey(n, err, db.GetIdentifier()) } return decodeNode(encChild, marshalizer, hasher) @@ -142,7 +135,7 @@ func treatLogError(logInstance logger.Logger, err error, key []byte) { logInstance.Trace(core.GetNodeFromDBErrorString, "error", err, "key", key, "stack trace", string(debug.Stack())) } -func resolveIfCollapsed(n node, pos byte, db common.DBWriteCacher) error { +func resolveIfCollapsed(n node, pos byte, db common.TrieStorageInteractor) error { err := n.isEmptyOrNil() if err != nil { return err diff --git a/trie/sync.go b/trie/sync.go index 2ef4bb807ca..89c8d3ef3c5 100644 --- a/trie/sync.go +++ b/trie/sync.go @@ -32,7 +32,7 @@ type trieSyncer struct { waitTimeBetweenRequests time.Duration marshalizer marshal.Marshalizer hasher hashing.Hasher - db common.DBWriteCacher + db common.TrieStorageInteractor requestHandler RequestHandler interceptedNodesCacher storage.Cacher mutOperation sync.RWMutex @@ -303,7 +303,7 @@ func (ts 
*trieSyncer) getNode(hash []byte) (node, error) { func getNodeFromCacheOrStorage( hash []byte, interceptedNodesCacher storage.Cacher, - db common.DBWriteCacher, + db common.TrieStorageInteractor, marshalizer marshal.Marshalizer, hasher hashing.Hasher, ) (node, error) { diff --git a/trie/trieStorageManager.go b/trie/trieStorageManager.go index 54948d4e953..75312cbc8ae 100644 --- a/trie/trieStorageManager.go +++ b/trie/trieStorageManager.go @@ -21,11 +21,11 @@ import ( // trieStorageManager manages all the storage operations of the trie (commit, snapshot, checkpoint, pruning) type trieStorageManager struct { - mainStorer common.DBWriteCacher + mainStorer common.BaseStorer + checkpointsStorer common.BaseStorer pruningBlockingOps uint32 snapshotReq chan *snapshotsQueueEntry checkpointReq chan *snapshotsQueueEntry - checkpointsStorer common.DBWriteCacher checkpointHashesHolder CheckpointHashesHolder storageOperationMutex sync.RWMutex cancelFunc context.CancelFunc @@ -47,8 +47,8 @@ type snapshotsQueueEntry struct { // NewTrieStorageManagerArgs holds the arguments needed for creating a new trieStorageManager type NewTrieStorageManagerArgs struct { - MainStorer common.DBWriteCacher - CheckpointsStorer common.DBWriteCacher + MainStorer common.BaseStorer + CheckpointsStorer common.BaseStorer Marshalizer marshal.Marshalizer Hasher hashing.Hasher GeneralConfig config.TrieStorageManagerConfig @@ -523,7 +523,7 @@ func treatSnapshotError(err error, message string, rootHash []byte, mainTrieRoot } func newSnapshotNode( - db common.DBWriteCacher, + db common.TrieStorageInteractor, msh marshal.Marshalizer, hsh hashing.Hasher, rootHash []byte, @@ -687,13 +687,7 @@ func (tsm *trieStorageManager) GetBaseTrieStorageManager() common.StorageManager // GetIdentifier returns the identifier of the main storer func (tsm *trieStorageManager) GetIdentifier() string { - dbWithIdentifier, ok := tsm.mainStorer.(dbWriteCacherWithIdentifier) - if !ok { - log.Warn("trieStorageManager.GetIdentifier 
mainStorer is not of type dbWriteCacherWithIdentifier", "type", fmt.Sprintf("%T", tsm.mainStorer)) - return "" - } - - return dbWithIdentifier.GetIdentifier() + return tsm.identifier } // IsInterfaceNil returns true if there is no value under the interface diff --git a/trie/trieStorageManager_test.go b/trie/trieStorageManager_test.go index 32e14ca9655..f23810d0eb1 100644 --- a/trie/trieStorageManager_test.go +++ b/trie/trieStorageManager_test.go @@ -98,6 +98,15 @@ func TestNewTrieStorageManager(t *testing.T) { assert.Nil(t, ts) assert.Error(t, err) }) + t.Run("invalid identifier", func(t *testing.T) { + t.Parallel() + + args := trie.GetDefaultTrieStorageManagerParameters() + args.Identifier = "" + ts, err := trie.NewTrieStorageManager(args) + assert.Nil(t, ts) + assert.Equal(t, trie.ErrInvalidIdentifier, err) + }) t.Run("should work", func(t *testing.T) { t.Parallel() @@ -892,27 +901,11 @@ func TestWriteInChanNonBlocking(t *testing.T) { func TestTrieStorageManager_GetIdentifier(t *testing.T) { t.Parallel() - t.Run("db without identifier", func(t *testing.T) { - t.Parallel() - - ts, _ := trie.NewTrieStorageManager(trie.GetDefaultTrieStorageManagerParameters()) - id := ts.GetIdentifier() - assert.Equal(t, "", id) - }) - - t.Run("db with identifier", func(t *testing.T) { - t.Parallel() - - expectedIdentifier := "identifier" - args := trie.GetDefaultTrieStorageManagerParameters() - args.MainStorer = &storage.StorerStub{ - GetIdentifierCalled: func() string { - return expectedIdentifier - }, - } - ts, _ := trie.NewTrieStorageManager(args) + expectedId := "testId" + args := trie.GetDefaultTrieStorageManagerParameters() + args.Identifier = expectedId + ts, _ := trie.NewTrieStorageManager(args) - id := ts.GetIdentifier() - assert.Equal(t, expectedIdentifier, id) - }) + id := ts.GetIdentifier() + assert.Equal(t, expectedId, id) } From cdedfc2a9c4d09065f053671b67e2743d90e056b Mon Sep 17 00:00:00 2001 From: jules01 Date: Mon, 22 May 2023 17:50:01 +0300 Subject: [PATCH 
205/221] - fixed the indexer creation --- factory/status/statusComponents.go | 1 + 1 file changed, 1 insertion(+) diff --git a/factory/status/statusComponents.go b/factory/status/statusComponents.go index d55702d0e14..44c2c0b4b2f 100644 --- a/factory/status/statusComponents.go +++ b/factory/status/statusComponents.go @@ -227,6 +227,7 @@ func (scf *statusComponentsFactory) makeElasticIndexerArgs() indexerFactory.Args Denomination: scf.economicsConfig.GlobalSettings.Denomination, UseKibana: elasticSearchConfig.UseKibana, ImportDB: scf.isInImportMode, + HeaderMarshaller: scf.coreComponents.InternalMarshalizer(), } } From fded02f8f46270504e06620363aabe17e1d718c9 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Tue, 23 May 2023 10:53:20 +0300 Subject: [PATCH 206/221] removed mx-chain-p2p-go dependency and used the p2p package from mx-chain-communication-go --- go.mod | 13 +++++-------- go.sum | 26 ++++++++------------------ p2p/config/config.go | 2 +- p2p/constants.go | 2 +- p2p/errors.go | 2 +- p2p/factory/factory.go | 14 +++++++------- p2p/interface.go | 2 +- 7 files changed, 24 insertions(+), 37 deletions(-) diff --git a/go.mod b/go.mod index ab2ef885c4d..63ed6e6f0c3 100644 --- a/go.mod +++ b/go.mod @@ -13,12 +13,11 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-communication-go v1.0.0 + github.com/multiversx/mx-chain-communication-go v1.0.1-0.20230522155438-b9bc3ba7183f github.com/multiversx/mx-chain-core-go v1.2.4 github.com/multiversx/mx-chain-crypto-go v1.2.6 github.com/multiversx/mx-chain-es-indexer-go v1.4.3 github.com/multiversx/mx-chain-logger-go v1.0.11 - github.com/multiversx/mx-chain-p2p-go v1.0.16 github.com/multiversx/mx-chain-storage-go v1.0.8 github.com/multiversx/mx-chain-vm-common-go v1.4.2 github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53 @@ -73,18 +72,17 @@ require ( github.com/hashicorp/golang-lru v0.6.0 // indirect 
github.com/herumi/bls-go-binary v1.0.0 // indirect github.com/huin/goupnp v1.0.3 // indirect - github.com/ipfs/go-cid v0.3.2 // indirect + github.com/ipfs/go-cid v0.2.0 // indirect github.com/ipfs/go-datastore v0.5.1 // indirect github.com/ipfs/go-ipfs-util v0.0.2 // indirect github.com/ipfs/go-ipns v0.2.0 // indirect github.com/ipfs/go-log v1.0.5 // indirect github.com/ipfs/go-log/v2 v2.5.1 // indirect - github.com/ipld/go-ipld-prime v0.19.0 // indirect + github.com/ipld/go-ipld-prime v0.9.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect github.com/jbenet/goprocess v0.1.4 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/jtolds/gls v4.20.0+incompatible // indirect github.com/klauspost/compress v1.15.1 // indirect github.com/klauspost/cpuid/v2 v2.1.0 // indirect github.com/koron/go-ssdp v0.0.3 // indirect @@ -129,7 +127,7 @@ require ( github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect github.com/multiformats/go-multibase v0.1.1 // indirect - github.com/multiformats/go-multicodec v0.6.0 // indirect + github.com/multiformats/go-multicodec v0.5.0 // indirect github.com/multiformats/go-multihash v0.2.1 // indirect github.com/multiformats/go-multistream v0.3.3 // indirect github.com/multiformats/go-varint v0.0.6 // indirect @@ -143,7 +141,7 @@ require ( github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pelletier/go-toml/v2 v2.0.6 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e // indirect + github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1 // indirect github.com/prometheus/client_golang v1.12.1 // indirect github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/common v0.37.0 // indirect @@ -151,7 +149,6 @@ require ( github.com/raulk/go-watchdog v1.3.0 // indirect 
github.com/russross/blackfriday/v2 v2.0.1 // indirect github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect - github.com/smartystreets/assertions v1.13.0 // indirect github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect diff --git a/go.sum b/go.sum index 792f954a3bb..de71d770005 100644 --- a/go.sum +++ b/go.sum @@ -168,8 +168,6 @@ github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwU github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= -github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= -github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= @@ -277,9 +275,8 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -341,9 +338,8 @@ github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUP github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/ipfs/go-cid v0.2.0 h1:01JTiihFq9en9Vz0lc0VDWvZe/uBonGpzo4THP0vcQ0= github.com/ipfs/go-cid v0.2.0/go.mod h1:P+HXFDF4CVhaVayiEb4wkAy7zBHxBwsJyt0Y5U6MLro= -github.com/ipfs/go-cid v0.3.2 h1:OGgOd+JCFM+y1DjWPmVH+2/4POtpDzwcr7VgnB7mZXc= -github.com/ipfs/go-cid v0.3.2/go.mod h1:gQ8pKqT/sUxGY+tIwy1RPpAojYu7jAyCp5Tz1svoupw= github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= @@ -376,9 +372,8 @@ github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Ax github.com/ipfs/go-log/v2 v2.5.0/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= +github.com/ipld/go-ipld-prime v0.9.0 h1:N2OjJMb+fhyFPwPnVvJcWU/NsumP8etal+d2v3G4eww= github.com/ipld/go-ipld-prime v0.9.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= -github.com/ipld/go-ipld-prime v0.19.0 h1:5axC7rJmPc17Emw6TelxGwnzALk0PdupZ2oj2roDj04= -github.com/ipld/go-ipld-prime v0.19.0/go.mod h1:Q9j3BaVXwaA3o5JUDNvptDDr/x8+F7FG6XJ8WI3ILg4= github.com/jackpal/go-nat-pmp v1.0.2 
h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= @@ -595,9 +590,8 @@ github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPw github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI= github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8= github.com/multiformats/go-multicodec v0.4.1/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= +github.com/multiformats/go-multicodec v0.5.0 h1:EgU6cBe/D7WRwQb1KmnBvU7lrcFGMggZVTPtOW9dDHs= github.com/multiformats/go-multicodec v0.5.0/go.mod h1:DiY2HFaEp5EhEXb/iYzVAunmyX/aSFMxq2KMKfWEues= -github.com/multiformats/go-multicodec v0.6.0 h1:KhH2kSuCARyuJraYMFxrNO3DqIaYhOdS039kbhgVwpE= -github.com/multiformats/go-multicodec v0.6.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= @@ -616,8 +610,9 @@ github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2 github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= -github.com/multiversx/mx-chain-communication-go v1.0.0 h1:ZGIIrWIE7RqpF7gvMfshH+CJUehviXzkWlxnpZ02efE= github.com/multiversx/mx-chain-communication-go v1.0.0/go.mod h1:GPHOm4HSXbvC0IotMziWXQmhtsUe69ScBPYsb+mF9bk= +github.com/multiversx/mx-chain-communication-go v1.0.1-0.20230522155438-b9bc3ba7183f 
h1:zcjDCpcjntD+37uLbhNdSPrhaVjrXhOTBZp9AFeqgxg= +github.com/multiversx/mx-chain-communication-go v1.0.1-0.20230522155438-b9bc3ba7183f/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1-0.20230510143029-ab37792342df/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= @@ -629,8 +624,6 @@ github.com/multiversx/mx-chain-es-indexer-go v1.4.3 h1:s6eX2dJSr/yjbGiF00Q68ar0j github.com/multiversx/mx-chain-es-indexer-go v1.4.3/go.mod h1:b2TVf5kCmmFQUjagI962YaKa2uqOEMn7dbTsiE/0J6U= github.com/multiversx/mx-chain-logger-go v1.0.11 h1:DFsHa+sc5fKwhDR50I8uBM99RTDTEW68ESyr5ALRDwE= github.com/multiversx/mx-chain-logger-go v1.0.11/go.mod h1:1srDkP0DQucWQ+rYfaq0BX2qLnULsUdRPADpYUTM6dA= -github.com/multiversx/mx-chain-p2p-go v1.0.16 h1:iMK8KUi006/avVcmecnk7lqbDCRL0BN04vgepoVLlyY= -github.com/multiversx/mx-chain-p2p-go v1.0.16/go.mod h1:7piVUb5Z7UHK6n3JW8yIc32RdDFZu7GivRY29q0r2gI= github.com/multiversx/mx-chain-storage-go v1.0.8 h1:PB9OAwZs3rWz7nybBOxVCxgrd785FUUUAsVc5JWXYCw= github.com/multiversx/mx-chain-storage-go v1.0.8/go.mod h1:lEkFYFe6taKYxqO1einNgT1esY3K9Pj6vPnoCwV9C3U= github.com/multiversx/mx-chain-vm-common-go v1.4.1/go.mod h1:K6yCdro8VohzYI6GwjGzTO+fJiPgO5coo2sgQb+zA24= @@ -691,9 +684,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1 h1:CskT+S6Ay54OwxBGB0R3Rsx4Muto6UnEYTyKJbyRIAI= github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod 
h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= -github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e h1:ZOcivgkkFRnjfoTcGsDq3UQYiBmekwLA+qg0OjyB/ls= -github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -764,9 +756,8 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/assertions v1.13.0 h1:Dx1kYM01xsSqKPno3aqLnrwac2LetPvN23diwyr69Qs= -github.com/smartystreets/assertions v1.13.0/go.mod h1:wDmR7qL282YbGsPy6H/yAsesrxfxaaSlJazyFLYVFx8= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= @@ -828,7 +819,6 @@ github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60Nt github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= github.com/wangjia184/sortedset v0.0.0-20160527075905-f5d03557ba30/go.mod 
h1:YkocrP2K2tcw938x9gCOmT5G5eCD6jsTz0SZuyAqwIE= -github.com/warpfork/go-testmark v0.10.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a h1:G++j5e0OC488te356JvdhaM8YS6nMsjLAYF7JxCv07w= github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= diff --git a/p2p/config/config.go b/p2p/config/config.go index 00ee4b5775b..eb2bf95d07c 100644 --- a/p2p/config/config.go +++ b/p2p/config/config.go @@ -1,6 +1,6 @@ package config -import "github.com/multiversx/mx-chain-p2p-go/config" +import "github.com/multiversx/mx-chain-communication-go/p2p/config" // P2PConfig will hold all the P2P settings type P2PConfig = config.P2PConfig diff --git a/p2p/constants.go b/p2p/constants.go index 271cae06736..4f0807484b7 100644 --- a/p2p/constants.go +++ b/p2p/constants.go @@ -1,7 +1,7 @@ package p2p import ( - p2p "github.com/multiversx/mx-chain-p2p-go" + "github.com/multiversx/mx-chain-communication-go/p2p" ) // NodeOperation defines the p2p node operation diff --git a/p2p/errors.go b/p2p/errors.go index 4f841161bbc..d80b9445433 100644 --- a/p2p/errors.go +++ b/p2p/errors.go @@ -3,7 +3,7 @@ package p2p import ( "errors" - p2p "github.com/multiversx/mx-chain-p2p-go" + "github.com/multiversx/mx-chain-communication-go/p2p" ) // ErrNilMessage signals that a nil message has been received diff --git a/p2p/factory/factory.go b/p2p/factory/factory.go index cfc6c6f6e6f..c64ee34775c 100644 --- a/p2p/factory/factory.go +++ b/p2p/factory/factory.go @@ -1,13 +1,13 @@ package factory import ( + "github.com/multiversx/mx-chain-communication-go/p2p/libp2p" + "github.com/multiversx/mx-chain-communication-go/p2p/libp2p/crypto" + "github.com/multiversx/mx-chain-communication-go/p2p/message" + messagecheck 
"github.com/multiversx/mx-chain-communication-go/p2p/messageCheck" + "github.com/multiversx/mx-chain-communication-go/p2p/peersHolder" + "github.com/multiversx/mx-chain-communication-go/p2p/rating" "github.com/multiversx/mx-chain-go/p2p" - "github.com/multiversx/mx-chain-p2p-go/libp2p" - p2pCrypto "github.com/multiversx/mx-chain-p2p-go/libp2p/crypto" - "github.com/multiversx/mx-chain-p2p-go/message" - messagecheck "github.com/multiversx/mx-chain-p2p-go/messageCheck" - "github.com/multiversx/mx-chain-p2p-go/peersHolder" - "github.com/multiversx/mx-chain-p2p-go/rating" ) // ArgsNetworkMessenger defines the options used to create a p2p wrapper @@ -53,7 +53,7 @@ func NewPeersHolder(preferredConnectionAddresses []string) (p2p.PreferredPeersHo // NewP2PKeyConverter returns a new instance of p2pKeyConverter func NewP2PKeyConverter() p2p.P2PKeyConverter { - return p2pCrypto.NewP2PKeyConverter() + return crypto.NewP2PKeyConverter() } // NewMessageVerifier will return a new instance of messages verifier diff --git a/p2p/interface.go b/p2p/interface.go index 8f57650b85c..f643852dc32 100644 --- a/p2p/interface.go +++ b/p2p/interface.go @@ -4,9 +4,9 @@ import ( "encoding/hex" "time" + "github.com/multiversx/mx-chain-communication-go/p2p" "github.com/multiversx/mx-chain-core-go/core" crypto "github.com/multiversx/mx-chain-crypto-go" - p2p "github.com/multiversx/mx-chain-p2p-go" ) // MessageProcessor is the interface used to describe what a receive message processor should do From 075f73a6a7617394ed8bae4f3276d24581a23177 Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Tue, 23 May 2023 14:24:52 +0300 Subject: [PATCH 207/221] fix after review --- testscommon/components/components.go | 2 +- testscommon/memDbMock.go | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/testscommon/components/components.go b/testscommon/components/components.go index 2d19d06d1bf..7b15febbe53 100644 --- a/testscommon/components/components.go +++ 
b/testscommon/components/components.go @@ -2,7 +2,6 @@ package components import ( "fmt" - "github.com/multiversx/mx-chain-go/testscommon/storage" "math/big" "testing" @@ -39,6 +38,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon/dblookupext" "github.com/multiversx/mx-chain-go/testscommon/shardingMocks" statusHandlerMock "github.com/multiversx/mx-chain-go/testscommon/statusHandler" + "github.com/multiversx/mx-chain-go/testscommon/storage" "github.com/multiversx/mx-chain-go/trie" logger "github.com/multiversx/mx-chain-logger-go" wasmConfig "github.com/multiversx/mx-chain-vm-v1_4-go/config" diff --git a/testscommon/memDbMock.go b/testscommon/memDbMock.go index 6891b0ff6dd..7caa6ad947f 100644 --- a/testscommon/memDbMock.go +++ b/testscommon/memDbMock.go @@ -118,6 +118,7 @@ func (s *MemDbMock) RangeKeys(handler func(key []byte, value []byte) bool) { } } +// GetIdentifier returns the identifier of the storage medium func (s *MemDbMock) GetIdentifier() string { if s.GetIdentifierCalled != nil { return s.GetIdentifierCalled() From 63d3d019abfa145742a7a563f9fcc3a7f00367ad Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Tue, 23 May 2023 16:37:16 +0300 Subject: [PATCH 208/221] update go mod --- go.mod | 2 +- go.sum | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 806dc0efe4a..01a7b3cbf62 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 github.com/multiversx/mx-chain-communication-go v1.0.0 - github.com/multiversx/mx-chain-core-go v1.2.4 + github.com/multiversx/mx-chain-core-go v1.2.5-0.20230523132237-215d9ab6e398 github.com/multiversx/mx-chain-crypto-go v1.2.6 github.com/multiversx/mx-chain-es-indexer-go v1.4.3 github.com/multiversx/mx-chain-logger-go v1.0.11 diff --git a/go.sum b/go.sum index 8e12a2ecda5..32544a65580 100644 --- a/go.sum +++ b/go.sum @@ -621,8 +621,10 @@ github.com/multiversx/mx-chain-communication-go 
v1.0.0/go.mod h1:GPHOm4HSXbvC0Io github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1-0.20230510143029-ab37792342df/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.2.4 h1:BRXyajUevLU6zHszR8jnp2+7C2bAQBor51YTc4dp3YQ= +github.com/multiversx/mx-chain-core-go v1.2.4-0.20230518082654-bf2789086b0f/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.4/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= +github.com/multiversx/mx-chain-core-go v1.2.5-0.20230523132237-215d9ab6e398 h1:herVQVbYWRMgmI2i9/LdFCmBwGSlNOYoLyaJIB1t1eE= +github.com/multiversx/mx-chain-core-go v1.2.5-0.20230523132237-215d9ab6e398/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= github.com/multiversx/mx-chain-crypto-go v1.2.6 h1:yxsjAQGh62los+iYmORMfh3w9qen0xbYlmwU0juNSeg= github.com/multiversx/mx-chain-crypto-go v1.2.6/go.mod h1:rOj0Rr19HTOYt9YTeym7RKxlHt91NXln3LVKjHKVmA0= github.com/multiversx/mx-chain-es-indexer-go v1.4.3 h1:s6eX2dJSr/yjbGiF00Q68ar0jcvGkBm+ZzEa8/tpHzM= From 5e123e5ab6301188331bc459053e1ff3a60cfc94 Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Wed, 24 May 2023 09:58:06 +0300 Subject: [PATCH 209/221] fix race condition in test --- state/accountsDB_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/state/accountsDB_test.go b/state/accountsDB_test.go index 12995a1bca7..ae56e67ccfd 100644 --- a/state/accountsDB_test.go +++ b/state/accountsDB_test.go @@ -2000,6 +2000,7 @@ func mergeMaps(map1 common.ModifiedHashes, map2 common.ModifiedHashes) { func TestAccountsDB_CommitSetsStateCheckpointIfCheckpointHashesHolderIsFull(t *testing.T) { t.Parallel() + mutex := &sync.Mutex{} newHashes := make(common.ModifiedHashes) numRemoveCalls := 0 checkpointHashesHolder := 
&trieMock.CheckpointHashesHolderStub{ @@ -2007,9 +2008,11 @@ func TestAccountsDB_CommitSetsStateCheckpointIfCheckpointHashesHolderIsFull(t *t return true }, RemoveCalled: func(hash []byte) { + mutex.Lock() _, ok := newHashes[string(hash)] assert.True(t, ok) numRemoveCalls++ + mutex.Unlock() }, } From 9b2d4003d66de94a46006270dbaf986643b93646 Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Wed, 24 May 2023 11:21:17 +0300 Subject: [PATCH 210/221] proper release --- go.mod | 6 +++--- go.sum | 13 ++++++------- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/go.mod b/go.mod index 01a7b3cbf62..c0c6db38ad4 100644 --- a/go.mod +++ b/go.mod @@ -14,13 +14,13 @@ require ( github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 github.com/multiversx/mx-chain-communication-go v1.0.0 - github.com/multiversx/mx-chain-core-go v1.2.5-0.20230523132237-215d9ab6e398 + github.com/multiversx/mx-chain-core-go v1.2.5 github.com/multiversx/mx-chain-crypto-go v1.2.6 github.com/multiversx/mx-chain-es-indexer-go v1.4.3 github.com/multiversx/mx-chain-logger-go v1.0.11 github.com/multiversx/mx-chain-p2p-go v1.0.16 - github.com/multiversx/mx-chain-storage-go v1.0.9-0.20230518083218-98a7f2c893e5 - github.com/multiversx/mx-chain-vm-common-go v1.4.3-0.20230518083542-c2304957adc2 + github.com/multiversx/mx-chain-storage-go v1.0.10 + github.com/multiversx/mx-chain-vm-common-go v1.4.4 github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53 github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54 github.com/multiversx/mx-chain-vm-v1_4-go v1.4.80 diff --git a/go.sum b/go.sum index 32544a65580..c2fb741663c 100644 --- a/go.sum +++ b/go.sum @@ -621,10 +621,9 @@ github.com/multiversx/mx-chain-communication-go v1.0.0/go.mod h1:GPHOm4HSXbvC0Io github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1-0.20230510143029-ab37792342df/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= 
github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.2.4-0.20230518082654-bf2789086b0f/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.4/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= -github.com/multiversx/mx-chain-core-go v1.2.5-0.20230523132237-215d9ab6e398 h1:herVQVbYWRMgmI2i9/LdFCmBwGSlNOYoLyaJIB1t1eE= -github.com/multiversx/mx-chain-core-go v1.2.5-0.20230523132237-215d9ab6e398/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= +github.com/multiversx/mx-chain-core-go v1.2.5 h1:uIZSqRygJAxv+pGuZnoSMwS4t10C/paasuwps5nxrIQ= +github.com/multiversx/mx-chain-core-go v1.2.5/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= github.com/multiversx/mx-chain-crypto-go v1.2.6 h1:yxsjAQGh62los+iYmORMfh3w9qen0xbYlmwU0juNSeg= github.com/multiversx/mx-chain-crypto-go v1.2.6/go.mod h1:rOj0Rr19HTOYt9YTeym7RKxlHt91NXln3LVKjHKVmA0= github.com/multiversx/mx-chain-es-indexer-go v1.4.3 h1:s6eX2dJSr/yjbGiF00Q68ar0jcvGkBm+ZzEa8/tpHzM= @@ -634,11 +633,11 @@ github.com/multiversx/mx-chain-logger-go v1.0.11/go.mod h1:1srDkP0DQucWQ+rYfaq0B github.com/multiversx/mx-chain-p2p-go v1.0.16 h1:iMK8KUi006/avVcmecnk7lqbDCRL0BN04vgepoVLlyY= github.com/multiversx/mx-chain-p2p-go v1.0.16/go.mod h1:7piVUb5Z7UHK6n3JW8yIc32RdDFZu7GivRY29q0r2gI= github.com/multiversx/mx-chain-storage-go v1.0.8/go.mod h1:lEkFYFe6taKYxqO1einNgT1esY3K9Pj6vPnoCwV9C3U= -github.com/multiversx/mx-chain-storage-go v1.0.9-0.20230518083218-98a7f2c893e5 h1:rScGG2KmUduMXBqqp5zVo4PyanYfbJlxk35rYLMMvfA= -github.com/multiversx/mx-chain-storage-go v1.0.9-0.20230518083218-98a7f2c893e5/go.mod h1:rtJdopIbIKYLcA2alB6FCnNaYxJI9oziLtqViKsePQs= +github.com/multiversx/mx-chain-storage-go v1.0.10 h1:5rzPMME+CEJyoGGJ1tAb6ISnPmr68VFvGoKo0hF0WtU= +github.com/multiversx/mx-chain-storage-go v1.0.10/go.mod h1:VP9fwyFBmbmDzahUuu0IeGX/dKG3iBWjN6FSQ6YtVaI= 
github.com/multiversx/mx-chain-vm-common-go v1.4.1/go.mod h1:K6yCdro8VohzYI6GwjGzTO+fJiPgO5coo2sgQb+zA24= -github.com/multiversx/mx-chain-vm-common-go v1.4.3-0.20230518083542-c2304957adc2 h1:Iv2mCubTRBAUj3AzwUoRauDC7NqS2ENxmbXbRcejHDQ= -github.com/multiversx/mx-chain-vm-common-go v1.4.3-0.20230518083542-c2304957adc2/go.mod h1:TtVyrNticDW82wU9blIwPNZyNVJMcpE7+pgqZj2Efs0= +github.com/multiversx/mx-chain-vm-common-go v1.4.4 h1:hD5Sx1leFEvF/4qXcWSK9O+/MxGJGrEHzUe1HPXm9DU= +github.com/multiversx/mx-chain-vm-common-go v1.4.4/go.mod h1:+AjDwO/RJwQ75dzHJ/gBxmi5uTdICdhAo8bGNHTf7Yk= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53 h1:HGbatzgIhVPJASN3ADnygH4MrKypAidOVtZkHkY/jKw= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53/go.mod h1:STVJW9m/TUJ9Q64W/T4P/KuhP5fPb+cCb6Q4gSehWJg= github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54 h1:bl2essObOEDwVWci71hJ2QO5AYTsKk6IlzRK0i8y63w= From 80d101c54acad07f74575f7ca086d2b176b5098d Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Wed, 24 May 2023 14:11:44 +0300 Subject: [PATCH 211/221] update vm common ref --- go.mod | 4 ++-- go.sum | 7 ++++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index c2440603b70..c44776eda20 100644 --- a/go.mod +++ b/go.mod @@ -14,13 +14,13 @@ require ( github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 github.com/multiversx/mx-chain-communication-go v1.0.0 - github.com/multiversx/mx-chain-core-go v1.2.4 + github.com/multiversx/mx-chain-core-go v1.2.5 github.com/multiversx/mx-chain-crypto-go v1.2.6 github.com/multiversx/mx-chain-es-indexer-go v1.4.3 github.com/multiversx/mx-chain-logger-go v1.0.11 github.com/multiversx/mx-chain-p2p-go v1.0.16 github.com/multiversx/mx-chain-storage-go v1.0.8 - github.com/multiversx/mx-chain-vm-common-go v1.4.3-0.20230518100603-9784aae99d8a + github.com/multiversx/mx-chain-vm-common-go v1.4.5-0.20230524110929-f0e1204a3796 github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53 github.com/multiversx/mx-chain-vm-v1_3-go 
v1.3.54 github.com/multiversx/mx-chain-vm-v1_4-go v1.4.80 diff --git a/go.sum b/go.sum index 59e7d8c5f35..15ae8f2ca85 100644 --- a/go.sum +++ b/go.sum @@ -621,8 +621,9 @@ github.com/multiversx/mx-chain-communication-go v1.0.0/go.mod h1:GPHOm4HSXbvC0Io github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1-0.20230510143029-ab37792342df/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= -github.com/multiversx/mx-chain-core-go v1.2.4 h1:BRXyajUevLU6zHszR8jnp2+7C2bAQBor51YTc4dp3YQ= github.com/multiversx/mx-chain-core-go v1.2.4/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= +github.com/multiversx/mx-chain-core-go v1.2.5 h1:uIZSqRygJAxv+pGuZnoSMwS4t10C/paasuwps5nxrIQ= +github.com/multiversx/mx-chain-core-go v1.2.5/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= github.com/multiversx/mx-chain-crypto-go v1.2.6 h1:yxsjAQGh62los+iYmORMfh3w9qen0xbYlmwU0juNSeg= github.com/multiversx/mx-chain-crypto-go v1.2.6/go.mod h1:rOj0Rr19HTOYt9YTeym7RKxlHt91NXln3LVKjHKVmA0= github.com/multiversx/mx-chain-es-indexer-go v1.4.3 h1:s6eX2dJSr/yjbGiF00Q68ar0jcvGkBm+ZzEa8/tpHzM= @@ -634,8 +635,8 @@ github.com/multiversx/mx-chain-p2p-go v1.0.16/go.mod h1:7piVUb5Z7UHK6n3JW8yIc32R github.com/multiversx/mx-chain-storage-go v1.0.8 h1:PB9OAwZs3rWz7nybBOxVCxgrd785FUUUAsVc5JWXYCw= github.com/multiversx/mx-chain-storage-go v1.0.8/go.mod h1:lEkFYFe6taKYxqO1einNgT1esY3K9Pj6vPnoCwV9C3U= github.com/multiversx/mx-chain-vm-common-go v1.4.1/go.mod h1:K6yCdro8VohzYI6GwjGzTO+fJiPgO5coo2sgQb+zA24= -github.com/multiversx/mx-chain-vm-common-go v1.4.3-0.20230518100603-9784aae99d8a h1:zxcm1RXo5EOGycnU1w8BmvqbS+P/wGRC4QmWLrPxQ3Y= -github.com/multiversx/mx-chain-vm-common-go v1.4.3-0.20230518100603-9784aae99d8a/go.mod h1:K6yCdro8VohzYI6GwjGzTO+fJiPgO5coo2sgQb+zA24= +github.com/multiversx/mx-chain-vm-common-go 
v1.4.5-0.20230524110929-f0e1204a3796 h1:Yct5Xr9kLDbqmK9nZUBI4I6ZMaqHhMBV2oGPzsY5m7A= +github.com/multiversx/mx-chain-vm-common-go v1.4.5-0.20230524110929-f0e1204a3796/go.mod h1:+AjDwO/RJwQ75dzHJ/gBxmi5uTdICdhAo8bGNHTf7Yk= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53 h1:HGbatzgIhVPJASN3ADnygH4MrKypAidOVtZkHkY/jKw= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53/go.mod h1:STVJW9m/TUJ9Q64W/T4P/KuhP5fPb+cCb6Q4gSehWJg= github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54 h1:bl2essObOEDwVWci71hJ2QO5AYTsKk6IlzRK0i8y63w= From 9fabb12abe3064f38d457b3b6511176a0a1c9f91 Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Wed, 24 May 2023 14:13:15 +0300 Subject: [PATCH 212/221] go mod tidy --- go.sum | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/go.sum b/go.sum index c2fb741663c..3068060ae11 100644 --- a/go.sum +++ b/go.sum @@ -636,8 +636,8 @@ github.com/multiversx/mx-chain-storage-go v1.0.8/go.mod h1:lEkFYFe6taKYxqO1einNg github.com/multiversx/mx-chain-storage-go v1.0.10 h1:5rzPMME+CEJyoGGJ1tAb6ISnPmr68VFvGoKo0hF0WtU= github.com/multiversx/mx-chain-storage-go v1.0.10/go.mod h1:VP9fwyFBmbmDzahUuu0IeGX/dKG3iBWjN6FSQ6YtVaI= github.com/multiversx/mx-chain-vm-common-go v1.4.1/go.mod h1:K6yCdro8VohzYI6GwjGzTO+fJiPgO5coo2sgQb+zA24= -github.com/multiversx/mx-chain-vm-common-go v1.4.4 h1:hD5Sx1leFEvF/4qXcWSK9O+/MxGJGrEHzUe1HPXm9DU= -github.com/multiversx/mx-chain-vm-common-go v1.4.4/go.mod h1:+AjDwO/RJwQ75dzHJ/gBxmi5uTdICdhAo8bGNHTf7Yk= +github.com/multiversx/mx-chain-vm-common-go v1.4.5-0.20230524110929-f0e1204a3796 h1:Yct5Xr9kLDbqmK9nZUBI4I6ZMaqHhMBV2oGPzsY5m7A= +github.com/multiversx/mx-chain-vm-common-go v1.4.5-0.20230524110929-f0e1204a3796/go.mod h1:+AjDwO/RJwQ75dzHJ/gBxmi5uTdICdhAo8bGNHTf7Yk= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53 h1:HGbatzgIhVPJASN3ADnygH4MrKypAidOVtZkHkY/jKw= github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53/go.mod h1:STVJW9m/TUJ9Q64W/T4P/KuhP5fPb+cCb6Q4gSehWJg= github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54 
h1:bl2essObOEDwVWci71hJ2QO5AYTsKk6IlzRK0i8y63w= From bc9a59884b44fe380df6a616992b2666084cb37e Mon Sep 17 00:00:00 2001 From: BeniaminDrasovean Date: Wed, 24 May 2023 14:27:21 +0300 Subject: [PATCH 213/221] update vm versions in go mod --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index c0c6db38ad4..f75828309b2 100644 --- a/go.mod +++ b/go.mod @@ -21,9 +21,9 @@ require ( github.com/multiversx/mx-chain-p2p-go v1.0.16 github.com/multiversx/mx-chain-storage-go v1.0.10 github.com/multiversx/mx-chain-vm-common-go v1.4.4 - github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53 - github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54 - github.com/multiversx/mx-chain-vm-v1_4-go v1.4.80 + github.com/multiversx/mx-chain-vm-v1_2-go v1.2.55 + github.com/multiversx/mx-chain-vm-v1_3-go v1.3.56 + github.com/multiversx/mx-chain-vm-v1_4-go v1.4.82 github.com/pelletier/go-toml v1.9.3 github.com/pkg/errors v0.9.1 github.com/shirou/gopsutil v3.21.11+incompatible diff --git a/go.sum b/go.sum index c2fb741663c..0c671a38d29 100644 --- a/go.sum +++ b/go.sum @@ -638,12 +638,12 @@ github.com/multiversx/mx-chain-storage-go v1.0.10/go.mod h1:VP9fwyFBmbmDzahUuu0I github.com/multiversx/mx-chain-vm-common-go v1.4.1/go.mod h1:K6yCdro8VohzYI6GwjGzTO+fJiPgO5coo2sgQb+zA24= github.com/multiversx/mx-chain-vm-common-go v1.4.4 h1:hD5Sx1leFEvF/4qXcWSK9O+/MxGJGrEHzUe1HPXm9DU= github.com/multiversx/mx-chain-vm-common-go v1.4.4/go.mod h1:+AjDwO/RJwQ75dzHJ/gBxmi5uTdICdhAo8bGNHTf7Yk= -github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53 h1:HGbatzgIhVPJASN3ADnygH4MrKypAidOVtZkHkY/jKw= -github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53/go.mod h1:STVJW9m/TUJ9Q64W/T4P/KuhP5fPb+cCb6Q4gSehWJg= -github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54 h1:bl2essObOEDwVWci71hJ2QO5AYTsKk6IlzRK0i8y63w= -github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54/go.mod h1:1rgU8wXdn76S7rZx+4YS6ObK+M1XiSdPoPmXVq8fuZE= -github.com/multiversx/mx-chain-vm-v1_4-go v1.4.80 
h1:iiOXTcwvfjQXlchlVnSdNeqHYKVn/k7s/MsHfk/wrr0= -github.com/multiversx/mx-chain-vm-v1_4-go v1.4.80/go.mod h1:Be8y+QBPSKacW2TJaaQSeKYNGtCenFt4dpBOAnICAcc= +github.com/multiversx/mx-chain-vm-v1_2-go v1.2.55 h1:jvBLu7JoitavahMDCkfOGYWjgXGBOe+3JJ0hNxj9AZM= +github.com/multiversx/mx-chain-vm-v1_2-go v1.2.55/go.mod h1:jCNgHGyj0JoLAsmijOSVK0G+yphccp9gIKsp/mRguF4= +github.com/multiversx/mx-chain-vm-v1_3-go v1.3.56 h1:VXveqaT/wdipfhIdUHXxFderY3+KxtFEbrDkF+zirr8= +github.com/multiversx/mx-chain-vm-v1_3-go v1.3.56/go.mod h1:guKkvnEDwGPaysZOVa+SaHEyiFDRJkFSVu0VE7jbk4k= +github.com/multiversx/mx-chain-vm-v1_4-go v1.4.82 h1:f0jL0jMPayN+/J/ZoK9sDRLggqvUp+/DJmu0dVTQNq8= +github.com/multiversx/mx-chain-vm-v1_4-go v1.4.82/go.mod h1:tKdkDQXDPFE5vAYOAJOq2iiTibi9KeiasNWsmA4nEmk= github.com/multiversx/mx-components-big-int v0.1.1 h1:695mYPKYOrmGEGgRH4/pZruDoe3CPP1LHrBxKfvj5l4= github.com/multiversx/mx-components-big-int v0.1.1/go.mod h1:0QrcFdfeLgJ/am10HGBeH0G0DNF+0Qx1E4DS/iozQls= github.com/multiversx/protobuf v1.3.2 h1:RaNkxvGTGbA0lMcnHAN24qE1G1i+Xs5yHA6MDvQ4mSM= From 261a9753f6be1f299cd65181f469c4de655f93c0 Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Thu, 25 May 2023 11:04:48 +0300 Subject: [PATCH 214/221] updated mx-chain-communication-go --- go.mod | 4 +++- go.sum | 20 +++++++++++++++++--- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 64f0492ae0d..c2fccc62a60 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/google/gops v0.3.18 github.com/gorilla/websocket v1.5.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiversx/mx-chain-communication-go v1.0.1-0.20230522155438-b9bc3ba7183f + github.com/multiversx/mx-chain-communication-go v1.0.2 github.com/multiversx/mx-chain-core-go v1.2.5 github.com/multiversx/mx-chain-crypto-go v1.2.6 github.com/multiversx/mx-chain-es-indexer-go v1.4.3 @@ -83,6 +83,7 @@ require ( github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect github.com/jbenet/goprocess v0.1.4 // 
indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/jtolds/gls v4.20.0+incompatible // indirect github.com/klauspost/compress v1.15.1 // indirect github.com/klauspost/cpuid/v2 v2.1.0 // indirect github.com/koron/go-ssdp v0.0.3 // indirect @@ -149,6 +150,7 @@ require ( github.com/raulk/go-watchdog v1.3.0 // indirect github.com/russross/blackfriday/v2 v2.0.1 // indirect github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect + github.com/smartystreets/assertions v1.13.1 // indirect github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect diff --git a/go.sum b/go.sum index 1f50e7e69f3..f840cae201c 100644 --- a/go.sum +++ b/go.sum @@ -168,6 +168,8 @@ github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwU github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= @@ -276,6 +278,8 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -338,6 +342,8 @@ github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= github.com/ipfs/go-cid v0.2.0/go.mod h1:P+HXFDF4CVhaVayiEb4wkAy7zBHxBwsJyt0Y5U6MLro= +github.com/ipfs/go-cid v0.3.2 h1:OGgOd+JCFM+y1DjWPmVH+2/4POtpDzwcr7VgnB7mZXc= +github.com/ipfs/go-cid v0.3.2/go.mod h1:gQ8pKqT/sUxGY+tIwy1RPpAojYu7jAyCp5Tz1svoupw= github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= @@ -371,6 +377,8 @@ github.com/ipfs/go-log/v2 v2.5.0/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOL github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= github.com/ipld/go-ipld-prime v0.9.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= +github.com/ipld/go-ipld-prime v0.19.0 h1:5axC7rJmPc17Emw6TelxGwnzALk0PdupZ2oj2roDj04= +github.com/ipld/go-ipld-prime v0.19.0/go.mod h1:Q9j3BaVXwaA3o5JUDNvptDDr/x8+F7FG6XJ8WI3ILg4= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jbenet/go-cienv 
v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= @@ -588,6 +596,8 @@ github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyD github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8= github.com/multiformats/go-multicodec v0.4.1/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= github.com/multiformats/go-multicodec v0.5.0/go.mod h1:DiY2HFaEp5EhEXb/iYzVAunmyX/aSFMxq2KMKfWEues= +github.com/multiformats/go-multicodec v0.6.0 h1:KhH2kSuCARyuJraYMFxrNO3DqIaYhOdS039kbhgVwpE= +github.com/multiformats/go-multicodec v0.6.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= @@ -607,8 +617,8 @@ github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXS github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= github.com/multiversx/mx-chain-communication-go v1.0.0/go.mod h1:GPHOm4HSXbvC0IotMziWXQmhtsUe69ScBPYsb+mF9bk= -github.com/multiversx/mx-chain-communication-go v1.0.1-0.20230522155438-b9bc3ba7183f h1:zcjDCpcjntD+37uLbhNdSPrhaVjrXhOTBZp9AFeqgxg= -github.com/multiversx/mx-chain-communication-go v1.0.1-0.20230522155438-b9bc3ba7183f/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= +github.com/multiversx/mx-chain-communication-go v1.0.2 h1:1AKdqFZNmigt1kcwYMl+L8fzolsb+WpeTX6yzpmvbV4= +github.com/multiversx/mx-chain-communication-go v1.0.2/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go 
v1.2.1-0.20230510143029-ab37792342df/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= @@ -621,7 +631,6 @@ github.com/multiversx/mx-chain-es-indexer-go v1.4.3 h1:s6eX2dJSr/yjbGiF00Q68ar0j github.com/multiversx/mx-chain-es-indexer-go v1.4.3/go.mod h1:b2TVf5kCmmFQUjagI962YaKa2uqOEMn7dbTsiE/0J6U= github.com/multiversx/mx-chain-logger-go v1.0.11 h1:DFsHa+sc5fKwhDR50I8uBM99RTDTEW68ESyr5ALRDwE= github.com/multiversx/mx-chain-logger-go v1.0.11/go.mod h1:1srDkP0DQucWQ+rYfaq0BX2qLnULsUdRPADpYUTM6dA= -github.com/multiversx/mx-chain-storage-go v1.0.8 h1:PB9OAwZs3rWz7nybBOxVCxgrd785FUUUAsVc5JWXYCw= github.com/multiversx/mx-chain-storage-go v1.0.8/go.mod h1:lEkFYFe6taKYxqO1einNgT1esY3K9Pj6vPnoCwV9C3U= github.com/multiversx/mx-chain-storage-go v1.0.10 h1:5rzPMME+CEJyoGGJ1tAb6ISnPmr68VFvGoKo0hF0WtU= github.com/multiversx/mx-chain-storage-go v1.0.10/go.mod h1:VP9fwyFBmbmDzahUuu0IeGX/dKG3iBWjN6FSQ6YtVaI= @@ -684,6 +693,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e h1:ZOcivgkkFRnjfoTcGsDq3UQYiBmekwLA+qg0OjyB/ls= +github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -755,6 +766,8 @@ github.com/sirupsen/logrus v1.6.0/go.mod 
h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.13.1 h1:Ef7KhSmjZcK6AVf9YbJdvPYG9avaF0ZxudX+ThRdWfU= +github.com/smartystreets/assertions v1.13.1/go.mod h1:cXr/IwVfSo/RbCSPhoAPv73p3hlSdrBH/b3SdnW/LMY= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= @@ -816,6 +829,7 @@ github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60Nt github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= github.com/wangjia184/sortedset v0.0.0-20160527075905-f5d03557ba30/go.mod h1:YkocrP2K2tcw938x9gCOmT5G5eCD6jsTz0SZuyAqwIE= +github.com/warpfork/go-testmark v0.10.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a h1:G++j5e0OC488te356JvdhaM8YS6nMsjLAYF7JxCv07w= github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= From aa3ec083b8585fc2d57b34a753e2a764881bf479 Mon Sep 17 00:00:00 2001 From: Bogdan Rosianu Date: Thu, 25 May 2023 12:10:31 +0300 Subject: [PATCH 215/221] proper tag --- go.mod | 2 +- go.sum | 26 +++++++++++++------------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/go.mod b/go.mod index 1e3b84943e0..6e147be7fcd 100644 --- 
a/go.mod +++ b/go.mod @@ -19,7 +19,7 @@ require ( github.com/multiversx/mx-chain-es-indexer-go v1.4.3 github.com/multiversx/mx-chain-logger-go v1.0.11 github.com/multiversx/mx-chain-storage-go v1.0.10 - github.com/multiversx/mx-chain-vm-common-go v1.4.5-0.20230524110929-f0e1204a3796 + github.com/multiversx/mx-chain-vm-common-go v1.4.5 github.com/multiversx/mx-chain-vm-v1_2-go v1.2.55 github.com/multiversx/mx-chain-vm-v1_3-go v1.3.56 github.com/multiversx/mx-chain-vm-v1_4-go v1.4.82 diff --git a/go.sum b/go.sum index 3068060ae11..ebd14c9a723 100644 --- a/go.sum +++ b/go.sum @@ -616,8 +616,9 @@ github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2 github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= -github.com/multiversx/mx-chain-communication-go v1.0.0 h1:ZGIIrWIE7RqpF7gvMfshH+CJUehviXzkWlxnpZ02efE= github.com/multiversx/mx-chain-communication-go v1.0.0/go.mod h1:GPHOm4HSXbvC0IotMziWXQmhtsUe69ScBPYsb+mF9bk= +github.com/multiversx/mx-chain-communication-go v1.0.2 h1:1AKdqFZNmigt1kcwYMl+L8fzolsb+WpeTX6yzpmvbV4= +github.com/multiversx/mx-chain-communication-go v1.0.2/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= github.com/multiversx/mx-chain-core-go v1.2.1-0.20230510143029-ab37792342df/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= github.com/multiversx/mx-chain-core-go v1.2.1/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= @@ -630,20 +631,19 @@ github.com/multiversx/mx-chain-es-indexer-go v1.4.3 h1:s6eX2dJSr/yjbGiF00Q68ar0j github.com/multiversx/mx-chain-es-indexer-go v1.4.3/go.mod h1:b2TVf5kCmmFQUjagI962YaKa2uqOEMn7dbTsiE/0J6U= github.com/multiversx/mx-chain-logger-go v1.0.11 
h1:DFsHa+sc5fKwhDR50I8uBM99RTDTEW68ESyr5ALRDwE= github.com/multiversx/mx-chain-logger-go v1.0.11/go.mod h1:1srDkP0DQucWQ+rYfaq0BX2qLnULsUdRPADpYUTM6dA= -github.com/multiversx/mx-chain-p2p-go v1.0.16 h1:iMK8KUi006/avVcmecnk7lqbDCRL0BN04vgepoVLlyY= -github.com/multiversx/mx-chain-p2p-go v1.0.16/go.mod h1:7piVUb5Z7UHK6n3JW8yIc32RdDFZu7GivRY29q0r2gI= github.com/multiversx/mx-chain-storage-go v1.0.8/go.mod h1:lEkFYFe6taKYxqO1einNgT1esY3K9Pj6vPnoCwV9C3U= github.com/multiversx/mx-chain-storage-go v1.0.10 h1:5rzPMME+CEJyoGGJ1tAb6ISnPmr68VFvGoKo0hF0WtU= github.com/multiversx/mx-chain-storage-go v1.0.10/go.mod h1:VP9fwyFBmbmDzahUuu0IeGX/dKG3iBWjN6FSQ6YtVaI= github.com/multiversx/mx-chain-vm-common-go v1.4.1/go.mod h1:K6yCdro8VohzYI6GwjGzTO+fJiPgO5coo2sgQb+zA24= -github.com/multiversx/mx-chain-vm-common-go v1.4.5-0.20230524110929-f0e1204a3796 h1:Yct5Xr9kLDbqmK9nZUBI4I6ZMaqHhMBV2oGPzsY5m7A= -github.com/multiversx/mx-chain-vm-common-go v1.4.5-0.20230524110929-f0e1204a3796/go.mod h1:+AjDwO/RJwQ75dzHJ/gBxmi5uTdICdhAo8bGNHTf7Yk= -github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53 h1:HGbatzgIhVPJASN3ADnygH4MrKypAidOVtZkHkY/jKw= -github.com/multiversx/mx-chain-vm-v1_2-go v1.2.53/go.mod h1:STVJW9m/TUJ9Q64W/T4P/KuhP5fPb+cCb6Q4gSehWJg= -github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54 h1:bl2essObOEDwVWci71hJ2QO5AYTsKk6IlzRK0i8y63w= -github.com/multiversx/mx-chain-vm-v1_3-go v1.3.54/go.mod h1:1rgU8wXdn76S7rZx+4YS6ObK+M1XiSdPoPmXVq8fuZE= -github.com/multiversx/mx-chain-vm-v1_4-go v1.4.80 h1:iiOXTcwvfjQXlchlVnSdNeqHYKVn/k7s/MsHfk/wrr0= -github.com/multiversx/mx-chain-vm-v1_4-go v1.4.80/go.mod h1:Be8y+QBPSKacW2TJaaQSeKYNGtCenFt4dpBOAnICAcc= +github.com/multiversx/mx-chain-vm-common-go v1.4.4/go.mod h1:+AjDwO/RJwQ75dzHJ/gBxmi5uTdICdhAo8bGNHTf7Yk= +github.com/multiversx/mx-chain-vm-common-go v1.4.5 h1:/pIMGSGqNJXbfAKOqigU2yapuBlosMCJiu6r+r+XcHE= +github.com/multiversx/mx-chain-vm-common-go v1.4.5/go.mod h1:+AjDwO/RJwQ75dzHJ/gBxmi5uTdICdhAo8bGNHTf7Yk= 
+github.com/multiversx/mx-chain-vm-v1_2-go v1.2.55 h1:jvBLu7JoitavahMDCkfOGYWjgXGBOe+3JJ0hNxj9AZM= +github.com/multiversx/mx-chain-vm-v1_2-go v1.2.55/go.mod h1:jCNgHGyj0JoLAsmijOSVK0G+yphccp9gIKsp/mRguF4= +github.com/multiversx/mx-chain-vm-v1_3-go v1.3.56 h1:VXveqaT/wdipfhIdUHXxFderY3+KxtFEbrDkF+zirr8= +github.com/multiversx/mx-chain-vm-v1_3-go v1.3.56/go.mod h1:guKkvnEDwGPaysZOVa+SaHEyiFDRJkFSVu0VE7jbk4k= +github.com/multiversx/mx-chain-vm-v1_4-go v1.4.82 h1:f0jL0jMPayN+/J/ZoK9sDRLggqvUp+/DJmu0dVTQNq8= +github.com/multiversx/mx-chain-vm-v1_4-go v1.4.82/go.mod h1:tKdkDQXDPFE5vAYOAJOq2iiTibi9KeiasNWsmA4nEmk= github.com/multiversx/mx-components-big-int v0.1.1 h1:695mYPKYOrmGEGgRH4/pZruDoe3CPP1LHrBxKfvj5l4= github.com/multiversx/mx-components-big-int v0.1.1/go.mod h1:0QrcFdfeLgJ/am10HGBeH0G0DNF+0Qx1E4DS/iozQls= github.com/multiversx/protobuf v1.3.2 h1:RaNkxvGTGbA0lMcnHAN24qE1G1i+Xs5yHA6MDvQ4mSM= @@ -767,8 +767,8 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/assertions v1.13.0 h1:Dx1kYM01xsSqKPno3aqLnrwac2LetPvN23diwyr69Qs= -github.com/smartystreets/assertions v1.13.0/go.mod h1:wDmR7qL282YbGsPy6H/yAsesrxfxaaSlJazyFLYVFx8= +github.com/smartystreets/assertions v1.13.1 h1:Ef7KhSmjZcK6AVf9YbJdvPYG9avaF0ZxudX+ThRdWfU= +github.com/smartystreets/assertions v1.13.1/go.mod h1:cXr/IwVfSo/RbCSPhoAPv73p3hlSdrBH/b3SdnW/LMY= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= From 
47b0d251f0616988ec4444423234a5984e68837a Mon Sep 17 00:00:00 2001 From: Iuga Mihai Date: Thu, 25 May 2023 14:42:17 +0300 Subject: [PATCH 216/221] fix encoding --- .../process/transactionsfee/dataHolders.go | 4 +- .../transactionsfee/dataHolders_test.go | 15 ++--- .../transactionsfee/transactionChecker.go | 4 +- .../transactionsFeeProcessor.go | 8 +-- .../transactionsFeeProcessor_test.go | 59 +++++++++++++++++-- 5 files changed, 71 insertions(+), 19 deletions(-) diff --git a/outport/process/transactionsfee/dataHolders.go b/outport/process/transactionsfee/dataHolders.go index 805b645990a..ef2f1a3bba4 100644 --- a/outport/process/transactionsfee/dataHolders.go +++ b/outport/process/transactionsfee/dataHolders.go @@ -1,6 +1,8 @@ package transactionsfee import ( + "encoding/hex" + "github.com/multiversx/mx-chain-core-go/data" outportcore "github.com/multiversx/mx-chain-core-go/data/outport" ) @@ -57,7 +59,7 @@ func prepareTransactionsAndScrs(txPool *outportcore.TransactionPool) *transactio for scrHash, scrHandler := range txPool.SmartContractResults { scr := scrHandler.SmartContractResult - txWithResults, ok := transactionsAndScrs.txsWithResults[string(scr.OriginalTxHash)] + txWithResults, ok := transactionsAndScrs.txsWithResults[hex.EncodeToString(scr.OriginalTxHash)] if !ok { transactionsAndScrs.scrsNoTx[scrHash] = scrHandler continue diff --git a/outport/process/transactionsfee/dataHolders_test.go b/outport/process/transactionsfee/dataHolders_test.go index 36311aec775..141d8da5e89 100644 --- a/outport/process/transactionsfee/dataHolders_test.go +++ b/outport/process/transactionsfee/dataHolders_test.go @@ -1,6 +1,7 @@ package transactionsfee import ( + "encoding/hex" "math/big" "testing" @@ -19,7 +20,7 @@ func TestTransactionsAndScrsHolder(t *testing.T) { scrHash3 := "scrHash3" pool := &outportcore.TransactionPool{ Transactions: map[string]*outportcore.TxInfo{ - txHash: { + hex.EncodeToString([]byte(txHash)): { Transaction: &transaction.Transaction{ Nonce: 1, }, @@ 
-29,7 +30,7 @@ func TestTransactionsAndScrsHolder(t *testing.T) { }, }, SmartContractResults: map[string]*outportcore.SCRInfo{ - scrHash1: { + hex.EncodeToString([]byte(scrHash1)): { SmartContractResult: &smartContractResult.SmartContractResult{ Nonce: 2, OriginalTxHash: []byte(txHash), @@ -39,13 +40,13 @@ func TestTransactionsAndScrsHolder(t *testing.T) { }, }, - scrHash2: { + hex.EncodeToString([]byte(scrHash2)): { SmartContractResult: &smartContractResult.SmartContractResult{}, FeeInfo: &outportcore.FeeInfo{ Fee: big.NewInt(0), }, }, - scrHash3: { + hex.EncodeToString([]byte(scrHash3)): { SmartContractResult: &smartContractResult.SmartContractResult{ Nonce: 3, OriginalTxHash: []byte(txHash), @@ -58,7 +59,7 @@ func TestTransactionsAndScrsHolder(t *testing.T) { Logs: []*outportcore.LogData{ { Log: &transaction.Log{Address: []byte("addr")}, - TxHash: txHash, + TxHash: hex.EncodeToString([]byte(txHash)), }, { Log: &transaction.Log{}, @@ -70,7 +71,7 @@ func TestTransactionsAndScrsHolder(t *testing.T) { res := prepareTransactionsAndScrs(pool) require.NotNil(t, res) require.Equal(t, 1, len(res.txsWithResults)) - require.Equal(t, 2, len(res.txsWithResults[txHash].scrs)) - require.NotNil(t, res.txsWithResults[txHash].log) + require.Equal(t, 2, len(res.txsWithResults[hex.EncodeToString([]byte(txHash))].scrs)) + require.NotNil(t, res.txsWithResults[hex.EncodeToString([]byte(txHash))].log) require.Equal(t, 1, len(res.scrsNoTx)) } diff --git a/outport/process/transactionsfee/transactionChecker.go b/outport/process/transactionsfee/transactionChecker.go index 80435e6f094..710f7ce061f 100644 --- a/outport/process/transactionsfee/transactionChecker.go +++ b/outport/process/transactionsfee/transactionChecker.go @@ -37,10 +37,10 @@ func (tep *transactionsFeeProcessor) isESDTOperationWithSCCall(tx data.Transacti return isESDTTransferOperation && isReceiverSC && hasFunction } -func isSCRForSenderWithRefund(scr *smartContractResult.SmartContractResult, txHash []byte, tx 
data.TransactionHandler) bool { +func isSCRForSenderWithRefund(scr *smartContractResult.SmartContractResult, txHash string, tx data.TransactionHandler) bool { isForSender := bytes.Equal(scr.RcvAddr, tx.GetSndAddr()) isRightNonce := scr.Nonce == tx.GetNonce()+1 - isFromCurrentTx := bytes.Equal(scr.PrevTxHash, txHash) + isFromCurrentTx := hex.EncodeToString(scr.PrevTxHash) == txHash isScrDataOk := isDataOk(scr.Data) return isFromCurrentTx && isForSender && isRightNonce && isScrDataOk diff --git a/outport/process/transactionsfee/transactionsFeeProcessor.go b/outport/process/transactionsfee/transactionsFeeProcessor.go index 864bba9c4bf..593a5d6b83b 100644 --- a/outport/process/transactionsfee/transactionsFeeProcessor.go +++ b/outport/process/transactionsfee/transactionsFeeProcessor.go @@ -98,7 +98,7 @@ func (tep *transactionsFeeProcessor) prepareInvalidTxs(pool *outportcore.Transac } func (tep *transactionsFeeProcessor) prepareNormalTxs(transactionsAndScrs *transactionsAndScrsHolder) { - for txHash, txWithResult := range transactionsAndScrs.txsWithResults { + for txHashHex, txWithResult := range transactionsAndScrs.txsWithResults { txHandler := txWithResult.GetTxHandler() gasUsed := tep.txFeeCalculator.ComputeGasLimit(txHandler) @@ -115,11 +115,11 @@ func (tep *transactionsFeeProcessor) prepareNormalTxs(transactionsAndScrs *trans feeInfo.SetFee(initialPaidFee) } - tep.prepareTxWithResults([]byte(txHash), txWithResult) + tep.prepareTxWithResults(txHashHex, txWithResult) } } -func (tep *transactionsFeeProcessor) prepareTxWithResults(txHash []byte, txWithResults *transactionWithResults) { +func (tep *transactionsFeeProcessor) prepareTxWithResults(txHashHex string, txWithResults *transactionWithResults) { hasRefund := false for _, scrHandler := range txWithResults.scrs { scr, ok := scrHandler.GetTxHandler().(*smartContractResult.SmartContractResult) @@ -127,7 +127,7 @@ func (tep *transactionsFeeProcessor) prepareTxWithResults(txHash []byte, txWithR continue } - if 
isSCRForSenderWithRefund(scr, txHash, txWithResults.GetTxHandler()) || isRefundForRelayed(scr, txWithResults.GetTxHandler()) { + if isSCRForSenderWithRefund(scr, txHashHex, txWithResults.GetTxHandler()) || isRefundForRelayed(scr, txWithResults.GetTxHandler()) { gasUsed, fee := tep.txFeeCalculator.ComputeGasUsedAndFeeBasedOnRefundValue(txWithResults.GetTxHandler(), scr.Value) txWithResults.GetFeeInfo().SetGasUsed(gasUsed) diff --git a/outport/process/transactionsfee/transactionsFeeProcessor_test.go b/outport/process/transactionsfee/transactionsFeeProcessor_test.go index a2c06d3e6fa..4495b1d0c75 100644 --- a/outport/process/transactionsfee/transactionsFeeProcessor_test.go +++ b/outport/process/transactionsfee/transactionsFeeProcessor_test.go @@ -1,6 +1,7 @@ package transactionsfee import ( + "encoding/hex" "math/big" "testing" @@ -91,16 +92,16 @@ func TestPutFeeAndGasUsedTx1(t *testing.T) { pool := &outportcore.TransactionPool{ Transactions: map[string]*outportcore.TxInfo{ - string(txHash): initialTx, + hex.EncodeToString(txHash): initialTx, }, SmartContractResults: map[string]*outportcore.SCRInfo{ - string(scrHash1): { + hex.EncodeToString(scrHash1): { SmartContractResult: scr1, FeeInfo: &outportcore.FeeInfo{ Fee: big.NewInt(0), }, }, - string(scrWithRefund): { + hex.EncodeToString(scrWithRefund): { SmartContractResult: &smartContractResult.SmartContractResult{ Nonce: 3, SndAddr: []byte("erd1qqqqqqqqqqqqqpgq3dswlnnlkfd3gqrcv3dhzgnvh8ryf27g5rfsecnn2s"), @@ -314,10 +315,10 @@ func TestPutFeeAndGasUsedWrongRelayedTx(t *testing.T) { pool := &outportcore.TransactionPool{ Transactions: map[string]*outportcore.TxInfo{ - string(txHash): initialTx, + hex.EncodeToString(txHash): initialTx, }, SmartContractResults: map[string]*outportcore.SCRInfo{ - string(scrHash1): scr1, + hex.EncodeToString(scrHash1): scr1, }, } @@ -470,3 +471,51 @@ func TestPutFeeAndGasUsedScrWithRefundNotForInitialSender(t *testing.T) { require.Equal(t, big.NewInt(0), scr.GetFeeInfo().GetFee()) 
require.Equal(t, uint64(0), scr.GetFeeInfo().GetGasUsed()) } + +func TestPutFeeAndGasUsedScrWithRefund(t *testing.T) { + t.Parallel() + + txHash := []byte("e3cdb8b4936fdbee2d3b1244b4c49959df5f90ada683d650019d244e5a64afaf") + scrWithRefund := []byte("scrWithRefund") + + initialTx := &outportcore.TxInfo{Transaction: &transaction.Transaction{ + Nonce: 1004, + GasLimit: 60_000_000, + GasPrice: 1000000000, + SndAddr: []byte("erd1s8jr8e8hsvv7c9ehmshcjlpzf9ua5l50qeswa8feshrp6xlz9c7quacmtx"), + }, FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}} + + refundValueBig, _ := big.NewInt(0).SetString("96635000000000", 10) + + scr := &outportcore.SCRInfo{ + SmartContractResult: &smartContractResult.SmartContractResult{ + Nonce: 1005, + SndAddr: []byte("erd1qqqqqqqqqqqqqqqpqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqzllls8a5w6u"), + RcvAddr: []byte("erd1s8jr8e8hsvv7c9ehmshcjlpzf9ua5l50qeswa8feshrp6xlz9c7quacmtx"), + PrevTxHash: txHash, + OriginalTxHash: txHash, + Value: refundValueBig, + Data: []byte("@6f6b"), + }, + FeeInfo: &outportcore.FeeInfo{Fee: big.NewInt(0)}, + } + + pool := &outportcore.TransactionPool{ + SmartContractResults: map[string]*outportcore.SCRInfo{ + hex.EncodeToString(scrWithRefund): scr, + }, + Transactions: map[string]*outportcore.TxInfo{ + hex.EncodeToString(txHash): initialTx, + }, + } + + arg := prepareMockArg() + txsFeeProc, err := NewTransactionsFeeProcessor(arg) + require.NotNil(t, txsFeeProc) + require.Nil(t, err) + + err = txsFeeProc.PutFeeAndGasUsed(pool) + require.Nil(t, err) + require.Equal(t, big.NewInt(552865000000000), initialTx.GetFeeInfo().GetFee()) + require.Equal(t, uint64(50_336_500), initialTx.GetFeeInfo().GetGasUsed()) +} From 84e1501ee4103d3d54e1e04ed0cc2d9a7b3ddc5a Mon Sep 17 00:00:00 2001 From: Iuga Mihai Date: Thu, 25 May 2023 14:51:13 +0300 Subject: [PATCH 217/221] latest version of the indexer --- go.mod | 2 +- go.sum | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 6e147be7fcd..90a37b58bc3 100644 
--- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/multiversx/mx-chain-communication-go v1.0.2 github.com/multiversx/mx-chain-core-go v1.2.5 github.com/multiversx/mx-chain-crypto-go v1.2.6 - github.com/multiversx/mx-chain-es-indexer-go v1.4.3 + github.com/multiversx/mx-chain-es-indexer-go v1.4.4-0.20230525114822-b11a612035cd github.com/multiversx/mx-chain-logger-go v1.0.11 github.com/multiversx/mx-chain-storage-go v1.0.10 github.com/multiversx/mx-chain-vm-common-go v1.4.5 diff --git a/go.sum b/go.sum index ebd14c9a723..c823f1ebcc7 100644 --- a/go.sum +++ b/go.sum @@ -616,7 +616,6 @@ github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2 github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiversx/concurrent-map v0.1.4 h1:hdnbM8VE4b0KYJaGY5yJS2aNIW9TFFsUYwbO0993uPI= github.com/multiversx/concurrent-map v0.1.4/go.mod h1:8cWFRJDOrWHOTNSqgYCUvwT7c7eFQ4U2vKMOp4A/9+o= -github.com/multiversx/mx-chain-communication-go v1.0.0/go.mod h1:GPHOm4HSXbvC0IotMziWXQmhtsUe69ScBPYsb+mF9bk= github.com/multiversx/mx-chain-communication-go v1.0.2 h1:1AKdqFZNmigt1kcwYMl+L8fzolsb+WpeTX6yzpmvbV4= github.com/multiversx/mx-chain-communication-go v1.0.2/go.mod h1:OnnSUjnSP87H5MtQtxn33FGnTVRVgo2Huo3ijmCgN2M= github.com/multiversx/mx-chain-core-go v1.1.30/go.mod h1:8gGEQv6BWuuJwhd25qqhCOZbBSv9mk+hLeKvinSaSMk= @@ -627,8 +626,8 @@ github.com/multiversx/mx-chain-core-go v1.2.5 h1:uIZSqRygJAxv+pGuZnoSMwS4t10C/pa github.com/multiversx/mx-chain-core-go v1.2.5/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= github.com/multiversx/mx-chain-crypto-go v1.2.6 h1:yxsjAQGh62los+iYmORMfh3w9qen0xbYlmwU0juNSeg= github.com/multiversx/mx-chain-crypto-go v1.2.6/go.mod h1:rOj0Rr19HTOYt9YTeym7RKxlHt91NXln3LVKjHKVmA0= -github.com/multiversx/mx-chain-es-indexer-go v1.4.3 h1:s6eX2dJSr/yjbGiF00Q68ar0jcvGkBm+ZzEa8/tpHzM= -github.com/multiversx/mx-chain-es-indexer-go v1.4.3/go.mod 
h1:b2TVf5kCmmFQUjagI962YaKa2uqOEMn7dbTsiE/0J6U= +github.com/multiversx/mx-chain-es-indexer-go v1.4.4-0.20230525114822-b11a612035cd h1:fF7/d7eEU7rsi8+5wGz6R8gquEUUMecR5nrnI8rEG6Y= +github.com/multiversx/mx-chain-es-indexer-go v1.4.4-0.20230525114822-b11a612035cd/go.mod h1:IAFuU3LhjVfs3+Sf4T3BlNjY1TmZHWovHRhV7tfR8cw= github.com/multiversx/mx-chain-logger-go v1.0.11 h1:DFsHa+sc5fKwhDR50I8uBM99RTDTEW68ESyr5ALRDwE= github.com/multiversx/mx-chain-logger-go v1.0.11/go.mod h1:1srDkP0DQucWQ+rYfaq0BX2qLnULsUdRPADpYUTM6dA= github.com/multiversx/mx-chain-storage-go v1.0.8/go.mod h1:lEkFYFe6taKYxqO1einNgT1esY3K9Pj6vPnoCwV9C3U= From 5f5fecea19fb31f9040ea973a3c28f4e4ce8f388 Mon Sep 17 00:00:00 2001 From: Iuga Mihai Date: Thu, 25 May 2023 15:46:50 +0300 Subject: [PATCH 218/221] fixes after review --- .../transactionsfee/dataHolders_test.go | 26 +++++++++---------- .../transactionsfee/transactionChecker.go | 4 +-- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/outport/process/transactionsfee/dataHolders_test.go b/outport/process/transactionsfee/dataHolders_test.go index 141d8da5e89..00ddb466575 100644 --- a/outport/process/transactionsfee/dataHolders_test.go +++ b/outport/process/transactionsfee/dataHolders_test.go @@ -14,13 +14,13 @@ import ( func TestTransactionsAndScrsHolder(t *testing.T) { t.Parallel() - txHash := "txHash" - scrHash1 := "scrHash1" - scrHash2 := "scrHash2" - scrHash3 := "scrHash3" + txHash := []byte("txHash") + scrHash1 := []byte("scrHash1") + scrHash2 := []byte("scrHash2") + scrHash3 := []byte("scrHash3") pool := &outportcore.TransactionPool{ Transactions: map[string]*outportcore.TxInfo{ - hex.EncodeToString([]byte(txHash)): { + hex.EncodeToString(txHash): { Transaction: &transaction.Transaction{ Nonce: 1, }, @@ -30,26 +30,26 @@ func TestTransactionsAndScrsHolder(t *testing.T) { }, }, SmartContractResults: map[string]*outportcore.SCRInfo{ - hex.EncodeToString([]byte(scrHash1)): { + hex.EncodeToString(scrHash1): { SmartContractResult: 
&smartContractResult.SmartContractResult{ Nonce: 2, - OriginalTxHash: []byte(txHash), + OriginalTxHash: txHash, }, FeeInfo: &outportcore.FeeInfo{ Fee: big.NewInt(0), }, }, - hex.EncodeToString([]byte(scrHash2)): { + hex.EncodeToString(scrHash2): { SmartContractResult: &smartContractResult.SmartContractResult{}, FeeInfo: &outportcore.FeeInfo{ Fee: big.NewInt(0), }, }, - hex.EncodeToString([]byte(scrHash3)): { + hex.EncodeToString(scrHash3): { SmartContractResult: &smartContractResult.SmartContractResult{ Nonce: 3, - OriginalTxHash: []byte(txHash), + OriginalTxHash: txHash, }, FeeInfo: &outportcore.FeeInfo{ Fee: big.NewInt(0), @@ -59,7 +59,7 @@ func TestTransactionsAndScrsHolder(t *testing.T) { Logs: []*outportcore.LogData{ { Log: &transaction.Log{Address: []byte("addr")}, - TxHash: hex.EncodeToString([]byte(txHash)), + TxHash: hex.EncodeToString(txHash), }, { Log: &transaction.Log{}, @@ -71,7 +71,7 @@ func TestTransactionsAndScrsHolder(t *testing.T) { res := prepareTransactionsAndScrs(pool) require.NotNil(t, res) require.Equal(t, 1, len(res.txsWithResults)) - require.Equal(t, 2, len(res.txsWithResults[hex.EncodeToString([]byte(txHash))].scrs)) - require.NotNil(t, res.txsWithResults[hex.EncodeToString([]byte(txHash))].log) + require.Equal(t, 2, len(res.txsWithResults[hex.EncodeToString(txHash)].scrs)) + require.NotNil(t, res.txsWithResults[hex.EncodeToString(txHash)].log) require.Equal(t, 1, len(res.scrsNoTx)) } diff --git a/outport/process/transactionsfee/transactionChecker.go b/outport/process/transactionsfee/transactionChecker.go index 710f7ce061f..546fdd9f432 100644 --- a/outport/process/transactionsfee/transactionChecker.go +++ b/outport/process/transactionsfee/transactionChecker.go @@ -37,10 +37,10 @@ func (tep *transactionsFeeProcessor) isESDTOperationWithSCCall(tx data.Transacti return isESDTTransferOperation && isReceiverSC && hasFunction } -func isSCRForSenderWithRefund(scr *smartContractResult.SmartContractResult, txHash string, tx data.TransactionHandler) 
bool { +func isSCRForSenderWithRefund(scr *smartContractResult.SmartContractResult, txHashHex string, tx data.TransactionHandler) bool { isForSender := bytes.Equal(scr.RcvAddr, tx.GetSndAddr()) isRightNonce := scr.Nonce == tx.GetNonce()+1 - isFromCurrentTx := hex.EncodeToString(scr.PrevTxHash) == txHash + isFromCurrentTx := hex.EncodeToString(scr.PrevTxHash) == txHashHex isScrDataOk := isDataOk(scr.Data) return isFromCurrentTx && isForSender && isRightNonce && isScrDataOk From 0a981e6258ceffa667d0f19966ea1408a9f5817c Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu Date: Thu, 25 May 2023 17:33:39 +0300 Subject: [PATCH 219/221] fix after merge --- dataRetriever/storageRequesters/trieNodeRequester_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dataRetriever/storageRequesters/trieNodeRequester_test.go b/dataRetriever/storageRequesters/trieNodeRequester_test.go index 8bbf0fc4f3a..7fd87cf6dc2 100644 --- a/dataRetriever/storageRequesters/trieNodeRequester_test.go +++ b/dataRetriever/storageRequesters/trieNodeRequester_test.go @@ -199,7 +199,7 @@ func TestTrieNodeRequester_Close(t *testing.T) { t.Parallel() args := createMockTrieRequesterArguments() - args.TrieStorageManager = &testscommon.StorageManagerStub{ + args.TrieStorageManager = &storageManager.StorageManagerStub{ CloseCalled: func() error { return expectedErr }, From 97b43f0f458231df5975cf50eda13e65a2eb0073 Mon Sep 17 00:00:00 2001 From: Iuga Mihai Date: Fri, 26 May 2023 14:47:32 +0300 Subject: [PATCH 220/221] proper tag --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 90a37b58bc3..7e6cebc7912 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/multiversx/mx-chain-communication-go v1.0.2 github.com/multiversx/mx-chain-core-go v1.2.5 github.com/multiversx/mx-chain-crypto-go v1.2.6 - github.com/multiversx/mx-chain-es-indexer-go v1.4.4-0.20230525114822-b11a612035cd + 
github.com/multiversx/mx-chain-es-indexer-go v1.4.4 github.com/multiversx/mx-chain-logger-go v1.0.11 github.com/multiversx/mx-chain-storage-go v1.0.10 github.com/multiversx/mx-chain-vm-common-go v1.4.5 diff --git a/go.sum b/go.sum index c823f1ebcc7..82a1022faa0 100644 --- a/go.sum +++ b/go.sum @@ -626,8 +626,8 @@ github.com/multiversx/mx-chain-core-go v1.2.5 h1:uIZSqRygJAxv+pGuZnoSMwS4t10C/pa github.com/multiversx/mx-chain-core-go v1.2.5/go.mod h1:jzYFSiYBuO0dGpGFXnZWSwcwcKP7Flyn/X41y4zIQrQ= github.com/multiversx/mx-chain-crypto-go v1.2.6 h1:yxsjAQGh62los+iYmORMfh3w9qen0xbYlmwU0juNSeg= github.com/multiversx/mx-chain-crypto-go v1.2.6/go.mod h1:rOj0Rr19HTOYt9YTeym7RKxlHt91NXln3LVKjHKVmA0= -github.com/multiversx/mx-chain-es-indexer-go v1.4.4-0.20230525114822-b11a612035cd h1:fF7/d7eEU7rsi8+5wGz6R8gquEUUMecR5nrnI8rEG6Y= -github.com/multiversx/mx-chain-es-indexer-go v1.4.4-0.20230525114822-b11a612035cd/go.mod h1:IAFuU3LhjVfs3+Sf4T3BlNjY1TmZHWovHRhV7tfR8cw= +github.com/multiversx/mx-chain-es-indexer-go v1.4.4 h1:3k8pB1AEILlNXL2ggSnP43uqVBQQg3hbx7351IcGbh0= +github.com/multiversx/mx-chain-es-indexer-go v1.4.4/go.mod h1:IAFuU3LhjVfs3+Sf4T3BlNjY1TmZHWovHRhV7tfR8cw= github.com/multiversx/mx-chain-logger-go v1.0.11 h1:DFsHa+sc5fKwhDR50I8uBM99RTDTEW68ESyr5ALRDwE= github.com/multiversx/mx-chain-logger-go v1.0.11/go.mod h1:1srDkP0DQucWQ+rYfaq0BX2qLnULsUdRPADpYUTM6dA= github.com/multiversx/mx-chain-storage-go v1.0.8/go.mod h1:lEkFYFe6taKYxqO1einNgT1esY3K9Pj6vPnoCwV9C3U= From 179871977745540ff59cebc139623570df6514cb Mon Sep 17 00:00:00 2001 From: ssd04 Date: Mon, 29 May 2023 15:00:18 +0300 Subject: [PATCH 221/221] fixes after merge --- state/syncer/baseAccoutnsSyncer_test.go | 5 +- state/syncer/export_test.go | 5 +- state/syncer/userAccountSyncer_test.go | 110 ----------------- state/syncer/userAccountsSyncer.go | 12 +- state/syncer/userAccountsSyncer_test.go | 121 +++++++++++++++---- state/syncer/validatorAccountsSyncer.go | 6 +- state/syncer/validatorAccountsSyncer_test.go | 83 
++++++------- 7 files changed, 147 insertions(+), 195 deletions(-) delete mode 100644 state/syncer/userAccountSyncer_test.go diff --git a/state/syncer/baseAccoutnsSyncer_test.go b/state/syncer/baseAccoutnsSyncer_test.go index 12ba52df5fe..de71219d74b 100644 --- a/state/syncer/baseAccoutnsSyncer_test.go +++ b/state/syncer/baseAccoutnsSyncer_test.go @@ -9,7 +9,7 @@ import ( "github.com/multiversx/mx-chain-go/testscommon" "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" "github.com/multiversx/mx-chain-go/testscommon/statusHandler" - "github.com/multiversx/mx-chain-go/trie/storageMarker" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" "github.com/stretchr/testify/require" ) @@ -17,8 +17,7 @@ func getDefaultBaseAccSyncerArgs() syncer.ArgsNewBaseAccountsSyncer { return syncer.ArgsNewBaseAccountsSyncer{ Hasher: &hashingMocks.HasherMock{}, Marshalizer: testscommon.MarshalizerMock{}, - TrieStorageManager: &testscommon.StorageManagerStub{}, - StorageMarker: storageMarker.NewDisabledStorageMarker(), + TrieStorageManager: &storageManager.StorageManagerStub{}, RequestHandler: &testscommon.RequestHandlerStub{}, Timeout: time.Second, Cacher: testscommon.NewCacherMock(), diff --git a/state/syncer/export_test.go b/state/syncer/export_test.go index e8fc953e73a..cfd917aba66 100644 --- a/state/syncer/export_test.go +++ b/state/syncer/export_test.go @@ -1,6 +1,5 @@ package syncer -<<<<<<< HEAD import ( "context" @@ -24,9 +23,9 @@ func (u *userAccountsSyncer) SyncAccountDataTries( ctx context.Context, ) error { return u.syncAccountDataTries(leavesChannels, ctx) -======= +} + // GetNumHandlers - func (mtnn *missingTrieNodesNotifier) GetNumHandlers() int { return len(mtnn.handlers) ->>>>>>> rc/v1.6.0 } diff --git a/state/syncer/userAccountSyncer_test.go b/state/syncer/userAccountSyncer_test.go deleted file mode 100644 index 904ce00b326..00000000000 --- a/state/syncer/userAccountSyncer_test.go +++ /dev/null @@ -1,110 +0,0 @@ -package syncer - -import ( - 
"testing" - "time" - - "github.com/multiversx/mx-chain-go/dataRetriever/mock" - "github.com/multiversx/mx-chain-go/testscommon" - "github.com/multiversx/mx-chain-go/testscommon/hashingMocks" - "github.com/multiversx/mx-chain-go/testscommon/statusHandler" - "github.com/multiversx/mx-chain-go/testscommon/storageManager" - "github.com/multiversx/mx-chain-go/trie" - "github.com/stretchr/testify/assert" -) - -// TODO add more tests - -func getDefaultBaseAccSyncerArgs() ArgsNewBaseAccountsSyncer { - return ArgsNewBaseAccountsSyncer{ - Hasher: &hashingMocks.HasherMock{}, - Marshalizer: testscommon.MarshalizerMock{}, - TrieStorageManager: &storageManager.StorageManagerStub{}, - RequestHandler: &testscommon.RequestHandlerStub{}, - Timeout: time.Second, - Cacher: testscommon.NewCacherMock(), - UserAccountsSyncStatisticsHandler: &testscommon.SizeSyncStatisticsHandlerStub{}, - AppStatusHandler: &statusHandler.AppStatusHandlerStub{}, - MaxTrieLevelInMemory: 0, - MaxHardCapForMissingNodes: 100, - TrieSyncerVersion: 2, - CheckNodesOnDisk: false, - } -} - -func TestUserAccountsSyncer_SyncAccounts(t *testing.T) { - t.Parallel() - - args := ArgsNewUserAccountsSyncer{ - ArgsNewBaseAccountsSyncer: getDefaultBaseAccSyncerArgs(), - ShardId: 0, - Throttler: &mock.ThrottlerStub{}, - AddressPubKeyConverter: &testscommon.PubkeyConverterStub{}, - } - syncer, err := NewUserAccountsSyncer(args) - assert.Nil(t, err) - assert.NotNil(t, syncer) - - err = syncer.SyncAccounts([]byte("rootHash"), nil) - assert.Equal(t, ErrNilStorageMarker, err) -} - -func TestUserAccountsSyncer_MissingDataTrieNodeFound(t *testing.T) { - t.Parallel() - - numNodesSynced := 0 - numProcessedCalled := 0 - setNumMissingCalled := 0 - args := ArgsNewUserAccountsSyncer{ - ArgsNewBaseAccountsSyncer: getDefaultBaseAccSyncerArgs(), - ShardId: 0, - Throttler: &mock.ThrottlerStub{}, - AddressPubKeyConverter: &testscommon.PubkeyConverterStub{}, - } - args.TrieStorageManager = &storageManager.StorageManagerStub{ - PutInEpochCalled: 
func(_ []byte, _ []byte, _ uint32) error { - numNodesSynced++ - return nil - }, - } - args.UserAccountsSyncStatisticsHandler = &testscommon.SizeSyncStatisticsHandlerStub{ - AddNumProcessedCalled: func(value int) { - numProcessedCalled++ - }, - SetNumMissingCalled: func(rootHash []byte, value int) { - setNumMissingCalled++ - assert.Equal(t, 0, value) - }, - } - - var serializedLeafNode []byte - tsm := &storageManager.StorageManagerStub{ - PutCalled: func(key []byte, val []byte) error { - serializedLeafNode = val - return nil - }, - } - - tr, _ := trie.NewTrie(tsm, args.Marshalizer, args.Hasher, 5) - key := []byte("key") - value := []byte("value") - _ = tr.Update(key, value) - rootHash, _ := tr.RootHash() - _ = tr.Commit() - - args.Cacher = &testscommon.CacherStub{ - GetCalled: func(key []byte) (value interface{}, ok bool) { - interceptedNode, _ := trie.NewInterceptedTrieNode(serializedLeafNode, args.Hasher) - return interceptedNode, true - }, - } - - syncer, _ := NewUserAccountsSyncer(args) - // test that timeout watchdog is reset - time.Sleep(args.Timeout * 2) - syncer.MissingDataTrieNodeFound(rootHash) - - assert.Equal(t, 1, numNodesSynced) - assert.Equal(t, 1, numProcessedCalled) - assert.Equal(t, 1, setNumMissingCalled) -} diff --git a/state/syncer/userAccountsSyncer.go b/state/syncer/userAccountsSyncer.go index e3254fc505d..f503849f943 100644 --- a/state/syncer/userAccountsSyncer.go +++ b/state/syncer/userAccountsSyncer.go @@ -150,13 +150,9 @@ func (u *userAccountsSyncer) SyncAccounts(rootHash []byte, storageMarker common. 
return err } -<<<<<<< HEAD - u.storageMarker.MarkStorerAsSyncedAndActive(u.trieStorageManager) + storageMarker.MarkStorerAsSyncedAndActive(u.trieStorageManager) log.Debug("main trie and data tries synced", "main trie root hash", rootHash, "num data tries", len(u.dataTries)) -======= - storageMarker.MarkStorerAsSyncedAndActive(mainTrie.GetStorageManager()) ->>>>>>> rc/v1.6.0 return nil } @@ -198,12 +194,8 @@ func (u *userAccountsSyncer) createAndStartSyncer( TrieSyncStatistics: u.userAccountsSyncStatisticsHandler, TimeoutHandler: u.timeoutHandler, MaxHardCapForMissingNodes: u.maxHardCapForMissingNodes, -<<<<<<< HEAD - CheckNodesOnDisk: u.checkNodesOnDisk, - LeavesChan: nil, // not used for data tries -======= CheckNodesOnDisk: checkNodesOnDisk, ->>>>>>> rc/v1.6.0 + LeavesChan: nil, // not used for data tries } trieSyncer, err := trie.CreateTrieSyncer(arg, u.trieSyncerVersion) if err != nil { diff --git a/state/syncer/userAccountsSyncer_test.go b/state/syncer/userAccountsSyncer_test.go index 51184d76d91..8f1ca462be3 100644 --- a/state/syncer/userAccountsSyncer_test.go +++ b/state/syncer/userAccountsSyncer_test.go @@ -17,9 +17,11 @@ import ( "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/syncer" "github.com/multiversx/mx-chain-go/testscommon" + "github.com/multiversx/mx-chain-go/testscommon/storageManager" "github.com/multiversx/mx-chain-go/trie" "github.com/multiversx/mx-chain-go/trie/hashesHolder" "github.com/multiversx/mx-chain-go/trie/keyBuilder" + "github.com/multiversx/mx-chain-go/trie/storageMarker" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -96,7 +98,7 @@ func getSerializedTrieNode( hasher hashing.Hasher, ) []byte { var serializedLeafNode []byte - tsm := &testscommon.StorageManagerStub{ + tsm := &storageManager.StorageManagerStub{ PutCalled: func(key []byte, val []byte) error { serializedLeafNode = val return nil @@ -113,29 +115,45 @@ func getSerializedTrieNode( func 
TestUserAccountsSyncer_SyncAccounts(t *testing.T) { t.Parallel() - args := getDefaultUserAccountsSyncerArgs() - args.Timeout = 5 * time.Second + t.Run("nil storage marker", func(t *testing.T) { + t.Parallel() - key := []byte("rootHash") - serializedLeafNode := getSerializedTrieNode(key, args.Marshalizer, args.Hasher) - itn, err := trie.NewInterceptedTrieNode(serializedLeafNode, args.Hasher) - require.Nil(t, err) + args := getDefaultUserAccountsSyncerArgs() + s, err := syncer.NewUserAccountsSyncer(args) + assert.Nil(t, err) + assert.NotNil(t, s) - args.TrieStorageManager = &testscommon.StorageManagerStub{ - GetCalled: func(b []byte) ([]byte, error) { - return serializedLeafNode, nil - }, - } + err = s.SyncAccounts([]byte("rootHash"), nil) + assert.Equal(t, syncer.ErrNilStorageMarker, err) + }) - cacher := testscommon.NewCacherMock() - cacher.Put(key, itn, 0) - args.Cacher = cacher + t.Run("should work", func(t *testing.T) { + t.Parallel() - s, err := syncer.NewUserAccountsSyncer(args) - require.Nil(t, err) + args := getDefaultUserAccountsSyncerArgs() + args.Timeout = 5 * time.Second - err = s.SyncAccounts(key) - require.Nil(t, err) + key := []byte("rootHash") + serializedLeafNode := getSerializedTrieNode(key, args.Marshalizer, args.Hasher) + itn, err := trie.NewInterceptedTrieNode(serializedLeafNode, args.Hasher) + require.Nil(t, err) + + args.TrieStorageManager = &storageManager.StorageManagerStub{ + GetCalled: func(b []byte) ([]byte, error) { + return serializedLeafNode, nil + }, + } + + cacher := testscommon.NewCacherMock() + cacher.Put(key, itn, 0) + args.Cacher = cacher + + s, err := syncer.NewUserAccountsSyncer(args) + require.Nil(t, err) + + err = s.SyncAccounts(key, storageMarker.NewDisabledStorageMarker()) + require.Nil(t, err) + }) } func getDefaultTrieParameters() (common.StorageManager, marshal.Marshalizer, hashing.Hasher, uint) { @@ -156,6 +174,7 @@ func getDefaultTrieParameters() (common.StorageManager, marshal.Marshalizer, has GeneralConfig: 
generalCfg, CheckpointHashesHolder: hashesHolder.NewCheckpointHashesHolder(10000000, testscommon.HashSize), IdleProvider: &testscommon.ProcessStatusHandlerStub{}, + Identifier: "identifier", } trieStorageManager, _ := trie.NewTrieStorageManager(args) @@ -195,7 +214,7 @@ func TestUserAccountsSyncer_SyncAccountDataTries(t *testing.T) { itn, err := trie.NewInterceptedTrieNode(serializedLeafNode, args.Hasher) require.Nil(t, err) - args.TrieStorageManager = &testscommon.StorageManagerStub{ + args.TrieStorageManager = &storageManager.StorageManagerStub{ GetCalled: func(b []byte) ([]byte, error) { return serializedLeafNode, nil }, @@ -257,7 +276,7 @@ func TestUserAccountsSyncer_SyncAccountDataTries(t *testing.T) { itn, err := trie.NewInterceptedTrieNode(serializedLeafNode, args.Hasher) require.Nil(t, err) - args.TrieStorageManager = &testscommon.StorageManagerStub{ + args.TrieStorageManager = &storageManager.StorageManagerStub{ GetCalled: func(b []byte) ([]byte, error) { return serializedLeafNode, nil }, @@ -301,6 +320,66 @@ func TestUserAccountsSyncer_SyncAccountDataTries(t *testing.T) { }) } +func TestUserAccountsSyncer_MissingDataTrieNodeFound(t *testing.T) { + t.Parallel() + + numNodesSynced := 0 + numProcessedCalled := 0 + setNumMissingCalled := 0 + args := syncer.ArgsNewUserAccountsSyncer{ + ArgsNewBaseAccountsSyncer: getDefaultBaseAccSyncerArgs(), + ShardId: 0, + Throttler: &mock.ThrottlerStub{}, + AddressPubKeyConverter: &testscommon.PubkeyConverterStub{}, + } + args.TrieStorageManager = &storageManager.StorageManagerStub{ + PutInEpochCalled: func(_ []byte, _ []byte, _ uint32) error { + numNodesSynced++ + return nil + }, + } + args.UserAccountsSyncStatisticsHandler = &testscommon.SizeSyncStatisticsHandlerStub{ + AddNumProcessedCalled: func(value int) { + numProcessedCalled++ + }, + SetNumMissingCalled: func(rootHash []byte, value int) { + setNumMissingCalled++ + assert.Equal(t, 0, value) + }, + } + + var serializedLeafNode []byte + tsm := 
&storageManager.StorageManagerStub{ + PutCalled: func(key []byte, val []byte) error { + serializedLeafNode = val + return nil + }, + } + + tr, _ := trie.NewTrie(tsm, args.Marshalizer, args.Hasher, 5) + key := []byte("key") + value := []byte("value") + _ = tr.Update(key, value) + rootHash, _ := tr.RootHash() + _ = tr.Commit() + + args.Cacher = &testscommon.CacherStub{ + GetCalled: func(key []byte) (value interface{}, ok bool) { + interceptedNode, _ := trie.NewInterceptedTrieNode(serializedLeafNode, args.Hasher) + return interceptedNode, true + }, + } + + syncer, _ := syncer.NewUserAccountsSyncer(args) + // test that timeout watchdog is reset + time.Sleep(args.Timeout * 2) + syncer.MissingDataTrieNodeFound(rootHash) + + assert.Equal(t, 1, numNodesSynced) + assert.Equal(t, 1, numProcessedCalled) + assert.Equal(t, 1, setNumMissingCalled) +} + func TestUserAccountsSyncer_IsInterfaceNil(t *testing.T) { t.Parallel() diff --git a/state/syncer/validatorAccountsSyncer.go b/state/syncer/validatorAccountsSyncer.go index 048704ba489..db70df18930 100644 --- a/state/syncer/validatorAccountsSyncer.go +++ b/state/syncer/validatorAccountsSyncer.go @@ -88,11 +88,7 @@ func (v *validatorAccountsSyncer) SyncAccounts(rootHash []byte, storageMarker co return err } -<<<<<<< HEAD - v.storageMarker.MarkStorerAsSyncedAndActive(v.trieStorageManager) -======= - storageMarker.MarkStorerAsSyncedAndActive(mainTrie.GetStorageManager()) ->>>>>>> rc/v1.6.0 + storageMarker.MarkStorerAsSyncedAndActive(v.trieStorageManager) return nil } diff --git a/state/syncer/validatorAccountsSyncer_test.go b/state/syncer/validatorAccountsSyncer_test.go index 30106c8a4ba..b4a025883f1 100644 --- a/state/syncer/validatorAccountsSyncer_test.go +++ b/state/syncer/validatorAccountsSyncer_test.go @@ -1,4 +1,3 @@ -<<<<<<< HEAD package syncer_test import ( @@ -9,7 +8,9 @@ import ( "github.com/multiversx/mx-chain-go/state" "github.com/multiversx/mx-chain-go/state/syncer" "github.com/multiversx/mx-chain-go/testscommon" + 
"github.com/multiversx/mx-chain-go/testscommon/storageManager" "github.com/multiversx/mx-chain-go/trie" + "github.com/multiversx/mx-chain-go/trie/storageMarker" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -54,46 +55,54 @@ func TestNewValidatorAccountsSyncer(t *testing.T) { require.NotNil(t, v) }) } -======= -package syncer -import ( - "testing" +func TestValidatorAccountsSyncer_SyncAccounts(t *testing.T) { + t.Parallel() - "github.com/stretchr/testify/assert" -) + key := []byte("rootHash") -// TODO add more tests ->>>>>>> rc/v1.6.0 + t.Run("nil storage marker", func(t *testing.T) { + t.Parallel() -func TestValidatorAccountsSyncer_SyncAccounts(t *testing.T) { - t.Parallel() + args := syncer.ArgsNewValidatorAccountsSyncer{ + ArgsNewBaseAccountsSyncer: getDefaultBaseAccSyncerArgs(), + } -<<<<<<< HEAD - args := syncer.ArgsNewValidatorAccountsSyncer{ - ArgsNewBaseAccountsSyncer: getDefaultBaseAccSyncerArgs(), - } + v, err := syncer.NewValidatorAccountsSyncer(args) + require.Nil(t, err) + require.NotNil(t, v) - key := []byte("rootHash") - serializedLeafNode := getSerializedTrieNode(key, args.Marshalizer, args.Hasher) - itn, err := trie.NewInterceptedTrieNode(serializedLeafNode, args.Hasher) - require.Nil(t, err) + err = v.SyncAccounts(key, nil) + require.Equal(t, syncer.ErrNilStorageMarker, err) + }) - args.TrieStorageManager = &testscommon.StorageManagerStub{ - GetCalled: func(b []byte) ([]byte, error) { - return serializedLeafNode, nil - }, - } + t.Run("should work", func(t *testing.T) { + t.Parallel() - cacher := testscommon.NewCacherMock() - cacher.Put(key, itn, 0) - args.Cacher = cacher + args := syncer.ArgsNewValidatorAccountsSyncer{ + ArgsNewBaseAccountsSyncer: getDefaultBaseAccSyncerArgs(), + } - v, err := syncer.NewValidatorAccountsSyncer(args) - require.Nil(t, err) + serializedLeafNode := getSerializedTrieNode(key, args.Marshalizer, args.Hasher) + itn, err := trie.NewInterceptedTrieNode(serializedLeafNode, args.Hasher) + 
require.Nil(t, err) - err = v.SyncAccounts(key) - require.Nil(t, err) + args.TrieStorageManager = &storageManager.StorageManagerStub{ + GetCalled: func(b []byte) ([]byte, error) { + return serializedLeafNode, nil + }, + } + + cacher := testscommon.NewCacherMock() + cacher.Put(key, itn, 0) + args.Cacher = cacher + + v, err := syncer.NewValidatorAccountsSyncer(args) + require.Nil(t, err) + + err = v.SyncAccounts(key, storageMarker.NewDisabledStorageMarker()) + require.Nil(t, err) + }) } func TestValidatorAccountsSyncer_IsInterfaceNil(t *testing.T) { @@ -108,16 +117,4 @@ func TestValidatorAccountsSyncer_IsInterfaceNil(t *testing.T) { vas, err := syncer.NewValidatorAccountsSyncer(args) require.Nil(t, err) assert.False(t, vas.IsInterfaceNil()) -======= - args := ArgsNewValidatorAccountsSyncer{ - ArgsNewBaseAccountsSyncer: getDefaultBaseAccSyncerArgs(), - } - - syncer, err := NewValidatorAccountsSyncer(args) - assert.Nil(t, err) - assert.NotNil(t, syncer) - - err = syncer.SyncAccounts([]byte("rootHash"), nil) - assert.Equal(t, ErrNilStorageMarker, err) ->>>>>>> rc/v1.6.0 }