diff --git a/integrationTests/vm/staking/stakingV4_test.go b/integrationTests/vm/staking/stakingV4_test.go index 8f665cdd32b..92ab77ff24a 100644 --- a/integrationTests/vm/staking/stakingV4_test.go +++ b/integrationTests/vm/staking/stakingV4_test.go @@ -106,6 +106,61 @@ func unStake(t *testing.T, owner []byte, accountsDB state.AccountsAdapter, marsh require.Nil(t, err) } +type configNum struct { + eligible map[uint32]int + waiting map[uint32]int + leaving map[uint32]int + shuffledOut map[uint32]int + queue int + auction int + new int +} + +func checkConfig(t *testing.T, expectedConfig *configNum, nodesConfig nodesConfig) { + checkNumNodes(t, expectedConfig.eligible, nodesConfig.eligible) + checkNumNodes(t, expectedConfig.waiting, nodesConfig.waiting) + checkNumNodes(t, expectedConfig.leaving, nodesConfig.leaving) + checkNumNodes(t, expectedConfig.shuffledOut, nodesConfig.shuffledOut) + + require.Equal(t, expectedConfig.queue, len(nodesConfig.queue)) + require.Equal(t, expectedConfig.auction, len(nodesConfig.auction)) + require.Equal(t, expectedConfig.new, len(nodesConfig.new)) +} + +func checkNumNodes(t *testing.T, expectedNumNodes map[uint32]int, actualNodes map[uint32][][]byte) { + for shardID, numNodesInShard := range expectedNumNodes { + require.Equal(t, numNodesInShard, len(actualNodes[shardID])) + } +} + +func checkShuffledOutNodes(t *testing.T, currNodesConfig, prevNodesConfig nodesConfig, numShuffledOutNodes int, numRemainingEligible int) { + // Shuffled nodes from previous eligible are sent to waiting and previous waiting list nodes are replacing shuffled nodes + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), numShuffledOutNodes) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.eligible), numRemainingEligible) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), getAllPubKeys(prevNodesConfig.eligible), numShuffledOutNodes) +} + +func checkStakingV4EpochChangeFlow( + t *testing.T, + currNodesConfig, prevNodesConfig nodesConfig, + numOfShuffledOut, numOfUnselectedNodesFromAuction, numOfSelectedNodesFromAuction int) { + + // Nodes which are now in eligible are from previous waiting list + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), numOfShuffledOut) + + // New auction list also contains unselected nodes from previous auction list + requireSliceContainsNumOfElements(t, currNodesConfig.auction, prevNodesConfig.auction, numOfUnselectedNodesFromAuction) + + // All shuffled out are from previous eligible config + requireMapContains(t, prevNodesConfig.eligible, getAllPubKeys(currNodesConfig.shuffledOut)) + + // All shuffled out are now in auction + requireSliceContains(t, currNodesConfig.auction, getAllPubKeys(currNodesConfig.shuffledOut)) + + // Nodes which have been selected from previous auction list are now in waiting + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), prevNodesConfig.auction, numOfSelectedNodesFromAuction) +} + func TestStakingV4(t *testing.T) { numOfMetaNodes := uint32(400) numOfShards := uint32(3) @@ -194,21 +249,7 @@ func TestStakingV4(t *testing.T) { require.Empty(t, newNodeConfig.queue) require.Empty(t, newNodeConfig.leaving) - // 320 nodes which are now in eligible are from previous waiting list - requireSliceContainsNumOfElements(t, getAllPubKeys(newNodeConfig.eligible), getAllPubKeys(prevConfig.waiting), 
numOfShuffledOut) - - // New auction list also contains unselected nodes from previous auction list - requireSliceContainsNumOfElements(t, newNodeConfig.auction, prevConfig.auction, numOfUnselectedNodesFromAuction) - - // All shuffled out are from previous eligible config - requireMapContains(t, prevConfig.eligible, getAllPubKeys(newNodeConfig.shuffledOut)) - - // All shuffled out are now in auction - requireSliceContains(t, newNodeConfig.auction, getAllPubKeys(newNodeConfig.shuffledOut)) - - // 320 nodes which have been selected from previous auction list are now in waiting - requireSliceContainsNumOfElements(t, getAllPubKeys(newNodeConfig.waiting), prevConfig.auction, numOfSelectedNodesFromAuction) - + checkStakingV4EpochChangeFlow(t, newNodeConfig, prevConfig, numOfShuffledOut, numOfUnselectedNodesFromAuction, numOfSelectedNodesFromAuction) prevConfig = newNodeConfig epochs++ } @@ -901,3 +942,216 @@ func TestStakingV4_JailAndUnJailNodes(t *testing.T) { requireSameSliceDifferentOrder(t, queue, currNodesConfig.auction) require.Empty(t, node.NodesConfig.queue) } + +func TestStakingV4_DifferentEdgeCasesWithNotEnoughNodesInWaitingShouldSendShuffledToToWaiting(t *testing.T) { + pubKeys := generateAddresses(0, 20) + + owner1 := "owner1" + owner1Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[:4], + 0: pubKeys[4:8], + }, + WaitingBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[8:9], + 0: pubKeys[9:10], + }, + TotalStake: big.NewInt(20 * nodePrice), + } + + cfg := &InitialNodesConfig{ + MetaConsensusGroupSize: 2, + ShardConsensusGroupSize: 2, + MinNumberOfEligibleShardNodes: 4, + MinNumberOfEligibleMetaNodes: 4, + NumOfShards: 1, + Owners: map[string]*OwnerStats{ + owner1: owner1Stats, + }, + MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 12, + NodesToShufflePerShard: 1, + }, + { + EpochEnable: stakingV4Step3EnableEpoch, // epoch 3 + MaxNumNodes: 10, + NodesToShufflePerShard: 1, + }, + { + EpochEnable: 6, + MaxNumNodes: 12, + NodesToShufflePerShard: 1, + }, + }, + } + node := NewTestMetaProcessorWithCustomNodes(cfg) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1. 
Check initial config is correct
+	expectedNodesNum := &configNum{
+		eligible: map[uint32]int{
+			core.MetachainShardId: 4,
+			0:                     4,
+		},
+		waiting: map[uint32]int{
+			core.MetachainShardId: 1,
+			0:                     1,
+		},
+	}
+	currNodesConfig := node.NodesConfig
+	checkConfig(t, expectedNodesNum, currNodesConfig)
+
+	// During these 9 epochs, we will always have:
+	// - 10 activeNodes (8 eligible + 2 waiting)
+	// - 1 node to shuffle out per shard
+	// Meanwhile, maxNumNodes changes from 12 to 10 and back to 12
+	// Since activeNodes <= maxNumNodes, shuffled out nodes will always be sent directly to waiting list,
+	// instead of auction (there is no reason to send them to auction, since they would be selected anyway)
+	epoch := uint32(0)
+	numOfShuffledOut := 2
+	numRemainingEligible := 6
+	prevNodesConfig := currNodesConfig
+	for epoch < 9 {
+		node.Process(t, 5)
+
+		currNodesConfig = node.NodesConfig
+		checkConfig(t, expectedNodesNum, currNodesConfig)
+		checkShuffledOutNodes(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numRemainingEligible)
+
+		prevNodesConfig = currNodesConfig
+		epoch++
+	}
+
+	require.Equal(t, epoch, node.EpochStartTrigger.Epoch())
+
+	// Epoch = 9 with:
+	// - activeNodes = 10
+	// - maxNumNodes = 12
+	// Owner2 stakes 2 nodes, which should be initially sent to auction list
+	owner2Nodes := pubKeys[10:12]
+	node.ProcessStake(t, map[string]*NodesRegisterData{
+		"owner2": {
+			BLSKeys: owner2Nodes,
+			TotalStake: big.NewInt(5 * nodePrice),
+		},
+	})
+	currNodesConfig = node.NodesConfig
+	expectedNodesNum.auction = 2
+	checkConfig(t, expectedNodesNum, currNodesConfig)
+	requireSameSliceDifferentOrder(t, currNodesConfig.auction, owner2Nodes)
+
+	// Epoch = 10 with:
+	// - activeNodes = 12
+	// - maxNumNodes = 12
+	// Owner2's new nodes are selected from auction and distributed to waiting list
+	node.Process(t, 5)
+	currNodesConfig = node.NodesConfig
+	expectedNodesNum.waiting[core.MetachainShardId]++
+	expectedNodesNum.waiting[0]++
+	expectedNodesNum.auction = 0
+	checkConfig(t, expectedNodesNum, currNodesConfig)
+	checkShuffledOutNodes(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numRemainingEligible)
+	requireSliceContains(t, getAllPubKeys(currNodesConfig.waiting), owner2Nodes)
+
+	// During epochs 10-13, we will have:
+	// - activeNodes = 12
+	// - maxNumNodes = 12
+	// Since activeNodes == maxNumNodes, shuffled out nodes will always be sent directly to waiting list, instead of auction
+	epoch = 10
+	require.Equal(t, epoch, node.EpochStartTrigger.Epoch())
+	prevNodesConfig = currNodesConfig
+	for epoch < 13 {
+		node.Process(t, 5)
+
+		currNodesConfig = node.NodesConfig
+		checkConfig(t, expectedNodesNum, currNodesConfig)
+		checkShuffledOutNodes(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numRemainingEligible)
+
+		prevNodesConfig = currNodesConfig
+		epoch++
+	}
+
+	// Epoch = 13 with:
+	// - activeNodes = 12
+	// - maxNumNodes = 12
+	// Owner3 stakes 2 nodes, which should be initially sent to auction list
+	owner3Nodes := pubKeys[12:14]
+	node.ProcessStake(t, map[string]*NodesRegisterData{
+		"owner3": {
+			BLSKeys: owner3Nodes,
+			TotalStake: big.NewInt(5 * nodePrice),
+		},
+	})
+	currNodesConfig = node.NodesConfig
+	expectedNodesNum.auction = 2
+	checkConfig(t, expectedNodesNum, currNodesConfig)
+	requireSameSliceDifferentOrder(t, currNodesConfig.auction, owner3Nodes)
+
+	// During epochs 14-18, we will have:
+	// - activeNodes = 14
+	// - maxNumNodes = 12
+	// Since activeNodes > maxNumNodes, shuffled out nodes (2) will be sent to auction list
+	node.Process(t, 5)
+	prevNodesConfig = 
node.NodesConfig + epoch = 14 + require.Equal(t, epoch, node.EpochStartTrigger.Epoch()) + + numOfUnselectedNodesFromAuction := 0 + numOfSelectedNodesFromAuction := 2 + for epoch < 18 { + checkConfig(t, expectedNodesNum, currNodesConfig) + + node.Process(t, 5) + currNodesConfig = node.NodesConfig + checkStakingV4EpochChangeFlow(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numOfUnselectedNodesFromAuction, numOfSelectedNodesFromAuction) + + prevNodesConfig = currNodesConfig + epoch++ + } + + // Epoch = 18, with: + // - activeNodes = 14 + // - maxNumNodes = 12 + // Owner3 unStakes one of his nodes + node.ProcessUnStake(t, map[string][][]byte{ + "owner3": {owner3Nodes[0]}, + }) + + // Epoch = 19, with: + // - activeNodes = 13 + // - maxNumNodes = 12 + // Owner3's unStaked node is now leaving + node.Process(t, 5) + currNodesConfig = node.NodesConfig + require.Len(t, currNodesConfig.leaving, 1) + requireMapContains(t, currNodesConfig.leaving, [][]byte{owner3Nodes[0]}) + + epoch = 19 + require.Equal(t, epoch, node.EpochStartTrigger.Epoch()) + + // During epochs 19-23, we will have: + // - activeNodes = 13 + // - maxNumNodes = 12 + // Since activeNodes > maxNumNodes: + // - shuffled out nodes (2) will be sent to auction list + // - waiting lists will be unbalanced (3 in total: 1 + 2 per shard) + // - no node will spend extra epochs in eligible/waiting, since waiting lists will always be refilled + prevNodesConfig = node.NodesConfig + for epoch < 23 { + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 8) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 3) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 4) + require.Len(t, currNodesConfig.eligible[0], 4) + require.Len(t, currNodesConfig.auction, 2) + + node.Process(t, 5) + + currNodesConfig = node.NodesConfig + checkStakingV4EpochChangeFlow(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numOfUnselectedNodesFromAuction, numOfSelectedNodesFromAuction) + + prevNodesConfig = currNodesConfig + epoch++ + } +} diff --git a/sharding/nodesCoordinator/hashValidatorShuffler.go b/sharding/nodesCoordinator/hashValidatorShuffler.go index 89b3beb5fc5..d2a4fc0d92b 100644 --- a/sharding/nodesCoordinator/hashValidatorShuffler.go +++ b/sharding/nodesCoordinator/hashValidatorShuffler.go @@ -12,6 +12,7 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing/sha256" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/epochStart" ) var _ NodesShuffler = (*randHashShuffler)(nil) @@ -41,11 +42,21 @@ type shuffleNodesArg struct { nodesPerShard uint32 nbShards uint32 maxNodesToSwapPerShard uint32 + maxNumNodes uint32 flagBalanceWaitingLists bool flagStakingV4Step2 bool flagStakingV4Step3 bool } +type shuffledNodesConfig struct { + numShuffled uint32 + numNewEligible uint32 + numNewWaiting uint32 + numSelectedAuction uint32 + maxNumNodes uint32 + flagStakingV4Step2 bool +} + // TODO: Decide if transaction load statistics will be used for limiting the number of shards type randHashShuffler struct { // TODO: remove the references to this constant and the distributor @@ -195,6 +206,7 @@ func (rhs *randHashShuffler) UpdateNodeLists(args ArgsUpdateNodes) (*ResUpdateNo flagBalanceWaitingLists: rhs.flagBalanceWaitingLists.IsSet(), flagStakingV4Step2: rhs.flagStakingV4Step2.IsSet(), flagStakingV4Step3: rhs.flagStakingV4Step3.IsSet(), + maxNumNodes: rhs.activeNodesConfig.MaxNumNodes, }) } @@ -285,26 +297,42 @@ func shuffleNodes(arg shuffleNodesArg) 
(*ResUpdateNodes, error) { err = moveMaxNumNodesToMap(newEligible, newWaiting, arg.nodesMeta, arg.nodesPerShard) if err != nil { - log.Warn("moveNodesToMap failed", "error", err) + return nil, fmt.Errorf("moveNodesToMap failed, error: %w", err) } - err = distributeValidators(newWaiting, arg.newNodes, arg.randomness, false) + err = checkAndDistributeNewNodes(newWaiting, arg.newNodes, arg.randomness, arg.flagStakingV4Step3) if err != nil { - log.Warn("distributeValidators newNodes failed", "error", err) + return nil, fmt.Errorf("distributeValidators newNodes failed, error: %w", err) + } + + shuffledNodesCfg := &shuffledNodesConfig{ + numShuffled: getNumPubKeys(shuffledOutMap), + numNewEligible: getNumPubKeys(newEligible), + numNewWaiting: getNumPubKeys(newWaiting), + numSelectedAuction: uint32(len(arg.auction)), + maxNumNodes: arg.maxNumNodes, + flagStakingV4Step2: arg.flagStakingV4Step2, } if arg.flagStakingV4Step3 { + log.Debug("distributing selected nodes from auction to waiting", + "num auction nodes", len(arg.auction), "num waiting nodes", shuffledNodesCfg.numNewWaiting) + // Distribute selected validators from AUCTION -> WAITING - err = distributeValidators(newWaiting, arg.auction, arg.randomness, false) + err = distributeValidators(newWaiting, arg.auction, arg.randomness, arg.flagBalanceWaitingLists) if err != nil { - log.Warn("distributeValidators auction list failed", "error", err) + return nil, fmt.Errorf("distributeValidators auction list failed, error: %w", err) } } - if !arg.flagStakingV4Step2 { + + if shouldDistributeShuffledToWaiting(shuffledNodesCfg) { + log.Debug("distributing shuffled out nodes to waiting", + "num shuffled nodes", shuffledNodesCfg.numShuffled, "num waiting nodes", shuffledNodesCfg.numNewWaiting) + // Distribute validators from SHUFFLED OUT -> WAITING err = arg.distributor.DistributeValidators(newWaiting, shuffledOutMap, arg.randomness, arg.flagBalanceWaitingLists) if err != nil { - log.Warn("distributeValidators shuffledOut failed", "error", err) + return nil, fmt.Errorf("distributeValidators shuffled out failed, error: %w", err) } } @@ -554,6 +582,51 @@ func removeValidatorFromList(validatorList []Validator, index int) []Validator { return validatorList[:len(validatorList)-1] } +func checkAndDistributeNewNodes( + waiting map[uint32][]Validator, + newNodes []Validator, + randomness []byte, + flagStakingV4Step3 bool, +) error { + if !flagStakingV4Step3 { + return distributeValidators(waiting, newNodes, randomness, false) + } + + if len(newNodes) > 0 { + return epochStart.ErrReceivedNewListNodeInStakingV4 + } + + return nil +} + +func shouldDistributeShuffledToWaiting(shuffledNodesCfg *shuffledNodesConfig) bool { + if !shuffledNodesCfg.flagStakingV4Step2 { + return true + } + + totalNewWaiting := shuffledNodesCfg.numNewWaiting + shuffledNodesCfg.numSelectedAuction + totalNodes := totalNewWaiting + shuffledNodesCfg.numNewEligible + shuffledNodesCfg.numShuffled + + log.Debug("checking if should distribute shuffled out nodes to waiting in staking v4", + "numShuffled", shuffledNodesCfg.numShuffled, + "numNewEligible", shuffledNodesCfg.numNewEligible, + "numSelectedAuction", shuffledNodesCfg.numSelectedAuction, + "totalNewWaiting", totalNewWaiting, + "totalNodes", totalNodes, + "maxNumNodes", shuffledNodesCfg.maxNumNodes, + ) + + distributeShuffledToWaitingInStakingV4 := false + if totalNodes <= shuffledNodesCfg.maxNumNodes { + log.Warn("num of total nodes in waiting is too low after shuffling; will distribute " + + "shuffled out nodes directly to waiting and skip 
sending them to auction") + + distributeShuffledToWaitingInStakingV4 = true + } + + return distributeShuffledToWaitingInStakingV4 +} + func removeValidatorFromListKeepOrder(validatorList []Validator, index int) []Validator { indexNotOK := index > len(validatorList)-1 || index < 0 if indexNotOK { @@ -614,6 +687,16 @@ func moveNodesToMap(destination map[uint32][]Validator, source map[uint32][]Vali return nil } +func getNumPubKeys(shardValidatorsMap map[uint32][]Validator) uint32 { + numPubKeys := uint32(0) + + for _, validatorsInShard := range shardValidatorsMap { + numPubKeys += uint32(len(validatorsInShard)) + } + + return numPubKeys +} + // moveMaxNumNodesToMap moves the validators in the source list to the corresponding destination list // but adding just enough nodes so that at most the number of nodes is kept in the destination list // The parameter maxNodesToMove is a limiting factor and should limit the number of nodes diff --git a/sharding/nodesCoordinator/hashValidatorShuffler_test.go b/sharding/nodesCoordinator/hashValidatorShuffler_test.go index cae9ad879ce..bf53154a925 100644 --- a/sharding/nodesCoordinator/hashValidatorShuffler_test.go +++ b/sharding/nodesCoordinator/hashValidatorShuffler_test.go @@ -14,6 +14,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/sharding/mock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -2429,6 +2430,7 @@ func TestRandHashShuffler_UpdateNodeLists_WithNewNodes_NoWaiting(t *testing.T) { ShuffleBetweenShards: shuffleBetweenShards, MaxNodesEnableConfig: nil, EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + EnableEpochs: config.EnableEpochs{StakingV4Step3EnableEpoch: stakingV4Epoch}, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) @@ -2490,6 +2492,7 @@ func TestRandHashShuffler_UpdateNodeLists_WithNewNodes_NilOrEmptyWaiting(t *test ShuffleBetweenShards: shuffleBetweenShards, MaxNodesEnableConfig: nil, EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + EnableEpochs: config.EnableEpochs{StakingV4Step3EnableEpoch: stakingV4Epoch}, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) require.Nil(t, err) @@ -2566,20 +2569,17 @@ func TestRandHashShuffler_UpdateNodeLists_WithStakingV4(t *testing.T) { t.Parallel() numEligiblePerShard := 100 - numNewNodesPerShard := 100 numWaitingPerShard := 30 numAuction := 40 nbShards := uint32(2) eligibleMap := generateValidatorMap(numEligiblePerShard, nbShards) waitingMap := generateValidatorMap(numWaitingPerShard, nbShards) - newNodes := generateValidatorList(numNewNodesPerShard * (int(nbShards) + 1)) auctionList := generateValidatorList(numAuction) args := ArgsUpdateNodes{ Eligible: eligibleMap, Waiting: waitingMap, - NewNodes: newNodes, UnStakeLeaving: make([]Validator, 0), AdditionalLeaving: make([]Validator, 0), Rand: generateRandomByteArray(32), @@ -2592,11 +2592,6 @@ func TestRandHashShuffler_UpdateNodeLists_WithStakingV4(t *testing.T) { resUpdateNodeList, err := shuffler.UpdateNodeLists(args) require.Nil(t, err) - for _, newNode := range args.NewNodes { - found, _ := searchInMap(resUpdateNodeList.Waiting, newNode.PubKey()) - assert.True(t, found) - } - for _, auctionNode := range args.Auction { found, _ := searchInMap(resUpdateNodeList.Waiting, auctionNode.PubKey()) assert.True(t, found) @@ -2611,9 +2606,14 @@ func TestRandHashShuffler_UpdateNodeLists_WithStakingV4(t *testing.T) { allNewEligible := 
getValidatorsInMap(resUpdateNodeList.Eligible) allNewWaiting := getValidatorsInMap(resUpdateNodeList.Waiting) - previousNumberOfNodes := (numEligiblePerShard+numWaitingPerShard+numNewNodesPerShard)*(int(nbShards)+1) + numAuction + previousNumberOfNodes := (numEligiblePerShard+numWaitingPerShard)*(int(nbShards)+1) + numAuction currentNumberOfNodes := len(allNewEligible) + len(allNewWaiting) + len(allShuffledOut) assert.Equal(t, previousNumberOfNodes, currentNumberOfNodes) + + args.NewNodes = generateValidatorList(100 * (int(nbShards) + 1)) + resUpdateNodeList, err = shuffler.UpdateNodeLists(args) + require.ErrorIs(t, err, epochStart.ErrReceivedNewListNodeInStakingV4) + require.Nil(t, resUpdateNodeList) } func TestRandHashShuffler_UpdateNodeLists_WithNewNodes_WithWaiting_WithLeaving(t *testing.T) { diff --git a/vm/systemSmartContracts/delegation.go b/vm/systemSmartContracts/delegation.go index 2f89ed72d79..e269e633df5 100644 --- a/vm/systemSmartContracts/delegation.go +++ b/vm/systemSmartContracts/delegation.go @@ -2883,22 +2883,6 @@ func (d *delegation) executeStakeAndUpdateStatus( return vmcommon.Ok } -func (d *delegation) getConfigStatusAndGlobalFund() (*DelegationConfig, *DelegationContractStatus, *GlobalFundData, error) { - dConfig, err := d.getDelegationContractConfig() - if err != nil { - return nil, nil, nil, err - } - globalFund, err := d.getGlobalFundData() - if err != nil { - return nil, nil, nil, err - } - dStatus, err := d.getDelegationStatus() - if err != nil { - return nil, nil, nil, err - } - return dConfig, dStatus, globalFund, nil -} - func (d *delegation) executeOnValidatorSC(address []byte, function string, args [][]byte, value *big.Int) (*vmcommon.VMOutput, error) { validatorCall := function for _, key := range args {
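
For quick reference, below is a minimal, self-contained sketch of the routing rule that shouldDistributeShuffledToWaiting introduces above. The struct and function names here are illustrative only (they are not part of the repository), and the two scenarios mirror the node counts exercised in TestStakingV4_DifferentEdgeCasesWithNotEnoughNodesInWaitingShouldSendShuffledToToWaiting.

package main

import "fmt"

// shuffledStats loosely mirrors the counters the shuffler inspects when deciding
// where shuffled-out nodes should go; field names are illustrative, not the repo's API.
type shuffledStats struct {
	numShuffled        uint32
	numNewEligible     uint32
	numNewWaiting      uint32
	numSelectedAuction uint32
	maxNumNodes        uint32
	stakingV4Step2     bool
}

// sendShuffledToWaiting reproduces the decision rule: before staking v4 step 2,
// shuffled-out nodes always refill the waiting lists; afterwards they only do so
// when the projected total number of nodes stays within maxNumNodes, otherwise
// they are routed to the auction list.
func sendShuffledToWaiting(s shuffledStats) bool {
	if !s.stakingV4Step2 {
		return true
	}
	totalNewWaiting := s.numNewWaiting + s.numSelectedAuction
	totalNodes := totalNewWaiting + s.numNewEligible + s.numShuffled
	return totalNodes <= s.maxNumNodes
}

func main() {
	// Early epochs of the test: 8 eligible + 2 waiting, 1 node shuffled per shard,
	// maxNumNodes = 12 -> the shuffled nodes skip auction and go straight to waiting.
	low := shuffledStats{numShuffled: 2, numNewEligible: 8, numNewWaiting: 0, numSelectedAuction: 0, maxNumNodes: 12, stakingV4Step2: true}
	fmt.Println(sendShuffledToWaiting(low)) // true: 10 <= 12

	// Later epochs: 14 active nodes against maxNumNodes = 12,
	// so the 2 shuffled-out nodes are sent to the auction list instead.
	high := shuffledStats{numShuffled: 2, numNewEligible: 8, numNewWaiting: 2, numSelectedAuction: 2, maxNumNodes: 12, stakingV4Step2: true}
	fmt.Println(sendShuffledToWaiting(high)) // false: 14 > 12
}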