From 5bca1bbfd50232770e45d97f11fa3236a91be429 Mon Sep 17 00:00:00 2001 From: MariusC Date: Mon, 6 Mar 2023 14:14:44 +0200 Subject: [PATCH 01/14] FEAT: Add integration test which fails for now --- integrationTests/vm/staking/stakingV4_test.go | 166 ++++++++++++++++++ vm/systemSmartContracts/delegation.go | 16 -- 2 files changed, 166 insertions(+), 16 deletions(-) diff --git a/integrationTests/vm/staking/stakingV4_test.go b/integrationTests/vm/staking/stakingV4_test.go index 8f665cdd32b..a0c8713b9b1 100644 --- a/integrationTests/vm/staking/stakingV4_test.go +++ b/integrationTests/vm/staking/stakingV4_test.go @@ -901,3 +901,169 @@ func TestStakingV4_JailAndUnJailNodes(t *testing.T) { requireSameSliceDifferentOrder(t, queue, currNodesConfig.auction) require.Empty(t, node.NodesConfig.queue) } + +// This is an edge case with exactly 1 in waiting +func TestStakingV4_ExactlyOneNodeInWaitingEveryEpoch(t *testing.T) { + pubKeys := generateAddresses(0, 20) + + owner1 := "owner1" + owner1Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[:4], + 0: pubKeys[4:8], + }, + WaitingBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[8:10], + 0: pubKeys[10:12], + }, + TotalStake: big.NewInt(20 * nodePrice), + } + + cfg := &InitialNodesConfig{ + MetaConsensusGroupSize: 2, + ShardConsensusGroupSize: 2, + MinNumberOfEligibleShardNodes: 4, + MinNumberOfEligibleMetaNodes: 4, + NumOfShards: 1, + Owners: map[string]*OwnerStats{ + owner1: owner1Stats, + }, + MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 12, + NodesToShufflePerShard: 1, + }, + { + EpochEnable: stakingV4Step3EnableEpoch, + MaxNumNodes: 10, + NodesToShufflePerShard: 1, + }, + }, + } + node := NewTestMetaProcessorWithCustomNodes(cfg) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1. Check initial config is correct + currNodesConfig := node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 8) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 4) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 4) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 2) + require.Len(t, currNodesConfig.eligible[0], 4) + require.Len(t, currNodesConfig.waiting[0], 2) + require.Empty(t, currNodesConfig.shuffledOut) + require.Empty(t, currNodesConfig.auction) + + node.Process(t, 7*4+2) +} + +func TestStakingV4_NotEnoughNodesShouldSendAuctionDirectlyToWaiting(t *testing.T) { + pubKeys := generateAddresses(0, 20) + + owner1 := "owner1" + owner1Stats := &OwnerStats{ + EligibleBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[:4], + 0: pubKeys[4:8], + }, + WaitingBlsKeys: map[uint32][][]byte{ + core.MetachainShardId: pubKeys[8:9], + 0: pubKeys[9:10], + }, + TotalStake: big.NewInt(20 * nodePrice), + } + + cfg := &InitialNodesConfig{ + MetaConsensusGroupSize: 2, + ShardConsensusGroupSize: 2, + MinNumberOfEligibleShardNodes: 4, + MinNumberOfEligibleMetaNodes: 4, + NumOfShards: 1, + Owners: map[string]*OwnerStats{ + owner1: owner1Stats, + }, + MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ + { + EpochEnable: 0, + MaxNumNodes: 10, + NodesToShufflePerShard: 1, + }, + { + EpochEnable: stakingV4Step3EnableEpoch, + MaxNumNodes: 8, + NodesToShufflePerShard: 1, + }, + }, + } + node := NewTestMetaProcessorWithCustomNodes(cfg) + node.EpochStartTrigger.SetRoundsPerEpoch(4) + + // 1. 
Check initial config is correct + currNodesConfig := node.NodesConfig + prevNodesConfig := currNodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 8) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 2) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 4) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 1) + require.Len(t, currNodesConfig.eligible[0], 4) + require.Len(t, currNodesConfig.waiting[0], 1) + require.Empty(t, currNodesConfig.shuffledOut) + require.Empty(t, currNodesConfig.auction) + + // 2. Epoch = StakingV4Step1, configuration should be the same, nodes from eligible should be shuffled + node.Process(t, 6) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 8) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 2) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 4) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 1) + require.Len(t, currNodesConfig.eligible[0], 4) + require.Len(t, currNodesConfig.waiting[0], 1) + require.Empty(t, currNodesConfig.shuffledOut) + require.Empty(t, currNodesConfig.auction) + + // Shuffled nodes previous eligible ones are sent to waiting and previous waiting list nodes are replacing shuffled nodes + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), 2) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.eligible), 6) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), getAllPubKeys(prevNodesConfig.eligible), 2) + + prevNodesConfig = currNodesConfig + + // 3. Epoch = StakingV4Step2, shuffled nodes from eligible are sent to auction, waiting list remains empty + node.Process(t, 5) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 8) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 0) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 4) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 0) + require.Len(t, currNodesConfig.eligible[0], 4) + require.Len(t, currNodesConfig.waiting[0], 0) + require.Len(t, currNodesConfig.auction, 2) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, getAllPubKeys(currNodesConfig.shuffledOut)) + + // Shuffled nodes previous eligible ones are sent to waiting and previous waiting list nodes are replacing shuffled nodes + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), 2) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.eligible), 6) + + prevNodesConfig = currNodesConfig + + // 4. 
Epoch = StakingV4Step3, auction nodes from previous epoch should be sent directly to waiting list, since waiting list was empty + node.Process(t, 5) + + /* Test fails from here, should work with fix + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 8) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 0) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 4) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 0) + require.Len(t, currNodesConfig.eligible[0], 4) + require.Len(t, currNodesConfig.waiting[0], 0) + require.Len(t, currNodesConfig.auction, 2) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, getAllPubKeys(currNodesConfig.shuffledOut)) + + // Shuffled nodes previous eligible ones are sent to waiting and previous waiting list nodes are replacing shuffled nodes + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), 2) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.eligible), 6) + */ +} diff --git a/vm/systemSmartContracts/delegation.go b/vm/systemSmartContracts/delegation.go index 2f89ed72d79..e269e633df5 100644 --- a/vm/systemSmartContracts/delegation.go +++ b/vm/systemSmartContracts/delegation.go @@ -2883,22 +2883,6 @@ func (d *delegation) executeStakeAndUpdateStatus( return vmcommon.Ok } -func (d *delegation) getConfigStatusAndGlobalFund() (*DelegationConfig, *DelegationContractStatus, *GlobalFundData, error) { - dConfig, err := d.getDelegationContractConfig() - if err != nil { - return nil, nil, nil, err - } - globalFund, err := d.getGlobalFundData() - if err != nil { - return nil, nil, nil, err - } - dStatus, err := d.getDelegationStatus() - if err != nil { - return nil, nil, nil, err - } - return dConfig, dStatus, globalFund, nil -} - func (d *delegation) executeOnValidatorSC(address []byte, function string, args [][]byte, value *big.Int) (*vmcommon.VMOutput, error) { validatorCall := function for _, key := range args { From 6d4b2f803c48ae51e00c7639881833cefc3b6005 Mon Sep 17 00:00:00 2001 From: MariusC Date: Tue, 7 Mar 2023 12:01:39 +0200 Subject: [PATCH 02/14] FEAT: Add todo workflow --- integrationTests/vm/staking/stakingV4_test.go | 4 ++-- sharding/nodesCoordinator/hashValidatorShuffler.go | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/integrationTests/vm/staking/stakingV4_test.go b/integrationTests/vm/staking/stakingV4_test.go index a0c8713b9b1..0f2341c248e 100644 --- a/integrationTests/vm/staking/stakingV4_test.go +++ b/integrationTests/vm/staking/stakingV4_test.go @@ -1051,7 +1051,7 @@ func TestStakingV4_NotEnoughNodesShouldSendAuctionDirectlyToWaiting(t *testing.T // 4. 
Epoch = StakingV4Step3, auction nodes from previous epoch should be sent directly to waiting list, since waiting list was empty node.Process(t, 5) - /* Test fails from here, should work with fix + /*Test fails from here, should work with fix currNodesConfig = node.NodesConfig require.Len(t, getAllPubKeys(currNodesConfig.eligible), 8) require.Len(t, getAllPubKeys(currNodesConfig.waiting), 0) @@ -1063,7 +1063,7 @@ func TestStakingV4_NotEnoughNodesShouldSendAuctionDirectlyToWaiting(t *testing.T requireSameSliceDifferentOrder(t, currNodesConfig.auction, getAllPubKeys(currNodesConfig.shuffledOut)) // Shuffled nodes previous eligible ones are sent to waiting and previous waiting list nodes are replacing shuffled nodes - requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), 2) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), prevNodesConfig.auction, 2) requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.eligible), 6) */ } diff --git a/sharding/nodesCoordinator/hashValidatorShuffler.go b/sharding/nodesCoordinator/hashValidatorShuffler.go index 89b3beb5fc5..6c06af41d44 100644 --- a/sharding/nodesCoordinator/hashValidatorShuffler.go +++ b/sharding/nodesCoordinator/hashValidatorShuffler.go @@ -283,6 +283,11 @@ func shuffleNodes(arg shuffleNodesArg) (*ResUpdateNodes, error) { shuffledOutMap, newEligible := shuffleOutNodes(newEligible, numToRemove, arg.randomness) + // Here check that if allNodes(waitingList/newWaiting) < allNodes(shuffledOutMap) then select nodes from auction + // Compute numNodesToFillWaiting = allNodes(shuffledOutMap) - allNodes(waitingList) + // Easy case If: numNodesToFillWaiting > allNodes(auction) => move all auction list to waiting + // Else: select best nodes from auction to fill waiting list + err = moveMaxNumNodesToMap(newEligible, newWaiting, arg.nodesMeta, arg.nodesPerShard) if err != nil { log.Warn("moveNodesToMap failed", "error", err) From 26e8245fd7b6504bdeffdfa683327008b95c9d1d Mon Sep 17 00:00:00 2001 From: MariusC Date: Tue, 7 Mar 2023 13:34:30 +0200 Subject: [PATCH 03/14] FEAT: Possible solution for easy case --- integrationTests/vm/staking/stakingV4_test.go | 32 ++++++++++--------- .../nodesCoordinator/hashValidatorShuffler.go | 31 +++++++++++++++++- .../hashValidatorShufflerWithAuction.go | 11 +++++++ 3 files changed, 58 insertions(+), 16 deletions(-) create mode 100644 sharding/nodesCoordinator/hashValidatorShufflerWithAuction.go diff --git a/integrationTests/vm/staking/stakingV4_test.go b/integrationTests/vm/staking/stakingV4_test.go index 0f2341c248e..ef175ff66a5 100644 --- a/integrationTests/vm/staking/stakingV4_test.go +++ b/integrationTests/vm/staking/stakingV4_test.go @@ -1032,21 +1032,21 @@ func TestStakingV4_NotEnoughNodesShouldSendAuctionDirectlyToWaiting(t *testing.T // 3. 
Epoch = StakingV4Step2, shuffled nodes from eligible are sent to auction, waiting list remains empty node.Process(t, 5) - currNodesConfig = node.NodesConfig - require.Len(t, getAllPubKeys(currNodesConfig.eligible), 8) - require.Len(t, getAllPubKeys(currNodesConfig.waiting), 0) - require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 4) - require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 0) - require.Len(t, currNodesConfig.eligible[0], 4) - require.Len(t, currNodesConfig.waiting[0], 0) - require.Len(t, currNodesConfig.auction, 2) - requireSameSliceDifferentOrder(t, currNodesConfig.auction, getAllPubKeys(currNodesConfig.shuffledOut)) - - // Shuffled nodes previous eligible ones are sent to waiting and previous waiting list nodes are replacing shuffled nodes - requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), 2) - requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.eligible), 6) - - prevNodesConfig = currNodesConfig + //currNodesConfig = node.NodesConfig + //require.Len(t, getAllPubKeys(currNodesConfig.eligible), 8) + //require.Len(t, getAllPubKeys(currNodesConfig.waiting), 0) + //require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 4) + //require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 0) + //require.Len(t, currNodesConfig.eligible[0], 4) + //require.Len(t, currNodesConfig.waiting[0], 0) + //require.Len(t, currNodesConfig.auction, 2) + //requireSameSliceDifferentOrder(t, currNodesConfig.auction, getAllPubKeys(currNodesConfig.shuffledOut)) + // + //// Shuffled nodes previous eligible ones are sent to waiting and previous waiting list nodes are replacing shuffled nodes + //requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), 2) + //requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.eligible), 6) + // + //prevNodesConfig = currNodesConfig // 4. 
Epoch = StakingV4Step3, auction nodes from previous epoch should be sent directly to waiting list, since waiting list was empty node.Process(t, 5) @@ -1066,4 +1066,6 @@ func TestStakingV4_NotEnoughNodesShouldSendAuctionDirectlyToWaiting(t *testing.T requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), prevNodesConfig.auction, 2) requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.eligible), 6) */ + + node.Process(t, 5) } diff --git a/sharding/nodesCoordinator/hashValidatorShuffler.go b/sharding/nodesCoordinator/hashValidatorShuffler.go index 6c06af41d44..a818fb43b33 100644 --- a/sharding/nodesCoordinator/hashValidatorShuffler.go +++ b/sharding/nodesCoordinator/hashValidatorShuffler.go @@ -283,6 +283,24 @@ func shuffleNodes(arg shuffleNodesArg) (*ResUpdateNodes, error) { shuffledOutMap, newEligible := shuffleOutNodes(newEligible, numToRemove, arg.randomness) + numShuffled := getNumPubKeys(shuffledOutMap) + numNewWaiting := getNumPubKeys(newWaiting) + numSelectedAuction := uint32(len(arg.auction)) + totalNewWaiting := numNewWaiting + numSelectedAuction + + shouldFillWaitingList := false + if numShuffled >= totalNewWaiting { + numNeededNodesToFillWaiting := numShuffled - totalNewWaiting + log.Warn("not enough nodes in waiting for next epoch after shuffling current validators into auction", + "numShuffled", numShuffled, + "numNewWaiting", numNewWaiting, + "numSelectedAuction", numSelectedAuction, + "numNeededNodesToFillWaiting", numNeededNodesToFillWaiting) + + if arg.flagStakingV4Step2 { + shouldFillWaitingList = true + } + } // Here check that if allNodes(waitingList/newWaiting) < allNodes(shuffledOutMap) then select nodes from auction // Compute numNodesToFillWaiting = allNodes(shuffledOutMap) - allNodes(waitingList) // Easy case If: numNodesToFillWaiting > allNodes(auction) => move all auction list to waiting @@ -298,13 +316,24 @@ func shuffleNodes(arg shuffleNodesArg) (*ResUpdateNodes, error) { log.Warn("distributeValidators newNodes failed", "error", err) } - if arg.flagStakingV4Step3 { + if arg.flagStakingV4Step3 && !shouldFillWaitingList { // Distribute selected validators from AUCTION -> WAITING err = distributeValidators(newWaiting, arg.auction, arg.randomness, false) if err != nil { log.Warn("distributeValidators auction list failed", "error", err) } } + + if arg.flagStakingV4Step2 && shouldFillWaitingList { + + log.Warn("distributing shuffled out nodes to waiting list instead of auction") + // Distribute validators from SHUFFLED OUT -> WAITING + err = arg.distributor.DistributeValidators(newWaiting, shuffledOutMap, arg.randomness, arg.flagBalanceWaitingLists) + if err != nil { + log.Warn("distributeValidators shuffledOut failed", "error", err) + } + } + if !arg.flagStakingV4Step2 { // Distribute validators from SHUFFLED OUT -> WAITING err = arg.distributor.DistributeValidators(newWaiting, shuffledOutMap, arg.randomness, arg.flagBalanceWaitingLists) diff --git a/sharding/nodesCoordinator/hashValidatorShufflerWithAuction.go b/sharding/nodesCoordinator/hashValidatorShufflerWithAuction.go new file mode 100644 index 00000000000..77edafcc52a --- /dev/null +++ b/sharding/nodesCoordinator/hashValidatorShufflerWithAuction.go @@ -0,0 +1,11 @@ +package nodesCoordinator + +func getNumPubKeys(shardValidatorsMap map[uint32][]Validator) uint32 { + numPubKeys := uint32(0) + + for _, validatorsInShard := range shardValidatorsMap { + numPubKeys += uint32(len(validatorsInShard)) + } + + return numPubKeys +} From 
9712fc0f4ceaba4e03868cbb6c3ca7595885b32f Mon Sep 17 00:00:00 2001 From: MariusC Date: Tue, 7 Mar 2023 18:31:38 +0200 Subject: [PATCH 04/14] FEAT: Possible solution --- .../nodesCoordinator/hashValidatorShuffler.go | 32 ++++++++++++------- 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/sharding/nodesCoordinator/hashValidatorShuffler.go b/sharding/nodesCoordinator/hashValidatorShuffler.go index a818fb43b33..7cc0acd8914 100644 --- a/sharding/nodesCoordinator/hashValidatorShuffler.go +++ b/sharding/nodesCoordinator/hashValidatorShuffler.go @@ -41,6 +41,7 @@ type shuffleNodesArg struct { nodesPerShard uint32 nbShards uint32 maxNodesToSwapPerShard uint32 + maxNumNodes uint32 flagBalanceWaitingLists bool flagStakingV4Step2 bool flagStakingV4Step3 bool @@ -195,6 +196,7 @@ func (rhs *randHashShuffler) UpdateNodeLists(args ArgsUpdateNodes) (*ResUpdateNo flagBalanceWaitingLists: rhs.flagBalanceWaitingLists.IsSet(), flagStakingV4Step2: rhs.flagStakingV4Step2.IsSet(), flagStakingV4Step3: rhs.flagStakingV4Step3.IsSet(), + maxNumNodes: rhs.activeNodesConfig.MaxNumNodes, }) } @@ -284,22 +286,26 @@ func shuffleNodes(arg shuffleNodesArg) (*ResUpdateNodes, error) { shuffledOutMap, newEligible := shuffleOutNodes(newEligible, numToRemove, arg.randomness) numShuffled := getNumPubKeys(shuffledOutMap) + numNewEligible := getNumPubKeys(newEligible) numNewWaiting := getNumPubKeys(newWaiting) + numSelectedAuction := uint32(len(arg.auction)) totalNewWaiting := numNewWaiting + numSelectedAuction - shouldFillWaitingList := false - if numShuffled >= totalNewWaiting { - numNeededNodesToFillWaiting := numShuffled - totalNewWaiting - log.Warn("not enough nodes in waiting for next epoch after shuffling current validators into auction", + totalNodes := totalNewWaiting + numNewEligible + numShuffled + + distributeShuffledToWaiting := false + if totalNodes <= arg.maxNumNodes || (numNewEligible+numShuffled) <= arg.maxNumNodes { + log.Warn("num of total nodes in waiting is too low after shuffling; will distribute "+ + "shuffled out nodes directly in waiting and skip sending them to auction", "numShuffled", numShuffled, - "numNewWaiting", numNewWaiting, + "numNewEligible", numNewEligible, "numSelectedAuction", numSelectedAuction, - "numNeededNodesToFillWaiting", numNeededNodesToFillWaiting) + "totalNewWaiting", totalNewWaiting, + "totalNodes", totalNodes, + "maxNumNodes", arg.maxNumNodes) - if arg.flagStakingV4Step2 { - shouldFillWaitingList = true - } + distributeShuffledToWaiting = arg.flagStakingV4Step2 } // Here check that if allNodes(waitingList/newWaiting) < allNodes(shuffledOutMap) then select nodes from auction // Compute numNodesToFillWaiting = allNodes(shuffledOutMap) - allNodes(waitingList) @@ -316,7 +322,9 @@ func shuffleNodes(arg shuffleNodesArg) (*ResUpdateNodes, error) { log.Warn("distributeValidators newNodes failed", "error", err) } - if arg.flagStakingV4Step3 && !shouldFillWaitingList { + if arg.flagStakingV4Step3 && !distributeShuffledToWaiting { + log.Debug("distributing selected nodes from auction to waiting") + // Distribute selected validators from AUCTION -> WAITING err = distributeValidators(newWaiting, arg.auction, arg.randomness, false) if err != nil { @@ -324,9 +332,9 @@ func shuffleNodes(arg shuffleNodesArg) (*ResUpdateNodes, error) { } } - if arg.flagStakingV4Step2 && shouldFillWaitingList { + if distributeShuffledToWaiting { + log.Debug("distributing shuffled out nodes to waiting in staking V4") - log.Warn("distributing shuffled out nodes to waiting list instead of auction") // 
Distribute validators from SHUFFLED OUT -> WAITING err = arg.distributor.DistributeValidators(newWaiting, shuffledOutMap, arg.randomness, arg.flagBalanceWaitingLists) if err != nil { From 721748776aed2bd06cb8d50c8b727b31361e5bf2 Mon Sep 17 00:00:00 2001 From: MariusC Date: Wed, 8 Mar 2023 11:54:06 +0200 Subject: [PATCH 05/14] FIX: Broken condition for impossible case --- integrationTests/vm/staking/stakingV4_test.go | 4 ++-- .../nodesCoordinator/hashValidatorShuffler.go | 23 +++++++++---------- 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/integrationTests/vm/staking/stakingV4_test.go b/integrationTests/vm/staking/stakingV4_test.go index ef175ff66a5..6d379d45f00 100644 --- a/integrationTests/vm/staking/stakingV4_test.go +++ b/integrationTests/vm/staking/stakingV4_test.go @@ -986,12 +986,12 @@ func TestStakingV4_NotEnoughNodesShouldSendAuctionDirectlyToWaiting(t *testing.T MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ { EpochEnable: 0, - MaxNumNodes: 10, + MaxNumNodes: 12, NodesToShufflePerShard: 1, }, { EpochEnable: stakingV4Step3EnableEpoch, - MaxNumNodes: 8, + MaxNumNodes: 10, NodesToShufflePerShard: 1, }, }, diff --git a/sharding/nodesCoordinator/hashValidatorShuffler.go b/sharding/nodesCoordinator/hashValidatorShuffler.go index 7cc0acd8914..635de1f0a6e 100644 --- a/sharding/nodesCoordinator/hashValidatorShuffler.go +++ b/sharding/nodesCoordinator/hashValidatorShuffler.go @@ -293,9 +293,10 @@ func shuffleNodes(arg shuffleNodesArg) (*ResUpdateNodes, error) { totalNewWaiting := numNewWaiting + numSelectedAuction totalNodes := totalNewWaiting + numNewEligible + numShuffled + maxNumNodes := arg.maxNumNodes - distributeShuffledToWaiting := false - if totalNodes <= arg.maxNumNodes || (numNewEligible+numShuffled) <= arg.maxNumNodes { + distributeShuffledToWaitingInStakingV4 := false + if totalNodes <= maxNumNodes { log.Warn("num of total nodes in waiting is too low after shuffling; will distribute "+ "shuffled out nodes directly in waiting and skip sending them to auction", "numShuffled", numShuffled, @@ -303,14 +304,10 @@ func shuffleNodes(arg shuffleNodesArg) (*ResUpdateNodes, error) { "numSelectedAuction", numSelectedAuction, "totalNewWaiting", totalNewWaiting, "totalNodes", totalNodes, - "maxNumNodes", arg.maxNumNodes) + "maxNumNodes", maxNumNodes) - distributeShuffledToWaiting = arg.flagStakingV4Step2 + distributeShuffledToWaitingInStakingV4 = arg.flagStakingV4Step2 } - // Here check that if allNodes(waitingList/newWaiting) < allNodes(shuffledOutMap) then select nodes from auction - // Compute numNodesToFillWaiting = allNodes(shuffledOutMap) - allNodes(waitingList) - // Easy case If: numNodesToFillWaiting > allNodes(auction) => move all auction list to waiting - // Else: select best nodes from auction to fill waiting list err = moveMaxNumNodesToMap(newEligible, newWaiting, arg.nodesMeta, arg.nodesPerShard) if err != nil { @@ -322,8 +319,9 @@ func shuffleNodes(arg shuffleNodesArg) (*ResUpdateNodes, error) { log.Warn("distributeValidators newNodes failed", "error", err) } - if arg.flagStakingV4Step3 && !distributeShuffledToWaiting { - log.Debug("distributing selected nodes from auction to waiting") + if arg.flagStakingV4Step3 && !distributeShuffledToWaitingInStakingV4 { + log.Debug("distributing selected nodes from auction to waiting", + "num auction nodes", len(arg.auction), "num waiting nodes", numNewWaiting) // Distribute selected validators from AUCTION -> WAITING err = distributeValidators(newWaiting, arg.auction, arg.randomness, false) @@ -332,8 +330,9 @@ func 
shuffleNodes(arg shuffleNodesArg) (*ResUpdateNodes, error) { } } - if distributeShuffledToWaiting { - log.Debug("distributing shuffled out nodes to waiting in staking V4") + if distributeShuffledToWaitingInStakingV4 { + log.Debug("distributing shuffled out nodes to waiting in staking V4", + "num shuffled nodes", numShuffled, "num waiting nodes", numNewWaiting) // Distribute validators from SHUFFLED OUT -> WAITING err = arg.distributor.DistributeValidators(newWaiting, shuffledOutMap, arg.randomness, arg.flagBalanceWaitingLists) From c68293a7494be7af7c1bcfd5e4463a272972cfb5 Mon Sep 17 00:00:00 2001 From: MariusC Date: Wed, 8 Mar 2023 17:33:58 +0200 Subject: [PATCH 06/14] FEAT: Continue integration edge case testing --- integrationTests/vm/staking/stakingV4_test.go | 211 +++++++++++------- .../nodesCoordinator/hashValidatorShuffler.go | 12 +- .../hashValidatorShufflerWithAuction.go | 11 - 3 files changed, 136 insertions(+), 98 deletions(-) delete mode 100644 sharding/nodesCoordinator/hashValidatorShufflerWithAuction.go diff --git a/integrationTests/vm/staking/stakingV4_test.go b/integrationTests/vm/staking/stakingV4_test.go index 6d379d45f00..7864de8974f 100644 --- a/integrationTests/vm/staking/stakingV4_test.go +++ b/integrationTests/vm/staking/stakingV4_test.go @@ -902,8 +902,7 @@ func TestStakingV4_JailAndUnJailNodes(t *testing.T) { require.Empty(t, node.NodesConfig.queue) } -// This is an edge case with exactly 1 in waiting -func TestStakingV4_ExactlyOneNodeInWaitingEveryEpoch(t *testing.T) { +func TestStakingV4_NotEnoughNodesShouldSendAuctionDirectlyToWaiting(t *testing.T) { pubKeys := generateAddresses(0, 20) owner1 := "owner1" @@ -913,8 +912,8 @@ func TestStakingV4_ExactlyOneNodeInWaitingEveryEpoch(t *testing.T) { 0: pubKeys[4:8], }, WaitingBlsKeys: map[uint32][][]byte{ - core.MetachainShardId: pubKeys[8:10], - 0: pubKeys[10:12], + core.MetachainShardId: pubKeys[8:9], + 0: pubKeys[9:10], }, TotalStake: big.NewInt(20 * nodePrice), } @@ -935,63 +934,18 @@ func TestStakingV4_ExactlyOneNodeInWaitingEveryEpoch(t *testing.T) { NodesToShufflePerShard: 1, }, { - EpochEnable: stakingV4Step3EnableEpoch, + EpochEnable: stakingV4Step3EnableEpoch, // epoch 3 MaxNumNodes: 10, NodesToShufflePerShard: 1, }, - }, - } - node := NewTestMetaProcessorWithCustomNodes(cfg) - node.EpochStartTrigger.SetRoundsPerEpoch(4) - - // 1. 
Check initial config is correct - currNodesConfig := node.NodesConfig - require.Len(t, getAllPubKeys(currNodesConfig.eligible), 8) - require.Len(t, getAllPubKeys(currNodesConfig.waiting), 4) - require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 4) - require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 2) - require.Len(t, currNodesConfig.eligible[0], 4) - require.Len(t, currNodesConfig.waiting[0], 2) - require.Empty(t, currNodesConfig.shuffledOut) - require.Empty(t, currNodesConfig.auction) - - node.Process(t, 7*4+2) -} - -func TestStakingV4_NotEnoughNodesShouldSendAuctionDirectlyToWaiting(t *testing.T) { - pubKeys := generateAddresses(0, 20) - - owner1 := "owner1" - owner1Stats := &OwnerStats{ - EligibleBlsKeys: map[uint32][][]byte{ - core.MetachainShardId: pubKeys[:4], - 0: pubKeys[4:8], - }, - WaitingBlsKeys: map[uint32][][]byte{ - core.MetachainShardId: pubKeys[8:9], - 0: pubKeys[9:10], - }, - TotalStake: big.NewInt(20 * nodePrice), - } - - cfg := &InitialNodesConfig{ - MetaConsensusGroupSize: 2, - ShardConsensusGroupSize: 2, - MinNumberOfEligibleShardNodes: 4, - MinNumberOfEligibleMetaNodes: 4, - NumOfShards: 1, - Owners: map[string]*OwnerStats{ - owner1: owner1Stats, - }, - MaxNodesChangeConfig: []config.MaxNodesChangeConfig{ { - EpochEnable: 0, + EpochEnable: 6, MaxNumNodes: 12, NodesToShufflePerShard: 1, }, { - EpochEnable: stakingV4Step3EnableEpoch, - MaxNumNodes: 10, + EpochEnable: 9, + MaxNumNodes: 12, NodesToShufflePerShard: 1, }, }, @@ -1001,7 +955,6 @@ func TestStakingV4_NotEnoughNodesShouldSendAuctionDirectlyToWaiting(t *testing.T // 1. Check initial config is correct currNodesConfig := node.NodesConfig - prevNodesConfig := currNodesConfig require.Len(t, getAllPubKeys(currNodesConfig.eligible), 8) require.Len(t, getAllPubKeys(currNodesConfig.waiting), 2) require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 4) @@ -1011,8 +964,39 @@ func TestStakingV4_NotEnoughNodesShouldSendAuctionDirectlyToWaiting(t *testing.T require.Empty(t, currNodesConfig.shuffledOut) require.Empty(t, currNodesConfig.auction) - // 2. 
Epoch = StakingV4Step1, configuration should be the same, nodes from eligible should be shuffled - node.Process(t, 6) + prevNodesConfig := currNodesConfig + epochs := uint32(0) + for epochs < 9 { + node.Process(t, 5) + + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 8) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 2) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 4) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 1) + require.Len(t, currNodesConfig.eligible[0], 4) + require.Len(t, currNodesConfig.waiting[0], 1) + require.Empty(t, currNodesConfig.shuffledOut) + require.Empty(t, currNodesConfig.auction) + + // Shuffled nodes previous eligible ones are sent to waiting and previous waiting list nodes are replacing shuffled nodes + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), 2) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.eligible), 6) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), getAllPubKeys(prevNodesConfig.eligible), 2) + + prevNodesConfig = currNodesConfig + epochs++ + } + + require.Equal(t, epochs, node.EpochStartTrigger.Epoch()) + + owner2Nodes := pubKeys[10:12] + node.ProcessStake(t, map[string]*NodesRegisterData{ + "owner2": { + BLSKeys: owner2Nodes, + TotalStake: big.NewInt(5 * nodePrice), + }, + }) currNodesConfig = node.NodesConfig require.Len(t, getAllPubKeys(currNodesConfig.eligible), 8) require.Len(t, getAllPubKeys(currNodesConfig.waiting), 2) @@ -1021,51 +1005,106 @@ func TestStakingV4_NotEnoughNodesShouldSendAuctionDirectlyToWaiting(t *testing.T require.Len(t, currNodesConfig.eligible[0], 4) require.Len(t, currNodesConfig.waiting[0], 1) require.Empty(t, currNodesConfig.shuffledOut) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, owner2Nodes) + + node.Process(t, 5) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 8) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 4) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 4) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 2) + require.Len(t, currNodesConfig.eligible[0], 4) + require.Len(t, currNodesConfig.waiting[0], 2) + require.Empty(t, currNodesConfig.shuffledOut) require.Empty(t, currNodesConfig.auction) // Shuffled nodes previous eligible ones are sent to waiting and previous waiting list nodes are replacing shuffled nodes requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), 2) requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.eligible), 6) requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), getAllPubKeys(prevNodesConfig.eligible), 2) + requireSliceContains(t, getAllPubKeys(currNodesConfig.waiting), owner2Nodes) prevNodesConfig = currNodesConfig + epochs = 10 + require.Equal(t, epochs, node.EpochStartTrigger.Epoch()) + for epochs < 13 { + node.Process(t, 5) - // 3. 
Epoch = StakingV4Step2, shuffled nodes from eligible are sent to auction, waiting list remains empty - node.Process(t, 5) - //currNodesConfig = node.NodesConfig - //require.Len(t, getAllPubKeys(currNodesConfig.eligible), 8) - //require.Len(t, getAllPubKeys(currNodesConfig.waiting), 0) - //require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 4) - //require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 0) - //require.Len(t, currNodesConfig.eligible[0], 4) - //require.Len(t, currNodesConfig.waiting[0], 0) - //require.Len(t, currNodesConfig.auction, 2) - //requireSameSliceDifferentOrder(t, currNodesConfig.auction, getAllPubKeys(currNodesConfig.shuffledOut)) - // - //// Shuffled nodes previous eligible ones are sent to waiting and previous waiting list nodes are replacing shuffled nodes - //requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), 2) - //requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.eligible), 6) - // - //prevNodesConfig = currNodesConfig - - // 4. Epoch = StakingV4Step3, auction nodes from previous epoch should be sent directly to waiting list, since waiting list was empty - node.Process(t, 5) + currNodesConfig = node.NodesConfig + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 8) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 4) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 4) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 2) + require.Len(t, currNodesConfig.eligible[0], 4) + require.Len(t, currNodesConfig.waiting[0], 2) + require.Empty(t, currNodesConfig.shuffledOut) + require.Empty(t, currNodesConfig.auction) + + // Shuffled nodes previous eligible ones are sent to waiting and previous waiting list nodes are replacing shuffled nodes + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), 2) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.eligible), 6) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), getAllPubKeys(prevNodesConfig.eligible), 2) + + prevNodesConfig = currNodesConfig + epochs++ + } - /*Test fails from here, should work with fix + owner3Nodes := pubKeys[12:14] + node.ProcessStake(t, map[string]*NodesRegisterData{ + "owner3": { + BLSKeys: owner3Nodes, + TotalStake: big.NewInt(5 * nodePrice), + }, + }) currNodesConfig = node.NodesConfig require.Len(t, getAllPubKeys(currNodesConfig.eligible), 8) - require.Len(t, getAllPubKeys(currNodesConfig.waiting), 0) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 4) require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 4) - require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 0) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 2) require.Len(t, currNodesConfig.eligible[0], 4) - require.Len(t, currNodesConfig.waiting[0], 0) - require.Len(t, currNodesConfig.auction, 2) - requireSameSliceDifferentOrder(t, currNodesConfig.auction, getAllPubKeys(currNodesConfig.shuffledOut)) + require.Len(t, currNodesConfig.waiting[0], 2) + require.Empty(t, currNodesConfig.shuffledOut) + requireSameSliceDifferentOrder(t, currNodesConfig.auction, owner3Nodes) - // Shuffled nodes previous eligible ones are sent to waiting and previous waiting list nodes are replacing shuffled nodes - requireSliceContainsNumOfElements(t, 
getAllPubKeys(currNodesConfig.eligible), prevNodesConfig.auction, 2) - requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.eligible), 6) - */ + node.Process(t, 5) + prevNodesConfig = node.NodesConfig + epochs = 14 + require.Equal(t, epochs, node.EpochStartTrigger.Epoch()) + for epochs < 18 { + + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 8) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 4) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 4) + require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 2) + require.Len(t, currNodesConfig.eligible[0], 4) + require.Len(t, currNodesConfig.waiting[0], 2) + require.Len(t, currNodesConfig.auction, 2) + + node.Process(t, 5) + + currNodesConfig = node.NodesConfig + // Nodes which are now in eligible are from previous waiting list + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), 2) + // New auction list does not contain nodes from previous auction list, since all of them have been distributed to waiting + requireSliceContainsNumOfElements(t, currNodesConfig.auction, prevNodesConfig.auction, 0) + + // All shuffled out are from previous eligible config + requireMapContains(t, prevNodesConfig.eligible, getAllPubKeys(currNodesConfig.shuffledOut)) + + // All shuffled out are now in auction + requireSliceContains(t, currNodesConfig.auction, getAllPubKeys(currNodesConfig.shuffledOut)) + + // All nodes which have been selected from previous auction list are now in waiting + requireSliceContains(t, getAllPubKeys(currNodesConfig.waiting), prevNodesConfig.auction) + + prevNodesConfig = currNodesConfig + epochs++ + } + + node.ProcessUnStake(t, map[string][][]byte{ + "owner3": {owner3Nodes[0]}, + }) + node.Process(t, 5) node.Process(t, 5) } diff --git a/sharding/nodesCoordinator/hashValidatorShuffler.go b/sharding/nodesCoordinator/hashValidatorShuffler.go index 635de1f0a6e..e3f97970077 100644 --- a/sharding/nodesCoordinator/hashValidatorShuffler.go +++ b/sharding/nodesCoordinator/hashValidatorShuffler.go @@ -319,7 +319,7 @@ func shuffleNodes(arg shuffleNodesArg) (*ResUpdateNodes, error) { log.Warn("distributeValidators newNodes failed", "error", err) } - if arg.flagStakingV4Step3 && !distributeShuffledToWaitingInStakingV4 { + if arg.flagStakingV4Step3 { log.Debug("distributing selected nodes from auction to waiting", "num auction nodes", len(arg.auction), "num waiting nodes", numNewWaiting) @@ -655,6 +655,16 @@ func moveNodesToMap(destination map[uint32][]Validator, source map[uint32][]Vali return nil } +func getNumPubKeys(shardValidatorsMap map[uint32][]Validator) uint32 { + numPubKeys := uint32(0) + + for _, validatorsInShard := range shardValidatorsMap { + numPubKeys += uint32(len(validatorsInShard)) + } + + return numPubKeys +} + // moveMaxNumNodesToMap moves the validators in the source list to the corresponding destination list // but adding just enough nodes so that at most the number of nodes is kept in the destination list // The parameter maxNodesToMove is a limiting factor and should limit the number of nodes diff --git a/sharding/nodesCoordinator/hashValidatorShufflerWithAuction.go b/sharding/nodesCoordinator/hashValidatorShufflerWithAuction.go deleted file mode 100644 index 77edafcc52a..00000000000 --- a/sharding/nodesCoordinator/hashValidatorShufflerWithAuction.go +++ /dev/null @@ -1,11 +0,0 @@ -package nodesCoordinator - -func getNumPubKeys(shardValidatorsMap 
map[uint32][]Validator) uint32 { - numPubKeys := uint32(0) - - for _, validatorsInShard := range shardValidatorsMap { - numPubKeys += uint32(len(validatorsInShard)) - } - - return numPubKeys -} From 9f27284c615dbd8d7ad3a707049f70ef8b7dad27 Mon Sep 17 00:00:00 2001 From: MariusC Date: Wed, 8 Mar 2023 18:19:07 +0200 Subject: [PATCH 07/14] FEAT: Extend edge case testing --- integrationTests/vm/staking/stakingV4_test.go | 112 +++++++++++++++--- 1 file changed, 93 insertions(+), 19 deletions(-) diff --git a/integrationTests/vm/staking/stakingV4_test.go b/integrationTests/vm/staking/stakingV4_test.go index 7864de8974f..8e85b110fc9 100644 --- a/integrationTests/vm/staking/stakingV4_test.go +++ b/integrationTests/vm/staking/stakingV4_test.go @@ -902,7 +902,7 @@ func TestStakingV4_JailAndUnJailNodes(t *testing.T) { require.Empty(t, node.NodesConfig.queue) } -func TestStakingV4_NotEnoughNodesShouldSendAuctionDirectlyToWaiting(t *testing.T) { +func TestStakingV4_DifferentEdgeCasesWithNotEnoughNodesInWaitingShouldSendShuffledToToWaiting(t *testing.T) { pubKeys := generateAddresses(0, 20) owner1 := "owner1" @@ -943,11 +943,6 @@ func TestStakingV4_NotEnoughNodesShouldSendAuctionDirectlyToWaiting(t *testing.T MaxNumNodes: 12, NodesToShufflePerShard: 1, }, - { - EpochEnable: 9, - MaxNumNodes: 12, - NodesToShufflePerShard: 1, - }, }, } node := NewTestMetaProcessorWithCustomNodes(cfg) @@ -965,8 +960,15 @@ func TestStakingV4_NotEnoughNodesShouldSendAuctionDirectlyToWaiting(t *testing.T require.Empty(t, currNodesConfig.auction) prevNodesConfig := currNodesConfig - epochs := uint32(0) - for epochs < 9 { + epoch := uint32(0) + + // During these 9 epochs, we will always have: + // - 10 activeNodes (8 eligible + 2 waiting) + // - 1 node to shuffle out per shard + // Meanwhile, maxNumNodes changes from 12-10-12 + // Since activeNodes <= maxNumNodes, shuffled out nodes will always be sent directly to waiting list, + // instead of auction(there is no reason to send them to auction, they will be selected anyway) + for epoch < 9 { node.Process(t, 5) currNodesConfig = node.NodesConfig @@ -985,11 +987,15 @@ func TestStakingV4_NotEnoughNodesShouldSendAuctionDirectlyToWaiting(t *testing.T requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), getAllPubKeys(prevNodesConfig.eligible), 2) prevNodesConfig = currNodesConfig - epochs++ + epoch++ } - require.Equal(t, epochs, node.EpochStartTrigger.Epoch()) + require.Equal(t, epoch, node.EpochStartTrigger.Epoch()) + // Epoch = 9 with: + // - activeNodes = 10 + // - maxNumNodes = 12 + // Owner2 stakes 2 nodes, which should be initially sent to auction list owner2Nodes := pubKeys[10:12] node.ProcessStake(t, map[string]*NodesRegisterData{ "owner2": { @@ -1007,6 +1013,10 @@ func TestStakingV4_NotEnoughNodesShouldSendAuctionDirectlyToWaiting(t *testing.T require.Empty(t, currNodesConfig.shuffledOut) requireSameSliceDifferentOrder(t, currNodesConfig.auction, owner2Nodes) + // Epoch = 10 with: + // - activeNodes = 12 + // - maxNumNodes = 12 + // Owner2's new nodes are selected from auction and distributed to waiting list node.Process(t, 5) currNodesConfig = node.NodesConfig require.Len(t, getAllPubKeys(currNodesConfig.eligible), 8) @@ -1024,10 +1034,14 @@ func TestStakingV4_NotEnoughNodesShouldSendAuctionDirectlyToWaiting(t *testing.T requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), getAllPubKeys(prevNodesConfig.eligible), 2) requireSliceContains(t, getAllPubKeys(currNodesConfig.waiting), owner2Nodes) + // During epochs 10-13, we will 
have: + // - activeNodes = 12 + // - maxNumNodes = 12 + // Since activeNodes == maxNumNodes, shuffled out nodes will always be sent directly to waiting list, instead of auction + epoch = 10 + require.Equal(t, epoch, node.EpochStartTrigger.Epoch()) prevNodesConfig = currNodesConfig - epochs = 10 - require.Equal(t, epochs, node.EpochStartTrigger.Epoch()) - for epochs < 13 { + for epoch < 13 { node.Process(t, 5) currNodesConfig = node.NodesConfig @@ -1046,9 +1060,13 @@ func TestStakingV4_NotEnoughNodesShouldSendAuctionDirectlyToWaiting(t *testing.T requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), getAllPubKeys(prevNodesConfig.eligible), 2) prevNodesConfig = currNodesConfig - epochs++ + epoch++ } + // Epoch = 13 with: + // - activeNodes = 12 + // - maxNumNodes = 12 + // Owner3 stakes 2 nodes, which should be initially sent to auction list owner3Nodes := pubKeys[12:14] node.ProcessStake(t, map[string]*NodesRegisterData{ "owner3": { @@ -1066,11 +1084,15 @@ func TestStakingV4_NotEnoughNodesShouldSendAuctionDirectlyToWaiting(t *testing.T require.Empty(t, currNodesConfig.shuffledOut) requireSameSliceDifferentOrder(t, currNodesConfig.auction, owner3Nodes) + // During epochs 14-18, we will have: + // - activeNodes = 14 + // - maxNumNodes = 12 + // Since activeNodes > maxNumNodes, shuffled out nodes (2) will be sent to auction list node.Process(t, 5) prevNodesConfig = node.NodesConfig - epochs = 14 - require.Equal(t, epochs, node.EpochStartTrigger.Epoch()) - for epochs < 18 { + epoch = 14 + require.Equal(t, epoch, node.EpochStartTrigger.Epoch()) + for epoch < 18 { require.Len(t, getAllPubKeys(currNodesConfig.eligible), 8) require.Len(t, getAllPubKeys(currNodesConfig.waiting), 4) @@ -1099,12 +1121,64 @@ func TestStakingV4_NotEnoughNodesShouldSendAuctionDirectlyToWaiting(t *testing.T requireSliceContains(t, getAllPubKeys(currNodesConfig.waiting), prevNodesConfig.auction) prevNodesConfig = currNodesConfig - epochs++ + epoch++ } + // Epoch = 18, with: + // - activeNodes = 14 + // - maxNumNodes = 12 + // Owner3 unStakes one of his nodes node.ProcessUnStake(t, map[string][][]byte{ "owner3": {owner3Nodes[0]}, }) + + // Epoch = 19, with: + // - activeNodes = 13 + // - maxNumNodes = 12 + // Owner3's unStaked node is now leaving node.Process(t, 5) - node.Process(t, 5) + currNodesConfig = node.NodesConfig + require.Len(t, currNodesConfig.leaving, 1) + requireMapContains(t, currNodesConfig.leaving, [][]byte{owner3Nodes[0]}) + + epoch = 19 + require.Equal(t, epoch, node.EpochStartTrigger.Epoch()) + prevNodesConfig = node.NodesConfig + require.Equal(t, epoch, node.EpochStartTrigger.Epoch()) + + // During epochs 19-23, we will have: + // - activeNodes = 13 + // - maxNumNodes = 12 + // Since activeNodes > maxNumNodes: + // - shuffled out nodes (2) will be sent to auction list + // - waiting lists will be unbalanced (3 in total: 1 + 2 per shard) + // - no node will spend extra epochs in eligible/waiting, since waiting lists will always be refilled + for epoch < 23 { + require.Len(t, getAllPubKeys(currNodesConfig.eligible), 8) + require.Len(t, getAllPubKeys(currNodesConfig.waiting), 3) + require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 4) + require.Len(t, currNodesConfig.eligible[0], 4) + require.Len(t, currNodesConfig.auction, 2) + + node.Process(t, 5) + + currNodesConfig = node.NodesConfig + // Nodes which are now in eligible are from previous waiting list + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), 
2) + + // New auction list does not contain nodes from previous auction list, since all of them have been distributed to waiting + requireSliceContainsNumOfElements(t, currNodesConfig.auction, prevNodesConfig.auction, 0) + + // All shuffled out are from previous eligible config + requireMapContains(t, prevNodesConfig.eligible, getAllPubKeys(currNodesConfig.shuffledOut)) + + // All shuffled out are now in auction + requireSliceContains(t, currNodesConfig.auction, getAllPubKeys(currNodesConfig.shuffledOut)) + + // All nodes which have been selected from previous auction list are now in waiting + requireSliceContains(t, getAllPubKeys(currNodesConfig.waiting), prevNodesConfig.auction) + + prevNodesConfig = currNodesConfig + epoch++ + } } From 8f7f754be052a1dc27c53cbbe1e67d01ec92fa53 Mon Sep 17 00:00:00 2001 From: MariusC Date: Thu, 9 Mar 2023 10:33:57 +0200 Subject: [PATCH 08/14] CLN: Comments --- integrationTests/vm/staking/stakingV4_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/integrationTests/vm/staking/stakingV4_test.go b/integrationTests/vm/staking/stakingV4_test.go index 8e85b110fc9..9698bbe5ab1 100644 --- a/integrationTests/vm/staking/stakingV4_test.go +++ b/integrationTests/vm/staking/stakingV4_test.go @@ -981,7 +981,7 @@ func TestStakingV4_DifferentEdgeCasesWithNotEnoughNodesInWaitingShouldSendShuffl require.Empty(t, currNodesConfig.shuffledOut) require.Empty(t, currNodesConfig.auction) - // Shuffled nodes previous eligible ones are sent to waiting and previous waiting list nodes are replacing shuffled nodes + // Shuffled nodes from previous eligible are sent to waiting and previous waiting list nodes are replacing shuffled nodes requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), 2) requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.eligible), 6) requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), getAllPubKeys(prevNodesConfig.eligible), 2) @@ -1028,7 +1028,7 @@ func TestStakingV4_DifferentEdgeCasesWithNotEnoughNodesInWaitingShouldSendShuffl require.Empty(t, currNodesConfig.shuffledOut) require.Empty(t, currNodesConfig.auction) - // Shuffled nodes previous eligible ones are sent to waiting and previous waiting list nodes are replacing shuffled nodes + // Shuffled nodes from previous eligible are sent to waiting and previous waiting list nodes are replacing shuffled nodes requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), 2) requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.eligible), 6) requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), getAllPubKeys(prevNodesConfig.eligible), 2) @@ -1054,7 +1054,7 @@ func TestStakingV4_DifferentEdgeCasesWithNotEnoughNodesInWaitingShouldSendShuffl require.Empty(t, currNodesConfig.shuffledOut) require.Empty(t, currNodesConfig.auction) - // Shuffled nodes previous eligible ones are sent to waiting and previous waiting list nodes are replacing shuffled nodes + // Shuffled nodes from previous eligible are sent to waiting and previous waiting list nodes are replacing shuffled nodes requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), 2) requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.eligible), 6) 
requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), getAllPubKeys(prevNodesConfig.eligible), 2) From b56b74c66a5ffcc4045cd1b2caedcfb1d4fc78bd Mon Sep 17 00:00:00 2001 From: MariusC Date: Thu, 9 Mar 2023 10:48:14 +0200 Subject: [PATCH 09/14] FIX: Typo --- sharding/nodesCoordinator/hashValidatorShuffler.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sharding/nodesCoordinator/hashValidatorShuffler.go b/sharding/nodesCoordinator/hashValidatorShuffler.go index e3f97970077..4b2b67f133c 100644 --- a/sharding/nodesCoordinator/hashValidatorShuffler.go +++ b/sharding/nodesCoordinator/hashValidatorShuffler.go @@ -298,7 +298,7 @@ func shuffleNodes(arg shuffleNodesArg) (*ResUpdateNodes, error) { distributeShuffledToWaitingInStakingV4 := false if totalNodes <= maxNumNodes { log.Warn("num of total nodes in waiting is too low after shuffling; will distribute "+ - "shuffled out nodes directly in waiting and skip sending them to auction", + "shuffled out nodes directly to waiting and skip sending them to auction", "numShuffled", numShuffled, "numNewEligible", numNewEligible, "numSelectedAuction", numSelectedAuction, From db83ac23c6c008314390caea6cb7a253fdc335b6 Mon Sep 17 00:00:00 2001 From: MariusC Date: Mon, 13 Mar 2023 11:34:41 +0200 Subject: [PATCH 10/14] FIX: Refactor integration tests --- integrationTests/vm/staking/stakingV4_test.go | 206 ++++++++---------- 1 file changed, 90 insertions(+), 116 deletions(-) diff --git a/integrationTests/vm/staking/stakingV4_test.go b/integrationTests/vm/staking/stakingV4_test.go index 9698bbe5ab1..ccf4f17a413 100644 --- a/integrationTests/vm/staking/stakingV4_test.go +++ b/integrationTests/vm/staking/stakingV4_test.go @@ -194,21 +194,7 @@ func TestStakingV4(t *testing.T) { require.Empty(t, newNodeConfig.queue) require.Empty(t, newNodeConfig.leaving) - // 320 nodes which are now in eligible are from previous waiting list - requireSliceContainsNumOfElements(t, getAllPubKeys(newNodeConfig.eligible), getAllPubKeys(prevConfig.waiting), numOfShuffledOut) - - // New auction list also contains unselected nodes from previous auction list - requireSliceContainsNumOfElements(t, newNodeConfig.auction, prevConfig.auction, numOfUnselectedNodesFromAuction) - - // All shuffled out are from previous eligible config - requireMapContains(t, prevConfig.eligible, getAllPubKeys(newNodeConfig.shuffledOut)) - - // All shuffled out are now in auction - requireSliceContains(t, newNodeConfig.auction, getAllPubKeys(newNodeConfig.shuffledOut)) - - // 320 nodes which have been selected from previous auction list are now in waiting - requireSliceContainsNumOfElements(t, getAllPubKeys(newNodeConfig.waiting), prevConfig.auction, numOfSelectedNodesFromAuction) - + checkStakingV4EpochChangeFlow(t, newNodeConfig, prevConfig, numOfShuffledOut, numOfUnselectedNodesFromAuction, numOfSelectedNodesFromAuction) prevConfig = newNodeConfig epochs++ } @@ -949,18 +935,18 @@ func TestStakingV4_DifferentEdgeCasesWithNotEnoughNodesInWaitingShouldSendShuffl node.EpochStartTrigger.SetRoundsPerEpoch(4) // 1. 
Check initial config is correct + expectedNodesNum := &configNum{ + eligible: map[uint32]int{ + core.MetachainShardId: 4, + 0: 4, + }, + waiting: map[uint32]int{ + core.MetachainShardId: 1, + 0: 1, + }, + } currNodesConfig := node.NodesConfig - require.Len(t, getAllPubKeys(currNodesConfig.eligible), 8) - require.Len(t, getAllPubKeys(currNodesConfig.waiting), 2) - require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 4) - require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 1) - require.Len(t, currNodesConfig.eligible[0], 4) - require.Len(t, currNodesConfig.waiting[0], 1) - require.Empty(t, currNodesConfig.shuffledOut) - require.Empty(t, currNodesConfig.auction) - - prevNodesConfig := currNodesConfig - epoch := uint32(0) + checkConfig(t, expectedNodesNum, currNodesConfig) // During these 9 epochs, we will always have: // - 10 activeNodes (8 eligible + 2 waiting) @@ -968,23 +954,16 @@ func TestStakingV4_DifferentEdgeCasesWithNotEnoughNodesInWaitingShouldSendShuffl // Meanwhile, maxNumNodes changes from 12-10-12 // Since activeNodes <= maxNumNodes, shuffled out nodes will always be sent directly to waiting list, // instead of auction(there is no reason to send them to auction, they will be selected anyway) + epoch := uint32(0) + numOfShuffledOut := 2 + numRemainingEligible := 6 + prevNodesConfig := currNodesConfig for epoch < 9 { node.Process(t, 5) currNodesConfig = node.NodesConfig - require.Len(t, getAllPubKeys(currNodesConfig.eligible), 8) - require.Len(t, getAllPubKeys(currNodesConfig.waiting), 2) - require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 4) - require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 1) - require.Len(t, currNodesConfig.eligible[0], 4) - require.Len(t, currNodesConfig.waiting[0], 1) - require.Empty(t, currNodesConfig.shuffledOut) - require.Empty(t, currNodesConfig.auction) - - // Shuffled nodes from previous eligible are sent to waiting and previous waiting list nodes are replacing shuffled nodes - requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), 2) - requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.eligible), 6) - requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), getAllPubKeys(prevNodesConfig.eligible), 2) + checkConfig(t, expectedNodesNum, currNodesConfig) + checkShuffledOutNodes(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numRemainingEligible) prevNodesConfig = currNodesConfig epoch++ @@ -1004,13 +983,8 @@ func TestStakingV4_DifferentEdgeCasesWithNotEnoughNodesInWaitingShouldSendShuffl }, }) currNodesConfig = node.NodesConfig - require.Len(t, getAllPubKeys(currNodesConfig.eligible), 8) - require.Len(t, getAllPubKeys(currNodesConfig.waiting), 2) - require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 4) - require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 1) - require.Len(t, currNodesConfig.eligible[0], 4) - require.Len(t, currNodesConfig.waiting[0], 1) - require.Empty(t, currNodesConfig.shuffledOut) + expectedNodesNum.auction = 2 + checkConfig(t, expectedNodesNum, currNodesConfig) requireSameSliceDifferentOrder(t, currNodesConfig.auction, owner2Nodes) // Epoch = 10 with: @@ -1019,19 +993,11 @@ func TestStakingV4_DifferentEdgeCasesWithNotEnoughNodesInWaitingShouldSendShuffl // Owner2's new nodes are selected from auction and distributed to waiting list node.Process(t, 5) currNodesConfig = node.NodesConfig - require.Len(t, 
getAllPubKeys(currNodesConfig.eligible), 8) - require.Len(t, getAllPubKeys(currNodesConfig.waiting), 4) - require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 4) - require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 2) - require.Len(t, currNodesConfig.eligible[0], 4) - require.Len(t, currNodesConfig.waiting[0], 2) - require.Empty(t, currNodesConfig.shuffledOut) - require.Empty(t, currNodesConfig.auction) - - // Shuffled nodes from previous eligible are sent to waiting and previous waiting list nodes are replacing shuffled nodes - requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), 2) - requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.eligible), 6) - requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), getAllPubKeys(prevNodesConfig.eligible), 2) + expectedNodesNum.waiting[core.MetachainShardId]++ + expectedNodesNum.waiting[0]++ + expectedNodesNum.auction = 0 + checkConfig(t, expectedNodesNum, currNodesConfig) + checkShuffledOutNodes(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numRemainingEligible) requireSliceContains(t, getAllPubKeys(currNodesConfig.waiting), owner2Nodes) // During epochs 10-13, we will have: @@ -1045,19 +1011,8 @@ func TestStakingV4_DifferentEdgeCasesWithNotEnoughNodesInWaitingShouldSendShuffl node.Process(t, 5) currNodesConfig = node.NodesConfig - require.Len(t, getAllPubKeys(currNodesConfig.eligible), 8) - require.Len(t, getAllPubKeys(currNodesConfig.waiting), 4) - require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 4) - require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 2) - require.Len(t, currNodesConfig.eligible[0], 4) - require.Len(t, currNodesConfig.waiting[0], 2) - require.Empty(t, currNodesConfig.shuffledOut) - require.Empty(t, currNodesConfig.auction) - - // Shuffled nodes from previous eligible are sent to waiting and previous waiting list nodes are replacing shuffled nodes - requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), 2) - requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.eligible), 6) - requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), getAllPubKeys(prevNodesConfig.eligible), 2) + checkConfig(t, expectedNodesNum, currNodesConfig) + checkShuffledOutNodes(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numRemainingEligible) prevNodesConfig = currNodesConfig epoch++ @@ -1075,13 +1030,8 @@ func TestStakingV4_DifferentEdgeCasesWithNotEnoughNodesInWaitingShouldSendShuffl }, }) currNodesConfig = node.NodesConfig - require.Len(t, getAllPubKeys(currNodesConfig.eligible), 8) - require.Len(t, getAllPubKeys(currNodesConfig.waiting), 4) - require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 4) - require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 2) - require.Len(t, currNodesConfig.eligible[0], 4) - require.Len(t, currNodesConfig.waiting[0], 2) - require.Empty(t, currNodesConfig.shuffledOut) + expectedNodesNum.auction = 2 + checkConfig(t, expectedNodesNum, currNodesConfig) requireSameSliceDifferentOrder(t, currNodesConfig.auction, owner3Nodes) // During epochs 14-18, we will have: @@ -1092,33 +1042,15 @@ func TestStakingV4_DifferentEdgeCasesWithNotEnoughNodesInWaitingShouldSendShuffl prevNodesConfig = node.NodesConfig epoch = 14 require.Equal(t, epoch, node.EpochStartTrigger.Epoch()) - 
for epoch < 18 { - require.Len(t, getAllPubKeys(currNodesConfig.eligible), 8) - require.Len(t, getAllPubKeys(currNodesConfig.waiting), 4) - require.Len(t, currNodesConfig.eligible[core.MetachainShardId], 4) - require.Len(t, currNodesConfig.waiting[core.MetachainShardId], 2) - require.Len(t, currNodesConfig.eligible[0], 4) - require.Len(t, currNodesConfig.waiting[0], 2) - require.Len(t, currNodesConfig.auction, 2) + numOfUnselectedNodesFromAuction := 0 + numOfSelectedNodesFromAuction := 2 + for epoch < 18 { + checkConfig(t, expectedNodesNum, currNodesConfig) node.Process(t, 5) - currNodesConfig = node.NodesConfig - // Nodes which are now in eligible are from previous waiting list - requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), 2) - - // New auction list does not contain nodes from previous auction list, since all of them have been distributed to waiting - requireSliceContainsNumOfElements(t, currNodesConfig.auction, prevNodesConfig.auction, 0) - - // All shuffled out are from previous eligible config - requireMapContains(t, prevNodesConfig.eligible, getAllPubKeys(currNodesConfig.shuffledOut)) - - // All shuffled out are now in auction - requireSliceContains(t, currNodesConfig.auction, getAllPubKeys(currNodesConfig.shuffledOut)) - - // All nodes which have been selected from previous auction list are now in waiting - requireSliceContains(t, getAllPubKeys(currNodesConfig.waiting), prevNodesConfig.auction) + checkStakingV4EpochChangeFlow(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numOfUnselectedNodesFromAuction, numOfSelectedNodesFromAuction) prevNodesConfig = currNodesConfig epoch++ @@ -1143,8 +1075,6 @@ func TestStakingV4_DifferentEdgeCasesWithNotEnoughNodesInWaitingShouldSendShuffl epoch = 19 require.Equal(t, epoch, node.EpochStartTrigger.Epoch()) - prevNodesConfig = node.NodesConfig - require.Equal(t, epoch, node.EpochStartTrigger.Epoch()) // During epochs 19-23, we will have: // - activeNodes = 13 @@ -1153,6 +1083,7 @@ func TestStakingV4_DifferentEdgeCasesWithNotEnoughNodesInWaitingShouldSendShuffl // - shuffled out nodes (2) will be sent to auction list // - waiting lists will be unbalanced (3 in total: 1 + 2 per shard) // - no node will spend extra epochs in eligible/waiting, since waiting lists will always be refilled + prevNodesConfig = node.NodesConfig for epoch < 23 { require.Len(t, getAllPubKeys(currNodesConfig.eligible), 8) require.Len(t, getAllPubKeys(currNodesConfig.waiting), 3) @@ -1163,22 +1094,65 @@ func TestStakingV4_DifferentEdgeCasesWithNotEnoughNodesInWaitingShouldSendShuffl node.Process(t, 5) currNodesConfig = node.NodesConfig - // Nodes which are now in eligible are from previous waiting list - requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), 2) + checkStakingV4EpochChangeFlow(t, currNodesConfig, prevNodesConfig, numOfShuffledOut, numOfUnselectedNodesFromAuction, numOfSelectedNodesFromAuction) - // New auction list does not contain nodes from previous auction list, since all of them have been distributed to waiting - requireSliceContainsNumOfElements(t, currNodesConfig.auction, prevNodesConfig.auction, 0) + prevNodesConfig = currNodesConfig + epoch++ + } +} - // All shuffled out are from previous eligible config - requireMapContains(t, prevNodesConfig.eligible, getAllPubKeys(currNodesConfig.shuffledOut)) +type configNum struct { + eligible map[uint32]int + waiting map[uint32]int + leaving map[uint32]int + shuffledOut 
map[uint32]int + queue int + auction int + new int +} - // All shuffled out are now in auction - requireSliceContains(t, currNodesConfig.auction, getAllPubKeys(currNodesConfig.shuffledOut)) +func checkConfig(t *testing.T, expectedConfig *configNum, nodesConfig nodesConfig) { + checkNumNodes(t, expectedConfig.eligible, nodesConfig.eligible) + checkNumNodes(t, expectedConfig.waiting, nodesConfig.waiting) + checkNumNodes(t, expectedConfig.leaving, nodesConfig.leaving) + checkNumNodes(t, expectedConfig.shuffledOut, nodesConfig.shuffledOut) - // All nodes which have been selected from previous auction list are now in waiting - requireSliceContains(t, getAllPubKeys(currNodesConfig.waiting), prevNodesConfig.auction) + require.Equal(t, expectedConfig.queue, len(nodesConfig.queue)) + require.Equal(t, expectedConfig.auction, len(nodesConfig.auction)) + require.Equal(t, expectedConfig.new, len(nodesConfig.new)) +} - prevNodesConfig = currNodesConfig - epoch++ +func checkNumNodes(t *testing.T, expectedNumNodes map[uint32]int, actualNodes map[uint32][][]byte) { + for shardID, numNodesInShard := range expectedNumNodes { + require.Equal(t, numNodesInShard, len(actualNodes[shardID])) } } + +func checkShuffledOutNodes(t *testing.T, currNodesConfig, prevNodesConfig nodesConfig, numShuffledOutNodes int, numRemainingEligible int) { + // Shuffled nodes from previous eligible are sent to waiting and previous waiting list nodes are replacing shuffled nodes + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), numShuffledOutNodes) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.eligible), numRemainingEligible) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), getAllPubKeys(prevNodesConfig.eligible), numShuffledOutNodes) +} + +func checkStakingV4EpochChangeFlow( + t *testing.T, + currNodesConfig, prevNodesConfig nodesConfig, + numOfShuffledOut, numOfUnselectedNodesFromAuction, numOfSelectedNodesFromAuction int) { + + // Nodes which are now in eligible are from previous waiting list + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), numOfShuffledOut) + + // New auction list also contains unselected nodes from previous auction list + requireSliceContainsNumOfElements(t, currNodesConfig.auction, prevNodesConfig.auction, numOfUnselectedNodesFromAuction) + + // All shuffled out are from previous eligible config + requireMapContains(t, prevNodesConfig.eligible, getAllPubKeys(currNodesConfig.shuffledOut)) + + // All shuffled out are now in auction + requireSliceContains(t, currNodesConfig.auction, getAllPubKeys(currNodesConfig.shuffledOut)) + + // Nodes which have been selected from previous auction list are now in waiting + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), prevNodesConfig.auction, numOfSelectedNodesFromAuction) + +} From 77b331d96c3ecb9171a33fda3113849c02113086 Mon Sep 17 00:00:00 2001 From: MariusC Date: Mon, 13 Mar 2023 11:37:07 +0200 Subject: [PATCH 11/14] CLN: Move test functionalities --- integrationTests/vm/staking/stakingV4_test.go | 111 +++++++++--------- 1 file changed, 55 insertions(+), 56 deletions(-) diff --git a/integrationTests/vm/staking/stakingV4_test.go b/integrationTests/vm/staking/stakingV4_test.go index ccf4f17a413..92ab77ff24a 100644 --- a/integrationTests/vm/staking/stakingV4_test.go +++ 
b/integrationTests/vm/staking/stakingV4_test.go @@ -106,6 +106,61 @@ func unStake(t *testing.T, owner []byte, accountsDB state.AccountsAdapter, marsh require.Nil(t, err) } +type configNum struct { + eligible map[uint32]int + waiting map[uint32]int + leaving map[uint32]int + shuffledOut map[uint32]int + queue int + auction int + new int +} + +func checkConfig(t *testing.T, expectedConfig *configNum, nodesConfig nodesConfig) { + checkNumNodes(t, expectedConfig.eligible, nodesConfig.eligible) + checkNumNodes(t, expectedConfig.waiting, nodesConfig.waiting) + checkNumNodes(t, expectedConfig.leaving, nodesConfig.leaving) + checkNumNodes(t, expectedConfig.shuffledOut, nodesConfig.shuffledOut) + + require.Equal(t, expectedConfig.queue, len(nodesConfig.queue)) + require.Equal(t, expectedConfig.auction, len(nodesConfig.auction)) + require.Equal(t, expectedConfig.new, len(nodesConfig.new)) +} + +func checkNumNodes(t *testing.T, expectedNumNodes map[uint32]int, actualNodes map[uint32][][]byte) { + for shardID, numNodesInShard := range expectedNumNodes { + require.Equal(t, numNodesInShard, len(actualNodes[shardID])) + } +} + +func checkShuffledOutNodes(t *testing.T, currNodesConfig, prevNodesConfig nodesConfig, numShuffledOutNodes int, numRemainingEligible int) { + // Shuffled nodes from previous eligible are sent to waiting and previous waiting list nodes are replacing shuffled nodes + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), numShuffledOutNodes) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.eligible), numRemainingEligible) + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), getAllPubKeys(prevNodesConfig.eligible), numShuffledOutNodes) +} + +func checkStakingV4EpochChangeFlow( + t *testing.T, + currNodesConfig, prevNodesConfig nodesConfig, + numOfShuffledOut, numOfUnselectedNodesFromAuction, numOfSelectedNodesFromAuction int) { + + // Nodes which are now in eligible are from previous waiting list + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), numOfShuffledOut) + + // New auction list also contains unselected nodes from previous auction list + requireSliceContainsNumOfElements(t, currNodesConfig.auction, prevNodesConfig.auction, numOfUnselectedNodesFromAuction) + + // All shuffled out are from previous eligible config + requireMapContains(t, prevNodesConfig.eligible, getAllPubKeys(currNodesConfig.shuffledOut)) + + // All shuffled out are now in auction + requireSliceContains(t, currNodesConfig.auction, getAllPubKeys(currNodesConfig.shuffledOut)) + + // Nodes which have been selected from previous auction list are now in waiting + requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), prevNodesConfig.auction, numOfSelectedNodesFromAuction) +} + func TestStakingV4(t *testing.T) { numOfMetaNodes := uint32(400) numOfShards := uint32(3) @@ -1100,59 +1155,3 @@ func TestStakingV4_DifferentEdgeCasesWithNotEnoughNodesInWaitingShouldSendShuffl epoch++ } } - -type configNum struct { - eligible map[uint32]int - waiting map[uint32]int - leaving map[uint32]int - shuffledOut map[uint32]int - queue int - auction int - new int -} - -func checkConfig(t *testing.T, expectedConfig *configNum, nodesConfig nodesConfig) { - checkNumNodes(t, expectedConfig.eligible, nodesConfig.eligible) - checkNumNodes(t, expectedConfig.waiting, nodesConfig.waiting) - 
checkNumNodes(t, expectedConfig.leaving, nodesConfig.leaving) - checkNumNodes(t, expectedConfig.shuffledOut, nodesConfig.shuffledOut) - - require.Equal(t, expectedConfig.queue, len(nodesConfig.queue)) - require.Equal(t, expectedConfig.auction, len(nodesConfig.auction)) - require.Equal(t, expectedConfig.new, len(nodesConfig.new)) -} - -func checkNumNodes(t *testing.T, expectedNumNodes map[uint32]int, actualNodes map[uint32][][]byte) { - for shardID, numNodesInShard := range expectedNumNodes { - require.Equal(t, numNodesInShard, len(actualNodes[shardID])) - } -} - -func checkShuffledOutNodes(t *testing.T, currNodesConfig, prevNodesConfig nodesConfig, numShuffledOutNodes int, numRemainingEligible int) { - // Shuffled nodes from previous eligible are sent to waiting and previous waiting list nodes are replacing shuffled nodes - requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), numShuffledOutNodes) - requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.eligible), numRemainingEligible) - requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), getAllPubKeys(prevNodesConfig.eligible), numShuffledOutNodes) -} - -func checkStakingV4EpochChangeFlow( - t *testing.T, - currNodesConfig, prevNodesConfig nodesConfig, - numOfShuffledOut, numOfUnselectedNodesFromAuction, numOfSelectedNodesFromAuction int) { - - // Nodes which are now in eligible are from previous waiting list - requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.eligible), getAllPubKeys(prevNodesConfig.waiting), numOfShuffledOut) - - // New auction list also contains unselected nodes from previous auction list - requireSliceContainsNumOfElements(t, currNodesConfig.auction, prevNodesConfig.auction, numOfUnselectedNodesFromAuction) - - // All shuffled out are from previous eligible config - requireMapContains(t, prevNodesConfig.eligible, getAllPubKeys(currNodesConfig.shuffledOut)) - - // All shuffled out are now in auction - requireSliceContains(t, currNodesConfig.auction, getAllPubKeys(currNodesConfig.shuffledOut)) - - // Nodes which have been selected from previous auction list are now in waiting - requireSliceContainsNumOfElements(t, getAllPubKeys(currNodesConfig.waiting), prevNodesConfig.auction, numOfSelectedNodesFromAuction) - -} From 1afecd5f8a469d8014d39bbe19362cdcdf33c303 Mon Sep 17 00:00:00 2001 From: MariusC Date: Mon, 13 Mar 2023 13:24:44 +0200 Subject: [PATCH 12/14] CLN: Create new func for shouldDistributeShuffledToWaitingInStakingV4 --- .../nodesCoordinator/hashValidatorShuffler.go | 76 ++++++++++++------- 1 file changed, 49 insertions(+), 27 deletions(-) diff --git a/sharding/nodesCoordinator/hashValidatorShuffler.go b/sharding/nodesCoordinator/hashValidatorShuffler.go index 4b2b67f133c..f9fc41fa856 100644 --- a/sharding/nodesCoordinator/hashValidatorShuffler.go +++ b/sharding/nodesCoordinator/hashValidatorShuffler.go @@ -47,6 +47,15 @@ type shuffleNodesArg struct { flagStakingV4Step3 bool } +type shuffledNodesStakingV4 struct { + numShuffled uint32 + numNewEligible uint32 + numNewWaiting uint32 + numSelectedAuction uint32 + maxNumNodes uint32 + flagStakingV4Step2 bool +} + // TODO: Decide if transaction load statistics will be used for limiting the number of shards type randHashShuffler struct { // TODO: remove the references to this constant and the distributor @@ -285,30 +294,6 @@ func shuffleNodes(arg shuffleNodesArg) (*ResUpdateNodes, error) { shuffledOutMap, 
newEligible := shuffleOutNodes(newEligible, numToRemove, arg.randomness) - numShuffled := getNumPubKeys(shuffledOutMap) - numNewEligible := getNumPubKeys(newEligible) - numNewWaiting := getNumPubKeys(newWaiting) - - numSelectedAuction := uint32(len(arg.auction)) - totalNewWaiting := numNewWaiting + numSelectedAuction - - totalNodes := totalNewWaiting + numNewEligible + numShuffled - maxNumNodes := arg.maxNumNodes - - distributeShuffledToWaitingInStakingV4 := false - if totalNodes <= maxNumNodes { - log.Warn("num of total nodes in waiting is too low after shuffling; will distribute "+ - "shuffled out nodes directly to waiting and skip sending them to auction", - "numShuffled", numShuffled, - "numNewEligible", numNewEligible, - "numSelectedAuction", numSelectedAuction, - "totalNewWaiting", totalNewWaiting, - "totalNodes", totalNodes, - "maxNumNodes", maxNumNodes) - - distributeShuffledToWaitingInStakingV4 = arg.flagStakingV4Step2 - } - err = moveMaxNumNodesToMap(newEligible, newWaiting, arg.nodesMeta, arg.nodesPerShard) if err != nil { log.Warn("moveNodesToMap failed", "error", err) @@ -319,9 +304,18 @@ func shuffleNodes(arg shuffleNodesArg) (*ResUpdateNodes, error) { log.Warn("distributeValidators newNodes failed", "error", err) } + shuffledNodesCfg := &shuffledNodesStakingV4{ + numShuffled: getNumPubKeys(shuffledOutMap), + numNewEligible: getNumPubKeys(newEligible), + numNewWaiting: getNumPubKeys(newWaiting), + numSelectedAuction: uint32(len(arg.auction)), + maxNumNodes: arg.maxNumNodes, + flagStakingV4Step2: arg.flagStakingV4Step2, + } + if arg.flagStakingV4Step3 { log.Debug("distributing selected nodes from auction to waiting", - "num auction nodes", len(arg.auction), "num waiting nodes", numNewWaiting) + "num auction nodes", len(arg.auction), "num waiting nodes", shuffledNodesCfg.numNewWaiting) // Distribute selected validators from AUCTION -> WAITING err = distributeValidators(newWaiting, arg.auction, arg.randomness, false) @@ -330,9 +324,9 @@ func shuffleNodes(arg shuffleNodesArg) (*ResUpdateNodes, error) { } } - if distributeShuffledToWaitingInStakingV4 { + if shouldDistributeShuffledToWaitingInStakingV4(shuffledNodesCfg) { log.Debug("distributing shuffled out nodes to waiting in staking V4", - "num shuffled nodes", numShuffled, "num waiting nodes", numNewWaiting) + "num shuffled nodes", shuffledNodesCfg.numShuffled, "num waiting nodes", shuffledNodesCfg.numNewWaiting) // Distribute validators from SHUFFLED OUT -> WAITING err = arg.distributor.DistributeValidators(newWaiting, shuffledOutMap, arg.randomness, arg.flagBalanceWaitingLists) @@ -595,6 +589,34 @@ func removeValidatorFromList(validatorList []Validator, index int) []Validator { return validatorList[:len(validatorList)-1] } +func shouldDistributeShuffledToWaitingInStakingV4(shuffledNodesCfg *shuffledNodesStakingV4) bool { + if !shuffledNodesCfg.flagStakingV4Step2 { + return false + } + + totalNewWaiting := shuffledNodesCfg.numNewWaiting + shuffledNodesCfg.numSelectedAuction + totalNodes := totalNewWaiting + shuffledNodesCfg.numNewEligible + shuffledNodesCfg.numShuffled + + log.Debug("checking if should distribute shuffled out nodes to waiting in staking v4", + "numShuffled", shuffledNodesCfg.numShuffled, + "numNewEligible", shuffledNodesCfg.numNewEligible, + "numSelectedAuction", shuffledNodesCfg.numSelectedAuction, + "totalNewWaiting", totalNewWaiting, + "totalNodes", totalNodes, + "maxNumNodes", shuffledNodesCfg.maxNumNodes, + ) + + distributeShuffledToWaitingInStakingV4 := false + if totalNodes <= 
shuffledNodesCfg.maxNumNodes { + log.Warn("num of total nodes in waiting is too low after shuffling; will distribute " + + "shuffled out nodes directly to waiting and skip sending them to auction") + + distributeShuffledToWaitingInStakingV4 = true + } + + return distributeShuffledToWaitingInStakingV4 +} + func removeValidatorFromListKeepOrder(validatorList []Validator, index int) []Validator { indexNotOK := index > len(validatorList)-1 || index < 0 if indexNotOK { From c26f690f82d31e4d237449696853d76349c13a2d Mon Sep 17 00:00:00 2001 From: MariusC Date: Mon, 13 Mar 2023 16:31:48 +0200 Subject: [PATCH 13/14] CLN: Refactor error handling + new nodes in shuffler --- .../nodesCoordinator/hashValidatorShuffler.go | 48 +++++++++++-------- .../hashValidatorShuffler_test.go | 18 +++---- 2 files changed, 38 insertions(+), 28 deletions(-) diff --git a/sharding/nodesCoordinator/hashValidatorShuffler.go b/sharding/nodesCoordinator/hashValidatorShuffler.go index f9fc41fa856..dcae87c12a9 100644 --- a/sharding/nodesCoordinator/hashValidatorShuffler.go +++ b/sharding/nodesCoordinator/hashValidatorShuffler.go @@ -12,6 +12,7 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing/sha256" "github.com/multiversx/mx-chain-go/common" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/epochStart" ) var _ NodesShuffler = (*randHashShuffler)(nil) @@ -47,7 +48,7 @@ type shuffleNodesArg struct { flagStakingV4Step3 bool } -type shuffledNodesStakingV4 struct { +type shuffledNodesConfig struct { numShuffled uint32 numNewEligible uint32 numNewWaiting uint32 @@ -299,12 +300,12 @@ func shuffleNodes(arg shuffleNodesArg) (*ResUpdateNodes, error) { log.Warn("moveNodesToMap failed", "error", err) } - err = distributeValidators(newWaiting, arg.newNodes, arg.randomness, false) + err = checkAndDistributeNewNodes(newWaiting, arg.newNodes, arg.randomness, arg.flagStakingV4Step3) if err != nil { - log.Warn("distributeValidators newNodes failed", "error", err) + return nil, fmt.Errorf("distributeValidators newNodes failed, error: %w", err) } - shuffledNodesCfg := &shuffledNodesStakingV4{ + shuffledNodesCfg := &shuffledNodesConfig{ numShuffled: getNumPubKeys(shuffledOutMap), numNewEligible: getNumPubKeys(newEligible), numNewWaiting: getNumPubKeys(newWaiting), @@ -318,28 +319,20 @@ func shuffleNodes(arg shuffleNodesArg) (*ResUpdateNodes, error) { "num auction nodes", len(arg.auction), "num waiting nodes", shuffledNodesCfg.numNewWaiting) // Distribute selected validators from AUCTION -> WAITING - err = distributeValidators(newWaiting, arg.auction, arg.randomness, false) + err = distributeValidators(newWaiting, arg.auction, arg.randomness, arg.flagBalanceWaitingLists) if err != nil { - log.Warn("distributeValidators auction list failed", "error", err) + return nil, fmt.Errorf("distributeValidators auction list failed, error: %w", err) } } - if shouldDistributeShuffledToWaitingInStakingV4(shuffledNodesCfg) { - log.Debug("distributing shuffled out nodes to waiting in staking V4", + if shouldDistributeShuffledToWaiting(shuffledNodesCfg) { + log.Debug("distributing shuffled out nodes to waiting", "num shuffled nodes", shuffledNodesCfg.numShuffled, "num waiting nodes", shuffledNodesCfg.numNewWaiting) // Distribute validators from SHUFFLED OUT -> WAITING err = arg.distributor.DistributeValidators(newWaiting, shuffledOutMap, arg.randomness, arg.flagBalanceWaitingLists) if err != nil { - log.Warn("distributeValidators shuffledOut failed", "error", err) - } - } - - if !arg.flagStakingV4Step2 { - // Distribute 
validators from SHUFFLED OUT -> WAITING - err = arg.distributor.DistributeValidators(newWaiting, shuffledOutMap, arg.randomness, arg.flagBalanceWaitingLists) - if err != nil { - log.Warn("distributeValidators shuffledOut failed", "error", err) + return nil, fmt.Errorf("distributeValidators shuffled out failed, error: %w", err) } } @@ -589,9 +582,26 @@ func removeValidatorFromList(validatorList []Validator, index int) []Validator { return validatorList[:len(validatorList)-1] } -func shouldDistributeShuffledToWaitingInStakingV4(shuffledNodesCfg *shuffledNodesStakingV4) bool { +func checkAndDistributeNewNodes( + waiting map[uint32][]Validator, + newNodes []Validator, + randomness []byte, + flagStakingV4Step3 bool, +) error { + if !flagStakingV4Step3 { + return distributeValidators(waiting, newNodes, randomness, false) + } + + if len(newNodes) > 0 { + return epochStart.ErrReceivedNewListNodeInStakingV4 + } + + return nil +} + +func shouldDistributeShuffledToWaiting(shuffledNodesCfg *shuffledNodesConfig) bool { if !shuffledNodesCfg.flagStakingV4Step2 { - return false + return true } totalNewWaiting := shuffledNodesCfg.numNewWaiting + shuffledNodesCfg.numSelectedAuction diff --git a/sharding/nodesCoordinator/hashValidatorShuffler_test.go b/sharding/nodesCoordinator/hashValidatorShuffler_test.go index cae9ad879ce..bf53154a925 100644 --- a/sharding/nodesCoordinator/hashValidatorShuffler_test.go +++ b/sharding/nodesCoordinator/hashValidatorShuffler_test.go @@ -14,6 +14,7 @@ import ( "github.com/multiversx/mx-chain-core-go/core" "github.com/multiversx/mx-chain-go/config" + "github.com/multiversx/mx-chain-go/epochStart" "github.com/multiversx/mx-chain-go/sharding/mock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -2429,6 +2430,7 @@ func TestRandHashShuffler_UpdateNodeLists_WithNewNodes_NoWaiting(t *testing.T) { ShuffleBetweenShards: shuffleBetweenShards, MaxNodesEnableConfig: nil, EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + EnableEpochs: config.EnableEpochs{StakingV4Step3EnableEpoch: stakingV4Epoch}, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) @@ -2490,6 +2492,7 @@ func TestRandHashShuffler_UpdateNodeLists_WithNewNodes_NilOrEmptyWaiting(t *test ShuffleBetweenShards: shuffleBetweenShards, MaxNodesEnableConfig: nil, EnableEpochsHandler: &mock.EnableEpochsHandlerMock{}, + EnableEpochs: config.EnableEpochs{StakingV4Step3EnableEpoch: stakingV4Epoch}, } shuffler, err := NewHashValidatorsShuffler(shufflerArgs) require.Nil(t, err) @@ -2566,20 +2569,17 @@ func TestRandHashShuffler_UpdateNodeLists_WithStakingV4(t *testing.T) { t.Parallel() numEligiblePerShard := 100 - numNewNodesPerShard := 100 numWaitingPerShard := 30 numAuction := 40 nbShards := uint32(2) eligibleMap := generateValidatorMap(numEligiblePerShard, nbShards) waitingMap := generateValidatorMap(numWaitingPerShard, nbShards) - newNodes := generateValidatorList(numNewNodesPerShard * (int(nbShards) + 1)) auctionList := generateValidatorList(numAuction) args := ArgsUpdateNodes{ Eligible: eligibleMap, Waiting: waitingMap, - NewNodes: newNodes, UnStakeLeaving: make([]Validator, 0), AdditionalLeaving: make([]Validator, 0), Rand: generateRandomByteArray(32), @@ -2592,11 +2592,6 @@ func TestRandHashShuffler_UpdateNodeLists_WithStakingV4(t *testing.T) { resUpdateNodeList, err := shuffler.UpdateNodeLists(args) require.Nil(t, err) - for _, newNode := range args.NewNodes { - found, _ := searchInMap(resUpdateNodeList.Waiting, newNode.PubKey()) - assert.True(t, found) - } - for _, auctionNode := range 
args.Auction { found, _ := searchInMap(resUpdateNodeList.Waiting, auctionNode.PubKey()) assert.True(t, found) @@ -2611,9 +2606,14 @@ func TestRandHashShuffler_UpdateNodeLists_WithStakingV4(t *testing.T) { allNewEligible := getValidatorsInMap(resUpdateNodeList.Eligible) allNewWaiting := getValidatorsInMap(resUpdateNodeList.Waiting) - previousNumberOfNodes := (numEligiblePerShard+numWaitingPerShard+numNewNodesPerShard)*(int(nbShards)+1) + numAuction + previousNumberOfNodes := (numEligiblePerShard+numWaitingPerShard)*(int(nbShards)+1) + numAuction currentNumberOfNodes := len(allNewEligible) + len(allNewWaiting) + len(allShuffledOut) assert.Equal(t, previousNumberOfNodes, currentNumberOfNodes) + + args.NewNodes = generateValidatorList(100 * (int(nbShards) + 1)) + resUpdateNodeList, err = shuffler.UpdateNodeLists(args) + require.ErrorIs(t, err, epochStart.ErrReceivedNewListNodeInStakingV4) + require.Nil(t, resUpdateNodeList) } func TestRandHashShuffler_UpdateNodeLists_WithNewNodes_WithWaiting_WithLeaving(t *testing.T) { From 09be7261d448a47392211d014306b53abe6bc524 Mon Sep 17 00:00:00 2001 From: MariusC Date: Mon, 13 Mar 2023 16:36:12 +0200 Subject: [PATCH 14/14] FIX: Return error if moveMaxNumNodesToMap fails --- sharding/nodesCoordinator/hashValidatorShuffler.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sharding/nodesCoordinator/hashValidatorShuffler.go b/sharding/nodesCoordinator/hashValidatorShuffler.go index dcae87c12a9..d2a4fc0d92b 100644 --- a/sharding/nodesCoordinator/hashValidatorShuffler.go +++ b/sharding/nodesCoordinator/hashValidatorShuffler.go @@ -297,7 +297,7 @@ func shuffleNodes(arg shuffleNodesArg) (*ResUpdateNodes, error) { err = moveMaxNumNodesToMap(newEligible, newWaiting, arg.nodesMeta, arg.nodesPerShard) if err != nil { - log.Warn("moveNodesToMap failed", "error", err) + return nil, fmt.Errorf("moveNodesToMap failed, error: %w", err) } err = checkAndDistributeNewNodes(newWaiting, arg.newNodes, arg.randomness, arg.flagStakingV4Step3)
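
For reference, the decision implemented by shouldDistributeShuffledToWaiting (patches 12/14 and 13/14) boils down to the counts gathered in shuffledNodesConfig: before staking V4 step 2 the shuffled-out validators always refill the waiting lists, and once step 2 is active they are routed to the auction list unless the total node count would stay at or below the configured maximum. The following is a minimal, self-contained Go sketch of that rule only; shuffledCounts, shouldSendShuffledToWaiting and the sample numbers in main are illustrative names and values introduced here, not the code that ships in hashValidatorShuffler.go.

package main

import "fmt"

// shuffledCounts mirrors the fields of shuffledNodesConfig that drive the
// decision; the names are simplified for this sketch.
type shuffledCounts struct {
	numShuffled        uint32
	numNewEligible     uint32
	numNewWaiting      uint32
	numSelectedAuction uint32
	maxNumNodes        uint32
	stakingV4Step2     bool
}

// shouldSendShuffledToWaiting restates the rule from patches 12-13:
// before staking V4 step 2 shuffled-out nodes always refill waiting;
// once step 2 is active they go to the auction list instead, unless the
// network would otherwise stay at or below the configured maximum number
// of nodes, in which case they are sent directly to waiting.
func shouldSendShuffledToWaiting(c shuffledCounts) bool {
	if !c.stakingV4Step2 {
		return true
	}

	totalNewWaiting := c.numNewWaiting + c.numSelectedAuction
	totalNodes := totalNewWaiting + c.numNewEligible + c.numShuffled

	return totalNodes <= c.maxNumNodes
}

func main() {
	// Under-populated network (10 nodes vs. a max of 12): shuffled-out
	// nodes bypass the auction and refill waiting directly.
	fmt.Println(shouldSendShuffledToWaiting(shuffledCounts{
		numShuffled: 2, numNewEligible: 8, numNewWaiting: 0,
		numSelectedAuction: 0, maxNumNodes: 12, stakingV4Step2: true,
	})) // true

	// Fully populated network (560 nodes vs. a max of 500): shuffled-out
	// nodes are sent to the auction list as usual in step 2.
	fmt.Println(shouldSendShuffledToWaiting(shuffledCounts{
		numShuffled: 80, numNewEligible: 320, numNewWaiting: 120,
		numSelectedAuction: 40, maxNumNodes: 500, stakingV4Step2: true,
	})) // false
}

The two calls in main print true and false respectively, matching the behaviour exercised by the integration tests above: a chain with too few nodes keeps its waiting lists filled, while a fully populated chain routes shuffled-out validators through the auction list.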