Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[FIP10] Add new delegation lockin #98

Open
wants to merge 5 commits into
base: master
Choose a base branch
from
Open
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
102 changes: 69 additions & 33 deletions contracts/ConsensusUtils.sol
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ contract ConsensusUtils is EternalStorage, ValidatorSet {
uint256 public constant SNAPSHOTS_PER_CYCLE = 0; // snapshot each 288 minutes [34560/10/60*5]
uint256 public constant DEFAULT_VALIDATOR_FEE = 15e16; // 15%
uint256 public constant UNBOUNDING_PERIOD = CYCLE_DURATION_BLOCKS;
uint256 public constant MAX_WITHDRAW_QUEUE_LENGTH = 500;
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

we have at the moment 40K daily users. if they start using this, it could easily reach that.
i dont think this should be limited. how can this be used to attack anything?

Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I guess you would need to add a way to limit the number of records we iterate over when withdrawing, because otherwise the withdrawal might get stuck forever with an out-of-gas error.

Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

perhaps a withdraw helper that stops if gasLeft<100K and also returns a bool indicating if reached gaslimit

Copy link
Member Author

@Andrew-Pohl Andrew-Pohl Oct 22, 2021

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes you might be right here, maybe put the limit on the amount claimed in one transaction rather than the queue itself...

We will need some limit on the queue because it could be an attack vector: withdrawing 1×10⁻¹⁸ tokens repeatedly would grow the array without bound! (But the limit could be bigger than 500.)

Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

  1. Well, most of the time you would only attack yourself, as the array is per-address.
  2. In our case, the contract itself can limit user withdrawals to a minimum of X% of their staked tokens, so a user wouldn't be able to DDoS other users' withdrawals.
  3. As mentioned, in any case we require a solution that lets you withdraw in chunks, or until a specific amount of gas is left, in case of a large array.


/**
* @dev This event will be emitted after a change to the validator set has been finalized
Expand Down Expand Up @@ -111,48 +112,79 @@ contract ConsensusUtils is EternalStorage, ValidatorSet {
}
}

function _unBoundStake(address _staker) internal {
require(block.number > unboundingBlock(_staker));
_staker.transfer(unBoundingAmount(_staker));
_setUnBoundingAmount(_staker, 0);
/**
 * @dev Appends a pending withdrawal to the staker's unbounding (withdrawal) queue.
 * The amount becomes claimable once `block.number` passes the stored release block
 * (current block + UNBOUNDING_PERIOD). Queue length is capped at
 * MAX_WITHDRAW_QUEUE_LENGTH to bound iteration cost when the queue is drained.
 * @param _staker the delegator whose queue receives the entry
 * @param _amount the amount (in wei) being scheduled for withdrawal
 */
function _addToUboundingList(address _staker, uint256 _amount) internal {
require(unboundingQueueLength(_staker) < MAX_WITHDRAW_QUEUE_LENGTH);
// Cache the storage keys so the intent of the two parallel arrays is explicit:
// release-block queue and amount queue are index-aligned.
bytes32 blockKey = keccak256(abi.encodePacked("unboundingQueueBlock", _staker));
bytes32 amountKey = keccak256(abi.encodePacked("unboundingQueueAmount", _staker));
uintArrayStorage[blockKey].push(block.number + UNBOUNDING_PERIOD);
uintArrayStorage[amountKey].push(_amount);
}

/**
 * @dev Pays out every queue entry whose unbounding period has elapsed and compacts
 * the queue so unmatured entries move to the front. Reverts when nothing is claimable.
 *
 * Fixes relative to the reviewed draft:
 *  - the shift offset is the COUNT of matured entries (the draft used `i - 1`,
 *    an off-by-one that left one already-paid entry in the queue);
 *  - the loop index is declared in function scope explicitly instead of relying
 *    on pre-0.5 Solidity scoping of a for-loop variable;
 *  - truncation assigns the new length directly, avoiding the uint underflow the
 *    draft's decrementing delete-loop hit when the whole queue matured;
 *  - the keccak256 storage keys are computed once instead of on every access.
 * @param _staker the delegator claiming matured withdrawals
 */
function unbound(address _staker) internal {
bytes32 blockKey = keccak256(abi.encodePacked("unboundingQueueBlock", _staker));
bytes32 amountKey = keccak256(abi.encodePacked("unboundingQueueAmount", _staker));
uint256 queueLength = uintArrayStorage[blockKey].length;
uint256 toReturn = 0;
uint256 matured = 0;

// Entries are appended in block order, so the matured prefix ends at the first
// entry whose release block is still in the future.
for (uint256 i = 0; i < queueLength; i++) {
if (uintArrayStorage[blockKey][i] > block.number) {
break;
}
toReturn = toReturn.add(uintArrayStorage[amountKey][i]);
matured++;
}

// Nothing claimable yet — revert so the caller's transaction fails loudly.
require(toReturn != 0);

// Shift the unmatured tail to the front of both index-aligned arrays.
uint256 remaining = queueLength - matured;
for (uint256 j = 0; j < remaining; j++) {
uintArrayStorage[blockKey][j] = uintArrayStorage[blockKey][j + matured];
uintArrayStorage[amountKey][j] = uintArrayStorage[amountKey][j + matured];
}
// Truncate the consumed entries (dynamic storage array length is writable pre-0.6).
uintArrayStorage[blockKey].length = remaining;
uintArrayStorage[amountKey].length = remaining;

// Transfer last: state is fully updated first (checks-effects-interactions).
_staker.transfer(toReturn);
}

// Initiates withdrawal of _amount delegated by _staker to _validator.
// NOTE(review): this span is a scraped side-by-side diff with the +/- markers lost;
// the old (removed) and new (added) implementations are interleaved below, which is
// why `_isValidator` and several statements appear twice — confirm against the raw
// diff before treating this as compilable code.
function _withdraw(address _staker, uint256 _amount, address _validator) internal {
require(_validator != address(0));
require(_amount > 0);
// NOTE(review): the branch below (unBoundingAmount / _unBoundStake, single-slot
// unbounding) appears to be the PRE-change implementation removed by this PR.
if (unBoundingAmount(_staker) != 0)
{
//If we have requested a withdraw then try and pull it
_unBoundStake(_staker);
} else {
require(_amount <= stakeAmount(_validator));
require(_amount <= delegatedAmount(_staker, _validator));
_setUnboundingPeriod(_staker);
_setUnBoundingAmount(_staker, _amount);

bool _isValidator = isValidator(_validator);

// if new stake amount is lesser than minStake and the validator is one of the current validators
if (stakeAmount(_validator).sub(_amount) < getMinStake() && _isValidator) {
// do not withdraw the amount until the validator is in current set
_pendingValidatorsRemove(_validator);
return;
}

// NOTE(review): from here down appears to be the POST-change implementation, which
// queues the amount via _addToUboundingList instead of the single unbounding slot.
require(_amount <= stakeAmount(_validator));
require(_amount <= delegatedAmount(_staker, _validator));
_addToUboundingList(_staker, _amount);

bool _isValidator = isValidator(_validator);

// if new stake amount is lesser than minStake and the validator is one of the current validators
if (stakeAmount(_validator).sub(_amount) < getMinStake() && _isValidator) {
// do not withdraw the amount until the validator is in current set
// NOTE(review): the amount was already pushed to the unbounding queue above, but
// the delegated/stake balances are NOT reduced on this early return — verify this
// cannot double-count if the same stake is withdrawn again later.
_pendingValidatorsRemove(_validator);
return;
}


_delegatedAmountSub(_staker, _validator, _amount);
_stakeAmountSub(_validator, _amount);
_delegatedAmountSub(_staker, _validator, _amount);
_stakeAmountSub(_validator, _amount);

// if _validator is one of the current validators
if (_isValidator) {
// the total stake needs to be adjusted for the block reward formula
_totalStakeAmountSub(_amount);
}
// if _validator is one of the current validators
if (_isValidator) {
// the total stake needs to be adjusted for the block reward formula
_totalStakeAmountSub(_amount);
}

// if validator is needed to be removed from pending, but not current
if (stakeAmount(_validator) < getMinStake()) {
_pendingValidatorsRemove(_validator);
}
// if validator is needed to be removed from pending, but not current
if (stakeAmount(_validator) < getMinStake()) {
_pendingValidatorsRemove(_validator);
}
}

Expand Down Expand Up @@ -492,6 +524,10 @@ contract ConsensusUtils is EternalStorage, ValidatorSet {
return addressArrayStorage[NEW_VALIDATOR_SET].length;
}

/**
 * @dev Returns the number of pending withdrawal entries queued for `_staker`.
 * The release-block array and the amount array are index-aligned, so either
 * one's length is the queue length; the block array is used here.
 */
function unboundingQueueLength(address _staker) public view returns(uint256) {
bytes32 queueKey = keccak256(abi.encodePacked("unboundingQueueBlock", _staker));
return uintArrayStorage[queueKey].length;
}

function _setNewValidatorSet(address[] _newSet) internal {
addressArrayStorage[NEW_VALIDATOR_SET] = _newSet;
}
Expand Down