Skip to content

Commit

Permalink
chore: Cover changing throttle definitions (#16821)
Browse files Browse the repository at this point in the history
Signed-off-by: Michael Tinker <michael.tinker@swirldslabs.com>
  • Loading branch information
tinker-michaelj authored Dec 3, 2024
1 parent 5d218c7 commit 2bdb889
Show file tree
Hide file tree
Showing 6 changed files with 300 additions and 7 deletions.
6 changes: 6 additions & 0 deletions hapi/hedera-protobufs/services/response_code.proto
Original file line number Diff line number Diff line change
Expand Up @@ -1619,4 +1619,10 @@ enum ResponseCodeEnum {
* The provided gRPC certificate hash is invalid.
*/
INVALID_GRPC_CERTIFICATE_HASH = 373;

/**
* A scheduled transaction configured to wait for expiry to execute was not
* given an explicit expiration time.
*/
MISSING_EXPIRY_TIME = 374;
}
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
import static com.hedera.hapi.node.base.ResponseCodeEnum.INVALID_TRANSACTION_BODY;
import static com.hedera.hapi.node.base.ResponseCodeEnum.MAX_ENTITIES_IN_PRICE_REGIME_HAVE_BEEN_CREATED;
import static com.hedera.hapi.node.base.ResponseCodeEnum.MEMO_TOO_LONG;
import static com.hedera.hapi.node.base.ResponseCodeEnum.MISSING_EXPIRY_TIME;
import static com.hedera.hapi.node.base.ResponseCodeEnum.SCHEDULED_TRANSACTION_NOT_IN_WHITELIST;
import static com.hedera.hapi.node.base.ResponseCodeEnum.SCHEDULE_EXPIRY_IS_BUSY;
import static com.hedera.hapi.node.base.ResponseCodeEnum.SCHEDULE_EXPIRY_MUST_BE_FUTURE;
Expand All @@ -45,6 +46,8 @@
import com.hedera.hapi.node.scheduled.SchedulableTransactionBody;
import com.hedera.hapi.node.scheduled.ScheduleCreateTransactionBody;
import com.hedera.hapi.node.state.schedule.Schedule;
import com.hedera.hapi.node.state.schedule.ScheduledOrder;
import com.hedera.hapi.node.state.throttles.ThrottleUsageSnapshots;
import com.hedera.hapi.node.transaction.TransactionBody;
import com.hedera.node.app.hapi.fees.usage.SigUsage;
import com.hedera.node.app.hapi.fees.usage.schedule.ScheduleOpsUsage;
Expand Down Expand Up @@ -72,12 +75,16 @@
import java.util.Objects;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

/**
* This class contains all workflow-related functionality regarding {@link HederaFunctionality#SCHEDULE_CREATE}.
*/
@Singleton
public class ScheduleCreateHandler extends AbstractScheduleHandler implements TransactionHandler {
private static final Logger log = LogManager.getLogger(ScheduleCreateHandler.class);

private final ScheduleOpsUsage scheduleOpsUsage = new ScheduleOpsUsage();
private final InstantSource instantSource;
private final Throttle.Factory throttleFactory;
Expand All @@ -96,7 +103,7 @@ public void pureChecks(@NonNull final TransactionBody body) throws PreCheckExcep
final var op = body.scheduleCreateOrThrow();
validateTruePreCheck(op.hasScheduledTransactionBody(), INVALID_TRANSACTION);
// (FUTURE) Add a dedicated response code for an op waiting for an unspecified expiration time
validateFalsePreCheck(op.waitForExpiry() && !op.hasExpirationTime(), INVALID_TRANSACTION);
validateFalsePreCheck(op.waitForExpiry() && !op.hasExpirationTime(), MISSING_EXPIRY_TIME);
}

@Override
Expand Down Expand Up @@ -195,7 +202,8 @@ public void handle(@NonNull final HandleContext context) throws HandleException
MAX_ENTITIES_IN_PRICE_REGIME_HAVE_BEEN_CREATED);
final var capacityFraction = schedulingConfig.schedulableCapacityFraction();
final var usageSnapshots = scheduleStore.usageSnapshotsForScheduled(then);
final var throttle = throttleFactory.newThrottle(capacityFraction.asApproxCapacitySplit(), usageSnapshots);
final var throttle =
upToDateThrottle(then, capacityFraction.asApproxCapacitySplit(), usageSnapshots, scheduleStore);
validateTrue(
throttle.allow(
provisionalSchedule.payerAccountIdOrThrow(),
Expand Down Expand Up @@ -305,4 +313,46 @@ private HederaFunctionality functionOf(@NonNull final Schedule schedule) {
return functionalityForType(
schedule.scheduledTransactionOrThrow().data().kind());
}

/**
 * Attempts to recover a throttle from the given usage snapshots, or creates a new throttle if the recovery fails.
 * (This edge case can occur if the network throttle definitions changed since a transaction was last scheduled
 * in the given second and snapshots were taken.)
 * @param then the second for which the throttle is being recovered
 * @param capacitySplit the capacity split for the throttle
 * @param usageSnapshots the usage snapshots to recover from, or null if none were taken for this second
 * @param scheduleStore the store used to replay already-scheduled transactions when the throttle must be rebuilt
 * @return the throttle
 */
private Throttle upToDateThrottle(
        final long then,
        final int capacitySplit,
        @Nullable final ThrottleUsageSnapshots usageSnapshots,
        @NonNull final WritableScheduleStore scheduleStore) {
    requireNonNull(scheduleStore);
    try {
        // Happy path: the snapshots still match the current throttle definitions
        return throttleFactory.newThrottle(capacitySplit, usageSnapshots);
    } catch (Exception e) {
        // Broad catch is deliberate: any failure to reconstruct from stale snapshots
        // (e.g. after a throttle definitions change) falls back to a full rebuild
        final var instantThen = Instant.ofEpochSecond(then);
        log.info(
                "Could not recreate throttle at {} from {} ({}), rebuilding with up-to-date throttle",
                instantThen,
                usageSnapshots,
                e.getMessage());
        // Start from an empty throttle built against the current definitions...
        final var throttle = throttleFactory.newThrottle(capacitySplit, null);
        final var counts = requireNonNull(scheduleStore.scheduledCountsAt(then));
        final int n = counts.numberScheduled();
        for (int i = 0; i < n; i++) {
            final var scheduleId = requireNonNull(scheduleStore.getByOrder(new ScheduledOrder(then, i)));
            final var schedule = requireNonNull(scheduleStore.get(scheduleId));
            // Consume capacity from every already-scheduled transaction in the new throttle
            throttle.allow(
                    schedule.payerAccountIdOrThrow(),
                    childAsOrdinary(schedule),
                    functionOf(schedule),
                    Instant.ofEpochSecond(then));
        }
        log.info("Rebuilt throttle at {} from {} scheduled transactions", instantThen, n);
        return throttle;
    }
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -702,6 +702,7 @@ public Stream<DynamicTest> sharedKeyWorksAsExpected() {
@Order(15)
public Stream<DynamicTest> overlappingKeysTreatedAsExpected() {
var keyGen = OverlappingKeyGenerator.withAtLeastOneOverlappingByte(2);
final long scheduleLifetime = 6;

return defaultHapiSpec("OverlappingKeysTreatedAsExpectedAtExpiry")
.given(
Expand All @@ -717,7 +718,7 @@ public Stream<DynamicTest> overlappingKeysTreatedAsExpected() {
tinyBarsFromTo("aSender", ADDRESS_BOOK_CONTROL, 1),
tinyBarsFromTo("cSender", ADDRESS_BOOK_CONTROL, 1)))
.waitForExpiry()
.withRelativeExpiry(SENDER_TXN, 5)
.withRelativeExpiry(SENDER_TXN, scheduleLifetime)
.recordingScheduledTxn())
.then(
scheduleSign(DEFERRED_XFER).alsoSigningWith("aKey"),
Expand All @@ -743,9 +744,9 @@ public Stream<DynamicTest> overlappingKeysTreatedAsExpected() {
.hasWaitForExpiry()
.isNotExecuted()
.isNotDeleted()
.hasRelativeExpiry(SENDER_TXN, 5)
.hasRelativeExpiry(SENDER_TXN, scheduleLifetime)
.hasRecordedScheduledTxn(),
sleepFor(TimeUnit.SECONDS.toMillis(6)),
sleepFor(TimeUnit.SECONDS.toMillis(scheduleLifetime)),
cryptoCreate("foo"),
getScheduleInfo(DEFERRED_XFER).hasCostAnswerPrecheck(INVALID_SCHEDULE_ID),
getAccountBalance(ADDRESS_BOOK_CONTROL).hasTinyBars(changeFromSnapshot(BEFORE, +2)));
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,7 @@
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.newKeyNamed;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.overriding;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.overridingAllOf;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.overridingThrottles;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.sleepFor;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.sleepForSeconds;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.sourcing;
Expand All @@ -66,6 +67,7 @@
import static com.hedera.services.bdd.suites.HapiSuite.ONE_MILLION_HBARS;
import static com.hederahashgraph.api.proto.java.HederaFunctionality.ConsensusCreateTopic;
import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.BUSY;
import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.MISSING_EXPIRY_TIME;
import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.SCHEDULE_EXPIRY_IS_BUSY;
import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.SCHEDULE_EXPIRY_MUST_BE_FUTURE;
import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.SCHEDULE_EXPIRY_TOO_LONG;
Expand Down Expand Up @@ -179,7 +181,10 @@ final Stream<DynamicTest> expiryMustBeValid() {
exposeSpecSecondTo(lastSecond::set),
sourcing(() -> scheduleCreate("tooLate", cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, FUNDING, 34L)))
.expiringAt(lastSecond.get() + 1 + ONE_MINUTE + 1)
.hasKnownStatus(SCHEDULE_EXPIRY_TOO_LONG)));
.hasKnownStatus(SCHEDULE_EXPIRY_TOO_LONG)),
scheduleCreate("unspecified", cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, FUNDING, 56L)))
.waitForExpiry()
.hasPrecheck(MISSING_EXPIRY_TIME));
}

/**
Expand Down Expand Up @@ -230,6 +235,64 @@ final Stream<DynamicTest> throttlingAndExecutionAsExpected() {
sourcingContextual(spec -> purgeExpiringWithin(maxLifetime.get())));
}

/**
 * Verifies that when the network throttle definitions change after throttle usage snapshots were
 * taken for a given expiry second, scheduling still works for that second: the handler rebuilds an
 * up-to-date throttle from the already-scheduled transactions instead of failing on the stale
 * snapshots. The test first fills the {@link com.hedera.hapi.node.base.HederaFunctionality#CONSENSUS_CREATE_TOPIC}
 * capacity to one below its limit under the initial throttles, switches to different throttle
 * definitions for the same second, then confirms exactly one more schedule is allowed before the
 * next attempt is throttled.
 */
@LeakyRepeatableHapiTest(
        value = {
            NEEDS_LAST_ASSIGNED_CONSENSUS_TIME,
            NEEDS_VIRTUAL_TIME_FOR_FAST_EXECUTION,
            NEEDS_STATE_ACCESS,
            THROTTLE_OVERRIDES
        },
        overrides = {
            "scheduling.whitelist",
        },
        throttles = "testSystemFiles/mainnet-throttles-sans-reservations.json")
final Stream<DynamicTest> throttlingRebuiltForSecondWhenSnapshotsNoLongerMatch() {
    final var expirySecond = new AtomicLong();
    final var maxLifetime = new AtomicLong();
    final var maxSchedulableTopicCreates = new AtomicInteger();
    return hapiTest(
            overriding("scheduling.whitelist", "ConsensusCreateTopic"),
            // Schedule everything at the farthest allowed expiry so all creations share one second
            doWithStartupConfigNow(
                    "scheduling.maxExpirationFutureSeconds",
                    (value, specTime) -> doAdhoc(() -> {
                        maxLifetime.set(Long.parseLong(value));
                        expirySecond.set(specTime.getEpochSecond() + maxLifetime.get());
                    })),
            cryptoCreate(CIVILIAN_PAYER).balance(ONE_MILLION_HBARS),
            exposeMaxSchedulable(ConsensusCreateTopic, maxSchedulableTopicCreates::set),
            // Schedule one fewer than the maximum number of topic creations allowed using
            // the initial throttles without the PriorityReservations bucket
            sourcing(() -> blockingOrder(IntStream.range(0, maxSchedulableTopicCreates.get() - 1)
                    .mapToObj(i -> scheduleCreate(
                                    "topic" + i, createTopic("t" + i).topicMemo("m" + i))
                            .expiringAt(expirySecond.get())
                            .payingWith(CIVILIAN_PAYER)
                            .fee(ONE_HUNDRED_HBARS))
                    .toArray(SpecOperation[]::new))),
            // Now override the throttles to the mainnet throttles with the PriorityReservations bucket
            // (so that the throttle snapshots in state for this second don't match the new throttles)
            overridingThrottles("testSystemFiles/mainnet-throttles.json"),
            // And confirm we can schedule one more
            sourcing(() -> scheduleCreate(
                            "lastTopicCreation", createTopic("oneMore").topicMemo("N-1"))
                    .expiringAt(expirySecond.get())
                    .payingWith(CIVILIAN_PAYER)
                    .fee(ONE_HUNDRED_HBARS)),
            // But then the next is throttled
            sourcing(() -> scheduleCreate(
                            "throttledTopicCreation", createTopic("NTB").topicMemo("NOPE"))
                    .expiringAt(expirySecond.get())
                    .payingWith(CIVILIAN_PAYER)
                    .fee(ONE_HUNDRED_HBARS)
                    .hasKnownStatus(SCHEDULE_EXPIRY_IS_BUSY)),
            // Clean up the scheduled state created for the far-future second
            sourcingContextual(spec -> purgeExpiringWithin(maxLifetime.get())));
}

/**
* Tests that execution of scheduled transactions purges the associated state as expected when a single
* user transaction fully executes multiple seconds. The test uses three scheduled transactions, two of
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -192,7 +192,7 @@ final Stream<DynamicTest> nodeOperatorAccountInfoQueryCharged() {
return hapiTest(flattened(
nodeOperatorAccount(),
getAccountInfo(NODE_OPERATOR).payingWith(NODE_OPERATOR).via("accountInfoQueryTxn"),
sleepFor(1000),
sleepFor(2000),
getAccountBalance(NODE_OPERATOR).hasTinyBars(lessThan(ONE_HUNDRED_HBARS))));
}

Expand Down
Loading

0 comments on commit 2bdb889

Please sign in to comment.