diff --git a/hapi/hedera-protobufs/block/stream/output/state_changes.proto b/hapi/hedera-protobufs/block/stream/output/state_changes.proto
index 4d55384f0332..e847eb32f07a 100644
--- a/hapi/hedera-protobufs/block/stream/output/state_changes.proto
+++ b/hapi/hedera-protobufs/block/stream/output/state_changes.proto
@@ -354,9 +354,10 @@ enum StateIdentifier {
STATE_ID_ROSTERS = 28;
/**
- * A state identifier for scheduled transaction expiration.
+ * A state identifier for counts of transactions scheduled and
+ * processed in a second.
*/
- STATE_ID_SCHEDULE_IDS_BY_EXPIRY = 29;
+ STATE_ID_SCHEDULED_COUNTS = 29;
/**
* A state identifier for scheduled transaction deduplication.
@@ -373,6 +374,16 @@ enum StateIdentifier {
*/
STATE_ID_TSS_VOTES = 32;
+ /**
+ * A state identifier for the ordering of scheduled transactions.
+ */
+ STATE_ID_SCHEDULED_ORDERS = 33;
+
+ /**
+ * A state identifier for scheduled throttle usage snapshots.
+ */
+ STATE_ID_SCHEDULED_USAGES = 34;
+
/**
* A state identifier for the round receipts queue.
*/
@@ -711,6 +722,21 @@ message MapChangeKey {
* A change to a virtual map keyed by pending airdrop id identifier.
*/
proto.PendingAirdropId pending_airdrop_id_key = 14;
+
+ /**
+ * An exact date and time, with a resolution of one second.
+ */
+ proto.TimestampSeconds timestamp_seconds_key = 15;
+
+ /**
+ * An ordering key mapped to a particular schedule.
+ * This identifies the order in which long term scheduled transactions
+ * that are requested to execute in the same consensus second will
+ * be executed. The value will be the `ScheduleID` for the schedule
+ * to be executed at a particular consensus second and order within
+ * that second.
+ */
+ proto.ScheduledOrder scheduled_order_key = 16;
}
}
@@ -817,14 +843,20 @@ message MapChangeValue {
com.hedera.hapi.node.state.roster.Roster roster_value = 16;
/**
- * A list of scheduled ids.
+ * The value of a map summarizing the counts of scheduled and processed transactions
+ * within a particular consensus second.
*/
- proto.ScheduleIdList schedule_id_list_value = 17;
+ proto.ScheduledCounts scheduled_counts_value = 17;
/**
* A scheduled id value.
*/
proto.ScheduleID schedule_id_value = 18;
+
+ /**
+ * A change to the scheduled throttle usage snapshots.
+ */
+ proto.ThrottleUsageSnapshots throttle_usage_snapshots_value = 19;
}
}
diff --git a/hapi/hedera-protobufs/services/response_code.proto b/hapi/hedera-protobufs/services/response_code.proto
index 1327de4fee91..d8e89221421b 100644
--- a/hapi/hedera-protobufs/services/response_code.proto
+++ b/hapi/hedera-protobufs/services/response_code.proto
@@ -1593,4 +1593,25 @@ enum ResponseCodeEnum {
* airdrop and whether the sender can fulfill the offer.
*/
INVALID_TOKEN_IN_PENDING_AIRDROP = 369;
+
+ /**
+ * A scheduled transaction configured to wait for expiry to execute was given
+ * an expiry time not strictly after the time at which its creation reached
+ * consensus.
+ */
+ SCHEDULE_EXPIRY_MUST_BE_FUTURE = 370;
+
+ /**
+ * A scheduled transaction configured to wait for expiry to execute was given
+ * an expiry time too far in the future after the time at which its creation
+ * reached consensus.
+ */
+ SCHEDULE_EXPIRY_TOO_LONG = 371;
+
+ /**
+ * A scheduled transaction configured to wait for expiry to execute was given
+ * an expiry time at which there are already too many transactions scheduled to
+ * expire; its creation must be retried with a different expiry.
+ */
+ SCHEDULE_EXPIRY_IS_BUSY = 372;
}
diff --git a/hapi/hedera-protobufs/services/state/blockstream/block_stream_info.proto b/hapi/hedera-protobufs/services/state/blockstream/block_stream_info.proto
index c619c6c6f3f2..529fdabf071f 100644
--- a/hapi/hedera-protobufs/services/state/blockstream/block_stream_info.proto
+++ b/hapi/hedera-protobufs/services/state/blockstream/block_stream_info.proto
@@ -155,4 +155,12 @@ message BlockStreamInfo {
* at which an interval of time-dependent events were processed.
*/
proto.Timestamp last_interval_process_time = 12;
+
+ /**
+ * The time stamp at which the last user transaction was handled.
+ *
+ * This field SHALL hold the consensus time for the last time
+ * at which a user transaction was handled.
+ */
+ proto.Timestamp last_handle_time = 13;
}
diff --git a/hapi/hedera-protobufs/services/state/schedule/schedule.proto b/hapi/hedera-protobufs/services/state/schedule/schedule.proto
index 9fce1e4227d6..611c5a372633 100644
--- a/hapi/hedera-protobufs/services/state/schedule/schedule.proto
+++ b/hapi/hedera-protobufs/services/state/schedule/schedule.proto
@@ -160,3 +160,33 @@ message ScheduleIdList {
*/
repeated ScheduleID schedule_ids = 1;
}
+
+/**
+ * The value of a map summarizing the counts of scheduled and processed transactions
+ * within a particular consensus second.
+ */
+message ScheduledCounts {
+ /**
+ * The number of transactions scheduled to expire at a consensus second.
+ */
+ uint32 number_scheduled = 1;
+ /**
+ * The number of scheduled transactions that have been processed at a consensus second.
+ */
+ uint32 number_processed = 2;
+}
+
+/**
+ * A key mapping to a particular ScheduleID that will execute at a given order number
+ * within a given consensus second.
+ */
+message ScheduledOrder {
+ /**
+ * The consensus second at which the transaction will expire.
+ */
+ uint64 expiry_second = 1;
+ /**
+ * The ordered position within the consensus second at which the transaction will be executed.
+ */
+ uint32 order_number = 2;
+}
diff --git a/hedera-node/hapi-utils/src/main/java/com/hedera/node/app/hapi/utils/sysfiles/domain/throttling/ScaleFactor.java b/hedera-node/hapi-utils/src/main/java/com/hedera/node/app/hapi/utils/sysfiles/domain/throttling/ScaleFactor.java
index 6180291d35a7..6211c6cb8f96 100644
--- a/hedera-node/hapi-utils/src/main/java/com/hedera/node/app/hapi/utils/sysfiles/domain/throttling/ScaleFactor.java
+++ b/hedera-node/hapi-utils/src/main/java/com/hedera/node/app/hapi/utils/sysfiles/domain/throttling/ScaleFactor.java
@@ -47,6 +47,14 @@ public int scaling(int nominalOps) {
return Math.max(1, nominalOps * numerator / denominator);
}
+ /**
+ * Returns the scale factor as an approximate 1:n split of capacity, rounding up.
+ * @return the approximate capacity split
+ */
+ public int asApproxCapacitySplit() {
+ return (denominator + numerator - 1) / numerator;
+ }
+
@Override
public int compareTo(final ScaleFactor that) {
return Integer.compare(this.numerator * that.denominator, that.numerator * this.denominator);
diff --git a/hedera-node/hedera-addressbook-service-impl/src/main/java/com/hedera/node/app/service/addressbook/impl/helpers/AddressBookHelper.java b/hedera-node/hedera-addressbook-service-impl/src/main/java/com/hedera/node/app/service/addressbook/impl/helpers/AddressBookHelper.java
index 4c99c7824c3f..868e63d98e03 100644
--- a/hedera-node/hedera-addressbook-service-impl/src/main/java/com/hedera/node/app/service/addressbook/impl/helpers/AddressBookHelper.java
+++ b/hedera-node/hedera-addressbook-service-impl/src/main/java/com/hedera/node/app/service/addressbook/impl/helpers/AddressBookHelper.java
@@ -46,10 +46,12 @@ public AddressBookHelper() {}
/**
* Adjusts the node metadata after upgrade. This method will mark nodes as deleted if they are not present in the
* address book and add new nodes to the node store.
+ * IMPORTANT: Once DAB is enabled, should always be a no-op.
* @param networkInfo the network info
* @param config configuration
* @param nodeStore the node store
*/
+ @Deprecated
public void adjustPostUpgradeNodeMetadata(
@NonNull final NetworkInfo networkInfo,
@NonNull final Configuration config,
diff --git a/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/AppContext.java b/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/AppContext.java
index 8725c57a7134..0248cc876d7c 100644
--- a/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/AppContext.java
+++ b/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/AppContext.java
@@ -19,6 +19,7 @@
import com.hedera.hapi.node.base.ResponseCodeEnum;
import com.hedera.hapi.node.transaction.TransactionBody;
import com.hedera.node.app.spi.signatures.SignatureVerifier;
+import com.hedera.node.app.spi.throttle.Throttle;
import com.swirlds.common.crypto.Signature;
import com.swirlds.config.api.Configuration;
import com.swirlds.state.lifecycle.Service;
@@ -103,4 +104,10 @@ public Signature sign(final byte[] ledgerId) {
* @return the supplier
*/
Supplier selfNodeInfoSupplier();
+
+ /**
+ * The application's strategy for creating {@link Throttle} instances.
+ * @return the throttle factory
+ */
+ Throttle.Factory throttleFactory();
}
diff --git a/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/throttle/Throttle.java b/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/throttle/Throttle.java
new file mode 100644
index 000000000000..0d50fa2e5aed
--- /dev/null
+++ b/hedera-node/hedera-app-spi/src/main/java/com/hedera/node/app/spi/throttle/Throttle.java
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.hedera.node.app.spi.throttle;
+
+import com.hedera.hapi.node.base.AccountID;
+import com.hedera.hapi.node.base.HederaFunctionality;
+import com.hedera.hapi.node.state.throttles.ThrottleUsageSnapshots;
+import com.hedera.hapi.node.transaction.TransactionBody;
+import com.hedera.node.app.spi.AppContext;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import edu.umd.cs.findbugs.annotations.Nullable;
+import java.time.Instant;
+
+/**
+ * A throttle that can be used to limit the rate of transactions. Provided in the {@link AppContext} so that services
+ * can align to the application's strategy for throttling transactions.
+ */
+public interface Throttle {
+ /**
+ * A factory for creating {@link Throttle} instances.
+ */
+ interface Factory {
+ /**
+ * Creates a new throttle based on the capacity split and usage snapshots.
+ * @param capacitySplit the split of the capacity
+ * @param initialUsageSnapshots if not null, the usage snapshots the throttle should start with
+ * @return the new throttle
+ */
+ Throttle newThrottle(int capacitySplit, @Nullable ThrottleUsageSnapshots initialUsageSnapshots);
+ }
+
+ /**
+ * Tries to consume throttle capacity for the given payer, transaction, function, time, and state.
+ * @param payerId the account ID of the payer
+ * @param body the transaction body
+ * @param function the functionality of the transaction
+ * @param now the current time
+ * @return whether the capacity could be consumed
+ */
+ boolean allow(
+ @NonNull AccountID payerId,
+ @NonNull TransactionBody body,
+ @NonNull HederaFunctionality function,
+ @NonNull Instant now);
+
+ /**
+ * Returns the usage snapshots of the throttle.
+ */
+ ThrottleUsageSnapshots usageSnapshots();
+}
diff --git a/hedera-node/hedera-app/src/jmh/java/com/hedera/node/app/blocks/BlockStreamManagerBenchmark.java b/hedera-node/hedera-app/src/jmh/java/com/hedera/node/app/blocks/BlockStreamManagerBenchmark.java
index b18c6d8250bc..53f27b705619 100644
--- a/hedera-node/hedera-app/src/jmh/java/com/hedera/node/app/blocks/BlockStreamManagerBenchmark.java
+++ b/hedera-node/hedera-app/src/jmh/java/com/hedera/node/app/blocks/BlockStreamManagerBenchmark.java
@@ -122,8 +122,11 @@ public static void main(String... args) throws Exception {
Instant::now,
fakeSignatureVerifier(),
UNAVAILABLE_GOSSIP,
- () -> configProvider.getConfiguration(),
- () -> DEFAULT_NODE_INFO),
+ configProvider::getConfiguration,
+ () -> DEFAULT_NODE_INFO,
+ (split, snapshots) -> {
+ throw new UnsupportedOperationException();
+ }),
ForkJoinPool.commonPool(),
ForkJoinPool.commonPool(),
new PlaceholderTssLibrary(),
diff --git a/hedera-node/hedera-app/src/jmh/java/com/hedera/node/app/blocks/StandaloneRoundManagement.java b/hedera-node/hedera-app/src/jmh/java/com/hedera/node/app/blocks/StandaloneRoundManagement.java
index 3f770009f24d..b14f6633fc22 100644
--- a/hedera-node/hedera-app/src/jmh/java/com/hedera/node/app/blocks/StandaloneRoundManagement.java
+++ b/hedera-node/hedera-app/src/jmh/java/com/hedera/node/app/blocks/StandaloneRoundManagement.java
@@ -98,8 +98,11 @@ public class StandaloneRoundManagement {
Instant::now,
fakeSignatureVerifier(),
UNAVAILABLE_GOSSIP,
- () -> configProvider.getConfiguration(),
- () -> DEFAULT_NODE_INFO),
+ configProvider::getConfiguration,
+ () -> DEFAULT_NODE_INFO,
+ (split, snapshots) -> {
+ throw new UnsupportedOperationException();
+ }),
ForkJoinPool.commonPool(),
ForkJoinPool.commonPool(),
new PlaceholderTssLibrary(),
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/Hedera.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/Hedera.java
index 2ff2025a0f23..5b6d54bc9670 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/Hedera.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/Hedera.java
@@ -56,6 +56,7 @@
import com.hedera.hapi.node.base.SemanticVersion;
import com.hedera.hapi.node.state.blockrecords.BlockInfo;
import com.hedera.hapi.node.state.blockstream.BlockStreamInfo;
+import com.hedera.hapi.node.transaction.ThrottleDefinitions;
import com.hedera.hapi.node.transaction.TransactionBody;
import com.hedera.hapi.platform.state.PlatformState;
import com.hedera.hapi.util.HapiUtils;
@@ -98,7 +99,9 @@
import com.hedera.node.app.statedumpers.DumpCheckpoint;
import com.hedera.node.app.statedumpers.MerkleStateChild;
import com.hedera.node.app.store.ReadableStoreFactory;
+import com.hedera.node.app.throttle.AppThrottleFactory;
import com.hedera.node.app.throttle.CongestionThrottleService;
+import com.hedera.node.app.throttle.ThrottleAccumulator;
import com.hedera.node.app.tss.TssBaseService;
import com.hedera.node.app.version.ServicesSoftwareVersion;
import com.hedera.node.app.workflows.handle.HandleWorkflow;
@@ -233,9 +236,9 @@ public final class Hedera implements SwirldMain, PlatformStatusChangeListener, A
private final SemanticVersion hapiVersion;
/**
- * The source of time the node should use for screening transactions at ingest.
+ * The application context for the node.
*/
- private final InstantSource instantSource;
+ private final AppContext appContext;
/**
* The contract service singleton, kept as a field here to avoid constructing twice
@@ -243,6 +246,12 @@ public final class Hedera implements SwirldMain, PlatformStatusChangeListener, A
*/
private final ContractServiceImpl contractServiceImpl;
+ /**
+ * The schedule service singleton, kept as a field here to avoid constructing twice
+ * (once in constructor to register schemas, again inside Dagger component).
+ */
+ private final ScheduleServiceImpl scheduleServiceImpl;
+
/**
* The TSS base service singleton, kept as a field here to avoid constructing twice
* (once in constructor to register schemas, again inside Dagger component).
@@ -387,7 +396,6 @@ public Hedera(
requireNonNull(constructableRegistry);
this.selfNodeId = requireNonNull(selfNodeId);
this.serviceMigrator = requireNonNull(migrator);
- this.instantSource = requireNonNull(instantSource);
this.startupNetworksFactory = requireNonNull(startupNetworksFactory);
logger.info(
"""
@@ -411,17 +419,24 @@ public Hedera(
() -> HapiUtils.toString(hapiVersion));
fileServiceImpl = new FileServiceImpl();
- final var appContext = new AppContextImpl(
+ final Supplier configSupplier = () -> configProvider.getConfiguration();
+ this.appContext = new AppContextImpl(
instantSource,
new AppSignatureVerifier(
bootstrapConfig.getConfigData(HederaConfig.class),
new SignatureExpanderImpl(),
new SignatureVerifierImpl(CryptographyHolder.get())),
this,
- () -> configProvider.getConfiguration(),
- () -> daggerApp.networkInfo().selfNodeInfo());
+ configSupplier,
+ () -> daggerApp.networkInfo().selfNodeInfo(),
+ new AppThrottleFactory(
+ configSupplier,
+ () -> daggerApp.workingStateAccessor().getState(),
+ () -> daggerApp.throttleServiceManager().activeThrottleDefinitionsOrThrow(),
+ ThrottleAccumulator::new));
tssBaseService = tssBaseServiceFactory.apply(appContext);
contractServiceImpl = new ContractServiceImpl(appContext);
+ scheduleServiceImpl = new ScheduleServiceImpl();
blockStreamService = new BlockStreamService();
// Register all service schema RuntimeConstructable factories before platform init
Set.of(
@@ -431,7 +446,7 @@ public Hedera(
fileServiceImpl,
tssBaseService,
new FreezeServiceImpl(),
- new ScheduleServiceImpl(),
+ scheduleServiceImpl,
new TokenServiceImpl(),
new UtilServiceImpl(),
new RecordCacheService(),
@@ -917,10 +932,18 @@ public HandleWorkflow handleWorkflow() {
return daggerApp.handleWorkflow();
}
+ public ConfigProvider configProvider() {
+ return configProvider;
+ }
+
public BlockStreamManager blockStreamManager() {
return daggerApp.blockStreamManager();
}
+ public ThrottleDefinitions activeThrottleDefinitions() {
+ return daggerApp.throttleServiceManager().activeThrottleDefinitionsOrThrow();
+ }
+
public boolean isBlockStreamEnabled() {
return streamMode != RECORDS;
}
@@ -932,6 +955,14 @@ public boolean isRosterLifecycleEnabled() {
.useRosterLifecycle();
}
+ public KVStateChangeListener kvStateChangeListener() {
+ return kvStateChangeListener;
+ }
+
+ public BoundaryStateChangeListener boundaryStateChangeListener() {
+ return boundaryStateChangeListener;
+ }
+
/*==================================================================================================================
*
* Random private helper methods
@@ -980,6 +1011,7 @@ private void initializeDagger(@NonNull final State state, @NonNull final InitTri
.bootstrapConfigProviderImpl(bootstrapConfigProvider)
.fileServiceImpl(fileServiceImpl)
.contractServiceImpl(contractServiceImpl)
+ .scheduleService(scheduleServiceImpl)
.tssBaseService(tssBaseService)
.initTrigger(trigger)
.softwareVersion(version.getPbjSemanticVersion())
@@ -989,7 +1021,8 @@ private void initializeDagger(@NonNull final State state, @NonNull final InitTri
.crypto(CryptographyHolder.get())
.currentPlatformStatus(new CurrentPlatformStatusImpl(platform))
.servicesRegistry(servicesRegistry)
- .instantSource(instantSource)
+ .instantSource(appContext.instantSource())
+ .throttleFactory(appContext.throttleFactory())
.metrics(metrics)
.kvStateChangeListener(kvStateChangeListener)
.boundaryStateChangeListener(boundaryStateChangeListener)
@@ -1140,7 +1173,7 @@ private void manageBlockEndRound(@NonNull final Round round, @NonNull final Stat
* @return true if the source of time is the system time
*/
private boolean isNotEmbedded() {
- return instantSource == InstantSource.system();
+ return appContext.instantSource() == InstantSource.system();
}
private class ReadReconnectStartingStateHash implements ReconnectCompleteListener {
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/HederaInjectionComponent.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/HederaInjectionComponent.java
index 2f7c85ea09ec..2de952d208e3 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/HederaInjectionComponent.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/HederaInjectionComponent.java
@@ -40,10 +40,12 @@
import com.hedera.node.app.records.BlockRecordManager;
import com.hedera.node.app.service.contract.impl.ContractServiceImpl;
import com.hedera.node.app.service.file.impl.FileServiceImpl;
+import com.hedera.node.app.service.schedule.ScheduleService;
import com.hedera.node.app.services.ServicesInjectionModule;
import com.hedera.node.app.services.ServicesRegistry;
import com.hedera.node.app.spi.metrics.StoreMetricsService;
import com.hedera.node.app.spi.records.RecordCache;
+import com.hedera.node.app.spi.throttle.Throttle;
import com.hedera.node.app.state.HederaStateInjectionModule;
import com.hedera.node.app.state.WorkingStateAccessor;
import com.hedera.node.app.throttle.ThrottleServiceManager;
@@ -154,6 +156,9 @@ interface Builder {
@BindsInstance
Builder contractServiceImpl(ContractServiceImpl contractService);
+ @BindsInstance
+ Builder scheduleService(ScheduleService scheduleService);
+
@BindsInstance
Builder configProviderImpl(ConfigProviderImpl configProvider);
@@ -184,6 +189,9 @@ interface Builder {
@BindsInstance
Builder instantSource(InstantSource instantSource);
+ @BindsInstance
+ Builder throttleFactory(Throttle.Factory throttleFactory);
+
@BindsInstance
Builder softwareVersion(SemanticVersion softwareVersion);
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/BlockStreamManager.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/BlockStreamManager.java
index 08c754d1e8be..c421e0d9392c 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/BlockStreamManager.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/BlockStreamManager.java
@@ -101,6 +101,18 @@ enum PendingWork {
@NonNull
Instant lastIntervalProcessTime();
+ /**
+ * Sets the last consensus time at which a user transaction was last handled.
+ * @param lastHandleTime the last consensus time at which a user transaction was handled
+ */
+ void setLastHandleTime(@NonNull Instant lastHandleTime);
+
+ /**
+ * Returns the consensus time at which a user transaction was last handled.
+ */
+ @NonNull
+ Instant lastHandleTime();
+
/**
* Updates both the internal state of the block stream manager and the durable state of the network
* to reflect the end of the last-started round.
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/impl/BlockImplUtils.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/impl/BlockImplUtils.java
index 5243d2c10562..b8c044da7adc 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/impl/BlockImplUtils.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/impl/BlockImplUtils.java
@@ -35,10 +35,12 @@
import static com.hedera.hapi.block.stream.output.StateIdentifier.STATE_ID_ROSTERS;
import static com.hedera.hapi.block.stream.output.StateIdentifier.STATE_ID_ROSTER_STATE;
import static com.hedera.hapi.block.stream.output.StateIdentifier.STATE_ID_RUNNING_HASHES;
+import static com.hedera.hapi.block.stream.output.StateIdentifier.STATE_ID_SCHEDULED_COUNTS;
+import static com.hedera.hapi.block.stream.output.StateIdentifier.STATE_ID_SCHEDULED_ORDERS;
+import static com.hedera.hapi.block.stream.output.StateIdentifier.STATE_ID_SCHEDULED_USAGES;
import static com.hedera.hapi.block.stream.output.StateIdentifier.STATE_ID_SCHEDULES_BY_EQUALITY;
import static com.hedera.hapi.block.stream.output.StateIdentifier.STATE_ID_SCHEDULES_BY_EXPIRY;
import static com.hedera.hapi.block.stream.output.StateIdentifier.STATE_ID_SCHEDULES_BY_ID;
-import static com.hedera.hapi.block.stream.output.StateIdentifier.STATE_ID_SCHEDULE_IDS_BY_EXPIRY;
import static com.hedera.hapi.block.stream.output.StateIdentifier.STATE_ID_SCHEDULE_ID_BY_EQUALITY;
import static com.hedera.hapi.block.stream.output.StateIdentifier.STATE_ID_STAKING_INFO;
import static com.hedera.hapi.block.stream.output.StateIdentifier.STATE_ID_THROTTLE_USAGE;
@@ -173,8 +175,10 @@ public static int stateIdFor(@NonNull final String serviceName, @NonNull final S
case "SCHEDULES_BY_EQUALITY" -> STATE_ID_SCHEDULES_BY_EQUALITY.protoOrdinal();
case "SCHEDULES_BY_EXPIRY_SEC" -> STATE_ID_SCHEDULES_BY_EXPIRY.protoOrdinal();
case "SCHEDULES_BY_ID" -> STATE_ID_SCHEDULES_BY_ID.protoOrdinal();
- case "SCHEDULE_IDS_BY_EXPIRY_SEC" -> STATE_ID_SCHEDULE_IDS_BY_EXPIRY.protoOrdinal();
case "SCHEDULE_ID_BY_EQUALITY" -> STATE_ID_SCHEDULE_ID_BY_EQUALITY.protoOrdinal();
+ case "SCHEDULED_COUNTS" -> STATE_ID_SCHEDULED_COUNTS.protoOrdinal();
+ case "SCHEDULED_ORDERS" -> STATE_ID_SCHEDULED_ORDERS.protoOrdinal();
+ case "SCHEDULED_USAGES" -> STATE_ID_SCHEDULED_USAGES.protoOrdinal();
default -> UNKNOWN_STATE_ID;
};
case "TokenService" -> switch (stateKey) {
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/impl/BlockStreamBuilder.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/impl/BlockStreamBuilder.java
index 4625b783dc7e..50bc71243f76 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/impl/BlockStreamBuilder.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/impl/BlockStreamBuilder.java
@@ -1042,7 +1042,6 @@ public void nullOutSideEffectFields() {
scheduledTransactionId = null;
}
- transactionResultBuilder.scheduleRef((ScheduleID) null);
evmAddress = Bytes.EMPTY;
ethereumHash = Bytes.EMPTY;
runningHash = Bytes.EMPTY;
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/impl/BlockStreamManagerImpl.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/impl/BlockStreamManagerImpl.java
index c10529530ac2..051f6f7e8d3b 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/impl/BlockStreamManagerImpl.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/impl/BlockStreamManagerImpl.java
@@ -119,6 +119,8 @@ public class BlockStreamManagerImpl implements BlockStreamManager {
private PendingWork pendingWork = NONE;
// The last time at which interval-based processing was done
private Instant lastIntervalProcessTime = Instant.EPOCH;
+ // The last consensus time at which a user transaction was handled
+ private Instant lastHandleTime = Instant.EPOCH;
// All this state is scoped to producing the current block
private long blockNumber;
// Set to the round number of the last round handled before entering a freeze period
@@ -227,6 +229,7 @@ public void startRound(@NonNull final Round round, @NonNull final State state) {
final var blockStreamInfo = blockStreamInfoFrom(state);
pendingWork = classifyPendingWork(blockStreamInfo, version);
+ lastHandleTime = asInstant(blockStreamInfo.lastHandleTimeOrElse(EPOCH));
lastIntervalProcessTime = asInstant(blockStreamInfo.lastIntervalProcessTimeOrElse(EPOCH));
blockHashManager.startBlock(blockStreamInfo, lastBlockHash);
runningHashManager.startBlock(blockStreamInfo);
@@ -273,6 +276,16 @@ public void setLastIntervalProcessTime(@NonNull final Instant lastIntervalProces
this.lastIntervalProcessTime = requireNonNull(lastIntervalProcessTime);
}
+ @Override
+ public @NonNull final Instant lastHandleTime() {
+ return lastHandleTime;
+ }
+
+ @Override
+ public void setLastHandleTime(@NonNull final Instant lastHandleTime) {
+ this.lastHandleTime = requireNonNull(lastHandleTime);
+ }
+
@Override
public void endRound(@NonNull final State state, final long roundNum) {
if (shouldCloseBlock(roundNum, roundsPerBlock)) {
@@ -306,7 +319,8 @@ public void endRound(@NonNull final State state, final long roundNum) {
boundaryTimestamp,
pendingWork != POST_UPGRADE_WORK,
version,
- asTimestamp(lastIntervalProcessTime)));
+ asTimestamp(lastIntervalProcessTime),
+ asTimestamp(lastHandleTime)));
((CommittableWritableStates) writableState).commit();
// Serialize and hash the final block item
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/impl/BoundaryStateChangeListener.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/impl/BoundaryStateChangeListener.java
index 532cf1a2f97e..4da8ca14d14a 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/impl/BoundaryStateChangeListener.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/impl/BoundaryStateChangeListener.java
@@ -64,6 +64,9 @@ public class BoundaryStateChangeListener implements StateChangeListener {
private final SortedMap singletonUpdates = new TreeMap<>();
private final SortedMap> queueUpdates = new TreeMap<>();
+ @Nullable
+ private Instant lastConsensusTime;
+
@Nullable
private Timestamp boundaryTimestamp;
@@ -75,11 +78,19 @@ public class BoundaryStateChangeListener implements StateChangeListener {
return requireNonNull(boundaryTimestamp);
}
+ /**
+ * Returns the last consensus time used for a transaction.
+ */
+ public @NonNull Instant lastConsensusTimeOrThrow() {
+ return requireNonNull(lastConsensusTime);
+ }
+
/**
* Resets the state of the listener.
*/
public void reset() {
boundaryTimestamp = null;
+ lastConsensusTime = null;
singletonUpdates.clear();
queueUpdates.clear();
}
@@ -116,7 +127,8 @@ public List allStateChanges() {
* @param lastUsedConsensusTime the last used consensus time
*/
public void setBoundaryTimestamp(@NonNull final Instant lastUsedConsensusTime) {
- boundaryTimestamp = asTimestamp(requireNonNull(lastUsedConsensusTime).plusNanos(1));
+ this.lastConsensusTime = requireNonNull(lastUsedConsensusTime);
+ boundaryTimestamp = asTimestamp(lastUsedConsensusTime.plusNanos(1));
}
@Override
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/impl/KVStateChangeListener.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/impl/KVStateChangeListener.java
index fa5c93f56dbf..9300b3c4c6ef 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/impl/KVStateChangeListener.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/impl/KVStateChangeListener.java
@@ -29,6 +29,7 @@
import com.hedera.hapi.node.base.NftID;
import com.hedera.hapi.node.base.PendingAirdropId;
import com.hedera.hapi.node.base.ScheduleID;
+import com.hedera.hapi.node.base.TimestampSeconds;
import com.hedera.hapi.node.base.TokenAssociation;
import com.hedera.hapi.node.base.TokenID;
import com.hedera.hapi.node.base.TopicID;
@@ -45,8 +46,10 @@
import com.hedera.hapi.node.state.primitives.ProtoString;
import com.hedera.hapi.node.state.roster.Roster;
import com.hedera.hapi.node.state.schedule.Schedule;
-import com.hedera.hapi.node.state.schedule.ScheduleIdList;
import com.hedera.hapi.node.state.schedule.ScheduleList;
+import com.hedera.hapi.node.state.schedule.ScheduledCounts;
+import com.hedera.hapi.node.state.schedule.ScheduledOrder;
+import com.hedera.hapi.node.state.throttles.ThrottleUsageSnapshots;
import com.hedera.hapi.node.state.token.Account;
import com.hedera.hapi.node.state.token.AccountPendingAirdrop;
import com.hedera.hapi.node.state.token.Nft;
@@ -161,6 +164,12 @@ private static MapChangeKey mapChangeKeyFor(@NonNull final K key) {
case PendingAirdropId pendingAirdropId -> MapChangeKey.newBuilder()
.pendingAirdropIdKey(pendingAirdropId)
.build();
+ case TimestampSeconds timestampSeconds -> MapChangeKey.newBuilder()
+ .timestampSecondsKey(timestampSeconds)
+ .build();
+ case ScheduledOrder scheduledOrder -> MapChangeKey.newBuilder()
+ .scheduledOrderKey(scheduledOrder)
+ .build();
default -> throw new IllegalStateException(
"Unrecognized key type " + key.getClass().getSimpleName());
};
@@ -195,9 +204,6 @@ private static MapChangeValue mapChangeValueFor(@NonNull final V value) {
case ScheduleList scheduleList -> MapChangeValue.newBuilder()
.scheduleListValue(scheduleList)
.build();
- case ScheduleIdList scheduleIdList -> MapChangeValue.newBuilder()
- .scheduleIdListValue(scheduleIdList)
- .build();
case SlotValue slotValue -> MapChangeValue.newBuilder()
.slotValueValue(slotValue)
.build();
@@ -212,6 +218,12 @@ private static MapChangeValue mapChangeValueFor(@NonNull final V value) {
case AccountPendingAirdrop accountPendingAirdrop -> MapChangeValue.newBuilder()
.accountPendingAirdropValue(accountPendingAirdrop)
.build();
+ case ScheduledCounts scheduledCounts -> MapChangeValue.newBuilder()
+ .scheduledCountsValue(scheduledCounts)
+ .build();
+ case ThrottleUsageSnapshots throttleUsageSnapshots -> MapChangeValue.newBuilder()
+ .throttleUsageSnapshotsValue(throttleUsageSnapshots)
+ .build();
default -> throw new IllegalStateException(
"Unexpected value: " + value.getClass().getSimpleName());
};
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/schemas/V0560BlockStreamSchema.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/schemas/V0560BlockStreamSchema.java
index d789265c4642..ab8d8fb642f9 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/schemas/V0560BlockStreamSchema.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/blocks/schemas/V0560BlockStreamSchema.java
@@ -116,6 +116,7 @@ public void restart(@NonNull final MigrationContext ctx) {
.postUpgradeWorkDone(false)
.creationSoftwareVersion(ctx.previousVersion())
.lastIntervalProcessTime(blockInfo.consTimeOfLastHandledTxn())
+ .lastHandleTime(blockInfo.consTimeOfLastHandledTxn())
.build());
}
}
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/services/AppContextImpl.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/services/AppContextImpl.java
index f8fda60ab8a2..1c9c9c92e6e9 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/services/AppContextImpl.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/services/AppContextImpl.java
@@ -18,6 +18,7 @@
import com.hedera.node.app.spi.AppContext;
import com.hedera.node.app.spi.signatures.SignatureVerifier;
+import com.hedera.node.app.spi.throttle.Throttle;
import com.swirlds.config.api.Configuration;
import com.swirlds.state.lifecycle.info.NodeInfo;
import edu.umd.cs.findbugs.annotations.NonNull;
@@ -37,5 +38,6 @@ public record AppContextImpl(
@NonNull SignatureVerifier signatureVerifier,
@NonNull Gossip gossip,
@NonNull Supplier configSupplier,
- @NonNull Supplier selfNodeInfoSupplier)
+ @NonNull Supplier<NodeInfo> selfNodeInfoSupplier,
+ @NonNull Throttle.Factory throttleFactory)
implements AppContext {}
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/store/ServiceApiFactory.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/store/ServiceApiFactory.java
index c81a27d4ae8a..83cece491729 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/store/ServiceApiFactory.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/store/ServiceApiFactory.java
@@ -22,29 +22,27 @@
import com.hedera.node.app.service.token.api.TokenServiceApi;
import com.hedera.node.app.spi.api.ServiceApiProvider;
import com.hedera.node.app.spi.metrics.StoreMetricsService;
-import com.hedera.node.app.workflows.handle.stack.SavepointStackImpl;
import com.swirlds.config.api.Configuration;
+import com.swirlds.state.State;
import edu.umd.cs.findbugs.annotations.NonNull;
import java.util.Map;
-import javax.inject.Inject;
/**
- * A factory for creating service APIs based on the {@link SavepointStackImpl} for the current transaction.
+ * A factory for creating service APIs based on a {@link State}.
*/
public class ServiceApiFactory {
- private final SavepointStackImpl stack;
+ private final State state;
private final Configuration configuration;
private final StoreMetricsService storeMetricsService;
private static final Map<Class<?>, ServiceApiProvider<?>> API_PROVIDER =
Map.of(TokenServiceApi.class, TOKEN_SERVICE_API_PROVIDER);
- @Inject
public ServiceApiFactory(
- @NonNull final SavepointStackImpl stack,
+ @NonNull final State state,
@NonNull final Configuration configuration,
@NonNull final StoreMetricsService storeMetricsService) {
- this.stack = requireNonNull(stack);
+ this.state = requireNonNull(state);
this.configuration = requireNonNull(configuration);
this.storeMetricsService = requireNonNull(storeMetricsService);
}
@@ -53,7 +51,7 @@ public C getApi(@NonNull final Class apiInterface) throws IllegalArgument
requireNonNull(apiInterface);
final var provider = API_PROVIDER.get(apiInterface);
if (provider != null) {
- final var writableStates = stack.getWritableStates(provider.serviceName());
+ final var writableStates = state.getWritableStates(provider.serviceName());
final var api = provider.newInstance(configuration, storeMetricsService, writableStates);
assert apiInterface.isInstance(api); // This needs to be ensured while apis are registered
return apiInterface.cast(api);
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/store/StoreFactoryImpl.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/store/StoreFactoryImpl.java
index 3af18066e897..6404ee200310 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/store/StoreFactoryImpl.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/store/StoreFactoryImpl.java
@@ -18,7 +18,10 @@
import static java.util.Objects.requireNonNull;
+import com.hedera.node.app.spi.metrics.StoreMetricsService;
import com.hedera.node.app.spi.store.StoreFactory;
+import com.swirlds.config.api.Configuration;
+import com.swirlds.state.State;
import edu.umd.cs.findbugs.annotations.NonNull;
/**
@@ -30,6 +33,28 @@ public class StoreFactoryImpl implements StoreFactory {
private final WritableStoreFactory writableStoreFactory;
private final ServiceApiFactory serviceApiFactory;
+ /**
+ * Returns a {@link StoreFactory} based on the given state, configuration, and store metrics for the given service.
+ *
+ * @param state the state to create stores from
+ * @param serviceName the name of the service to scope the stores to
+ * @param configuration the configuration for the service
+ * @param storeMetricsService the metrics service to use for the stores
+ * @return a new {@link StoreFactory} instance
+ */
+ public static StoreFactory from(
+ @NonNull final State state,
+ @NonNull final String serviceName,
+ @NonNull final Configuration configuration,
+ @NonNull final StoreMetricsService storeMetricsService) {
+ requireNonNull(state);
+ requireNonNull(serviceName);
+ return new StoreFactoryImpl(
+ new ReadableStoreFactory(state),
+ new WritableStoreFactory(state, serviceName, configuration, storeMetricsService),
+ new ServiceApiFactory(state, configuration, storeMetricsService));
+ }
+
public StoreFactoryImpl(
@NonNull final ReadableStoreFactory readableStoreFactory,
@NonNull final WritableStoreFactory writableStoreFactory,
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/store/WritableStoreFactory.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/store/WritableStoreFactory.java
index 79fffd3081f3..721b2de37caf 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/store/WritableStoreFactory.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/store/WritableStoreFactory.java
@@ -138,7 +138,7 @@ public WritableStoreFactory(
@NonNull final String serviceName,
@NonNull final Configuration configuration,
@NonNull final StoreMetricsService storeMetricsService) {
- requireNonNull(state, "The argument 'stack' cannot be null!");
+ requireNonNull(state);
this.serviceName = requireNonNull(serviceName, "The argument 'serviceName' cannot be null!");
this.configuration = requireNonNull(configuration, "The argument 'configuration' cannot be null!");
this.storeMetricsService =
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/throttle/AppThrottleFactory.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/throttle/AppThrottleFactory.java
new file mode 100644
index 000000000000..4ca258a377f5
--- /dev/null
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/throttle/AppThrottleFactory.java
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.hedera.node.app.throttle;
+
+import static java.util.Objects.requireNonNull;
+
+import com.hedera.hapi.node.base.AccountID;
+import com.hedera.hapi.node.base.HederaFunctionality;
+import com.hedera.hapi.node.base.SignatureMap;
+import com.hedera.hapi.node.base.Transaction;
+import com.hedera.hapi.node.base.TransactionID;
+import com.hedera.hapi.node.state.throttles.ThrottleUsageSnapshots;
+import com.hedera.hapi.node.transaction.ThrottleDefinitions;
+import com.hedera.hapi.node.transaction.TransactionBody;
+import com.hedera.node.app.hapi.utils.throttles.DeterministicThrottle;
+import com.hedera.node.app.spi.throttle.Throttle;
+import com.hedera.node.app.workflows.TransactionInfo;
+import com.hedera.pbj.runtime.io.buffer.Bytes;
+import com.swirlds.config.api.Configuration;
+import com.swirlds.state.State;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import edu.umd.cs.findbugs.annotations.Nullable;
+import java.time.Instant;
+import java.util.function.IntSupplier;
+import java.util.function.Supplier;
+
+/**
+ * The application's strategy for creating a {@link Throttle} to use at consensus.
+ */
+public class AppThrottleFactory implements Throttle.Factory {
+ private final Supplier<State> stateSupplier;
+ private final Supplier<Configuration> configSupplier;
+ private final Supplier<ThrottleDefinitions> definitionsSupplier;
+ private final ThrottleAccumulatorFactory throttleAccumulatorFactory;
+
+ public interface ThrottleAccumulatorFactory {
+ ThrottleAccumulator newThrottleAccumulator(
+ @NonNull Supplier<Configuration> config,
+ @NonNull IntSupplier capacitySplitSource,
+ @NonNull ThrottleAccumulator.ThrottleType throttleType);
+ }
+
+ public AppThrottleFactory(
+ @NonNull final Supplier<Configuration> configSupplier,
+ @NonNull final Supplier<State> stateSupplier,
+ @NonNull final Supplier<ThrottleDefinitions> definitionsSupplier,
+ @NonNull final ThrottleAccumulatorFactory throttleAccumulatorFactory) {
+ this.configSupplier = requireNonNull(configSupplier);
+ this.stateSupplier = requireNonNull(stateSupplier);
+ this.definitionsSupplier = requireNonNull(definitionsSupplier);
+ this.throttleAccumulatorFactory = requireNonNull(throttleAccumulatorFactory);
+ }
+
+ @Override
+ public Throttle newThrottle(final int capacitySplit, @Nullable final ThrottleUsageSnapshots initialUsageSnapshots) {
+ final var throttleAccumulator = throttleAccumulatorFactory.newThrottleAccumulator(
+ configSupplier, () -> capacitySplit, ThrottleAccumulator.ThrottleType.BACKEND_THROTTLE);
+ throttleAccumulator.applyGasConfig();
+ throttleAccumulator.rebuildFor(definitionsSupplier.get());
+ if (initialUsageSnapshots != null) {
+ final var tpsThrottles = throttleAccumulator.allActiveThrottles();
+ final var tpsUsageSnapshots = initialUsageSnapshots.tpsThrottles();
+ for (int i = 0, n = tpsThrottles.size(); i < n; i++) {
+ tpsThrottles.get(i).resetUsageTo(tpsUsageSnapshots.get(i));
+ }
+ throttleAccumulator.gasLimitThrottle().resetUsageTo(initialUsageSnapshots.gasThrottleOrThrow());
+ }
+ // Throttle.allow() has the opposite polarity of ThrottleAccumulator.checkAndEnforceThrottle()
+ return new Throttle() {
+ @Override
+ public boolean allow(
+ @NonNull final AccountID payerId,
+ @NonNull final TransactionBody body,
+ @NonNull final HederaFunctionality function,
+ @NonNull final Instant now) {
+ return !throttleAccumulator.checkAndEnforceThrottle(
+ new TransactionInfo(
+ Transaction.DEFAULT,
+ body,
+ TransactionID.DEFAULT,
+ payerId,
+ SignatureMap.DEFAULT,
+ Bytes.EMPTY,
+ function,
+ null),
+ now,
+ stateSupplier.get());
+ }
+
+ @Override
+ public ThrottleUsageSnapshots usageSnapshots() {
+ return new ThrottleUsageSnapshots(
+ throttleAccumulator.allActiveThrottles().stream()
+ .map(DeterministicThrottle::usageSnapshot)
+ .toList(),
+ throttleAccumulator.gasLimitThrottle().usageSnapshot());
+ }
+ };
+ }
+}
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/throttle/NetworkUtilizationManager.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/throttle/NetworkUtilizationManager.java
index b8d36e87429f..3bb8775f9ef9 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/throttle/NetworkUtilizationManager.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/throttle/NetworkUtilizationManager.java
@@ -34,7 +34,7 @@ public interface NetworkUtilizationManager {
* @param txnInfo - the transaction to use for updating the network utilization.
* @param consensusTime - the consensus time of the transaction.
* @param state - the state of the node.
- * @return
+ * @return whether the transaction was throttled
*/
boolean trackTxn(
@NonNull final TransactionInfo txnInfo, @NonNull final Instant consensusTime, @NonNull final State state);
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/throttle/ThrottleAccumulator.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/throttle/ThrottleAccumulator.java
index 2375fbf4a634..9a08017ee5d2 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/throttle/ThrottleAccumulator.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/throttle/ThrottleAccumulator.java
@@ -36,11 +36,13 @@
import static java.util.Collections.emptyList;
import static java.util.Objects.requireNonNull;
+import com.google.common.annotations.VisibleForTesting;
import com.hedera.hapi.node.base.AccountAmount;
import com.hedera.hapi.node.base.AccountID;
import com.hedera.hapi.node.base.HederaFunctionality;
import com.hedera.hapi.node.base.NftTransfer;
import com.hedera.hapi.node.base.SignatureMap;
+import com.hedera.hapi.node.base.Timestamp;
import com.hedera.hapi.node.base.TokenID;
import com.hedera.hapi.node.base.Transaction;
import com.hedera.hapi.node.base.TransactionID;
@@ -59,17 +61,19 @@
import com.hedera.node.app.hapi.utils.throttles.DeterministicThrottle;
import com.hedera.node.app.hapi.utils.throttles.GasLimitDeterministicThrottle;
import com.hedera.node.app.service.schedule.ReadableScheduleStore;
+import com.hedera.node.app.service.schedule.ScheduleService;
+import com.hedera.node.app.service.schedule.impl.ReadableScheduleStoreImpl;
import com.hedera.node.app.service.token.ReadableAccountStore;
import com.hedera.node.app.service.token.ReadableTokenRelationStore;
import com.hedera.node.app.spi.workflows.HandleException;
import com.hedera.node.app.store.ReadableStoreFactory;
import com.hedera.node.app.workflows.TransactionInfo;
-import com.hedera.node.config.ConfigProvider;
import com.hedera.node.config.data.AccountsConfig;
import com.hedera.node.config.data.AutoCreationConfig;
import com.hedera.node.config.data.ContractsConfig;
import com.hedera.node.config.data.EntitiesConfig;
import com.hedera.node.config.data.LazyCreationConfig;
+import com.hedera.node.config.data.LedgerConfig;
import com.hedera.node.config.data.SchedulingConfig;
import com.hedera.node.config.data.TokensConfig;
import com.hedera.pbj.runtime.io.buffer.Bytes;
@@ -88,6 +92,7 @@
import java.util.Optional;
import java.util.Set;
import java.util.function.IntSupplier;
+import java.util.function.Supplier;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@@ -110,39 +115,61 @@ public class ThrottleAccumulator {
private boolean lastTxnWasGasThrottled;
private GasLimitDeterministicThrottle gasThrottle;
private List<DeterministicThrottle> activeThrottles = emptyList();
+
+ @Nullable
private final ThrottleMetrics throttleMetrics;
- private final ConfigProvider configProvider;
+ private final Supplier<Configuration> configSupplier;
private final IntSupplier capacitySplitSource;
private final ThrottleType throttleType;
+ private final Verbose verbose;
+
+ /**
+ * Whether the accumulator should log verbose definitions.
+ */
+ public enum Verbose {
+ YES,
+ NO
+ }
public ThrottleAccumulator(
+ @NonNull final Supplier<Configuration> configSupplier,
@NonNull final IntSupplier capacitySplitSource,
- @NonNull final ConfigProvider configProvider,
+ @NonNull final ThrottleType throttleType) {
+ this(capacitySplitSource, configSupplier, throttleType, null, Verbose.NO);
+ }
+
+ public ThrottleAccumulator(
+ @NonNull final IntSupplier capacitySplitSource,
+ @NonNull final Supplier<Configuration> configSupplier,
@NonNull final ThrottleType throttleType,
- @NonNull final ThrottleMetrics throttleMetrics) {
- this.configProvider = requireNonNull(configProvider, "configProvider must not be null");
+ @Nullable final ThrottleMetrics throttleMetrics,
+ @NonNull final Verbose verbose) {
+ this.configSupplier = requireNonNull(configSupplier, "configSupplier must not be null");
this.capacitySplitSource = requireNonNull(capacitySplitSource, "capacitySplitSource must not be null");
this.throttleType = requireNonNull(throttleType, "throttleType must not be null");
- this.throttleMetrics = requireNonNull(throttleMetrics, "throttleMetrics must not be null");
+ this.verbose = requireNonNull(verbose);
+ this.throttleMetrics = throttleMetrics;
}
// For testing purposes, in practice the gas throttle is
// lazy-initialized based on the configuration before handling
// any transactions
+ @VisibleForTesting
public ThrottleAccumulator(
@NonNull final IntSupplier capacitySplitSource,
- @NonNull final ConfigProvider configProvider,
+ @NonNull final Supplier<Configuration> configSupplier,
@NonNull final ThrottleType throttleType,
@NonNull final ThrottleMetrics throttleMetrics,
@NonNull final GasLimitDeterministicThrottle gasThrottle) {
- this.configProvider = requireNonNull(configProvider, "configProvider must not be null");
+ this.configSupplier = requireNonNull(configSupplier, "configSupplier must not be null");
this.capacitySplitSource = requireNonNull(capacitySplitSource, "capacitySplitSource must not be null");
this.throttleType = requireNonNull(throttleType, "throttleType must not be null");
this.gasThrottle = requireNonNull(gasThrottle, "gasThrottle must not be null");
this.throttleMetrics = throttleMetrics;
- this.throttleMetrics.setupGasThrottleMetric(gasThrottle, configProvider.getConfiguration());
+ this.throttleMetrics.setupGasThrottleMetric(gasThrottle, configSupplier.get());
+ this.verbose = Verbose.YES;
}
/**
@@ -182,7 +209,7 @@ public boolean checkAndEnforceThrottle(
@NonNull final Query query,
@NonNull final State state,
@Nullable final AccountID queryPayerId) {
- final var configuration = configProvider.getConfiguration();
+ final var configuration = configSupplier.get();
if (throttleExempt(queryPayerId, configuration)) {
return false;
}
@@ -272,7 +299,7 @@ public void leakCapacityForNOfUnscaled(final int n, @NonNull final HederaFunctio
* @param value the amount of gas to leak
*/
public void leakUnusedGasPreviouslyReserved(@NonNull final TransactionInfo txnInfo, final long value) {
- final var configuration = configProvider.getConfiguration();
+ final var configuration = configSupplier.get();
if (throttleExempt(txnInfo.payerID(), configuration)) {
return;
}
@@ -346,7 +373,9 @@ public void resetUsage() {
* Updates all metrics for the active throttles and the gas throttle
*/
public void updateAllMetrics() {
- throttleMetrics.updateAllMetrics();
+ if (throttleMetrics != null) {
+ throttleMetrics.updateAllMetrics();
+ }
}
private boolean shouldThrottleTxn(
@@ -355,7 +384,7 @@ private boolean shouldThrottleTxn(
@NonNull final Instant now,
@NonNull final State state) {
final var function = txnInfo.functionality();
- final var configuration = configProvider.getConfiguration();
+ final var configuration = configSupplier.get();
// Note that by payer exempt from throttling we mean just that those transactions will not be throttled,
// such payer accounts neither impact the throttles nor are they impacted by them
@@ -384,14 +413,12 @@ private boolean shouldThrottleTxn(
if (isScheduled) {
throw new IllegalStateException("ScheduleCreate cannot be a child!");
}
-
yield shouldThrottleScheduleCreate(manager, txnInfo, now, state);
}
case SCHEDULE_SIGN -> {
if (isScheduled) {
throw new IllegalStateException("ScheduleSign cannot be a child!");
}
-
yield shouldThrottleScheduleSign(manager, txnInfo, now, state);
}
case TOKEN_MINT -> shouldThrottleMint(manager, txnInfo.txBody().tokenMint(), now, configuration);
@@ -417,16 +444,15 @@ yield shouldThrottleEthTxn(
private boolean shouldThrottleScheduleCreate(
final ThrottleReqsManager manager, final TransactionInfo txnInfo, final Instant now, final State state) {
final var txnBody = txnInfo.txBody();
- final var scheduleCreate = txnBody.scheduleCreateOrThrow();
- final var scheduled = scheduleCreate.scheduledTransactionBodyOrThrow();
+ final var op = txnBody.scheduleCreateOrThrow();
+ final var scheduled = op.scheduledTransactionBodyOrThrow();
final var schedule = Schedule.newBuilder()
.originalCreateTransaction(txnBody)
.payerAccountId(txnInfo.payerID())
.scheduledTransaction(scheduled)
.build();
-
- TransactionBody innerTxn;
- HederaFunctionality scheduledFunction;
+ final TransactionBody innerTxn;
+ final HederaFunctionality scheduledFunction;
try {
innerTxn = childAsOrdinary(schedule);
scheduledFunction = functionOf(innerTxn);
@@ -434,16 +460,14 @@ private boolean shouldThrottleScheduleCreate(
log.debug("ScheduleCreate was associated with an invalid txn.", ex);
return true;
}
-
// maintain legacy behaviour
- final var configuration = configProvider.getConfiguration();
- final boolean areLongTermSchedulesEnabled =
- configuration.getConfigData(SchedulingConfig.class).longTermEnabled();
- if (!areLongTermSchedulesEnabled) {
+ final var config = configSupplier.get();
+ final var schedulingConfig = config.getConfigData(SchedulingConfig.class);
+ if (!schedulingConfig.longTermEnabled()) {
final boolean isAutoCreationEnabled =
- configuration.getConfigData(AutoCreationConfig.class).enabled();
+ config.getConfigData(AutoCreationConfig.class).enabled();
final boolean isLazyCreationEnabled =
- configuration.getConfigData(LazyCreationConfig.class).enabled();
+ config.getConfigData(LazyCreationConfig.class).enabled();
// we check for CryptoTransfer because implicit creations (i.e. auto- or lazy-creation) may happen in it,
// and we need to throttle those separately
@@ -462,32 +486,27 @@ private boolean shouldThrottleScheduleCreate(
}
return !manager.allReqsMetAt(now);
} else {
- // TODO : throttles will be implemented in following PRs
- // log.warn("Long term scheduling is enabled, but throttling of long term schedules is not yet
- // implemented.");
+ // We first enforce the limit on the ScheduleCreate TPS
if (!manager.allReqsMetAt(now)) {
return true;
}
-
- // only check deeply if the schedule could immediately execute
- if ((!scheduleCreate.waitForExpiry()) && (throttleType == FRONTEND_THROTTLE)) {
- var effectivePayer = scheduleCreate.hasPayerAccountID()
- ? scheduleCreate.payerAccountID()
- : txnBody.transactionID().accountID();
-
- final var innerTxnInfo = new TransactionInfo(
- Transaction.DEFAULT,
- innerTxn,
- TransactionID.DEFAULT,
- effectivePayer,
- SignatureMap.DEFAULT,
- Bytes.EMPTY,
- scheduledFunction,
- null);
-
- return shouldThrottleTxn(true, innerTxnInfo, now, state);
+ // And then at ingest, ensure that not too many schedules will expire in a given second
+ if (throttleType == FRONTEND_THROTTLE) {
+ final long expiry;
+ if (op.waitForExpiry()) {
+ expiry = op.expirationTimeOrElse(Timestamp.DEFAULT).seconds();
+ } else {
+ final var ledgerConfig = config.getConfigData(LedgerConfig.class);
+ expiry = Optional.ofNullable(txnInfo.transactionID())
+ .orElse(TransactionID.DEFAULT)
+ .transactionValidStartOrElse(Timestamp.DEFAULT)
+ .seconds()
+ + ledgerConfig.scheduleTxExpiryTimeSecs();
+ }
+ final var scheduleStore = new ReadableScheduleStoreImpl(state.getReadableStates(ScheduleService.NAME));
+ final var numScheduled = scheduleStore.numTransactionsScheduledAt(expiry);
+ return numScheduled >= schedulingConfig.maxTxnPerSec();
}
-
return false;
}
}
@@ -500,7 +519,7 @@ private boolean shouldThrottleScheduleSign(
}
// maintain legacy behaviour
- final var configuration = configProvider.getConfiguration();
+ final var configuration = configSupplier.get();
final boolean areLongTermSchedulesEnabled =
configuration.getConfigData(SchedulingConfig.class).longTermEnabled();
if (!areLongTermSchedulesEnabled) {
@@ -871,8 +890,10 @@ public void rebuildFor(@NonNull final ThrottleDefinitions defs) {
functionReqs = newFunctionReqs;
activeThrottles = newActiveThrottles;
- final var configuration = configProvider.getConfiguration();
- throttleMetrics.setupThrottleMetrics(activeThrottles, configuration);
+ if (throttleMetrics != null) {
+ final var configuration = configSupplier.get();
+ throttleMetrics.setupThrottleMetrics(activeThrottles, configuration);
+ }
logResolvedDefinitions(capacitySplitSource.getAsInt());
}
@@ -881,18 +902,22 @@ public void rebuildFor(@NonNull final ThrottleDefinitions defs) {
* Rebuilds the gas throttle based on the current configuration.
*/
public void applyGasConfig() {
- final var configuration = configProvider.getConfiguration();
+ final var configuration = configSupplier.get();
final var contractsConfig = configuration.getConfigData(ContractsConfig.class);
if (contractsConfig.throttleThrottleByGas() && contractsConfig.maxGasPerSec() == 0) {
log.warn("{} gas throttling enabled, but limited to 0 gas/sec", throttleType.name());
}
gasThrottle = new GasLimitDeterministicThrottle(contractsConfig.maxGasPerSec());
- throttleMetrics.setupGasThrottleMetric(gasThrottle, configuration);
- log.info(
- "Resolved {} gas throttle -\n {} gas/sec (throttling {})",
- throttleType.name(),
- gasThrottle.capacity(),
- (contractsConfig.throttleThrottleByGas() ? "ON" : "OFF"));
+ if (throttleMetrics != null) {
+ throttleMetrics.setupGasThrottleMetric(gasThrottle, configuration);
+ }
+ if (verbose == Verbose.YES) {
+ log.info(
+ "Resolved {} gas throttle -\n {} gas/sec (throttling {})",
+ throttleType.name(),
+ gasThrottle.capacity(),
+ (contractsConfig.throttleThrottleByGas() ? "ON" : "OFF"));
+ }
}
@NonNull
@@ -902,6 +927,9 @@ private ThrottleGroup hapiGroupFromPbj(
}
private void logResolvedDefinitions(final int capacitySplit) {
+ if (verbose != Verbose.YES) {
+ return;
+ }
var sb = new StringBuilder("Resolved ")
.append(throttleType.name())
.append(" ")
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/throttle/ThrottleServiceManager.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/throttle/ThrottleServiceManager.java
index b898d6387470..fe4365cd9694 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/throttle/ThrottleServiceManager.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/throttle/ThrottleServiceManager.java
@@ -28,6 +28,7 @@
import com.hedera.hapi.node.state.congestion.CongestionLevelStarts;
import com.hedera.hapi.node.state.throttles.ThrottleUsageSnapshot;
import com.hedera.hapi.node.state.throttles.ThrottleUsageSnapshots;
+import com.hedera.hapi.node.transaction.ThrottleDefinitions;
import com.hedera.hapi.node.transaction.TransactionBody;
import com.hedera.node.app.fees.congestion.CongestionMultipliers;
import com.hedera.node.app.hapi.utils.throttles.DeterministicThrottle;
@@ -42,6 +43,7 @@
import com.swirlds.state.spi.WritableSingletonState;
import com.swirlds.state.spi.WritableStates;
import edu.umd.cs.findbugs.annotations.NonNull;
+import edu.umd.cs.findbugs.annotations.Nullable;
import java.time.Instant;
import java.util.ArrayList;
import java.util.List;
@@ -59,6 +61,9 @@ public class ThrottleServiceManager {
private final ThrottleAccumulator backendThrottle;
private final CongestionMultipliers congestionMultipliers;
+ @Nullable
+ private ThrottleDefinitions activeDefinitions;
+
@Inject
public ThrottleServiceManager(
@NonNull final ThrottleParser throttleParser,
@@ -96,6 +101,15 @@ public void init(@NonNull final State state, @NonNull final Bytes throttleDefini
syncFromCongestionLevelStarts(serviceStates);
}
+ /**
+ * Returns the throttle definitions that are currently active.
+ * @return the active throttle definitions
+ * @throws NullPointerException if the active throttle definitions are not available
+ */
+ public @NonNull ThrottleDefinitions activeThrottleDefinitionsOrThrow() {
+ return requireNonNull(activeDefinitions);
+ }
+
/**
* Saves the current state of the throttles and congestion level starts
* to the given state.
@@ -176,10 +190,11 @@ private void saveCongestionLevelStartsTo(@NonNull final WritableStates serviceSt
translateToList(congestionMultipliers.gasThrottleMultiplierCongestionStarts())));
}
- private @NonNull ThrottleParser.ValidatedThrottles rebuildThrottlesFrom(@NonNull Bytes encoded) {
+ private @NonNull ThrottleParser.ValidatedThrottles rebuildThrottlesFrom(@NonNull final Bytes encoded) {
final var validatedThrottles = throttleParser.parse(encoded);
ingestThrottle.rebuildFor(validatedThrottles.throttleDefinitions());
backendThrottle.rebuildFor(validatedThrottles.throttleDefinitions());
+ this.activeDefinitions = validatedThrottles.throttleDefinitions();
return validatedThrottles;
}
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/throttle/ThrottleServiceModule.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/throttle/ThrottleServiceModule.java
index dafb463e44ee..32d9ae6b2208 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/throttle/ThrottleServiceModule.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/throttle/ThrottleServiceModule.java
@@ -21,6 +21,7 @@
import static com.hedera.node.app.throttle.ThrottleAccumulator.ThrottleType.FRONTEND_THROTTLE;
import com.hedera.node.app.fees.congestion.ThrottleMultiplier;
+import com.hedera.node.app.throttle.ThrottleAccumulator.Verbose;
import com.hedera.node.app.throttle.annotations.BackendThrottle;
import com.hedera.node.app.throttle.annotations.CryptoTransferThrottleMultiplier;
import com.hedera.node.app.throttle.annotations.GasThrottleMultiplier;
@@ -48,9 +49,11 @@ NetworkUtilizationManager provideNetworkUtilizationManager(
@Provides
@Singleton
@BackendThrottle
- static ThrottleAccumulator provideBackendThrottleAccumulator(ConfigProvider configProvider, Metrics metrics) {
+ static ThrottleAccumulator provideBackendThrottleAccumulator(
+ @NonNull final ConfigProvider configProvider, @NonNull final Metrics metrics) {
final var throttleMetrics = new ThrottleMetrics(metrics, BACKEND_THROTTLE);
- return new ThrottleAccumulator(SUPPLY_ONE, configProvider, BACKEND_THROTTLE, throttleMetrics);
+ return new ThrottleAccumulator(
+ SUPPLY_ONE, configProvider::getConfiguration, BACKEND_THROTTLE, throttleMetrics, Verbose.YES);
}
@Provides
@@ -61,7 +64,12 @@ static ThrottleAccumulator provideIngestThrottleAccumulator(
@NonNull final ConfigProvider configProvider,
@NonNull final Metrics metrics) {
final var throttleMetrics = new ThrottleMetrics(metrics, FRONTEND_THROTTLE);
- return new ThrottleAccumulator(frontendThrottleSplit, configProvider, FRONTEND_THROTTLE, throttleMetrics);
+ return new ThrottleAccumulator(
+ frontendThrottleSplit,
+ configProvider::getConfiguration,
+ FRONTEND_THROTTLE,
+ throttleMetrics,
+ Verbose.YES);
}
@Provides
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/TransactionInfo.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/TransactionInfo.java
index e28fd948289d..57751b7f8767 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/TransactionInfo.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/TransactionInfo.java
@@ -16,6 +16,8 @@
package com.hedera.node.app.workflows;
+import static java.util.Objects.requireNonNull;
+
import com.hedera.hapi.node.base.AccountID;
import com.hedera.hapi.node.base.HederaFunctionality;
import com.hedera.hapi.node.base.SignatureMap;
@@ -92,4 +94,13 @@ public static TransactionInfo from(
return new TransactionInfo(
transaction, txBody, transactionId, payerId, signatureMap, signedBytes, functionality, null);
}
+
+ /**
+ * Returns the {@link TransactionID} of the transaction.
+ * @return the transaction ID
+ * @throws NullPointerException if the transaction ID is null
+ */
+ public TransactionID txnIdOrThrow() {
+ return requireNonNull(transactionID);
+ }
}
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/HandleWorkflow.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/HandleWorkflow.java
index 84beb45eb25f..a50568480ac7 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/HandleWorkflow.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/HandleWorkflow.java
@@ -18,9 +18,9 @@
import static com.hedera.hapi.node.base.ResponseCodeEnum.BUSY;
import static com.hedera.hapi.node.base.ResponseCodeEnum.FAIL_INVALID;
+import static com.hedera.hapi.util.HapiUtils.asTimestamp;
import static com.hedera.node.app.records.schemas.V0490BlockRecordSchema.BLOCK_INFO_STATE_KEY;
import static com.hedera.node.app.service.file.impl.schemas.V0490FileSchema.BLOBS_KEY;
-import static com.hedera.node.app.service.schedule.impl.handlers.HandlerUtility.childAsOrdinary;
import static com.hedera.node.app.spi.workflows.HandleContext.TransactionCategory.SCHEDULED;
import static com.hedera.node.app.spi.workflows.HandleContext.TransactionCategory.USER;
import static com.hedera.node.app.spi.workflows.record.StreamBuilder.ReversingBehavior.REVERSIBLE;
@@ -34,8 +34,8 @@
import static com.hedera.node.app.workflows.handle.TransactionType.GENESIS_TRANSACTION;
import static com.hedera.node.app.workflows.handle.TransactionType.ORDINARY_TRANSACTION;
import static com.hedera.node.app.workflows.handle.TransactionType.POST_UPGRADE_TRANSACTION;
-import static com.hedera.node.app.workflows.prehandle.PreHandleResult.Status.NODE_DUE_DILIGENCE_FAILURE;
import static com.hedera.node.config.types.StreamMode.BLOCKS;
+import static com.hedera.node.config.types.StreamMode.BOTH;
import static com.hedera.node.config.types.StreamMode.RECORDS;
import static com.swirlds.platform.system.InitTrigger.EVENT_STREAM_RECOVERY;
import static com.swirlds.state.lifecycle.HapiUtils.SEMANTIC_VERSION_COMPARATOR;
@@ -53,6 +53,8 @@
import com.hedera.hapi.util.HapiUtils;
import com.hedera.node.app.blocks.BlockStreamManager;
import com.hedera.node.app.blocks.impl.BlockStreamBuilder;
+import com.hedera.node.app.blocks.impl.BoundaryStateChangeListener;
+import com.hedera.node.app.blocks.impl.KVStateChangeListener;
import com.hedera.node.app.fees.ExchangeRateManager;
import com.hedera.node.app.records.BlockRecordManager;
import com.hedera.node.app.records.BlockRecordService;
@@ -60,10 +62,9 @@
import com.hedera.node.app.service.addressbook.impl.WritableNodeStore;
import com.hedera.node.app.service.addressbook.impl.helpers.AddressBookHelper;
import com.hedera.node.app.service.file.FileService;
-import com.hedera.node.app.service.schedule.ReadableScheduleStore;
+import com.hedera.node.app.service.schedule.ExecutableTxn;
import com.hedera.node.app.service.schedule.ScheduleService;
-import com.hedera.node.app.service.schedule.ScheduleStreamBuilder;
-import com.hedera.node.app.service.schedule.WritableScheduleStore;
+import com.hedera.node.app.service.schedule.impl.WritableScheduleStoreImpl;
import com.hedera.node.app.service.token.TokenService;
import com.hedera.node.app.service.token.impl.WritableNetworkStakingRewardsStore;
import com.hedera.node.app.service.token.impl.WritableStakingInfoStore;
@@ -76,8 +77,6 @@
import com.hedera.node.app.state.HederaRecordCache.DueDiligenceFailure;
import com.hedera.node.app.state.recordcache.BlockRecordSource;
import com.hedera.node.app.state.recordcache.LegacyListRecordSource;
-import com.hedera.node.app.store.ReadableStoreFactory;
-import com.hedera.node.app.store.ServiceApiFactory;
import com.hedera.node.app.store.StoreFactoryImpl;
import com.hedera.node.app.store.WritableStoreFactory;
import com.hedera.node.app.throttle.ThrottleServiceManager;
@@ -93,23 +92,24 @@
import com.hedera.node.app.workflows.handle.steps.UserTxnFactory;
import com.hedera.node.config.ConfigProvider;
import com.hedera.node.config.data.BlockStreamConfig;
+import com.hedera.node.config.data.ConsensusConfig;
import com.hedera.node.config.data.SchedulingConfig;
import com.hedera.node.config.data.TssConfig;
import com.hedera.node.config.types.StreamMode;
import com.hedera.pbj.runtime.io.buffer.Bytes;
import com.swirlds.platform.system.InitTrigger;
import com.swirlds.platform.system.Round;
-import com.swirlds.platform.system.events.ConsensusEvent;
import com.swirlds.platform.system.transaction.ConsensusTransaction;
import com.swirlds.state.State;
import com.swirlds.state.lifecycle.info.NetworkInfo;
import com.swirlds.state.lifecycle.info.NodeInfo;
+import com.swirlds.state.spi.CommittableWritableStates;
+import com.swirlds.state.spi.WritableStates;
import edu.umd.cs.findbugs.annotations.NonNull;
import java.time.Instant;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
-import java.util.concurrent.atomic.AtomicBoolean;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.logging.log4j.LogManager;
@@ -147,9 +147,14 @@ public class HandleWorkflow {
private final AddressBookHelper addressBookHelper;
private final TssBaseService tssBaseService;
private final ConfigProvider configProvider;
+ private final KVStateChangeListener kvStateChangeListener;
+ private final BoundaryStateChangeListener boundaryStateChangeListener;
+ private final ScheduleService scheduleService;
// The last second since the epoch at which the metrics were updated; this does not affect transaction handling
private long lastMetricUpdateSecond;
+ // The last second for which this workflow has confirmed all scheduled transactions are executed
+ private long lastExecutedSecond;
@Inject
public HandleWorkflow(
@@ -174,7 +179,10 @@ public HandleWorkflow(
@NonNull final List migrationStateChanges,
@NonNull final UserTxnFactory userTxnFactory,
@NonNull final AddressBookHelper addressBookHelper,
- @NonNull final TssBaseService tssBaseService) {
+ @NonNull final TssBaseService tssBaseService,
+ @NonNull final KVStateChangeListener kvStateChangeListener,
+ @NonNull final BoundaryStateChangeListener boundaryStateChangeListener,
+ @NonNull final ScheduleService scheduleService) {
this.networkInfo = requireNonNull(networkInfo);
this.stakePeriodChanges = requireNonNull(stakePeriodChanges);
this.dispatchProcessor = requireNonNull(dispatchProcessor);
@@ -195,12 +203,15 @@ public HandleWorkflow(
this.migrationStateChanges = new ArrayList<>(migrationStateChanges);
this.userTxnFactory = requireNonNull(userTxnFactory);
this.configProvider = requireNonNull(configProvider);
+ this.addressBookHelper = requireNonNull(addressBookHelper);
+ this.tssBaseService = requireNonNull(tssBaseService);
+ this.kvStateChangeListener = requireNonNull(kvStateChangeListener);
+ this.boundaryStateChangeListener = requireNonNull(boundaryStateChangeListener);
+ this.scheduleService = requireNonNull(scheduleService);
this.streamMode = configProvider
.getConfiguration()
.getConfigData(BlockStreamConfig.class)
.streamMode();
- this.addressBookHelper = requireNonNull(addressBookHelper);
- this.tssBaseService = requireNonNull(tssBaseService);
}
/**
@@ -238,11 +249,20 @@ public void handleRound(@NonNull final State state, @NonNull final Round round)
}
}
+ /**
+ * Applies all effects of the events in the given round to the given state, writing stream items
+ * that capture these effects in the process.
+ * @param state the state to apply the effects to
+ * @param round the round to apply the effects of
+ */
private void handleEvents(@NonNull final State state, @NonNull final Round round) {
- final var userTransactionsHandled = new AtomicBoolean(false);
+ boolean userTransactionsHandled = false;
for (final var event : round) {
if (streamMode != RECORDS) {
- streamMetadata(event);
+ final var headerItem = BlockItem.newBuilder()
+ .eventHeader(new EventHeader(event.getEventCore(), event.getSignature()))
+ .build();
+ blockStreamManager.writeItem(headerItem);
}
final var creator = networkInfo.nodeInfo(event.getCreatorId().id());
if (creator == null) {
@@ -269,8 +289,8 @@ private void handleEvents(@NonNull final State state, @NonNull final Round round
try {
// skip system transactions
if (!platformTxn.isSystem()) {
- userTransactionsHandled.set(true);
- handlePlatformTransaction(state, event, creator, platformTxn);
+ userTransactionsHandled = true;
+ handlePlatformTransaction(state, creator, platformTxn, event.getSoftwareVersion());
}
} catch (final Exception e) {
logger.fatal(
@@ -288,33 +308,26 @@ private void handleEvents(@NonNull final State state, @NonNull final Round round
// round is the minimum we can do. Note the BlockStreamManager#endRound() method is called in Hedera's
// implementation of SwirldState#sealConsensusRound(), since the BlockStreamManager cannot do its
// end-of-block work until the platform has finished all its state changes.
- if (userTransactionsHandled.get() && streamMode != BLOCKS) {
+ if (userTransactionsHandled && streamMode != BLOCKS) {
blockRecordManager.endRound(state);
}
}
- private void streamMetadata(@NonNull final ConsensusEvent event) {
- final var metadataItem = BlockItem.newBuilder()
- .eventHeader(new EventHeader(event.getEventCore(), event.getSignature()))
- .build();
- blockStreamManager.writeItem(metadataItem);
- }
-
/**
* Handles a platform transaction. This method is responsible for creating a {@link UserTxn} and
* executing the workflow for the transaction. This produces a stream of records that are then passed to the
* {@link BlockRecordManager} to be externalized.
*
- * @param state the writable {@link State} that this transaction will work on
- * @param event the {@link ConsensusEvent} that this transaction belongs to
+ * @param state the writable {@link State} that this transaction will work on
* @param creator the {@link NodeInfo} of the creator of the transaction
- * @param txn the {@link ConsensusTransaction} to be handled
+ * @param txn the {@link ConsensusTransaction} to be handled
+ * @param txnVersion the software version for the event containing the transaction
*/
private void handlePlatformTransaction(
@NonNull final State state,
- @NonNull final ConsensusEvent event,
@NonNull final NodeInfo creator,
- @NonNull final ConsensusTransaction txn) {
+ @NonNull final ConsensusTransaction txn,
+ @NonNull final SemanticVersion txnVersion) {
final var handleStart = System.nanoTime();
// Always use platform-assigned time for user transaction, c.f. https://hips.hedera.com/hip/hip-993
@@ -334,8 +347,9 @@ private void handlePlatformTransaction(
default -> ORDINARY_TRANSACTION;};
}
- final var userTxn = userTxnFactory.createUserTxn(state, event, creator, txn, consensusNow, type);
- final var handleOutput = execute(state, userTxn);
+ final var userTxn = userTxnFactory.createUserTxn(state, creator, txn, consensusNow, type);
+ var lastRecordManagerTime = streamMode == RECORDS ? blockRecordManager.consTimeOfLastHandledTxn() : null;
+ final var handleOutput = execute(userTxn, txnVersion);
if (streamMode != BLOCKS) {
final var records = ((LegacyListRecordSource) handleOutput.recordSourceOrThrow()).precomputedRecords();
blockRecordManager.endUserTransaction(records.stream(), state);
@@ -343,42 +357,164 @@ private void handlePlatformTransaction(
if (streamMode != RECORDS) {
handleOutput.blockRecordSourceOrThrow().forEachItem(blockStreamManager::writeItem);
}
+
opWorkflowMetrics.updateDuration(userTxn.functionality(), (int) (System.nanoTime() - handleStart));
+
+ if (streamMode == RECORDS) {
+ // We don't support long-term scheduled transactions if only producing records
+ // because that legacy state doesn't have an appropriate way to track the status
+ // of triggered execution work; so we just purge all expired schedules without
+ // further consideration here
+ purgeScheduling(state, lastRecordManagerTime, userTxn.consensusNow());
+ } else {
+ final var executionStart = blockStreamManager.lastIntervalProcessTime();
+ if (Instant.EPOCH.equals(executionStart)) {
+ blockStreamManager.setLastIntervalProcessTime(userTxn.consensusNow());
+ } else if (executionStart.getEpochSecond() > lastExecutedSecond) {
+ final var schedulingConfig = userTxn.config().getConfigData(SchedulingConfig.class);
+ final var consensusConfig = userTxn.config().getConfigData(ConsensusConfig.class);
+ // Since the next consensus time may be (now + separationNanos), we need to ensure that
+ // even if the last scheduled execution time is followed by the maximum number of records,
+ // its final assigned time will be strictly before the first of the next consensus time's
+ // preceding records; i.e. (now + separationNanos) - (maxAfter + maxBefore + 1)
+ final var lastUsableTime = userTxn.consensusNow()
+ .plusNanos(schedulingConfig.consTimeSeparationNanos()
+ - consensusConfig.handleMaxPrecedingRecords()
+ - (consensusConfig.handleMaxFollowingRecords() + 1));
+ // And the first possible time for the next execution is strictly after the last execution
+ // time plus the maximum number of preceding records
+ var nextTime = boundaryStateChangeListener
+ .lastConsensusTimeOrThrow()
+ .plusNanos(consensusConfig.handleMaxPrecedingRecords() + 1);
+ final var iter = scheduleService.executableTxns(
+ executionStart,
+ userTxn.consensusNow(),
+ StoreFactoryImpl.from(state, ScheduleService.NAME, userTxn.config(), storeMetricsService));
+ final var writableStates = state.getWritableStates(ScheduleService.NAME);
+ int n = schedulingConfig.maxExecutionsPerUserTxn();
+ // If we discover an executable transaction somewhere in the middle of the interval, this will
+ // be revised to the NBF time of that transaction; but for now we assume that everything up to
+ // the last second of the interval was executed
+ var executionEnd = userTxn.consensusNow();
+ while (iter.hasNext() && !nextTime.isAfter(lastUsableTime) && n > 0) {
+ final var executableTxn = iter.next();
+ if (schedulingConfig.longTermEnabled()) {
+ final var scheduledTxn = userTxnFactory.createUserTxn(
+ state,
+ userTxn.creatorInfo(),
+ nextTime,
+ ORDINARY_TRANSACTION,
+ executableTxn.payerId(),
+ executableTxn.body());
+ final var baseBuilder = baseBuilderFor(executableTxn, scheduledTxn);
+ final var scheduledDispatch = userTxnFactory.createDispatch(
+ scheduledTxn, baseBuilder, executableTxn.keyVerifier(), SCHEDULED);
+ dispatchProcessor.processDispatch(scheduledDispatch);
+ final var scheduledOutput = scheduledTxn
+ .stack()
+ .buildHandleOutput(scheduledTxn.consensusNow(), exchangeRateManager.exchangeRates());
+ recordCache.addRecordSource(
+ scheduledTxn.creatorInfo().nodeId(),
+ scheduledTxn.txnInfo().txnIdOrThrow(),
+ DueDiligenceFailure.NO,
+ scheduledOutput.preferringBlockRecordSource());
+ scheduledOutput.blockRecordSourceOrThrow().forEachItem(blockStreamManager::writeItem);
+ if (streamMode == BOTH) {
+ final var records = ((LegacyListRecordSource) scheduledOutput.recordSourceOrThrow())
+ .precomputedRecords();
+ blockRecordManager.endUserTransaction(records.stream(), state);
+ }
+ }
+ executionEnd = executableTxn.nbf();
+ doStreamingKVChanges(writableStates, executionEnd, iter::remove);
+ nextTime = boundaryStateChangeListener
+ .lastConsensusTimeOrThrow()
+ .plusNanos(consensusConfig.handleMaxPrecedingRecords() + 1);
+ n--;
+ }
+ blockStreamManager.setLastIntervalProcessTime(executionEnd);
+ if (!iter.hasNext() && executionEnd.getEpochSecond() > executionStart.getEpochSecond()) {
+ // Since the execution interval spanned at least a full second and there are no remaining
+ // transactions to execute in it, we can mark the last full second as executed
+ lastExecutedSecond = executionEnd.getEpochSecond() - 1;
+ }
+ doStreamingKVChanges(writableStates, executionEnd, iter::purgeUntilNext);
+ }
+ }
}
/**
- * Executes the user transaction and returns a stream of records that capture all
- * side effects on state that are stipulated by the pre-block-stream contract with
- * mirror nodes.
- *
- * Never throws an exception without a fundamental breakdown in the integrity
- * of the system invariants. If there is an internal error when executing the
- * transaction, returns a stream of a single {@link ResponseCodeEnum#FAIL_INVALID}
- * record with no other side effects.
- *
- *
IMPORTANT: With block streams, this contract will expand to include
- * all side effects on state, no exceptions.
+ * Type inference helper to compute the base builder for a {@link UserTxn} derived from a
+ * {@link ExecutableTxn}.
*
- * @return the stream of records
+ * @param <T> the type of the stream builder
+ * @param executableTxn the executable transaction to compute the base builder for
+ * @param userTxn the user transaction derived from the executable transaction
+ * @return the base builder for the user transaction
*/
- private HandleOutput execute(@NonNull final State state, @NonNull final UserTxn userTxn) {
+ private <T extends StreamBuilder> T baseBuilderFor(
+ @NonNull final ExecutableTxn<T> executableTxn, @NonNull final UserTxn userTxn) {
+ return userTxn.initBaseBuilder(
+ exchangeRateManager.exchangeRates(), executableTxn.builderType(), executableTxn.builderSpec());
+ }
+
+ /**
+ * Purges all service state used for scheduling work that has expired since the last time the
+ * purge was triggered; that is, state that was not yet purgeable at that time but is expired
+ * at the current time.
+ * @param state the state to purge
+ * @param then the last time the purge was triggered
+ * @param now the current time
+ */
+ private void purgeScheduling(@NonNull final State state, final Instant then, final Instant now) {
+ if (!Instant.EPOCH.equals(then) && then.getEpochSecond() < now.getEpochSecond()) {
+ final var writableStates = state.getWritableStates(ScheduleService.NAME);
+ doStreamingKVChanges(writableStates, now, () -> {
+ final var scheduleStore = new WritableScheduleStoreImpl(
+ writableStates, configProvider.getConfiguration(), storeMetricsService);
+ scheduleStore.purgeExpiredRangeClosed(then.getEpochSecond(), now.getEpochSecond() - 1);
+ });
+ }
+ }
+
+ private void doStreamingKVChanges(
+ @NonNull final WritableStates writableStates, @NonNull final Instant now, @NonNull final Runnable action) {
+ if (streamMode != RECORDS) {
+ kvStateChangeListener.reset();
+ }
+ action.run();
+ ((CommittableWritableStates) writableStates).commit();
+ if (streamMode != RECORDS) {
+ final var changes = kvStateChangeListener.getStateChanges();
+ if (!changes.isEmpty()) {
+ final var stateChangesItem = BlockItem.newBuilder()
+ .stateChanges(new StateChanges(asTimestamp(now), new ArrayList<>(changes)))
+ .build();
+ blockStreamManager.writeItem(stateChangesItem);
+ }
+ }
+ }
+
+ /**
+ * Executes the user transaction and returns the output that should be externalized in the
+ * block stream. (And if still producing records, the precomputed records.)
+ *
+ * Never throws an exception without a fundamental breakdown of the system invariants. If
+ * there is an internal error when executing the transaction, returns stream output of
+ * just the transaction with a {@link ResponseCodeEnum#FAIL_INVALID} transaction result,
+ * and no other side effects.
+ * @param userTxn the user transaction to execute
+ * @param txnVersion the software version for the event containing the transaction
+ * @return the stream output from executing the transaction
+ */
+ private HandleOutput execute(@NonNull final UserTxn userTxn, @NonNull final SemanticVersion txnVersion) {
try {
- if (isOlderSoftwareEvent(userTxn)) {
+ if (isOlderSoftwareEvent(txnVersion)) {
if (streamMode != BLOCKS) {
- final var lastRecordManagerTime = blockRecordManager.consTimeOfLastHandledTxn();
- // This updates consTimeOfLastHandledTxn as a side-effect
+ // This updates consTimeOfLastHandledTxn as a side effect
blockRecordManager.advanceConsensusClock(userTxn.consensusNow(), userTxn.state());
- if (streamMode == RECORDS) {
- // If relying on last-handled time to trigger interval processing, do so now
- processInterval(
- state,
- userTxn.event(),
- userTxn.creatorInfo(),
- lastRecordManagerTime,
- userTxn.consensusNow(),
- userTxn);
- }
}
+ blockStreamManager.setLastHandleTime(userTxn.consensusNow());
initializeBuilderInfo(userTxn.baseBuilder(), userTxn.txnInfo(), exchangeRateManager.exchangeRates())
.status(BUSY);
// Flushes the BUSY builder to the stream, no other side effects
@@ -389,20 +525,26 @@ private HandleOutput execute(@NonNull final State state, @NonNull final UserTxn
systemSetup.externalizeInitSideEffects(
userTxn.tokenContextImpl(), exchangeRateManager.exchangeRates());
} else if (userTxn.type() == POST_UPGRADE_TRANSACTION) {
- final var writableStoreFactory = new WritableStoreFactory(
- userTxn.stack(), AddressBookService.NAME, userTxn.config(), storeMetricsService);
- final var nodeStore = writableStoreFactory.getStore(WritableNodeStore.class);
- final var writableStakingInfoStore =
- new WritableStakingInfoStore(userTxn.stack().getWritableStates(TokenService.NAME));
- final var writableNetworkStakingRewardsStore = new WritableNetworkStakingRewardsStore(
- userTxn.stack().getWritableStates(TokenService.NAME));
+ // Since we track node stake metadata separately from the future address book (FAB),
+ // we need to update that stake metadata from any node additions or deletions that
+ // just took effect; it would be nice to unify the FAB and stake metadata in the future
+ final var writableTokenStates = userTxn.stack().getWritableStates(TokenService.NAME);
final var streamBuilder = stakeInfoHelper.adjustPostUpgradeStakes(
userTxn.tokenContextImpl(),
networkInfo,
userTxn.config(),
- writableStakingInfoStore,
- writableNetworkStakingRewardsStore);
- addressBookHelper.adjustPostUpgradeNodeMetadata(networkInfo, userTxn.config(), nodeStore);
+ new WritableStakingInfoStore(writableTokenStates),
+ new WritableNetworkStakingRewardsStore(writableTokenStates));
+
+ // (FUTURE) Verify we can remove this deprecated node metadata sync now that DAB is active;
+ // it should never be the case that nodes are added or removed from the address book without
+ // those changes already being visible in the FAB
+ final var addressBookWritableStoreFactory = new WritableStoreFactory(
+ userTxn.stack(), AddressBookService.NAME, userTxn.config(), storeMetricsService);
+ addressBookHelper.adjustPostUpgradeNodeMetadata(
+ networkInfo,
+ userTxn.config(),
+ addressBookWritableStoreFactory.getStore(WritableNodeStore.class));
if (streamMode != RECORDS) {
// Only externalize this if we are streaming blocks
@@ -413,40 +555,17 @@ private HandleOutput execute(@NonNull final State state, @NonNull final UserTxn
blockRecordManager.markMigrationRecordsStreamed();
userTxn.stack().commitSystemStateChanges();
}
- // C.f. https://github.com/hashgraph/hedera-services/issues/14751,
- // here we may need to switch the newly adopted candidate roster
- // in the RosterService state to become the active roster
- // Generate key material for the active roster once it is switched
}
- final var baseBuilder = initializeBuilderInfo(
- userTxn.baseBuilder(), userTxn.txnInfo(), exchangeRateManager.exchangeRates());
- final var dispatch = userTxnFactory.createDispatch(userTxn, baseBuilder);
- updateNodeStakes(userTxn, dispatch);
- var lastRecordManagerTime = Instant.EPOCH;
+
+ final var dispatch = userTxnFactory.createDispatch(userTxn, exchangeRateManager.exchangeRates());
+ // WARNING: this relies on the BlockStreamManager's last-handled time not being updated yet to
+ // correctly detect stake period boundary, so the order of the following two lines is important
+ processStakePeriodChanges(userTxn, dispatch);
+ blockStreamManager.setLastHandleTime(userTxn.consensusNow());
if (streamMode != BLOCKS) {
- lastRecordManagerTime = blockRecordManager.consTimeOfLastHandledTxn();
- // This updates consTimeOfLastHandledTxn as a side-effect
+ // This updates consTimeOfLastHandledTxn as a side effect
blockRecordManager.advanceConsensusClock(userTxn.consensusNow(), userTxn.state());
}
- if (streamMode == RECORDS) {
- processInterval(
- state,
- userTxn.event(),
- userTxn.creatorInfo(),
- lastRecordManagerTime,
- userTxn.consensusNow(),
- userTxn);
- } else {
- if (processInterval(
- state,
- userTxn.event(),
- userTxn.creatorInfo(),
- blockStreamManager.lastIntervalProcessTime(),
- userTxn.consensusNow(),
- userTxn)) {
- blockStreamManager.setLastIntervalProcessTime(userTxn.consensusNow());
- }
- }
logPreDispatch(userTxn);
if (userTxn.type() != ORDINARY_TRANSACTION) {
if (userTxn.type() == GENESIS_TRANSACTION) {
@@ -466,13 +585,10 @@ private HandleOutput execute(@NonNull final State state, @NonNull final UserTxn
}
final var handleOutput =
userTxn.stack().buildHandleOutput(userTxn.consensusNow(), exchangeRateManager.exchangeRates());
- final var dueDiligenceFailure = userTxn.preHandleResult().status() == NODE_DUE_DILIGENCE_FAILURE
- ? DueDiligenceFailure.YES
- : DueDiligenceFailure.NO;
recordCache.addRecordSource(
userTxn.creatorInfo().nodeId(),
- requireNonNull(userTxn.txnInfo().transactionID()),
- dueDiligenceFailure,
+ userTxn.txnInfo().txnIdOrThrow(),
+ userTxn.preHandleResult().dueDiligenceFailure(),
handleOutput.preferringBlockRecordSource());
return handleOutput;
} catch (final Exception e) {
@@ -533,9 +649,9 @@ private HandleOutput failInvalidStreamItems(@NonNull final UserTxn userTxn) {
*
* @return true if the software event is older than the current software version
*/
- private boolean isOlderSoftwareEvent(@NonNull final UserTxn userTxn) {
+ private boolean isOlderSoftwareEvent(@NonNull final SemanticVersion txnVersion) {
return this.initTrigger != EVENT_STREAM_RECOVERY
- && SEMANTIC_VERSION_COMPARATOR.compare(version, userTxn.event().getSoftwareVersion()) > 0;
+ && SEMANTIC_VERSION_COMPARATOR.compare(version, txnVersion) > 0;
}
/**
@@ -581,7 +697,12 @@ public static StreamBuilder initializeBuilderInfo(
.memo(txnInfo.txBody().memo());
}
- private void updateNodeStakes(@NonNull final UserTxn userTxn, @NonNull final Dispatch dispatch) {
+ /**
+ * Processes any side effects of crossing a stake period boundary.
+ * @param userTxn the user transaction that crossed the boundary
+ * @param dispatch the dispatch for the user transaction that crossed the boundary
+ */
+ private void processStakePeriodChanges(@NonNull final UserTxn userTxn, @NonNull final Dispatch dispatch) {
try {
stakePeriodChanges.process(
dispatch,
@@ -589,12 +710,12 @@ private void updateNodeStakes(@NonNull final UserTxn userTxn, @NonNull final Dis
userTxn.tokenContextImpl(),
streamMode,
userTxn.type() == GENESIS_TRANSACTION,
- blockStreamManager.lastIntervalProcessTime());
+ blockStreamManager.lastHandleTime());
} catch (final Exception e) {
// We don't propagate a failure here to avoid a catastrophic scenario
// where we are "stuck" trying to process node stake updates and never
// get back to user transactions
- logger.error("Failed to process staking period time hook", e);
+ logger.error("Failed to process stake period changes", e);
}
}
@@ -609,129 +730,6 @@ private static void logPreDispatch(@NonNull final UserTxn userTxn) {
}
}
- /**
- * Process all time-based events that are due since the last processing time.
- *
- * Note: While long-term schedule transactions (and any future time-based events) will work directly on the state,
- * we still want to pass the userTxn here and use its stack to commit the state purge. Especially when the feature
- * flag is false.
- *
- * @param state the writable {@link State} that transactions will work on
- * @param event the {@link ConsensusEvent} that current user transaction belongs to
- * @param creator the {@link NodeInfo} of the creator of the user transaction
- * @param lastProcessTime an upper bound on the last time that time-based events were processed
- * @param consensusNow the current consensus time
- * @param userTxn the user transaction
- *
- * @return true if the interval was processed
- */
- private boolean processInterval(
- final State state,
- final ConsensusEvent event,
- final NodeInfo creator,
- final Instant lastProcessTime,
- final Instant consensusNow,
- final UserTxn userTxn) {
- // If we have never processed an interval, treat this time as the last processed time
- if (Instant.EPOCH.equals(lastProcessTime)) {
- return true;
- } else if (lastProcessTime.getEpochSecond() < consensusNow.getEpochSecond()) {
- // There is at least one unprocessed second since the last processing time
- final var scheduleConfig = configProvider.getConfiguration().getConfigData(SchedulingConfig.class);
- final var startSecond = lastProcessTime.getEpochSecond();
- final var endSecond = userTxn.consensusNow().getEpochSecond() - 1;
-
- // try to execute schedules only if longTermEnabled
- if (scheduleConfig.longTermEnabled()) {
- final var readableStore = new ReadableStoreFactory(state).getStore(ReadableScheduleStore.class);
- final var schedulesToExecute = readableStore.getByExpirationBetween(startSecond, endSecond);
- // future: consensus nanos offset will be calculated more precisely in following PR,
- // for now just add 1 nano on each iteration.
- var consensusNanosOffset = 1;
- // try to execute schedules
- for (var i = 0; i < schedulesToExecute.size(); i++) {
- final var schedule = schedulesToExecute.get(i);
- // skip deleted or executed transactions
- if (schedule.deleted() || schedule.executed()) {
- continue;
- }
- // update schedule consensus timestamp
- final var scheduleConsensus = Instant.from(consensusNow.plusNanos(consensusNanosOffset));
- final var txnBody = childAsOrdinary(schedule);
- final var scheduleUserTnx = userTxnFactory.createUserTxn(
- state,
- event,
- creator,
- scheduleConsensus,
- ORDINARY_TRANSACTION,
- schedule.payerAccountIdOrThrow(),
- txnBody);
- final var baseBuilder = initializeBuilderInfo(
- scheduleUserTnx.baseBuilder(),
- scheduleUserTnx.txnInfo(),
- exchangeRateManager.exchangeRates());
- ((ScheduleStreamBuilder) baseBuilder).scheduleRef(schedule.scheduleId());
- final var scheduleDispatch = userTxnFactory.createDispatch(
- scheduleUserTnx,
- baseBuilder,
- k -> schedule.signatories().contains(k),
- SCHEDULED);
-
- // mark as deleted
- final var scheduleStore =
- getScheduleServiceStoreFactory(scheduleUserTnx).writableStore(WritableScheduleStore.class);
- scheduleStore.delete(schedule.scheduleId(), consensusNow);
- scheduleUserTnx.stack().commitSystemStateChanges();
-
- // execute the schedule
- dispatchProcessor.processDispatch(scheduleDispatch);
-
- // build the output and save the record/stream
- generateStreams(state, scheduleUserTnx);
- consensusNanosOffset++;
- }
- }
-
- final var scheduleStore = new WritableStoreFactory(
- userTxn.stack(), ScheduleService.NAME, userTxn.config(), storeMetricsService)
- .getStore(WritableScheduleStore.class);
- scheduleStore.purgeExpiredSchedulesBetween(startSecond, endSecond);
- userTxn.stack().commitSystemStateChanges();
- return true;
- }
- return false;
- }
-
- private void generateStreams(final State state, final UserTxn scheduleUserTnx) {
- final var handleOutput = scheduleUserTnx
- .stack()
- .buildHandleOutput(scheduleUserTnx.consensusNow(), exchangeRateManager.exchangeRates());
- recordCache.addRecordSource(
- scheduleUserTnx.creatorInfo().nodeId(),
- requireNonNull(scheduleUserTnx.txnInfo().transactionID()),
- DueDiligenceFailure.NO,
- handleOutput.preferringBlockRecordSource());
-
- // write records + state changes
- if (streamMode != BLOCKS) {
- final var records = ((LegacyListRecordSource) handleOutput.recordSourceOrThrow()).precomputedRecords();
- blockRecordManager.endUserTransaction(records.stream(), state);
- }
- if (streamMode != RECORDS) {
- handleOutput.blockRecordSourceOrThrow().forEachItem(blockStreamManager::writeItem);
- }
- }
-
- private StoreFactoryImpl getScheduleServiceStoreFactory(final UserTxn userTxn) {
- // Build store factory for the schedule service iterator
- final var readableStoreFactory = new ReadableStoreFactory(userTxn.state());
- final var writableStoreFactory = new WritableStoreFactory(
- userTxn.stack(), ScheduleService.NAME, configProvider.getConfiguration(), storeMetricsService);
- final var serviceApiFactory =
- new ServiceApiFactory(userTxn.stack(), configProvider.getConfiguration(), storeMetricsService);
- return new StoreFactoryImpl(readableStoreFactory, writableStoreFactory, serviceApiFactory);
- }
-
/**
* Returns the type of transaction encountering the given state at a block boundary.
*
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/record/RecordStreamBuilder.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/record/RecordStreamBuilder.java
index 8f2b77bec8de..a032287de07c 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/record/RecordStreamBuilder.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/record/RecordStreamBuilder.java
@@ -324,7 +324,6 @@ public void nullOutSideEffectFields() {
transactionReceiptBuilder.scheduledTransactionID((TransactionID) null);
}
// Note that internal contract creations are removed instead of reversed
- transactionRecordBuilder.scheduleRef((ScheduleID) null);
transactionReceiptBuilder.topicRunningHash(Bytes.EMPTY);
transactionReceiptBuilder.newTotalSupply(0L);
transactionReceiptBuilder.topicRunningHashVersion(0L);
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/steps/StakePeriodChanges.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/steps/StakePeriodChanges.java
index 051ea9ed148b..da6cb0af3769 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/steps/StakePeriodChanges.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/steps/StakePeriodChanges.java
@@ -97,7 +97,7 @@ public StakePeriodChanges(
* @param tokenContext the token context
* @param streamMode the stream mode
* @param isGenesis whether the current transaction is the genesis transaction
- * @param lastIntervalProcessTime if known, the last instant when time-based events were processed
+ * @param lastHandleTime the last instant at which a transaction was handled
*/
public void process(
@NonNull final Dispatch dispatch,
@@ -105,13 +105,13 @@ public void process(
@NonNull final TokenContext tokenContext,
@NonNull final StreamMode streamMode,
final boolean isGenesis,
- @NonNull final Instant lastIntervalProcessTime) {
+ @NonNull final Instant lastHandleTime) {
requireNonNull(stack);
requireNonNull(dispatch);
requireNonNull(tokenContext);
requireNonNull(streamMode);
- requireNonNull(lastIntervalProcessTime);
- if (isGenesis || isStakingPeriodBoundary(streamMode, tokenContext, lastIntervalProcessTime)) {
+ requireNonNull(lastHandleTime);
+ if (isGenesis || isStakingPeriodBoundary(streamMode, tokenContext, lastHandleTime)) {
try {
exchangeRateManager.updateMidnightRates(stack);
stack.commitSystemStateChanges();
@@ -146,20 +146,20 @@ public void process(
private boolean isStakingPeriodBoundary(
@NonNull final StreamMode streamMode,
@NonNull final TokenContext tokenContext,
- @NonNull final Instant lastIntervalProcessTime) {
+ @NonNull final Instant lastHandleTime) {
final var consensusTime = tokenContext.consensusTime();
if (streamMode == RECORDS) {
final var blockStore = tokenContext.readableStore(ReadableBlockRecordStore.class);
- final var lastHandleTime = blockStore.getLastBlockInfo().consTimeOfLastHandledTxnOrThrow();
- if (consensusTime.getEpochSecond() > lastHandleTime.seconds()) {
+ final var consTimeOfLastHandled = blockStore.getLastBlockInfo().consTimeOfLastHandledTxnOrThrow();
+ if (consensusTime.getEpochSecond() > consTimeOfLastHandled.seconds()) {
return isNextStakingPeriod(
consensusTime,
- Instant.ofEpochSecond(lastHandleTime.seconds(), lastHandleTime.nanos()),
+ Instant.ofEpochSecond(consTimeOfLastHandled.seconds(), consTimeOfLastHandled.nanos()),
tokenContext);
}
} else {
- if (consensusTime.getEpochSecond() > lastIntervalProcessTime.getEpochSecond()) {
- return isNextStakingPeriod(consensusTime, lastIntervalProcessTime, tokenContext);
+ if (consensusTime.getEpochSecond() > lastHandleTime.getEpochSecond()) {
+ return isNextStakingPeriod(consensusTime, lastHandleTime, tokenContext);
}
}
return false;
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/steps/UserTxn.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/steps/UserTxn.java
index 5822a9f61180..2e00de3016b9 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/steps/UserTxn.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/steps/UserTxn.java
@@ -16,7 +16,10 @@
package com.hedera.node.app.workflows.handle.steps;
+import static com.hedera.node.app.workflows.handle.HandleWorkflow.initializeBuilderInfo;
+
import com.hedera.hapi.node.base.HederaFunctionality;
+import com.hedera.hapi.node.transaction.ExchangeRateSet;
import com.hedera.node.app.spi.workflows.record.StreamBuilder;
import com.hedera.node.app.store.ReadableStoreFactory;
import com.hedera.node.app.workflows.TransactionInfo;
@@ -25,18 +28,17 @@
import com.hedera.node.app.workflows.handle.stack.SavepointStackImpl;
import com.hedera.node.app.workflows.prehandle.PreHandleResult;
import com.swirlds.config.api.Configuration;
-import com.swirlds.platform.system.events.ConsensusEvent;
import com.swirlds.state.State;
import com.swirlds.state.lifecycle.info.NodeInfo;
import edu.umd.cs.findbugs.annotations.NonNull;
import java.time.Instant;
+import java.util.function.Consumer;
public record UserTxn(
@NonNull TransactionType type,
@NonNull HederaFunctionality functionality,
@NonNull Instant consensusNow,
@NonNull State state,
- @NonNull ConsensusEvent event,
@NonNull TransactionInfo txnInfo,
@NonNull TokenContextImpl tokenContextImpl,
@NonNull SavepointStackImpl stack,
@@ -45,6 +47,31 @@ public record UserTxn(
@NonNull Configuration config,
@NonNull NodeInfo creatorInfo) {
+ /**
+ * Initializes and returns the base stream builder for this user transaction.
+ * @param exchangeRates the exchange rates to use
+ * @return the initialized stream builder
+ */
+ public StreamBuilder initBaseBuilder(@NonNull final ExchangeRateSet exchangeRates) {
+ return initializeBuilderInfo(baseBuilder(), txnInfo, exchangeRates);
+ }
+
+ /**
+ * Initializes the base stream builder, casts it to the given type, and applies the given spec.
+ * @param exchangeRates the exchange rates to use
+ * @param builderType the expected type of the base stream builder
+ * @param builderSpec the customization to apply to the builder
+ * @return the initialized stream builder
+ */
+ public <T extends StreamBuilder> T initBaseBuilder(
+ @NonNull final ExchangeRateSet exchangeRates,
+ @NonNull final Class<T> builderType,
+ @NonNull final Consumer<T> builderSpec) {
+ final var baseBuilder = builderType.cast(initializeBuilderInfo(baseBuilder(), txnInfo, exchangeRates));
+ builderSpec.accept(baseBuilder);
+ return baseBuilder;
+ }
+
/**
* Returns the base stream builder for this user transaction.
*
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/steps/UserTxnFactory.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/steps/UserTxnFactory.java
index c30a5db8be06..1d87d8db27f1 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/steps/UserTxnFactory.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/steps/UserTxnFactory.java
@@ -32,6 +32,7 @@
import com.hedera.hapi.node.base.AccountID;
import com.hedera.hapi.node.base.Key;
+import com.hedera.hapi.node.transaction.ExchangeRateSet;
import com.hedera.hapi.node.transaction.TransactionBody;
import com.hedera.node.app.blocks.BlockStreamManager;
import com.hedera.node.app.blocks.impl.BoundaryStateChangeListener;
@@ -78,7 +79,6 @@
import com.hedera.node.config.data.HederaConfig;
import com.hedera.node.config.types.StreamMode;
import com.swirlds.config.api.Configuration;
-import com.swirlds.platform.system.events.ConsensusEvent;
import com.swirlds.platform.system.transaction.ConsensusTransaction;
import com.swirlds.state.State;
import com.swirlds.state.lifecycle.info.NetworkInfo;
@@ -154,7 +154,6 @@ public UserTxnFactory(
* Creates a new {@link UserTxn} instance from the given parameters.
*
* @param state the state the transaction will be applied to
- * @param event the consensus event containing the transaction
* @param creatorInfo the node information of the creator
* @param platformTxn the transaction itself
* @param consensusNow the current consensus time
@@ -163,11 +162,15 @@ public UserTxnFactory(
*/
public UserTxn createUserTxn(
@NonNull final State state,
- @NonNull final ConsensusEvent event,
@NonNull final NodeInfo creatorInfo,
@NonNull final ConsensusTransaction platformTxn,
@NonNull final Instant consensusNow,
@NonNull final TransactionType type) {
+ requireNonNull(state);
+ requireNonNull(creatorInfo);
+ requireNonNull(platformTxn);
+ requireNonNull(consensusNow);
+ requireNonNull(type);
final var config = configProvider.getConfiguration();
final var stack = createRootSavepointStack(state, type);
final var readableStoreFactory = new ReadableStoreFactory(stack);
@@ -180,7 +183,6 @@ public UserTxn createUserTxn(
txnInfo.functionality(),
consensusNow,
state,
- event,
txnInfo,
tokenContext,
stack,
@@ -194,34 +196,36 @@ public UserTxn createUserTxn(
* Creates a new {@link UserTxn} for synthetic transaction body.
*
* @param state the state the transaction will be applied to
- * @param event the consensus event containing the transaction
* @param creatorInfo the node information of the creator
* @param consensusNow the current consensus time
* @param type the type of the transaction
- * @param txBody synthetic transaction body
+ * @param body synthetic transaction body
* @return the new user transaction
*/
public UserTxn createUserTxn(
@NonNull final State state,
- @NonNull final ConsensusEvent event,
@NonNull final NodeInfo creatorInfo,
@NonNull final Instant consensusNow,
@NonNull final TransactionType type,
@NonNull final AccountID payerId,
- @NonNull final TransactionBody txBody) {
+ @NonNull final TransactionBody body) {
+ requireNonNull(state);
+ requireNonNull(creatorInfo);
+ requireNonNull(consensusNow);
+ requireNonNull(type);
+ requireNonNull(payerId);
+ requireNonNull(body);
final var config = configProvider.getConfiguration();
final var stack = createRootSavepointStack(state, type);
final var readableStoreFactory = new ReadableStoreFactory(stack);
- final var functionality = functionOfTxn(txBody);
- final var preHandleResult = preHandleSyntheticTransaction(txBody, payerId, config, readableStoreFactory);
+ final var functionality = functionOfTxn(body);
+ final var preHandleResult = preHandleSyntheticTransaction(body, payerId, config, readableStoreFactory);
final var tokenContext = new TokenContextImpl(config, storeMetricsService, stack, consensusNow);
-
return new UserTxn(
type,
functionality,
consensusNow,
state,
- event,
preHandleResult.txInfo(),
tokenContext,
stack,
@@ -235,18 +239,19 @@ public UserTxn createUserTxn(
* Creates a new {@link Dispatch} instance for this user transaction in the given context.
*
* @param userTxn user transaction
- * @param baseBuilder the base record builder
+ * @param exchangeRates the exchange rates to use
* @return the new dispatch instance
*/
- public Dispatch createDispatch(@NonNull final UserTxn userTxn, @NonNull final StreamBuilder baseBuilder) {
- final var config = userTxn.config();
- final var txnInfo = userTxn.txnInfo();
+ public Dispatch createDispatch(@NonNull final UserTxn userTxn, @NonNull final ExchangeRateSet exchangeRates) {
+ requireNonNull(userTxn);
+ requireNonNull(exchangeRates);
final var preHandleResult = userTxn.preHandleResult();
final var keyVerifier = new DefaultKeyVerifier(
- txnInfo.signatureMap().sigPair().size(),
- config.getConfigData(HederaConfig.class),
+ userTxn.txnInfo().signatureMap().sigPair().size(),
+ userTxn.config().getConfigData(HederaConfig.class),
preHandleResult.getVerificationResults());
final var category = getTxnCategory(preHandleResult);
+ final var baseBuilder = userTxn.initBaseBuilder(exchangeRates);
return createDispatch(userTxn, baseBuilder, keyVerifier, category);
}
@@ -344,18 +349,25 @@ private Dispatch createDispatch(
transactionCategory,
tokenContextImpl,
preHandleResult,
- // scheduled txn will be throttled only at creation (SCHEDULE_CREATE)
- SCHEDULED.equals(transactionCategory)
+ transactionCategory == SCHEDULED
? HandleContext.ConsensusThrottling.OFF
: HandleContext.ConsensusThrottling.ON);
}
+ /**
+ * Creates a new root savepoint stack for the given state and transaction type, where genesis and
+ * post-upgrade transactions have the maximum number of preceding records; and other transaction
+ * types only support the number of preceding records specified in the network configuration.
+ * @param state the state the stack is based on
+ * @param type the type of the transaction
+ * @return the new root savepoint stack
+ */
private SavepointStackImpl createRootSavepointStack(
- @NonNull final State state, @NonNull final TransactionType txnType) {
+ @NonNull final State state, @NonNull final TransactionType type) {
final var config = configProvider.getConfiguration();
final var consensusConfig = config.getConfigData(ConsensusConfig.class);
final var blockStreamConfig = config.getConfigData(BlockStreamConfig.class);
- final var maxPrecedingRecords = (txnType == GENESIS_TRANSACTION || txnType == POST_UPGRADE_TRANSACTION)
+ final var maxPrecedingRecords = (type == GENESIS_TRANSACTION || type == POST_UPGRADE_TRANSACTION)
? Integer.MAX_VALUE
: consensusConfig.handleMaxPrecedingRecords();
return SavepointStackImpl.newRootStack(
@@ -368,16 +380,16 @@ private SavepointStackImpl createRootSavepointStack(
}
private PreHandleResult preHandleSyntheticTransaction(
- @NonNull final TransactionBody txBody,
+ @NonNull final TransactionBody body,
@NonNull final AccountID syntheticPayerId,
@NonNull final Configuration config,
@NonNull final ReadableStoreFactory readableStoreFactory) {
try {
- dispatcher.dispatchPureChecks(txBody);
+ dispatcher.dispatchPureChecks(body);
final var preHandleContext =
- new PreHandleContextImpl(readableStoreFactory, txBody, syntheticPayerId, config, dispatcher);
+ new PreHandleContextImpl(readableStoreFactory, body, syntheticPayerId, config, dispatcher);
dispatcher.dispatchPreHandle(preHandleContext);
- final var txInfo = getTxnInfoFrom(syntheticPayerId, txBody);
+ final var txInfo = getTxnInfoFrom(syntheticPayerId, body);
return new PreHandleResult(
null,
null,
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/prehandle/PreHandleResult.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/prehandle/PreHandleResult.java
index 98aa59f0a803..90f61c7131ee 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/prehandle/PreHandleResult.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/prehandle/PreHandleResult.java
@@ -18,6 +18,7 @@
import static com.hedera.hapi.node.base.ResponseCodeEnum.UNKNOWN;
import static com.hedera.node.app.spi.key.KeyUtils.IMMUTABILITY_SENTINEL_KEY;
+import static com.hedera.node.app.workflows.prehandle.PreHandleResult.Status.NODE_DUE_DILIGENCE_FAILURE;
import static java.util.Objects.requireNonNull;
import com.hedera.hapi.node.base.AccountID;
@@ -26,6 +27,7 @@
import com.hedera.hapi.node.state.token.Account;
import com.hedera.node.app.signature.SignatureVerificationFuture;
import com.hedera.node.app.spi.workflows.PreHandleContext;
+import com.hedera.node.app.state.HederaRecordCache;
import com.hedera.node.app.workflows.TransactionInfo;
import edu.umd.cs.findbugs.annotations.NonNull;
import edu.umd.cs.findbugs.annotations.Nullable;
@@ -87,6 +89,15 @@ && getOptionalKeys().equals(context.optionalNonPayerKeys())
&& getHollowAccounts().equals(context.requiredHollowAccounts());
}
+ /**
+ * Returns {@code DueDiligenceFailure.YES} if this result represents a node due diligence failure, {@code DueDiligenceFailure.NO} otherwise.
+ */
+ public HederaRecordCache.DueDiligenceFailure dueDiligenceFailure() {
+ return status == NODE_DUE_DILIGENCE_FAILURE
+ ? HederaRecordCache.DueDiligenceFailure.YES
+ : HederaRecordCache.DueDiligenceFailure.NO;
+ }
+
/**
* Returns the key verifications for this result; or an empty map if none could be computed.
*
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/standalone/ExecutorComponent.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/standalone/ExecutorComponent.java
index 38602cc7ff8a..b64046a7c96b 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/standalone/ExecutorComponent.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/standalone/ExecutorComponent.java
@@ -22,8 +22,11 @@
import com.hedera.node.app.fees.ExchangeRateManager;
import com.hedera.node.app.service.contract.impl.ContractServiceImpl;
import com.hedera.node.app.service.file.impl.FileServiceImpl;
+import com.hedera.node.app.service.schedule.impl.ScheduleServiceImpl;
import com.hedera.node.app.services.ServicesInjectionModule;
+import com.hedera.node.app.spi.throttle.Throttle;
import com.hedera.node.app.state.HederaStateInjectionModule;
+import com.hedera.node.app.throttle.ThrottleServiceManager;
import com.hedera.node.app.throttle.ThrottleServiceModule;
import com.hedera.node.app.tss.TssBaseService;
import com.hedera.node.app.workflows.FacilityInitModule;
@@ -68,6 +71,9 @@ interface Builder {
@BindsInstance
Builder contractServiceImpl(ContractServiceImpl contractService);
+ @BindsInstance
+ Builder scheduleServiceImpl(ScheduleServiceImpl scheduleService);
+
@BindsInstance
Builder configProviderImpl(ConfigProviderImpl configProvider);
@@ -77,6 +83,9 @@ interface Builder {
@BindsInstance
Builder metrics(Metrics metrics);
+ @BindsInstance
+ Builder throttleFactory(Throttle.Factory throttleFactory);
+
ExecutorComponent build();
}
@@ -88,5 +97,7 @@ interface Builder {
ExchangeRateManager exchangeRateManager();
+ ThrottleServiceManager throttleServiceManager();
+
StandaloneDispatchFactory standaloneDispatchFactory();
}
diff --git a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/standalone/TransactionExecutors.java b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/standalone/TransactionExecutors.java
index 385feb14129f..faddec83abb1 100644
--- a/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/standalone/TransactionExecutors.java
+++ b/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/standalone/TransactionExecutors.java
@@ -25,11 +25,14 @@
import com.hedera.node.app.info.NodeInfoImpl;
import com.hedera.node.app.service.contract.impl.ContractServiceImpl;
import com.hedera.node.app.service.file.impl.FileServiceImpl;
+import com.hedera.node.app.service.schedule.impl.ScheduleServiceImpl;
import com.hedera.node.app.services.AppContextImpl;
import com.hedera.node.app.signature.AppSignatureVerifier;
import com.hedera.node.app.signature.impl.SignatureExpanderImpl;
import com.hedera.node.app.signature.impl.SignatureVerifierImpl;
import com.hedera.node.app.state.recordcache.LegacyListRecordSource;
+import com.hedera.node.app.throttle.AppThrottleFactory;
+import com.hedera.node.app.throttle.ThrottleAccumulator;
import com.hedera.node.app.tss.PlaceholderTssLibrary;
import com.hedera.node.app.tss.TssBaseServiceImpl;
import com.hedera.node.config.data.HederaConfig;
@@ -44,6 +47,7 @@
import java.util.List;
import java.util.Map;
import java.util.concurrent.ForkJoinPool;
+import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;
import org.hyperledger.besu.evm.tracing.OperationTracer;
@@ -52,6 +56,7 @@
*/
public enum TransactionExecutors {
TRANSACTION_EXECUTORS;
+
public static final NodeInfo DEFAULT_NODE_INFO = new NodeInfoImpl(0, asAccount(3L), 10, List.of(), Bytes.EMPTY);
/**
@@ -75,7 +80,7 @@ public TransactionExecutor newExecutor(
@Nullable final TracerBinding customTracerBinding) {
final var tracerBinding =
customTracerBinding != null ? customTracerBinding : DefaultTracerBinding.DEFAULT_TRACER_BINDING;
- final var executor = newExecutorComponent(properties, tracerBinding);
+ final var executor = newExecutorComponent(state, properties, tracerBinding);
executor.initializer().accept(state);
executor.stateNetworkInfo().initFrom(state);
final var exchangeRateManager = executor.exchangeRateManager();
@@ -91,8 +96,12 @@ public TransactionExecutor newExecutor(
}
private ExecutorComponent newExecutorComponent(
- @NonNull final Map<String, String> properties, @NonNull final TracerBinding tracerBinding) {
+ @NonNull final State state,
+ @NonNull final Map<String, String> properties,
+ @NonNull final TracerBinding tracerBinding) {
final var bootstrapConfigProvider = new BootstrapConfigProviderImpl();
+ final var configProvider = new ConfigProviderImpl(false, null, properties);
+ final AtomicReference<ExecutorComponent> componentRef = new AtomicReference<>();
final var appContext = new AppContextImpl(
InstantSource.system(),
new AppSignatureVerifier(
@@ -101,7 +110,12 @@ private ExecutorComponent newExecutorComponent(
new SignatureVerifierImpl(CryptographyHolder.get())),
UNAVAILABLE_GOSSIP,
bootstrapConfigProvider::getConfiguration,
- () -> DEFAULT_NODE_INFO);
+ () -> DEFAULT_NODE_INFO,
+ new AppThrottleFactory(
+ configProvider::getConfiguration,
+ () -> state,
+ () -> componentRef.get().throttleServiceManager().activeThrottleDefinitionsOrThrow(),
+ ThrottleAccumulator::new));
final var tssBaseService = new TssBaseServiceImpl(
appContext,
ForkJoinPool.commonPool(),
@@ -111,15 +125,19 @@ private ExecutorComponent newExecutorComponent(
new NoOpMetrics());
final var contractService = new ContractServiceImpl(appContext, NOOP_VERIFICATION_STRATEGIES, tracerBinding);
final var fileService = new FileServiceImpl();
- final var configProvider = new ConfigProviderImpl(false, null, properties);
- return DaggerExecutorComponent.builder()
+ final var scheduleService = new ScheduleServiceImpl();
+ final var component = DaggerExecutorComponent.builder()
.configProviderImpl(configProvider)
.bootstrapConfigProviderImpl(bootstrapConfigProvider)
.tssBaseService(tssBaseService)
.fileServiceImpl(fileService)
.contractServiceImpl(contractService)
+ .scheduleServiceImpl(scheduleService)
.metrics(new NoOpMetrics())
+ .throttleFactory(appContext.throttleFactory())
.build();
+ componentRef.set(component);
+ return component;
}
/**
diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/blocks/impl/BlockImplUtilsTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/blocks/impl/BlockImplUtilsTest.java
index f14f9d04e2b6..0fcbcacd4765 100644
--- a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/blocks/impl/BlockImplUtilsTest.java
+++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/blocks/impl/BlockImplUtilsTest.java
@@ -117,7 +117,9 @@ private static String nameOf(@NonNull final StateIdentifier stateId) {
case STATE_ID_SCHEDULES_BY_EXPIRY -> "ScheduleService.SCHEDULES_BY_EXPIRY_SEC";
case STATE_ID_SCHEDULES_BY_ID -> "ScheduleService.SCHEDULES_BY_ID";
case STATE_ID_SCHEDULE_ID_BY_EQUALITY -> "ScheduleService.SCHEDULE_ID_BY_EQUALITY";
- case STATE_ID_SCHEDULE_IDS_BY_EXPIRY -> "ScheduleService.SCHEDULE_IDS_BY_EXPIRY_SEC";
+ case STATE_ID_SCHEDULED_COUNTS -> "ScheduleService.SCHEDULED_COUNTS";
+ case STATE_ID_SCHEDULED_ORDERS -> "ScheduleService.SCHEDULED_ORDERS";
+ case STATE_ID_SCHEDULED_USAGES -> "ScheduleService.SCHEDULED_USAGES";
case STATE_ID_ACCOUNTS -> "TokenService.ACCOUNTS";
case STATE_ID_ALIASES -> "TokenService.ALIASES";
case STATE_ID_NFTS -> "TokenService.NFTS";
diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/blocks/impl/BlockStreamManagerImplTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/blocks/impl/BlockStreamManagerImplTest.java
index db738cdb7e49..e0775bfe40cc 100644
--- a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/blocks/impl/BlockStreamManagerImplTest.java
+++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/blocks/impl/BlockStreamManagerImplTest.java
@@ -58,6 +58,7 @@
import com.hedera.node.app.blocks.BlockStreamManager;
import com.hedera.node.app.blocks.BlockStreamService;
import com.hedera.node.app.blocks.InitialStateHash;
+import com.hedera.node.app.records.BlockRecordService;
import com.hedera.node.app.tss.TssBaseService;
import com.hedera.node.config.ConfigProvider;
import com.hedera.node.config.VersionedConfigImpl;
@@ -200,7 +201,7 @@ void classifiesNonGenesisBlockOfSameVersionWithWorkDoneAsNoWork() {
}
@Test
- void canUpdateIntervalProcessTime() {
+ void canUpdateDistinguishedTimes() {
given(configProvider.getConfiguration()).willReturn(new VersionedConfigImpl(DEFAULT_CONFIG, 1L));
subject = new BlockStreamManagerImpl(
() -> aWriter,
@@ -213,6 +214,10 @@ void canUpdateIntervalProcessTime() {
assertSame(Instant.EPOCH, subject.lastIntervalProcessTime());
subject.setLastIntervalProcessTime(CONSENSUS_NOW);
assertEquals(CONSENSUS_NOW, subject.lastIntervalProcessTime());
+
+ assertSame(Instant.EPOCH, subject.lastHandleTime());
+ subject.setLastHandleTime(CONSENSUS_NOW);
+ assertEquals(CONSENSUS_NOW, subject.lastHandleTime());
}
@Test
@@ -288,7 +293,8 @@ void startsAndEndsBlockWithSingleRoundPerBlockAsExpected() throws ParseException
Timestamp.DEFAULT,
true,
SemanticVersion.DEFAULT,
- CONSENSUS_THEN);
+ CONSENSUS_THEN,
+ BlockRecordService.EPOCH);
final var actualBlockInfo = infoRef.get();
assertEquals(expectedBlockInfo, actualBlockInfo);
verify(tssBaseService).requestLedgerSignature(blockHashCaptor.capture(), any());
@@ -392,7 +398,8 @@ void alwaysEndsBlockOnFreezeRoundPerBlockAsExpected() throws ParseException {
Timestamp.DEFAULT,
false,
SemanticVersion.DEFAULT,
- CONSENSUS_THEN);
+ CONSENSUS_THEN,
+ BlockRecordService.EPOCH);
final var actualBlockInfo = infoRef.get();
assertEquals(expectedBlockInfo, actualBlockInfo);
verify(tssBaseService).requestLedgerSignature(blockHashCaptor.capture(), any());
diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/blocks/schemas/V0560BlockStreamSchemaTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/blocks/schemas/V0560BlockStreamSchemaTest.java
index 3fdf4f451c49..ebbbc5d65be4 100644
--- a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/blocks/schemas/V0560BlockStreamSchemaTest.java
+++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/blocks/schemas/V0560BlockStreamSchemaTest.java
@@ -127,6 +127,7 @@ void assumesMigrationIfNotGenesisAndStateIsNull() {
blockInfo.consTimeOfLastHandledTxn(),
false,
SemanticVersion.DEFAULT,
+ blockInfo.consTimeOfLastHandledTxn(),
blockInfo.consTimeOfLastHandledTxn());
verify(state).put(expectedInfo);
}
diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/components/IngestComponentTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/components/IngestComponentTest.java
index 09339e7beabc..e2c261bb0f5d 100644
--- a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/components/IngestComponentTest.java
+++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/components/IngestComponentTest.java
@@ -39,11 +39,13 @@
import com.hedera.node.app.info.NodeInfoImpl;
import com.hedera.node.app.service.contract.impl.ContractServiceImpl;
import com.hedera.node.app.service.file.impl.FileServiceImpl;
+import com.hedera.node.app.service.schedule.impl.ScheduleServiceImpl;
import com.hedera.node.app.services.AppContextImpl;
import com.hedera.node.app.services.ServicesRegistry;
import com.hedera.node.app.signature.AppSignatureVerifier;
import com.hedera.node.app.signature.impl.SignatureExpanderImpl;
import com.hedera.node.app.signature.impl.SignatureVerifierImpl;
+import com.hedera.node.app.spi.throttle.Throttle;
import com.hedera.node.app.state.recordcache.RecordCacheService;
import com.hedera.node.app.tss.TssBaseService;
import com.hedera.node.app.tss.handlers.TssHandlers;
@@ -91,6 +93,9 @@ class IngestComponentTest {
@Mock
private TssShareSignatureHandler tssShareSignatureHandler;
+ @Mock
+ private Throttle.Factory throttleFactory;
+
@Mock
private StartupNetworks startupNetworks;
@@ -119,7 +124,8 @@ void setUp() {
new SignatureVerifierImpl(CryptographyHolder.get())),
UNAVAILABLE_GOSSIP,
() -> configuration,
- () -> DEFAULT_NODE_INFO);
+ () -> DEFAULT_NODE_INFO,
+ throttleFactory);
given(tssBaseService.tssHandlers())
.willReturn(new TssHandlers(tssMessageHandler, tssVoteHandler, tssShareSignatureHandler));
app = DaggerHederaInjectionComponent.builder()
@@ -127,6 +133,7 @@ void setUp() {
.bootstrapConfigProviderImpl(new BootstrapConfigProviderImpl())
.fileServiceImpl(new FileServiceImpl())
.contractServiceImpl(new ContractServiceImpl(appContext))
+ .scheduleService(new ScheduleServiceImpl())
.initTrigger(InitTrigger.GENESIS)
.platform(platform)
.crypto(CryptographyHolder.get())
@@ -144,6 +151,7 @@ void setUp() {
.initialStateHash(new InitialStateHash(completedFuture(Bytes.EMPTY), 0))
.networkInfo(mock(NetworkInfo.class))
.startupNetworks(startupNetworks)
+ .throttleFactory(throttleFactory)
.build();
final var state = new FakeState();
diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/throttle/AppThrottleFactoryTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/throttle/AppThrottleFactoryTest.java
new file mode 100644
index 000000000000..e0000a5a30e8
--- /dev/null
+++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/throttle/AppThrottleFactoryTest.java
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.hedera.node.app.throttle;
+
+import static com.hedera.hapi.node.base.HederaFunctionality.CRYPTO_TRANSFER;
+import static com.hedera.node.app.throttle.ThrottleAccumulator.ThrottleType.BACKEND_THROTTLE;
+import static org.assertj.core.api.AssertionsForClassTypes.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.mockito.ArgumentMatchers.argThat;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.BDDMockito.given;
+import static org.mockito.Mockito.verify;
+
+import com.hedera.hapi.node.base.AccountID;
+import com.hedera.hapi.node.base.SignatureMap;
+import com.hedera.hapi.node.base.Timestamp;
+import com.hedera.hapi.node.base.Transaction;
+import com.hedera.hapi.node.base.TransactionID;
+import com.hedera.hapi.node.state.throttles.ThrottleUsageSnapshot;
+import com.hedera.hapi.node.state.throttles.ThrottleUsageSnapshots;
+import com.hedera.hapi.node.token.CryptoTransferTransactionBody;
+import com.hedera.hapi.node.transaction.ThrottleDefinitions;
+import com.hedera.hapi.node.transaction.TransactionBody;
+import com.hedera.node.app.hapi.utils.throttles.DeterministicThrottle;
+import com.hedera.node.app.hapi.utils.throttles.GasLimitDeterministicThrottle;
+import com.hedera.node.app.workflows.TransactionInfo;
+import com.hedera.pbj.runtime.io.buffer.Bytes;
+import com.swirlds.config.api.Configuration;
+import com.swirlds.state.State;
+import java.time.Instant;
+import java.util.List;
+import java.util.function.IntSupplier;
+import java.util.function.Supplier;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.mockito.Mock;
+import org.mockito.junit.jupiter.MockitoExtension;
+
+@ExtendWith(MockitoExtension.class)
+class AppThrottleFactoryTest {
+ private static final int SPLIT_FACTOR = 7;
+ private static final Instant CONSENSUS_NOW = Instant.ofEpochSecond(123456, 789);
+ private static final AccountID PAYER_ID =
+ AccountID.newBuilder().accountNum(666L).build();
+ private static final TransactionInfo TXN_INFO = new TransactionInfo(
+ Transaction.DEFAULT,
+ TransactionBody.newBuilder()
+ .cryptoTransfer(CryptoTransferTransactionBody.DEFAULT)
+ .build(),
+ TransactionID.DEFAULT,
+ PAYER_ID,
+ SignatureMap.DEFAULT,
+ Bytes.EMPTY,
+ CRYPTO_TRANSFER,
+ null);
+ private static final ThrottleUsageSnapshots FAKE_SNAPSHOTS = new ThrottleUsageSnapshots(
+ List.of(
+ new ThrottleUsageSnapshot(1L, new Timestamp(234567, 8)),
+ new ThrottleUsageSnapshot(2L, new Timestamp(345678, 9))),
+ ThrottleUsageSnapshot.DEFAULT);
+
+ @Mock
+ private State state;
+
+ @Mock
+ private Supplier<Configuration> config;
+
+ @Mock
+ private ThrottleAccumulator throttleAccumulator;
+
+ @Mock
+ private DeterministicThrottle firstThrottle;
+
+ @Mock
+ private DeterministicThrottle lastThrottle;
+
+ @Mock
+ private GasLimitDeterministicThrottle gasThrottle;
+
+ @Mock
+ private AppThrottleFactory.ThrottleAccumulatorFactory throttleAccumulatorFactory;
+
+ private AppThrottleFactory subject;
+
+ @BeforeEach
+ void setUp() {
+ subject = new AppThrottleFactory(
+ config, () -> state, () -> ThrottleDefinitions.DEFAULT, throttleAccumulatorFactory);
+ }
+
+ @Test
+ void initializesAccumulatorFromCurrentConfigAndGivenDefinitions() {
+ given(throttleAccumulatorFactory.newThrottleAccumulator(
+ eq(config), argThat((IntSupplier i) -> i.getAsInt() == SPLIT_FACTOR), eq(BACKEND_THROTTLE)))
+ .willReturn(throttleAccumulator);
+ given(throttleAccumulator.allActiveThrottles()).willReturn(List.of(firstThrottle, lastThrottle));
+ given(throttleAccumulator.gasLimitThrottle()).willReturn(gasThrottle);
+
+ final var throttle = subject.newThrottle(SPLIT_FACTOR, FAKE_SNAPSHOTS);
+
+ verify(throttleAccumulator).applyGasConfig();
+ verify(throttleAccumulator).rebuildFor(ThrottleDefinitions.DEFAULT);
+ verify(firstThrottle).resetUsageTo(FAKE_SNAPSHOTS.tpsThrottles().getFirst());
+ verify(lastThrottle).resetUsageTo(FAKE_SNAPSHOTS.tpsThrottles().getLast());
+ verify(gasThrottle).resetUsageTo(FAKE_SNAPSHOTS.gasThrottleOrThrow());
+
+ given(throttleAccumulator.checkAndEnforceThrottle(TXN_INFO, CONSENSUS_NOW, state))
+ .willReturn(true);
+ assertThat(throttle.allow(PAYER_ID, TXN_INFO.txBody(), TXN_INFO.functionality(), CONSENSUS_NOW))
+ .isFalse();
+
+ given(firstThrottle.usageSnapshot())
+ .willReturn(FAKE_SNAPSHOTS.tpsThrottles().getFirst());
+ given(lastThrottle.usageSnapshot())
+ .willReturn(FAKE_SNAPSHOTS.tpsThrottles().getLast());
+ given(gasThrottle.usageSnapshot()).willReturn(FAKE_SNAPSHOTS.gasThrottleOrThrow());
+ assertEquals(FAKE_SNAPSHOTS, throttle.usageSnapshots());
+ }
+}
diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/throttle/ThrottleAccumulatorTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/throttle/ThrottleAccumulatorTest.java
index a0c4e833e9ab..a202e9c1a9f6 100644
--- a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/throttle/ThrottleAccumulatorTest.java
+++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/throttle/ThrottleAccumulatorTest.java
@@ -77,6 +77,7 @@
import com.hedera.node.app.spi.fixtures.util.LogCaptureExtension;
import com.hedera.node.app.spi.fixtures.util.LoggingSubject;
import com.hedera.node.app.spi.fixtures.util.LoggingTarget;
+import com.hedera.node.app.throttle.ThrottleAccumulator.Verbose;
import com.hedera.node.app.workflows.TransactionInfo;
import com.hedera.node.config.ConfigProvider;
import com.hedera.node.config.VersionedConfigImpl;
@@ -86,6 +87,7 @@
import com.hedera.node.config.data.ContractsConfig;
import com.hedera.node.config.data.EntitiesConfig;
import com.hedera.node.config.data.LazyCreationConfig;
+import com.hedera.node.config.data.LedgerConfig;
import com.hedera.node.config.data.SchedulingConfig;
import com.hedera.node.config.data.TokensConfig;
import com.hedera.node.config.testfixtures.HederaTestConfigBuilder;
@@ -156,6 +158,9 @@ class ThrottleAccumulatorTest {
@Mock
private SchedulingConfig schedulingConfig;
+ @Mock
+ private LedgerConfig ledgerConfig;
+
@Mock
private AccountsConfig accountsConfig;
@@ -199,7 +204,11 @@ class ThrottleAccumulatorTest {
void worksAsExpectedForKnownQueries() throws IOException, ParseException {
// given
subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, FRONTEND_THROTTLE, throttleMetrics, gasThrottle);
+ () -> CAPACITY_SPLIT,
+ configProvider::getConfiguration,
+ FRONTEND_THROTTLE,
+ throttleMetrics,
+ gasThrottle);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
given(accountsConfig.lastThrottleExempt()).willReturn(100L);
@@ -214,7 +223,7 @@ void worksAsExpectedForKnownQueries() throws IOException, ParseException {
final var yesAns = subject.checkAndEnforceThrottle(
GET_VERSION_INFO, TIME_INSTANT.plusNanos(2), query, state, queryPayerId);
final var throttlesNow = subject.activeThrottlesFor(TRANSACTION_GET_RECEIPT);
- final var dNow = throttlesNow.get(0);
+ final var dNow = throttlesNow.getFirst();
// then
assertFalse(noAns);
@@ -230,7 +239,11 @@ void worksAsExpectedForSimpleGetBalanceThrottle() throws IOException, ParseExcep
.getOrCreateConfig();
given(configProvider.getConfiguration()).willReturn(new VersionedConfigImpl(config, 1));
subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, FRONTEND_THROTTLE, throttleMetrics, gasThrottle);
+ () -> CAPACITY_SPLIT,
+ configProvider::getConfiguration,
+ FRONTEND_THROTTLE,
+ throttleMetrics,
+ gasThrottle);
final var defs = getThrottleDefs("bootstrap/throttles.json");
subject.rebuildFor(defs);
final var query = Query.newBuilder()
@@ -269,7 +282,11 @@ void worksAsExpectedForCountingGetBalanceThrottle() throws IOException, ParseExc
.getOrCreateConfig();
given(configProvider.getConfiguration()).willReturn(new VersionedConfigImpl(config, 1));
subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, FRONTEND_THROTTLE, throttleMetrics, gasThrottle);
+ () -> CAPACITY_SPLIT,
+ configProvider::getConfiguration,
+ FRONTEND_THROTTLE,
+ throttleMetrics,
+ gasThrottle);
final var defs = getThrottleDefs("bootstrap/throttles.json");
subject.rebuildFor(defs);
final var query = Query.newBuilder()
@@ -304,7 +321,11 @@ void worksAsExpectedForCountingGetBalanceThrottleWithEmptyAccount() throws IOExc
.getOrCreateConfig();
given(configProvider.getConfiguration()).willReturn(new VersionedConfigImpl(config, 1));
subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, FRONTEND_THROTTLE, throttleMetrics, gasThrottle);
+ () -> CAPACITY_SPLIT,
+ configProvider::getConfiguration,
+ FRONTEND_THROTTLE,
+ throttleMetrics,
+ gasThrottle);
final var defs = getThrottleDefs("bootstrap/throttles.json");
subject.rebuildFor(defs);
final var query = Query.newBuilder()
@@ -339,7 +360,11 @@ void worksAsExpectedForCountingGetBalanceThrottleWithEmptyAccount() throws IOExc
void worksAsExpectedForUnknownQueries() throws IOException, ParseException {
// given
subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, FRONTEND_THROTTLE, throttleMetrics, gasThrottle);
+ () -> CAPACITY_SPLIT,
+ configProvider::getConfiguration,
+ FRONTEND_THROTTLE,
+ throttleMetrics,
+ gasThrottle);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
given(accountsConfig.lastThrottleExempt()).willReturn(100L);
@@ -360,7 +385,8 @@ void worksAsExpectedForUnknownQueries() throws IOException, ParseException {
void checkAndClaimThrottlesByGasAndTotalAllowedGasPerSecNotSetOrZero(
ThrottleAccumulator.ThrottleType throttleType) {
// given
- subject = new ThrottleAccumulator(() -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics);
+ subject = new ThrottleAccumulator(
+ () -> CAPACITY_SPLIT, configProvider::getConfiguration, throttleType, throttleMetrics, Verbose.YES);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig);
given(contractsConfig.throttleThrottleByGas()).willReturn(true);
@@ -379,7 +405,7 @@ void managerBehavesAsExpectedForFungibleMint(ThrottleAccumulator.ThrottleType th
throws IOException, ParseException {
// given
subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics, gasThrottle);
+ () -> CAPACITY_SPLIT, configProvider::getConfiguration, throttleType, throttleMetrics, gasThrottle);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
given(accountsConfig.lastThrottleExempt()).willReturn(100L);
@@ -417,7 +443,7 @@ void managerBehavesAsExpectedForNftMint(ThrottleAccumulator.ThrottleType throttl
throws IOException, ParseException {
// given
subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics, gasThrottle);
+ () -> CAPACITY_SPLIT, configProvider::getConfiguration, throttleType, throttleMetrics, gasThrottle);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
given(accountsConfig.lastThrottleExempt()).willReturn(100L);
@@ -458,7 +484,7 @@ void managerBehavesAsExpectedForMultiBucketOp(ThrottleAccumulator.ThrottleType t
throws IOException, ParseException {
// given
subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics, gasThrottle);
+ () -> CAPACITY_SPLIT, configProvider::getConfiguration, throttleType, throttleMetrics, gasThrottle);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
given(accountsConfig.lastThrottleExempt()).willReturn(100L);
@@ -497,7 +523,7 @@ void managerBehavesAsExpectedForMultiBucketOp(ThrottleAccumulator.ThrottleType t
void handlesThrottleExemption(ThrottleAccumulator.ThrottleType throttleType) throws IOException, ParseException {
// given
subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics, gasThrottle);
+ () -> CAPACITY_SPLIT, configProvider::getConfiguration, throttleType, throttleMetrics, gasThrottle);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
given(accountsConfig.lastThrottleExempt()).willReturn(100L);
@@ -531,7 +557,7 @@ void computesNumImplicitCreationsIfNotAlreadyKnown(ThrottleAccumulator.ThrottleT
throws IOException, ParseException {
// given
subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics, gasThrottle);
+ () -> CAPACITY_SPLIT, configProvider::getConfiguration, throttleType, throttleMetrics, gasThrottle);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
given(accountsConfig.lastThrottleExempt()).willReturn(100L);
@@ -571,7 +597,7 @@ void ifLazyCreationEnabledComputesNumImplicitCreationsIfNotAlreadyKnown(
ThrottleAccumulator.ThrottleType throttleType) throws IOException, ParseException {
// given
subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics, gasThrottle);
+ () -> CAPACITY_SPLIT, configProvider::getConfiguration, throttleType, throttleMetrics, gasThrottle);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
given(accountsConfig.lastThrottleExempt()).willReturn(100L);
@@ -612,7 +638,7 @@ void cryptoTransfersWithNoAutoAccountCreationsAreThrottledAsExpected(ThrottleAcc
throws IOException, ParseException {
// given
subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics, gasThrottle);
+ () -> CAPACITY_SPLIT, configProvider::getConfiguration, throttleType, throttleMetrics, gasThrottle);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
given(accountsConfig.lastThrottleExempt()).willReturn(100L);
@@ -651,7 +677,7 @@ void managerAllowsCryptoTransfersWithAutoAccountCreationsAsExpected(ThrottleAccu
throws IOException, ParseException {
// given
subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics, gasThrottle);
+ () -> CAPACITY_SPLIT, configProvider::getConfiguration, throttleType, throttleMetrics, gasThrottle);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
given(accountsConfig.lastThrottleExempt()).willReturn(100L);
@@ -690,7 +716,7 @@ void managerAllowsCryptoTransfersWithAutoAssociationsAsExpected(ThrottleAccumula
throws IOException, ParseException {
// given
subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics, gasThrottle);
+ () -> CAPACITY_SPLIT, configProvider::getConfiguration, throttleType, throttleMetrics, gasThrottle);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
given(accountsConfig.lastThrottleExempt()).willReturn(100L);
@@ -729,7 +755,7 @@ void managerRejectsCryptoTransfersWithAutoAccountCreationsAsExpected(ThrottleAcc
throws IOException, ParseException {
// given
subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics, gasThrottle);
+ () -> CAPACITY_SPLIT, configProvider::getConfiguration, throttleType, throttleMetrics, gasThrottle);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
given(accountsConfig.lastThrottleExempt()).willReturn(100L);
@@ -767,7 +793,7 @@ void managerRejectsCryptoTransfersWithAutoAssociationsAsExpected(ThrottleAccumul
throws IOException, ParseException {
// given
subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics, gasThrottle);
+ () -> CAPACITY_SPLIT, configProvider::getConfiguration, throttleType, throttleMetrics, gasThrottle);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
given(accountsConfig.lastThrottleExempt()).willReturn(100L);
@@ -805,7 +831,7 @@ void managerRejectsCryptoTransfersWithMissingCryptoCreateThrottle(ThrottleAccumu
throws IOException, ParseException {
// given
subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics, gasThrottle);
+ () -> CAPACITY_SPLIT, configProvider::getConfiguration, throttleType, throttleMetrics, gasThrottle);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
given(accountsConfig.lastThrottleExempt()).willReturn(100L);
@@ -843,7 +869,7 @@ void ethereumTransactionWithNoAutoAccountCreationsAreThrottledAsExpected(
ThrottleAccumulator.ThrottleType throttleType) throws IOException, ParseException {
// given
subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics, gasThrottle);
+ () -> CAPACITY_SPLIT, configProvider::getConfiguration, throttleType, throttleMetrics, gasThrottle);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
given(accountsConfig.lastThrottleExempt()).willReturn(100L);
@@ -885,7 +911,7 @@ void ethereumTransactionWithAutoAccountCreationsButNoLazyCreationsAreThrottledAs
ThrottleAccumulator.ThrottleType throttleType) throws IOException, ParseException {
// given
subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics, gasThrottle);
+ () -> CAPACITY_SPLIT, configProvider::getConfiguration, throttleType, throttleMetrics, gasThrottle);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
given(accountsConfig.lastThrottleExempt()).willReturn(100L);
@@ -927,7 +953,7 @@ void managerAllowsEthereumTransactionWithAutoAccountCreationsAsExpected(
ThrottleAccumulator.ThrottleType throttleType) throws IOException, ParseException {
// given
subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics, gasThrottle);
+ () -> CAPACITY_SPLIT, configProvider::getConfiguration, throttleType, throttleMetrics, gasThrottle);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
given(accountsConfig.lastThrottleExempt()).willReturn(100L);
@@ -969,7 +995,7 @@ void managerRejectsEthereumTransactionWithMissingCryptoCreateThrottle(ThrottleAc
throws IOException, ParseException {
// given
subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics, gasThrottle);
+ () -> CAPACITY_SPLIT, configProvider::getConfiguration, throttleType, throttleMetrics, gasThrottle);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
given(accountsConfig.lastThrottleExempt()).willReturn(100L);
@@ -1010,7 +1036,8 @@ void managerRejectsEthereumTransactionWithMissingCryptoCreateThrottle(ThrottleAc
@EnumSource
void alwaysThrottlesContractCallWhenGasThrottleIsNotDefined(ThrottleAccumulator.ThrottleType throttleType) {
// given
- subject = new ThrottleAccumulator(() -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics);
+ subject = new ThrottleAccumulator(
+ () -> CAPACITY_SPLIT, configProvider::getConfiguration, throttleType, throttleMetrics, Verbose.YES);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
given(accountsConfig.lastThrottleExempt()).willReturn(100L);
@@ -1038,7 +1065,8 @@ void alwaysThrottlesContractCallWhenGasThrottleIsNotDefined(ThrottleAccumulator.
@EnumSource
void alwaysThrottlesContractCallWhenGasThrottleReturnsTrue(ThrottleAccumulator.ThrottleType throttleType) {
// given
- subject = new ThrottleAccumulator(() -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics);
+ subject = new ThrottleAccumulator(
+ () -> CAPACITY_SPLIT, configProvider::getConfiguration, throttleType, throttleMetrics, Verbose.YES);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
given(accountsConfig.lastThrottleExempt()).willReturn(100L);
@@ -1068,7 +1096,8 @@ void alwaysThrottlesContractCallWhenGasThrottleReturnsTrue(ThrottleAccumulator.T
@EnumSource
void alwaysThrottlesContractCreateWhenGasThrottleIsNotDefined(ThrottleAccumulator.ThrottleType throttleType) {
// given
- subject = new ThrottleAccumulator(() -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics);
+ subject = new ThrottleAccumulator(
+ () -> CAPACITY_SPLIT, configProvider::getConfiguration, throttleType, throttleMetrics, Verbose.YES);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
given(accountsConfig.lastThrottleExempt()).willReturn(100L);
@@ -1096,7 +1125,8 @@ void alwaysThrottlesContractCreateWhenGasThrottleIsNotDefined(ThrottleAccumulato
@EnumSource
void alwaysThrottlesContractCreateWhenGasThrottleReturnsTrue(ThrottleAccumulator.ThrottleType throttleType) {
// given
- subject = new ThrottleAccumulator(() -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics);
+ subject = new ThrottleAccumulator(
+ () -> CAPACITY_SPLIT, configProvider::getConfiguration, throttleType, throttleMetrics, Verbose.YES);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
given(accountsConfig.lastThrottleExempt()).willReturn(100L);
@@ -1131,7 +1161,8 @@ void alwaysThrottlesContractCreateWhenGasThrottleReturnsTrue(ThrottleAccumulator
@EnumSource
void alwaysThrottlesEthereumTxnWhenGasThrottleIsNotDefined(ThrottleAccumulator.ThrottleType throttleType) {
// given
- subject = new ThrottleAccumulator(() -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics);
+ subject = new ThrottleAccumulator(
+ () -> CAPACITY_SPLIT, configProvider::getConfiguration, throttleType, throttleMetrics, Verbose.YES);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
given(accountsConfig.lastThrottleExempt()).willReturn(100L);
@@ -1159,7 +1190,8 @@ void alwaysThrottlesEthereumTxnWhenGasThrottleIsNotDefined(ThrottleAccumulator.T
@EnumSource
void alwaysThrottlesEthereumTxnWhenGasThrottleReturnsTrue(ThrottleAccumulator.ThrottleType throttleType) {
// given
- subject = new ThrottleAccumulator(() -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics);
+ subject = new ThrottleAccumulator(
+ () -> CAPACITY_SPLIT, configProvider::getConfiguration, throttleType, throttleMetrics, Verbose.YES);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
given(accountsConfig.lastThrottleExempt()).willReturn(100L);
@@ -1195,7 +1227,8 @@ void alwaysThrottlesEthereumTxnWhenGasThrottleReturnsTrue(ThrottleAccumulator.Th
@EnumSource
void gasLimitThrottleReturnsCorrectObject(ThrottleAccumulator.ThrottleType throttleType) {
// given
- subject = new ThrottleAccumulator(() -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics);
+ subject = new ThrottleAccumulator(
+ () -> CAPACITY_SPLIT, configProvider::getConfiguration, throttleType, throttleMetrics, Verbose.YES);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig);
given(contractsConfig.throttleThrottleByGas()).willReturn(true);
@@ -1215,7 +1248,8 @@ void gasLimitThrottleReturnsCorrectObject(ThrottleAccumulator.ThrottleType throt
void constructsExpectedBucketsFromTestResource(ThrottleAccumulator.ThrottleType throttleType)
throws IOException, ParseException {
// given
- subject = new ThrottleAccumulator(() -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics);
+ subject = new ThrottleAccumulator(
+ () -> CAPACITY_SPLIT, configProvider::getConfiguration, throttleType, throttleMetrics, Verbose.YES);
given(configProvider.getConfiguration()).willReturn(configuration);
final var defs = getThrottleDefs("bootstrap/throttles.json");
@@ -1241,7 +1275,7 @@ void constructsExpectedBucketsFromTestResource(ThrottleAccumulator.ThrottleType
@EnumSource
void alwaysRejectsIfNoThrottle(ThrottleAccumulator.ThrottleType throttleType) {
subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics, gasThrottle);
+ () -> CAPACITY_SPLIT, configProvider::getConfiguration, throttleType, throttleMetrics, gasThrottle);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
given(accountsConfig.lastThrottleExempt()).willReturn(100L);
@@ -1259,7 +1293,8 @@ void alwaysRejectsIfNoThrottle(ThrottleAccumulator.ThrottleType throttleType) {
@ParameterizedTest
@EnumSource
void verifyLeakUnusedGas(ThrottleAccumulator.ThrottleType throttleType) throws IOException, ParseException {
- subject = new ThrottleAccumulator(() -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics);
+ subject = new ThrottleAccumulator(
+ () -> CAPACITY_SPLIT, configProvider::getConfiguration, throttleType, throttleMetrics, Verbose.YES);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
given(accountsConfig.lastThrottleExempt()).willReturn(100L);
@@ -1294,7 +1329,11 @@ void verifyLeakUnusedGas(ThrottleAccumulator.ThrottleType throttleType) throws I
@Test
void alwaysThrottleNOfUnmanaged() throws IOException, ParseException {
subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, FRONTEND_THROTTLE, throttleMetrics, gasThrottle);
+ () -> CAPACITY_SPLIT,
+ configProvider::getConfiguration,
+ FRONTEND_THROTTLE,
+ throttleMetrics,
+ gasThrottle);
final var defs = getThrottleDefs("bootstrap/throttles.json");
subject.rebuildFor(defs);
@@ -1305,7 +1344,11 @@ void alwaysThrottleNOfUnmanaged() throws IOException, ParseException {
@Test
void canThrottleNOfManaged() throws IOException, ParseException {
subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, FRONTEND_THROTTLE, throttleMetrics, gasThrottle);
+ () -> CAPACITY_SPLIT,
+ configProvider::getConfiguration,
+ FRONTEND_THROTTLE,
+ throttleMetrics,
+ gasThrottle);
final var defs = getThrottleDefs("bootstrap/throttles.json");
subject.rebuildFor(defs);
@@ -1320,7 +1363,11 @@ void canThrottleNOfManaged() throws IOException, ParseException {
@Test
void whenThrottlesUsesNoCapacity() throws IOException, ParseException {
subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, FRONTEND_THROTTLE, throttleMetrics, gasThrottle);
+ () -> CAPACITY_SPLIT,
+ configProvider::getConfiguration,
+ FRONTEND_THROTTLE,
+ throttleMetrics,
+ gasThrottle);
final var defs = getThrottleDefs("bootstrap/throttles.json");
subject.rebuildFor(defs);
@@ -1333,7 +1380,11 @@ void whenThrottlesUsesNoCapacity() throws IOException, ParseException {
@Test
void canLeakCapacityForNOfManaged() throws IOException, ParseException {
subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, FRONTEND_THROTTLE, throttleMetrics, gasThrottle);
+ () -> CAPACITY_SPLIT,
+ configProvider::getConfiguration,
+ FRONTEND_THROTTLE,
+ throttleMetrics,
+ gasThrottle);
final var defs = getThrottleDefs("bootstrap/throttles.json");
subject.rebuildFor(defs);
@@ -1346,653 +1397,6 @@ void canLeakCapacityForNOfManaged() throws IOException, ParseException {
assertEquals(42 * oneUsed, fortyTwoUsed);
}
- @ParameterizedTest
- @CsvSource({
- "FRONTEND_THROTTLE,true,true",
- "FRONTEND_THROTTLE,true,false",
- "FRONTEND_THROTTLE,false,true",
- "FRONTEND_THROTTLE,false,false",
- "BACKEND_THROTTLE,true,true",
- "BACKEND_THROTTLE,true,false",
- "BACKEND_THROTTLE,false,true",
- "BACKEND_THROTTLE,false,false",
- })
- @MockitoSettings(strictness = org.mockito.quality.Strictness.LENIENT)
- void usesScheduleCreateThrottleForSubmitMessage(
- final ThrottleAccumulator.ThrottleType throttleType,
- final boolean longTermEnabled,
- final boolean waitForExpiry)
- throws IOException, ParseException {
- // given
- subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics, gasThrottle);
-
- given(configProvider.getConfiguration()).willReturn(configuration);
- given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
- given(accountsConfig.lastThrottleExempt()).willReturn(100L);
- given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig);
- given(contractsConfig.throttleThrottleByGas()).willReturn(false);
- given(configuration.getConfigData(SchedulingConfig.class)).willReturn(schedulingConfig);
- given(schedulingConfig.longTermEnabled()).willReturn(longTermEnabled);
- given(configuration.getConfigData(AutoCreationConfig.class)).willReturn(autoCreationConfig);
- given(autoCreationConfig.enabled()).willReturn(true);
- given(configuration.getConfigData(LazyCreationConfig.class)).willReturn(lazyCreationConfig);
- given(lazyCreationConfig.enabled()).willReturn(false);
-
- final var scheduledSubmit = SchedulableTransactionBody.newBuilder()
- .consensusSubmitMessage(ConsensusSubmitMessageTransactionBody.DEFAULT)
- .build();
- final var defs = getThrottleDefs("bootstrap/schedule-create-throttles.json");
- subject.rebuildFor(defs);
-
- // when
- final var txnInfo = scheduleCreate(scheduledSubmit, waitForExpiry, null);
- final boolean firstAns = subject.checkAndEnforceThrottle(txnInfo, TIME_INSTANT, state);
- boolean subsequentAns = false;
- for (int i = 1; i <= 150; i++) {
- subsequentAns = subject.checkAndEnforceThrottle(txnInfo, TIME_INSTANT.plusNanos(i), state);
- }
-
- final var throttlesNow = subject.activeThrottlesFor(SCHEDULE_CREATE);
- final var aNow = throttlesNow.get(0);
-
- // then
- assertFalse(firstAns);
- assertTrue(subsequentAns);
- assertEquals(149999992500000L, aNow.used());
- assertEquals(
- longTermEnabled && throttleType == FRONTEND_THROTTLE && (!waitForExpiry) ? 149999255000000L : 0,
- subject.activeThrottlesFor(CONSENSUS_SUBMIT_MESSAGE).get(0).used());
- }
-
- @ParameterizedTest
- @CsvSource({
- "FRONTEND_THROTTLE,true,true",
- "FRONTEND_THROTTLE,true,false",
- "FRONTEND_THROTTLE,false,true",
- "FRONTEND_THROTTLE,false,false",
- "BACKEND_THROTTLE,true,true",
- "BACKEND_THROTTLE,true,false",
- "BACKEND_THROTTLE,false,true",
- "BACKEND_THROTTLE,false,false",
- })
- @MockitoSettings(strictness = org.mockito.quality.Strictness.LENIENT)
- void usesScheduleCreateThrottleWithNestedThrottleExempt(
- final ThrottleAccumulator.ThrottleType throttleType,
- final boolean longTermEnabled,
- final boolean waitForExpiry)
- throws IOException, ParseException {
- // given
- subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics, gasThrottle);
-
- given(configProvider.getConfiguration()).willReturn(configuration);
- given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
- given(accountsConfig.lastThrottleExempt()).willReturn(100L);
- given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig);
- given(contractsConfig.throttleThrottleByGas()).willReturn(false);
- given(configuration.getConfigData(SchedulingConfig.class)).willReturn(schedulingConfig);
- given(schedulingConfig.longTermEnabled()).willReturn(longTermEnabled);
- given(configuration.getConfigData(AutoCreationConfig.class)).willReturn(autoCreationConfig);
- given(autoCreationConfig.enabled()).willReturn(true);
- given(configuration.getConfigData(LazyCreationConfig.class)).willReturn(lazyCreationConfig);
- given(lazyCreationConfig.enabled()).willReturn(false);
-
- final var scheduledSubmit = SchedulableTransactionBody.newBuilder()
- .consensusSubmitMessage(ConsensusSubmitMessageTransactionBody.DEFAULT)
- .build();
- final var defs = getThrottleDefs("bootstrap/schedule-create-throttles.json");
- subject.rebuildFor(defs);
-
- // when
- final var txnInfo = scheduleCreate(
- scheduledSubmit,
- waitForExpiry,
- AccountID.newBuilder().accountNum(2L).build());
- final boolean firstAns = subject.checkAndEnforceThrottle(txnInfo, TIME_INSTANT, state);
- boolean subsequentAns = false;
- for (int i = 1; i <= 150; i++) {
- subsequentAns = subject.checkAndEnforceThrottle(txnInfo, TIME_INSTANT.plusNanos(i), state);
- }
-
- final var throttlesNow = subject.activeThrottlesFor(SCHEDULE_CREATE);
- final var aNow = throttlesNow.get(0);
-
- // then
- assertFalse(firstAns);
- assertTrue(subsequentAns);
- assertEquals(149999992500000L, aNow.used());
- assertEquals(
- 0, subject.activeThrottlesFor(CONSENSUS_SUBMIT_MESSAGE).get(0).used());
- }
-
- @ParameterizedTest
- @EnumSource
- void scheduleCreateAlwaysThrottledWhenNoBody(final ThrottleAccumulator.ThrottleType throttleType)
- throws IOException, ParseException {
- // given
- subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics, gasThrottle);
-
- given(configProvider.getConfiguration()).willReturn(configuration);
- given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
- given(accountsConfig.lastThrottleExempt()).willReturn(100L);
- given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig);
- given(contractsConfig.throttleThrottleByGas()).willReturn(false);
-
- final var defs = getThrottleDefs("bootstrap/schedule-create-throttles.json");
- subject.rebuildFor(defs);
-
- // when
- final var txnInfo = scheduleCreate(SchedulableTransactionBody.DEFAULT, false, null);
- final boolean firstAns = subject.checkAndEnforceThrottle(txnInfo, TIME_INSTANT, state);
- for (int i = 1; i <= 150; i++) {
- assertTrue(subject.checkAndEnforceThrottle(txnInfo, TIME_INSTANT.plusNanos(i), state));
- }
-
- final var throttlesNow = subject.activeThrottlesFor(SCHEDULE_CREATE);
- final var aNow = throttlesNow.get(0);
-
- // then
- assertTrue(firstAns);
- assertEquals(0, aNow.used());
- assertEquals(
- 0, subject.activeThrottlesFor(CONSENSUS_SUBMIT_MESSAGE).get(0).used());
- }
-
- @ParameterizedTest
- @CsvSource({
- "FRONTEND_THROTTLE,true",
- "FRONTEND_THROTTLE,false",
- "BACKEND_THROTTLE,true",
- "BACKEND_THROTTLE,false",
- })
- @MockitoSettings(strictness = org.mockito.quality.Strictness.LENIENT)
- void usesScheduleCreateThrottleForCryptoTransferNoAutoCreations(
- final ThrottleAccumulator.ThrottleType throttleType, final boolean longTermEnabled)
- throws IOException, ParseException {
- // given
- subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics, gasThrottle);
-
- given(configProvider.getConfiguration()).willReturn(configuration);
- given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
- given(accountsConfig.lastThrottleExempt()).willReturn(100L);
- given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig);
- given(contractsConfig.throttleThrottleByGas()).willReturn(false);
- given(configuration.getConfigData(SchedulingConfig.class)).willReturn(schedulingConfig);
- given(schedulingConfig.longTermEnabled()).willReturn(longTermEnabled);
- given(configuration.getConfigData(AutoCreationConfig.class)).willReturn(autoCreationConfig);
- given(autoCreationConfig.enabled()).willReturn(true);
- given(configuration.getConfigData(LazyCreationConfig.class)).willReturn(lazyCreationConfig);
- given(lazyCreationConfig.enabled()).willReturn(false);
- given(configuration.getConfigData(EntitiesConfig.class)).willReturn(entitiesConfig);
- given(entitiesConfig.unlimitedAutoAssociationsEnabled()).willReturn(true);
-
- given(state.getReadableStates(any())).willReturn(readableStates);
-
- final var scheduledTransferNoAliases = SchedulableTransactionBody.newBuilder()
- .cryptoTransfer(cryptoTransferWithImplicitCreations(0))
- .build();
- final var defs = getThrottleDefs("bootstrap/schedule-create-throttles.json");
- subject.rebuildFor(defs);
-
- // when
- final var txnInfo = scheduleCreate(scheduledTransferNoAliases, false, null);
- final boolean ans = subject.checkAndEnforceThrottle(txnInfo, TIME_INSTANT, state);
- final var throttlesNow = subject.activeThrottlesFor(SCHEDULE_CREATE);
- final var aNow = throttlesNow.get(0);
-
- // then
- assertFalse(ans);
- assertEquals(BucketThrottle.capacityUnitsPerTxn(), aNow.used());
- assertEquals(
- longTermEnabled && throttleType == FRONTEND_THROTTLE ? BucketThrottle.capacityUnitsPerTxn() : 0,
- subject.activeThrottlesFor(CRYPTO_TRANSFER).get(0).used());
- }
-
- @ParameterizedTest
- @CsvSource({
- "FRONTEND_THROTTLE,true",
- "FRONTEND_THROTTLE,false",
- "BACKEND_THROTTLE,true",
- "BACKEND_THROTTLE,false",
- })
- @MockitoSettings(strictness = org.mockito.quality.Strictness.LENIENT)
- void doesntUseCryptoCreateThrottleForCryptoTransferWithAutoCreationIfAutoAndLazyCreationDisabled(
- final ThrottleAccumulator.ThrottleType throttleType, final boolean longTermEnabled)
- throws IOException, ParseException {
- // given
- subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics, gasThrottle);
-
- given(configProvider.getConfiguration()).willReturn(configuration);
- given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
- given(accountsConfig.lastThrottleExempt()).willReturn(100L);
- given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig);
- given(contractsConfig.throttleThrottleByGas()).willReturn(false);
- given(configuration.getConfigData(SchedulingConfig.class)).willReturn(schedulingConfig);
- given(schedulingConfig.longTermEnabled()).willReturn(longTermEnabled);
- given(configuration.getConfigData(AutoCreationConfig.class)).willReturn(autoCreationConfig);
- given(autoCreationConfig.enabled()).willReturn(false);
- given(configuration.getConfigData(LazyCreationConfig.class)).willReturn(lazyCreationConfig);
- given(lazyCreationConfig.enabled()).willReturn(false);
- given(configuration.getConfigData(EntitiesConfig.class)).willReturn(entitiesConfig);
- given(entitiesConfig.unlimitedAutoAssociationsEnabled()).willReturn(true);
-
- given(state.getReadableStates(any())).willReturn(readableStates);
- given(readableStates.get(ALIASES_KEY)).willReturn(aliases);
-
- final var alias = keyToBytes(A_PRIMITIVE_KEY);
- var accountAmounts = new ArrayList();
- accountAmounts.add(AccountAmount.newBuilder()
- .amount(-1_000_000_000L)
- .accountID(AccountID.newBuilder().accountNum(3333L).build())
- .build());
- accountAmounts.add(AccountAmount.newBuilder()
- .amount(+1_000_000_000L)
- .accountID(AccountID.newBuilder().alias(alias).build())
- .build());
- final var scheduledTransferWithAutoCreation = SchedulableTransactionBody.newBuilder()
- .cryptoTransfer(CryptoTransferTransactionBody.newBuilder()
- .transfers(TransferList.newBuilder()
- .accountAmounts(accountAmounts)
- .build()))
- .build();
-
- final var defs = getThrottleDefs("bootstrap/schedule-create-throttles.json");
- subject.rebuildFor(defs);
-
- // when
- final var txnInfo = scheduleCreate(scheduledTransferWithAutoCreation, false, null);
- final boolean ans = subject.checkAndEnforceThrottle(txnInfo, TIME_INSTANT, state);
- final var throttlesNow = subject.activeThrottlesFor(SCHEDULE_CREATE);
- final var aNow = throttlesNow.get(0);
-
- // then
- assertFalse(ans);
- assertEquals(BucketThrottle.capacityUnitsPerTxn(), aNow.used());
-
- assertEquals(
- longTermEnabled && throttleType == FRONTEND_THROTTLE ? BucketThrottle.capacityUnitsPerTxn() : 0,
- subject.activeThrottlesFor(CRYPTO_TRANSFER).get(0).used());
- }
-
- @ParameterizedTest
- @CsvSource({
- "FRONTEND_THROTTLE,true,true",
- "FRONTEND_THROTTLE,true,false",
- "FRONTEND_THROTTLE,false,true",
- "FRONTEND_THROTTLE,false,false",
- "BACKEND_THROTTLE,true,true",
- "BACKEND_THROTTLE,true,false",
- "BACKEND_THROTTLE,false,true",
- "BACKEND_THROTTLE,false,false",
- })
- @MockitoSettings(strictness = org.mockito.quality.Strictness.LENIENT)
- void doesntUseCryptoCreateThrottleForCryptoTransferWithNoAliases(
- final ThrottleAccumulator.ThrottleType throttleType,
- final boolean longTermEnabled,
- final boolean autoOrLazyCreationEnabled)
- throws IOException, ParseException {
- // given
- subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics, gasThrottle);
-
- given(configProvider.getConfiguration()).willReturn(configuration);
- given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
- given(accountsConfig.lastThrottleExempt()).willReturn(100L);
- given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig);
- given(contractsConfig.throttleThrottleByGas()).willReturn(false);
- given(configuration.getConfigData(SchedulingConfig.class)).willReturn(schedulingConfig);
- given(schedulingConfig.longTermEnabled()).willReturn(longTermEnabled);
- given(configuration.getConfigData(AutoCreationConfig.class)).willReturn(autoCreationConfig);
- given(autoCreationConfig.enabled()).willReturn(autoOrLazyCreationEnabled);
- given(configuration.getConfigData(LazyCreationConfig.class)).willReturn(lazyCreationConfig);
- given(lazyCreationConfig.enabled()).willReturn(!autoOrLazyCreationEnabled);
- given(configuration.getConfigData(EntitiesConfig.class)).willReturn(entitiesConfig);
- given(entitiesConfig.unlimitedAutoAssociationsEnabled()).willReturn(true);
-
- given(state.getReadableStates(any())).willReturn(readableStates);
-
- var accountAmounts = new ArrayList();
- accountAmounts.add(AccountAmount.newBuilder()
- .amount(-1_000_000_000L)
- .accountID(AccountID.newBuilder().accountNum(3333L).build())
- .build());
- accountAmounts.add(AccountAmount.newBuilder()
- .amount(+1_000_000_000L)
- .accountID(AccountID.newBuilder().accountNum(4444L).build())
- .build());
- final var scheduledTransferNoAliases = SchedulableTransactionBody.newBuilder()
- .cryptoTransfer(CryptoTransferTransactionBody.newBuilder()
- .transfers(TransferList.newBuilder()
- .accountAmounts(accountAmounts)
- .build()))
- .build();
-
- final var defs = getThrottleDefs("bootstrap/schedule-create-throttles.json");
- subject.rebuildFor(defs);
-
- // when
- final var txnInfo = scheduleCreate(scheduledTransferNoAliases, false, null);
- final boolean ans = subject.checkAndEnforceThrottle(txnInfo, TIME_INSTANT, state);
- final var throttlesNow = subject.activeThrottlesFor(SCHEDULE_CREATE);
- final var aNow = throttlesNow.get(0);
-
- // then
- assertFalse(ans);
- assertEquals(BucketThrottle.capacityUnitsPerTxn(), aNow.used());
- assertEquals(
- longTermEnabled && throttleType == FRONTEND_THROTTLE ? BucketThrottle.capacityUnitsPerTxn() : 0,
- subject.activeThrottlesFor(CRYPTO_TRANSFER).get(0).used());
- }
-
- @ParameterizedTest
- @CsvSource({
- "FRONTEND_THROTTLE,true,true",
- "FRONTEND_THROTTLE,true,false",
- "FRONTEND_THROTTLE,false,true",
- "FRONTEND_THROTTLE,false,false",
- "BACKEND_THROTTLE,true,true",
- "BACKEND_THROTTLE,true,false",
- "BACKEND_THROTTLE,false,true",
- "BACKEND_THROTTLE,false,false",
- })
- void doesntUseCryptoCreateThrottleForNonCryptoTransfer(
- final ThrottleAccumulator.ThrottleType throttleType,
- final boolean autoCreationEnabled,
- final boolean lazyCreationEnabled)
- throws IOException, ParseException {
- // given
- subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics, gasThrottle);
-
- given(configProvider.getConfiguration()).willReturn(configuration);
- given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
- given(accountsConfig.lastThrottleExempt()).willReturn(100L);
- given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig);
- given(contractsConfig.throttleThrottleByGas()).willReturn(false);
- given(configuration.getConfigData(SchedulingConfig.class)).willReturn(schedulingConfig);
- given(schedulingConfig.longTermEnabled()).willReturn(false);
- given(configuration.getConfigData(AutoCreationConfig.class)).willReturn(autoCreationConfig);
- given(autoCreationConfig.enabled()).willReturn(autoCreationEnabled);
- given(configuration.getConfigData(LazyCreationConfig.class)).willReturn(lazyCreationConfig);
- given(lazyCreationConfig.enabled()).willReturn(lazyCreationEnabled);
-
- final var scheduledTxn = SchedulableTransactionBody.newBuilder()
- .consensusSubmitMessage(ConsensusSubmitMessageTransactionBody.DEFAULT)
- .build();
-
- final var defs = getThrottleDefs("bootstrap/schedule-create-throttles.json");
- subject.rebuildFor(defs);
-
- // when
- final var txnInfo = scheduleCreate(scheduledTxn, false, null);
- final boolean ans = subject.checkAndEnforceThrottle(txnInfo, TIME_INSTANT, state);
- final var throttlesNow = subject.activeThrottlesFor(SCHEDULE_CREATE);
- final var aNow = throttlesNow.get(0);
-
- // then
- assertFalse(ans);
- assertEquals(BucketThrottle.capacityUnitsPerTxn(), aNow.used());
- }
-
- @ParameterizedTest
- @CsvSource({
- "FRONTEND_THROTTLE,true",
- "FRONTEND_THROTTLE,false",
- "BACKEND_THROTTLE,true",
- "BACKEND_THROTTLE,false",
- })
- @MockitoSettings(strictness = org.mockito.quality.Strictness.LENIENT)
- void usesCryptoCreateThrottleForCryptoTransferWithAutoCreationInScheduleCreate(
- final ThrottleAccumulator.ThrottleType throttleType, final boolean longTermEnabled)
- throws IOException, ParseException {
- // given
- subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics, gasThrottle);
-
- given(configProvider.getConfiguration()).willReturn(configuration);
- given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
- given(accountsConfig.lastThrottleExempt()).willReturn(100L);
- given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig);
- given(contractsConfig.throttleThrottleByGas()).willReturn(false);
- given(configuration.getConfigData(SchedulingConfig.class)).willReturn(schedulingConfig);
- given(schedulingConfig.longTermEnabled()).willReturn(longTermEnabled);
- given(configuration.getConfigData(AutoCreationConfig.class)).willReturn(autoCreationConfig);
- given(autoCreationConfig.enabled()).willReturn(true);
- given(configuration.getConfigData(LazyCreationConfig.class)).willReturn(lazyCreationConfig);
- given(lazyCreationConfig.enabled()).willReturn(false);
- given(configuration.getConfigData(EntitiesConfig.class)).willReturn(entitiesConfig);
- given(entitiesConfig.unlimitedAutoAssociationsEnabled()).willReturn(true);
-
- given(state.getReadableStates(any())).willReturn(readableStates);
- given(readableStates.get(ALIASES_KEY)).willReturn(aliases);
-
- final var alias = keyToBytes(A_PRIMITIVE_KEY);
- if (!(throttleType != FRONTEND_THROTTLE && longTermEnabled)) {
- given(aliases.get(any())).willReturn(null);
- }
-
- var accountAmounts = new ArrayList();
- accountAmounts.add(AccountAmount.newBuilder()
- .amount(-1_000_000_000L)
- .accountID(AccountID.newBuilder().accountNum(3333L).build())
- .build());
- accountAmounts.add(AccountAmount.newBuilder()
- .amount(+1_000_000_000L)
- .accountID(AccountID.newBuilder().alias(alias).build())
- .build());
- final var scheduledTransferWithAutoCreation = SchedulableTransactionBody.newBuilder()
- .cryptoTransfer(CryptoTransferTransactionBody.newBuilder()
- .transfers(TransferList.newBuilder()
- .accountAmounts(accountAmounts)
- .build()))
- .build();
-
- final var defs = getThrottleDefs("bootstrap/schedule-create-throttles.json");
- subject.rebuildFor(defs);
-
- // when
- final var txnInfo = scheduleCreate(scheduledTransferWithAutoCreation, false, null);
- final boolean ans = subject.checkAndEnforceThrottle(txnInfo, TIME_INSTANT, state);
- final var throttlesNow = subject.activeThrottlesFor(SCHEDULE_CREATE);
- final var aNow = throttlesNow.get(0);
-
- // then
- assertFalse(ans);
- if (longTermEnabled && throttleType == FRONTEND_THROTTLE) {
- // with long term enabled, we count the schedule create in addition to the auto
- // creations, which
- // is how it should have been to start with
- assertEquals(51 * BucketThrottle.capacityUnitsPerTxn(), aNow.used());
- } else if (longTermEnabled) {
- // with long term enabled, consensus throttles do not count the contained txn
- assertEquals(BucketThrottle.capacityUnitsPerTxn(), aNow.used());
- } else {
- assertEquals(50 * BucketThrottle.capacityUnitsPerTxn(), aNow.used());
- }
-
- assertEquals(0, subject.activeThrottlesFor(CRYPTO_TRANSFER).get(0).used());
- }
-
- @ParameterizedTest
- @CsvSource({
- "FRONTEND_THROTTLE,true",
- "FRONTEND_THROTTLE,false",
- "BACKEND_THROTTLE,true",
- "BACKEND_THROTTLE,false",
- })
- @MockitoSettings(strictness = org.mockito.quality.Strictness.LENIENT)
- void usesCryptoCreateThrottleForCryptoTransferWithAutoAssociationsInScheduleCreate(
- final ThrottleAccumulator.ThrottleType throttleType, final boolean longTermEnabled)
- throws IOException, ParseException {
- // given
- subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics, gasThrottle);
-
- given(configProvider.getConfiguration()).willReturn(configuration);
- given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
- given(accountsConfig.lastThrottleExempt()).willReturn(100L);
- given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig);
- given(contractsConfig.throttleThrottleByGas()).willReturn(false);
- given(configuration.getConfigData(SchedulingConfig.class)).willReturn(schedulingConfig);
- given(schedulingConfig.longTermEnabled()).willReturn(longTermEnabled);
- given(configuration.getConfigData(AutoCreationConfig.class)).willReturn(autoCreationConfig);
- given(autoCreationConfig.enabled()).willReturn(true);
- given(configuration.getConfigData(LazyCreationConfig.class)).willReturn(lazyCreationConfig);
- given(lazyCreationConfig.enabled()).willReturn(false);
- given(configuration.getConfigData(EntitiesConfig.class)).willReturn(entitiesConfig);
- given(entitiesConfig.unlimitedAutoAssociationsEnabled()).willReturn(true);
- given(state.getReadableStates(any())).willReturn(readableStates);
- given(readableStates.get(any())).willReturn(tokenRels);
-
- final var scheduledTransferWithAutAssoc = SchedulableTransactionBody.newBuilder()
- .cryptoTransfer(cryptoTransferFungibleWithAutoAssociations(10))
- .build();
-
- final var defs = getThrottleDefs("bootstrap/schedule-create-throttles.json");
- subject.rebuildFor(defs);
-
- // when
- final var txnInfo = scheduleCreate(scheduledTransferWithAutAssoc, false, null);
- final boolean ans = subject.checkAndEnforceThrottle(txnInfo, TIME_INSTANT, state);
- final var throttlesNow = subject.activeThrottlesFor(SCHEDULE_CREATE);
- final var aNow = throttlesNow.get(0);
-
- // then
- assertFalse(ans);
- if (longTermEnabled && throttleType == FRONTEND_THROTTLE) {
- // with long term enabled, we count the schedule create in addition to the auto
- // associations, which
- // is how it should have been to start with
- assertEquals(11 * BucketThrottle.capacityUnitsPerTxn(), aNow.used());
- } else {
- assertEquals(BucketThrottle.capacityUnitsPerTxn(), aNow.used());
- }
-
- assertEquals(0, subject.activeThrottlesFor(CRYPTO_TRANSFER).get(0).used());
- }
-
- @ParameterizedTest
- @CsvSource({
- "FRONTEND_THROTTLE,true",
- "FRONTEND_THROTTLE,false",
- "BACKEND_THROTTLE,true",
- "BACKEND_THROTTLE,false",
- })
- @MockitoSettings(strictness = org.mockito.quality.Strictness.LENIENT)
- void usesScheduleCreateThrottleForAliasedCryptoTransferWithNoAutoCreation(
- final ThrottleAccumulator.ThrottleType throttleType, final boolean longTermEnabled)
- throws IOException, ParseException {
- // given
- subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics, gasThrottle);
-
- given(configProvider.getConfiguration()).willReturn(configuration);
- given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
- given(accountsConfig.lastThrottleExempt()).willReturn(100L);
- given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig);
- given(contractsConfig.throttleThrottleByGas()).willReturn(false);
- given(configuration.getConfigData(SchedulingConfig.class)).willReturn(schedulingConfig);
- given(schedulingConfig.longTermEnabled()).willReturn(longTermEnabled);
- given(configuration.getConfigData(AutoCreationConfig.class)).willReturn(autoCreationConfig);
- given(autoCreationConfig.enabled()).willReturn(true);
- given(configuration.getConfigData(LazyCreationConfig.class)).willReturn(lazyCreationConfig);
- given(lazyCreationConfig.enabled()).willReturn(false);
- given(configuration.getConfigData(EntitiesConfig.class)).willReturn(entitiesConfig);
- given(entitiesConfig.unlimitedAutoAssociationsEnabled()).willReturn(true);
-
- given(state.getReadableStates(any())).willReturn(readableStates);
- given(readableStates.get(ALIASES_KEY)).willReturn(aliases);
-
- final var alias = keyToBytes(A_PRIMITIVE_KEY);
- if (!(throttleType != FRONTEND_THROTTLE && longTermEnabled)) {
- given(aliases.get(any()))
- .willReturn(AccountID.newBuilder().accountNum(1_234L).build());
- }
-
- var accountAmounts = new ArrayList();
- accountAmounts.add(AccountAmount.newBuilder()
- .amount(-1_000_000_000L)
- .accountID(AccountID.newBuilder().accountNum(3333L).build())
- .build());
- accountAmounts.add(AccountAmount.newBuilder()
- .amount(+1_000_000_000L)
- .accountID(AccountID.newBuilder().alias(alias).build())
- .build());
- final var scheduledTransferWithAutoCreation = SchedulableTransactionBody.newBuilder()
- .cryptoTransfer(CryptoTransferTransactionBody.newBuilder()
- .transfers(TransferList.newBuilder()
- .accountAmounts(accountAmounts)
- .build()))
- .build();
-
- final var defs = getThrottleDefs("bootstrap/schedule-create-throttles.json");
- subject.rebuildFor(defs);
-
- // when
- final var txnInfo = scheduleCreate(scheduledTransferWithAutoCreation, false, null);
- final boolean ans = subject.checkAndEnforceThrottle(txnInfo, TIME_INSTANT, state);
- final var throttlesNow = subject.activeThrottlesFor(SCHEDULE_CREATE);
- final var aNow = throttlesNow.get(0);
-
- // then
- assertFalse(ans);
- assertEquals(BucketThrottle.capacityUnitsPerTxn(), aNow.used());
-
- assertEquals(
- longTermEnabled && throttleType == FRONTEND_THROTTLE ? BucketThrottle.capacityUnitsPerTxn() : 0,
- subject.activeThrottlesFor(CRYPTO_TRANSFER).get(0).used());
- }
-
- @Test
- void reclaimsAllUsagesOnThrottledCheckAndEnforceThrottleTxn() throws IOException, ParseException {
- // given
- subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, FRONTEND_THROTTLE, throttleMetrics, gasThrottle);
-
- given(configProvider.getConfiguration()).willReturn(configuration);
- given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
- given(accountsConfig.lastThrottleExempt()).willReturn(100L);
- given(configuration.getConfigData(ContractsConfig.class)).willReturn(contractsConfig);
- given(contractsConfig.throttleThrottleByGas()).willReturn(false);
- given(configuration.getConfigData(SchedulingConfig.class)).willReturn(schedulingConfig);
- given(schedulingConfig.longTermEnabled()).willReturn(true);
-
- final var scheduledSubmit = SchedulableTransactionBody.newBuilder()
- .consensusSubmitMessage(ConsensusSubmitMessageTransactionBody.DEFAULT)
- .build();
- final var defs = getThrottleDefs("bootstrap/schedule-create-throttles-inverted.json");
- subject.rebuildFor(defs);
-
- final var txnInfo = scheduleCreate(scheduledSubmit, false, null);
- final boolean firstAns = subject.checkAndEnforceThrottle(txnInfo, TIME_INSTANT, state);
- boolean subsequentAns = false;
- for (int i = 1; i <= 150; i++) {
- subsequentAns = subject.checkAndEnforceThrottle(txnInfo, TIME_INSTANT.plusNanos(i), state);
- }
-
- assertFalse(firstAns);
- assertTrue(subsequentAns);
- assertEquals(
- 4999250000000L,
- subject.activeThrottlesFor(SCHEDULE_CREATE).get(0).used());
-
- assertEquals(
- 4999999250000L,
- subject.activeThrottlesFor(CONSENSUS_SUBMIT_MESSAGE).get(0).used());
-
- // when
- subject.resetUsage();
-
- // then
- assertEquals(0L, subject.activeThrottlesFor(SCHEDULE_CREATE).get(0).used());
- assertEquals(
- 0L, subject.activeThrottlesFor(CONSENSUS_SUBMIT_MESSAGE).get(0).used());
- }
-
@ParameterizedTest
@CsvSource({
"FRONTEND_THROTTLE,true,true",
@@ -2011,7 +1415,7 @@ void usesScheduleSignThrottle(
throws IOException, ParseException {
// given
subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics, gasThrottle);
+ () -> CAPACITY_SPLIT, configProvider::getConfiguration, throttleType, throttleMetrics, gasThrottle);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
@@ -2081,7 +1485,7 @@ void usesScheduleSignThrottleWithNestedThrottleExempt(
throws IOException, ParseException {
// given
subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics, gasThrottle);
+ () -> CAPACITY_SPLIT, configProvider::getConfiguration, throttleType, throttleMetrics, gasThrottle);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
@@ -2139,7 +1543,11 @@ void usesScheduleSignThrottleWithNestedThrottleExempt(
void scheduleSignAlwaysThrottledWhenNoBody() throws IOException, ParseException {
// given
subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, FRONTEND_THROTTLE, throttleMetrics, gasThrottle);
+ () -> CAPACITY_SPLIT,
+ configProvider::getConfiguration,
+ FRONTEND_THROTTLE,
+ throttleMetrics,
+ gasThrottle);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
@@ -2185,7 +1593,11 @@ void scheduleSignAlwaysThrottledWhenNoBody() throws IOException, ParseException
void scheduleSignAlwaysThrottledWhenNotExisting() throws IOException, ParseException {
// given
subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, FRONTEND_THROTTLE, throttleMetrics, gasThrottle);
+ () -> CAPACITY_SPLIT,
+ configProvider::getConfiguration,
+ FRONTEND_THROTTLE,
+ throttleMetrics,
+ gasThrottle);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
@@ -2232,7 +1644,7 @@ void usesCryptoCreateThrottleForCryptoTransferWithAutoCreationInScheduleSign(
// given
subject = new ThrottleAccumulator(
- () -> CAPACITY_SPLIT, configProvider, throttleType, throttleMetrics, gasThrottle);
+ () -> CAPACITY_SPLIT, configProvider::getConfiguration, throttleType, throttleMetrics, gasThrottle);
given(configProvider.getConfiguration()).willReturn(configuration);
given(configuration.getConfigData(AccountsConfig.class)).willReturn(accountsConfig);
@@ -2313,7 +1725,12 @@ void usesCryptoCreateThrottleForCryptoTransferWithAutoCreationInScheduleSign(
@Test
void updateMetrics() {
// given
- subject = new ThrottleAccumulator(() -> CAPACITY_SPLIT, configProvider, FRONTEND_THROTTLE, throttleMetrics);
+ subject = new ThrottleAccumulator(
+ () -> CAPACITY_SPLIT,
+ configProvider::getConfiguration,
+ FRONTEND_THROTTLE,
+ throttleMetrics,
+ Verbose.YES);
// when
subject.updateAllMetrics();
diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/handle/HandleWorkflowTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/handle/HandleWorkflowTest.java
index c1d8aa2f347b..03670c97601b 100644
--- a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/handle/HandleWorkflowTest.java
+++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/handle/HandleWorkflowTest.java
@@ -31,9 +31,12 @@
import com.hedera.hapi.node.base.SemanticVersion;
import com.hedera.hapi.node.base.Timestamp;
import com.hedera.node.app.blocks.BlockStreamManager;
+import com.hedera.node.app.blocks.impl.BoundaryStateChangeListener;
+import com.hedera.node.app.blocks.impl.KVStateChangeListener;
import com.hedera.node.app.fees.ExchangeRateManager;
import com.hedera.node.app.records.BlockRecordManager;
import com.hedera.node.app.service.addressbook.impl.helpers.AddressBookHelper;
+import com.hedera.node.app.service.schedule.ScheduleService;
import com.hedera.node.app.service.token.impl.handlers.staking.StakeInfoHelper;
import com.hedera.node.app.service.token.impl.handlers.staking.StakePeriodManager;
import com.hedera.node.app.spi.metrics.StoreMetricsService;
@@ -97,6 +100,15 @@ class HandleWorkflowTest {
@Mock
private CacheWarmer cacheWarmer;
+ @Mock
+ private ScheduleService scheduleService;
+
+ @Mock
+ private KVStateChangeListener kvStateChangeListener;
+
+ @Mock
+ private BoundaryStateChangeListener boundaryStateChangeListener;
+
@Mock
private OpWorkflowMetrics opWorkflowMetrics;
@@ -212,6 +224,9 @@ private void givenSubjectWith(
migrationStateChanges,
userTxnFactory,
new AddressBookHelper(),
- tssBaseService);
+ tssBaseService,
+ kvStateChangeListener,
+ boundaryStateChangeListener,
+ scheduleService);
}
}
diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/handle/steps/UserTxnTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/handle/steps/UserTxnTest.java
index 2e3d24173f57..d17cf93e1b2c 100644
--- a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/handle/steps/UserTxnTest.java
+++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/handle/steps/UserTxnTest.java
@@ -22,21 +22,25 @@
import static com.hedera.node.app.workflows.handle.TransactionType.GENESIS_TRANSACTION;
import static java.util.Collections.emptyMap;
import static org.assertj.core.api.Assertions.assertThat;
-import static org.junit.jupiter.api.Assertions.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertSame;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.BDDMockito.given;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.withSettings;
+import com.hedera.hapi.block.stream.BlockItem;
import com.hedera.hapi.node.base.AccountID;
import com.hedera.hapi.node.base.Key;
import com.hedera.hapi.node.base.SignatureMap;
+import com.hedera.hapi.node.base.Transaction;
+import com.hedera.hapi.node.base.TransactionID;
import com.hedera.hapi.node.state.token.Account;
+import com.hedera.hapi.node.transaction.ExchangeRateSet;
import com.hedera.hapi.node.transaction.TransactionBody;
import com.hedera.hapi.platform.event.EventTransaction;
import com.hedera.node.app.blocks.BlockStreamManager;
+import com.hedera.node.app.blocks.impl.BlockStreamBuilder;
import com.hedera.node.app.blocks.impl.BoundaryStateChangeListener;
import com.hedera.node.app.blocks.impl.KVStateChangeListener;
import com.hedera.node.app.blocks.impl.PairedStreamBuilder;
@@ -44,13 +48,11 @@
import com.hedera.node.app.fees.FeeManager;
import com.hedera.node.app.records.BlockRecordManager;
import com.hedera.node.app.service.consensus.impl.ConsensusServiceImpl;
-import com.hedera.node.app.service.token.api.FeeStreamBuilder;
import com.hedera.node.app.services.ServiceScopeLookup;
import com.hedera.node.app.spi.authorization.Authorizer;
import com.hedera.node.app.spi.fees.Fees;
import com.hedera.node.app.spi.metrics.StoreMetricsService;
import com.hedera.node.app.spi.records.BlockRecordInfo;
-import com.hedera.node.app.spi.workflows.record.StreamBuilder;
import com.hedera.node.app.store.ReadableStoreFactory;
import com.hedera.node.app.throttle.NetworkUtilizationManager;
import com.hedera.node.app.workflows.TransactionInfo;
@@ -77,7 +79,6 @@
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
-import org.mockito.Mockito;
import org.mockito.junit.jupiter.MockitoExtension;
@ExtendWith(MockitoExtension.class)
@@ -166,8 +167,6 @@ class UserTxnTest {
@Mock
private WritableKVState accountState;
- private StreamBuilder baseBuilder;
-
@BeforeEach
void setUp() {
given(preHandleWorkflow.getCurrentPreHandleResult(
@@ -182,14 +181,12 @@ void usesPairedStreamBuilderWithDefaultConfig() {
given(configProvider.getConfiguration()).willReturn(new VersionedConfigImpl(DEFAULT_CONFIG, 1));
final var factory = createUserTxnFactory();
- final var subject =
- factory.createUserTxn(state, event, creatorInfo, PLATFORM_TXN, CONSENSUS_NOW, GENESIS_TRANSACTION);
+ final var subject = factory.createUserTxn(state, creatorInfo, PLATFORM_TXN, CONSENSUS_NOW, GENESIS_TRANSACTION);
assertSame(GENESIS_TRANSACTION, subject.type());
assertSame(CONSENSUS_CREATE_TOPIC, subject.functionality());
assertSame(CONSENSUS_NOW, subject.consensusNow());
assertSame(state, subject.state());
- assertSame(event, subject.event());
assertSame(txnInfo, subject.txnInfo());
assertSame(preHandleResult, subject.preHandleResult());
assertSame(creatorInfo, subject.creatorInfo());
@@ -203,57 +200,71 @@ void usesPairedStreamBuilderWithDefaultConfig() {
@Test
void constructsDispatchAsExpectedWithCongestionMultiplierGreaterThanOne() {
- baseBuilder = Mockito.mock(StreamBuilder.class, withSettings().extraInterfaces(FeeStreamBuilder.class));
given(configProvider.getConfiguration()).willReturn(new VersionedConfigImpl(BLOCKS_CONFIG, 1));
given(txnInfo.payerID()).willReturn(PAYER_ID);
- given(txnInfo.txBody()).willReturn(TransactionBody.DEFAULT);
+ given(txnInfo.txBody())
+ .willReturn(TransactionBody.newBuilder()
+ .transactionID(TransactionID.DEFAULT)
+ .build());
+ given(txnInfo.transaction()).willReturn(Transaction.DEFAULT);
given(txnInfo.signatureMap()).willReturn(SignatureMap.DEFAULT);
given(preHandleResult.payerKey()).willReturn(AN_ED25519_KEY);
given(preHandleResult.getVerificationResults()).willReturn(emptyMap());
- given(feeManager.congestionMultiplierFor(
- eq(TransactionBody.DEFAULT), eq(CONSENSUS_CREATE_TOPIC), any(ReadableStoreFactory.class)))
+ given(feeManager.congestionMultiplierFor(any(), eq(CONSENSUS_CREATE_TOPIC), any(ReadableStoreFactory.class)))
.willReturn(CONGESTION_MULTIPLIER);
- given(serviceScopeLookup.getServiceName(TransactionBody.DEFAULT)).willReturn(ConsensusServiceImpl.NAME);
+ given(serviceScopeLookup.getServiceName(any())).willReturn(ConsensusServiceImpl.NAME);
given(state.getWritableStates(any())).willReturn(writableStates);
given(writableStates.get(ACCOUNTS_KEY)).willReturn(accountState);
given(accountState.getStateKey()).willReturn(ACCOUNTS_KEY);
given(dispatcher.dispatchComputeFees(any())).willReturn(Fees.FREE);
final var factory = createUserTxnFactory();
- final var subject =
- factory.createUserTxn(state, event, creatorInfo, PLATFORM_TXN, CONSENSUS_NOW, GENESIS_TRANSACTION);
+ final var subject = factory.createUserTxn(state, creatorInfo, PLATFORM_TXN, CONSENSUS_NOW, GENESIS_TRANSACTION);
- final var dispatch = factory.createDispatch(subject, baseBuilder);
+ final var dispatch = factory.createDispatch(subject, ExchangeRateSet.DEFAULT);
assertSame(PAYER_ID, dispatch.payerId());
- verify(baseBuilder).congestionMultiplier(CONGESTION_MULTIPLIER);
+ final var result = ((BlockStreamBuilder) subject.baseBuilder())
+ .build().blockItems().stream()
+ .filter(BlockItem::hasTransactionResult)
+ .findFirst()
+ .map(BlockItem::transactionResultOrThrow)
+ .orElseThrow();
+ assertEquals(CONGESTION_MULTIPLIER, result.congestionPricingMultiplier());
}
@Test
void constructsDispatchAsExpectedWithCongestionMultiplierEqualToOne() {
- baseBuilder = Mockito.mock(StreamBuilder.class, withSettings().extraInterfaces(FeeStreamBuilder.class));
given(configProvider.getConfiguration()).willReturn(new VersionedConfigImpl(BLOCKS_CONFIG, 1));
given(txnInfo.payerID()).willReturn(PAYER_ID);
- given(txnInfo.txBody()).willReturn(TransactionBody.DEFAULT);
+ given(txnInfo.txBody())
+ .willReturn(TransactionBody.newBuilder()
+ .transactionID(TransactionID.DEFAULT)
+ .build());
+ given(txnInfo.transaction()).willReturn(Transaction.DEFAULT);
given(txnInfo.signatureMap()).willReturn(SignatureMap.DEFAULT);
given(preHandleResult.getVerificationResults()).willReturn(emptyMap());
- given(feeManager.congestionMultiplierFor(
- eq(TransactionBody.DEFAULT), eq(CONSENSUS_CREATE_TOPIC), any(ReadableStoreFactory.class)))
+ given(feeManager.congestionMultiplierFor(any(), eq(CONSENSUS_CREATE_TOPIC), any(ReadableStoreFactory.class)))
.willReturn(1L);
- given(serviceScopeLookup.getServiceName(TransactionBody.DEFAULT)).willReturn(ConsensusServiceImpl.NAME);
+ given(serviceScopeLookup.getServiceName(any())).willReturn(ConsensusServiceImpl.NAME);
given(state.getWritableStates(any())).willReturn(writableStates);
given(writableStates.get(ACCOUNTS_KEY)).willReturn(accountState);
given(accountState.getStateKey()).willReturn(ACCOUNTS_KEY);
given(dispatcher.dispatchComputeFees(any())).willReturn(Fees.FREE);
final var factory = createUserTxnFactory();
- final var subject =
- factory.createUserTxn(state, event, creatorInfo, PLATFORM_TXN, CONSENSUS_NOW, GENESIS_TRANSACTION);
+ final var subject = factory.createUserTxn(state, creatorInfo, PLATFORM_TXN, CONSENSUS_NOW, GENESIS_TRANSACTION);
- final var dispatch = factory.createDispatch(subject, baseBuilder);
+ final var dispatch = factory.createDispatch(subject, ExchangeRateSet.DEFAULT);
assertSame(PAYER_ID, dispatch.payerId());
- verify(baseBuilder, never()).congestionMultiplier(1);
+ final var result = ((BlockStreamBuilder) subject.baseBuilder())
+ .build().blockItems().stream()
+ .filter(BlockItem::hasTransactionResult)
+ .findFirst()
+ .map(BlockItem::transactionResultOrThrow)
+ .orElseThrow();
+ assertEquals(0L, result.congestionPricingMultiplier());
}
private UserTxnFactory createUserTxnFactory() {
diff --git a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/standalone/TransactionExecutorsTest.java b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/standalone/TransactionExecutorsTest.java
index 9f87d489e948..75ade645a7e4 100644
--- a/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/standalone/TransactionExecutorsTest.java
+++ b/hedera-node/hedera-app/src/test/java/com/hedera/node/app/workflows/standalone/TransactionExecutorsTest.java
@@ -38,6 +38,7 @@
import com.hedera.hapi.node.file.FileCreateTransactionBody;
import com.hedera.hapi.node.state.file.File;
import com.hedera.hapi.node.state.roster.Roster;
+import com.hedera.hapi.node.transaction.ThrottleDefinitions;
import com.hedera.hapi.node.transaction.TransactionBody;
import com.hedera.node.app.config.BootstrapConfigProviderImpl;
import com.hedera.node.app.config.ConfigProviderImpl;
@@ -61,9 +62,12 @@
import com.hedera.node.app.service.util.impl.UtilServiceImpl;
import com.hedera.node.app.services.AppContextImpl;
import com.hedera.node.app.services.ServicesRegistry;
+import com.hedera.node.app.spi.AppContext;
import com.hedera.node.app.spi.signatures.SignatureVerifier;
import com.hedera.node.app.state.recordcache.RecordCacheService;
+import com.hedera.node.app.throttle.AppThrottleFactory;
import com.hedera.node.app.throttle.CongestionThrottleService;
+import com.hedera.node.app.throttle.ThrottleAccumulator;
import com.hedera.node.app.version.ServicesSoftwareVersion;
import com.hedera.node.config.data.EntitiesConfig;
import com.hedera.node.config.data.FilesConfig;
@@ -240,7 +244,15 @@ private State genesisState(@NonNull final Map overrides) {
final var config = configBuilder.getOrCreateConfig();
final var networkInfo = fakeNetworkInfo();
final var servicesRegistry = new FakeServicesRegistry();
- registerServices(config, servicesRegistry);
+ final var appContext = new AppContextImpl(
+ InstantSource.system(),
+ signatureVerifier,
+ UNAVAILABLE_GOSSIP,
+ () -> config,
+ () -> DEFAULT_NODE_INFO,
+ new AppThrottleFactory(
+ () -> config, () -> state, () -> ThrottleDefinitions.DEFAULT, ThrottleAccumulator::new));
+ registerServices(appContext, config, servicesRegistry);
final var migrator = new FakeServiceMigrator();
final var bootstrapConfig = new BootstrapConfigProviderImpl().getConfiguration();
migrator.doMigrations(
@@ -285,17 +297,14 @@ private Map> genesisContentProviders(
}
private void registerServices(
- @NonNull final Configuration config, @NonNull final ServicesRegistry servicesRegistry) {
+ @NonNull final AppContext appContext,
+ @NonNull final Configuration config,
+ @NonNull final ServicesRegistry servicesRegistry) {
// Register all service schema RuntimeConstructable factories before platform init
Set.of(
new EntityIdService(),
new ConsensusServiceImpl(),
- new ContractServiceImpl(new AppContextImpl(
- InstantSource.system(),
- signatureVerifier,
- UNAVAILABLE_GOSSIP,
- () -> config,
- () -> DEFAULT_NODE_INFO)),
+ new ContractServiceImpl(appContext),
new FileServiceImpl(),
new FreezeServiceImpl(),
new ScheduleServiceImpl(),
diff --git a/hedera-node/hedera-config/src/main/java/com/hedera/node/config/data/SchedulingConfig.java b/hedera-node/hedera-config/src/main/java/com/hedera/node/config/data/SchedulingConfig.java
index c1bc6a11fef9..9066c97c8a43 100644
--- a/hedera-node/hedera-config/src/main/java/com/hedera/node/config/data/SchedulingConfig.java
+++ b/hedera-node/hedera-config/src/main/java/com/hedera/node/config/data/SchedulingConfig.java
@@ -16,6 +16,7 @@
package com.hedera.node.config.data;
+import com.hedera.node.app.hapi.utils.sysfiles.domain.throttling.ScaleFactor;
import com.hedera.node.config.NetworkProperty;
import com.hedera.node.config.types.HederaFunctionalitySet;
import com.swirlds.config.api.ConfigData;
@@ -25,8 +26,11 @@
// spotless:off
@ConfigData("scheduling")
public record SchedulingConfig(
+ @ConfigProperty(defaultValue = "1:10") ScaleFactor schedulableCapacityFraction,
@ConfigProperty(defaultValue = "false") @NetworkProperty boolean longTermEnabled,
- @ConfigProperty(defaultValue = "100") @NetworkProperty long maxTxnPerSec,
+ @ConfigProperty(defaultValue = "100") @NetworkProperty int maxExecutionsPerUserTxn,
+ @ConfigProperty(defaultValue = "100") @NetworkProperty int maxTxnPerSec,
+ @ConfigProperty(defaultValue = "1000") @NetworkProperty int consTimeSeparationNanos,
@ConfigProperty(defaultValue = "10000000") @NetworkProperty long maxNumber,
@ConfigProperty(defaultValue = "5356800") @NetworkProperty long maxExpirationFutureSeconds,
@ConfigProperty(defaultValue =
diff --git a/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/ReadableScheduleStoreImpl.java b/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/ReadableScheduleStoreImpl.java
index 845afd5432df..4812c3657737 100644
--- a/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/ReadableScheduleStoreImpl.java
+++ b/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/ReadableScheduleStoreImpl.java
@@ -16,22 +16,25 @@
package com.hedera.node.app.service.schedule.impl;
+import static com.hedera.node.app.service.schedule.impl.ScheduleStoreUtility.calculateBytesHash;
+import static java.util.Objects.requireNonNull;
+
import com.hedera.hapi.node.base.ScheduleID;
+import com.hedera.hapi.node.base.TimestampSeconds;
import com.hedera.hapi.node.state.primitives.ProtoBytes;
-import com.hedera.hapi.node.state.primitives.ProtoLong;
import com.hedera.hapi.node.state.schedule.Schedule;
-import com.hedera.hapi.node.state.schedule.ScheduleIdList;
+import com.hedera.hapi.node.state.schedule.ScheduledCounts;
+import com.hedera.hapi.node.state.schedule.ScheduledOrder;
+import com.hedera.hapi.node.state.throttles.ThrottleUsageSnapshots;
import com.hedera.node.app.service.schedule.ReadableScheduleStore;
import com.hedera.node.app.service.schedule.impl.schemas.V0490ScheduleSchema;
import com.hedera.node.app.service.schedule.impl.schemas.V0570ScheduleSchema;
-import com.hedera.pbj.runtime.io.buffer.Bytes;
import com.swirlds.state.spi.ReadableKVState;
import com.swirlds.state.spi.ReadableStates;
import edu.umd.cs.findbugs.annotations.NonNull;
import edu.umd.cs.findbugs.annotations.Nullable;
import java.util.ArrayList;
import java.util.List;
-import java.util.Objects;
/**
* Provides read-only methods for interacting with the underlying data storage mechanisms for
@@ -43,7 +46,9 @@ public class ReadableScheduleStoreImpl implements ReadableScheduleStore {
"Null states instance passed to ReadableScheduleStore constructor, possible state corruption.";
private final ReadableKVState schedulesById;
- private final ReadableKVState scheduleIdsByExpirationSecond;
+ private final ReadableKVState scheduledCounts;
+ private final ReadableKVState scheduledUsages;
+ private final ReadableKVState scheduledOrders;
private final ReadableKVState scheduleIdByStringHash;
/**
@@ -52,9 +57,11 @@ public class ReadableScheduleStoreImpl implements ReadableScheduleStore {
* @param states The state to use.
*/
public ReadableScheduleStoreImpl(@NonNull final ReadableStates states) {
- Objects.requireNonNull(states, NULL_STATE_IN_CONSTRUCTOR_MESSAGE);
+ requireNonNull(states, NULL_STATE_IN_CONSTRUCTOR_MESSAGE);
schedulesById = states.get(V0490ScheduleSchema.SCHEDULES_BY_ID_KEY);
- scheduleIdsByExpirationSecond = states.get(V0570ScheduleSchema.SCHEDULE_IDS_BY_EXPIRY_SEC_KEY);
+ scheduledCounts = states.get(V0570ScheduleSchema.SCHEDULED_COUNTS_KEY);
+ scheduledOrders = states.get(V0570ScheduleSchema.SCHEDULED_ORDERS_KEY);
+ scheduledUsages = states.get(V0570ScheduleSchema.SCHEDULED_USAGES_KEY);
scheduleIdByStringHash = states.get(V0570ScheduleSchema.SCHEDULE_ID_BY_EQUALITY_KEY);
}
@@ -74,16 +81,30 @@ public Schedule get(@Nullable final ScheduleID id) {
@Override
@Nullable
- public ScheduleID getByEquality(final @NonNull Schedule scheduleToMatch) {
- Bytes bytesHash = ScheduleStoreUtility.calculateBytesHash(scheduleToMatch);
+ public ScheduleID getByEquality(@NonNull final Schedule schedule) {
+ requireNonNull(schedule);
+ final var bytesHash = calculateBytesHash(schedule);
return scheduleIdByStringHash.get(new ProtoBytes(bytesHash));
}
@Nullable
@Override
- public List getByExpirationSecond(final long expirationTime) {
- final ScheduleIdList inStateValue = scheduleIdsByExpirationSecond.get(new ProtoLong(expirationTime));
- return inStateValue != null ? inStateValue.scheduleIds() : null;
+ public ScheduleID getByOrder(@NonNull final ScheduledOrder scheduledOrder) {
+ requireNonNull(scheduledOrder);
+ return scheduledOrders.get(scheduledOrder);
+ }
+
+ @Override
+ public @NonNull List getByExpirationSecond(final long expirationTime) {
+ final List scheduleIds = new ArrayList<>();
+ final var counts = scheduledCounts.get(new TimestampSeconds(expirationTime));
+ if (counts != null) {
+ for (int i = counts.numberProcessed(), n = counts.numberScheduled(); i < n; i++) {
+ final var scheduleId = scheduledOrders.get(new ScheduledOrder(expirationTime, i));
+ scheduleIds.add(requireNonNull(scheduleId));
+ }
+ }
+ return scheduleIds;
}
/**
@@ -104,4 +125,21 @@ public List getByExpirationBetween(final long firstSecondToExpire, fin
public long numSchedulesInState() {
return schedulesById.size();
}
+
+ @Override
+ public int numTransactionsScheduledAt(final long consensusSecond) {
+ final var counts = scheduledCounts.get(new TimestampSeconds(consensusSecond));
+ return counts == null ? 0 : counts.numberScheduled();
+ }
+
+ @Nullable
+ @Override
+ public ScheduledCounts scheduledCountsAt(long consensusSecond) {
+ return scheduledCounts.get(new TimestampSeconds(consensusSecond));
+ }
+
+ @Override
+ public @Nullable ThrottleUsageSnapshots usageSnapshotsForScheduled(final long consensusSecond) {
+ return scheduledUsages.get(new TimestampSeconds(consensusSecond));
+ }
}
diff --git a/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/ScheduleServiceImpl.java b/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/ScheduleServiceImpl.java
index 85f9dec0df84..37a4f9c1ee3b 100644
--- a/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/ScheduleServiceImpl.java
+++ b/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/ScheduleServiceImpl.java
@@ -16,22 +16,28 @@
package com.hedera.node.app.service.schedule.impl;
+import static com.hedera.node.app.service.schedule.impl.handlers.AbstractScheduleHandler.simpleKeyVerifierFrom;
import static com.hedera.node.app.service.schedule.impl.handlers.HandlerUtility.childAsOrdinary;
+import static java.util.Objects.requireNonNull;
import com.hedera.hapi.node.state.schedule.Schedule;
+import com.hedera.hapi.node.state.schedule.ScheduledOrder;
+import com.hedera.node.app.service.schedule.ExecutableTxn;
+import com.hedera.node.app.service.schedule.ExecutableTxnIterator;
import com.hedera.node.app.service.schedule.ScheduleService;
+import com.hedera.node.app.service.schedule.ScheduleStreamBuilder;
+import com.hedera.node.app.service.schedule.WritableScheduleStore;
import com.hedera.node.app.service.schedule.impl.schemas.V0490ScheduleSchema;
import com.hedera.node.app.service.schedule.impl.schemas.V0570ScheduleSchema;
+import com.hedera.node.app.service.token.ReadableAccountStore;
import com.hedera.node.app.spi.RpcService;
-import com.hedera.node.app.spi.signatures.VerificationAssistant;
import com.hedera.node.app.spi.store.StoreFactory;
import com.swirlds.state.lifecycle.SchemaRegistry;
import edu.umd.cs.findbugs.annotations.NonNull;
+import edu.umd.cs.findbugs.annotations.Nullable;
import java.time.Instant;
-import java.util.Iterator;
-import java.util.List;
+import java.util.Comparator;
import java.util.NoSuchElementException;
-import java.util.function.Supplier;
/**
* Standard implementation of the {@link ScheduleService} {@link RpcService}.
@@ -44,77 +50,195 @@ public void registerSchemas(@NonNull final SchemaRegistry registry) {
}
@Override
- public Iterator iterTxnsForInterval(
- final Instant start, final Instant end, final Supplier cleanupStoreFactory) {
- final var store = cleanupStoreFactory.get().readableStore(ReadableScheduleStoreImpl.class);
-
- // Get transactions from state that are not executed/deleted
- final var executableTxns = store.getByExpirationBetween(start.getEpochSecond(), end.getEpochSecond()).stream()
- .filter(schedule -> !schedule.executed() && !schedule.deleted())
- .toList();
-
- // Return a custom iterator that supports the remove() method
- return new Iterator<>() {
- private int currentIndex = -1;
- private ExecutableTxn lastReturned;
- private boolean shouldCleanUp = true;
- private final List transactions = executableTxns;
- private final long startSecond = start.getEpochSecond();
- private final long endSecond = end.getEpochSecond();
-
- @Override
- public boolean hasNext() {
- var hasNext = currentIndex + 1 < transactions.size();
- if (!hasNext && shouldCleanUp) {
- // After we finish iterating, clean up the expired schedules
- cleanUpExpiredSchedules();
- }
- return hasNext;
+ public ExecutableTxnIterator executableTxns(
+ @NonNull final Instant start, @NonNull final Instant end, @NonNull final StoreFactory storeFactory) {
+ requireNonNull(start);
+ requireNonNull(end);
+ requireNonNull(storeFactory);
+ return new PurgingIterator(start.getEpochSecond(), end.getEpochSecond(), storeFactory);
+ }
+
+ /**
+ * An {@link ExecutableTxnIterator} that traverses the executable transactions in the specified
+ * interval and purges all traversed scheduling metadata (not just for executable transactions)
+ * in response to calls to {@link ExecutableTxnIterator#remove()} and
+ * {@link ExecutableTxnIterator#purgeUntilNext()}.
+ */
+ private static class PurgingIterator implements ExecutableTxnIterator {
+ private static final Comparator ORDER_COMPARATOR =
+ Comparator.comparingLong(ScheduledOrder::expirySecond).thenComparingInt(ScheduledOrder::orderNumber);
+
+ private final long startSecond;
+ private final long endSecond;
+ private final StoreFactory storeFactory;
+ private final WritableScheduleStore scheduleStore;
+
+ /**
+ * True if the next executable transaction to be processed is known; false otherwise.
+ */
+ private boolean nextKnown = false;
+
+ /**
+ * The order of the next executable transaction to be processed. Null in exactly two cases:
+ *
+ * - When neither {@link #hasNext()} nor {@link #next()} has ever been called (so that
+ * {@link #nextKnown} is false).
+ * - When the last call to {@link #hasNext()} or {@link #next()} discovered that
+ * there are no more executable transactions in the scoped {@code [start, end]} interval (so
+ * that {@link #nextKnown} is true).
+ *
+ * If not null, this is the order of the last executable transaction to have been discovered.
+ * When {@link #nextKnown} is true, the executable transaction with this order will be returned
+ * from the next call to {@link #next()}; if {@link #nextKnown} is false, the executable
+ * transaction with this order was already returned from a call to {@link #next()}.
+ */
+ @Nullable
+ private ScheduledOrder nextOrder;
+
+ /**
+ * If not null, the schedule representing the next executable transaction to be processed.
+ */
+ @Nullable
+ private Schedule nextSchedule;
+
+ /**
+ * If not null, the earliest order before {@link #nextOrder} that is known to contain scheduled
+ * transaction metadata.
+ */
+ @Nullable
+ private ScheduledOrder previousOrder;
+
+ /**
+ * If not null, the earliest order after {@link #nextOrder} that may contain scheduled transaction metadata.
+ */
+ @Nullable
+ private ScheduledOrder candidateOrder;
+
+ public PurgingIterator(final long startSecond, final long endSecond, @NonNull final StoreFactory storeFactory) {
+ this.startSecond = startSecond;
+ this.endSecond = endSecond;
+ this.storeFactory = requireNonNull(storeFactory);
+ this.scheduleStore = storeFactory.writableStore(WritableScheduleStore.class);
+ }
+
+ @Override
+ public boolean hasNext() {
+ return prepNext() != null;
+ }
+
+ @Override
+ public ExecutableTxn next() {
+ if (!nextKnown) {
+ prepNext();
+ }
+ nextKnown = false;
+ if (nextSchedule == null) {
+ throw new NoSuchElementException();
}
+ return executableTxnFrom(storeFactory.readableStore(ReadableAccountStore.class), nextSchedule);
+ }
- @Override
- public ExecutableTxn next() {
- if (!hasNext()) {
- if (shouldCleanUp) {
- // If excessive next() calls are made without calling hasNext(), clean up the expired schedules
- cleanUpExpiredSchedules();
- }
- throw new NoSuchElementException();
- }
- lastReturned = toExecutableTxn(transactions.get(++currentIndex));
- return lastReturned;
+ @Override
+ public void remove() {
+ if (nextOrder == null) {
+ throw new IllegalStateException("remove() called before next()");
+ }
+ if (candidateOrder != null && ORDER_COMPARATOR.compare(candidateOrder, nextOrder) > 0) {
+ throw new IllegalStateException("remove() called twice");
}
+ // Pointer to the order whose executable transaction metadata should be purged
+ var order = requireNonNull(previousOrder);
+ while (ORDER_COMPARATOR.compare(order, nextOrder) <= 0) {
+ final var lastOfSecond = scheduleStore.purgeByOrder(order);
+ order = next(order, lastOfSecond);
+ }
+ candidateOrder = order;
+ previousOrder = null;
+ }
- @Override
- public void remove() {
- if (lastReturned == null) {
- throw new IllegalStateException("No transaction to remove");
+ @Override
+ public boolean purgeUntilNext() {
+ if (!nextKnown) {
+ throw new IllegalStateException("purgeUntilNext() called before next()");
+ }
+ if (previousOrder != null) {
+ var order = previousOrder;
+ final var boundaryOrder = nextOrder != null ? nextOrder : new ScheduledOrder(endSecond + 1, 0);
+ while (ORDER_COMPARATOR.compare(order, boundaryOrder) < 0) {
+ final var lastOfSecond = scheduleStore.purgeByOrder(order);
+ order = next(order, lastOfSecond);
}
-
- // Use the StoreFactory to mark a schedule as deleted
- final var iteratorStore = cleanupStoreFactory.get().writableStore(WritableScheduleStoreImpl.class);
- final var scheduleId = transactions.get(currentIndex).scheduleId();
- iteratorStore.delete(scheduleId, Instant.now());
+ return true;
}
+ return false;
+ }
- private void cleanUpExpiredSchedules() {
- if (shouldCleanUp) {
- // After we finish iterating, clean up the expired schedules
- var cleanUpStore = cleanupStoreFactory.get().writableStore(WritableScheduleStoreImpl.class);
- cleanUpStore.purgeExpiredSchedulesBetween(startSecond, endSecond);
- shouldCleanUp = false;
+ /**
+ * When {@link #nextKnown} is not already true, resets the iterator to be agnostic about the next
+ * and previous orders, and then traverses orders starting from either {@link #candidateOrder} (if
+ * not null), or the first candidate order in the interval if {@link #candidateOrder} is null.
+ *
+ * It sets {@link #previousOrder} to the first encountered order with scheduled transaction metadata;
+ * and sets {@link #nextOrder} and {@link #nextSchedule} to the first encountered order with an
+ * executable schedule.
+ * @return the order of the next executable transaction to be processed, or null if there are no more
+ */
+ private @Nullable ScheduledOrder prepNext() {
+ if (nextKnown) {
+ return nextOrder;
+ }
+ nextOrder = null;
+ nextSchedule = null;
+ previousOrder = null;
+ // Pointer to the order of the next schedule that should possibly be executed
+ ScheduledOrder order;
+ if (candidateOrder != null) {
+ order = candidateOrder;
+ } else {
+ final var startCounts = scheduleStore.scheduledCountsAt(startSecond);
+ if (startCounts == null) {
+ order = new ScheduledOrder(startSecond + 1, 0);
+ } else {
+ order = new ScheduledOrder(startSecond, startCounts.numberProcessed());
}
}
-
- private ExecutableTxn toExecutableTxn(final Schedule schedule) {
- final var signatories = schedule.signatories();
- final VerificationAssistant callback = (k, ignore) -> signatories.contains(k);
- return new ExecutableTxn(
- childAsOrdinary(schedule),
- callback,
- schedule.payerAccountId(),
- Instant.ofEpochSecond(schedule.calculatedExpirationSecond()));
+ while (order.expirySecond() <= endSecond) {
+ final var nextId = scheduleStore.getByOrder(order);
+ if (nextId != null) {
+ if (previousOrder == null) {
+ previousOrder = order;
+ }
+ final var schedule = requireNonNull(scheduleStore.get(nextId));
+ if (!schedule.waitForExpiry() || schedule.deleted()) {
+ order = next(order, false);
+ } else {
+ nextOrder = order;
+ nextSchedule = schedule;
+ break;
+ }
+ } else {
+ order = next(order, true);
+ }
}
- };
+ nextKnown = true;
+ return nextOrder;
+ }
+
+ private ScheduledOrder next(@NonNull final ScheduledOrder order, final boolean lastInSecond) {
+ return lastInSecond
+ ? new ScheduledOrder(order.expirySecond() + 1, 0)
+ : order.copyBuilder().orderNumber(order.orderNumber() + 1).build();
+ }
+ }
+
+ private static ExecutableTxn executableTxnFrom(
+ @NonNull final ReadableAccountStore accountStore, @NonNull final Schedule schedule) {
+ return new ExecutableTxn<>(
+ childAsOrdinary(schedule),
+ schedule.payerAccountIdOrThrow(),
+ simpleKeyVerifierFrom(accountStore, schedule.signatories()),
+ Instant.ofEpochSecond(schedule.calculatedExpirationSecond()),
+ ScheduleStreamBuilder.class,
+ builder -> builder.scheduleRef(schedule.scheduleIdOrThrow()));
}
}
diff --git a/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/ScheduleStoreUtility.java b/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/ScheduleStoreUtility.java
index 956fe683098e..710a8981cef8 100644
--- a/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/ScheduleStoreUtility.java
+++ b/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/ScheduleStoreUtility.java
@@ -16,6 +16,8 @@
package com.hedera.node.app.service.schedule.impl;
+import static java.util.Objects.requireNonNull;
+
import com.google.common.hash.Hasher;
import com.google.common.hash.Hashing;
import com.hedera.hapi.node.base.Key;
@@ -30,7 +32,6 @@
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
-import java.util.Objects;
/**
* Provides utility methods for the schedule store.
@@ -43,27 +44,25 @@ private ScheduleStoreUtility() {}
* Calculate bytes hash of a schedule based on the schedule's memo, admin key, scheduled transaction, expiration
* time, and wait for expiry flag.
*
- * @param scheduleToHash the schedule to hash
+ * @param schedule the schedule to hash
* @return the bytes
*/
@SuppressWarnings("UnstableApiUsage")
- public static Bytes calculateBytesHash(@NonNull final Schedule scheduleToHash) {
- Objects.requireNonNull(scheduleToHash);
+ public static Bytes calculateBytesHash(@NonNull final Schedule schedule) {
+ requireNonNull(schedule);
final Hasher hasher = Hashing.sha256().newHasher();
- if (scheduleToHash.memo() != null) {
- hasher.putString(scheduleToHash.memo(), StandardCharsets.UTF_8);
- }
- if (scheduleToHash.adminKey() != null) {
- addToHash(hasher, scheduleToHash.adminKey());
+ hasher.putString(schedule.memo(), StandardCharsets.UTF_8);
+ if (schedule.adminKey() != null) {
+ addToHash(hasher, schedule.adminKey());
}
// @note We should check scheduler here, but mono doesn't, so we cannot either, yet.
- if (scheduleToHash.scheduledTransaction() != null) {
- addToHash(hasher, scheduleToHash.scheduledTransaction());
+ if (schedule.scheduledTransaction() != null) {
+ addToHash(hasher, schedule.scheduledTransaction());
}
// @todo('9447') This should be modified to use calculated expiration once
// differential testing completes
- hasher.putLong(scheduleToHash.providedExpirationSecond());
- hasher.putBoolean(scheduleToHash.waitForExpiry());
+ hasher.putLong(schedule.providedExpirationSecond());
+ hasher.putBoolean(schedule.waitForExpiry());
return Bytes.wrap(hasher.hash().asBytes());
}
diff --git a/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/WritableScheduleStoreImpl.java b/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/WritableScheduleStoreImpl.java
index 1ce6d529e559..42d09b4d1967 100644
--- a/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/WritableScheduleStoreImpl.java
+++ b/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/WritableScheduleStoreImpl.java
@@ -16,14 +16,16 @@
package com.hedera.node.app.service.schedule.impl;
-import static com.hedera.node.app.service.schedule.impl.ScheduleStoreUtility.add;
+import static java.util.Objects.requireNonNull;
import com.hedera.hapi.node.base.ScheduleID;
import com.hedera.hapi.node.base.Timestamp;
+import com.hedera.hapi.node.base.TimestampSeconds;
import com.hedera.hapi.node.state.primitives.ProtoBytes;
-import com.hedera.hapi.node.state.primitives.ProtoLong;
import com.hedera.hapi.node.state.schedule.Schedule;
-import com.hedera.hapi.node.state.schedule.ScheduleIdList;
+import com.hedera.hapi.node.state.schedule.ScheduledCounts;
+import com.hedera.hapi.node.state.schedule.ScheduledOrder;
+import com.hedera.hapi.node.state.throttles.ThrottleUsageSnapshots;
import com.hedera.node.app.service.schedule.WritableScheduleStore;
import com.hedera.node.app.service.schedule.impl.schemas.V0490ScheduleSchema;
import com.hedera.node.app.service.schedule.impl.schemas.V0570ScheduleSchema;
@@ -36,7 +38,6 @@
import edu.umd.cs.findbugs.annotations.NonNull;
import edu.umd.cs.findbugs.annotations.Nullable;
import java.time.Instant;
-import java.util.Objects;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@@ -45,15 +46,13 @@
* schedule objects as a result of ScheduleCreate, ScheduleSign, or ScheduleDelete transactions.
*/
public class WritableScheduleStoreImpl extends ReadableScheduleStoreImpl implements WritableScheduleStore {
-
private static final Logger logger = LogManager.getLogger(WritableScheduleStoreImpl.class);
- private static final String SCHEDULE_NULL_FOR_DELETE_MESSAGE =
- "Request to delete null schedule ID cannot be fulfilled.";
- private static final String SCHEDULE_MISSING_FOR_DELETE_MESSAGE =
- "Schedule to be deleted, %1$s, not found in state.";
+
private final WritableKVState schedulesByIdMutable;
private final WritableKVState scheduleIdByEqualityMutable;
- private final WritableKVState scheduleIdsByExpirationMutable;
+ private final WritableKVState scheduleCountsMutable;
+ private final WritableKVState scheduleUsagesMutable;
+ private final WritableKVState scheduleOrdersMutable;
/**
* Create a new {@link WritableScheduleStoreImpl} instance.
@@ -67,9 +66,14 @@ public WritableScheduleStoreImpl(
@NonNull final Configuration configuration,
@NonNull final StoreMetricsService storeMetricsService) {
super(states);
+ requireNonNull(configuration);
+ requireNonNull(storeMetricsService);
+
schedulesByIdMutable = states.get(V0490ScheduleSchema.SCHEDULES_BY_ID_KEY);
+ scheduleCountsMutable = states.get(V0570ScheduleSchema.SCHEDULED_COUNTS_KEY);
+ scheduleOrdersMutable = states.get(V0570ScheduleSchema.SCHEDULED_ORDERS_KEY);
+ scheduleUsagesMutable = states.get(V0570ScheduleSchema.SCHEDULED_USAGES_KEY);
scheduleIdByEqualityMutable = states.get(V0570ScheduleSchema.SCHEDULE_ID_BY_EQUALITY_KEY);
- scheduleIdsByExpirationMutable = states.get(V0570ScheduleSchema.SCHEDULE_IDS_BY_EXPIRY_SEC_KEY);
final long maxCapacity =
configuration.getConfigData(SchedulingConfig.class).maxNumber();
@@ -81,36 +85,29 @@ public WritableScheduleStoreImpl(
* Delete a given schedule from this state.
* Given the ID of a schedule and a consensus time, delete that ID from this state as of the
* consensus time {@link Instant} provided.
- * @param scheduleToDelete The ID of a schedule to be deleted.
+ * @param scheduleId The ID of a schedule to be deleted.
* @param consensusTime The current consensus time
* @return the {@link Schedule} marked as deleted
- * @throws IllegalStateException if the {@link ScheduleID} to be deleted is not present in this state,
- * or the ID value has a mismatched realm or shard for this node.
+ * @throws IllegalStateException if the {@link ScheduleID} to be deleted is not present in this state
*/
- @SuppressWarnings("DataFlowIssue")
@Override
- @NonNull
- public Schedule delete(@Nullable final ScheduleID scheduleToDelete, @NonNull final Instant consensusTime) {
- Objects.requireNonNull(consensusTime, "Null consensusTime provided to schedule delete, cannot proceed.");
- if (scheduleToDelete != null) {
- final Schedule schedule = schedulesByIdMutable.getForModify(scheduleToDelete);
- if (schedule != null) {
- final Schedule deletedSchedule = markDeleted(schedule, consensusTime);
- schedulesByIdMutable.put(scheduleToDelete, deletedSchedule);
- return schedulesByIdMutable.get(scheduleToDelete);
- } else {
- throw new IllegalStateException(SCHEDULE_MISSING_FOR_DELETE_MESSAGE.formatted(scheduleToDelete));
- }
- } else {
- throw new IllegalStateException(SCHEDULE_NULL_FOR_DELETE_MESSAGE);
+ public @NonNull Schedule delete(@Nullable final ScheduleID scheduleId, @NonNull final Instant consensusTime) {
+ requireNonNull(consensusTime);
+ requireNonNull(scheduleId);
+ final var schedule = schedulesByIdMutable.getForModify(scheduleId);
+ if (schedule == null) {
+ throw new IllegalStateException("Schedule to be deleted, %1$s, not found in state.".formatted(scheduleId));
}
+ final var deletedSchedule = markDeleted(schedule, consensusTime);
+ schedulesByIdMutable.put(scheduleId, deletedSchedule);
+ return deletedSchedule;
}
@Override
- public Schedule getForModify(@Nullable final ScheduleID idToFind) {
+ public Schedule getForModify(@Nullable final ScheduleID scheduleId) {
final Schedule result;
- if (idToFind != null) {
- result = schedulesByIdMutable.getForModify(idToFind);
+ if (scheduleId != null) {
+ result = schedulesByIdMutable.getForModify(scheduleId);
} else {
result = null;
}
@@ -118,31 +115,74 @@ public Schedule getForModify(@Nullable final ScheduleID idToFind) {
}
@Override
- public void put(@NonNull final Schedule scheduleToAdd) {
- schedulesByIdMutable.put(scheduleToAdd.scheduleIdOrThrow(), scheduleToAdd);
-
- final ProtoBytes newHash = new ProtoBytes(ScheduleStoreUtility.calculateBytesHash(scheduleToAdd));
- scheduleIdByEqualityMutable.put(newHash, scheduleToAdd.scheduleIdOrThrow());
-
- // calculated expiration time is never null...
- final ProtoLong expirationSecond = new ProtoLong(scheduleToAdd.calculatedExpirationSecond());
- final ScheduleIdList inStateExpiration = scheduleIdsByExpirationMutable.get(expirationSecond);
- // we should not be modifying the scheduleIds list directly. This could cause ISS
- final var newExpiryScheduleIdList = add(scheduleToAdd.scheduleId(), inStateExpiration);
- scheduleIdsByExpirationMutable.put(expirationSecond, newExpiryScheduleIdList);
+ public void put(@NonNull final Schedule schedule) {
+ requireNonNull(schedule);
+ final var scheduleId = schedule.scheduleIdOrThrow();
+ final var extant = schedulesByIdMutable.get(scheduleId);
+ schedulesByIdMutable.put(scheduleId, schedule);
+ // Updating a schedule that already exists in the store has no other side-effects
+ if (extant != null) {
+ return;
+ }
+ final var equalityKey = new ProtoBytes(ScheduleStoreUtility.calculateBytesHash(schedule));
+ scheduleIdByEqualityMutable.put(equalityKey, scheduleId);
+ final var second = schedule.calculatedExpirationSecond();
+ final var countsKey = new TimestampSeconds(second);
+ final var oldCounts = scheduleCountsMutable.get(countsKey);
+ final var counts = oldCounts == null
+ ? new ScheduledCounts(1, 0)
+ : new ScheduledCounts(oldCounts.numberScheduled() + 1, oldCounts.numberProcessed());
+ scheduleCountsMutable.put(countsKey, counts);
+ final var orderKey = new ScheduledOrder(second, counts.numberScheduled() - 1);
+ scheduleOrdersMutable.put(orderKey, schedule.scheduleIdOrThrow());
+ }
+
+ @Override
+ public boolean purgeByOrder(@NonNull final ScheduledOrder order) {
+ requireNonNull(order);
+ final var scheduleId = getByOrder(order);
+ if (scheduleId != null) {
+ final var key = new TimestampSeconds(order.expirySecond());
+ final var counts = requireNonNull(scheduleCountsMutable.get(key));
+ if (order.orderNumber() != counts.numberProcessed()) {
+ throw new IllegalStateException("Order %s is not next in counts %s".formatted(order, counts));
+ }
+ purge(scheduleId);
+ scheduleOrdersMutable.remove(order);
+ final var newCounts = counts.copyBuilder()
+ .numberProcessed(counts.numberProcessed() + 1)
+ .build();
+ if (newCounts.numberProcessed() < newCounts.numberScheduled()) {
+ scheduleCountsMutable.put(key, newCounts);
+ return false;
+ } else {
+ scheduleCountsMutable.remove(key);
+ scheduleUsagesMutable.remove(key);
+ }
+ }
+ return true;
+ }
+
+ @Override
+ public void trackUsage(final long consensusSecond, @NonNull final ThrottleUsageSnapshots usageSnapshots) {
+ requireNonNull(usageSnapshots);
+ scheduleUsagesMutable.put(new TimestampSeconds(consensusSecond), usageSnapshots);
}
- /**
- * {@inheritDoc}
- */
@Override
- public void purgeExpiredSchedulesBetween(final long firstSecondToExpire, final long lastSecondToExpire) {
- for (long i = firstSecondToExpire; i <= lastSecondToExpire; i++) {
- final var second = new ProtoLong(i);
- final var scheduleIdList = scheduleIdsByExpirationMutable.get(second);
- if (scheduleIdList != null) {
- scheduleIdList.scheduleIds().forEach(this::purge);
- scheduleIdsByExpirationMutable.remove(second);
+ public void purgeExpiredRangeClosed(final long start, final long end) {
+ for (long i = start; i <= end; i++) {
+ final var countsAndUsagesKey = new TimestampSeconds(i);
+ final var counts = scheduleCountsMutable.get(countsAndUsagesKey);
+ if (counts != null) {
+ for (int j = 0, n = counts.numberScheduled(); j < n; j++) {
+ final var orderKey = new ScheduledOrder(i, j);
+ final var scheduleId = requireNonNull(scheduleOrdersMutable.get(orderKey));
+ purge(scheduleId);
+ scheduleOrdersMutable.remove(orderKey);
+ }
+ scheduleCountsMutable.remove(countsAndUsagesKey);
+ scheduleUsagesMutable.remove(countsAndUsagesKey);
}
}
}
@@ -152,7 +192,7 @@ public void purgeExpiredSchedulesBetween(final long firstSecondToExpire, final l
*
* @param scheduleId The ID of the schedule to purge
*/
- private void purge(final ScheduleID scheduleId) {
+ private void purge(@NonNull final ScheduleID scheduleId) {
final var schedule = schedulesByIdMutable.get(scheduleId);
if (schedule != null) {
final ProtoBytes hash = new ProtoBytes(ScheduleStoreUtility.calculateBytesHash(schedule));
diff --git a/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/AbstractScheduleHandler.java b/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/AbstractScheduleHandler.java
index 4006495b07f2..e69ca50078af 100644
--- a/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/AbstractScheduleHandler.java
+++ b/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/AbstractScheduleHandler.java
@@ -66,7 +66,7 @@
* Provides some implementation support needed for both the {@link ScheduleCreateHandler} and {@link
* ScheduleSignHandler}.
*/
-abstract class AbstractScheduleHandler {
+public abstract class AbstractScheduleHandler {
static final Comparator KEY_COMPARATOR = new KeyComparator();
@FunctionalInterface
@@ -295,7 +295,7 @@ protected boolean tryToExecuteSchedule(
* @param signatories the approving signatories
* @return the key verifier
*/
- static Predicate simpleKeyVerifierFrom(
+ public static Predicate simpleKeyVerifierFrom(
@NonNull final ReadableAccountStore accountStore, @NonNull final List signatories) {
final Set cryptoSigs = new HashSet<>();
final Set contractIdSigs = new HashSet<>();
diff --git a/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleCreateHandler.java b/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleCreateHandler.java
index fd6517a2870b..eaba5519a14e 100644
--- a/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleCreateHandler.java
+++ b/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleCreateHandler.java
@@ -24,9 +24,13 @@
import static com.hedera.hapi.node.base.ResponseCodeEnum.MAX_ENTITIES_IN_PRICE_REGIME_HAVE_BEEN_CREATED;
import static com.hedera.hapi.node.base.ResponseCodeEnum.MEMO_TOO_LONG;
import static com.hedera.hapi.node.base.ResponseCodeEnum.SCHEDULED_TRANSACTION_NOT_IN_WHITELIST;
+import static com.hedera.hapi.node.base.ResponseCodeEnum.SCHEDULE_EXPIRY_IS_BUSY;
+import static com.hedera.hapi.node.base.ResponseCodeEnum.SCHEDULE_EXPIRY_MUST_BE_FUTURE;
+import static com.hedera.hapi.node.base.ResponseCodeEnum.SCHEDULE_EXPIRY_TOO_LONG;
import static com.hedera.hapi.node.base.SubType.DEFAULT;
import static com.hedera.hapi.node.base.SubType.SCHEDULE_CREATE_CONTRACT_CALL;
import static com.hedera.node.app.hapi.utils.CommonPbjConverters.fromPbj;
+import static com.hedera.node.app.service.schedule.impl.handlers.HandlerUtility.childAsOrdinary;
import static com.hedera.node.app.service.schedule.impl.handlers.HandlerUtility.createProvisionalSchedule;
import static com.hedera.node.app.service.schedule.impl.handlers.HandlerUtility.functionalityForType;
import static com.hedera.node.app.service.schedule.impl.handlers.HandlerUtility.transactionIdForScheduled;
@@ -50,6 +54,7 @@
import com.hedera.node.app.service.token.ReadableAccountStore;
import com.hedera.node.app.spi.fees.FeeContext;
import com.hedera.node.app.spi.fees.Fees;
+import com.hedera.node.app.spi.throttle.Throttle;
import com.hedera.node.app.spi.workflows.HandleContext;
import com.hedera.node.app.spi.workflows.HandleException;
import com.hedera.node.app.spi.workflows.PreCheckException;
@@ -61,6 +66,7 @@
import com.hederahashgraph.api.proto.java.FeeData;
import edu.umd.cs.findbugs.annotations.NonNull;
import edu.umd.cs.findbugs.annotations.Nullable;
+import java.time.Instant;
import java.time.InstantSource;
import java.util.Collections;
import java.util.Objects;
@@ -74,10 +80,13 @@
public class ScheduleCreateHandler extends AbstractScheduleHandler implements TransactionHandler {
private final ScheduleOpsUsage scheduleOpsUsage = new ScheduleOpsUsage();
private final InstantSource instantSource;
+ private final Throttle.Factory throttleFactory;
@Inject
- public ScheduleCreateHandler(@NonNull final InstantSource instantSource) {
- this.instantSource = instantSource;
+ public ScheduleCreateHandler(
+ @NonNull final InstantSource instantSource, @NonNull final Throttle.Factory throttleFactory) {
+ this.instantSource = requireNonNull(instantSource);
+ this.throttleFactory = requireNonNull(throttleFactory);
}
@Override
@@ -135,12 +144,16 @@ public void handle(@NonNull final HandleContext context) throws HandleException
final var schedulingConfig = context.configuration().getConfigData(SchedulingConfig.class);
final boolean isLongTermEnabled = schedulingConfig.longTermEnabled();
final var ledgerConfig = context.configuration().getConfigData(LedgerConfig.class);
- final var expirationSeconds = isLongTermEnabled
+ final var maxLifetime = isLongTermEnabled
? schedulingConfig.maxExpirationFutureSeconds()
: ledgerConfig.scheduleTxExpiryTimeSecs();
final var consensusNow = context.consensusNow();
final var provisionalSchedule =
- createProvisionalSchedule(context.body(), consensusNow, expirationSeconds, isLongTermEnabled);
+ createProvisionalSchedule(context.body(), consensusNow, maxLifetime, isLongTermEnabled);
+ final var now = consensusNow.getEpochSecond();
+ final var then = provisionalSchedule.calculatedExpirationSecond();
+ validateTrue(then > now, SCHEDULE_EXPIRY_MUST_BE_FUTURE);
+ validateTrue(then <= now + maxLifetime, SCHEDULE_EXPIRY_TOO_LONG);
validateTrue(
isAllowedFunction(provisionalSchedule.scheduledTransactionOrThrow(), schedulingConfig),
SCHEDULED_TRANSACTION_NOT_IN_WHITELIST);
@@ -180,6 +193,17 @@ public void handle(@NonNull final HandleContext context) throws HandleException
validateTrue(
scheduleStore.numSchedulesInState() + 1 <= schedulingConfig.maxNumber(),
MAX_ENTITIES_IN_PRICE_REGIME_HAVE_BEEN_CREATED);
+ final var capacityFraction = schedulingConfig.schedulableCapacityFraction();
+ final var usageSnapshots = scheduleStore.usageSnapshotsForScheduled(then);
+ final var throttle = throttleFactory.newThrottle(capacityFraction.asApproxCapacitySplit(), usageSnapshots);
+ validateTrue(
+ throttle.allow(
+ provisionalSchedule.payerAccountIdOrThrow(),
+ childAsOrdinary(provisionalSchedule),
+ functionOf(provisionalSchedule),
+ Instant.ofEpochSecond(then)),
+ SCHEDULE_EXPIRY_IS_BUSY);
+ scheduleStore.trackUsage(then, throttle.usageSnapshots());
// With all validations done, we check if the new schedule is already executable
final var transactionKeys = getTransactionKeysOrThrow(provisionalSchedule, context::allKeysForTransaction);
@@ -276,4 +300,9 @@ private boolean isAllowedFunction(
final var scheduledFunctionality = functionalityForType(body.data().kind());
return config.whitelist().functionalitySet().contains(scheduledFunctionality);
}
+
+ private HederaFunctionality functionOf(@NonNull final Schedule schedule) {
+ return functionalityForType(
+ schedule.scheduledTransactionOrThrow().data().kind());
+ }
}
diff --git a/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/schemas/V0570ScheduleSchema.java b/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/schemas/V0570ScheduleSchema.java
index 9e2f4b38646b..3d38a236b9a2 100644
--- a/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/schemas/V0570ScheduleSchema.java
+++ b/hedera-node/hedera-schedule-service-impl/src/main/java/com/hedera/node/app/service/schedule/impl/schemas/V0570ScheduleSchema.java
@@ -20,23 +20,25 @@
import static com.hedera.node.app.service.schedule.impl.schemas.V0490ScheduleSchema.SCHEDULES_BY_EXPIRY_SEC_KEY;
import static java.util.Objects.requireNonNull;
import static java.util.Spliterator.DISTINCT;
+import static java.util.Spliterators.spliterator;
import com.hedera.hapi.node.base.ScheduleID;
import com.hedera.hapi.node.base.SemanticVersion;
+import com.hedera.hapi.node.base.TimestampSeconds;
import com.hedera.hapi.node.state.primitives.ProtoBytes;
import com.hedera.hapi.node.state.primitives.ProtoLong;
-import com.hedera.hapi.node.state.schedule.Schedule;
-import com.hedera.hapi.node.state.schedule.ScheduleIdList;
import com.hedera.hapi.node.state.schedule.ScheduleList;
+import com.hedera.hapi.node.state.schedule.ScheduledCounts;
+import com.hedera.hapi.node.state.schedule.ScheduledOrder;
+import com.hedera.hapi.node.state.throttles.ThrottleUsageSnapshots;
import com.swirlds.state.lifecycle.MigrationContext;
import com.swirlds.state.lifecycle.Schema;
import com.swirlds.state.lifecycle.StateDefinition;
import com.swirlds.state.spi.ReadableKVState;
import com.swirlds.state.spi.WritableKVState;
import edu.umd.cs.findbugs.annotations.NonNull;
-import java.util.Objects;
import java.util.Set;
-import java.util.Spliterators;
+import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.StreamSupport;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@@ -46,15 +48,31 @@
*/
public final class V0570ScheduleSchema extends Schema {
private static final Logger log = LogManager.getLogger(V0570ScheduleSchema.class);
- private static final long MAX_SCHEDULE_IDS_BY_EXPIRY_SEC_KEY = 50_000_000L;
+
+ private static final long MAX_SCHEDULED_COUNTS = 50_000_000L;
+ private static final long MAX_SCHEDULED_ORDERS = 50_000_000L;
+ private static final long MAX_SCHEDULED_USAGES = 50_000_000L;
private static final long MAX_SCHEDULE_ID_BY_EQUALITY = 50_000_000L;
- /**
- * The version of the schema.
- */
private static final SemanticVersion VERSION =
SemanticVersion.newBuilder().major(0).minor(57).patch(0).build();
-
- public static final String SCHEDULE_IDS_BY_EXPIRY_SEC_KEY = "SCHEDULE_IDS_BY_EXPIRY_SEC";
+ /**
+ * The state key of a map from consensus second to the counts of transactions scheduled
+ * and processed within that second.
+ */
+ public static final String SCHEDULED_COUNTS_KEY = "SCHEDULED_COUNTS";
+ /**
+ * The state key of a map from an order position within a consensus second to the id of
+ * the transaction scheduled to execute in that order within that second.
+ */
+ public static final String SCHEDULED_ORDERS_KEY = "SCHEDULED_ORDERS";
+ /**
+ * The state key of a map from consensus second to the throttle utilization of transactions
+ * scheduled so far in that second.
+ */
+ public static final String SCHEDULED_USAGES_KEY = "SCHEDULED_USAGES";
+ /**
+ * The state key of a map from a hash of the schedule's equality values to its schedule id.
+ */
public static final String SCHEDULE_ID_BY_EQUALITY_KEY = "SCHEDULE_ID_BY_EQUALITY";
/**
@@ -65,15 +83,13 @@ public V0570ScheduleSchema() {
}
@SuppressWarnings("rawtypes")
- @NonNull
@Override
- public Set statesToCreate() {
- return Set.of(scheduleIdsByExpirySec(), scheduleIdByEquality());
+ public @NonNull Set statesToCreate() {
+ return Set.of(scheduleIdByEquality(), scheduledOrders(), scheduledCounts(), scheduledUsages());
}
- @NonNull
@Override
- public Set statesToRemove() {
+ public @NonNull Set statesToRemove() {
return Set.of(SCHEDULES_BY_EXPIRY_SEC_KEY, SCHEDULES_BY_EQUALITY_KEY);
}
@@ -81,30 +97,39 @@ public Set statesToRemove() {
public void migrate(@NonNull final MigrationContext ctx) {
requireNonNull(ctx);
- log.info("Started migrating Schedule Schema from 0.49.0 to 0.57.0");
- final WritableKVState writableScheduleIdsByExpirySec =
- ctx.newStates().get(SCHEDULE_IDS_BY_EXPIRY_SEC_KEY);
- final ReadableKVState readableSchedulesByExpirySec =
+ final ReadableKVState schedulesByExpiry =
ctx.previousStates().get(SCHEDULES_BY_EXPIRY_SEC_KEY);
- StreamSupport.stream(
- Spliterators.spliterator(
- readableSchedulesByExpirySec.keys(), readableSchedulesByExpirySec.size(), DISTINCT),
- false)
- .forEach(key -> {
- final var scheduleList = readableSchedulesByExpirySec.get(key);
+ final WritableKVState scheduledCounts =
+ ctx.newStates().get(SCHEDULED_COUNTS_KEY);
+ final WritableKVState scheduledOrders =
+ ctx.newStates().get(SCHEDULED_ORDERS_KEY);
+
+ final var secondsMigrated = new AtomicInteger();
+ final var schedulesMigrated = new AtomicInteger();
+ StreamSupport.stream(spliterator(schedulesByExpiry.keys(), schedulesByExpiry.size(), DISTINCT), false)
+ .forEach(second -> {
+ final var scheduleList = schedulesByExpiry.get(second);
if (scheduleList != null) {
- writableScheduleIdsByExpirySec.put(key, convertToScheduleIdList(scheduleList));
+ secondsMigrated.incrementAndGet();
+ final var schedules = scheduleList.schedules();
+ final var n = schedules.size();
+ scheduledCounts.put(new TimestampSeconds(second.value()), new ScheduledCounts(n, 0));
+ for (int i = 0; i < n; i++) {
+ scheduledOrders.put(
+ new ScheduledOrder(second.value(), i),
+ schedules.get(i).scheduleIdOrThrow());
+ }
+ schedulesMigrated.addAndGet(n);
}
});
- log.info("Migrated {} Schedules from SCHEDULES_BY_EXPIRY_SEC_KEY", readableSchedulesByExpirySec.size());
+ log.info("Migrated {} schedules from {} seconds", schedulesMigrated.get(), secondsMigrated.get());
final WritableKVState writableScheduleByEquality =
ctx.newStates().get(SCHEDULE_ID_BY_EQUALITY_KEY);
final ReadableKVState readableSchedulesByEquality =
ctx.previousStates().get(SCHEDULES_BY_EQUALITY_KEY);
StreamSupport.stream(
- Spliterators.spliterator(
- readableSchedulesByEquality.keys(), readableSchedulesByEquality.size(), DISTINCT),
+ spliterator(readableSchedulesByEquality.keys(), readableSchedulesByEquality.size(), DISTINCT),
false)
.forEach(key -> {
final var scheduleList = readableSchedulesByEquality.get(key);
@@ -115,25 +140,22 @@ public void migrate(@NonNull final MigrationContext ctx) {
writableScheduleByEquality.put(key, newScheduleId);
}
});
- log.info("Migrated {} Schedules from SCHEDULES_BY_EQUALITY_KEY", readableSchedulesByEquality.size());
+ log.info("Migrated {} schedules from SCHEDULES_BY_EQUALITY_KEY", readableSchedulesByEquality.size());
}
- private ScheduleIdList convertToScheduleIdList(@NonNull final ScheduleList scheduleList) {
- return ScheduleIdList.newBuilder()
- .scheduleIds(scheduleList.schedules().stream()
- .map(Schedule::scheduleId)
- .filter(Objects::nonNull)
- .map(id -> id.copyBuilder().build())
- .toList())
- .build();
+ private static StateDefinition scheduledCounts() {
+ return StateDefinition.onDisk(
+ SCHEDULED_COUNTS_KEY, TimestampSeconds.PROTOBUF, ScheduledCounts.PROTOBUF, MAX_SCHEDULED_COUNTS);
+ }
+
+ private static StateDefinition scheduledOrders() {
+ return StateDefinition.onDisk(
+ SCHEDULED_ORDERS_KEY, ScheduledOrder.PROTOBUF, ScheduleID.PROTOBUF, MAX_SCHEDULED_ORDERS);
}
- private static StateDefinition scheduleIdsByExpirySec() {
+ private static StateDefinition scheduledUsages() {
return StateDefinition.onDisk(
- SCHEDULE_IDS_BY_EXPIRY_SEC_KEY,
- ProtoLong.PROTOBUF,
- ScheduleIdList.PROTOBUF,
- MAX_SCHEDULE_IDS_BY_EXPIRY_SEC_KEY);
+ SCHEDULED_USAGES_KEY, TimestampSeconds.PROTOBUF, ThrottleUsageSnapshots.PROTOBUF, MAX_SCHEDULED_USAGES);
}
private static StateDefinition scheduleIdByEquality() {
diff --git a/hedera-node/hedera-schedule-service-impl/src/main/java/module-info.java b/hedera-node/hedera-schedule-service-impl/src/main/java/module-info.java
index ae3d52e1be9f..1192606c8040 100644
--- a/hedera-node/hedera-schedule-service-impl/src/main/java/module-info.java
+++ b/hedera-node/hedera-schedule-service-impl/src/main/java/module-info.java
@@ -1,6 +1,7 @@
module com.hedera.node.app.service.schedule.impl {
requires transitive com.hedera.node.app.hapi.fees;
requires transitive com.hedera.node.app.service.schedule;
+ requires transitive com.hedera.node.app.service.token;
requires transitive com.hedera.node.app.spi;
requires transitive com.hedera.node.hapi;
requires transitive com.swirlds.config.api;
@@ -10,7 +11,6 @@
requires transitive static java.compiler; // javax.annotation.processing.Generated
requires transitive javax.inject;
requires com.hedera.node.app.hapi.utils;
- requires com.hedera.node.app.service.token; // ReadableAccountStore: payer account details on create, sign, query
requires com.hedera.node.config;
requires com.google.common;
requires org.apache.logging.log4j;
diff --git a/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/ScheduleServiceImplTest.java b/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/ScheduleServiceImplTest.java
index 92cb5878d411..fcc10e15940e 100644
--- a/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/ScheduleServiceImplTest.java
+++ b/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/ScheduleServiceImplTest.java
@@ -17,37 +17,21 @@
package com.hedera.node.app.service.schedule.impl;
import static org.assertj.core.api.Assertions.assertThat;
-import static org.assertj.core.api.Assertions.assertThatThrownBy;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyLong;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.Mockito.inOrder;
-import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-import com.hedera.hapi.node.base.TransactionID;
-import com.hedera.hapi.node.state.schedule.Schedule;
-import com.hedera.hapi.node.transaction.TransactionBody;
import com.hedera.node.app.service.schedule.ScheduleService;
import com.hedera.node.app.service.schedule.impl.schemas.V0490ScheduleSchema;
import com.hedera.node.app.service.schedule.impl.schemas.V0570ScheduleSchema;
-import com.hedera.node.app.spi.store.StoreFactory;
import com.swirlds.state.lifecycle.Schema;
import com.swirlds.state.lifecycle.SchemaRegistry;
import com.swirlds.state.lifecycle.StateDefinition;
-import java.time.Instant;
-import java.util.Collections;
import java.util.List;
-import java.util.NoSuchElementException;
import java.util.Set;
-import java.util.function.Supplier;
import org.assertj.core.api.BDDAssertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.ArgumentCaptor;
-import org.mockito.InOrder;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
@@ -56,12 +40,6 @@ class ScheduleServiceImplTest {
@Mock
private SchemaRegistry registry;
- private StoreFactory storeFactory;
- private ReadableScheduleStoreImpl readableStore;
- private WritableScheduleStoreImpl writableStore;
- private Supplier cleanupStoreFactory;
- private ScheduleService scheduleService;
-
@Test
void testsSpi() {
final ScheduleService service = new ScheduleServiceImpl();
@@ -96,220 +74,10 @@ void registersExpectedSchema() {
BDDAssertions.assertThat(statesToCreate).isNotNull();
statesList =
statesToCreate.stream().map(StateDefinition::stateKey).sorted().toList();
- BDDAssertions.assertThat(statesToCreate.size()).isEqualTo(2);
- BDDAssertions.assertThat(statesList.get(0)).isEqualTo(V0570ScheduleSchema.SCHEDULE_IDS_BY_EXPIRY_SEC_KEY);
- BDDAssertions.assertThat(statesList.get(1)).isEqualTo(V0570ScheduleSchema.SCHEDULE_ID_BY_EQUALITY_KEY);
- }
-
- @Test
- void testBasicIteration() {
- setUpMocks();
- // Given two schedules within the interval
- final var schedule1 = createMockSchedule(Instant.now().plusSeconds(60));
- final var schedule2 = createMockSchedule(Instant.now().plusSeconds(120));
- when(readableStore.getByExpirationBetween(anyLong(), anyLong())).thenReturn(List.of(schedule1, schedule2));
-
- final var iterator =
- scheduleService.iterTxnsForInterval(Instant.now(), Instant.now().plusSeconds(180), cleanupStoreFactory);
-
- // Assert both schedules can be iterated over
- assertThat(iterator.hasNext()).isTrue();
- assertThat(iterator.next()).isNotNull();
- assertThat(iterator.hasNext()).isTrue();
- assertThat(iterator.next()).isNotNull();
- assertThat(iterator.hasNext()).isFalse();
- }
-
- @Test
- void testEmptyList() {
- setUpMocks();
- // No schedules within the interval
- when(readableStore.getByExpirationBetween(anyLong(), anyLong())).thenReturn(List.of());
-
- final var iterator =
- scheduleService.iterTxnsForInterval(Instant.now(), Instant.now().plusSeconds(180), cleanupStoreFactory);
-
- // Assert that iterator has no elements
- assertThat(iterator.hasNext()).isFalse();
- assertThatThrownBy(iterator::next).isInstanceOf(NoSuchElementException.class);
- }
-
- @Test
- void testDeleteFunctionality() {
- setUpMocks();
- // Given one schedule
- final var schedule = createMockSchedule(Instant.now().plusSeconds(60));
- when(readableStore.getByExpirationBetween(anyLong(), anyLong())).thenReturn(List.of(schedule));
-
- final var iterator =
- scheduleService.iterTxnsForInterval(Instant.now(), Instant.now().plusSeconds(120), cleanupStoreFactory);
-
- assertThat(iterator.hasNext()).isTrue();
- final var txn = iterator.next();
- assertThat(txn).isNotNull();
-
- // Test remove
- iterator.remove();
-
- // Verify that delete and purge were called on the store
- final InOrder inOrder = inOrder(writableStore);
- inOrder.verify(writableStore).delete(eq(schedule.scheduleId()), any());
- }
-
- @Test
- void testRemoveWithoutNextShouldThrowException() {
- setUpReadableStore();
- // Given one schedule
- final var schedule = mock(Schedule.class);
- when(readableStore.getByExpirationBetween(anyLong(), anyLong())).thenReturn(List.of(schedule));
-
- final var iterator =
- scheduleService.iterTxnsForInterval(Instant.now(), Instant.now().plusSeconds(120), cleanupStoreFactory);
-
- // Attempt to remove without calling next() should throw IllegalStateException
- assertThatThrownBy(iterator::remove).isInstanceOf(IllegalStateException.class);
- }
-
- @Test
- void testNextBeyondEndShouldThrowException() {
- setUpMocks();
- // Given one schedule
- final var schedule = createMockSchedule(Instant.now().plusSeconds(60));
- when(readableStore.getByExpirationBetween(anyLong(), anyLong())).thenReturn(List.of(schedule));
-
- final var iterator =
- scheduleService.iterTxnsForInterval(Instant.now(), Instant.now().plusSeconds(120), cleanupStoreFactory);
-
- assertThat(iterator.hasNext()).isTrue();
- iterator.next();
-
- // No more elements, calling next() should throw NoSuchElementException
- assertThatThrownBy(iterator::next).isInstanceOf(NoSuchElementException.class);
- }
-
- @Test
- void testFilterExecutedOrDeletedSchedules() {
- setUpMocks();
- // Given three schedules, one executed, one deleted, and one valid
- final var schedule1 = mockExecuted();
- final var schedule2 = mockDeleted();
- final var schedule3 = createMockSchedule(Instant.now().plusSeconds(180)); // Valid
-
- when(readableStore.getByExpirationBetween(anyLong(), anyLong()))
- .thenReturn(List.of(schedule1, schedule2, schedule3));
-
- final var iterator =
- scheduleService.iterTxnsForInterval(Instant.now(), Instant.now().plusSeconds(200), cleanupStoreFactory);
-
- // Only the valid schedule should be iterated over
- assertThat(iterator.hasNext()).isTrue();
- final var txn = iterator.next();
- assertThat(txn).isNotNull();
- assertThat(iterator.hasNext()).isFalse();
- }
-
- @Test
- void iteratorShouldCallCleanUpExpiredSchedulesOnceAfterFullIteration() {
- setUpMocks();
-
- final var schedule = createMockSchedule(Instant.now().plusSeconds(60));
- final var start = Instant.now();
- final var end = Instant.now().plusSeconds(120);
- when(readableStore.getByExpirationBetween(start.getEpochSecond(), end.getEpochSecond()))
- .thenReturn(List.of(schedule));
- final var iterator = scheduleService.iterTxnsForInterval(start, end, cleanupStoreFactory);
-
- // Iterate through all elements
- while (iterator.hasNext()) {
- iterator.next();
- }
-
- // Verify cleanUpExpiredSchedules is called exactly once
- verify(writableStore, times(1)).purgeExpiredSchedulesBetween(start.getEpochSecond(), end.getEpochSecond());
- }
-
- @Test
- void iteratorShouldTriggerCleanUpOnExcessiveNextCallsWithoutHasNext() {
- setUpMocks();
-
- final var schedule = createMockSchedule(Instant.now().plusSeconds(60));
- final var start = Instant.now();
- final var end = Instant.now().plusSeconds(120);
- when(readableStore.getByExpirationBetween(start.getEpochSecond(), end.getEpochSecond()))
- .thenReturn(List.of(schedule));
- final var iterator = scheduleService.iterTxnsForInterval(start, end, cleanupStoreFactory);
-
- // Exhaust the iterator without checking hasNext()
- iterator.next();
-
- // After elements are exhausted, calling next() again should trigger cleanup and throw NoSuchElementException
- assertThatThrownBy(iterator::next).isInstanceOf(NoSuchElementException.class);
-
- // Verify cleanUpExpiredSchedules is called exactly once
- verify(writableStore, times(1)).purgeExpiredSchedulesBetween(start.getEpochSecond(), end.getEpochSecond());
- }
-
- @Test
- void iteratorShouldNotCallCleanUpExpiredSchedulesMultipleTimesAfterCompletion() {
- setUpMocks();
-
- final var schedule = createMockSchedule(Instant.now().plusSeconds(60));
- final var start = Instant.now();
- final var end = Instant.now().plusSeconds(120);
- when(readableStore.getByExpirationBetween(start.getEpochSecond(), end.getEpochSecond()))
- .thenReturn(List.of(schedule));
- final var iterator = scheduleService.iterTxnsForInterval(start, end, cleanupStoreFactory);
-
- // Exhaust the iterator
- while (iterator.hasNext()) {
- iterator.next();
- }
-
- // First extra call to next() after completion
- assertThatThrownBy(iterator::next).isInstanceOf(NoSuchElementException.class);
-
- // Second extra call to next() to verify cleanup is not called again
- assertThatThrownBy(iterator::next).isInstanceOf(NoSuchElementException.class);
-
- // Verify cleanUpExpiredSchedules is only called once despite multiple calls to next()
- verify(writableStore, times(1)).purgeExpiredSchedulesBetween(start.getEpochSecond(), end.getEpochSecond());
- }
-
- private Schedule createMockSchedule(final Instant expiration) {
- final var schedule = mock(Schedule.class);
- final var createTransaction = mock(TransactionBody.class);
- when(createTransaction.transactionIDOrThrow()).thenReturn(TransactionID.DEFAULT);
- when(schedule.originalCreateTransactionOrThrow()).thenReturn(createTransaction);
- when(schedule.executed()).thenReturn(false);
- when(schedule.deleted()).thenReturn(false);
- when(schedule.calculatedExpirationSecond()).thenReturn(expiration.getEpochSecond());
- when(schedule.signatories()).thenReturn(Collections.emptyList()); // Customize as necessary
- return schedule;
- }
-
- private Schedule mockDeleted() {
- final var schedule = mock(Schedule.class);
- when(schedule.deleted()).thenReturn(true);
- return schedule;
- }
-
- private Schedule mockExecuted() {
- final var schedule = mock(Schedule.class);
- when(schedule.executed()).thenReturn(true);
- return schedule;
- }
-
- private void setUpReadableStore() {
- storeFactory = mock(StoreFactory.class);
- readableStore = mock(ReadableScheduleStoreImpl.class);
- cleanupStoreFactory = () -> storeFactory;
- scheduleService = new ScheduleServiceImpl();
- when(storeFactory.readableStore(ReadableScheduleStoreImpl.class)).thenReturn(readableStore);
- }
-
- private void setUpMocks() {
- setUpReadableStore();
- writableStore = mock(WritableScheduleStoreImpl.class);
- when(storeFactory.writableStore(WritableScheduleStoreImpl.class)).thenReturn(writableStore);
+ BDDAssertions.assertThat(statesToCreate.size()).isEqualTo(4);
+ BDDAssertions.assertThat(statesList.get(0)).isEqualTo(V0570ScheduleSchema.SCHEDULED_COUNTS_KEY);
+ BDDAssertions.assertThat(statesList.get(1)).isEqualTo(V0570ScheduleSchema.SCHEDULED_ORDERS_KEY);
+ BDDAssertions.assertThat(statesList.get(2)).isEqualTo(V0570ScheduleSchema.SCHEDULED_USAGES_KEY);
+ BDDAssertions.assertThat(statesList.get(3)).isEqualTo(V0570ScheduleSchema.SCHEDULE_ID_BY_EQUALITY_KEY);
}
}
diff --git a/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/ScheduleTestBase.java b/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/ScheduleTestBase.java
index 096e5d0583a4..fb6245892554 100644
--- a/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/ScheduleTestBase.java
+++ b/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/ScheduleTestBase.java
@@ -17,7 +17,9 @@
package com.hedera.node.app.service.schedule.impl;
import static com.hedera.node.app.service.schedule.impl.schemas.V0490ScheduleSchema.SCHEDULES_BY_ID_KEY;
-import static com.hedera.node.app.service.schedule.impl.schemas.V0570ScheduleSchema.SCHEDULE_IDS_BY_EXPIRY_SEC_KEY;
+import static com.hedera.node.app.service.schedule.impl.schemas.V0570ScheduleSchema.SCHEDULED_COUNTS_KEY;
+import static com.hedera.node.app.service.schedule.impl.schemas.V0570ScheduleSchema.SCHEDULED_ORDERS_KEY;
+import static com.hedera.node.app.service.schedule.impl.schemas.V0570ScheduleSchema.SCHEDULED_USAGES_KEY;
import static com.hedera.node.app.service.schedule.impl.schemas.V0570ScheduleSchema.SCHEDULE_ID_BY_EQUALITY_KEY;
import static org.mockito.BDDMockito.given;
import static org.mockito.Mockito.mock;
@@ -26,6 +28,7 @@
import com.hedera.hapi.node.base.Key;
import com.hedera.hapi.node.base.ScheduleID;
import com.hedera.hapi.node.base.Timestamp;
+import com.hedera.hapi.node.base.TimestampSeconds;
import com.hedera.hapi.node.base.TransactionID;
import com.hedera.hapi.node.consensus.ConsensusCreateTopicTransactionBody;
import com.hedera.hapi.node.consensus.ConsensusDeleteTopicTransactionBody;
@@ -50,6 +53,9 @@
import com.hedera.hapi.node.state.primitives.ProtoLong;
import com.hedera.hapi.node.state.schedule.Schedule;
import com.hedera.hapi.node.state.schedule.ScheduleIdList;
+import com.hedera.hapi.node.state.schedule.ScheduledCounts;
+import com.hedera.hapi.node.state.schedule.ScheduledOrder;
+import com.hedera.hapi.node.state.throttles.ThrottleUsageSnapshots;
import com.hedera.hapi.node.state.token.Account;
import com.hedera.hapi.node.token.CryptoApproveAllowanceTransactionBody;
import com.hedera.hapi.node.token.CryptoCreateTransactionBody;
@@ -190,9 +196,14 @@ public class ScheduleTestBase {
protected Map scheduleMapById;
protected Map scheduleMapByEquality;
protected Map scheduleMapByExpiration;
+ protected Map scheduledCounts;
+ protected Map scheduledOrders;
+ protected Map scheduledUsages;
protected WritableKVState writableById;
protected WritableKVState writableByEquality;
- protected WritableKVState writableByExpiration;
+ protected WritableKVState writableScheduledCounts;
+ protected WritableKVState writableScheduledUsages;
+ protected WritableKVState writableScheduledOrders;
protected Map> writableStatesMap;
protected ReadableStates states;
protected WritableStates scheduleStates;
@@ -234,7 +245,9 @@ protected void setUpBase() throws PreCheckException, InvalidKeyException {
protected void commitScheduleStores() {
commit(writableByEquality);
- commit(writableByExpiration);
+ commit(writableScheduledOrders);
+ commit(writableScheduledCounts);
+ commit(writableScheduledUsages);
commit(writableById);
}
@@ -446,16 +459,23 @@ private void setUpStates() {
scheduleMapById = new HashMap<>(0);
scheduleMapByEquality = new HashMap<>(0);
scheduleMapByExpiration = new HashMap<>(0);
+ scheduledCounts = new HashMap<>(0);
+ scheduledOrders = new HashMap<>(0);
+ scheduledUsages = new HashMap<>(0);
accountsMapById = new HashMap<>(0);
writableById = new MapWritableKVState<>(SCHEDULES_BY_ID_KEY, scheduleMapById);
writableByEquality = new MapWritableKVState<>(SCHEDULE_ID_BY_EQUALITY_KEY, scheduleMapByEquality);
- writableByExpiration = new MapWritableKVState<>(SCHEDULE_IDS_BY_EXPIRY_SEC_KEY, scheduleMapByExpiration);
+ writableScheduledCounts = new MapWritableKVState<>(SCHEDULED_COUNTS_KEY, scheduledCounts);
+ writableScheduledOrders = new MapWritableKVState<>(SCHEDULED_ORDERS_KEY, scheduledOrders);
+ writableScheduledUsages = new MapWritableKVState<>(SCHEDULED_USAGES_KEY, scheduledUsages);
accountById = new MapWritableKVState<>(ACCOUNT_STATE_KEY, accountsMapById);
accountAliases = new MapWritableKVState<>(ACCOUNT_ALIAS_STATE_KEY, new HashMap<>(0));
writableStatesMap = new TreeMap<>();
writableStatesMap.put(SCHEDULES_BY_ID_KEY, writableById);
writableStatesMap.put(SCHEDULE_ID_BY_EQUALITY_KEY, writableByEquality);
- writableStatesMap.put(SCHEDULE_IDS_BY_EXPIRY_SEC_KEY, writableByExpiration);
+ writableStatesMap.put(SCHEDULED_COUNTS_KEY, writableScheduledCounts);
+ writableStatesMap.put(SCHEDULED_ORDERS_KEY, writableScheduledOrders);
+ writableStatesMap.put(SCHEDULED_USAGES_KEY, writableScheduledUsages);
writableStatesMap.put(ACCOUNT_STATE_KEY, accountById);
writableStatesMap.put(ACCOUNT_ALIAS_STATE_KEY, accountAliases);
scheduleStates = new MapWritableStates(writableStatesMap);
diff --git a/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/WritableScheduleStoreImplTest.java b/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/WritableScheduleStoreImplTest.java
index 5db5d3ba1c4f..2581ad919767 100644
--- a/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/WritableScheduleStoreImplTest.java
+++ b/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/WritableScheduleStoreImplTest.java
@@ -22,8 +22,8 @@
import com.hedera.hapi.node.base.Key;
import com.hedera.hapi.node.base.ScheduleID;
import com.hedera.hapi.node.base.Timestamp;
+import com.hedera.hapi.node.base.TimestampSeconds;
import com.hedera.hapi.node.state.primitives.ProtoBytes;
-import com.hedera.hapi.node.state.primitives.ProtoLong;
import com.hedera.hapi.node.state.schedule.Schedule;
import com.hedera.node.app.spi.workflows.PreCheckException;
import edu.umd.cs.findbugs.annotations.NonNull;
@@ -73,13 +73,12 @@ void verifyDeleteNonExistentScheduleThrows() {
@Test
void verifyDeleteNullScheduleThrows() {
assertThatThrownBy(() -> writableSchedules.delete(null, testConsensusTime))
- .isInstanceOf(IllegalStateException.class)
- .hasMessage("Request to delete null schedule ID cannot be fulfilled.");
+ .isInstanceOf(NullPointerException.class);
}
@Test
void verifyPutModifiesState() {
- final ScheduleID idToDelete = scheduleInState.scheduleId();
+ final ScheduleID idToDelete = scheduleInState.scheduleIdOrThrow();
Schedule actual = writableById.getForModify(idToDelete);
assertThat(actual).isNotNull();
assertThat(actual.signatories()).containsExactlyInAnyOrderElementsOf(scheduleInState.signatories());
@@ -106,8 +105,10 @@ void verifyPutDoesDeduplication() {
final var equality = writableByEquality.get(hash);
assertThat(equality).isNotNull();
- final var expiryList = writableByExpiration.get(new ProtoLong(actual.calculatedExpirationSecond()));
- assertThat(expiryList.scheduleIds().size()).isEqualTo(1);
+ final var scheduledCounts =
+ writableScheduledCounts.get(new TimestampSeconds(actual.calculatedExpirationSecond()));
+ assertThat(scheduledCounts).isNotNull();
+ assertThat(scheduledCounts.numberScheduled()).isEqualTo(1);
writableSchedules.put(modified);
writableSchedules.put(modified);
@@ -123,27 +124,9 @@ void verifyPutDoesDeduplication() {
final var equalitAfter = writableByEquality.get(hash);
assertThat(equalitAfter).isNotNull();
- final var expiryListAfter = writableByExpiration.get(new ProtoLong(actual.calculatedExpirationSecond()));
- assertThat(expiryListAfter.scheduleIds().size()).isEqualTo(1);
- }
-
- @Test
- void purgesExpiredSchedules() {
- final ScheduleID idToDelete = scheduleInState.scheduleId();
- final Schedule actual = writableById.get(idToDelete);
- final var expirationTime = actual.calculatedExpirationSecond();
- assertThat(actual).isNotNull();
- assertThat(actual.signatories()).containsExactlyInAnyOrderElementsOf(scheduleInState.signatories());
- writableSchedules.purgeExpiredSchedulesBetween(expirationTime - 1, expirationTime + 1);
-
- final var purged = writableSchedules.get(idToDelete);
- assertThat(purged).isNull();
-
- final var byEquality = writableSchedules.getByEquality(actual);
- assertThat(byEquality).isNull();
-
- final var byExpiry = writableSchedules.getByExpirationSecond(expirationTime);
- assertThat(byExpiry).isNull();
+ final var countAfter = writableScheduledCounts.get(new TimestampSeconds(actual.calculatedExpirationSecond()));
+ assertThat(countAfter).isNotNull();
+ assertThat(countAfter.numberScheduled()).isEqualTo(1);
}
@NonNull
diff --git a/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleCreateHandlerTest.java b/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleCreateHandlerTest.java
index b630177ddcf9..1eab31290203 100644
--- a/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleCreateHandlerTest.java
+++ b/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/handlers/ScheduleCreateHandlerTest.java
@@ -21,6 +21,8 @@
import static com.hedera.hapi.node.base.ResponseCodeEnum.MAX_ENTITIES_IN_PRICE_REGIME_HAVE_BEEN_CREATED;
import static com.hedera.hapi.node.base.ResponseCodeEnum.SCHEDULED_TRANSACTION_NOT_IN_WHITELIST;
import static org.assertj.core.api.BDDAssertions.assertThat;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.BDDMockito.given;
import static org.mockito.Mockito.mock;
@@ -33,6 +35,7 @@
import com.hedera.hapi.node.scheduled.SchedulableTransactionBody;
import com.hedera.hapi.node.scheduled.SchedulableTransactionBody.DataOneOfType;
import com.hedera.hapi.node.state.schedule.Schedule;
+import com.hedera.hapi.node.state.throttles.ThrottleUsageSnapshots;
import com.hedera.hapi.node.transaction.TransactionBody;
import com.hedera.node.app.service.schedule.WritableScheduleStore;
import com.hedera.node.app.service.schedule.impl.ScheduledTransactionFactory;
@@ -41,6 +44,7 @@
import com.hedera.node.app.spi.ids.EntityNumGenerator;
import com.hedera.node.app.spi.key.KeyComparator;
import com.hedera.node.app.spi.signatures.VerificationAssistant;
+import com.hedera.node.app.spi.throttle.Throttle;
import com.hedera.node.app.spi.workflows.HandleException;
import com.hedera.node.app.spi.workflows.PreCheckException;
import com.hedera.node.app.spi.workflows.PreHandleContext;
@@ -51,16 +55,21 @@
import java.util.concurrent.ConcurrentSkipListSet;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
-import org.mockito.BDDMockito;
-import org.mockito.Mockito;
+import org.mockito.Mock;
class ScheduleCreateHandlerTest extends ScheduleHandlerTestBase {
+ @Mock
+ private Throttle.Factory throttleFactory;
+
+ @Mock
+ private Throttle throttle;
+
private ScheduleCreateHandler subject;
private PreHandleContext realPreContext;
@BeforeEach
void setUp() throws PreCheckException, InvalidKeyException {
- subject = new ScheduleCreateHandler(InstantSource.system());
+ subject = new ScheduleCreateHandler(InstantSource.system(), throttleFactory);
setUpBase();
}
@@ -152,6 +161,9 @@ void handleRejectsNonWhitelist() throws HandleException, PreCheckException {
final Set configuredWhitelist =
scheduleConfig.whitelist().functionalitySet();
given(keyVerifier.authorizingSimpleKeys()).willReturn(new ConcurrentSkipListSet<>(new KeyComparator()));
+ given(throttleFactory.newThrottle(anyInt(), any())).willReturn(throttle);
+ given(throttle.allow(any(), any(), any(), any())).willReturn(true);
+ given(throttle.usageSnapshots()).willReturn(ThrottleUsageSnapshots.DEFAULT);
for (final Schedule next : listOfScheduledOptions) {
final TransactionBody createTransaction = next.originalCreateTransaction();
final TransactionID createId = createTransaction.transactionID();
@@ -187,7 +199,7 @@ void handleRefusesToExceedCreationLimit() throws HandleException, PreCheckExcept
final HederaFunctionality functionType = HandlerUtility.functionalityForType(transactionType);
prepareContext(createTransaction, next.scheduleId().scheduleNum());
// all keys are "valid" with this mock setup
- given(keyVerifier.verificationFor(BDDMockito.any(Key.class), BDDMockito.any(VerificationAssistant.class)))
+ given(keyVerifier.verificationFor(any(Key.class), any(VerificationAssistant.class)))
.willReturn(new SignatureVerificationImpl(nullKey, null, true));
if (configuredWhitelist.contains(functionType)) {
throwsHandleException(
@@ -203,6 +215,9 @@ void handleExecutesImmediateIfPossible() throws HandleException, PreCheckExcepti
int successCount = 0;
// make sure we have at least four items in the whitelist to test.
assertThat(configuredWhitelist.size()).isGreaterThan(4);
+ given(throttleFactory.newThrottle(anyInt(), any())).willReturn(throttle);
+ given(throttle.allow(any(), any(), any(), any())).willReturn(true);
+ given(throttle.usageSnapshots()).willReturn(ThrottleUsageSnapshots.DEFAULT);
for (final Schedule next : listOfScheduledOptions) {
final TransactionBody createTransaction = next.originalCreateTransaction();
final TransactionID createId = createTransaction.transactionID();
@@ -211,7 +226,7 @@ void handleExecutesImmediateIfPossible() throws HandleException, PreCheckExcepti
final HederaFunctionality functionType = HandlerUtility.functionalityForType(transactionType);
prepareContext(createTransaction, next.scheduleId().scheduleNum());
// all keys are "valid" with this mock setup
- given(keyVerifier.verificationFor(BDDMockito.any(Key.class), BDDMockito.any(VerificationAssistant.class)))
+ given(keyVerifier.verificationFor(any(Key.class), any(VerificationAssistant.class)))
.willReturn(new SignatureVerificationImpl(nullKey, null, true));
given(keyVerifier.authorizingSimpleKeys()).willReturn(new ConcurrentSkipListSet<>(new KeyComparator()));
final int startCount = scheduleMapById.size();
@@ -255,9 +270,9 @@ private void prepareContext(final TransactionBody createTransaction, final long
given(mockContext.body()).willReturn(createTransaction);
given(mockContext.entityNumGenerator()).willReturn(entityNumGenerator);
given(entityNumGenerator.newEntityNum()).willReturn(nextEntityId);
- given(mockContext.allKeysForTransaction(Mockito.any(), Mockito.any())).willReturn(testChildKeys);
+ given(mockContext.allKeysForTransaction(any(), any())).willReturn(testChildKeys);
// This is how you get side effects replicated, by having the "Answer" called in place of the real method.
- given(keyVerifier.verificationFor(BDDMockito.any(Key.class), BDDMockito.any(VerificationAssistant.class)))
+ given(keyVerifier.verificationFor(any(Key.class), any(VerificationAssistant.class)))
.will(new VerificationForAnswer(testChildKeys));
}
}
diff --git a/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/schemas/V0570ScheduleSchemaTest.java b/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/schemas/V0570ScheduleSchemaTest.java
index cac878e97ae1..89f94cb57e07 100644
--- a/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/schemas/V0570ScheduleSchemaTest.java
+++ b/hedera-node/hedera-schedule-service-impl/src/test/java/com/hedera/node/app/service/schedule/impl/schemas/V0570ScheduleSchemaTest.java
@@ -18,7 +18,8 @@
import static com.hedera.node.app.service.schedule.impl.schemas.V0490ScheduleSchema.SCHEDULES_BY_EQUALITY_KEY;
import static com.hedera.node.app.service.schedule.impl.schemas.V0490ScheduleSchema.SCHEDULES_BY_EXPIRY_SEC_KEY;
-import static com.hedera.node.app.service.schedule.impl.schemas.V0570ScheduleSchema.SCHEDULE_IDS_BY_EXPIRY_SEC_KEY;
+import static com.hedera.node.app.service.schedule.impl.schemas.V0570ScheduleSchema.SCHEDULED_COUNTS_KEY;
+import static com.hedera.node.app.service.schedule.impl.schemas.V0570ScheduleSchema.SCHEDULED_ORDERS_KEY;
import static com.hedera.node.app.service.schedule.impl.schemas.V0570ScheduleSchema.SCHEDULE_ID_BY_EQUALITY_KEY;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.AssertionsForClassTypes.assertThatCode;
@@ -29,10 +30,12 @@
import com.hedera.hapi.node.base.ScheduleID;
import com.hedera.hapi.node.base.SemanticVersion;
+import com.hedera.hapi.node.base.TimestampSeconds;
import com.hedera.hapi.node.state.primitives.ProtoBytes;
import com.hedera.hapi.node.state.primitives.ProtoLong;
-import com.hedera.hapi.node.state.schedule.ScheduleIdList;
import com.hedera.hapi.node.state.schedule.ScheduleList;
+import com.hedera.hapi.node.state.schedule.ScheduledCounts;
+import com.hedera.hapi.node.state.schedule.ScheduledOrder;
import com.hedera.node.app.service.schedule.impl.ScheduleStoreUtility;
import com.hedera.node.app.service.schedule.impl.ScheduleTestBase;
import com.hedera.node.app.spi.fixtures.util.LogCaptor;
@@ -41,13 +44,11 @@
import com.hedera.node.app.spi.fixtures.util.LoggingTarget;
import com.hedera.node.app.spi.workflows.PreCheckException;
import com.swirlds.state.lifecycle.MigrationContext;
-import com.swirlds.state.lifecycle.StateDefinition;
import com.swirlds.state.test.fixtures.MapReadableKVState;
import com.swirlds.state.test.fixtures.MapReadableStates;
import com.swirlds.state.test.fixtures.MapWritableKVState;
import com.swirlds.state.test.fixtures.MapWritableStates;
import java.security.InvalidKeyException;
-import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -71,9 +72,9 @@ class V0570ScheduleSchemaTest extends ScheduleTestBase {
private MigrationContext migrationContext;
private final Map schedulesByExpirySec = new HashMap<>();
- private final Map scheduleIdsByExpirySec = new HashMap<>();
private MapReadableKVState readableSchedulesByExpirySec;
- private MapWritableKVState writableScheduleIdsByExpirySec;
+ private MapWritableKVState writableScheduleCounts;
+ private MapWritableKVState writableScheduleOrders;
private final Map schedulesByEquality = new HashMap<>();
private final Map scheduleByEquality = new HashMap<>();
@@ -91,28 +92,11 @@ void setUp() throws PreCheckException, InvalidKeyException {
@Test
void constructorHappyPath() {
- Assertions.assertThat(subject.getVersion())
+ assertThat(subject.getVersion())
.isEqualTo(
SemanticVersion.newBuilder().major(0).minor(57).patch(0).build());
}
- @Test
- void statesToCreateIsCorrect() {
- var sortedResult = subject.statesToCreate().stream()
- .sorted(Comparator.comparing(StateDefinition::stateKey))
- .toList();
-
- final var stateDef1 = sortedResult.getFirst();
- Assertions.assertThat(stateDef1.stateKey()).isEqualTo(SCHEDULE_IDS_BY_EXPIRY_SEC_KEY);
- Assertions.assertThat(stateDef1.keyCodec()).isEqualTo(ProtoLong.PROTOBUF);
- Assertions.assertThat(stateDef1.valueCodec()).isEqualTo(ScheduleIdList.PROTOBUF);
-
- final var stateDef2 = sortedResult.get(1);
- Assertions.assertThat(stateDef2.stateKey()).isEqualTo(SCHEDULE_ID_BY_EQUALITY_KEY);
- Assertions.assertThat(stateDef2.keyCodec()).isEqualTo(ProtoBytes.PROTOBUF);
- Assertions.assertThat(stateDef2.valueCodec()).isEqualTo(ScheduleID.PROTOBUF);
- }
-
@Test
void testStatesToRemove() {
Set statesToRemove = subject.statesToRemove();
@@ -137,10 +121,7 @@ void migrateAsExpected() {
setupMigrationContext();
assertThatCode(() -> subject.migrate(migrationContext)).doesNotThrowAnyException();
- assertThat(logCaptor.infoLogs()).contains("Started migrating Schedule Schema from 0.49.0 to 0.57.0");
- assertThat(logCaptor.infoLogs()).contains("Migrated 1 Schedules from SCHEDULES_BY_EXPIRY_SEC_KEY");
- assertThat(logCaptor.infoLogs()).contains("Migrated 2 Schedules from SCHEDULES_BY_EQUALITY_KEY");
- assertThat(writableScheduleIdsByExpirySec.size()).isEqualTo(1L);
+ assertThat(logCaptor.infoLogs()).contains("Migrated 2 schedules from SCHEDULES_BY_EQUALITY_KEY");
assertThat(writableScheduleIdByEquality.size()).isEqualTo(2L);
}
@@ -153,8 +134,8 @@ private void setupMigrationContext() {
.schedules(List.of(scheduler1, otherScheduleInState))
.build());
readableSchedulesByExpirySec = new MapReadableKVState<>(SCHEDULES_BY_EXPIRY_SEC_KEY, schedulesByExpirySec);
- writableScheduleIdsByExpirySec =
- new MapWritableKVState<>(SCHEDULE_IDS_BY_EXPIRY_SEC_KEY, scheduleIdsByExpirySec);
+ writableScheduleCounts = new MapWritableKVState<>(SCHEDULED_COUNTS_KEY, new HashMap<>());
+ writableScheduleOrders = new MapWritableKVState<>(SCHEDULED_ORDERS_KEY, new HashMap<>());
final ProtoBytes protoHash1 = new ProtoBytes(ScheduleStoreUtility.calculateBytesHash(scheduler1));
final ProtoBytes protoHash2 = new ProtoBytes(ScheduleStoreUtility.calculateBytesHash(otherScheduleInState));
@@ -171,7 +152,8 @@ private void setupMigrationContext() {
writableStates = MapWritableStates.builder()
.state(writableScheduleIdByEquality)
- .state(writableScheduleIdsByExpirySec)
+ .state(writableScheduleCounts)
+ .state(writableScheduleOrders)
.build();
readableStates = MapReadableStates.builder()
.state(readableSchedulesByExpirySec)
diff --git a/hedera-node/hedera-schedule-service/src/main/java/com/hedera/node/app/service/schedule/ExecutableTxn.java b/hedera-node/hedera-schedule-service/src/main/java/com/hedera/node/app/service/schedule/ExecutableTxn.java
new file mode 100644
index 000000000000..f23701a735fd
--- /dev/null
+++ b/hedera-node/hedera-schedule-service/src/main/java/com/hedera/node/app/service/schedule/ExecutableTxn.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.hedera.node.app.service.schedule;
+
+import com.hedera.hapi.node.base.AccountID;
+import com.hedera.hapi.node.base.Key;
+import com.hedera.hapi.node.transaction.TransactionBody;
+import com.hedera.node.app.spi.workflows.record.StreamBuilder;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import java.time.Instant;
+import java.util.function.Consumer;
+import java.util.function.Predicate;
+
+/**
+ * An executable transaction with the verifier to use for child signature verifications.
+ */
+public record ExecutableTxn<T extends StreamBuilder>(
+        @NonNull TransactionBody body,
+        @NonNull AccountID payerId,
+        @NonNull Predicate<Key> keyVerifier,
+        @NonNull Instant nbf,
+        @NonNull Class<T> builderType,
+        @NonNull Consumer<T> builderSpec) {}
diff --git a/hedera-node/hedera-schedule-service/src/main/java/com/hedera/node/app/service/schedule/ExecutableTxnIterator.java b/hedera-node/hedera-schedule-service/src/main/java/com/hedera/node/app/service/schedule/ExecutableTxnIterator.java
new file mode 100644
index 000000000000..bd931330905f
--- /dev/null
+++ b/hedera-node/hedera-schedule-service/src/main/java/com/hedera/node/app/service/schedule/ExecutableTxnIterator.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.hedera.node.app.service.schedule;
+
+import com.hedera.node.app.spi.workflows.record.StreamBuilder;
+import java.util.Iterator;
+
+/**
+ * An iterator over executable transactions that can also purge state up to the next known executable transaction.
+ */
+public interface ExecutableTxnIterator extends Iterator<ExecutableTxn<? extends StreamBuilder>> {
+ /**
+ * Purges any expired state up to the point of the next known executable transaction.
+ * @return whether any state was purged
+ * @throws IllegalStateException if {@link Iterator#hasNext()} was never called
+ */
+ boolean purgeUntilNext();
+}
diff --git a/hedera-node/hedera-schedule-service/src/main/java/com/hedera/node/app/service/schedule/ReadableScheduleStore.java b/hedera-node/hedera-schedule-service/src/main/java/com/hedera/node/app/service/schedule/ReadableScheduleStore.java
index e48c62fac4af..8f385d97a040 100644
--- a/hedera-node/hedera-schedule-service/src/main/java/com/hedera/node/app/service/schedule/ReadableScheduleStore.java
+++ b/hedera-node/hedera-schedule-service/src/main/java/com/hedera/node/app/service/schedule/ReadableScheduleStore.java
@@ -18,6 +18,9 @@
import com.hedera.hapi.node.base.ScheduleID;
import com.hedera.hapi.node.state.schedule.Schedule;
+import com.hedera.hapi.node.state.schedule.ScheduledCounts;
+import com.hedera.hapi.node.state.schedule.ScheduledOrder;
+import com.hedera.hapi.node.state.throttles.ThrottleUsageSnapshots;
import edu.umd.cs.findbugs.annotations.NonNull;
import edu.umd.cs.findbugs.annotations.Nullable;
import java.util.List;
@@ -58,6 +61,14 @@ public interface ReadableScheduleStore {
@Nullable
ScheduleID getByEquality(@NonNull Schedule scheduleToMatch);
+ /**
+ * Gets the id of the transaction scheduled to happen at the given order, if it exists.
+ * @param scheduledOrder the order of the transaction
+ * @return the id of the transaction scheduled at the given order, or null if no such schedule exists
+ */
+ @Nullable
+ ScheduleID getByOrder(@NonNull ScheduledOrder scheduledOrder);
+
/**
* Given a time as seconds since the epoch, find all ScheduleID currently in state that expire at that time.
* The {@code List} returned will contain all {@link ScheduleID} entries in the system that have a
@@ -68,7 +79,7 @@ public interface ReadableScheduleStore {
* to be returned.
* @return a {@code List} of entries that have expiration times within the requested second
*/
- @Nullable
+ @NonNull
List getByExpirationSecond(long expirationTime);
/**
@@ -89,4 +100,30 @@ public interface ReadableScheduleStore {
* @return the number of schedules in state
*/
long numSchedulesInState();
+
+ /**
+ * Returns the number of schedules that are scheduled to execute at the given consensus second.
+ * @param consensusSecond the consensus second to check for scheduled transactions
+ * @return the number of schedules that are scheduled to execute at the given consensus second
+ */
+ int numTransactionsScheduledAt(long consensusSecond);
+
+ /**
+ * Returns the scheduled transaction counts at the given consensus second, if any exist.
+ * @param consensusSecond the consensus second to check for scheduled transactions
+ * @return the scheduled transaction counts at the given consensus second, or null if there are none
+ */
+ @Nullable
+ ScheduledCounts scheduledCountsAt(long consensusSecond);
+
+ /**
+ * If the given consensus second has any scheduled transactions, returns a snapshot of the throttle
+ * usage for those transactions within that second. The throttles are implicit in the combination of
+ * the network throttle definitions and the fraction of network capacity that is allowed to be
+ * scheduled to execute in a single second.
+ * @param consensusSecond the consensus second to check for scheduling usage
+ * @return null or a usage snapshot for the transactions scheduled at the given consensus second
+ */
+ @Nullable
+ ThrottleUsageSnapshots usageSnapshotsForScheduled(long consensusSecond);
}
diff --git a/hedera-node/hedera-schedule-service/src/main/java/com/hedera/node/app/service/schedule/ScheduleService.java b/hedera-node/hedera-schedule-service/src/main/java/com/hedera/node/app/service/schedule/ScheduleService.java
index 8155cc55bb94..0562cc03e02d 100644
--- a/hedera-node/hedera-schedule-service/src/main/java/com/hedera/node/app/service/schedule/ScheduleService.java
+++ b/hedera-node/hedera-schedule-service/src/main/java/com/hedera/node/app/service/schedule/ScheduleService.java
@@ -16,21 +16,14 @@
package com.hedera.node.app.service.schedule;
-import com.hedera.hapi.node.base.AccountID;
-import com.hedera.hapi.node.transaction.TransactionBody;
import com.hedera.node.app.spi.RpcService;
import com.hedera.node.app.spi.RpcServiceFactory;
-import com.hedera.node.app.spi.signatures.VerificationAssistant;
import com.hedera.node.app.spi.store.StoreFactory;
import com.hedera.pbj.runtime.RpcServiceDefinition;
import edu.umd.cs.findbugs.annotations.NonNull;
-import edu.umd.cs.findbugs.annotations.Nullable;
import java.time.Instant;
-import java.util.Collections;
-import java.util.Iterator;
import java.util.ServiceLoader;
import java.util.Set;
-import java.util.function.Supplier;
/**
* Implements the HAPI iterTxnsForInterval(
- Instant start, Instant end, Supplier cleanupStoreFactory) {
- // Default implementation returns an empty iterator
- return Collections.emptyIterator();
- }
+ ExecutableTxnIterator executableTxns(
+ @NonNull Instant start, @NonNull Instant end, @NonNull StoreFactory storeFactory);
}
diff --git a/hedera-node/hedera-schedule-service/src/main/java/com/hedera/node/app/service/schedule/WritableScheduleStore.java b/hedera-node/hedera-schedule-service/src/main/java/com/hedera/node/app/service/schedule/WritableScheduleStore.java
index 1356866a755c..66ebd441e8f5 100644
--- a/hedera-node/hedera-schedule-service/src/main/java/com/hedera/node/app/service/schedule/WritableScheduleStore.java
+++ b/hedera-node/hedera-schedule-service/src/main/java/com/hedera/node/app/service/schedule/WritableScheduleStore.java
@@ -18,6 +18,8 @@
import com.hedera.hapi.node.base.ScheduleID;
import com.hedera.hapi.node.state.schedule.Schedule;
+import com.hedera.hapi.node.state.schedule.ScheduledOrder;
+import com.hedera.hapi.node.state.throttles.ThrottleUsageSnapshots;
import edu.umd.cs.findbugs.annotations.NonNull;
import edu.umd.cs.findbugs.annotations.Nullable;
import java.time.Instant;
@@ -43,10 +45,11 @@ public interface WritableScheduleStore extends ReadableScheduleStore {
/**
* Given the ID of a schedule, return a mutable reference to the schedule in this state.
*
- * @param idToFind The ID to find
+ * @param scheduleId The ID to find
* @return the Schedule to modify
*/
- Schedule getForModify(ScheduleID idToFind);
+ @Nullable
+ Schedule getForModify(@NonNull ScheduleID scheduleId);
/**
* Add a schedule to this state.
@@ -54,7 +57,21 @@ public interface WritableScheduleStore extends ReadableScheduleStore {
*
* @param scheduleToAdd The schedule to add
*/
- void put(Schedule scheduleToAdd);
+ void put(@NonNull Schedule scheduleToAdd);
+
+ /**
+ * Purges all schedule state associated with the given order.
+ * @param order The order to purge schedules for.
+ * @return whether this was the last scheduled order in its consensus second
+ */
+ boolean purgeByOrder(@NonNull ScheduledOrder order);
+
+ /**
+ * Updates the usage of the throttles for the given consensus second.
+ * @param consensusSecond The consensus second to track the usage for.
+ * @param usageSnapshots The usage snapshots to track.
+ */
+ void trackUsage(long consensusSecond, @NonNull ThrottleUsageSnapshots usageSnapshots);
/**
* Purges expired schedules from the store.
@@ -62,5 +79,5 @@ public interface WritableScheduleStore extends ReadableScheduleStore {
* @param firstSecondToExpire The consensus second of the first schedule to expire.
* @param lastSecondToExpire The consensus second of the last schedule to expire.
*/
- void purgeExpiredSchedulesBetween(long firstSecondToExpire, long lastSecondToExpire);
+ void purgeExpiredRangeClosed(long firstSecondToExpire, long lastSecondToExpire);
}
diff --git a/hedera-node/hedera-schedule-service/src/test/java/com/hedera/node/app/service/schedule/ScheduleServiceTest.java b/hedera-node/hedera-schedule-service/src/test/java/com/hedera/node/app/service/schedule/ScheduleServiceTest.java
index ef52cf543f08..af827af0ffdf 100644
--- a/hedera-node/hedera-schedule-service/src/test/java/com/hedera/node/app/service/schedule/ScheduleServiceTest.java
+++ b/hedera-node/hedera-schedule-service/src/test/java/com/hedera/node/app/service/schedule/ScheduleServiceTest.java
@@ -16,13 +16,25 @@
package com.hedera.node.app.service.schedule;
+import com.hedera.node.app.spi.store.StoreFactory;
+import com.swirlds.state.lifecycle.SchemaRegistry;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import java.time.Instant;
import org.assertj.core.api.Assertions;
import org.junit.jupiter.api.Test;
class ScheduleServiceTest {
+ private final ScheduleService subject = new ScheduleService() {
+ @Override
+ public ExecutableTxnIterator executableTxns(
+ @NonNull Instant start, @NonNull Instant end, @NonNull StoreFactory storeFactory) {
+ throw new UnsupportedOperationException();
+ }
- private final ScheduleService subject = (registry) -> {
- // no-op
+ @Override
+ public void registerSchemas(@NonNull SchemaRegistry registry) {
+ // No-op
+ }
};
@Test
diff --git a/hedera-node/hedera-token-service-impl/src/main/java/com/hedera/node/app/service/token/impl/handlers/FinalizeRecordHandler.java b/hedera-node/hedera-token-service-impl/src/main/java/com/hedera/node/app/service/token/impl/handlers/FinalizeRecordHandler.java
index 4b571e1d8dfe..20c8e1d185ee 100644
--- a/hedera-node/hedera-token-service-impl/src/main/java/com/hedera/node/app/service/token/impl/handlers/FinalizeRecordHandler.java
+++ b/hedera-node/hedera-token-service-impl/src/main/java/com/hedera/node/app/service/token/impl/handlers/FinalizeRecordHandler.java
@@ -191,7 +191,7 @@ private void deductChangesFromChildOrPrecedingRecords(
@NonNull final Map fungibleChanges,
@NonNull final Map> nftTransfers,
@NonNull final Map hbarChanges) {
- final Map finalNftOwners = new HashMap<>();
+ final Map childFinalNftOwners = new HashMap<>();
context.forEachChildRecord(ChildStreamBuilder.class, childRecord -> {
final List childHbarChangesFromRecord = childRecord.transferList() == null
? emptyList()
@@ -226,7 +226,7 @@ private void deductChangesFromChildOrPrecedingRecords(
for (final var ownershipChange : tokenTransfers.nftTransfers()) {
final var newOwnerId = ownershipChange.receiverAccountIDOrElse(ZERO_ACCOUNT_ID);
final var key = new NftID(tokenId, ownershipChange.serialNumber());
- finalNftOwners.put(key, newOwnerId);
+ childFinalNftOwners.put(key, newOwnerId);
}
}
}
@@ -237,9 +237,15 @@ private void deductChangesFromChildOrPrecedingRecords(
final var nftTransfersForToken = entry.getValue();
nftTransfersForToken.removeIf(transfer -> {
final var key = new NftID(tokenId, transfer.serialNumber());
- return finalNftOwners
- .getOrDefault(key, ZERO_ACCOUNT_ID)
- .equals(transfer.receiverAccountIDOrElse(ZERO_ACCOUNT_ID));
+ if (childFinalNftOwners.containsKey(key)) {
+ final var childFinalOwner = childFinalNftOwners.get(key);
+ final var ourFinalOwner = transfer.receiverAccountIDOrThrow();
+ // Remove this NFT transfer from our list if the child record's
+ // transfer list already shows it being transferred to the same
+ // final owner as in our list
+ return childFinalOwner.equals(ourFinalOwner);
+ }
+ return false;
});
if (nftTransfersForToken.isEmpty()) {
iter.remove();
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/RepeatableReason.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/RepeatableReason.java
index 778cbdc5471e..462376b2943c 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/RepeatableReason.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/RepeatableReason.java
@@ -37,4 +37,13 @@ public enum RepeatableReason {
* The test needs to control behavior of the TSS subsystem.
*/
NEEDS_TSS_CONTROL,
+ /**
+ * The test must directly access state to assert expectations that cannot be verified through the gRPC API.
+ */
+ NEEDS_STATE_ACCESS,
+ /**
+ * The test requires changes to the network throttle definitions, which might break
+ * other tests if they expect the default throttles.
+ */
+ THROTTLE_OVERRIDES,
}
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/extensions/NetworkTargetingExtension.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/extensions/NetworkTargetingExtension.java
index a9614fb3ba1e..9c421d697751 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/extensions/NetworkTargetingExtension.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/extensions/NetworkTargetingExtension.java
@@ -20,6 +20,7 @@
import static com.hedera.services.bdd.junit.ContextRequirement.THROTTLE_OVERRIDES;
import static com.hedera.services.bdd.junit.extensions.ExtensionUtils.hapiTestMethodOf;
import static com.hedera.services.bdd.junit.hedera.embedded.EmbeddedMode.CONCURRENT;
+import static java.util.Objects.requireNonNull;
import static java.util.stream.Collectors.toMap;
import static org.junit.platform.commons.support.AnnotationSupport.isAnnotated;
@@ -71,9 +72,7 @@ public void beforeEach(@NonNull final ExtensionContext extensionContext) {
targetNetwork.startWithOverrides(bootstrapOverrides);
HapiSpec.TARGET_NETWORK.set(targetNetwork);
} else {
- requiredEmbeddedMode(extensionContext)
- .ifPresent(
- SharedNetworkLauncherSessionListener.SharedNetworkExecutionListener::ensureEmbedding);
+ ensureEmbeddedNetwork(extensionContext);
HapiSpec.TARGET_NETWORK.set(SHARED_NETWORK.get());
// If there are properties to preserve or system files to override and restore, bind that info to the
// thread before executing the test factory
@@ -99,12 +98,28 @@ public void afterEach(@NonNull final ExtensionContext extensionContext) {
HapiSpec.PROPERTIES_TO_PRESERVE.remove();
}
- private Optional requiredEmbeddedMode(@NonNull final ExtensionContext extensionContext) {
+ /**
+ * Ensures that the embedded network is running, if required by the test class or method.
+ * @param extensionContext the extension context
+ */
+ public static void ensureEmbeddedNetwork(@NonNull final ExtensionContext extensionContext) {
+ requireNonNull(extensionContext);
+ requiredEmbeddedMode(extensionContext)
+ .ifPresent(SharedNetworkLauncherSessionListener.SharedNetworkExecutionListener::ensureEmbedding);
+ }
+
+ /**
+ * Returns the embedded mode required by the test class or method, if any.
+ * @param extensionContext the extension context
+ * @return the embedded mode
+ */
+ private static Optional requiredEmbeddedMode(@NonNull final ExtensionContext extensionContext) {
+ requireNonNull(extensionContext);
return extensionContext
.getTestClass()
.map(type -> type.getAnnotation(TargetEmbeddedMode.class))
.map(TargetEmbeddedMode::value)
- .or(() -> extensionContext.getParent().flatMap(this::requiredEmbeddedMode));
+ .or(() -> extensionContext.getParent().flatMap(NetworkTargetingExtension::requiredEmbeddedMode));
}
private void bindThreadTargets(
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/extensions/TestLifecycleExtension.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/extensions/TestLifecycleExtension.java
index 0ce949c035f6..0e5cbf8b7fa9 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/extensions/TestLifecycleExtension.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/extensions/TestLifecycleExtension.java
@@ -17,6 +17,8 @@
package com.hedera.services.bdd.junit.extensions;
import static com.hedera.services.bdd.junit.extensions.ExtensionUtils.hapiTestMethodOf;
+import static com.hedera.services.bdd.junit.extensions.NetworkTargetingExtension.SHARED_NETWORK;
+import static com.hedera.services.bdd.junit.extensions.NetworkTargetingExtension.ensureEmbeddedNetwork;
import com.hedera.services.bdd.junit.support.TestLifecycle;
import com.hedera.services.bdd.spec.HapiSpec;
@@ -38,8 +40,8 @@ public class TestLifecycleExtension
@Override
public void beforeAll(@NonNull final ExtensionContext extensionContext) {
if (isRootTestClass(extensionContext)) {
- getStore(extensionContext)
- .put(SPEC_MANAGER, new TestLifecycle(NetworkTargetingExtension.SHARED_NETWORK.get()));
+ ensureEmbeddedNetwork(extensionContext);
+ getStore(extensionContext).put(SPEC_MANAGER, new TestLifecycle(SHARED_NETWORK.get()));
}
getStore(extensionContext)
.get(SPEC_MANAGER, TestLifecycle.class)
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/hedera/embedded/AbstractEmbeddedHedera.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/hedera/embedded/AbstractEmbeddedHedera.java
index db6e21565734..659236e75168 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/hedera/embedded/AbstractEmbeddedHedera.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/hedera/embedded/AbstractEmbeddedHedera.java
@@ -175,6 +175,11 @@ public void start() {
fakePlatform().notifyListeners(ACTIVE_NOTIFICATION);
}
+ @Override
+ public Hedera hedera() {
+ return hedera;
+ }
+
@Override
public void stop() {
fakePlatform().notifyListeners(FREEZE_COMPLETE_NOTIFICATION);
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/hedera/embedded/EmbeddedHedera.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/hedera/embedded/EmbeddedHedera.java
index 4e7e3b4c361f..883f3187298b 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/hedera/embedded/EmbeddedHedera.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/hedera/embedded/EmbeddedHedera.java
@@ -16,6 +16,7 @@
package com.hedera.services.bdd.junit.hedera.embedded;
+import com.hedera.node.app.Hedera;
import com.hedera.node.app.fixtures.state.FakeState;
import com.hedera.services.bdd.junit.hedera.embedded.fakes.FakeTssBaseService;
import com.hederahashgraph.api.proto.java.AccountID;
@@ -77,6 +78,12 @@ public interface EmbeddedHedera {
*/
Instant now();
+ /**
+ * Returns the embedded Hedera.
+ * @return the embedded Hedera
+ */
+ Hedera hedera();
+
/**
* Advances the synthetic time in the embedded Hedera node by a given duration.
*/
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/support/translators/BaseTranslator.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/support/translators/BaseTranslator.java
index 24eb58a7de2e..c87a9a52e843 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/support/translators/BaseTranslator.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/support/translators/BaseTranslator.java
@@ -397,7 +397,13 @@ public ExchangeRateSet activeRates() {
private void scanUnit(@NonNull final BlockTransactionalUnit unit) {
unit.stateChanges().forEach(stateChange -> {
- if (stateChange.hasMapUpdate()) {
+ if (stateChange.hasMapDelete()) {
+ final var mapDelete = stateChange.mapDeleteOrThrow();
+ final var key = mapDelete.keyOrThrow();
+ if (key.hasScheduleIdKey()) {
+ scheduleRef = key.scheduleIdKeyOrThrow();
+ }
+ } else if (stateChange.hasMapUpdate()) {
final var mapUpdate = stateChange.mapUpdateOrThrow();
final var key = mapUpdate.keyOrThrow();
if (key.hasTokenIdKey()) {
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/support/validators/block/BlockStreamUtils.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/support/validators/block/BlockStreamUtils.java
index b5e526f99191..69501e4a7d31 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/support/validators/block/BlockStreamUtils.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/support/validators/block/BlockStreamUtils.java
@@ -54,7 +54,9 @@ public static String stateNameOf(final int stateId) {
case STATE_ID_SCHEDULES_BY_EXPIRY -> "ScheduleService.SCHEDULES_BY_EXPIRY_SEC";
case STATE_ID_SCHEDULES_BY_ID -> "ScheduleService.SCHEDULES_BY_ID";
case STATE_ID_SCHEDULE_ID_BY_EQUALITY -> "ScheduleService.SCHEDULE_ID_BY_EQUALITY";
- case STATE_ID_SCHEDULE_IDS_BY_EXPIRY -> "ScheduleService.SCHEDULE_IDS_BY_EXPIRY_SEC";
+ case STATE_ID_SCHEDULED_COUNTS -> "ScheduleService.SCHEDULED_COUNTS";
+ case STATE_ID_SCHEDULED_ORDERS -> "ScheduleService.SCHEDULED_ORDERS";
+ case STATE_ID_SCHEDULED_USAGES -> "ScheduleService.SCHEDULED_USAGES";
case STATE_ID_ACCOUNTS -> "TokenService.ACCOUNTS";
case STATE_ID_ALIASES -> "TokenService.ALIASES";
case STATE_ID_NFTS -> "TokenService.NFTS";
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/support/validators/block/StateChangesValidator.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/support/validators/block/StateChangesValidator.java
index 40d656e09298..583d6487e839 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/support/validators/block/StateChangesValidator.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/junit/support/validators/block/StateChangesValidator.java
@@ -136,13 +136,13 @@ public static void main(String[] args) {
.normalize();
final var validator = new StateChangesValidator(
Bytes.fromHex(
- "0340d546d0bfeb6e2f12af275347f584231fa41928a700543c9595affa817da5423bc4aac0689a388f6a8b972de30028"),
+ "65374e72c2572aaaca17fe3a0e879841c0f5ae919348fc18231f8167bd28e326438c6f93a07a45eda7888b69e9812c4d"),
node0Dir.resolve("output/swirlds.log"),
node0Dir.resolve("config.txt"),
node0Dir.resolve("data/config/application.properties"),
Bytes.fromHex("03"));
final var blocks =
- BlockStreamAccess.BLOCK_STREAM_ACCESS.readBlocks(node0Dir.resolve("data/block-streams/block-0.0.3"));
+ BlockStreamAccess.BLOCK_STREAM_ACCESS.readBlocks(node0Dir.resolve("data/blockStreams/block-0.0.3"));
validator.validateBlocks(blocks);
}
@@ -619,6 +619,8 @@ private static Object mapKeyFor(@NonNull final MapChangeKey mapChangeKey) {
case TOPIC_ID_KEY -> mapChangeKey.topicIdKeyOrThrow();
case CONTRACT_ID_KEY -> mapChangeKey.contractIdKeyOrThrow();
case PENDING_AIRDROP_ID_KEY -> mapChangeKey.pendingAirdropIdKeyOrThrow();
+ case TIMESTAMP_SECONDS_KEY -> mapChangeKey.timestampSecondsKeyOrThrow();
+ case SCHEDULED_ORDER_KEY -> mapChangeKey.scheduledOrderKeyOrThrow();
};
}
@@ -634,7 +636,6 @@ private static Object mapValueFor(@NonNull final MapChangeValue mapChangeValue)
case SCHEDULE_VALUE -> mapChangeValue.scheduleValueOrThrow();
case SCHEDULE_ID_VALUE -> mapChangeValue.scheduleIdValueOrThrow();
case SCHEDULE_LIST_VALUE -> mapChangeValue.scheduleListValueOrThrow();
- case SCHEDULE_ID_LIST_VALUE -> mapChangeValue.scheduleIdListValueOrThrow();
case SLOT_VALUE_VALUE -> mapChangeValue.slotValueValueOrThrow();
case STAKING_NODE_INFO_VALUE -> mapChangeValue.stakingNodeInfoValueOrThrow();
case TOKEN_VALUE -> mapChangeValue.tokenValueOrThrow();
@@ -643,6 +644,8 @@ private static Object mapValueFor(@NonNull final MapChangeValue mapChangeValue)
case NODE_VALUE -> mapChangeValue.nodeValueOrThrow();
case ACCOUNT_PENDING_AIRDROP_VALUE -> mapChangeValue.accountPendingAirdropValueOrThrow();
case ROSTER_VALUE -> mapChangeValue.rosterValueOrThrow();
+ case SCHEDULED_COUNTS_VALUE -> mapChangeValue.scheduledCountsValueOrThrow();
+ case THROTTLE_USAGE_SNAPSHOTS_VALUE -> mapChangeValue.throttleUsageSnapshotsValue();
};
}
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/HapiPropertySource.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/HapiPropertySource.java
index 395383ff14a2..5de64ef6ae3c 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/HapiPropertySource.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/HapiPropertySource.java
@@ -27,6 +27,7 @@
import com.google.common.primitives.Longs;
import com.google.protobuf.ByteString;
import com.hedera.hapi.node.base.ServiceEndpoint;
+import com.hedera.node.app.hapi.utils.sysfiles.domain.throttling.ScaleFactor;
import com.hedera.node.config.converter.LongPairConverter;
import com.hedera.node.config.types.LongPair;
import com.hedera.node.config.types.StreamMode;
@@ -151,6 +152,11 @@ default TimeUnit getTimeUnit(String property) {
return TimeUnit.valueOf(get(property));
}
+ default ScaleFactor getScaleFactor(@NonNull final String property) {
+ requireNonNull(property);
+ return ScaleFactor.from(get(property));
+ }
+
default double getDouble(String property) {
return Double.parseDouble(get(property));
}
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/HapiSpec.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/HapiSpec.java
index 3a88c7841b9a..cc4f0984cd03 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/HapiSpec.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/HapiSpec.java
@@ -18,7 +18,6 @@
import static com.hedera.node.app.roster.schemas.V0540RosterSchema.ROSTER_STATES_KEY;
import static com.hedera.node.app.service.addressbook.impl.schemas.V053AddressBookSchema.NODES_KEY;
-import static com.hedera.node.app.service.schedule.impl.schemas.V0570ScheduleSchema.SCHEDULE_IDS_BY_EXPIRY_SEC_KEY;
import static com.hedera.node.app.service.token.impl.schemas.V0490TokenSchema.ACCOUNTS_KEY;
import static com.hedera.node.app.service.token.impl.schemas.V0490TokenSchema.TOKENS_KEY;
import static com.hedera.node.app.tss.schemas.V0560TssBaseSchema.TSS_MESSAGE_MAP_KEY;
@@ -44,7 +43,8 @@
import static com.hedera.services.bdd.spec.transactions.TxnUtils.turnLoggingOff;
import static com.hedera.services.bdd.spec.transactions.TxnVerbs.scheduleCreate;
import static com.hedera.services.bdd.spec.transactions.TxnVerbs.scheduleSign;
-import static com.hedera.services.bdd.spec.utilops.SysFileOverrideOp.Target.*;
+import static com.hedera.services.bdd.spec.utilops.SysFileOverrideOp.Target.FEES;
+import static com.hedera.services.bdd.spec.utilops.SysFileOverrideOp.Target.THROTTLES;
import static com.hedera.services.bdd.spec.utilops.UtilStateChange.createEthereumAccountForSpec;
import static com.hedera.services.bdd.spec.utilops.UtilStateChange.isEthereumAccountCreatedForSpec;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.blockingOrder;
@@ -68,11 +68,11 @@
import static java.util.stream.Collectors.joining;
import com.google.common.base.MoreObjects;
+import com.hedera.hapi.node.base.TimestampSeconds;
import com.hedera.hapi.node.state.addressbook.Node;
import com.hedera.hapi.node.state.common.EntityNumber;
-import com.hedera.hapi.node.state.primitives.ProtoLong;
import com.hedera.hapi.node.state.roster.RosterState;
-import com.hedera.hapi.node.state.schedule.ScheduleIdList;
+import com.hedera.hapi.node.state.schedule.ScheduledCounts;
import com.hedera.hapi.node.state.token.Account;
import com.hedera.hapi.node.state.token.Token;
import com.hedera.hapi.node.state.tss.TssMessageMapKey;
@@ -80,6 +80,7 @@
import com.hedera.node.app.fixtures.state.FakeState;
import com.hedera.node.app.roster.RosterService;
import com.hedera.node.app.service.schedule.ScheduleService;
+import com.hedera.node.app.service.schedule.impl.schemas.V0570ScheduleSchema;
import com.hedera.node.app.service.token.TokenService;
import com.hedera.node.app.tss.TssBaseService;
import com.hedera.services.bdd.junit.LeakyHapiTest;
@@ -465,6 +466,16 @@ public SidecarWatcher getSidecarWatcher() {
}
}
+ /**
+ * Returns the {@link EmbeddedHedera} for a spec in embedded mode, or throws if the spec is not in embedded mode.
+ *
+ * @return the embedded Hedera
+ * @throws IllegalStateException if the spec is not in embedded mode
+ */
+ public EmbeddedHedera embeddedHederaOrThrow() {
+ return embeddedNetworkOrThrow().embeddedHederaOrThrow();
+ }
+
/**
* Returns the {@link EmbeddedHedera} for a spec in embedded mode, or throws if the spec is not in embedded mode.
*
@@ -531,15 +542,15 @@ public void sleepConsensusTime(@NonNull final Duration duration) {
}
/**
- * Get the {@link WritableKVState} for the embedded network's schedule expiries, if this spec is
+ * Get the {@link WritableKVState} for the embedded network's schedule counts, if this spec is
* targeting an embedded network.
*
- * @return the embedded schedule expiries state
+ * @return the embedded schedule counts state
* @throws IllegalStateException if this spec is not targeting an embedded network
*/
- public @NonNull WritableKVState embeddedScheduleExpiriesOrThrow() {
+ public @NonNull WritableKVState embeddedScheduleCountsOrThrow() {
final var state = embeddedStateOrThrow();
- return state.getWritableStates(ScheduleService.NAME).get(SCHEDULE_IDS_BY_EXPIRY_SEC_KEY);
+ return state.getWritableStates(ScheduleService.NAME).get(V0570ScheduleSchema.SCHEDULED_COUNTS_KEY);
}
/**
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/transactions/schedule/HapiScheduleCreate.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/transactions/schedule/HapiScheduleCreate.java
index c8939c1a77ff..49daa5f46fa4 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/transactions/schedule/HapiScheduleCreate.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/transactions/schedule/HapiScheduleCreate.java
@@ -58,6 +58,10 @@
public class HapiScheduleCreate> extends HapiTxnOp> {
private static final Logger log = LogManager.getLogger(HapiScheduleCreate.class);
+ private static long NA = -1;
+
+ private long longTermExpiry = NA;
+ private long longTermLifetime = NA;
private boolean advertiseCreation = false;
private boolean recordScheduledTxn = false;
private boolean skipRegistryUpdate = false;
@@ -159,6 +163,16 @@ public HapiScheduleCreate waitForExpiry(boolean value) {
return this;
}
+ public HapiScheduleCreate expiringAt(final long expiry) {
+ this.longTermExpiry = expiry;
+ return waitForExpiry();
+ }
+
+ public HapiScheduleCreate expiringIn(final long lifetime) {
+ this.longTermLifetime = lifetime;
+ return waitForExpiry();
+ }
+
public HapiScheduleCreate withRelativeExpiry(String txnId, long offsetSeconds) {
this.expirationTimeRelativeTo = Optional.of(Pair.of(txnId, offsetSeconds));
return this;
@@ -202,7 +216,16 @@ protected Consumer opBodyDef(HapiSpec spec) throws Thro
waitForExpiry.ifPresent(b::setWaitForExpiry);
- if (expirationTimeRelativeTo.isPresent()) {
+ if (longTermExpiry != NA) {
+ b.setExpirationTime(Timestamp.newBuilder()
+ .setSeconds(longTermExpiry)
+ .build());
+ } else if (longTermLifetime != NA) {
+ final var approxNow = spec.consensusTime();
+ b.setExpirationTime(Timestamp.newBuilder()
+ .setSeconds(approxNow.getEpochSecond() + longTermLifetime)
+ .build());
+ } else if (expirationTimeRelativeTo.isPresent()) {
var expiry = getRelativeExpiry(
spec,
expirationTimeRelativeTo.get().getKey(),
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/utilops/EmbeddedVerbs.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/utilops/EmbeddedVerbs.java
index b29c8834f8d8..a559bde83c3a 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/utilops/EmbeddedVerbs.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/utilops/EmbeddedVerbs.java
@@ -17,42 +17,57 @@
package com.hedera.services.bdd.spec.utilops;
import static com.hedera.node.config.types.StreamMode.RECORDS;
+import static com.hedera.services.bdd.spec.utilops.UtilVerbs.doingContextual;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.withOpContext;
import static java.util.Objects.requireNonNull;
+import com.hedera.hapi.node.base.AccountID;
+import com.hedera.hapi.node.base.SignatureMap;
+import com.hedera.hapi.node.base.TimestampSeconds;
+import com.hedera.hapi.node.base.Transaction;
+import com.hedera.hapi.node.base.TransactionID;
import com.hedera.hapi.node.state.addressbook.Node;
import com.hedera.hapi.node.state.blockrecords.BlockInfo;
import com.hedera.hapi.node.state.blockstream.BlockStreamInfo;
import com.hedera.hapi.node.state.common.EntityNumber;
-import com.hedera.hapi.node.state.primitives.ProtoLong;
-import com.hedera.hapi.node.state.schedule.ScheduleIdList;
+import com.hedera.hapi.node.state.schedule.ScheduledCounts;
import com.hedera.hapi.node.state.token.Account;
import com.hedera.hapi.node.state.token.AccountPendingAirdrop;
import com.hedera.hapi.node.state.token.StakingNodeInfo;
import com.hedera.hapi.node.state.token.Token;
import com.hedera.hapi.node.state.tss.TssMessageMapKey;
import com.hedera.hapi.node.state.tss.TssVoteMapKey;
+import com.hedera.hapi.node.transaction.TransactionBody;
import com.hedera.hapi.services.auxiliary.tss.TssMessageTransactionBody;
import com.hedera.hapi.services.auxiliary.tss.TssVoteTransactionBody;
+import com.hedera.node.app.hapi.utils.CommonPbjConverters;
+import com.hedera.node.app.throttle.ThrottleAccumulator;
+import com.hedera.node.app.workflows.TransactionInfo;
+import com.hedera.pbj.runtime.io.buffer.Bytes;
import com.hedera.services.bdd.junit.hedera.embedded.EmbeddedNetwork;
import com.hedera.services.bdd.spec.SpecOperation;
import com.hedera.services.bdd.spec.utilops.embedded.MutateAccountOp;
+import com.hedera.services.bdd.spec.utilops.embedded.MutateKVStateOp;
import com.hedera.services.bdd.spec.utilops.embedded.MutateNodeOp;
-import com.hedera.services.bdd.spec.utilops.embedded.MutateScheduleExpiriesOp;
+import com.hedera.services.bdd.spec.utilops.embedded.MutateScheduleCountsOp;
import com.hedera.services.bdd.spec.utilops.embedded.MutateStakingInfosOp;
import com.hedera.services.bdd.spec.utilops.embedded.MutateTokenOp;
import com.hedera.services.bdd.spec.utilops.embedded.MutateTssMessagesOp;
import com.hedera.services.bdd.spec.utilops.embedded.MutateTssVotesOp;
import com.hedera.services.bdd.spec.utilops.embedded.ViewAccountOp;
+import com.hedera.services.bdd.spec.utilops.embedded.ViewKVStateOp;
import com.hedera.services.bdd.spec.utilops.embedded.ViewMappingValueOp;
import com.hedera.services.bdd.spec.utilops.embedded.ViewNodeOp;
import com.hedera.services.bdd.spec.utilops.embedded.ViewPendingAirdropOp;
import com.hedera.services.bdd.spec.utilops.embedded.ViewSingletonOp;
+import com.hederahashgraph.api.proto.java.HederaFunctionality;
import com.swirlds.state.spi.CommittableWritableStates;
+import com.swirlds.state.spi.ReadableKVState;
import com.swirlds.state.spi.WritableKVState;
import edu.umd.cs.findbugs.annotations.NonNull;
import java.time.Duration;
import java.util.function.Consumer;
+import java.util.function.IntConsumer;
/**
* Contains operations that are usable only with an {@link EmbeddedNetwork}.
@@ -87,14 +102,14 @@ public static MutateTokenOp mutateToken(
}
/**
- * Returns an operation that allows the test author to directly mutate the schedule expiries.
+ * Returns an operation that allows the test author to directly mutate the schedule counts.
*
- * @param mutation the mutation to apply to the schedule expiries
- * @return the operation that will mutate the schedule expiries
+ * @param mutation the mutation to apply to the schedule counts
+ * @return the operation that will mutate the schedule counts
*/
- public static MutateScheduleExpiriesOp mutateScheduleExpiries(
- @NonNull final Consumer> mutation) {
- return new MutateScheduleExpiriesOp(mutation);
+ public static MutateScheduleCountsOp mutateScheduleCounts(
+ @NonNull final Consumer> mutation) {
+ return new MutateScheduleCountsOp(mutation);
}
/**
@@ -156,6 +171,46 @@ public static ViewSingletonOp viewSingleton(
return new ViewSingletonOp(serviceName, stateKey, observer);
}
+ /**
+ * Returns an operation that allows the test author to view a key/value state in an embedded state.
+ * @param serviceName the name of the service that manages the mapping
+ * @param stateKey the mapping state key
+ *
+ * @param observer the observer that will receive the key/value state
+ * @return the operation that will expose the key/value state to the observer
+ * @param the type of the key
+ * @param the type of the value
+ */
+ public static ViewKVStateOp viewKVState(
+ @NonNull final String serviceName,
+ @NonNull final String stateKey,
+ @NonNull final Consumer> observer) {
+ requireNonNull(serviceName);
+ requireNonNull(stateKey);
+ requireNonNull(observer);
+ return new ViewKVStateOp<>(serviceName, stateKey, observer);
+ }
+
+ /**
+ * Returns an operation that allows the test author to mutate a key/value state in an embedded state.
+ * @param serviceName the name of the service that manages the mapping
+ * @param stateKey the mapping state key
+ *
+ * @param observer the consumer that will receive the writable key/value state to mutate
+ * @return the operation that will expose the writable key/value state to the consumer
+ * @param the type of the key
+ * @param the type of the value
+ */
+ public static MutateKVStateOp mutateKVState(
+ @NonNull final String serviceName,
+ @NonNull final String stateKey,
+ @NonNull final Consumer> observer) {
+ requireNonNull(serviceName);
+ requireNonNull(stateKey);
+ requireNonNull(observer);
+ return new MutateKVStateOp<>(serviceName, stateKey, observer);
+ }
+
/**
* Returns an operation that allows the test author to view a key's mapped value in an embedded state.
* @param serviceName the name of the service that manages the mapping
@@ -200,6 +255,50 @@ public static ViewNodeOp viewNode(@NonNull final String name, @NonNull final Con
return new ViewNodeOp(name, observer);
}
+ /**
+ * Returns an operation that exposes the maximum number of the given functionality that can be scheduled
+ * in a single consensus second with the embedded network's active throttles and configuration.
+ * @param function the functionality to check
+ * @param observer the observer to receive the maximum number of transactions
+ * @return the operation that will expose the maximum number of transactions
+ */
+ public static SpecOperation exposeMaxSchedulable(
+ @NonNull final HederaFunctionality function, @NonNull final IntConsumer observer) {
+ requireNonNull(function);
+ requireNonNull(observer);
+ return doingContextual(spec -> {
+ final var properties = spec.startupProperties();
+ final var capacityUtilization = properties.getScaleFactor("scheduling.schedulableCapacityFraction");
+ final var hedera = spec.embeddedHederaOrThrow().hedera();
+ final var throttleAccumulator = new ThrottleAccumulator(
+ hedera.configProvider()::getConfiguration,
+ capacityUtilization::asApproxCapacitySplit,
+ ThrottleAccumulator.ThrottleType.BACKEND_THROTTLE);
+ throttleAccumulator.applyGasConfig();
+ throttleAccumulator.rebuildFor(hedera.activeThrottleDefinitions());
+ final var now = spec.consensusTime();
+ final var state = spec.embeddedStateOrThrow();
+ final var pbjFunction = CommonPbjConverters.toPbj(function);
+ final var throttledPayerId = AccountID.newBuilder()
+ .accountNum(properties.getLong("accounts.lastThrottleExempt") + 1)
+ .build();
+ final var txnInfo = new TransactionInfo(
+ Transaction.DEFAULT,
+ TransactionBody.DEFAULT,
+ TransactionID.DEFAULT,
+ throttledPayerId,
+ SignatureMap.DEFAULT,
+ Bytes.EMPTY,
+ pbjFunction,
+ null);
+ int numSchedulable = 0;
+ for (; !throttleAccumulator.checkAndEnforceThrottle(txnInfo, now, state); numSchedulable++) {
+ // Count until we are throttled
+ }
+ observer.accept(numSchedulable);
+ });
+ }
+
/***
* Returns an operation that allows the test author to view the pending airdrop of an account.
* @param tokenName the name of the token
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/utilops/UtilVerbs.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/utilops/UtilVerbs.java
index c44529e312af..6edd01472637 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/utilops/UtilVerbs.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/utilops/UtilVerbs.java
@@ -229,6 +229,7 @@
import java.util.function.DoubleConsumer;
import java.util.function.Function;
import java.util.function.IntFunction;
+import java.util.function.LongConsumer;
import java.util.function.ObjIntConsumer;
import java.util.function.Predicate;
import java.util.function.Supplier;
@@ -565,6 +566,10 @@ public static SpecOperation runBackgroundTrafficUntilFreezeComplete() {
});
}
+ public static HapiSpecSleep sleepForSeconds(final long seconds) {
+ return sleepFor(seconds * 1_000L);
+ }
+
public static HapiSpecSleep sleepFor(long timeMs) {
return new HapiSpecSleep(timeMs);
}
@@ -1214,6 +1219,24 @@ public static HapiSpecOperation[] nOps(final int n, @NonNull final IntFunction<HapiSpecOperation> opFn) {
+ /**
+ * Returns an operation that exposes the consensus second of the current spec to the given observer.
+ * @param observer the observer to pass the consensus second to
+ * @return the operation that exposes the consensus second
+ */
+ public static SpecOperation exposeSpecSecondTo(@NonNull final LongConsumer observer) {
+ return exposeSpecTimeTo(instant -> observer.accept(instant.getEpochSecond()));
+ }
+
+ /**
+ * Returns an operation that exposes the consensus time of the current spec to the given observer.
+ * @param observer the observer to pass the consensus time to
+ * @return the operation that exposes the consensus time
+ */
+ public static SpecOperation exposeSpecTimeTo(@NonNull final Consumer<Instant> observer) {
+ return doingContextual(spec -> observer.accept(spec.consensusTime()));
+ }
+
/**
* Returns the given varags as a {@link SpecOperation} array.
*
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/utilops/embedded/MutateKVStateOp.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/utilops/embedded/MutateKVStateOp.java
new file mode 100644
index 000000000000..6b8e5d4f90ec
--- /dev/null
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/utilops/embedded/MutateKVStateOp.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.hedera.services.bdd.spec.utilops.embedded;
+
+import static java.util.Objects.requireNonNull;
+
+import com.hedera.services.bdd.spec.HapiSpec;
+import com.hedera.services.bdd.spec.utilops.UtilOp;
+import com.swirlds.state.spi.WritableKVState;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import java.util.function.Consumer;
+
+public class MutateKVStateOp extends UtilOp {
+ private final String serviceName;
+ private final String stateKey;
+ private final Consumer<WritableKVState<?, ?>> observer;
+
+ public MutateKVStateOp(
+ @NonNull final String serviceName,
+ @NonNull final String stateKey,
+ @NonNull final Consumer<WritableKVState<?, ?>> observer) {
+ this.serviceName = requireNonNull(serviceName);
+ this.stateKey = requireNonNull(stateKey);
+ this.observer = requireNonNull(observer);
+ }
+
+ @Override
+ protected boolean submitOp(@NonNull final HapiSpec spec) throws Throwable {
+ final var state = spec.embeddedStateOrThrow();
+ final var writableStates = state.getWritableStates(serviceName);
+ observer.accept(requireNonNull(writableStates.get(stateKey)));
+ spec.commitEmbeddedState();
+ return false;
+ }
+}
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/utilops/embedded/MutateScheduleExpiriesOp.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/utilops/embedded/MutateScheduleCountsOp.java
similarity index 71%
rename from hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/utilops/embedded/MutateScheduleExpiriesOp.java
rename to hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/utilops/embedded/MutateScheduleCountsOp.java
index cde218015541..2f73127ebe8e 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/utilops/embedded/MutateScheduleExpiriesOp.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/utilops/embedded/MutateScheduleCountsOp.java
@@ -18,8 +18,8 @@
import static java.util.Objects.requireNonNull;
-import com.hedera.hapi.node.state.primitives.ProtoLong;
-import com.hedera.hapi.node.state.schedule.ScheduleIdList;
+import com.hedera.hapi.node.base.TimestampSeconds;
+import com.hedera.hapi.node.state.schedule.ScheduledCounts;
import com.hedera.services.bdd.spec.HapiSpec;
import com.hedera.services.bdd.spec.utilops.UtilOp;
import com.swirlds.state.spi.WritableKVState;
@@ -27,18 +27,19 @@
import java.util.function.Consumer;
/**
- * An operation that allows the test author to directly mutate the schedule experies in an embedded state.
+ * An operation that allows the test author to directly mutate the schedule counts in an embedded state.
*/
-public class MutateScheduleExpiriesOp extends UtilOp {
- private final Consumer<WritableKVState<ProtoLong, ScheduleIdList>> mutation;
+public class MutateScheduleCountsOp extends UtilOp {
+ private final Consumer<WritableKVState<TimestampSeconds, ScheduledCounts>> mutation;
- public MutateScheduleExpiriesOp(@NonNull final Consumer<WritableKVState<ProtoLong, ScheduleIdList>> mutation) {
+ public MutateScheduleCountsOp(
+ @NonNull final Consumer<WritableKVState<TimestampSeconds, ScheduledCounts>> mutation) {
this.mutation = requireNonNull(mutation);
}
@Override
protected boolean submitOp(@NonNull final HapiSpec spec) throws Throwable {
- final var state = spec.embeddedScheduleExpiriesOrThrow();
+ final var state = spec.embeddedScheduleCountsOrThrow();
mutation.accept(state);
spec.commitEmbeddedState();
return false;
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/utilops/embedded/ViewKVStateOp.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/utilops/embedded/ViewKVStateOp.java
new file mode 100644
index 000000000000..fbdba921b1d3
--- /dev/null
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/utilops/embedded/ViewKVStateOp.java
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.hedera.services.bdd.spec.utilops.embedded;
+
+import static java.util.Objects.requireNonNull;
+
+import com.hedera.services.bdd.spec.HapiSpec;
+import com.hedera.services.bdd.spec.utilops.UtilOp;
+import com.swirlds.state.spi.ReadableKVState;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import java.util.function.Consumer;
+
+public class ViewKVStateOp extends UtilOp {
+ private final String serviceName;
+ private final String stateKey;
+ private final Consumer<ReadableKVState<?, ?>> observer;
+
+ public ViewKVStateOp(
+ @NonNull final String serviceName,
+ @NonNull final String stateKey,
+ @NonNull final Consumer<ReadableKVState<?, ?>> observer) {
+ this.serviceName = requireNonNull(serviceName);
+ this.stateKey = requireNonNull(stateKey);
+ this.observer = requireNonNull(observer);
+ }
+
+ @Override
+ protected boolean submitOp(@NonNull final HapiSpec spec) throws Throwable {
+ final var state = spec.embeddedStateOrThrow();
+ final var readableStates = state.getReadableStates(serviceName);
+ observer.accept(requireNonNull(readableStates.get(stateKey)));
+ return false;
+ }
+}
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/utilops/streams/StreamValidationOp.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/utilops/streams/StreamValidationOp.java
index 5e420cc8bbe1..6b123909b2b1 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/utilops/streams/StreamValidationOp.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/spec/utilops/streams/StreamValidationOp.java
@@ -128,6 +128,7 @@ protected boolean submitOp(@NonNull final HapiSpec spec) throws Throwable {
.filter(factory -> factory.appliesTo(spec))
.map(factory -> factory.create(spec))
.flatMap(v -> v.validationErrorsIn(blocks, data))
+ .peek(t -> log.error("Block stream validation error", t))
.map(Throwable::getMessage)
.collect(joining(ERROR_PREFIX));
if (!maybeErrors.isBlank()) {
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/hip423/LongTermScheduleUtils.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/hip423/LongTermScheduleUtils.java
index c25b77f78841..8ef04a3f0026 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/hip423/LongTermScheduleUtils.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/hip423/LongTermScheduleUtils.java
@@ -20,7 +20,6 @@
import static com.hedera.services.bdd.spec.transactions.TxnVerbs.scheduleCreate;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.buildUpgradeZipFrom;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.prepareUpgrade;
-import static com.hedera.services.bdd.spec.utilops.UtilVerbs.purgeUpgradeArtifacts;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.sourcing;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.updateSpecialFile;
import static com.hedera.services.bdd.spec.utilops.upgrade.BuildUpgradeZipOp.FAKE_UPGRADE_ZIP_LOC;
@@ -35,6 +34,7 @@
import com.hedera.services.bdd.spec.transactions.TxnUtils;
import com.hederahashgraph.api.proto.java.AccountAmount;
import com.hederahashgraph.api.proto.java.AccountID;
+import edu.umd.cs.findbugs.annotations.NonNull;
import java.util.List;
public final class LongTermScheduleUtils {
@@ -106,7 +106,8 @@ static boolean transferListCheck(
return amountHasBeenTransferred && payerHasPaid;
}
- static SpecOperation[] scheduleFakeUpgrade(String payer, String relativeTransaction, long offset, String via) {
+ static SpecOperation[] scheduleFakeUpgrade(
+ @NonNull final String payer, final long lifetime, @NonNull final String via) {
final var operations = List.of(
buildUpgradeZipFrom(FAKE_ASSETS_LOC),
// Upload it to file 0.0.150; need sourcing() here because the operation reads contents eagerly
@@ -116,7 +117,6 @@ static SpecOperation[] scheduleFakeUpgrade(String payer, String relativeTransact
FAKE_UPGRADE_ZIP_LOC,
TxnUtils.BYTES_4K,
upgradeFileAppendsPerBurst())),
- purgeUpgradeArtifacts(),
// Issue PREPARE_UPGRADE; need sourcing() here because we want to hash only after creating the ZIP
sourcing(() -> scheduleCreate(
VALID_SCHEDULE,
@@ -126,9 +126,8 @@ static SpecOperation[] scheduleFakeUpgrade(String payer, String relativeTransact
.withEntityMemo(randomUppercase(100))
.designatingPayer(GENESIS)
.payingWith(payer)
- .waitForExpiry()
.recordingScheduledTxn()
- .withRelativeExpiry(relativeTransaction, offset)
+ .expiringIn(lifetime)
.via(via)));
return operations.toArray(SpecOperation[]::new);
}
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/hip423/ScheduleLongTermExecutionTest.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/hip423/ScheduleLongTermExecutionTest.java
index 98d10f1967c2..ba89fda4e64d 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/hip423/ScheduleLongTermExecutionTest.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/hip423/ScheduleLongTermExecutionTest.java
@@ -17,6 +17,7 @@
package com.hedera.services.bdd.suites.hip423;
import static com.hedera.services.bdd.spec.HapiSpec.defaultHapiSpec;
+import static com.hedera.services.bdd.spec.HapiSpec.hapiTest;
import static com.hedera.services.bdd.spec.assertions.TransactionRecordAsserts.recordWith;
import static com.hedera.services.bdd.spec.queries.QueryVerbs.getAccountBalance;
import static com.hedera.services.bdd.spec.queries.QueryVerbs.getFileInfo;
@@ -80,6 +81,7 @@
import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.INVALID_PAYER_SIGNATURE;
import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.INVALID_SCHEDULE_ID;
import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.PAYER_ACCOUNT_DELETED;
+import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.SCHEDULE_EXPIRY_IS_BUSY;
import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.SUCCESS;
import com.hedera.services.bdd.junit.HapiTest;
@@ -1061,7 +1063,7 @@ final Stream<DynamicTest> scheduledFreezeWorksAsExpected() {
return defaultHapiSpec("ScheduledFreezeWorksAsExpectedAtExpiry")
.given(flattened(
cryptoCreate(PAYING_ACCOUNT).via(PAYER_TXN),
- scheduleFakeUpgrade(PAYING_ACCOUNT, PAYER_TXN, 4, SUCCESS_TXN)))
+ scheduleFakeUpgrade(PAYING_ACCOUNT, 4, SUCCESS_TXN)))
.when(scheduleSign(VALID_SCHEDULE)
.alsoSigningWith(GENESIS)
.payingWith(PAYING_ACCOUNT)
@@ -1072,7 +1074,6 @@ final Stream scheduledFreezeWorksAsExpected() {
.hasWaitForExpiry()
.isNotExecuted()
.isNotDeleted()
- .hasRelativeExpiry(PAYER_TXN, 4)
.hasRecordedScheduledTxn(),
sleepFor(5000),
cryptoCreate("foo").via(TRIGGERING_TXN),
@@ -1096,7 +1097,7 @@ final Stream<DynamicTest> scheduledFreezeWithUnauthorizedPayerFails() {
.given(cryptoCreate(PAYING_ACCOUNT).via(PAYER_TXN), cryptoCreate(PAYING_ACCOUNT_2))
.when()
.then(flattened(
- scheduleFakeUpgrade(PAYING_ACCOUNT, PAYER_TXN, 4, "test"),
+ scheduleFakeUpgrade(PAYING_ACCOUNT, 4, "test"),
// future throttles will be exceeded because there is no throttle
// for freeze
// and the custom payer is not exempt from throttles like and admin
@@ -1244,77 +1245,16 @@ final Stream<DynamicTest> scheduledSystemDeleteWorksAsExpected() {
@HapiTest
@Order(21)
final Stream<DynamicTest> scheduledSystemDeleteUnauthorizedPayerFails() {
-
- return defaultHapiSpec("ScheduledSystemDeleteUnauthorizedPayerFailsAtExpiry")
- .given(
- cryptoCreate(PAYING_ACCOUNT).via(PAYER_TXN),
- cryptoCreate(PAYING_ACCOUNT_2),
- fileCreate("misc").lifetime(THREE_MONTHS_IN_SECONDS).contents(ORIG_FILE))
- .when()
- .then(
- scheduleCreate(VALID_SCHEDULE, systemFileDelete("misc").updatingExpiry(1L))
- .withEntityMemo(randomUppercase(100))
- .designatingPayer(PAYING_ACCOUNT_2)
- .payingWith(PAYING_ACCOUNT)
- .waitForExpiry()
- .withRelativeExpiry(PAYER_TXN, 4)
- // future throttles will be exceeded because there is no throttle
- // for system delete
- // and the custom payer is not exempt from throttles like and admin
- // user would be
- // todo future throttle is not implemented yet
- // .hasKnownStatus(SCHEDULE_FUTURE_THROTTLE_EXCEEDED)
- );
+ return hapiTest(
+ cryptoCreate(PAYING_ACCOUNT).via(PAYER_TXN),
+ cryptoCreate(PAYING_ACCOUNT_2),
+ fileCreate("misc").lifetime(THREE_MONTHS_IN_SECONDS).contents(ORIG_FILE),
+ scheduleCreate(VALID_SCHEDULE, systemFileDelete("misc").updatingExpiry(1L))
+ .withEntityMemo(randomUppercase(100))
+ .designatingPayer(PAYING_ACCOUNT_2)
+ .payingWith(PAYING_ACCOUNT)
+ .waitForExpiry()
+ .withRelativeExpiry(PAYER_TXN, 4)
+ .hasKnownStatus(SCHEDULE_EXPIRY_IS_BUSY));
}
-
- // todo throttles are not implemented yet!
- // @HapiTest
- // final Stream<DynamicTest> futureThrottlesAreRespected() {
- // var artificialLimits = protoDefsFromResource("testSystemFiles/artificial-limits-schedule.json");
- // var defaultThrottles = protoDefsFromResource("testSystemFiles/throttles-dev.json");
- //
- // return defaultHapiSpec("FutureThrottlesAreRespected")
- // .given(
- // cryptoCreate(SENDER).balance(ONE_MILLION_HBARS).via(SENDER_TXN),
- // cryptoCreate(RECEIVER),
- // overriding(SCHEDULING_MAX_TXN_PER_SECOND, "100"),
- // fileUpdate(THROTTLE_DEFS)
- // .payingWith(EXCHANGE_RATE_CONTROL)
- // .contents(artificialLimits.toByteArray()),
- // sleepFor(500))
- // .when(
- // blockingOrder(IntStream.range(0, 17)
- // .mapToObj(i -> new HapiSpecOperation[] {
- // scheduleCreate(
- // "twoSigXfer" + i,
- // cryptoTransfer(tinyBarsFromTo(SENDER, RECEIVER, 1))
- // .fee(ONE_HBAR))
- // .withEntityMemo(randomUppercase(100))
- // .payingWith(SENDER)
- // .waitForExpiry()
- // .withRelativeExpiry(SENDER_TXN, 120),
- // })
- // .flatMap(Arrays::stream)
- // .toArray(HapiSpecOperation[]::new)),
- // scheduleCreate(
- // "twoSigXfer",
- // cryptoTransfer(tinyBarsFromTo(SENDER, RECEIVER, 1))
- // .fee(ONE_HBAR))
- // .withEntityMemo(randomUppercase(100))
- // .payingWith(SENDER)
- // .waitForExpiry()
- // .withRelativeExpiry(SENDER_TXN, 120)
- // .hasKnownStatus(SCHEDULE_FUTURE_THROTTLE_EXCEEDED))
- // .then(
- // overriding(
- // SCHEDULING_MAX_TXN_PER_SECOND,
- // HapiSpecSetup.getDefaultNodeProps().get(SCHEDULING_MAX_TXN_PER_SECOND)),
- // fileUpdate(THROTTLE_DEFS)
- // .fee(ONE_HUNDRED_HBARS)
- // .payingWith(EXCHANGE_RATE_CONTROL)
- // .contents(defaultThrottles.toByteArray()),
- // cryptoTransfer(HapiCryptoTransfer.tinyBarsFromTo(GENESIS, FUNDING, 1))
- // .payingWith(GENESIS));
- // }
-
}
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/hip423/ScheduleLongTermSignTest.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/hip423/ScheduleLongTermSignTest.java
index 784831d10764..264a3129494a 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/hip423/ScheduleLongTermSignTest.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/hip423/ScheduleLongTermSignTest.java
@@ -17,6 +17,7 @@
package com.hedera.services.bdd.suites.hip423;
import static com.hedera.services.bdd.spec.HapiSpec.defaultHapiSpec;
+import static com.hedera.services.bdd.spec.HapiSpec.hapiTest;
import static com.hedera.services.bdd.spec.assertions.AccountInfoAsserts.changeFromSnapshot;
import static com.hedera.services.bdd.spec.keys.ControlForKey.forKey;
import static com.hedera.services.bdd.spec.keys.KeyShape.sigs;
@@ -206,7 +207,7 @@ final Stream<DynamicTest> reductionInSigningReqsAllowsTxnToGoThrough() {
@HapiTest
@Order(3)
- final Stream<DynamicTest> reductionInSigningReqsAllowsTxnToGoThroughAtExpiryWithNoWaitForExpiry() {
+ final Stream<DynamicTest> reductionInSigningReqsAllowsTxnToGoThroughAtExpiryWithWaitForExpiry() {
var senderShape = threshOf(2, threshOf(1, 3), threshOf(1, 3), threshOf(2, 3));
var sigOne = senderShape.signedWith(sigs(sigs(OFF, OFF, ON), sigs(OFF, OFF, OFF), sigs(OFF, OFF, OFF)));
var firstSigThree = senderShape.signedWith(sigs(sigs(OFF, OFF, OFF), sigs(OFF, OFF, OFF), sigs(ON, OFF, OFF)));
@@ -215,38 +216,36 @@ final Stream reductionInSigningReqsAllowsTxnToGoThroughAtExpiryWith
String schedule = "Z";
String senderKey = "sKey";
- return defaultHapiSpec("ReductionInSigningReqsAllowsTxnToGoThroughAtExpiryWithNoWaitForExpiry")
- .given(
- newKeyNamed(senderKey).shape(senderShape),
- keyFromMutation(NEW_SENDER_KEY, senderKey).changing(this::lowerThirdNestedThresholdSigningReq),
- cryptoCreate(sender).key(senderKey).via(SENDER_TXN),
- cryptoCreate(receiver).balance(0L),
- scheduleCreate(schedule, cryptoTransfer(tinyBarsFromTo(sender, receiver, 1)))
- .payingWith(DEFAULT_PAYER)
- .withRelativeExpiry(SENDER_TXN, 5)
- .recordingScheduledTxn()
- .alsoSigningWith(sender)
- .sigControl(ControlForKey.forKey(senderKey, sigOne)),
- getAccountBalance(receiver).hasTinyBars(0L))
- .when(
- scheduleSign(schedule)
- .alsoSigningWith(NEW_SENDER_KEY)
- .sigControl(forKey(NEW_SENDER_KEY, firstSigThree)),
- getAccountBalance(receiver).hasTinyBars(0L),
- cryptoUpdate(sender).key(NEW_SENDER_KEY),
- getAccountBalance(receiver).hasTinyBars(0L))
- .then(
- getScheduleInfo(schedule)
- .hasScheduleId(schedule)
- .hasWaitForExpiry(false)
- .isNotExecuted()
- .isNotDeleted()
- .hasRelativeExpiry(SENDER_TXN, 5)
- .hasRecordedScheduledTxn(),
- sleepFor(TimeUnit.SECONDS.toMillis(6)),
- cryptoCreate("foo"),
- sleepFor(500),
- getAccountBalance(receiver).hasTinyBars(1L));
+ return hapiTest(
+ newKeyNamed(senderKey).shape(senderShape),
+ keyFromMutation(NEW_SENDER_KEY, senderKey).changing(this::lowerThirdNestedThresholdSigningReq),
+ cryptoCreate(sender).key(senderKey).via(SENDER_TXN),
+ cryptoCreate(receiver).balance(0L),
+ scheduleCreate(schedule, cryptoTransfer(tinyBarsFromTo(sender, receiver, 1)))
+ .payingWith(DEFAULT_PAYER)
+ .waitForExpiry()
+ .withRelativeExpiry(SENDER_TXN, 5)
+ .recordingScheduledTxn()
+ .alsoSigningWith(sender)
+ .sigControl(ControlForKey.forKey(senderKey, sigOne)),
+ getAccountBalance(receiver).hasTinyBars(0L),
+ scheduleSign(schedule)
+ .alsoSigningWith(NEW_SENDER_KEY)
+ .sigControl(forKey(NEW_SENDER_KEY, firstSigThree)),
+ getAccountBalance(receiver).hasTinyBars(0L),
+ cryptoUpdate(sender).key(NEW_SENDER_KEY),
+ getAccountBalance(receiver).hasTinyBars(0L),
+ getScheduleInfo(schedule)
+ .hasScheduleId(schedule)
+ .hasWaitForExpiry(true)
+ .isNotExecuted()
+ .isNotDeleted()
+ .hasRelativeExpiry(SENDER_TXN, 5)
+ .hasRecordedScheduledTxn(),
+ sleepFor(TimeUnit.SECONDS.toMillis(6)),
+ cryptoCreate("foo"),
+ sleepFor(500),
+ getAccountBalance(receiver).hasTinyBars(1L));
}
@HapiTest
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/integration/ConcurrentIntegrationTests.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/integration/ConcurrentIntegrationTests.java
index eb1019d6eef4..9424f10b3c7d 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/integration/ConcurrentIntegrationTests.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/integration/ConcurrentIntegrationTests.java
@@ -19,7 +19,6 @@
import static com.hedera.hapi.node.base.HederaFunctionality.NODE_STAKE_UPDATE;
import static com.hedera.hapi.node.base.ResponseCodeEnum.BUSY;
import static com.hedera.hapi.node.base.ResponseCodeEnum.FAIL_INVALID;
-import static com.hedera.node.app.blocks.schemas.V0560BlockStreamSchema.BLOCK_STREAM_INFO_KEY;
import static com.hedera.node.app.roster.schemas.V0540RosterSchema.ROSTER_KEY;
import static com.hedera.node.app.roster.schemas.V0540RosterSchema.ROSTER_STATES_KEY;
import static com.hedera.services.bdd.junit.EmbeddedReason.MANIPULATES_EVENT_VERSION;
@@ -40,7 +39,6 @@
import static com.hedera.services.bdd.spec.transactions.TxnVerbs.nodeCreate;
import static com.hedera.services.bdd.spec.transactions.TxnVerbs.tokenCreate;
import static com.hedera.services.bdd.spec.transactions.crypto.HapiCryptoTransfer.tinyBarsFromTo;
-import static com.hedera.services.bdd.spec.utilops.EmbeddedVerbs.mutateScheduleExpiries;
import static com.hedera.services.bdd.spec.utilops.EmbeddedVerbs.mutateToken;
import static com.hedera.services.bdd.spec.utilops.EmbeddedVerbs.simulatePostUpgradeTransaction;
import static com.hedera.services.bdd.spec.utilops.EmbeddedVerbs.viewMappedValue;
@@ -52,7 +50,6 @@
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.freezeUpgrade;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.mutateNode;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.prepareUpgrade;
-import static com.hedera.services.bdd.spec.utilops.UtilVerbs.sleepFor;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.sourcing;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.updateSpecialFile;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.usingVersion;
@@ -75,14 +72,9 @@
import com.hedera.hapi.block.stream.output.TransactionResult;
import com.hedera.hapi.node.base.AccountID;
import com.hedera.hapi.node.base.ResponseCodeEnum;
-import com.hedera.hapi.node.base.ScheduleID;
-import com.hedera.hapi.node.state.blockstream.BlockStreamInfo;
import com.hedera.hapi.node.state.primitives.ProtoBytes;
-import com.hedera.hapi.node.state.primitives.ProtoLong;
import com.hedera.hapi.node.state.roster.Roster;
import com.hedera.hapi.node.state.roster.RosterState;
-import com.hedera.hapi.node.state.schedule.ScheduleIdList;
-import com.hedera.node.app.blocks.BlockStreamService;
import com.hedera.node.app.roster.RosterService;
import com.hedera.services.bdd.junit.BootstrapOverride;
import com.hedera.services.bdd.junit.EmbeddedHapiTest;
@@ -97,7 +89,6 @@
import edu.umd.cs.findbugs.annotations.NonNull;
import java.security.cert.CertificateEncodingException;
import java.security.cert.X509Certificate;
-import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicInteger;
@@ -108,8 +99,10 @@
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.DynamicTest;
+import org.junit.jupiter.api.Order;
import org.junit.jupiter.api.Tag;
+@Order(0)
@Tag(INTEGRATION)
@TargetEmbeddedMode(CONCURRENT)
public class ConcurrentIntegrationTests {
@@ -215,37 +208,6 @@ final Stream failInvalidDuringDispatchRechargesFees() {
Optional.ofNullable(amount == ONE_HUNDRED_HBARS ? "Fee was not recharged" : null)));
}
- @GenesisHapiTest
- @DisplayName("fail invalid outside dispatch does not attempt to charge fees")
- final Stream failInvalidOutsideDispatchDoesNotAttemptToChargeFees() {
- final AtomicReference<BlockStreamInfo> blockStreamInfo = new AtomicReference<>();
- final List<ScheduleID> corruptedScheduleIds = new ArrayList<>();
- corruptedScheduleIds.add(null);
- return hapiTest(
- blockStreamMustIncludePassFrom(spec -> blockWithResultOf(FAIL_INVALID)),
- cryptoCreate("civilian").balance(ONE_HUNDRED_HBARS),
- // Ensure the block with the previous transaction is sealed
- sleepFor(100),
- // Get the last interval process time from state
- viewSingleton(BlockStreamService.NAME, BLOCK_STREAM_INFO_KEY, blockStreamInfo::set),
- // Ensure the next transaction is in a new second
- sleepFor(1000),
- // Corrupt the state by putting invalid expiring schedules into state
- sourcing(() -> mutateScheduleExpiries(state -> state.put(
- new ProtoLong(blockStreamInfo
- .get()
- .lastIntervalProcessTimeOrThrow()
- .seconds()),
- new ScheduleIdList(corruptedScheduleIds)))),
- cryptoTransfer(tinyBarsFromTo("civilian", FUNDING, 1))
- .fee(ONE_HBAR)
- .hasKnownStatus(com.hederahashgraph.api.proto.java.ResponseCodeEnum.FAIL_INVALID),
- // Confirm the payer was still charged a non-zero fee
- getAccountBalance("civilian")
- .hasTinyBars(spec -> amount ->
- Optional.ofNullable(amount != ONE_HUNDRED_HBARS ? "Fee still charged" : null)));
- }
-
@GenesisHapiTest(bootstrapOverrides = {@BootstrapOverride(key = "addressBook.useRosterLifecycle", value = "true")})
@DisplayName("freeze upgrade with roster lifecycle sets candidate roster")
final Stream freezeUpgradeWithRosterLifecycleSetsCandidateRoster()
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/integration/RepeatableHip423Tests.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/integration/RepeatableHip423Tests.java
new file mode 100644
index 000000000000..fb63b6cfce02
--- /dev/null
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/integration/RepeatableHip423Tests.java
@@ -0,0 +1,651 @@
+/*
+ * Copyright (C) 2024 Hedera Hashgraph, LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.hedera.services.bdd.suites.integration;
+
+import static com.hedera.hapi.node.base.ResponseCodeEnum.INVALID_SIGNATURE;
+import static com.hedera.hapi.node.base.ResponseCodeEnum.SUCCESS;
+import static com.hedera.node.app.hapi.utils.CommonPbjConverters.protoToPbj;
+import static com.hedera.node.app.service.schedule.impl.ScheduleStoreUtility.calculateBytesHash;
+import static com.hedera.node.app.service.schedule.impl.schemas.V0490ScheduleSchema.SCHEDULES_BY_ID_KEY;
+import static com.hedera.node.app.service.schedule.impl.schemas.V0570ScheduleSchema.SCHEDULED_COUNTS_KEY;
+import static com.hedera.node.app.service.schedule.impl.schemas.V0570ScheduleSchema.SCHEDULED_ORDERS_KEY;
+import static com.hedera.node.app.service.schedule.impl.schemas.V0570ScheduleSchema.SCHEDULED_USAGES_KEY;
+import static com.hedera.node.app.service.schedule.impl.schemas.V0570ScheduleSchema.SCHEDULE_ID_BY_EQUALITY_KEY;
+import static com.hedera.services.bdd.junit.RepeatableReason.NEEDS_LAST_ASSIGNED_CONSENSUS_TIME;
+import static com.hedera.services.bdd.junit.RepeatableReason.NEEDS_STATE_ACCESS;
+import static com.hedera.services.bdd.junit.RepeatableReason.NEEDS_VIRTUAL_TIME_FOR_FAST_EXECUTION;
+import static com.hedera.services.bdd.junit.RepeatableReason.THROTTLE_OVERRIDES;
+import static com.hedera.services.bdd.junit.TestTags.INTEGRATION;
+import static com.hedera.services.bdd.junit.hedera.embedded.EmbeddedMode.REPEATABLE;
+import static com.hedera.services.bdd.spec.HapiSpec.hapiTest;
+import static com.hedera.services.bdd.spec.queries.QueryVerbs.getAccountBalance;
+import static com.hedera.services.bdd.spec.queries.QueryVerbs.getTxnRecord;
+import static com.hedera.services.bdd.spec.transactions.TxnVerbs.createTopic;
+import static com.hedera.services.bdd.spec.transactions.TxnVerbs.cryptoCreate;
+import static com.hedera.services.bdd.spec.transactions.TxnVerbs.cryptoTransfer;
+import static com.hedera.services.bdd.spec.transactions.TxnVerbs.scheduleCreate;
+import static com.hedera.services.bdd.spec.transactions.TxnVerbs.scheduleDelete;
+import static com.hedera.services.bdd.spec.transactions.crypto.HapiCryptoTransfer.tinyBarsFromTo;
+import static com.hedera.services.bdd.spec.utilops.CustomSpecAssert.allRunFor;
+import static com.hedera.services.bdd.spec.utilops.EmbeddedVerbs.exposeMaxSchedulable;
+import static com.hedera.services.bdd.spec.utilops.UtilVerbs.blockStreamMustIncludePassFrom;
+import static com.hedera.services.bdd.spec.utilops.UtilVerbs.blockingOrder;
+import static com.hedera.services.bdd.spec.utilops.UtilVerbs.doAdhoc;
+import static com.hedera.services.bdd.spec.utilops.UtilVerbs.doWithStartupConfig;
+import static com.hedera.services.bdd.spec.utilops.UtilVerbs.doWithStartupConfigNow;
+import static com.hedera.services.bdd.spec.utilops.UtilVerbs.doingContextual;
+import static com.hedera.services.bdd.spec.utilops.UtilVerbs.exposeSpecSecondTo;
+import static com.hedera.services.bdd.spec.utilops.UtilVerbs.newKeyNamed;
+import static com.hedera.services.bdd.spec.utilops.UtilVerbs.overriding;
+import static com.hedera.services.bdd.spec.utilops.UtilVerbs.overridingAllOf;
+import static com.hedera.services.bdd.spec.utilops.UtilVerbs.sleepFor;
+import static com.hedera.services.bdd.spec.utilops.UtilVerbs.sleepForSeconds;
+import static com.hedera.services.bdd.spec.utilops.UtilVerbs.sourcing;
+import static com.hedera.services.bdd.spec.utilops.UtilVerbs.sourcingContextual;
+import static com.hedera.services.bdd.spec.utilops.UtilVerbs.waitUntilStartOfNextStakingPeriod;
+import static com.hedera.services.bdd.spec.utilops.UtilVerbs.withOpContext;
+import static com.hedera.services.bdd.suites.HapiSuite.CIVILIAN_PAYER;
+import static com.hedera.services.bdd.suites.HapiSuite.DEFAULT_PAYER;
+import static com.hedera.services.bdd.suites.HapiSuite.FUNDING;
+import static com.hedera.services.bdd.suites.HapiSuite.ONE_HBAR;
+import static com.hedera.services.bdd.suites.HapiSuite.ONE_HUNDRED_HBARS;
+import static com.hedera.services.bdd.suites.HapiSuite.ONE_MILLION_HBARS;
+import static com.hederahashgraph.api.proto.java.HederaFunctionality.ConsensusCreateTopic;
+import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.BUSY;
+import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.SCHEDULE_EXPIRY_IS_BUSY;
+import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.SCHEDULE_EXPIRY_MUST_BE_FUTURE;
+import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.SCHEDULE_EXPIRY_TOO_LONG;
+import static java.util.Objects.requireNonNull;
+import static java.util.Spliterator.DISTINCT;
+import static java.util.Spliterator.NONNULL;
+import static java.util.Spliterators.spliteratorUnknownSize;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+
+import com.hedera.hapi.block.stream.output.TransactionResult;
+import com.hedera.hapi.node.base.ResponseCodeEnum;
+import com.hedera.hapi.node.base.ScheduleID;
+import com.hedera.hapi.node.base.TimestampSeconds;
+import com.hedera.hapi.node.base.TransactionID;
+import com.hedera.hapi.node.state.primitives.ProtoBytes;
+import com.hedera.hapi.node.state.schedule.Schedule;
+import com.hedera.hapi.node.state.schedule.ScheduledCounts;
+import com.hedera.hapi.node.state.schedule.ScheduledOrder;
+import com.hedera.hapi.node.state.throttles.ThrottleUsageSnapshots;
+import com.hedera.hapi.node.transaction.TransactionBody;
+import com.hedera.node.app.service.schedule.ScheduleService;
+import com.hedera.services.bdd.junit.HapiTestLifecycle;
+import com.hedera.services.bdd.junit.LeakyRepeatableHapiTest;
+import com.hedera.services.bdd.junit.RepeatableHapiTest;
+import com.hedera.services.bdd.junit.TargetEmbeddedMode;
+import com.hedera.services.bdd.junit.support.TestLifecycle;
+import com.hedera.services.bdd.junit.support.translators.inputs.TransactionParts;
+import com.hedera.services.bdd.spec.HapiSpec;
+import com.hedera.services.bdd.spec.SpecOperation;
+import com.hedera.services.bdd.spec.infrastructure.RegistryNotFound;
+import com.hedera.services.bdd.spec.utilops.streams.assertions.BlockStreamAssertion;
+import com.swirlds.state.spi.ReadableKVState;
+import com.swirlds.state.spi.WritableKVState;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.BiConsumer;
+import java.util.function.Consumer;
+import java.util.function.Function;
+import java.util.stream.IntStream;
+import java.util.stream.Stream;
+import java.util.stream.StreamSupport;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.DynamicTest;
+import org.junit.jupiter.api.MethodOrderer.OrderAnnotation;
+import org.junit.jupiter.api.Order;
+import org.junit.jupiter.api.Tag;
+import org.junit.jupiter.api.TestMethodOrder;
+
+@Order(2)
+@Tag(INTEGRATION)
+@HapiTestLifecycle
+@TargetEmbeddedMode(REPEATABLE)
+@TestMethodOrder(OrderAnnotation.class)
+public class RepeatableHip423Tests {
+ private static final long ONE_MINUTE = 60;
+
+ @BeforeAll
+ static void beforeAll(@NonNull final TestLifecycle testLifecycle) {
+ testLifecycle.overrideInClass(Map.of("scheduling.longTermEnabled", "true"));
+ }
+
+ /**
+ * Tests the ingest throttle limits the total number of transactions that can be scheduled in a single second.
+ */
+ @LeakyRepeatableHapiTest(
+ value = NEEDS_LAST_ASSIGNED_CONSENSUS_TIME,
+ overrides = {"scheduling.maxTxnPerSec"})
+ final Stream cannotScheduleTooManyTxnsInOneSecond() {
+ final AtomicLong expiry = new AtomicLong();
+ final var oddLifetime = 123 * ONE_MINUTE;
+ return hapiTest(
+ overriding("scheduling.maxTxnPerSec", "2"),
+ cryptoCreate(CIVILIAN_PAYER).balance(10 * ONE_HUNDRED_HBARS),
+ scheduleCreate("first", cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, FUNDING, 123L)))
+ .payingWith(CIVILIAN_PAYER)
+ .fee(ONE_HBAR)
+ .expiringIn(oddLifetime),
+ // Consensus time advances exactly one second per transaction in repeatable mode
+ exposeSpecSecondTo(now -> expiry.set(now + oddLifetime - 1)),
+ sourcing(() -> scheduleCreate("second", cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, FUNDING, 456L)))
+ .payingWith(CIVILIAN_PAYER)
+ .fee(ONE_HBAR)
+ .expiringAt(expiry.get())),
+ sourcing(() -> scheduleCreate("third", cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, FUNDING, 789)))
+ .payingWith(CIVILIAN_PAYER)
+ .fee(ONE_HBAR)
+ .expiringAt(expiry.get())
+ .hasPrecheck(BUSY)),
+ purgeExpiringWithin(oddLifetime));
+ }
+
+ /**
+ * Tests that expiration time must be in the future---but not too far in the future.
+ */
+ @LeakyRepeatableHapiTest(
+ value = NEEDS_LAST_ASSIGNED_CONSENSUS_TIME,
+ overrides = {"scheduling.maxExpirationFutureSeconds"})
+ final Stream expiryMustBeValid() {
+ final var lastSecond = new AtomicLong();
+ return hapiTest(
+ overriding("scheduling.maxExpirationFutureSeconds", "" + ONE_MINUTE),
+ exposeSpecSecondTo(lastSecond::set),
+ sourcing(() -> scheduleCreate("tooSoon", cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, FUNDING, 12L)))
+ .expiringAt(lastSecond.get())
+ .hasKnownStatus(SCHEDULE_EXPIRY_MUST_BE_FUTURE)),
+ exposeSpecSecondTo(lastSecond::set),
+ sourcing(() -> scheduleCreate("tooLate", cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, FUNDING, 34L)))
+ .expiringAt(lastSecond.get() + 1 + ONE_MINUTE + 1)
+ .hasKnownStatus(SCHEDULE_EXPIRY_TOO_LONG)));
+ }
+
+ /**
+ * Tests that the consensus {@link com.hedera.hapi.node.base.HederaFunctionality#SCHEDULE_CREATE} throttle is
+ * enforced by overriding the dev throttles to the more restrictive mainnet throttles and scheduling one more
+ * {@link com.hedera.hapi.node.base.HederaFunctionality#CONSENSUS_CREATE_TOPIC} that is allowed.
+ */
+ @LeakyRepeatableHapiTest(
+ value = {
+ NEEDS_LAST_ASSIGNED_CONSENSUS_TIME,
+ NEEDS_VIRTUAL_TIME_FOR_FAST_EXECUTION,
+ NEEDS_STATE_ACCESS,
+ THROTTLE_OVERRIDES
+ },
+ overrides = {
+ "scheduling.whitelist",
+ },
+ throttles = "testSystemFiles/mainnet-throttles.json")
+ final Stream throttlingAndExecutionAsExpected() {
+ final var expirySecond = new AtomicLong();
+ final var maxLifetime = new AtomicLong();
+ final var maxSchedulableTopicCreates = new AtomicInteger();
+ return hapiTest(
+ overriding("scheduling.whitelist", "ConsensusCreateTopic"),
+ doWithStartupConfigNow(
+ "scheduling.maxExpirationFutureSeconds",
+ (value, specTime) -> doAdhoc(() -> {
+ maxLifetime.set(Long.parseLong(value));
+ expirySecond.set(specTime.getEpochSecond() + maxLifetime.get());
+ })),
+ cryptoCreate(CIVILIAN_PAYER).balance(ONE_MILLION_HBARS),
+ exposeMaxSchedulable(ConsensusCreateTopic, maxSchedulableTopicCreates::set),
+ // Schedule the maximum number of topic creations allowed
+ sourcing(() -> blockingOrder(IntStream.range(0, maxSchedulableTopicCreates.get())
+ .mapToObj(i -> scheduleCreate(
+ "topic" + i, createTopic("t" + i).topicMemo("m" + i))
+ .expiringAt(expirySecond.get())
+ .payingWith(CIVILIAN_PAYER)
+ .fee(ONE_HUNDRED_HBARS))
+ .toArray(SpecOperation[]::new))),
+ // And confirm the next is throttled
+ sourcing(() -> scheduleCreate(
+ "throttledTopicCreation", createTopic("NTB").topicMemo("NOPE"))
+ .expiringAt(expirySecond.get())
+ .payingWith(CIVILIAN_PAYER)
+ .fee(ONE_HUNDRED_HBARS)
+ .hasKnownStatus(SCHEDULE_EXPIRY_IS_BUSY)),
+ sourcingContextual(spec -> purgeExpiringWithin(maxLifetime.get())));
+ }
+
+ /**
+ * Tests that execution of scheduled transactions purges the associated state as expected when a single
+ * user transaction fully executes multiple seconds. The test uses three scheduled transactions, two of
+ * them in one second and the third one in the next second. After sleeping past the expiration time of
+ * all three transactions, executes them via a single triggering transaction and validates the schedule
+ * state is as expected.
+ */
+ @RepeatableHapiTest(value = {NEEDS_LAST_ASSIGNED_CONSENSUS_TIME, NEEDS_STATE_ACCESS})
+ final Stream executionPurgesScheduleStateAsExpectedInSingleUserTransactions() {
+ final var lastSecond = new AtomicLong();
+ final AtomicReference startingSizes = new AtomicReference<>();
+ final AtomicReference currentSizes = new AtomicReference<>();
+ return hapiTest(
+ viewScheduleStateSizes(startingSizes::set),
+ exposeSpecSecondTo(lastSecond::set),
+ cryptoCreate("luckyYou").balance(0L),
+ // Schedule the three transfers to lucky you
+ sourcing(() -> scheduleCreate("one", cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, "luckyYou", 1L)))
+ .expiringAt(lastSecond.get() + ONE_MINUTE)),
+ sourcing(() -> scheduleCreate("two", cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, "luckyYou", 2L)))
+ .expiringAt(lastSecond.get() + ONE_MINUTE)),
+ sourcing(() -> scheduleCreate("three", cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, "luckyYou", 3L)))
+ .expiringAt(lastSecond.get() + ONE_MINUTE + 1)),
+ viewScheduleStateSizes(currentSizes::set),
+ // Check that schedule state sizes changed as expected
+ doAdhoc(() -> currentSizes.get().assertChangesFrom(startingSizes.get(), 3, 2, 2, 3, 3)),
+ // Let all the schedules expire
+ sleepFor((ONE_MINUTE + 2) * 1_000),
+ // Trigger them all in a single user transaction
+ cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, FUNDING, 1L)),
+ getAccountBalance("luckyYou").hasTinyBars(1L + 2L + 3L),
+ viewScheduleStateSizes(currentSizes::set),
+ doAdhoc(() -> currentSizes.get().assertChangesFrom(startingSizes.get(), 0, 0, 0, 0, 0)));
+ }
+
+ /**
+ * Tests that schedules executed immediately (i.e., with {@code waitForExpiry=false}) and
+ * long-term schedules deleted before expiry still have their associated state purged once
+ * their expiration second passes. The test executes two transfers immediately, then creates
+ * and deletes a third long-term schedule expiring in the same second; after sleeping past
+ * that second, a single triggering transaction must purge all remaining schedule state.
+ */
+ @RepeatableHapiTest(value = {NEEDS_LAST_ASSIGNED_CONSENSUS_TIME, NEEDS_STATE_ACCESS})
+ final Stream executeImmediateAndDeletedLongTermAreStillPurgedWhenTimePasses() {
+ final var lastExecuteImmediateExpiry = new AtomicLong();
+ final AtomicReference startingSizes = new AtomicReference<>();
+ final AtomicReference currentSizes = new AtomicReference<>();
+ return hapiTest(
+ exposeSpecSecondTo(lastExecuteImmediateExpiry::set),
+ newKeyNamed("adminKey"),
+ cryptoCreate("luckyYou").balance(0L),
+ viewScheduleStateSizes(startingSizes::set),
+ scheduleCreate("first", cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, "luckyYou", 1L)))
+ .waitForExpiry(false),
+ scheduleCreate("last", cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, "luckyYou", 2L)))
+ .waitForExpiry(false),
+ getAccountBalance("luckyYou").hasTinyBars(1L + 2L),
+ doingContextual(spec -> lastExecuteImmediateExpiry.set(expiryOf("last", spec))),
+ sourcing(() -> scheduleCreate("deleted", cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, "luckyYou", 3L)))
+ .adminKey("adminKey")
+ .expiringAt(lastExecuteImmediateExpiry.get())),
+ scheduleDelete("deleted").signedBy(DEFAULT_PAYER, "adminKey"),
+ viewScheduleStateSizes(currentSizes::set),
+ doAdhoc(() -> currentSizes.get().assertChangesFrom(startingSizes.get(), 3, 2, 2, 3, 3)),
+ sourcingContextual(spec -> sleepForSeconds(
+ lastExecuteImmediateExpiry.get() - spec.consensusTime().getEpochSecond() + 1)),
+ // Trigger all three to be purged in a single user transaction
+ cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, FUNDING, 1L)),
+ viewScheduleStateSizes(currentSizes::set),
+ doAdhoc(() -> currentSizes.get().assertChangesFrom(startingSizes.get(), 0, 0, 0, 0, 0)));
+ }
+
+ /**
+ * Tests that execution of scheduled transactions purges the associated state as expected when it takes multiple
+ * user transactions to fully execute a given second, by artificially restricting to a single execution per user
+ * transaction. The test uses three scheduled transactions, two of them in one second and the third one in the
+ * next second. After sleeping past the expiration time of all three transactions, executes them in a sequence of
+ * three triggering transactions and validates the schedule state is as expected.
+ */
+ @LeakyRepeatableHapiTest(
+ value = {NEEDS_LAST_ASSIGNED_CONSENSUS_TIME, NEEDS_STATE_ACCESS},
+ overrides = {"scheduling.maxExecutionsPerUserTxn"})
+ final Stream executionPurgesScheduleStateAsExpectedSplitAcrossUserTransactions() {
+ final var lastSecond = new AtomicLong();
+ final AtomicReference firstScheduleHash = new AtomicReference<>();
+ final AtomicReference firstScheduleId = new AtomicReference<>();
+ final AtomicReference startingSizes = new AtomicReference<>();
+ final AtomicReference currentSizes = new AtomicReference<>();
+ return hapiTest(
+ overriding("scheduling.maxExecutionsPerUserTxn", "1"),
+ viewScheduleStateSizes(startingSizes::set),
+ exposeSpecSecondTo(lastSecond::set),
+ cryptoCreate("luckyYou").balance(0L),
+ // Schedule the three transfers to lucky you
+ sourcing(() -> scheduleCreate("one", cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, "luckyYou", 1L)))
+ .expiringAt(lastSecond.get() + ONE_MINUTE)),
+ sourcing(() -> scheduleCreate("two", cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, "luckyYou", 2L)))
+ .expiringAt(lastSecond.get() + ONE_MINUTE)),
+ sourcing(() -> scheduleCreate("three", cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, "luckyYou", 3L)))
+ .expiringAt(lastSecond.get() + ONE_MINUTE + 1)),
+ viewScheduleStateSizes(currentSizes::set),
+ // Check that schedule state sizes changed as expected
+ doAdhoc(() -> currentSizes.get().assertChangesFrom(startingSizes.get(), 3, 2, 2, 3, 3)),
+ // Let all the schedules expire
+ sleepForSeconds(ONE_MINUTE + 2),
+ viewScheduleState((byId, counts, usages, orders, byEquality) -> {
+ final var firstExpiry = lastSecond.get() + ONE_MINUTE;
+ final var firstOrder = new ScheduledOrder(firstExpiry, 0);
+ firstScheduleId.set(requireNonNull(orders.get(firstOrder)));
+ final var firstSchedule = requireNonNull(byId.get(firstScheduleId.get()));
+ final var equalityHash = calculateBytesHash(firstSchedule);
+ firstScheduleHash.set(new ProtoBytes(equalityHash));
+ assertNotNull(byEquality.get(firstScheduleHash.get()), "No equality entry for first schedule");
+ }),
+ // Now execute them one at a time and assert the expected changes to state
+ cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, FUNDING, 1L)),
+ viewScheduleState((byId, counts, usages, orders, byEquality) -> {
+ final var firstExpiry = lastSecond.get() + ONE_MINUTE;
+ final var firstKey = new TimestampSeconds(firstExpiry);
+ final var firstCounts = requireNonNull(counts.get(firstKey));
+ assertEquals(1, firstCounts.numberProcessed(), "Wrong number processed for first expiry");
+ assertEquals(2, firstCounts.numberScheduled(), "Wrong number scheduled for first expiry");
+ assertNotNull(usages.get(firstKey), "No usage snapshot for first expiry");
+ // The first transaction's information should be fully purged
+ final var firstOrder = new ScheduledOrder(firstExpiry, 0);
+ assertNull(orders.get(firstOrder), "Order not purged for first transaction");
+ assertNull(byId.get(firstScheduleId.get()), "Schedule not purged for first transaction");
+ assertNull(byEquality.get(firstScheduleHash.get()), "Equality not purged for first transaction");
+ // The following second should not have changed yet
+ final var secondKey = new TimestampSeconds(firstExpiry + 1);
+ final var secondCounts = requireNonNull(counts.get(secondKey));
+ assertEquals(0, secondCounts.numberProcessed(), "Wrong number processed for second expiry");
+ assertEquals(1, secondCounts.numberScheduled(), "Wrong number scheduled for second expiry");
+ }),
+ getAccountBalance("luckyYou").hasTinyBars(1L),
+ // The second execution, in a separate user transaction
+ cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, FUNDING, 1L)),
+ viewScheduleState((byId, counts, usages, orders, byEquality) -> {
+ final var firstExpiry = lastSecond.get() + ONE_MINUTE;
+ final var firstKey = new TimestampSeconds(firstExpiry);
+ // The counts and usages should be expired for this second
+ assertNull(counts.get(firstKey), "Counts not purged for first expiry");
+ assertNull(usages.get(firstKey), "Usages not purged for first expiry");
+ // Nothing should be different about the following second
+ final var secondKey = new TimestampSeconds(firstExpiry + 1);
+ final var secondCounts = requireNonNull(counts.get(secondKey));
+ assertEquals(0, secondCounts.numberProcessed(), "Wrong number processed for second expiry");
+ assertEquals(1, secondCounts.numberScheduled(), "Wrong number scheduled for second expiry");
+ }),
+ getAccountBalance("luckyYou").hasTinyBars(1L + 2L),
+ // The third execution, again in a separate user transaction
+ cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, FUNDING, 1L)),
+ viewScheduleState((byId, counts, usages, orders, byEquality) -> {
+ // Now everything should be purged
+ final var firstExpiry = lastSecond.get() + ONE_MINUTE;
+ final var secondKey = new TimestampSeconds(firstExpiry + 1);
+ assertNull(counts.get(secondKey), "Counts not purged for second expiry");
+ assertNull(usages.get(secondKey), "Usages not purged for second expiry");
+ }),
+ getAccountBalance("luckyYou").hasTinyBars(1L + 2L + 3L));
+ }
+
+ /**
+ * Tests that a "backlog" of scheduled transactions to execute does not affect detection of stake period
+ * boundary crossings.
+ */
+ @LeakyRepeatableHapiTest(
+ value = {NEEDS_LAST_ASSIGNED_CONSENSUS_TIME, NEEDS_VIRTUAL_TIME_FOR_FAST_EXECUTION},
+ overrides = {"scheduling.maxExecutionsPerUserTxn"})
+ final Stream lastProcessTimeDoesNotAffectStakePeriodBoundaryCrossingDetection() {
+ final var lastSecond = new AtomicLong();
+ final var stakePeriodMins = new AtomicLong();
+ return hapiTest(
+ overriding("scheduling.maxExecutionsPerUserTxn", "1"),
+ doWithStartupConfig(
+ "staking.periodMins", value -> doAdhoc(() -> stakePeriodMins.set(Long.parseLong(value)))),
+ sourcing(() -> waitUntilStartOfNextStakingPeriod(stakePeriodMins.get())),
+ exposeSpecSecondTo(lastSecond::set),
+ cryptoCreate("luckyYou").balance(0L),
+ // Schedule the three transfers to lucky you
+ sourcing(() -> scheduleCreate("one", cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, "luckyYou", 1L)))
+ .expiringAt(lastSecond.get() + stakePeriodMins.get() * ONE_MINUTE - 1)),
+ sourcing(() -> scheduleCreate("two", cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, "luckyYou", 2L)))
+ .expiringAt(lastSecond.get() + stakePeriodMins.get() * ONE_MINUTE - 1)),
+ sourcing(() -> scheduleCreate("three", cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, "luckyYou", 3L)))
+ .expiringAt(lastSecond.get() + stakePeriodMins.get() * ONE_MINUTE - 1)),
+ sourcing(() -> waitUntilStartOfNextStakingPeriod(stakePeriodMins.get())),
+ // Now execute them one at a time and assert the expected changes to state
+ cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, FUNDING, 1L)).via("boundaryCrossing"),
+ getAccountBalance("luckyYou").hasTinyBars(1L),
+ getTxnRecord("boundaryCrossing").hasChildRecordCount(1),
+ cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, FUNDING, 2L)).via("undistinguishedOne"),
+ getTxnRecord("undistinguishedOne").hasChildRecordCount(0),
+ getAccountBalance("luckyYou").hasTinyBars(1L + 2L),
+ cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, FUNDING, 2L)).via("undistinguishedTwo"),
+ getTxnRecord("undistinguishedTwo").hasChildRecordCount(0),
+ getAccountBalance("luckyYou").hasTinyBars(1L + 2L + 3L));
+ }
+
+ /**
+ * Tests that execution of scheduled transactions purges the associated state as expected when multiple
+ * user transactions are required due to running out of consensus times. The test uses four scheduled
+ * transactions, two of them in one second and two of them a few seconds later. After sleeping past
+ * the expiration time of all four transactions, executes them via two triggering transactions, the first
+ * of which has available consensus times for three transactions and the second of which has available
+ * consensus times for the fourth transaction.
+ */
+ @LeakyRepeatableHapiTest(
+ value = {NEEDS_LAST_ASSIGNED_CONSENSUS_TIME, NEEDS_STATE_ACCESS},
+ overrides = {
+ "consensus.handle.maxPrecedingRecords",
+ "consensus.handle.maxFollowingRecords",
+ "scheduling.consTimeSeparationNanos",
+ })
+ final Stream executionPurgesScheduleStateAsWhenRunningOutOfConsensusTimes() {
+ final var lastSecond = new AtomicLong();
+ final AtomicReference startingSizes = new AtomicReference<>();
+ final AtomicReference currentSizes = new AtomicReference<>();
+ return hapiTest(
+ exposeSpecSecondTo(lastSecond::set),
+ cryptoCreate("luckyYou").balance(0L),
+ // From time T, the first transfer will be at T+3, the second at T+6, and the third at T+9;
+ // so for a T+12 attempt to run out of time, the separating nanos must be no more than 15
+ overridingAllOf(Map.of(
+ "consensus.handle.maxPrecedingRecords", "2",
+ "consensus.handle.maxFollowingRecords", "1",
+ "scheduling.consTimeSeparationNanos", "15")),
+ // Schedule the four transfers to lucky you
+ sourcing(() -> scheduleCreate("one", cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, "luckyYou", 1L)))
+ .expiringAt(lastSecond.get() + ONE_MINUTE)),
+ sourcing(() -> scheduleCreate("two", cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, "luckyYou", 2L)))
+ .expiringAt(lastSecond.get() + ONE_MINUTE)),
+ sourcing(() -> scheduleCreate("three", cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, "luckyYou", 3L)))
+ .expiringAt(lastSecond.get() + ONE_MINUTE + 3)),
+ sourcing(() -> scheduleCreate("four", cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, "luckyYou", 4L)))
+ .expiringAt(lastSecond.get() + ONE_MINUTE + 3)),
+ // Let all the schedules expire
+ sleepFor((ONE_MINUTE + 4) * 1_000),
+ viewScheduleStateSizes(startingSizes::set),
+ // Trigger as many as possible in a single user transaction
+ cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, FUNDING, 1L)),
+ // Verify that was only the first three
+ getAccountBalance("luckyYou").hasTinyBars(1L + 2L + 3L),
+ viewScheduleStateSizes(currentSizes::set),
+ doAdhoc(() -> currentSizes.get().assertChangesFrom(startingSizes.get(), -3, -1, -1, -3, -3)),
+ // Then trigger the last one
+ cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, FUNDING, 1L)),
+ getAccountBalance("luckyYou").hasTinyBars(1L + 2L + 3L + 4L),
+ viewScheduleStateSizes(currentSizes::set),
+ doAdhoc(() -> currentSizes.get().assertChangesFrom(startingSizes.get(), -4, -2, -2, -4, -4)));
+ }
+
+ @RepeatableHapiTest(NEEDS_VIRTUAL_TIME_FOR_FAST_EXECUTION)
+ final Stream executionResultsAreStreamedAsExpected() {
+ return hapiTest(
+ blockStreamMustIncludePassFrom(scheduledExecutionResult("one", withStatus(SUCCESS))),
+ blockStreamMustIncludePassFrom(scheduledExecutionResult("two", withStatus(INVALID_SIGNATURE))),
+ cryptoCreate("luckyYou").balance(0L),
+ cryptoCreate("cautiousYou").balance(0L).receiverSigRequired(true),
+ sourcing(
+ () -> scheduleCreate("payerOnly", cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, "luckyYou", 1L)))
+ .expiringIn(ONE_MINUTE)
+ .via("one")),
+ sourcing(() -> scheduleCreate(
+ "receiverSigRequired", cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, "cautiousYou", 2L)))
+ .expiringIn(ONE_MINUTE)
+ .via("two")),
+ sleepForSeconds(ONE_MINUTE),
+ // Trigger the executions
+ cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, FUNDING, 1L)));
+ }
+
+ private static BiConsumer withStatus(@NonNull final ResponseCodeEnum status) {
+ requireNonNull(status);
+ return (body, result) -> assertEquals(status, result.status());
+ }
+
+ private static Function scheduledExecutionResult(
+ @NonNull final String creationTxn, @NonNull final BiConsumer observer) {
+ requireNonNull(creationTxn);
+ requireNonNull(observer);
+ return spec -> block -> {
+ final com.hederahashgraph.api.proto.java.TransactionID creationTxnId;
+ try {
+ creationTxnId = spec.registry().getTxnId(creationTxn);
+ } catch (RegistryNotFound ignore) {
+ return false;
+ }
+ final var executionTxnId =
+ protoToPbj(creationTxnId.toBuilder().setScheduled(true).build(), TransactionID.class);
+ final var items = block.items();
+ for (int i = 0, n = items.size(); i < n; i++) {
+ final var item = items.get(i);
+ if (item.hasEventTransaction()) {
+ final var parts =
+ TransactionParts.from(item.eventTransactionOrThrow().applicationTransactionOrThrow());
+ if (parts.transactionIdOrThrow().equals(executionTxnId)) {
+ for (int j = i + 1; j < n; j++) {
+ final var followingItem = items.get(j);
+ if (followingItem.hasTransactionResult()) {
+ observer.accept(parts.body(), followingItem.transactionResultOrThrow());
+ return true;
+ }
+ }
+ }
+ }
+ }
+ return false;
+ };
+ }
+
+ private record ScheduleStateSizes(
+ int schedulesById,
+ int scheduledCounts,
+ int scheduledUsages,
+ int scheduledOrders,
+ int scheduleIdByEquality) {
+ /**
+ * Asserts that the changes from a starting state are as expected.
+ * @param startingSizes the starting state sizes
+ * @param schedulesById the expected change in the number of schedules by ID
+ * @param scheduledCounts the expected change in the number of scheduled counts
+ * @param scheduledUsages the expected change in the number of scheduled usages
+ * @param scheduledOrders the expected change in the number of scheduled orders
+ * @param scheduleIdByEquality the expected change in the number of schedules by equality
+ */
+ public void assertChangesFrom(
+ @NonNull final ScheduleStateSizes startingSizes,
+ final int schedulesById,
+ final int scheduledCounts,
+ final int scheduledUsages,
+ final int scheduledOrders,
+ final int scheduleIdByEquality) {
+ requireNonNull(startingSizes);
+ assertEquals(
+ startingSizes.schedulesById + schedulesById, this.schedulesById, "Wrong number of schedules by ID");
+ assertEquals(
+ startingSizes.scheduledCounts + scheduledCounts,
+ this.scheduledCounts,
+ "Wrong number of scheduled counts");
+ assertEquals(
+ startingSizes.scheduledUsages + scheduledUsages,
+ this.scheduledUsages,
+ "Wrong number of scheduled usages");
+ assertEquals(
+ startingSizes.scheduledOrders + scheduledOrders,
+ this.scheduledOrders,
+ "Wrong number of scheduled orders");
+ assertEquals(
+ startingSizes.scheduleIdByEquality + scheduleIdByEquality,
+ this.scheduleIdByEquality,
+ "Wrong number of schedules by equality");
+ }
+ }
+
+ private interface ScheduleStateConsumer {
+ void accept(
+ @NonNull ReadableKVState schedulesById,
+ @NonNull ReadableKVState scheduledCounts,
+ @NonNull ReadableKVState scheduledUsages,
+ @NonNull ReadableKVState scheduledOrders,
+ @NonNull ReadableKVState scheduleIdByEquality);
+ }
+
+ private static SpecOperation viewScheduleStateSizes(@NonNull final Consumer consumer) {
+ return viewScheduleState((byId, counts, usages, orders, byEquality) -> consumer.accept(new ScheduleStateSizes(
+ (int) byId.size(), (int) counts.size(), (int) usages.size(), (int) orders.size(), (int)
+ byEquality.size())));
+ }
+
+ private static SpecOperation viewScheduleState(@NonNull final ScheduleStateConsumer consumer) {
+ return withOpContext((spec, opLog) -> {
+ final var state = spec.embeddedStateOrThrow();
+ final var readableStates = state.getReadableStates(ScheduleService.NAME);
+ consumer.accept(
+ readableStates.get(SCHEDULES_BY_ID_KEY),
+ readableStates.get(SCHEDULED_COUNTS_KEY),
+ readableStates.get(SCHEDULED_USAGES_KEY),
+ readableStates.get(SCHEDULED_ORDERS_KEY),
+ readableStates.get(SCHEDULE_ID_BY_EQUALITY_KEY));
+ });
+ }
+
+ private static SpecOperation purgeExpiringWithin(final long seconds) {
+ return doingContextual(spec -> {
+ final var lastExpiry = spec.consensusTime().getEpochSecond() + seconds;
+ allRunFor(spec, sleepFor(seconds * 1_000L));
+ final WritableKVState counts = spec.embeddedStateOrThrow()
+ .getWritableStates(ScheduleService.NAME)
+ .get(SCHEDULED_COUNTS_KEY);
+ final int numEarlier =
+ (int) StreamSupport.stream(spliteratorUnknownSize(counts.keys(), DISTINCT | NONNULL), false)
+ .filter(k -> k.seconds() <= lastExpiry)
+ .count();
+ final var expectedSize = (int) counts.size() - numEarlier;
+ for (int i = 0; i < numEarlier; i++) {
+ allRunFor(spec, cryptoTransfer(tinyBarsFromTo(DEFAULT_PAYER, FUNDING, 1L)));
+ if (counts.size() == expectedSize) {
+ break;
+ }
+ }
+ assertEquals(expectedSize, counts.size(), "Failed to purge all expired seconds");
+ });
+ }
+
+ /**
+ * Returns the calculated expiration second of the given schedule in the given spec.
+ * @param schedule the name of the schedule
+ * @param spec the spec
+ * @return the calculated expiration second of the schedule
+ */
+ private static long expiryOf(@NonNull final String schedule, @NonNull final HapiSpec spec) {
+ final ReadableKVState schedules = spec.embeddedStateOrThrow()
+ .getReadableStates(ScheduleService.NAME)
+ .get(SCHEDULES_BY_ID_KEY);
+ return requireNonNull(schedules.get(protoToPbj(spec.registry().getScheduleId(schedule), ScheduleID.class)))
+ .calculatedExpirationSecond();
+ }
+}
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/integration/RepeatableIntegrationTests.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/integration/RepeatableTssTests.java
similarity index 99%
rename from hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/integration/RepeatableIntegrationTests.java
rename to hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/integration/RepeatableTssTests.java
index 1d2119f40967..2f4d5aa82a74 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/integration/RepeatableIntegrationTests.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/integration/RepeatableTssTests.java
@@ -52,11 +52,13 @@
import java.util.List;
import java.util.stream.Stream;
import org.junit.jupiter.api.DynamicTest;
+import org.junit.jupiter.api.Order;
import org.junit.jupiter.api.Tag;
+@Order(1)
@Tag(INTEGRATION)
@TargetEmbeddedMode(REPEATABLE)
-public class RepeatableIntegrationTests {
+public class RepeatableTssTests {
/**
* Validates behavior of the {@link BlockStreamManager} under specific conditions related to signature requests
* and block creation.
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/FutureSchedulableOpsTest.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/FutureSchedulableOpsTest.java
index a6c86223a989..31778f75b8e7 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/FutureSchedulableOpsTest.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/FutureSchedulableOpsTest.java
@@ -44,8 +44,11 @@
import static com.hedera.services.bdd.spec.transactions.TxnVerbs.uploadInitCode;
import static com.hedera.services.bdd.spec.transactions.crypto.HapiCryptoTransfer.tinyBarsFromTo;
import static com.hedera.services.bdd.spec.utilops.CustomSpecAssert.allRunFor;
+import static com.hedera.services.bdd.spec.utilops.UtilVerbs.doAdhoc;
+import static com.hedera.services.bdd.spec.utilops.UtilVerbs.doWithStartupConfig;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.newKeyNamed;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.sleepFor;
+import static com.hedera.services.bdd.spec.utilops.UtilVerbs.sourcing;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.uploadScheduledContractPrices;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.validateChargedUsdWithin;
import static com.hedera.services.bdd.spec.utilops.UtilVerbs.withOpContext;
@@ -74,6 +77,7 @@
import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.NOT_SUPPORTED;
import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.SCHEDULE_ALREADY_EXECUTED;
import static com.hederahashgraph.api.proto.java.ResponseCodeEnum.SUCCESS;
+import static org.junit.jupiter.api.Assertions.assertEquals;
import com.hedera.services.bdd.junit.HapiTest;
import com.hedera.services.bdd.junit.HapiTestLifecycle;
@@ -84,8 +88,8 @@
import edu.umd.cs.findbugs.annotations.NonNull;
import java.math.BigInteger;
import java.util.Map;
+import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Stream;
-import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.DynamicTest;
@@ -171,7 +175,7 @@ final Stream scheduledPermissionedFileUpdateWorksAsExpected() {
var triggeredTx = getTxnRecord(SUCCESS_TXN).scheduled();
allRunFor(spec, triggeredTx);
- Assertions.assertEquals(
+ assertEquals(
SUCCESS,
triggeredTx.getResponseRecord().getReceipt().getStatus(),
SCHEDULED_TRANSACTION_MUST_SUCCEED);
@@ -198,7 +202,7 @@ final Stream scheduledPermissionedFileUpdateUnauthorizedPayerFails(
var triggeredTx = getTxnRecord(SUCCESS_TXN).scheduled();
allRunFor(spec, triggeredTx);
- Assertions.assertEquals(
+ assertEquals(
AUTHORIZATION_FAILED,
triggeredTx.getResponseRecord().getReceipt().getStatus(),
"Scheduled transaction be AUTHORIZATION_FAILED!");
@@ -226,7 +230,7 @@ final Stream scheduledSystemDeleteWorksAsExpected() {
var triggeredTx = getTxnRecord(SUCCESS_TXN).scheduled();
allRunFor(spec, triggeredTx);
- Assertions.assertEquals(
+ assertEquals(
SUCCESS,
triggeredTx.getResponseRecord().getReceipt().getStatus(),
SCHEDULED_TRANSACTION_MUST_SUCCEED);
@@ -235,17 +239,21 @@ final Stream scheduledSystemDeleteWorksAsExpected() {
@HapiTest
final Stream hapiTestScheduledSystemDeleteUnauthorizedPayerFails() {
+ final AtomicReference unprivilegedThrottleExemptPayerId = new AtomicReference<>();
return hapiTest(
+ doWithStartupConfig(
+ "accounts.lastThrottleExempt",
+ value -> doAdhoc(() -> unprivilegedThrottleExemptPayerId.set("0.0." + value))),
cryptoCreate(PAYING_ACCOUNT),
- cryptoCreate(PAYING_ACCOUNT_2),
fileCreate("misc").lifetime(THREE_MONTHS_IN_SECONDS).contents(ORIG_FILE),
- scheduleCreate(A_SCHEDULE, systemFileDelete("misc").updatingExpiry(1L))
+ sourcing(() -> scheduleCreate(
+ A_SCHEDULE, systemFileDelete("misc").updatingExpiry(1L))
.withEntityMemo(randomUppercase(100))
- .designatingPayer(PAYING_ACCOUNT_2)
+ .designatingPayer(unprivilegedThrottleExemptPayerId.get())
.payingWith(PAYING_ACCOUNT)
- .via(SUCCESS_TXN),
+ .via(SUCCESS_TXN)),
scheduleSign(A_SCHEDULE)
- .alsoSigningWith(PAYING_ACCOUNT_2)
+ .alsoSigningWith(GENESIS)
.payingWith(PAYING_ACCOUNT)
.via(SIGN_TX)
.hasKnownStatus(SUCCESS),
@@ -254,8 +262,7 @@ final Stream hapiTestScheduledSystemDeleteUnauthorizedPayerFails()
withOpContext((spec, opLog) -> {
var triggeredTx = getTxnRecord(SUCCESS_TXN).scheduled();
allRunFor(spec, triggeredTx);
-
- Assertions.assertEquals(
+ assertEquals(
NOT_SUPPORTED,
triggeredTx.getResponseRecord().getReceipt().getStatus(),
"Scheduled transaction be NOT_SUPPORTED!");
diff --git a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleSignTest.java b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleSignTest.java
index b431f7f5ab4e..30cc90734ad0 100644
--- a/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleSignTest.java
+++ b/hedera-node/test-clients/src/main/java/com/hedera/services/bdd/suites/schedule/ScheduleSignTest.java
@@ -628,11 +628,11 @@ final Stream signFailsDueToDeletedExpiration() {
return hapiTest(
cryptoCreate(SENDER).balance(1L),
cryptoCreate(RECEIVER).balance(0L).receiverSigRequired(true),
- overriding("ledger.schedule.txExpiryTimeSecs", "0"),
+ overriding("ledger.schedule.txExpiryTimeSecs", "1"),
scheduleCreate(TWO_SIG_XFER, cryptoTransfer(tinyBarsFromTo(SENDER, RECEIVER, 1)))
.alsoSigningWith(SENDER),
getAccountBalance(RECEIVER).hasTinyBars(0L),
- sleepFor(1000),
+ sleepFor(2000),
scheduleSign(TWO_SIG_XFER)
.alsoSigningWith(RECEIVER)
.hasPrecheckFrom(OK, INVALID_SCHEDULE_ID)