Skip to content

Commit

Permalink
merge master
Browse files Browse the repository at this point in the history
  • Loading branch information
mehdi-aouadi committed Feb 20, 2025
2 parents 7081fd9 + 52a7aa5 commit 57de9b7
Show file tree
Hide file tree
Showing 7 changed files with 223 additions and 7 deletions.
14 changes: 13 additions & 1 deletion CONTRIBUTING.md
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,19 @@ Start by looking through the 'good first issue' and 'help wanted' issues:
* [Good First Issue][search-label-good-first-issue] - issues which should only require a few lines of code, and a test or two.
* [Help wanted issues][search-label-help-wanted] - issues that are a bit more involved than `good first issue` issues.

Please keep in mind that we do not accept non-code contributions like fixing comments, typos or other trivial fixes. Although we appreciate the extra help, managing lots of these small contributions is unfeasible and puts extra pressure on our continuous delivery systems (running all tests, etc.). Feel free to open an issue pointing out any of those errors, and we will batch them into a single change.
Please reach out on Discord if you're looking to help out, and we can assist you in finding a good candidate ticket to work on, or discuss the idea you have.

We have a [Teku](https://discord.com/channels/697535391594446898/697539289042649190) channel, and also a [Teku Contributors](https://discord.com/channels/697535391594446898/1050616638497640548) channel.

Due to the prevalence of 'airdrop farming' type practices, this unfortunately puts heightened scrutiny on first time contributors, but if you're genuinely looking to help out, we'd really love to assist you in any way we can.
This does mean however that we will generally reject 'random' fixes such as 'TODO' fixes, typos, and generally things that add no value that we haven't identified as something we need. These are likely to be rejected with 'due to contribution guidelines' type responses.
This includes, but is not limited to:
* replacement of TODOs with code that is not well tested or justified by performance and regression tests to prove its worth.
* typos — even if valid, these will be worked into other PRs or ignored completely if they come from first-time contributors and have no substantive value.
* replacing RuntimeException with a new exception type that is not well tested and does not add value.
* rewording of comments.

Minimal discussion will be given in PRs due to the volume of this type of PR we currently need to deal with, which takes away from actual development time. Please don't be offended if you're genuinely trying to help out and we simply reply 'see contribution guidelines'.

### Local Development
The codebase is maintained using the "*contributor workflow*" where everyone without exception contributes patch proposals using "*pull-requests*". This facilitates social contribution, easy testing and peer review.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -156,7 +156,8 @@ private InProgressSync startSync(final SyncTarget syncTarget) {
syncResult.finishAsync(
this::onSyncComplete,
error -> {
LOG.error("Error encountered during sync", error);
LOG.error("Sync process failed to complete");
LOG.debug("Error encountered during sync", error);
onSyncComplete(SyncResult.FAILED);
},
eventThread);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
import static java.util.Objects.requireNonNull;

import java.io.IOException;
import java.io.UncheckedIOException;
import java.net.URL;
import java.util.Map;
import java.util.Optional;
Expand Down Expand Up @@ -120,8 +121,12 @@ private <S extends SszData> RequestBody createOctetStreamRequestBody(final S req
return new RequestBody() {

@Override
public void writeTo(final BufferedSink bufferedSink) {
requestBodyObject.sszSerialize(bufferedSink.outputStream());
public void writeTo(final BufferedSink bufferedSink) throws IOException {
try {
requestBodyObject.sszSerialize(bufferedSink.outputStream());
} catch (final UncheckedIOException e) {
throw e.getCause();
}
}

@Override
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@

import tech.pegasys.teku.infrastructure.async.SafeFuture;
import tech.pegasys.teku.infrastructure.ssz.SszList;
import tech.pegasys.teku.infrastructure.unsigned.UInt64;
import tech.pegasys.teku.spec.SpecVersion;
import tech.pegasys.teku.spec.datastructures.blocks.BeaconBlock;
import tech.pegasys.teku.spec.datastructures.blocks.blockbody.BeaconBlockBodySchema;
Expand All @@ -30,6 +31,7 @@ public class BeaconBlockBuilder {
private final SpecVersion spec;
private final DataStructureUtil dataStructureUtil;

private UInt64 slot;
private SszList<ProposerSlashing> proposerSlashings;
private SyncAggregate syncAggregate;
private ExecutionPayload executionPayload;
Expand All @@ -45,6 +47,11 @@ public BeaconBlockBuilder(final SpecVersion spec, final DataStructureUtil dataSt
this.syncAggregate = dataStructureUtil.randomSyncAggregate();
}

/**
 * Sets the slot used for the built block. If never called, {@code build()} falls back to a
 * random slot from {@code dataStructureUtil}.
 *
 * @param slot the slot to use for the block
 * @return this builder, for chaining
 */
public BeaconBlockBuilder slot(final UInt64 slot) {
this.slot = slot;
return this;
}

public BeaconBlockBuilder syncAggregate(final SyncAggregate syncAggregate) {
this.syncAggregate = syncAggregate;
return this;
Expand Down Expand Up @@ -127,14 +134,17 @@ public SafeFuture<BeaconBlock> build() {
if (builder.supportsBlsToExecutionChanges()) {
builder.blsToExecutionChanges(blsToExecutionChanges);
}
if (builder.supportsKzgCommitments()) {
builder.blobKzgCommitments(dataStructureUtil.randomBlobKzgCommitments());
}
return SafeFuture.COMPLETE;
})
.thenApply(
blockBody ->
spec.getSchemaDefinitions()
.getBeaconBlockSchema()
.create(
dataStructureUtil.randomUInt64(),
slot != null ? slot : dataStructureUtil.randomUInt64(),
dataStructureUtil.randomUInt64(),
dataStructureUtil.randomBytes32(),
dataStructureUtil.randomBytes32(),
Expand Down
2 changes: 2 additions & 0 deletions ethereum/statetransition/build.gradle
Original file line number Diff line number Diff line change
Expand Up @@ -52,4 +52,6 @@ dependencies {
testImplementation testFixtures(project(':infrastructure:logging'))

jmhImplementation testFixtures(project(':infrastructure:bls'))
jmhImplementation testFixtures(project(':ethereum:spec'))
jmhImplementation 'org.mockito:mockito-core'
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,175 @@
/*
* Copyright Consensys Software Inc., 2025
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/

package tech.pegasys.teku.statetransition.validation.signatures;

import static org.mockito.Mockito.mock;
import static tech.pegasys.teku.infrastructure.logging.Converter.gweiToEth;
import static tech.pegasys.teku.statetransition.attestation.AggregatingAttestationPool.DEFAULT_MAXIMUM_ATTESTATION_COUNT;

import java.io.FileInputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.stream.Stream;
import org.apache.tuweni.bytes.Bytes;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;
import tech.pegasys.teku.infrastructure.unsigned.UInt64;
import tech.pegasys.teku.spec.Spec;
import tech.pegasys.teku.spec.TestSpecFactory;
import tech.pegasys.teku.spec.datastructures.attestation.ValidatableAttestation;
import tech.pegasys.teku.spec.datastructures.blocks.SignedBeaconBlock;
import tech.pegasys.teku.spec.datastructures.state.beaconstate.BeaconState;
import tech.pegasys.teku.spec.logic.common.util.BlockRewardCalculatorUtil;
import tech.pegasys.teku.spec.logic.common.util.BlockRewardCalculatorUtil.BlockRewardData;
import tech.pegasys.teku.spec.util.DataStructureUtil;
import tech.pegasys.teku.statetransition.attestation.AggregatingAttestationPool;
import tech.pegasys.teku.statetransition.attestation.AttestationForkChecker;
import tech.pegasys.teku.storage.client.RecentChainData;

@Warmup(iterations = 5, time = 2000, timeUnit = TimeUnit.MILLISECONDS)
@Measurement(iterations = 10, time = 2000, timeUnit = TimeUnit.MILLISECONDS)
@BenchmarkMode(Mode.AverageTime)
@Fork(1)
@State(Scope.Thread)
public class AggregatingAttestationPoolBenchmark {
  /**
   * JMH benchmark for {@link AggregatingAttestationPool#getAttestationsForBlock}, driven by a
   * real mainnet Deneb state, a dump of pool attestations, and the block actually produced at
   * the following slot. The three reference files below must be present in the working
   * directory; download links are given next to each path constant.
   */
  private static final Spec SPEC = TestSpecFactory.createMainnetDeneb();

  // pool dump can be created via something similar to
  // https://github.com/tbenr/teku/commit/bd37ec8f5c6ce02edb3e375a1561e1d934b7d191
  // state and actual block can be obtained the usual ways

  // a reference file can be obtained here
  // https://drive.google.com/file/d/139bA7r88riFODZ7S0FpvtO7hmWmdC_XC/view?usp=drive_link
  private static final String STATE_PATH =
      "BeaconStateDeneb_3630479_03664f196162fb81a4406c508674dd1ede09b883d37d0f3d0f076897f68741d2.ssz";

  // a reference file can be obtained here
  // https://drive.google.com/file/d/1I5vXK-x8ZH9wh40wNf1oACXeF_U3to8J/view?usp=drive_link
  private static final String POOL_DUMP_PATH = "attestations_3630479.multi_ssz";

  // a reference file can be obtained here
  // https://drive.google.com/file/d/1PN0OToyNOV0SyjeQaS7oF3J4cKbmy1nX/view?usp=drive_link
  private static final String ACTUAL_BLOCK_PATH =
      "block-3630480-e652bd51c7e4e528fea0728a3ad96f86ceb92e9daa227f315e96a9884ceb187b.ssz";

  // State the reference files were captured at, and the same state advanced one slot (the
  // state a block producer would use).
  private BeaconState state;
  private BeaconState newBlockState;
  private AggregatingAttestationPool pool;
  private RecentChainData recentChainData;
  private AttestationForkChecker attestationForkChecker;

  /**
   * Loads the reference state, fills the pool from the attestation dump, and advances the state
   * by one slot ready for block production.
   *
   * @throws Exception if any of the reference files is missing or unreadable
   */
  @Setup(Level.Trial)
  public void init() throws Exception {
    // The mock must be created BEFORE the pool is constructed; the previous ordering passed a
    // still-null recentChainData into the AggregatingAttestationPool constructor.
    this.recentChainData = mock(RecentChainData.class);
    this.pool =
        new AggregatingAttestationPool(
            SPEC, recentChainData, new NoOpMetricsSystem(), DEFAULT_MAXIMUM_ATTESTATION_COUNT);

    this.state =
        SPEC.getGenesisSpec()
            .getSchemaDefinitions()
            .getBeaconStateSchema()
            .sszDeserialize(Bytes.wrap(Files.readAllBytes(Paths.get(STATE_PATH))));

    this.attestationForkChecker = new AttestationForkChecker(SPEC, state);

    final var attestationSchema =
        SPEC.getGenesisSpec().getSchemaDefinitions().getAttestationSchema();

    // The pool dump is one hex-encoded SSZ attestation per line.
    try (final Stream<String> attestationLinesStream = Files.lines(Paths.get(POOL_DUMP_PATH))) {
      attestationLinesStream
          .map(line -> attestationSchema.sszDeserialize(Bytes.fromHexString(line)))
          .map(attestation -> ValidatableAttestation.from(SPEC, attestation))
          .forEach(
              attestation -> {
                attestation.saveCommitteeShufflingSeedAndCommitteesSize(state);
                pool.add(attestation);
              });
    }

    this.newBlockState = SPEC.processSlots(state, state.getSlot().increment());

    System.out.println("init done. Pool size: " + pool.getSize());
  }

  /** The measured operation: select attestations for a block built on {@code newBlockState}. */
  @Benchmark
  public void getAttestationsForBlock(final Blackhole bh) {
    final var attestationsForBlock =
        pool.getAttestationsForBlock(newBlockState, attestationForkChecker);
    bh.consume(attestationsForBlock);
  }

  /**
   * Prints the attestation rewards of a block filled from this pool next to the rewards of the
   * block that was actually produced on mainnet, as a quality comparison. Not a benchmark; only
   * invoked from {@link #main}.
   *
   * @throws Exception if the actual-block reference file is missing or unreadable
   */
  public void printBlockRewardData() throws Exception {
    final BlockRewardCalculatorUtil blockRewardCalculatorUtil = new BlockRewardCalculatorUtil(SPEC);
    final DataStructureUtil dataStructureUtil = new DataStructureUtil(SPEC);
    final UInt64 blockSlot = state.getSlot().increment();

    final var block =
        dataStructureUtil
            .blockBuilder(blockSlot.longValue())
            .slot(blockSlot)
            .attestations(pool.getAttestationsForBlock(newBlockState, attestationForkChecker))
            .build()
            .getImmediately();

    BlockRewardData blockRewardData = blockRewardCalculatorUtil.getBlockRewardData(block, state);
    System.out.println(
        "Block attestation rewards: "
            + gweiToEth(UInt64.valueOf(blockRewardData.attestations()))
            + " ETH");

    final SignedBeaconBlock actualBlock =
        SPEC.getGenesisSpec()
            .getSchemaDefinitions()
            .getSignedBeaconBlockSchema()
            .sszDeserialize(Bytes.wrap(Files.readAllBytes(Paths.get(ACTUAL_BLOCK_PATH))));

    blockRewardData = blockRewardCalculatorUtil.getBlockRewardData(actualBlock.getMessage(), state);
    System.out.println(
        "Block attestation rewards: "
            + gweiToEth(UInt64.valueOf(blockRewardData.attestations()))
            + " ETH (actual block)");
  }

  /** Ad-hoc entry point for running the comparison and a fixed number of iterations without JMH. */
  public static void main(final String[] args) throws Exception {
    final AggregatingAttestationPoolBenchmark benchmark =
        new AggregatingAttestationPoolBenchmark();
    benchmark.init();
    benchmark.printBlockRewardData();

    final var bh =
        new Blackhole(
            "Today's password is swordfish. I understand instantiating Blackholes directly is dangerous.");

    for (int i = 0; i < 100; i++) {
      benchmark.getAttestationsForBlock(bh);
    }
  }
}
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,8 @@
import tech.pegasys.teku.infrastructure.exceptions.ExceptionUtil;
import tech.pegasys.teku.infrastructure.ssz.SszData;
import tech.pegasys.teku.infrastructure.ssz.schema.SszSchema;
import tech.pegasys.teku.infrastructure.time.Throttler;
import tech.pegasys.teku.infrastructure.time.TimeProvider;
import tech.pegasys.teku.infrastructure.unsigned.UInt64;
import tech.pegasys.teku.networking.eth2.gossip.encoding.DecodingException;
import tech.pegasys.teku.networking.eth2.gossip.encoding.Eth2PreparedGossipMessageFactory;
Expand Down Expand Up @@ -56,6 +58,10 @@ public class Eth2TopicHandler<MessageT extends SszData> implements TopicHandler
private final NetworkingSpecConfig networkingConfig;
private final DebugDataDumper debugDataDumper;
private final String topic;
final TimeProvider timeProvider;

// every slot of mainnet config
private final Throttler<Logger> loggerThrottler = new Throttler<>(LOG, UInt64.valueOf(12));

public Eth2TopicHandler(
final RecentChainData recentChainData,
Expand All @@ -79,6 +85,7 @@ public Eth2TopicHandler(
gossipEncoding.createPreparedGossipMessageFactory(
recentChainData::getMilestoneByForkDigest);
this.debugDataDumper = debugDataDumper;
this.timeProvider = recentChainData.getStore();
this.topic = GossipTopics.getTopic(forkDigest, topicName, gossipEncoding);
}

Expand Down Expand Up @@ -173,8 +180,12 @@ protected ValidationResult handleMessageProcessingError(
P2P_LOG.onGossipMessageDecodingError(getTopic(), message.getOriginalMessage(), err);
response = ValidationResult.Invalid;
} else if (ExceptionUtil.hasCause(err, RejectedExecutionException.class)) {
LOG.warn(
"Discarding gossip message for topic {} because the executor queue is full", getTopic());
loggerThrottler.invoke(
timeProvider.getTimeInSeconds(),
(log) ->
LOG.warn(
"Discarding gossip message for topic {} because the executor queue is full",
getTopic()));
response = ValidationResult.Ignore;
} else if (ExceptionUtil.hasCause(err, ServiceCapacityExceededException.class)) {
LOG.warn(
Expand Down

0 comments on commit 57de9b7

Please sign in to comment.