Skip to content
This repository has been archived by the owner on Feb 6, 2025. It is now read-only.

Commit

Permalink
Merge remote-tracking branch 'origin/develop' into merge-v1.1.0
Browse files Browse the repository at this point in the history
  • Loading branch information
pythonberg1997 committed Oct 22, 2024
2 parents e1c8689 + 01ee568 commit 8498a79
Show file tree
Hide file tree
Showing 6 changed files with 130 additions and 6 deletions.
67 changes: 67 additions & 0 deletions .github/workflows/build-check.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
# CI build check: compiles release binaries for every supported target on each
# pull request into main/develop. Indentation reconstructed to valid YAML.
name: build-check

on:
  pull_request:
    branches: [ main, develop ]

env:
  CARGO_TERM_COLOR: always
  # Pinned Rust toolchain used for all matrix builds.
  TOOL_CHAIN: "1.81"

jobs:
  extract-version:
    name: extract version
    runs-on: ubuntu-latest
    steps:
      - name: Extract version
        # Fixed: dropped the redundant `$(echo …)` subshell — parameter expansion
        # alone produces the same value.
        # NOTE(review): this workflow only triggers on pull_request, where
        # GITHUB_REF is "refs/pull/<n>/merge", so stripping "refs/tags/" is a
        # no-op — confirm whether a tag-derived VERSION is actually intended here.
        run: echo "VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT
        id: extract_version
    outputs:
      VERSION: ${{ steps.extract_version.outputs.VERSION }}

  build:
    name: build release
    runs-on: ${{ matrix.configs.os }}
    needs: extract-version
    strategy:
      matrix:
        # Cross product: every target/os pair is built with every build command.
        configs:
          - target: x86_64-unknown-linux-gnu
            os: ubuntu-20.04
            profile: maxperf
          - target: aarch64-unknown-linux-gnu
            os: ubuntu-20.04
            profile: maxperf
          - target: x86_64-apple-darwin
            os: macos-13
            profile: maxperf
          - target: aarch64-apple-darwin
            os: macos-14
            profile: maxperf
          - target: x86_64-pc-windows-gnu
            os: ubuntu-20.04
            profile: maxperf
        build:
          - command: op-build
            binary: op-reth
          - command: bsc-build
            binary: bsc-reth
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
        with:
          target: ${{ matrix.configs.target }}
          toolchain: ${{ env.TOOL_CHAIN }}
      # Cross-compilation support for non-native targets (e.g. aarch64 on x86 runners).
      - uses: taiki-e/install-action@cross
      - uses: Swatinem/rust-cache@v2
        with:
          cache-on-failure: true

      - name: Apple M1 setup
        if: matrix.configs.target == 'aarch64-apple-darwin'
        # Point the build at the macOS SDK so cross-arch Apple builds link correctly.
        run: |
          echo "SDKROOT=$(xcrun -sdk macosx --show-sdk-path)" >> $GITHUB_ENV
          echo "MACOSX_DEPLOYMENT_TARGET=$(xcrun -sdk macosx --show-sdk-platform-version)" >> $GITHUB_ENV
      - name: Build Reth
        run: make PROFILE=${{ matrix.configs.profile }} ${{ matrix.build.command }}-${{ matrix.configs.target }}
7 changes: 3 additions & 4 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,9 @@ members = [
"crates/bsc/chainspec/",
"crates/bsc/cli/",
"crates/bsc/consensus/",
"crates/bsc/engine/",
"crates/bsc/evm/",
"crates/bsc/hardforks/",
"crates/bsc/node/",
"crates/bsc/payload/",
"crates/bsc/primitives/",
Expand Down Expand Up @@ -133,9 +135,6 @@ members = [
"crates/trie/parallel/",
"crates/trie/prefetch/",
"crates/trie/trie",
"crates/bsc/node/",
"crates/bsc/engine/",
"crates/bsc/hardforks",
"examples/beacon-api-sidecar-fetcher/",
"examples/beacon-api-sse/",
"examples/bsc-p2p",
Expand Down Expand Up @@ -309,6 +308,7 @@ reth-bsc-chainspec = { path = "crates/bsc/chainspec" }
reth-bsc-forks = { path = "crates/bsc/hardforks" }
reth-bsc-cli = { path = "crates/bsc/cli" }
reth-bsc-consensus = { path = "crates/bsc/consensus" }
reth-bsc-engine = { path = "crates/bsc/engine" }
reth-bsc-evm = { path = "crates/bsc/evm" }
reth-bsc-node = { path = "crates/bsc/node" }
reth-bsc-payload-builder = { path = "crates/bsc/payload" }
Expand Down Expand Up @@ -418,7 +418,6 @@ reth-static-file-types = { path = "crates/static-file/types" }
reth-storage-api = { path = "crates/storage/storage-api" }
reth-storage-errors = { path = "crates/storage/errors" }
reth-tasks = { path = "crates/tasks" }
reth-bsc-engine = { path = "crates/bsc/engine" }
reth-testing-utils = { path = "testing/testing-utils" }
reth-tokio-util = { path = "crates/tokio-util" }
reth-tracing = { path = "crates/tracing" }
Expand Down
5 changes: 5 additions & 0 deletions crates/bsc/engine/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -123,6 +123,7 @@ where
provider: Provider,
parlia: Parlia,
snapshot_reader: SnapshotReader<SnapShotProvider>,
merkle_clean_threshold: u64,
_marker: PhantomData<N>,
}

Expand All @@ -146,6 +147,7 @@ where
to_engine: UnboundedSender<BeaconEngineMessage<N::Engine>>,
network_block_event_rx: Arc<Mutex<UnboundedReceiver<EngineMessage>>>,
fetch_client: Client,
merkle_clean_threshold: u64,
_marker: PhantomData<N>,
) -> Self {
let latest_header = provider.latest_header().ok().flatten().unwrap_or_else(|| {
Expand All @@ -172,6 +174,7 @@ where
to_engine,
network_block_event_rx,
fetch_client,
merkle_clean_threshold,
_marker,
}
}
Expand All @@ -188,6 +191,7 @@ where
provider,
parlia,
snapshot_reader,
merkle_clean_threshold,
_marker,
} = self;
let parlia_client = ParliaClient::new(storage.clone(), fetch_client);
Expand All @@ -203,6 +207,7 @@ where
storage,
parlia_client.clone(),
period,
merkle_clean_threshold,
);
}
parlia_client
Expand Down
55 changes: 53 additions & 2 deletions crates/bsc/engine/src/task.rs
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,11 @@ use tracing::{debug, error, info, trace};

use crate::{client::ParliaClient, Storage};

// Minimum number of blocks required before a full merkle tree rebuild pays off.
// When the gap between the trusted header and the new header is smaller than this value,
// running the stage sync in batches saves time by avoiding a full merkle tree rebuild.
const MIN_BLOCKS_FOR_MERKLE_REBUILD: u64 = 100_000;

/// All message variants that can be sent to beacon engine.
#[derive(Debug)]
enum ForkChoiceMessage {
Expand Down Expand Up @@ -88,6 +93,9 @@ pub(crate) struct ParliaEngineTask<
chain_tracker_tx: UnboundedSender<ForkChoiceMessage>,
/// The channel to receive chain tracker messages
chain_tracker_rx: Arc<Mutex<UnboundedReceiver<ForkChoiceMessage>>>,
/// The threshold (in number of blocks) at which trie building switches from applying
/// incremental updates to performing a full rebuild.
}

// === impl ParliaEngineTask ===
Expand All @@ -110,6 +118,7 @@ impl<
storage: Storage,
block_fetcher: ParliaClient<Client>,
block_interval: u64,
merkle_clean_threshold: u64,
) {
let (fork_choice_tx, fork_choice_rx) = mpsc::unbounded_channel();
let (chain_tracker_tx, chain_tracker_rx) = mpsc::unbounded_channel();
Expand All @@ -127,6 +136,7 @@ impl<
fork_choice_rx: Arc::new(Mutex::new(fork_choice_rx)),
chain_tracker_tx,
chain_tracker_rx: Arc::new(Mutex::new(chain_tracker_rx)),
merkle_clean_threshold,
};

this.start_block_event_listening();
Expand All @@ -147,6 +157,7 @@ impl<
let fork_choice_tx = self.fork_choice_tx.clone();
let chain_tracker_tx = self.chain_tracker_tx.clone();
let fetch_header_timeout_duration = Duration::from_secs(block_interval);
let merkle_clean_threshold = self.merkle_clean_threshold;

tokio::spawn(async move {
loop {
Expand Down Expand Up @@ -260,7 +271,9 @@ impl<
let mut trusted_header = latest_unsafe_header.clone();
// if parent hash is not equal to latest unsafe hash
// may be a fork chain detected, we need to trust the finalized header
if latest_header.parent_hash != latest_unsafe_header.hash() {
if latest_header.number - 1 == latest_unsafe_header.number &&
latest_header.parent_hash != latest_unsafe_header.hash()
{
trusted_header = finalized_header.clone();
}

Expand All @@ -273,7 +286,7 @@ impl<
block_interval * (latest_header.number - 1 - trusted_header.number);
let sealed = latest_header.clone().seal_slow();
let (header, seal) = sealed.into_parts();
let sealed_header = SealedHeader::new(header, seal);
let mut sealed_header = SealedHeader::new(header, seal);
let is_valid_header = match consensus
.validate_header_with_predicted_timestamp(&sealed_header, predicted_timestamp)
{
Expand Down Expand Up @@ -358,6 +371,44 @@ impl<
}
};

// if the target header is close enough to the trusted header, avoid triggering a
// full merkle tree rebuild by capping the sync target
if pipeline_sync &&
(sealed_header.number - trusted_header.number > merkle_clean_threshold &&
sealed_header.number - trusted_header.number <
MIN_BLOCKS_FOR_MERKLE_REBUILD)
{
let fetch_headers_result = match timeout(
fetch_header_timeout_duration,
block_fetcher.get_headers(HeadersRequest {
start: (trusted_header.number + merkle_clean_threshold - 1).into(),
limit: 1,
direction: HeadersDirection::Falling,
}),
)
.await
{
Ok(result) => result,
Err(_) => {
trace!(target: "consensus::parlia", "Fetch header timeout");
continue
}
};
if fetch_headers_result.is_err() {
trace!(target: "consensus::parlia", "Failed to fetch header");
continue
}

let headers = fetch_headers_result.unwrap().into_data();
if headers.is_empty() {
continue
}

let sealed = headers[0].clone().seal_slow();
let (header, seal) = sealed.into_parts();
sealed_header = SealedHeader::new(header, seal);
};

disconnected_headers.insert(0, sealed_header.clone());
disconnected_headers.reverse();
// cache header and block
Expand Down
1 change: 1 addition & 0 deletions crates/node/builder/src/launch/engine.rs
Original file line number Diff line number Diff line change
Expand Up @@ -251,6 +251,7 @@ where
consensus_engine_tx.clone(),
engine_rx,
network_client.clone(),
ctx.toml_config().stages.merkle.clean_threshold,
PhantomData::<Types>,
)
.build(ctx.node_config().debug.tip.is_none());
Expand Down
1 change: 1 addition & 0 deletions crates/node/builder/src/launch/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -294,6 +294,7 @@ where
consensus_engine_tx.clone(),
engine_rx,
network_client.clone(),
ctx.toml_config().stages.merkle.clean_threshold,
PhantomData::<Types>,
)
.build(ctx.node_config().debug.tip.is_none());
Expand Down

0 comments on commit 8498a79

Please sign in to comment.