From 352721370d223c2335670843994ff7b57c7b0a5f Mon Sep 17 00:00:00 2001 From: "Supernovahs.eth" <91280922+supernovahs@users.noreply.github.com> Date: Wed, 15 Nov 2023 17:38:13 +0530 Subject: [PATCH 01/77] use reth_primitives instead of revm_primitives (#5434) --- crates/interfaces/src/executor.rs | 4 ++-- crates/payload/builder/src/database.rs | 11 +++++++---- crates/payload/builder/src/error.rs | 3 +-- crates/payload/builder/src/payload.rs | 6 +++--- crates/primitives/src/account.rs | 6 ++++-- crates/primitives/src/chain/spec.rs | 2 +- crates/primitives/src/constants/mod.rs | 6 ++++-- crates/primitives/src/eip4844.rs | 4 +++- crates/primitives/src/forkid.rs | 3 +-- crates/primitives/src/transaction/optimism.rs | 3 +-- crates/rpc/rpc/src/debug.rs | 12 ++++++------ crates/rpc/rpc/src/eth/api/mod.rs | 2 +- crates/rpc/rpc/src/eth/api/pending_block.rs | 6 +++--- crates/rpc/rpc/src/eth/api/server.rs | 3 +-- crates/rpc/rpc/src/eth/api/transactions.rs | 2 +- crates/rpc/rpc/src/eth/bundle.rs | 7 +++++-- crates/rpc/rpc/src/eth/error.rs | 3 +-- crates/rpc/rpc/src/trace.rs | 4 ++-- 18 files changed, 47 insertions(+), 40 deletions(-) diff --git a/crates/interfaces/src/executor.rs b/crates/interfaces/src/executor.rs index 3ca47013539dd..fa677ad814c84 100644 --- a/crates/interfaces/src/executor.rs +++ b/crates/interfaces/src/executor.rs @@ -1,8 +1,8 @@ use crate::RethError; use reth_primitives::{ - BlockNumHash, Bloom, GotExpected, GotExpectedBoxed, PruneSegmentError, B256, + revm_primitives::EVMError, BlockNumHash, Bloom, GotExpected, GotExpectedBoxed, + PruneSegmentError, B256, }; -use revm_primitives::EVMError; use thiserror::Error; /// Transaction validation errors diff --git a/crates/payload/builder/src/database.rs b/crates/payload/builder/src/database.rs index 2631503cd489f..04998c45b7e64 100644 --- a/crates/payload/builder/src/database.rs +++ b/crates/payload/builder/src/database.rs @@ -1,10 +1,13 @@ //! Database adapters for payload building. -use reth_primitives::U256; -use revm_primitives::{ - db::{Database, DatabaseRef}, - AccountInfo, Address, Bytecode, B256, +use reth_primitives::{ + revm_primitives::{ + db::{Database, DatabaseRef}, + AccountInfo, Address, Bytecode, B256, + }, + U256, }; + use std::{ cell::RefCell, collections::{hash_map::Entry, HashMap}, diff --git a/crates/payload/builder/src/error.rs b/crates/payload/builder/src/error.rs index 0a597a4745f43..7d8360864800a 100644 --- a/crates/payload/builder/src/error.rs +++ b/crates/payload/builder/src/error.rs @@ -1,9 +1,8 @@ //! Error types emitted by types or implementations of this crate. use reth_interfaces::RethError; -use reth_primitives::B256; +use reth_primitives::{revm_primitives::EVMError, B256}; use reth_transaction_pool::BlobStoreError; -use revm_primitives::EVMError; use tokio::sync::oneshot; /// Possible error variants during payload building. 
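Patch 01's point is visible in the hunks above: revm types now arrive through the `reth_primitives::revm_primitives` re-export instead of a direct `revm_primitives` dependency, keeping every crate on the single revm version that `reth_primitives` pins. A minimal sketch of the pattern (the `empty_account` helper is invented for illustration, not part of the patch):

```rust
// Hypothetical downstream module: only `reth_primitives` appears in its
// Cargo.toml, yet revm's primitive types are still available via the re-export.
use reth_primitives::revm_primitives::{AccountInfo, KECCAK_EMPTY};

/// A fresh account with no code; code-less accounts hash to `KECCAK_EMPTY`.
fn empty_account() -> AccountInfo {
    AccountInfo { code_hash: KECCAK_EMPTY, ..Default::default() }
}

fn main() {
    assert_eq!(empty_account().nonce, 0);
}
```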
diff --git a/crates/payload/builder/src/payload.rs b/crates/payload/builder/src/payload.rs index b3e68da2b8a4d..e360cfefaa243 100644 --- a/crates/payload/builder/src/payload.rs +++ b/crates/payload/builder/src/payload.rs @@ -2,8 +2,9 @@ use alloy_rlp::{Encodable, Error as DecodeError}; use reth_primitives::{ - revm::config::revm_spec_by_timestamp_after_merge, Address, BlobTransactionSidecar, ChainSpec, - Header, SealedBlock, Withdrawal, B256, U256, + revm::config::revm_spec_by_timestamp_after_merge, + revm_primitives::{BlobExcessGasAndPrice, BlockEnv, CfgEnv, SpecId}, + Address, BlobTransactionSidecar, ChainSpec, Header, SealedBlock, Withdrawal, B256, U256, }; use reth_rpc_types::engine::{ ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, ExecutionPayloadV1, PayloadAttributes, @@ -14,7 +15,6 @@ use reth_rpc_types_compat::engine::payload::{ block_to_payload_v3, convert_block_to_payload_field_v2, convert_standalone_withdraw_to_withdrawal, try_block_to_payload_v1, }; -use revm_primitives::{BlobExcessGasAndPrice, BlockEnv, CfgEnv, SpecId}; #[cfg(feature = "optimism")] use reth_primitives::TransactionSigned; diff --git a/crates/primitives/src/account.rs b/crates/primitives/src/account.rs index 390663b0c095b..ab6761e415348 100644 --- a/crates/primitives/src/account.rs +++ b/crates/primitives/src/account.rs @@ -1,8 +1,10 @@ -use crate::{B256, KECCAK_EMPTY, U256}; +use crate::{ + revm_primitives::{Bytecode as RevmBytecode, BytecodeState, Bytes, JumpMap}, + B256, KECCAK_EMPTY, U256, +}; use byteorder::{BigEndian, ReadBytesExt}; use bytes::Buf; use reth_codecs::{main_codec, Compact}; -use revm_primitives::{Bytecode as RevmBytecode, BytecodeState, Bytes, JumpMap}; use serde::{Deserialize, Serialize}; use std::ops::Deref; diff --git a/crates/primitives/src/chain/spec.rs b/crates/primitives/src/chain/spec.rs index 3a545f3e23f68..4f5e4f6374c46 100644 --- a/crates/primitives/src/chain/spec.rs +++ b/crates/primitives/src/chain/spec.rs @@ -6,11 +6,11 @@ use crate::{ forkid::ForkFilterKey, header::Head, proofs::genesis_state_root, + revm_primitives::{address, b256}, Address, BlockNumber, Chain, ForkFilter, ForkHash, ForkId, Genesis, Hardfork, Header, SealedHeader, B256, EMPTY_OMMER_ROOT_HASH, U256, }; use once_cell::sync::Lazy; -use revm_primitives::{address, b256}; use serde::{Deserialize, Serialize}; use std::{ collections::BTreeMap, diff --git a/crates/primitives/src/constants/mod.rs b/crates/primitives/src/constants/mod.rs index e6ec8a0e72f0d..2e3c691d9c153 100644 --- a/crates/primitives/src/constants/mod.rs +++ b/crates/primitives/src/constants/mod.rs @@ -1,7 +1,9 @@ //! Ethereum protocol-related constants -use crate::{Address, B256, U256}; -use revm_primitives::{address, b256}; +use crate::{ + revm_primitives::{address, b256}, + Address, B256, U256, +}; use std::time::Duration; /// [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844#parameters) constants. 
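The `address` and `b256` macros imported in the `chain/spec.rs` and `constants/mod.rs` hunks above build checked constants at compile time. A small usage sketch (the constant names are invented; the hash value is the mainnet genesis hash that also appears in this series' forkid tests):

```rust
use reth_primitives::{
    revm_primitives::{address, b256},
    Address, B256,
};

// Both macros validate the hex literal at compile time; a malformed digit
// fails the build rather than panicking at runtime.
const EXAMPLE_ADDRESS: Address = address!("bf4ed7b27f1d666546e30d74d50d173d20bca754");
const EXAMPLE_HASH: B256 =
    b256!("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3");

fn main() {
    println!("{EXAMPLE_ADDRESS} {EXAMPLE_HASH}");
}
```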
diff --git a/crates/primitives/src/eip4844.rs b/crates/primitives/src/eip4844.rs index c93017e8dda1b..4f65cc7ee0632 100644 --- a/crates/primitives/src/eip4844.rs +++ b/crates/primitives/src/eip4844.rs @@ -6,7 +6,9 @@ use crate::{constants::eip4844::VERSIONED_HASH_VERSION_KZG, B256}; use sha2::{Digest, Sha256}; // re-exports from revm for calculating blob fee -pub use revm_primitives::{calc_blob_gasprice, calc_excess_blob_gas as calculate_excess_blob_gas}; +pub use crate::revm_primitives::{ + calc_blob_gasprice, calc_excess_blob_gas as calculate_excess_blob_gas, +}; /// Calculates the versioned hash for a KzgCommitment /// diff --git a/crates/primitives/src/forkid.rs b/crates/primitives/src/forkid.rs index 3de97bf3c2918..89ef34a52bdbe 100644 --- a/crates/primitives/src/forkid.rs +++ b/crates/primitives/src/forkid.rs @@ -379,8 +379,7 @@ impl Cache { #[cfg(test)] mod tests { use super::*; - use crate::hex_literal::hex; - use revm_primitives::b256; + use crate::{hex_literal::hex, revm_primitives::b256}; const GENESIS_HASH: B256 = b256!("d4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3"); diff --git a/crates/primitives/src/transaction/optimism.rs b/crates/primitives/src/transaction/optimism.rs index 7f4076322c650..133a507b3b9a2 100644 --- a/crates/primitives/src/transaction/optimism.rs +++ b/crates/primitives/src/transaction/optimism.rs @@ -147,10 +147,9 @@ impl TxDeposit { #[cfg(test)] mod tests { use super::*; - use crate::{Bytes, TransactionSigned}; + use crate::{revm_primitives::hex_literal::hex, Bytes, TransactionSigned}; use alloy_rlp::Decodable; use bytes::BytesMut; - use revm_primitives::hex_literal::hex; #[test] fn test_rlp_roundtrip() { diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 7bad7e8c4e261..b8b313d804bfc 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -14,8 +14,12 @@ use alloy_rlp::{Decodable, Encodable}; use async_trait::async_trait; use jsonrpsee::core::RpcResult; use reth_primitives::{ - revm::env::tx_env_with_recovered, Account, Address, Block, BlockId, BlockNumberOrTag, Bytes, - TransactionSigned, B256, + revm::env::tx_env_with_recovered, + revm_primitives::{ + db::{DatabaseCommit, DatabaseRef}, + BlockEnv, CfgEnv, + }, + Account, Address, Block, BlockId, BlockNumberOrTag, Bytes, TransactionSigned, B256, }; use reth_provider::{BlockReaderIdExt, HeaderProvider, StateProviderBox}; use reth_revm::{ @@ -38,10 +42,6 @@ use revm::{ db::{CacheDB, EmptyDB}, primitives::Env, }; -use revm_primitives::{ - db::{DatabaseCommit, DatabaseRef}, - BlockEnv, CfgEnv, -}; use std::sync::Arc; use tokio::sync::{mpsc, AcquireError, OwnedSemaphorePermit}; use tokio_stream::{wrappers::ReceiverStream, StreamExt}; diff --git a/crates/rpc/rpc/src/eth/api/mod.rs b/crates/rpc/rpc/src/eth/api/mod.rs index ef40989df40de..39b241beba15b 100644 --- a/crates/rpc/rpc/src/eth/api/mod.rs +++ b/crates/rpc/rpc/src/eth/api/mod.rs @@ -14,6 +14,7 @@ use async_trait::async_trait; use reth_interfaces::RethResult; use reth_network_api::NetworkInfo; use reth_primitives::{ + revm_primitives::{BlockEnv, CfgEnv}, Address, BlockId, BlockNumberOrTag, ChainInfo, SealedBlock, B256, U256, U64, }; use reth_provider::{ @@ -22,7 +23,6 @@ use reth_provider::{ use reth_rpc_types::{SyncInfo, SyncStatus}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use reth_transaction_pool::TransactionPool; -use revm_primitives::{BlockEnv, CfgEnv}; use std::{ future::Future, sync::Arc, diff --git a/crates/rpc/rpc/src/eth/api/pending_block.rs 
b/crates/rpc/rpc/src/eth/api/pending_block.rs index f17d3b8799a92..827dfec1a17b6 100644 --- a/crates/rpc/rpc/src/eth/api/pending_block.rs +++ b/crates/rpc/rpc/src/eth/api/pending_block.rs @@ -5,6 +5,9 @@ use reth_primitives::{ constants::{eip4844::MAX_DATA_GAS_PER_BLOCK, BEACON_NONCE}, proofs, revm::{compat::into_reth_log, env::tx_env_with_recovered}, + revm_primitives::{ + BlockEnv, CfgEnv, EVMError, Env, InvalidTransaction, ResultAndState, SpecId, + }, Block, BlockId, BlockNumberOrTag, ChainSpec, Header, IntoRecoveredTransaction, Receipt, Receipts, SealedBlock, SealedHeader, B256, EMPTY_OMMER_ROOT_HASH, U256, }; @@ -15,9 +18,6 @@ use reth_revm::{ }; use reth_transaction_pool::TransactionPool; use revm::{db::states::bundle_state::BundleRetention, Database, DatabaseCommit, State}; -use revm_primitives::{ - BlockEnv, CfgEnv, EVMError, Env, InvalidTransaction, ResultAndState, SpecId, -}; use std::time::Instant; /// Configured [BlockEnv] and [CfgEnv] for a pending block diff --git a/crates/rpc/rpc/src/eth/api/server.rs b/crates/rpc/rpc/src/eth/api/server.rs index 687f35c3753d2..08bfa54db0310 100644 --- a/crates/rpc/rpc/src/eth/api/server.rs +++ b/crates/rpc/rpc/src/eth/api/server.rs @@ -399,7 +399,7 @@ mod tests { use reth_network_api::noop::NoopNetwork; use reth_primitives::{ basefee::calculate_next_block_base_fee, constants::ETHEREUM_BLOCK_GAS_LIMIT, BaseFeeParams, - Block, BlockNumberOrTag, Header, TransactionSigned, U256, + Block, BlockNumberOrTag, Header, TransactionSigned, B256, U256, }; use reth_provider::{ test_utils::{MockEthProvider, NoopProvider}, @@ -408,7 +408,6 @@ mod tests { use reth_rpc_api::EthApiServer; use reth_rpc_types::FeeHistory; use reth_transaction_pool::test_utils::{testing_pool, TestPool}; - use revm_primitives::B256; fn build_test_eth_api< P: BlockReaderIdExt diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 130231d96801e..4989be4219707 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -16,6 +16,7 @@ use reth_network_api::NetworkInfo; use reth_primitives::{ eip4844::calc_blob_gasprice, revm::env::{fill_block_env_with_coinbase, tx_env_with_recovered}, + revm_primitives::{db::DatabaseCommit, Env, ExecutionResult, ResultAndState, SpecId, State}, Address, BlockId, BlockNumberOrTag, Bytes, FromRecoveredPooledTransaction, Header, IntoRecoveredTransaction, Receipt, SealedBlock, TransactionKind::{Call, Create}, @@ -39,7 +40,6 @@ use revm::{ primitives::{BlockEnv, CfgEnv}, Inspector, }; -use revm_primitives::{db::DatabaseCommit, Env, ExecutionResult, ResultAndState, SpecId, State}; #[cfg(feature = "optimism")] use crate::eth::api::optimism::OptimismTxMeta; diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index 0ce269b81908d..4f52e0579d776 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -9,14 +9,17 @@ use crate::{ }, BlockingTaskGuard, }; -use reth_primitives::{keccak256, U256}; +use reth_primitives::{ + keccak256, + revm_primitives::db::{DatabaseCommit, DatabaseRef}, + U256, +}; use reth_revm::database::StateProviderDatabase; use reth_rpc_types::{EthCallBundle, EthCallBundleResponse, EthCallBundleTransactionResult}; use revm::{ db::CacheDB, primitives::{Env, ResultAndState, TxEnv}, }; -use revm_primitives::db::{DatabaseCommit, DatabaseRef}; use std::sync::Arc; /// `Eth` bundle implementation. 
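The `eip4844.rs` hunk in patch 01 routes the blob-fee helpers through the same crate-local re-export. A hedged sketch of how they compose, assuming the signatures re-exported at this commit (function name and values are illustrative):

```rust
use reth_primitives::eip4844::{calc_blob_gasprice, calculate_excess_blob_gas};

/// Derive the blob gas price for a child block from the parent's
/// EIP-4844 counters (excess blob gas and blob gas used).
fn next_blob_gasprice(parent_excess_blob_gas: u64, parent_blob_gas_used: u64) -> u128 {
    let excess = calculate_excess_blob_gas(parent_excess_blob_gas, parent_blob_gas_used);
    calc_blob_gasprice(excess)
}

fn main() {
    // Illustrative numbers only: six full blobs used, no prior excess.
    println!("{}", next_blob_gasprice(0, 6 * 131_072));
}
```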
diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs index 491c35c3cd0a3..1b682aed9c127 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc/src/eth/error.rs @@ -7,7 +7,7 @@ use jsonrpsee::{ types::{error::CALL_EXECUTION_FAILED_CODE, ErrorObject}, }; use reth_interfaces::RethError; -use reth_primitives::{Address, Bytes, U256}; +use reth_primitives::{revm_primitives::InvalidHeader, Address, Bytes, U256}; use reth_revm::tracing::js::JsInspectorError; use reth_rpc_types::{error::EthRpcErrorCode, BlockError, CallInputError}; use reth_transaction_pool::error::{ @@ -15,7 +15,6 @@ use reth_transaction_pool::error::{ PoolTransactionError, }; use revm::primitives::{EVMError, ExecutionResult, Halt, OutOfGasError}; -use revm_primitives::InvalidHeader; use std::time::Duration; /// Result alias diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index d81c6430316ed..5399ec49071a5 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -11,7 +11,8 @@ use async_trait::async_trait; use jsonrpsee::core::RpcResult as Result; use reth_consensus_common::calc::{base_block_reward, block_reward}; use reth_primitives::{ - revm::env::tx_env_with_recovered, BlockId, BlockNumberOrTag, Bytes, SealedHeader, B256, U256, + revm::env::tx_env_with_recovered, revm_primitives::db::DatabaseCommit, BlockId, + BlockNumberOrTag, Bytes, SealedHeader, B256, U256, }; use reth_provider::{BlockReader, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; use reth_revm::{ @@ -25,7 +26,6 @@ use reth_rpc_types::{ BlockError, BlockOverrides, CallRequest, Index, }; use revm::{db::CacheDB, primitives::Env}; -use revm_primitives::db::DatabaseCommit; use std::{collections::HashSet, sync::Arc}; use tokio::sync::{AcquireError, OwnedSemaphorePermit}; From e109896dbef4e2bef0872761d056e56f0684f33f Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Wed, 15 Nov 2023 04:15:37 -0800 Subject: [PATCH 02/77] chore: remove unnecessary async_trait usage (#5433) --- crates/interfaces/src/consensus.rs | 2 -- crates/interfaces/src/test_utils/bodies.rs | 1 - crates/interfaces/src/test_utils/headers.rs | 1 - crates/stages/src/test_utils/runner.rs | 1 - crates/transaction-pool/src/test_utils/mod.rs | 1 - 5 files changed, 6 deletions(-) diff --git a/crates/interfaces/src/consensus.rs b/crates/interfaces/src/consensus.rs index 339db8403ed38..bdb1c04eaf879 100644 --- a/crates/interfaces/src/consensus.rs +++ b/crates/interfaces/src/consensus.rs @@ -1,4 +1,3 @@ -use async_trait::async_trait; use reth_primitives::{ BlockHash, BlockNumber, GotExpected, GotExpectedBoxed, Header, InvalidTransactionError, SealedBlock, SealedHeader, B256, U256, @@ -9,7 +8,6 @@ use std::fmt::Debug; pub use reth_rpc_types::engine::ForkchoiceState; /// Consensus is a protocol that chooses canonical chain. -#[async_trait] #[auto_impl::auto_impl(&, Arc)] pub trait Consensus: Debug + Send + Sync { /// Validate if header is correct and follows consensus specification. 
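Patch 02 works because `#[async_trait]` only changes traits that declare `async fn`s; the traits touched here have none, so the attribute was inert and can be dropped with no semantic change. A self-contained sketch of the distinction (types invented for illustration):

```rust
/// A trait with only synchronous methods compiles as a plain trait,
/// object-safe and `dyn`-usable, without any macro support.
trait Validate: Send + Sync {
    fn validate(&self, input: &[u8]) -> Result<(), String>;
}

struct Noop;

impl Validate for Noop {
    fn validate(&self, _input: &[u8]) -> Result<(), String> {
        // `async_trait` would only be needed if `validate` were `async fn`.
        Ok(())
    }
}

fn main() {
    let v: Box<dyn Validate> = Box::new(Noop);
    assert!(v.validate(b"payload").is_ok());
}
```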
diff --git a/crates/interfaces/src/test_utils/bodies.rs b/crates/interfaces/src/test_utils/bodies.rs index 3f4daccb5fd8e..2c79476adc0d4 100644 --- a/crates/interfaces/src/test_utils/bodies.rs +++ b/crates/interfaces/src/test_utils/bodies.rs @@ -4,7 +4,6 @@ use crate::p2p::{ error::PeerRequestResult, priority::Priority, }; -use async_trait::async_trait; use futures::{future, Future, FutureExt}; use reth_primitives::{BlockBody, WithPeerId, B256}; use std::{ diff --git a/crates/interfaces/src/test_utils/headers.rs b/crates/interfaces/src/test_utils/headers.rs index 7fee42101dcc3..bf761846b27ab 100644 --- a/crates/interfaces/src/test_utils/headers.rs +++ b/crates/interfaces/src/test_utils/headers.rs @@ -282,7 +282,6 @@ impl TestConsensus { } } -#[async_trait::async_trait] impl Consensus for TestConsensus { fn validate_header(&self, _header: &SealedHeader) -> Result<(), ConsensusError> { if self.fail_validation() { diff --git a/crates/stages/src/test_utils/runner.rs b/crates/stages/src/test_utils/runner.rs index 190ea3b53a998..ab8e231363deb 100644 --- a/crates/stages/src/test_utils/runner.rs +++ b/crates/stages/src/test_utils/runner.rs @@ -18,7 +18,6 @@ pub(crate) enum TestRunnerError { } /// A generic test runner for stages. -#[async_trait::async_trait] pub(crate) trait StageTestRunner { type S: Stage + 'static; diff --git a/crates/transaction-pool/src/test_utils/mod.rs b/crates/transaction-pool/src/test_utils/mod.rs index 8013fa3b9120d..05879c0cbf184 100644 --- a/crates/transaction-pool/src/test_utils/mod.rs +++ b/crates/transaction-pool/src/test_utils/mod.rs @@ -9,7 +9,6 @@ use crate::{ blobstore::InMemoryBlobStore, noop::MockTransactionValidator, Pool, PoolTransaction, TransactionOrigin, TransactionValidationOutcome, TransactionValidator, }; -use async_trait::async_trait; pub use gen::*; pub use mock::*; use std::{marker::PhantomData, sync::Arc}; From b98d7c8c12c7be7e5cc63f25046d1dcfe289e2f6 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 15 Nov 2023 13:22:35 +0000 Subject: [PATCH 03/77] chore(storage): rebuild MDBX if sources changed (#5435) --- .gitignore | 3 +++ crates/storage/libmdbx-rs/mdbx-sys/build.rs | 2 ++ 2 files changed, 5 insertions(+) diff --git a/.gitignore b/.gitignore index d31f88928252d..2a7f2c58955d2 100644 --- a/.gitignore +++ b/.gitignore @@ -42,3 +42,6 @@ lcov.info # Generated by ./etc/generate-jwt.sh jwttoken/ + +# Generated by CMake due to MDBX sources +crates/storage/libmdbx-rs/mdbx-sys/libmdbx/cmake-build-debug \ No newline at end of file diff --git a/crates/storage/libmdbx-rs/mdbx-sys/build.rs b/crates/storage/libmdbx-rs/mdbx-sys/build.rs index 698841baa9c05..194ffaa33712a 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/build.rs +++ b/crates/storage/libmdbx-rs/mdbx-sys/build.rs @@ -55,6 +55,8 @@ fn main() { let mut mdbx = PathBuf::from(&env::var("CARGO_MANIFEST_DIR").unwrap()); mdbx.push("libmdbx"); + println!("cargo:rerun-if-changed={}", mdbx.display()); + let out_path = PathBuf::from(env::var("OUT_DIR").unwrap()); let bindings = bindgen::Builder::default() From f42db5b4a2bbf2a22e79a5e18b5d447f0bb9bc29 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 15 Nov 2023 15:30:09 +0100 Subject: [PATCH 04/77] chore: put env types into container type (#5436) Co-authored-by: DaniPopes <57450786+DaniPopes@users.noreply.github.com> --- crates/storage/libmdbx-rs/src/environment.rs | 42 +++++++++++--------- 1 file changed, 24 insertions(+), 18 deletions(-) diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs 
index 7315c9f082a48..e8ace3115f672 100644
--- a/crates/storage/libmdbx-rs/src/environment.rs
+++ b/crates/storage/libmdbx-rs/src/environment.rs
@@ -70,8 +70,7 @@ pub struct Environment<E>
 where
     E: EnvironmentKind,
 {
-    env: *mut ffi::MDBX_env,
-    txn_manager: Option<SyncSender<TxnManagerMessage>>,
+    inner: EnvironmentInner,
     _marker: PhantomData<E>,
 }

@@ -102,7 +101,7 @@ where
     /// Requires [Mode::ReadWrite] and returns None otherwise.
     #[inline]
     pub(crate) fn txn_manager(&self) -> Option<&SyncSender<TxnManagerMessage>> {
-        self.txn_manager.as_ref()
+        self.inner.txn_manager.as_ref()
     }

     /// Returns a raw pointer to the underlying MDBX environment.
@@ -111,7 +110,7 @@ where
     /// environment.
     #[inline]
     pub fn env(&self) -> *mut ffi::MDBX_env {
-        self.env
+        self.inner.env
     }

     /// Create a read-only transaction for use with the environment.
@@ -223,6 +222,24 @@ where
     }
 }

+/// Container type for Environment internals.
+///
+/// This holds the raw pointer to the MDBX environment and the transaction manager.
+/// The env is opened via [mdbx_env_create](ffi::mdbx_env_create) and closed when this type drops.
+struct EnvironmentInner {
+    env: *mut ffi::MDBX_env,
+    txn_manager: Option<SyncSender<TxnManagerMessage>>,
+}
+
+impl Drop for EnvironmentInner {
+    fn drop(&mut self) {
+        // Close open mdbx environment on drop
+        unsafe {
+            ffi::mdbx_env_close_ex(self.env, false);
+        }
+    }
+}
+
 /// Environment statistics.
 ///
 /// Contains information about the size and layout of an MDBX environment or database.
@@ -338,18 +355,7 @@ where
     E: EnvironmentKind,
 {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.debug_struct("Environment").finish()
-    }
-}
-
-impl<E> Drop for Environment<E>
-where
-    E: EnvironmentKind,
-{
-    fn drop(&mut self) {
-        unsafe {
-            ffi::mdbx_env_close_ex(self.env, false);
-        }
+        f.debug_struct("Environment").finish_non_exhaustive()
     }
 }

@@ -511,7 +517,7 @@ where
         }
     }

-        let mut env = Environment { env, txn_manager: None, _marker: PhantomData };
+        let mut env = EnvironmentInner { env, txn_manager: None };

         if let Mode::ReadWrite { .. } = self.flags.mode {
             let (tx, rx) = std::sync::mpsc::sync_channel(0);
@@ -556,7 +562,7 @@ where
             env.txn_manager = Some(tx);
         }

-        Ok(env)
+        Ok(Environment { inner: env, _marker: Default::default() })
     }

     /// Sets the provided options in the environment.

From 4e1e0463f77b9baca056fbe55824409b30235b16 Mon Sep 17 00:00:00 2001
From: Armin Sabouri
Date: Wed, 15 Nov 2023 09:32:58 -0500
Subject: [PATCH 05/77] docs: fix spelling mistakes in eth-wire.md (#5438)

Co-authored-by: DaniPopes <57450786+DaniPopes@users.noreply.github.com>
---
 docs/crates/eth-wire.md | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/docs/crates/eth-wire.md b/docs/crates/eth-wire.md
index dfc85e37dbaa6..41bc065f90894 100644
--- a/docs/crates/eth-wire.md
+++ b/docs/crates/eth-wire.md
@@ -5,10 +5,10 @@ The `eth-wire` crate provides abstractions over the [RLPx](https://github.com/et

 This crate can be thought of as having 2 components:

-1. Data structures which serialize and deserialize the eth protcol messages into Rust compatible types.
-2. Abstractions over Tokio Streams which operate on these types.
+1. Data structures that serialize and deserialize the Ethereum protocol messages into Rust-compatible types.
+2. Abstractions over Tokio Streams that operate on these types.

-(Note that ECIES is implemented in a seperate `reth-ecies` crate.)
+(Note that ECIES is implemented in a separate `reth-ecies` crate.)

 ## Types
 The most basic Eth-wire type is an `ProtocolMessage`. It describes all messages that reth can send/receive.
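The doc hunk above describes eth-wire message types implementing `Encodable`/`Decodable`. A minimal round-trip sketch using `alloy_rlp`'s derive macros (the `Ping` type is invented, not an actual eth-wire message):

```rust
use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable};

/// Invented message type; real eth-wire structs follow the same pattern.
#[derive(Debug, PartialEq, RlpEncodable, RlpDecodable)]
struct Ping {
    id: u64,
}

fn main() {
    let msg = Ping { id: 7 };
    let mut buf = Vec::new();
    msg.encode(&mut buf);
    // Decoding consumes the byte slice and reproduces the original value.
    let decoded = Ping::decode(&mut buf.as_slice()).unwrap();
    assert_eq!(decoded, msg);
}
```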
@@ -59,7 +59,7 @@ pub struct RequestPair {
 }
 ```

-Every `Ethmessage` has a correspoding rust struct which implements the `Encodable` and `Decodable` traits.
+Every `Ethmessage` has a corresponding rust struct that implements the `Encodable` and `Decodable` traits.

 These traits are defined as follows:
 [Crate: crates/rlp](https://github.com/paradigmxyz/reth/tree/1563506aea09049a85e5cc72c2894f3f7a371581/crates/rlp)

From de0cca24889e996cfd3a4d757024626fa19b49c4 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Wed, 15 Nov 2023 15:46:07 +0100
Subject: [PATCH 06/77] refactor: transaction internals (#5437)

---
 crates/storage/libmdbx-rs/src/cursor.rs      |  30 +--
 crates/storage/libmdbx-rs/src/database.rs    |   8 +-
 crates/storage/libmdbx-rs/src/transaction.rs | 261 ++++++++++++-------
 3 files changed, 182 insertions(+), 117 deletions(-)

diff --git a/crates/storage/libmdbx-rs/src/cursor.rs b/crates/storage/libmdbx-rs/src/cursor.rs
index 5d30d7abdd3bb..d9dd917591ebc 100644
--- a/crates/storage/libmdbx-rs/src/cursor.rs
+++ b/crates/storage/libmdbx-rs/src/cursor.rs
@@ -2,7 +2,7 @@ use crate::{
     error::{mdbx_result, Error, Result},
     flags::*,
     mdbx_try_optional,
-    transaction::{txn_execute, TransactionKind, RW},
+    transaction::{TransactionKind, TransactionPtr, RW},
     EnvironmentKind, TableObject, Transaction,
 };
 use ffi::{
@@ -12,15 +12,14 @@ use ffi::{
     MDBX_PREV_NODUP, MDBX_SET, MDBX_SET_KEY, MDBX_SET_LOWERBOUND, MDBX_SET_RANGE,
 };
 use libc::c_void;
-use parking_lot::Mutex;
-use std::{borrow::Cow, fmt, marker::PhantomData, mem, ptr, rc::Rc};
+use std::{borrow::Cow, fmt, marker::PhantomData, mem, ptr};

 /// A cursor for navigating the items within a database.
 pub struct Cursor<'txn, K>
 where
     K: TransactionKind,
 {
-    txn: Rc<Mutex<*mut ffi::MDBX_txn>>,
+    txn: TransactionPtr,
     cursor: *mut ffi::MDBX_cursor,
     _marker: PhantomData<K>,
 }
@@ -34,10 +33,9 @@ where
         dbi: ffi::MDBX_dbi,
     ) -> Result<Self> {
         let mut cursor: *mut ffi::MDBX_cursor = ptr::null_mut();
-
-        let txn = txn.txn_mutex();
+        let txn = txn.txn_ptr();
         unsafe {
-            mdbx_result(txn_execute(&txn, |txn| ffi::mdbx_cursor_open(txn, dbi, &mut cursor)))?;
+            mdbx_result(txn.txn_execute(|txn| ffi::mdbx_cursor_open(txn, dbi, &mut cursor)))?;
         }
         Ok(Self { txn, cursor, _marker: PhantomData })
     }
@@ -81,7 +79,7 @@ where
         let mut data_val = slice_to_val(data);
         let key_ptr = key_val.iov_base;
         let data_ptr = data_val.iov_base;
-        txn_execute(&self.txn, |txn| {
+        self.txn.txn_execute(|txn| {
             let v = mdbx_result(ffi::mdbx_cursor_get(
                 self.cursor,
                 &mut key_val,
@@ -431,7 +429,7 @@ impl<'txn> Cursor<'txn, RW> {
         let mut data_val: ffi::MDBX_val =
             ffi::MDBX_val { iov_len: data.len(), iov_base: data.as_ptr() as *mut c_void };
         mdbx_result(unsafe {
-            txn_execute(&self.txn, |_| {
+            self.txn.txn_execute(|_| {
                 ffi::mdbx_cursor_put(self.cursor, &key_val, &mut data_val, flags.bits())
             })
         })?;
@@ -447,7 +445,7 @@ impl<'txn> Cursor<'txn, RW> {
     /// current key, if the database was opened with [DatabaseFlags::DUP_SORT].
     pub fn del(&mut self, flags: WriteFlags) -> Result<()> {
         mdbx_result(unsafe {
-            txn_execute(&self.txn, |_| ffi::mdbx_cursor_del(self.cursor, flags.bits()))
+            self.txn.txn_execute(|_| ffi::mdbx_cursor_del(self.cursor, flags.bits()))
         })?;

         Ok(())
@@ -459,7 +457,7 @@ where
     K: TransactionKind,
 {
     fn clone(&self) -> Self {
-        txn_execute(&self.txn, |_| Self::new_at_position(self).unwrap())
+        self.txn.txn_execute(|_| Self::new_at_position(self).unwrap())
     }
 }

@@ -468,7 +466,7 @@ where
     K: TransactionKind,
 {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.debug_struct("Cursor").finish()
+        f.debug_struct("Cursor").finish_non_exhaustive()
     }
 }

@@ -477,7 +475,7 @@ where
     K: TransactionKind,
 {
     fn drop(&mut self) {
-        txn_execute(&self.txn, |_| unsafe { ffi::mdbx_cursor_close(self.cursor) })
+        self.txn.txn_execute(|_| unsafe { ffi::mdbx_cursor_close(self.cursor) })
     }
 }

@@ -565,7 +563,7 @@ where
             let mut data = ffi::MDBX_val { iov_len: 0, iov_base: ptr::null_mut() };
             let op = mem::replace(op, *next_op);
             unsafe {
-                txn_execute(&cursor.txn, |txn| {
+                cursor.txn.txn_execute(|txn| {
                     match ffi::mdbx_cursor_get(cursor.cursor(), &mut key, &mut data, op) {
                         ffi::MDBX_SUCCESS => {
                             let key = match Key::decode_val::<K>(txn, &key) {
@@ -656,7 +654,7 @@ where
             let mut data = ffi::MDBX_val { iov_len: 0, iov_base: ptr::null_mut() };
             let op = mem::replace(op, *next_op);
             unsafe {
-                txn_execute(&cursor.txn, |txn| {
+                cursor.txn.txn_execute(|txn| {
                     match ffi::mdbx_cursor_get(cursor.cursor(), &mut key, &mut data, op) {
                         ffi::MDBX_SUCCESS => {
                             let key = match Key::decode_val::<K>(txn, &key) {
@@ -753,7 +751,7 @@ where
             let mut data = ffi::MDBX_val { iov_len: 0, iov_base: ptr::null_mut() };
             let op = mem::replace(op, ffi::MDBX_NEXT_NODUP);

-            txn_execute(&cursor.txn, |_| {
+            cursor.txn.txn_execute(|_| {
                 let err_code =
                     unsafe { ffi::mdbx_cursor_get(cursor.cursor(), &mut key, &mut data, op) };

diff --git a/crates/storage/libmdbx-rs/src/database.rs b/crates/storage/libmdbx-rs/src/database.rs
index f76a7d66dc587..8609c4e49b3ef 100644
--- a/crates/storage/libmdbx-rs/src/database.rs
+++ b/crates/storage/libmdbx-rs/src/database.rs
@@ -1,7 +1,7 @@
 use crate::{
     environment::EnvironmentKind,
     error::{mdbx_result, Result},
-    transaction::{txn_execute, TransactionKind},
+    transaction::TransactionKind,
     Transaction,
 };
 use ffi::MDBX_db_flags_t;
@@ -29,9 +29,9 @@ impl<'txn> Database<'txn> {
         let c_name = name.map(|n| CString::new(n).unwrap());
         let name_ptr = if let Some(c_name) = &c_name { c_name.as_ptr() } else { ptr::null() };
         let mut dbi: ffi::MDBX_dbi = 0;
-        mdbx_result(txn_execute(&txn.txn_mutex(), |txn| unsafe {
-            ffi::mdbx_dbi_open(txn, name_ptr, flags, &mut dbi)
-        }))?;
+        mdbx_result(
+            txn.txn_execute(|txn| unsafe { ffi::mdbx_dbi_open(txn, name_ptr, flags, &mut dbi) }),
+        )?;
         Ok(Self::new_from_ptr(dbi))
     }

diff --git a/crates/storage/libmdbx-rs/src/transaction.rs b/crates/storage/libmdbx-rs/src/transaction.rs
index 229125e9fe710..bd8cbae8f03fa 100644
--- a/crates/storage/libmdbx-rs/src/transaction.rs
+++ b/crates/storage/libmdbx-rs/src/transaction.rs
@@ -10,8 +10,12 @@ use indexmap::IndexSet;
 use libc::{c_uint, c_void};
 use parking_lot::Mutex;
 use std::{
-    fmt, fmt::Debug, marker::PhantomData, mem::size_of, ptr, rc::Rc, slice,
-    sync::mpsc::sync_channel,
+    fmt,
+    fmt::Debug,
+    marker::PhantomData,
+    mem::size_of,
+    ptr, slice,
+    sync::{atomic::AtomicBool, mpsc::sync_channel, Arc},
 };

 mod private {
@@ -61,11 +65,7 @@ pub struct Transaction<'env, K, E>
 where
     K: TransactionKind,
     E: EnvironmentKind,
 {
-    txn: Rc<Mutex<*mut ffi::MDBX_txn>>,
-    primed_dbis: Mutex<IndexSet<ffi::MDBX_dbi>>,
-    committed: bool,
-    env: &'env Environment<E>,
-    _marker: PhantomData<K>,
+    inner: Arc<TransactionInner<'env, K, E>>,
 }

@@ -88,35 +88,43 @@ impl<'env, K, E> Transaction<'env, K, E>
     }

     pub(crate) fn new_from_ptr(env: &'env Environment<E>, txn: *mut ffi::MDBX_txn) -> Self {
-        Self {
-            txn: Rc::new(Mutex::new(txn)),
+        let inner = TransactionInner {
+            txn: TransactionPtr::new(txn),
             primed_dbis: Mutex::new(IndexSet::new()),
-            committed: false,
+            committed: AtomicBool::new(false),
             env,
             _marker: PhantomData,
-        }
+        };
+        Self { inner: Arc::new(inner) }
     }

-    /// Returns a raw pointer to the underlying MDBX transaction.
+    /// Executes the given closure once the lock on the transaction is acquired.
     ///
     /// The caller **must** ensure that the pointer is not used after the
     /// lifetime of the transaction.
-    pub(crate) fn txn_mutex(&self) -> Rc<Mutex<*mut ffi::MDBX_txn>> {
-        self.txn.clone()
+    #[inline]
+    pub(crate) fn txn_execute<F: FnOnce(*mut ffi::MDBX_txn) -> T, T>(&self, f: F) -> T {
+        self.inner.txn_execute(f)
+    }
+
+    pub(crate) fn txn_ptr(&self) -> TransactionPtr {
+        self.inner.txn.clone()
     }

+    /// Returns a copy of the raw pointer to the underlying MDBX transaction.
+    #[doc(hidden)]
     pub fn txn(&self) -> *mut ffi::MDBX_txn {
-        *self.txn.lock()
+        self.inner.txn.txn
     }

     /// Returns a raw pointer to the MDBX environment.
     pub fn env(&self) -> &Environment<E> {
-        self.env
+        self.inner.env
     }

     /// Returns the transaction id.
     pub fn id(&self) -> u64 {
-        txn_execute(&self.txn, |txn| unsafe { ffi::mdbx_txn_id(txn) })
+        self.txn_execute(|txn| unsafe { ffi::mdbx_txn_id(txn) })
     }

     /// Gets an item from a database.
@@ -135,7 +143,7 @@ where
         ffi::MDBX_val { iov_len: key.len(), iov_base: key.as_ptr() as *mut c_void };
         let mut data_val: ffi::MDBX_val = ffi::MDBX_val { iov_len: 0, iov_base: ptr::null_mut() };

-        txn_execute(&self.txn, |txn| unsafe {
+        self.txn_execute(|txn| unsafe {
             match ffi::mdbx_get(txn, dbi, &key_val, &mut data_val) {
                 ffi::MDBX_SUCCESS => Key::decode_val::<K>(txn, &data_val).map(Some),
                 ffi::MDBX_NOTFOUND => Ok(None),
@@ -152,28 +160,39 @@ where
     }

     pub fn prime_for_permaopen(&self, db: Database<'_>) {
-        self.primed_dbis.lock().insert(db.dbi());
+        self.inner.primed_dbis.lock().insert(db.dbi());
     }

     /// Commits the transaction and returns table handles permanently open for the lifetime of
     /// `Environment`.
-    pub fn commit_and_rebind_open_dbs(mut self) -> Result<(bool, Vec<Database<'env>>)> {
-        let txnlck = self.txn.lock();
-        let txn = *txnlck;
-        let result = if K::ONLY_CLEAN {
-            mdbx_result(unsafe { ffi::mdbx_txn_commit_ex(txn, ptr::null_mut()) })
-        } else {
-            let (sender, rx) = sync_channel(0);
-            self.env
-                .txn_manager()
-                .unwrap()
-                .send(TxnManagerMessage::Commit { tx: TxnPtr(txn), sender })
-                .unwrap();
-            rx.recv().unwrap()
+    pub fn commit_and_rebind_open_dbs(self) -> Result<(bool, Vec<Database<'env>>)> {
+        let result = {
+            let result = self.txn_execute(|txn| {
+                if K::ONLY_CLEAN {
+                    mdbx_result(unsafe { ffi::mdbx_txn_commit_ex(txn, ptr::null_mut()) })
+                } else {
+                    let (sender, rx) = sync_channel(0);
+                    self.env()
+                        .txn_manager()
+                        .unwrap()
+                        .send(TxnManagerMessage::Commit { tx: TxnPtr(txn), sender })
+                        .unwrap();
+                    rx.recv().unwrap()
+                }
+            });
+            self.inner.set_committed();
+            result
         };
-        self.committed = true;
         result.map(|v| {
-            (v, self.primed_dbis.lock().iter().map(|&dbi| Database::new_from_ptr(dbi)).collect())
+            (
+                v,
+                self.inner
+                    .primed_dbis
+                    .lock()
+                    .iter()
+                    .map(|&dbi| Database::new_from_ptr(dbi))
+                    .collect(),
+            )
         })
     }

@@ -188,7 +207,7 @@ where
     /// The returned database handle may be shared among any transaction in the environment.
     ///
     /// The database name may not contain the null character.
-    pub fn open_db<'txn>(&'txn self, name: Option<&str>) -> Result<Database<'txn>> {
+    pub fn open_db(&self, name: Option<&str>) -> Result<Database<'_>> {
         Database::new(self, name, 0)
     }

@@ -196,7 +215,7 @@ where
     pub fn db_flags<'txn>(&'txn self, db: &Database<'txn>) -> Result<DatabaseFlags> {
         let mut flags: c_uint = 0;
         unsafe {
-            mdbx_result(txn_execute(&self.txn, |txn| {
+            mdbx_result(self.txn_execute(|txn| {
                 ffi::mdbx_dbi_flags_ex(txn, db.dbi(), &mut flags, ptr::null_mut())
             }))?;
         }
@@ -215,7 +234,7 @@ where
     pub fn db_stat_with_dbi(&self, dbi: ffi::MDBX_dbi) -> Result<Stat> {
         unsafe {
             let mut stat = Stat::new();
-            mdbx_result(txn_execute(&self.txn, |txn| {
+            mdbx_result(self.txn_execute(|txn| {
                 ffi::mdbx_dbi_stat(txn, dbi, stat.mdb_stat(), size_of::<ffi::MDBX_stat>())
             }))?;
             Ok(stat)
@@ -233,23 +252,73 @@ where
         }
     }
 }

-pub(crate) fn txn_execute<F: FnOnce(*mut ffi::MDBX_txn) -> T, T>(
-    txn: &Mutex<*mut ffi::MDBX_txn>,
-    f: F,
-) -> T {
-    let lck = txn.lock();
-    (f)(*lck)
+/// Internals of a transaction.
+struct TransactionInner<'env, K, E>
+where
+    K: TransactionKind,
+    E: EnvironmentKind,
+{
+    /// The transaction pointer itself.
+    txn: TransactionPtr,
+    /// A set of database handles that are primed for permaopen.
+    primed_dbis: Mutex<IndexSet<ffi::MDBX_dbi>>,
+    /// Whether the transaction has committed.
+    committed: AtomicBool,
+    env: &'env Environment<E>,
+    _marker: PhantomData<K>,
+}
+
+impl<'env, K, E> TransactionInner<'env, K, E>
+where
+    K: TransactionKind,
+    E: EnvironmentKind,
+{
+    /// Marks the transaction as committed.
+    fn set_committed(&self) {
+        self.committed.store(true, std::sync::atomic::Ordering::SeqCst);
+    }
+
+    fn has_committed(&self) -> bool {
+        self.committed.load(std::sync::atomic::Ordering::SeqCst)
+    }
+
+    #[inline]
+    fn txn_execute<F: FnOnce(*mut ffi::MDBX_txn) -> T, T>(&self, f: F) -> T {
+        self.txn.txn_execute(f)
+    }
+}
+
+impl<'env, K, E> Drop for TransactionInner<'env, K, E>
+where
+    K: TransactionKind,
+    E: EnvironmentKind,
+{
+    fn drop(&mut self) {
+        self.txn_execute(|txn| {
+            if !self.has_committed() {
+                if K::ONLY_CLEAN {
+                    unsafe {
+                        ffi::mdbx_txn_abort(txn);
+                    }
+                } else {
+                    let (sender, rx) = sync_channel(0);
+                    self.env
+                        .txn_manager()
+                        .unwrap()
+                        .send(TxnManagerMessage::Abort { tx: TxnPtr(txn), sender })
+                        .unwrap();
+                    rx.recv().unwrap().unwrap();
+                }
+            }
+        })
+    }
 }

 impl<'env, E> Transaction<'env, RW, E>
 where
     E: EnvironmentKind,
 {
-    fn open_db_with_flags<'txn>(
-        &'txn self,
-        name: Option<&str>,
-        flags: DatabaseFlags,
-    ) -> Result<Database<'txn>> {
+    fn open_db_with_flags(&self, name: Option<&str>, flags: DatabaseFlags) -> Result<Database<'_>> {
         Database::new(self, name, flags.bits())
     }

@@ -265,11 +334,7 @@ where
     /// This function will fail with [Error::BadRslot] if called by a thread with an open
     /// transaction.
-    pub fn create_db<'txn>(
-        &'txn self,
-        name: Option<&str>,
-        flags: DatabaseFlags,
-    ) -> Result<Database<'txn>> {
+    pub fn create_db(&self, name: Option<&str>, flags: DatabaseFlags) -> Result<Database<'_>> {
         self.open_db_with_flags(name, flags | DatabaseFlags::CREATE)
     }

@@ -292,7 +357,7 @@ where
         ffi::MDBX_val { iov_len: key.len(), iov_base: key.as_ptr() as *mut c_void };
         let mut data_val: ffi::MDBX_val =
             ffi::MDBX_val { iov_len: data.len(), iov_base: data.as_ptr() as *mut c_void };
-        mdbx_result(txn_execute(&self.txn, |txn| unsafe {
+        mdbx_result(self.txn_execute(|txn| unsafe {
             ffi::mdbx_put(txn, dbi, &key_val, &mut data_val, flags.bits())
         }))?;

@@ -315,7 +380,7 @@ where
         let mut data_val: ffi::MDBX_val =
             ffi::MDBX_val { iov_len: len, iov_base: ptr::null_mut::<c_void>() };
         unsafe {
-            mdbx_result(txn_execute(&self.txn, |txn| {
+            mdbx_result(self.txn_execute(|txn| {
                 ffi::mdbx_put(
                     txn,
                     db.dbi(),
@@ -352,7 +417,7 @@ where
         });

         mdbx_result({
-            txn_execute(&self.txn, |txn| {
+            self.txn_execute(|txn| {
                 if let Some(d) = data_val {
                     unsafe { ffi::mdbx_del(txn, dbi, &key_val, &d) }
                 } else {
@@ -369,7 +434,7 @@ where

     /// Empties the given database. All items will be removed.
     pub fn clear_db(&self, dbi: ffi::MDBX_dbi) -> Result<()> {
-        mdbx_result(txn_execute(&self.txn, |txn| unsafe { ffi::mdbx_drop(txn, dbi, false) }))?;
+        mdbx_result(self.txn_execute(|txn| unsafe { ffi::mdbx_drop(txn, dbi, false) }))?;

         Ok(())
     }

@@ -380,7 +445,7 @@ where
     /// Caller must close ALL other [Database] and [Cursor] instances pointing to the same dbi
     /// BEFORE calling this function.
     pub unsafe fn drop_db<'txn>(&'txn self, db: Database<'txn>) -> Result<()> {
-        mdbx_result(txn_execute(&self.txn, |txn| ffi::mdbx_drop(txn, db.dbi(), true)))?;
+        mdbx_result(self.txn_execute(|txn| ffi::mdbx_drop(txn, db.dbi(), true)))?;

         Ok(())
     }

@@ -396,7 +461,7 @@ where
     /// Caller must close ALL other [Database] and [Cursor] instances pointing to the same dbi
     /// BEFORE calling this function.
     pub unsafe fn close_db(&self, db: Database<'_>) -> Result<()> {
-        mdbx_result(ffi::mdbx_dbi_close(self.env.env(), db.dbi()))?;
+        mdbx_result(ffi::mdbx_dbi_close(self.env().env(), db.dbi()))?;

         Ok(())
     }

@@ -405,9 +470,9 @@ where
 impl<'env> Transaction<'env, RW, NoWriteMap> {
     /// Begins a new nested transaction inside of this transaction.
     pub fn begin_nested_txn(&mut self) -> Result<Transaction<'_, RW, NoWriteMap>> {
-        txn_execute(&self.txn, |txn| {
+        self.txn_execute(|txn| {
             let (tx, rx) = sync_channel(0);
-            self.env
+            self.env()
                 .txn_manager()
                 .unwrap()
                 .send(TxnManagerMessage::Begin {
@@ -417,7 +482,7 @@ impl<'env> Transaction<'env, RW, NoWriteMap> {
                 })
                 .unwrap();

-            rx.recv().unwrap().map(|ptr| Transaction::new_from_ptr(self.env, ptr.0))
+            rx.recv().unwrap().map(|ptr| Transaction::new_from_ptr(self.env(), ptr.0))
         })
     }
 }

@@ -428,46 +493,48 @@ where
     E: EnvironmentKind,
 {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.debug_struct("RoTransaction").finish()
+        f.debug_struct("RoTransaction").finish_non_exhaustive()
     }
 }

-impl<'env, K, E> Drop for Transaction<'env, K, E>
-where
-    K: TransactionKind,
-    E: EnvironmentKind,
-{
-    fn drop(&mut self) {
-        txn_execute(&self.txn, |txn| {
-            if !self.committed {
-                if K::ONLY_CLEAN {
-                    unsafe {
-                        ffi::mdbx_txn_abort(txn);
-                    }
-                } else {
-                    let (sender, rx) = sync_channel(0);
-                    self.env
-                        .txn_manager()
-                        .unwrap()
-                        .send(TxnManagerMessage::Abort { tx: TxnPtr(txn), sender })
-                        .unwrap();
-                    rx.recv().unwrap().unwrap();
-                }
-            }
-        })
-    }
-}
+/// A shareable pointer to an MDBX transaction.
+#[derive(Clone)]
+pub(crate) struct TransactionPtr {
+    txn: *mut ffi::MDBX_txn,
+    lock: Arc<Mutex<()>>,
+}
+
+impl TransactionPtr {
+    fn new(txn: *mut ffi::MDBX_txn) -> Self {
+        Self { txn, lock: Arc::new(Mutex::new(())) }
+    }
+
+    /// Executes the given closure once the lock on the transaction is acquired.
+    #[inline]
+    pub(crate) fn txn_execute<F: FnOnce(*mut ffi::MDBX_txn) -> T, T>(&self, f: F) -> T {
+        let _lck = self.lock.lock();
+        (f)(self.txn)
+    }
+}

-unsafe impl<'env, K, E> Send for Transaction<'env, K, E>
-where
-    K: TransactionKind,
-    E: EnvironmentKind,
-{
-}
+// SAFETY: Access to the transaction is synchronized by the lock.
+unsafe impl Send for TransactionPtr {}

-unsafe impl<'env, K, E> Sync for Transaction<'env, K, E>
-where
-    K: TransactionKind,
-    E: EnvironmentKind,
-{
-}
+// SAFETY: Access to the transaction is synchronized by the lock.
+unsafe impl Sync for TransactionPtr {}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::WriteMap;
+
+    fn assert_send_sync<T: Send + Sync>() {}
+
+    #[allow(dead_code)]
+    fn test_txn_send_sync() {
+        assert_send_sync::<Transaction<'_, RO, NoWriteMap>>();
+        assert_send_sync::<Transaction<'_, RO, WriteMap>>();
+        assert_send_sync::<Transaction<'_, RW, NoWriteMap>>();
+        assert_send_sync::<Transaction<'_, RW, WriteMap>>();
+    }
+}

From dc72cad8389504eae0ebc88e803ef0622fc0a3f2 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Wed, 15 Nov 2023 17:42:34 +0100
Subject: [PATCH 07/77] feat: phase out environment trait (#5439)

---
 bin/reth/src/db/diff.rs                       |   4 +-
 bin/reth/src/db/list.rs                       |   6 +-
 bin/reth/src/db/snapshots/bench.rs            |   6 +-
 bin/reth/src/db/snapshots/mod.rs              |  19 ++-
 crates/consensus/beacon/src/engine/sync.rs    |   4 +-
 .../storage/db/src/implementation/mdbx/mod.rs | 101 ++++++-------
 .../storage/db/src/implementation/mdbx/tx.rs  |  24 ++--
 crates/storage/db/src/lib.rs                  |  24 ++--
 crates/storage/libmdbx-rs/benches/utils.rs    |   4 +-
 crates/storage/libmdbx-rs/src/cursor.rs       |   7 +-
 crates/storage/libmdbx-rs/src/database.rs     |   5 +-
 crates/storage/libmdbx-rs/src/environment.rs  | 135 ++++++++--------
 crates/storage/libmdbx-rs/src/error.rs        |   1 +
 crates/storage/libmdbx-rs/src/flags.rs        |  29 ++--
 crates/storage/libmdbx-rs/src/lib.rs          |   5 +-
 crates/storage/libmdbx-rs/src/transaction.rs  |  59 +++-----
 crates/storage/libmdbx-rs/tests/cursor.rs     |   2 -
 .../storage/libmdbx-rs/tests/environment.rs   |   2 -
 .../storage/libmdbx-rs/tests/transaction.rs   |   2 -
 19 files changed, 212 insertions(+), 227 deletions(-)

diff --git a/bin/reth/src/db/diff.rs b/bin/reth/src/db/diff.rs
index 1fff88e08d26f..7170f15303d7b 100644
--- a/bin/reth/src/db/diff.rs
+++ b/bin/reth/src/db/diff.rs
@@ -17,7 +17,7 @@ use clap::Parser;
 use reth_db::{
     cursor::DbCursorRO, database::Database, open_db_read_only, table::Table, transaction::DbTx,
     AccountChangeSet, AccountHistory, AccountsTrie, BlockBodyIndices, BlockOmmers,
-    BlockWithdrawals, Bytecodes, CanonicalHeaders, DatabaseEnvRO, HashedAccount, HashedStorage,
+    BlockWithdrawals, Bytecodes, CanonicalHeaders, DatabaseEnv, HashedAccount, HashedStorage,
     HeaderNumbers, HeaderTD, Headers, PlainAccountState, PlainStorageState, PruneCheckpoints,
     Receipts, StorageChangeSet, StorageHistory, StoragesTrie, SyncStage, SyncStageProgress, Tables,
     TransactionBlock, Transactions, TxHashNumber, TxSenders,
@@ -58,7 +58,7 @@ impl Command {
     ///
     /// The discrepancies and extra elements, along with a brief summary of the diff results are
     /// then written to a file in the output directory.
- pub fn execute(self, tool: &DbTool<'_, DatabaseEnvRO>) -> eyre::Result<()> { + pub fn execute(self, tool: &DbTool<'_, DatabaseEnv>) -> eyre::Result<()> { // open second db let second_db_path: PathBuf = self.secondary_datadir.join("db").into(); let second_db = open_db_read_only(&second_db_path, self.second_db.log_level)?; diff --git a/bin/reth/src/db/list.rs b/bin/reth/src/db/list.rs index 854efbe9f9286..b6c04051addaf 100644 --- a/bin/reth/src/db/list.rs +++ b/bin/reth/src/db/list.rs @@ -2,7 +2,7 @@ use super::tui::DbListTUI; use crate::utils::{DbTool, ListFilter}; use clap::Parser; use eyre::WrapErr; -use reth_db::{database::Database, table::Table, DatabaseEnvRO, RawValue, TableViewer, Tables}; +use reth_db::{database::Database, table::Table, DatabaseEnv, RawValue, TableViewer, Tables}; use reth_primitives::hex; use std::cell::RefCell; use tracing::error; @@ -50,7 +50,7 @@ pub struct Command { impl Command { /// Execute `db list` command - pub fn execute(self, tool: &DbTool<'_, DatabaseEnvRO>) -> eyre::Result<()> { + pub fn execute(self, tool: &DbTool<'_, DatabaseEnv>) -> eyre::Result<()> { self.table.view(&ListTableViewer { tool, args: &self }) } @@ -81,7 +81,7 @@ impl Command { } struct ListTableViewer<'a> { - tool: &'a DbTool<'a, DatabaseEnvRO>, + tool: &'a DbTool<'a, DatabaseEnv>, args: &'a Command, } diff --git a/bin/reth/src/db/snapshots/bench.rs b/bin/reth/src/db/snapshots/bench.rs index 47c5ec2fa0775..2505b23d4015f 100644 --- a/bin/reth/src/db/snapshots/bench.rs +++ b/bin/reth/src/db/snapshots/bench.rs @@ -1,4 +1,4 @@ -use reth_db::DatabaseEnvRO; +use reth_db::DatabaseEnv; use reth_primitives::{ snapshot::{Compression, Filters}, ChainSpec, SnapshotSegment, @@ -16,7 +16,7 @@ pub(crate) enum BenchKind { pub(crate) fn bench( bench_kind: BenchKind, - db: (DatabaseEnvRO, Arc), + db: (DatabaseEnv, Arc), segment: SnapshotSegment, filters: Filters, compression: Compression, @@ -25,7 +25,7 @@ pub(crate) fn bench( ) -> eyre::Result<()> where F1: FnMut() -> eyre::Result, - F2: Fn(DatabaseProviderRO<'_, DatabaseEnvRO>) -> eyre::Result, + F2: Fn(DatabaseProviderRO<'_, DatabaseEnv>) -> eyre::Result, R: Debug + PartialEq, { let (db, chain) = db; diff --git a/bin/reth/src/db/snapshots/mod.rs b/bin/reth/src/db/snapshots/mod.rs index 80f0813c539d7..1113d7086830b 100644 --- a/bin/reth/src/db/snapshots/mod.rs +++ b/bin/reth/src/db/snapshots/mod.rs @@ -1,6 +1,6 @@ use clap::Parser; use itertools::Itertools; -use reth_db::{open_db_read_only, DatabaseEnvRO}; +use reth_db::{open_db_read_only, DatabaseEnv}; use reth_interfaces::db::LogLevel; use reth_primitives::{ snapshot::{Compression, InclusionFilter, PerfectHashingFunction}, @@ -71,22 +71,21 @@ impl Command { if !self.only_bench { for ((mode, compression), phf) in all_combinations.clone() { match mode { - SnapshotSegment::Headers => self - .generate_headers_snapshot::( - &provider, - *compression, - InclusionFilter::Cuckoo, - *phf, - )?, + SnapshotSegment::Headers => self.generate_headers_snapshot::( + &provider, + *compression, + InclusionFilter::Cuckoo, + *phf, + )?, SnapshotSegment::Transactions => self - .generate_transactions_snapshot::( + .generate_transactions_snapshot::( &provider, *compression, InclusionFilter::Cuckoo, *phf, )?, SnapshotSegment::Receipts => self - .generate_receipts_snapshot::( + .generate_receipts_snapshot::( &provider, *compression, InclusionFilter::Cuckoo, diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index 0d95412f2e49a..07780d4330e1f 100644 --- 
a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -395,7 +395,7 @@ mod tests { use assert_matches::assert_matches; use futures::poll; use reth_db::{ - mdbx::{Env, WriteMap}, + mdbx::DatabaseEnv, test_utils::{create_test_rw_db, TempDatabase}, }; use reth_interfaces::{p2p::either::EitherDownloader, test_utils::TestFullBlockClient}; @@ -449,7 +449,7 @@ mod tests { } /// Builds the pipeline. - fn build(self, chain_spec: Arc) -> Pipeline>>> { + fn build(self, chain_spec: Arc) -> Pipeline>> { reth_tracing::init_test_tracing(); let db = create_test_rw_db(); diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index 1154005c6b0b3..161d87e415ed3 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -8,8 +8,7 @@ use crate::{ }; use reth_interfaces::db::LogLevel; use reth_libmdbx::{ - DatabaseFlags, Environment, EnvironmentFlags, EnvironmentKind, Geometry, Mode, PageSize, - SyncMode, RO, RW, + DatabaseFlags, Environment, EnvironmentFlags, Geometry, Mode, PageSize, SyncMode, RO, RW, }; use std::{ops::Deref, path::Path}; use tx::Tx; @@ -25,7 +24,7 @@ const DEFAULT_MAX_READERS: u64 = 32_000; /// Environment used when opening a MDBX environment. RO/RW. #[derive(Debug)] -pub enum EnvKind { +pub enum DatabaseEnvKind { /// Read-only MDBX environment. RO, /// Read-write MDBX environment. @@ -34,19 +33,19 @@ pub enum EnvKind { /// Wrapper for the libmdbx environment. #[derive(Debug)] -pub struct Env { +pub struct DatabaseEnv { /// Libmdbx-sys environment. - pub inner: Environment, + pub inner: Environment, /// Whether to record metrics or not. with_metrics: bool, } -impl<'a, E: EnvironmentKind> DatabaseGAT<'a> for Env { - type TX = tx::Tx<'a, RO, E>; - type TXMut = tx::Tx<'a, RW, E>; +impl<'a> DatabaseGAT<'a> for DatabaseEnv { + type TX = tx::Tx<'a, RO>; + type TXMut = tx::Tx<'a, RW>; } -impl Database for Env { +impl Database for DatabaseEnv { fn tx(&self) -> Result<>::TX, DatabaseError> { Ok(Tx::new_with_metrics( self.inner.begin_ro_txn().map_err(|e| DatabaseError::InitTx(e.into()))?, @@ -62,21 +61,26 @@ impl Database for Env { } } -impl Env { +impl DatabaseEnv { /// Opens the database at the specified path with the given `EnvKind`. /// - /// It does not create the tables, for that call [`Env::create_tables`]. + /// It does not create the tables, for that call [`DatabaseEnv::create_tables`]. 
pub fn open( path: &Path, - kind: EnvKind, + kind: DatabaseEnvKind, log_level: Option, - ) -> Result, DatabaseError> { + ) -> Result { + let mut inner_env = Environment::builder(); + let mode = match kind { - EnvKind::RO => Mode::ReadOnly, - EnvKind::RW => Mode::ReadWrite { sync_mode: SyncMode::Durable }, + DatabaseEnvKind::RO => Mode::ReadOnly, + DatabaseEnvKind::RW => { + // enable writemap mode in RW mode + inner_env.write_map(); + Mode::ReadWrite { sync_mode: SyncMode::Durable } + } }; - let mut inner_env = Environment::builder(); inner_env.set_max_dbs(Tables::ALL.len()); inner_env.set_geometry(Geometry { // Maximum database size of 4 terabytes @@ -124,7 +128,7 @@ impl Env { } } - let env = Env { + let env = DatabaseEnv { inner: inner_env.open(path).map_err(|e| DatabaseError::Open(e.into()))?, with_metrics: false, }; @@ -158,8 +162,8 @@ impl Env { } } -impl Deref for Env { - type Target = Environment; +impl Deref for DatabaseEnv { + type Target = Environment; fn deref(&self) -> &Self::Target { &self.inner @@ -180,13 +184,12 @@ mod tests { AccountChangeSet, }; use reth_interfaces::db::{DatabaseWriteError, DatabaseWriteOperation}; - use reth_libmdbx::{NoWriteMap, WriteMap}; use reth_primitives::{Account, Address, Header, IntegerList, StorageEntry, B256, U256}; use std::{path::Path, str::FromStr, sync::Arc}; use tempfile::TempDir; /// Create database for testing - fn create_test_db(kind: EnvKind) -> Arc> { + fn create_test_db(kind: DatabaseEnvKind) -> Arc { Arc::new(create_test_db_with_path( kind, &tempfile::TempDir::new().expect(ERROR_TEMPDIR).into_path(), @@ -194,8 +197,8 @@ mod tests { } /// Create database for testing with specified path - fn create_test_db_with_path(kind: EnvKind, path: &Path) -> Env { - let env = Env::::open(path, kind, None).expect(ERROR_DB_CREATION); + fn create_test_db_with_path(kind: DatabaseEnvKind, path: &Path) -> DatabaseEnv { + let env = DatabaseEnv::open(path, kind, None).expect(ERROR_DB_CREATION); env.create_tables().expect(ERROR_TABLE_CREATION); env } @@ -212,12 +215,12 @@ mod tests { #[test] fn db_creation() { - create_test_db::(EnvKind::RW); + create_test_db(DatabaseEnvKind::RW); } #[test] fn db_manual_put_get() { - let env = create_test_db::(EnvKind::RW); + let env = create_test_db(DatabaseEnvKind::RW); let value = Header::default(); let key = 1u64; @@ -236,7 +239,7 @@ mod tests { #[test] fn db_cursor_walk() { - let env = create_test_db::(EnvKind::RW); + let env = create_test_db(DatabaseEnvKind::RW); let value = Header::default(); let key = 1u64; @@ -261,7 +264,7 @@ mod tests { #[test] fn db_cursor_walk_range() { - let db: Arc> = create_test_db(EnvKind::RW); + let db: Arc = create_test_db(DatabaseEnvKind::RW); // PUT (0, 0), (1, 0), (2, 0), (3, 0) let tx = db.tx_mut().expect(ERROR_INIT_TX); @@ -325,7 +328,7 @@ mod tests { #[test] fn db_cursor_walk_range_on_dup_table() { - let db: Arc> = create_test_db(EnvKind::RW); + let db: Arc = create_test_db(DatabaseEnvKind::RW); let address0 = Address::ZERO; let address1 = Address::with_last_byte(1); @@ -367,7 +370,7 @@ mod tests { #[allow(clippy::reversed_empty_ranges)] #[test] fn db_cursor_walk_range_invalid() { - let db: Arc> = create_test_db(EnvKind::RW); + let db: Arc = create_test_db(DatabaseEnvKind::RW); // PUT (0, 0), (1, 0), (2, 0), (3, 0) let tx = db.tx_mut().expect(ERROR_INIT_TX); @@ -395,7 +398,7 @@ mod tests { #[test] fn db_walker() { - let db: Arc> = create_test_db(EnvKind::RW); + let db: Arc = create_test_db(DatabaseEnvKind::RW); // PUT (0, 0), (1, 0), (3, 0) let tx = 
db.tx_mut().expect(ERROR_INIT_TX); @@ -425,7 +428,7 @@ mod tests { #[test] fn db_reverse_walker() { - let db: Arc> = create_test_db(EnvKind::RW); + let db: Arc = create_test_db(DatabaseEnvKind::RW); // PUT (0, 0), (1, 0), (3, 0) let tx = db.tx_mut().expect(ERROR_INIT_TX); @@ -455,7 +458,7 @@ mod tests { #[test] fn db_walk_back() { - let db: Arc> = create_test_db(EnvKind::RW); + let db: Arc = create_test_db(DatabaseEnvKind::RW); // PUT (0, 0), (1, 0), (3, 0) let tx = db.tx_mut().expect(ERROR_INIT_TX); @@ -494,7 +497,7 @@ mod tests { #[test] fn db_cursor_seek_exact_or_previous_key() { - let db: Arc> = create_test_db(EnvKind::RW); + let db: Arc = create_test_db(DatabaseEnvKind::RW); // PUT let tx = db.tx_mut().expect(ERROR_INIT_TX); @@ -520,7 +523,7 @@ mod tests { #[test] fn db_cursor_insert() { - let db: Arc> = create_test_db(EnvKind::RW); + let db: Arc = create_test_db(DatabaseEnvKind::RW); // PUT let tx = db.tx_mut().expect(ERROR_INIT_TX); @@ -563,7 +566,7 @@ mod tests { #[test] fn db_cursor_insert_dup() { - let db: Arc> = create_test_db(EnvKind::RW); + let db: Arc = create_test_db(DatabaseEnvKind::RW); let tx = db.tx_mut().expect(ERROR_INIT_TX); let mut dup_cursor = tx.cursor_dup_write::().unwrap(); @@ -581,7 +584,7 @@ mod tests { #[test] fn db_cursor_delete_current_non_existent() { - let db: Arc> = create_test_db(EnvKind::RW); + let db: Arc = create_test_db(DatabaseEnvKind::RW); let tx = db.tx_mut().expect(ERROR_INIT_TX); let key1 = Address::with_last_byte(1); @@ -609,7 +612,7 @@ mod tests { #[test] fn db_cursor_insert_wherever_cursor_is() { - let db: Arc> = create_test_db(EnvKind::RW); + let db: Arc = create_test_db(DatabaseEnvKind::RW); let tx = db.tx_mut().expect(ERROR_INIT_TX); // PUT @@ -642,7 +645,7 @@ mod tests { #[test] fn db_cursor_append() { - let db: Arc> = create_test_db(EnvKind::RW); + let db: Arc = create_test_db(DatabaseEnvKind::RW); // PUT let tx = db.tx_mut().expect(ERROR_INIT_TX); @@ -669,7 +672,7 @@ mod tests { #[test] fn db_cursor_append_failure() { - let db: Arc> = create_test_db(EnvKind::RW); + let db: Arc = create_test_db(DatabaseEnvKind::RW); // PUT let tx = db.tx_mut().expect(ERROR_INIT_TX); @@ -706,7 +709,7 @@ mod tests { #[test] fn db_cursor_upsert() { - let db: Arc> = create_test_db(EnvKind::RW); + let db: Arc = create_test_db(DatabaseEnvKind::RW); let tx = db.tx_mut().expect(ERROR_INIT_TX); let mut cursor = tx.cursor_write::().unwrap(); @@ -741,7 +744,7 @@ mod tests { #[test] fn db_cursor_dupsort_append() { - let db: Arc> = create_test_db(EnvKind::RW); + let db: Arc = create_test_db(DatabaseEnvKind::RW); let transition_id = 2; @@ -810,28 +813,28 @@ mod tests { .expect(ERROR_ETH_ADDRESS); { - let env = create_test_db_with_path::(EnvKind::RW, &path); + let env = create_test_db_with_path(DatabaseEnvKind::RW, &path); // PUT let result = env.update(|tx| { tx.put::(key, value).expect(ERROR_PUT); 200 }); - assert!(result.expect(ERROR_RETURN_VALUE) == 200); + assert_eq!(result.expect(ERROR_RETURN_VALUE), 200); } - let env = Env::::open(&path, EnvKind::RO, None).expect(ERROR_DB_CREATION); + let env = DatabaseEnv::open(&path, DatabaseEnvKind::RO, None).expect(ERROR_DB_CREATION); // GET let result = env.view(|tx| tx.get::(key).expect(ERROR_GET)).expect(ERROR_GET); - assert!(result == Some(value)) + assert_eq!(result, Some(value)) } #[test] fn db_dup_sort() { - let env = create_test_db::(EnvKind::RW); + let env = create_test_db(DatabaseEnvKind::RW); let key = Address::from_str("0xa2c122be93b0074270ebee7f6b7292c7deb45047") .expect(ERROR_ETH_ADDRESS); @@ -875,7 +878,7 @@ 
mod tests { #[test] fn db_iterate_over_all_dup_values() { - let env = create_test_db::(EnvKind::RW); + let env = create_test_db(DatabaseEnvKind::RW); let key1 = Address::from_str("0x1111111111111111111111111111111111111111") .expect(ERROR_ETH_ADDRESS); let key2 = Address::from_str("0x2222222222222222222222222222222222222222") @@ -921,7 +924,7 @@ mod tests { #[test] fn dup_value_with_same_subkey() { - let env = create_test_db::(EnvKind::RW); + let env = create_test_db(DatabaseEnvKind::RW); let key1 = Address::new([0x11; 20]); let key2 = Address::new([0x22; 20]); @@ -964,7 +967,7 @@ mod tests { #[test] fn db_sharded_key() { - let db: Arc> = create_test_db(EnvKind::RW); + let db: Arc = create_test_db(DatabaseEnvKind::RW); let real_key = Address::from_str("0xa2c122be93b0074270ebee7f6b7292c7deb45047").unwrap(); for i in 1..5 { diff --git a/crates/storage/db/src/implementation/mdbx/tx.rs b/crates/storage/db/src/implementation/mdbx/tx.rs index 689a0cd76267f..78c52172d5915 100644 --- a/crates/storage/db/src/implementation/mdbx/tx.rs +++ b/crates/storage/db/src/implementation/mdbx/tx.rs @@ -12,14 +12,14 @@ use crate::{ }; use parking_lot::RwLock; use reth_interfaces::db::{DatabaseWriteError, DatabaseWriteOperation}; -use reth_libmdbx::{ffi::DBI, EnvironmentKind, Transaction, TransactionKind, WriteFlags, RW}; +use reth_libmdbx::{ffi::DBI, Transaction, TransactionKind, WriteFlags, RW}; use std::{marker::PhantomData, str::FromStr, sync::Arc, time::Instant}; /// Wrapper for the libmdbx transaction. #[derive(Debug)] -pub struct Tx<'a, K: TransactionKind, E: EnvironmentKind> { +pub struct Tx<'a, K: TransactionKind> { /// Libmdbx-sys transaction. - pub inner: Transaction<'a, K, E>, + pub inner: Transaction<'a, K>, /// Database table handle cache. pub(crate) db_handles: Arc; NUM_TABLES]>>, /// Handler for metrics with its own [Drop] implementation for cases when the transaction isn't @@ -29,9 +29,9 @@ pub struct Tx<'a, K: TransactionKind, E: EnvironmentKind> { metrics_handler: Option>, } -impl<'env, K: TransactionKind, E: EnvironmentKind> Tx<'env, K, E> { +impl<'env, K: TransactionKind> Tx<'env, K> { /// Creates new `Tx` object with a `RO` or `RW` transaction. - pub fn new<'a>(inner: Transaction<'a, K, E>) -> Self + pub fn new<'a>(inner: Transaction<'a, K>) -> Self where 'a: 'env, { @@ -39,7 +39,7 @@ impl<'env, K: TransactionKind, E: EnvironmentKind> Tx<'env, K, E> { } /// Creates new `Tx` object with a `RO` or `RW` transaction and optionally enables metrics. 
- pub fn new_with_metrics<'a>(inner: Transaction<'a, K, E>, with_metrics: bool) -> Self + pub fn new_with_metrics<'a>(inner: Transaction<'a, K>, with_metrics: bool) -> Self where 'a: 'env, { @@ -128,7 +128,7 @@ impl<'env, K: TransactionKind, E: EnvironmentKind> Tx<'env, K, E> { &self, operation: Operation, value_size: Option, - f: impl FnOnce(&Transaction<'_, K, E>) -> R, + f: impl FnOnce(&Transaction<'_, K>) -> R, ) -> R { if self.metrics_handler.is_some() { OperationMetrics::record(T::NAME, operation, value_size, || f(&self.inner)) @@ -173,19 +173,19 @@ impl Drop for MetricsHandler { } } -impl<'a, K: TransactionKind, E: EnvironmentKind> DbTxGAT<'a> for Tx<'_, K, E> { +impl<'a, K: TransactionKind> DbTxGAT<'a> for Tx<'_, K> { type Cursor = Cursor<'a, K, T>; type DupCursor = Cursor<'a, K, T>; } -impl<'a, K: TransactionKind, E: EnvironmentKind> DbTxMutGAT<'a> for Tx<'_, K, E> { +impl<'a, K: TransactionKind> DbTxMutGAT<'a> for Tx<'_, K> { type CursorMut = Cursor<'a, RW, T>; type DupCursorMut = Cursor<'a, RW, T>; } -impl TableImporter for Tx<'_, RW, E> {} +impl TableImporter for Tx<'_, RW> {} -impl DbTx for Tx<'_, K, E> { +impl DbTx for Tx<'_, K> { fn get(&self, key: T::Key) -> Result::Value>, DatabaseError> { self.execute_with_operation_metric::(Operation::Get, None, |tx| { tx.get(self.get_dbi::()?, key.encode().as_ref()) @@ -229,7 +229,7 @@ impl DbTx for Tx<'_, K, E> { } } -impl DbTxMut for Tx<'_, RW, E> { +impl DbTxMut for Tx<'_, RW> { fn put(&self, key: T::Key, value: T::Value) -> Result<(), DatabaseError> { let key = key.encode(); let value = value.compress(); diff --git a/crates/storage/db/src/lib.rs b/crates/storage/db/src/lib.rs index a1511fb09ed64..250177dfb1821 100644 --- a/crates/storage/db/src/lib.rs +++ b/crates/storage/db/src/lib.rs @@ -87,15 +87,7 @@ pub use tables::*; pub use utils::is_database_empty; #[cfg(feature = "mdbx")] -use mdbx::{Env, EnvKind, NoWriteMap, WriteMap}; - -#[cfg(feature = "mdbx")] -/// Alias type for the database environment in use. Read/Write mode. -pub type DatabaseEnv = Env; - -#[cfg(feature = "mdbx")] -/// Alias type for the database engine in use. Read only mode. -pub type DatabaseEnvRO = Env; +pub use mdbx::{DatabaseEnv, DatabaseEnvKind}; use eyre::WrapErr; use reth_interfaces::db::LogLevel; @@ -120,7 +112,7 @@ pub fn init_db>(path: P, log_level: Option) -> eyre::Re } #[cfg(feature = "mdbx")] { - let db = DatabaseEnv::open(rpath, EnvKind::RW, log_level)?; + let db = DatabaseEnv::open(rpath, DatabaseEnvKind::RW, log_level)?; db.create_tables()?; Ok(db) } @@ -131,10 +123,10 @@ pub fn init_db>(path: P, log_level: Option) -> eyre::Re } /// Opens up an existing database. Read only mode. It doesn't create it or create tables if missing. -pub fn open_db_read_only(path: &Path, log_level: Option) -> eyre::Result { +pub fn open_db_read_only(path: &Path, log_level: Option) -> eyre::Result { #[cfg(feature = "mdbx")] { - Env::::open(path, EnvKind::RO, log_level) + DatabaseEnv::open(path, DatabaseEnvKind::RO, log_level) .with_context(|| format!("Could not open database at path: {}", path.display())) } #[cfg(not(feature = "mdbx"))] @@ -143,12 +135,12 @@ pub fn open_db_read_only(path: &Path, log_level: Option) -> eyre::Resu } } -/// Opens up an existing database. Read/Write mode. It doesn't create it or create tables if -/// missing. +/// Opens up an existing database. Read/Write mode with WriteMap enabled. It doesn't create it or +/// create tables if missing. 
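A minimal sketch of the three database entry points with the environment type parameter gone, using the aliases re-exported above (paths are illustrative):

    use reth_db::{init_db, open_db, open_db_read_only};
    use std::path::Path;

    let path = Path::new("/datadir/db");
    let db = init_db(path, None)?;           // creates files and tables if missing
    drop(db);
    let ro = open_db_read_only(path, None)?; // never creates files or tables
    drop(ro);
    let rw = open_db(path, None)?;           // read/write, WriteMap enabled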
pub fn open_db(path: &Path, log_level: Option) -> eyre::Result { #[cfg(feature = "mdbx")] { - Env::::open(path, EnvKind::RW, log_level) + DatabaseEnv::open(path, DatabaseEnvKind::RW, log_level) .with_context(|| format!("Could not open database at path: {}", path.display())) } #[cfg(not(feature = "mdbx"))] @@ -234,7 +226,7 @@ pub mod test_utils { } /// Create read only database for testing - pub fn create_test_ro_db() -> Arc> { + pub fn create_test_ro_db() -> Arc> { let path = tempfile::TempDir::new().expect(ERROR_TEMPDIR).into_path(); { init_db(path.as_path(), None).expect(ERROR_DB_CREATION); diff --git a/crates/storage/libmdbx-rs/benches/utils.rs b/crates/storage/libmdbx-rs/benches/utils.rs index 7388e1f676c35..bf24b4866ae4e 100644 --- a/crates/storage/libmdbx-rs/benches/utils.rs +++ b/crates/storage/libmdbx-rs/benches/utils.rs @@ -1,4 +1,4 @@ -use reth_libmdbx::{Environment, NoWriteMap, WriteFlags}; +use reth_libmdbx::{Environment, WriteFlags}; use tempfile::{tempdir, TempDir}; pub fn get_key(n: u32) -> String { @@ -9,7 +9,7 @@ pub fn get_data(n: u32) -> String { format!("data{n}") } -pub fn setup_bench_db(num_rows: u32) -> (TempDir, Environment) { +pub fn setup_bench_db(num_rows: u32) -> (TempDir, Environment) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); diff --git a/crates/storage/libmdbx-rs/src/cursor.rs b/crates/storage/libmdbx-rs/src/cursor.rs index d9dd917591ebc..dfbdcc52c9890 100644 --- a/crates/storage/libmdbx-rs/src/cursor.rs +++ b/crates/storage/libmdbx-rs/src/cursor.rs @@ -3,7 +3,7 @@ use crate::{ flags::*, mdbx_try_optional, transaction::{TransactionKind, TransactionPtr, RW}, - EnvironmentKind, TableObject, Transaction, + TableObject, Transaction, }; use ffi::{ MDBX_cursor_op, MDBX_FIRST, MDBX_FIRST_DUP, MDBX_GET_BOTH, MDBX_GET_BOTH_RANGE, @@ -28,10 +28,7 @@ impl<'txn, K> Cursor<'txn, K> where K: TransactionKind, { - pub(crate) fn new( - txn: &'txn Transaction<'_, K, E>, - dbi: ffi::MDBX_dbi, - ) -> Result { + pub(crate) fn new(txn: &'txn Transaction<'_, K>, dbi: ffi::MDBX_dbi) -> Result { let mut cursor: *mut ffi::MDBX_cursor = ptr::null_mut(); let txn = txn.txn_ptr(); unsafe { diff --git a/crates/storage/libmdbx-rs/src/database.rs b/crates/storage/libmdbx-rs/src/database.rs index 8609c4e49b3ef..f0f48951aff10 100644 --- a/crates/storage/libmdbx-rs/src/database.rs +++ b/crates/storage/libmdbx-rs/src/database.rs @@ -1,5 +1,4 @@ use crate::{ - environment::EnvironmentKind, error::{mdbx_result, Result}, transaction::TransactionKind, Transaction, @@ -21,8 +20,8 @@ impl<'txn> Database<'txn> { /// /// Prefer using `Environment::open_db`, `Environment::create_db`, `TransactionExt::open_db`, /// or `RwTransaction::create_db`. 
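As the note above recommends, database handles are normally obtained through a transaction rather than by calling `Database::new` directly; a short sketch:

    let txn = env.begin_ro_txn()?;
    let db = txn.open_db(None)?; // `None` opens the default (unnamed) database
    let dbi = db.dbi();          // raw handle for ffi-level calls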
-    pub(crate) fn new<'env, K: TransactionKind, E: EnvironmentKind>(
-        txn: &'txn Transaction<'env, K, E>,
+    pub(crate) fn new<'env, K: TransactionKind>(
+        txn: &'txn Transaction<'env, K>,
         name: Option<&str>,
         flags: MDBX_db_flags_t,
     ) -> Result<Self> {
diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs
index e8ace3115f672..b0338826b0f52 100644
--- a/crates/storage/libmdbx-rs/src/environment.rs
+++ b/crates/storage/libmdbx-rs/src/environment.rs
@@ -11,7 +11,6 @@ use std::{
     ffi::CString,
     fmt,
     fmt::Debug,
-    marker::PhantomData,
     mem,
     ops::{Bound, RangeBounds},
     path::Path,
@@ -21,32 +20,41 @@ use std::{
     time::Duration,
 };
 
-mod private {
-    use super::*;
-
-    pub trait Sealed {}
-
-    impl Sealed for NoWriteMap {}
-    impl Sealed for WriteMap {}
-}
-
-pub trait EnvironmentKind: private::Sealed + Debug + 'static {
-    const EXTRA_FLAGS: ffi::MDBX_env_flags_t;
+/// Determines how data is mapped into memory.
+///
+/// It only takes effect when the environment is opened.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
+pub enum EnvironmentKind {
+    /// Open the environment in default mode, without WRITEMAP.
+    #[default]
+    Default,
+    /// Open the environment as mdbx-WRITEMAP.
+    /// Use a writeable memory map unless the environment is opened as MDBX_RDONLY
+    /// ([Mode::ReadOnly]).
+    ///
+    /// All data will be mapped into memory in the read-write mode [Mode::ReadWrite]. This offers
+    /// a significant performance benefit, since the data will be modified directly in mapped
+    /// memory and then flushed to disk by a single system call, without any memory management
+    /// or copying.
+    ///
+    /// This mode is incompatible with nested transactions.
+    WriteMap,
 }
 
-#[derive(Debug)]
-#[non_exhaustive]
-pub struct NoWriteMap;
-
-#[derive(Debug)]
-#[non_exhaustive]
-pub struct WriteMap;
+impl EnvironmentKind {
+    /// Returns true if the environment was opened as WRITEMAP.
+    #[inline]
+    pub const fn is_write_map(&self) -> bool {
+        matches!(self, EnvironmentKind::WriteMap)
+    }
 
-impl EnvironmentKind for NoWriteMap {
-    const EXTRA_FLAGS: ffi::MDBX_env_flags_t = ffi::MDBX_ENV_DEFAULTS;
-}
-impl EnvironmentKind for WriteMap {
-    const EXTRA_FLAGS: ffi::MDBX_env_flags_t = ffi::MDBX_WRITEMAP;
+    /// Additional flags required when opening the environment.
+    pub(crate) fn extra_flags(&self) -> ffi::MDBX_env_flags_t {
+        match self {
+            EnvironmentKind::Default => ffi::MDBX_ENV_DEFAULTS,
+            EnvironmentKind::WriteMap => ffi::MDBX_WRITEMAP,
+        }
+    }
 }
 
 #[derive(Copy, Clone, Debug)]
@@ -66,20 +74,13 @@ pub(crate) enum TxnManagerMessage {
 }
 
 /// An environment supports multiple databases, all residing in the same shared-memory map.
-pub struct Environment<E>
-where
-    E: EnvironmentKind,
-{
+pub struct Environment {
     inner: EnvironmentInner,
-    _marker: PhantomData<E>,
 }
 
-impl<E> Environment<E>
-where
-    E: EnvironmentKind,
-{
+impl Environment {
     /// Creates a new builder for specifying options for opening an MDBX environment.
-    pub fn builder() -> EnvironmentBuilder<E> {
+    pub fn builder() -> EnvironmentBuilder {
         EnvironmentBuilder {
             flags: EnvironmentFlags::default(),
             max_readers: None,
@@ -92,10 +93,20 @@ where
             spill_min_denominator: None,
             geometry: None,
             log_level: None,
-            _marker: PhantomData,
+            kind: Default::default(),
         }
     }
 
+    /// Returns true if the environment was opened as WRITEMAP.
+    pub fn is_write_map(&self) -> bool {
+        self.inner.env_kind.is_write_map()
+    }
+
+    /// Returns the kind of the environment.
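A hedged sketch of selecting and querying the new kind (the `write_map` builder helper is added further down in this same patch):

    use reth_libmdbx::{Environment, EnvironmentKind};

    let dir = tempfile::tempdir()?;
    let env = Environment::builder().write_map().open(dir.path())?;
    assert!(env.is_write_map());
    assert_eq!(env.env_kind(), EnvironmentKind::WriteMap);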
+ pub fn env_kind(&self) -> EnvironmentKind { + self.inner.env_kind + } + /// Returns the manager that handles transaction messages. /// /// Requires [Mode::ReadWrite] and returns None otherwise. @@ -115,13 +126,13 @@ where /// Create a read-only transaction for use with the environment. #[inline] - pub fn begin_ro_txn(&self) -> Result> { + pub fn begin_ro_txn(&self) -> Result> { Transaction::new(self) } /// Create a read-write transaction for use with the environment. This method will block while /// there are any other read-write transactions open on the environment. - pub fn begin_rw_txn(&self) -> Result> { + pub fn begin_rw_txn(&self) -> Result> { let sender = self.txn_manager().ok_or(Error::Access)?; let txn = loop { let (tx, rx) = sync_channel(0); @@ -183,9 +194,8 @@ where /// /// ``` /// # use reth_libmdbx::Environment; - /// # use reth_libmdbx::NoWriteMap; /// let dir = tempfile::tempdir().unwrap(); - /// let env = Environment::::builder().open(dir.path()).unwrap(); + /// let env = Environment::builder().open(dir.path()).unwrap(); /// let info = env.info().unwrap(); /// let stat = env.stat().unwrap(); /// let freelist = env.freelist().unwrap(); @@ -228,6 +238,7 @@ where /// The env is opened via [mdbx_env_create](ffi::mdbx_env_create) and closed when this type drops. struct EnvironmentInner { env: *mut ffi::MDBX_env, + env_kind: EnvironmentKind, txn_manager: Option>, } @@ -347,15 +358,12 @@ impl Info { } } -unsafe impl Send for Environment where E: EnvironmentKind {} -unsafe impl Sync for Environment where E: EnvironmentKind {} +unsafe impl Send for Environment {} +unsafe impl Sync for Environment {} -impl fmt::Debug for Environment -where - E: EnvironmentKind, -{ +impl fmt::Debug for Environment { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Environment").finish_non_exhaustive() + f.debug_struct("Environment").field("kind", &self.inner.env_kind).finish_non_exhaustive() } } @@ -385,10 +393,7 @@ impl Default for Geometry { /// Options for opening or creating an environment. #[derive(Debug, Clone)] -pub struct EnvironmentBuilder -where - E: EnvironmentKind, -{ +pub struct EnvironmentBuilder { flags: EnvironmentFlags, max_readers: Option, max_dbs: Option, @@ -400,17 +405,14 @@ where spill_min_denominator: Option, geometry: Option, Option)>>, log_level: Option, - _marker: PhantomData, + kind: EnvironmentKind, } -impl EnvironmentBuilder -where - E: EnvironmentKind, -{ +impl EnvironmentBuilder { /// Open an environment. /// /// Database files will be opened with 644 permissions. - pub fn open(&self, path: &Path) -> Result> { + pub fn open(&self, path: &Path) -> Result { self.open_with_permissions(path, 0o644) } @@ -421,7 +423,7 @@ where &self, path: &Path, mode: ffi::mdbx_mode_t, - ) -> Result> { + ) -> Result { let mut env: *mut ffi::MDBX_env = ptr::null_mut(); unsafe { if let Some(log_level) = self.log_level { @@ -505,7 +507,7 @@ where mdbx_result(ffi::mdbx_env_open( env, path.as_ptr(), - self.flags.make_flags() | E::EXTRA_FLAGS, + self.flags.make_flags() | self.kind.extra_flags(), mode, ))?; @@ -517,7 +519,7 @@ where } } - let mut env = EnvironmentInner { env, txn_manager: None }; + let mut env = EnvironmentInner { env, txn_manager: None, env_kind: self.kind }; if let Mode::ReadWrite { .. 
} = self.flags.mode { let (tx, rx) = std::sync::mpsc::sync_channel(0); @@ -562,7 +564,20 @@ where env.txn_manager = Some(tx); } - Ok(Environment { inner: env, _marker: Default::default() }) + Ok(Environment { inner: env }) + } + + /// Configures how this environment will be opened. + pub fn set_kind(&mut self, kind: EnvironmentKind) -> &mut Self { + self.kind = kind; + self + } + + /// Opens the environment with mdbx WRITEMAP + /// + /// See also [EnvironmentKind] + pub fn write_map(&mut self) -> &mut Self { + self.set_kind(EnvironmentKind::WriteMap) } /// Sets the provided options in the environment. diff --git a/crates/storage/libmdbx-rs/src/error.rs b/crates/storage/libmdbx-rs/src/error.rs index ebe5a43a3d43f..274fcc47f7a6c 100644 --- a/crates/storage/libmdbx-rs/src/error.rs +++ b/crates/storage/libmdbx-rs/src/error.rs @@ -55,6 +55,7 @@ pub enum Error { Access, TooLarge, DecodeErrorLenDiff, + NestedTransactionsUnsupportedWithWriteMap, Other(i32), } diff --git a/crates/storage/libmdbx-rs/src/flags.rs b/crates/storage/libmdbx-rs/src/flags.rs index df9f817de1090..e464a3b20a05b 100644 --- a/crates/storage/libmdbx-rs/src/flags.rs +++ b/crates/storage/libmdbx-rs/src/flags.rs @@ -28,11 +28,11 @@ pub enum SyncMode { /// are recycled the MVCC snapshots corresponding to previous "steady" transactions (see /// below). /// - /// With [crate::WriteMap] the [SyncMode::SafeNoSync] instructs MDBX to use asynchronous - /// mmap-flushes to disk. Asynchronous mmap-flushes means that actually all writes will - /// scheduled and performed by operation system on it own manner, i.e. unordered. - /// MDBX itself just notify operating system that it would be nice to write data to disk, but - /// no more. + /// With [crate::EnvironmentKind::WriteMap] the [SyncMode::SafeNoSync] instructs MDBX to use + /// asynchronous mmap-flushes to disk. Asynchronous mmap-flushes means that actually all + /// writes will scheduled and performed by operation system on it own manner, i.e. + /// unordered. MDBX itself just notify operating system that it would be nice to write data + /// to disk, but no more. /// /// Depending on the platform and hardware, with [SyncMode::SafeNoSync] you may get a multiple /// increase of write performance, even 10 times or more. @@ -70,17 +70,18 @@ pub enum SyncMode { /// you may get a multiple increase of write performance, even 100 times or more. /// /// If the filesystem preserves write order (which is rare and never provided unless explicitly - /// noted) and the [WriteMap](crate::WriteMap) and [EnvironmentFlags::liforeclaim] flags are - /// not used, then a system crash can't corrupt the database, but you can lose the last - /// transactions, if at least one buffer is not yet flushed to disk. The risk is governed - /// by how often the system flushes dirty buffers to disk and how often - /// [Environment::sync()](crate::Environment::sync) is called. So, transactions exhibit ACI - /// (atomicity, consistency, isolation) properties and only lose D (durability). - /// I.e. database integrity is maintained, but a system crash may undo the final transactions. + /// noted) and the [WriteMap](crate::EnvironmentKind::WriteMap) and + /// [EnvironmentFlags::liforeclaim] flags are not used, then a system crash can't corrupt + /// the database, but you can lose the last transactions, if at least one buffer is not yet + /// flushed to disk. The risk is governed by how often the system flushes dirty buffers to + /// disk and how often [Environment::sync()](crate::Environment::sync) is called. 
So, + /// transactions exhibit ACI (atomicity, consistency, isolation) properties and only lose D + /// (durability). I.e. database integrity is maintained, but a system crash may undo the + /// final transactions. /// /// Otherwise, if the filesystem not preserves write order (which is typically) or - /// [WriteMap](crate::WriteMap) or [EnvironmentFlags::liforeclaim] flags are used, you should - /// expect the corrupted database after a system crash. + /// [WriteMap](crate::EnvironmentKind::WriteMap) or [EnvironmentFlags::liforeclaim] flags are + /// used, you should expect the corrupted database after a system crash. /// /// So, most important thing about [SyncMode::UtterlyNoSync]: /// - A system crash immediately after commit the write transaction high likely lead to diff --git a/crates/storage/libmdbx-rs/src/lib.rs b/crates/storage/libmdbx-rs/src/lib.rs index cad77bc6f691b..14e20de72188c 100644 --- a/crates/storage/libmdbx-rs/src/lib.rs +++ b/crates/storage/libmdbx-rs/src/lib.rs @@ -15,8 +15,7 @@ pub use crate::{ cursor::{Cursor, Iter, IterDup}, database::Database, environment::{ - Environment, EnvironmentBuilder, EnvironmentKind, Geometry, Info, NoWriteMap, PageSize, - Stat, WriteMap, + Environment, EnvironmentBuilder, EnvironmentKind, Geometry, Info, PageSize, Stat, }, error::{Error, Result}, flags::*, @@ -40,8 +39,6 @@ mod test_utils { use byteorder::{ByteOrder, LittleEndian}; use tempfile::tempdir; - type Environment = crate::Environment; - /// Regression test for https://github.com/danburkert/lmdb-rs/issues/21. /// This test reliably segfaults when run against lmbdb compiled with opt level -O3 and newer /// GCC compilers. diff --git a/crates/storage/libmdbx-rs/src/transaction.rs b/crates/storage/libmdbx-rs/src/transaction.rs index bd8cbae8f03fa..ddea959125bf8 100644 --- a/crates/storage/libmdbx-rs/src/transaction.rs +++ b/crates/storage/libmdbx-rs/src/transaction.rs @@ -1,6 +1,6 @@ use crate::{ database::Database, - environment::{Environment, EnvironmentKind, NoWriteMap, TxnManagerMessage, TxnPtr}, + environment::{Environment, TxnManagerMessage, TxnPtr}, error::{mdbx_result, Result}, flags::{DatabaseFlags, WriteFlags}, Cursor, Error, Stat, TableObject, @@ -12,7 +12,6 @@ use parking_lot::Mutex; use std::{ fmt, fmt::Debug, - marker::PhantomData, mem::size_of, ptr, slice, sync::{atomic::AtomicBool, mpsc::sync_channel, Arc}, @@ -60,20 +59,18 @@ impl TransactionKind for RW { /// An MDBX transaction. /// /// All database operations require a transaction. -pub struct Transaction<'env, K, E> +pub struct Transaction<'env, K> where K: TransactionKind, - E: EnvironmentKind, { - inner: Arc>, + inner: Arc>, } -impl<'env, K, E> Transaction<'env, K, E> +impl<'env, K> Transaction<'env, K> where K: TransactionKind, - E: EnvironmentKind, { - pub(crate) fn new(env: &'env Environment) -> Result { + pub(crate) fn new(env: &'env Environment) -> Result { let mut txn: *mut ffi::MDBX_txn = ptr::null_mut(); unsafe { mdbx_result(ffi::mdbx_txn_begin_ex( @@ -87,13 +84,13 @@ where } } - pub(crate) fn new_from_ptr(env: &'env Environment, txn: *mut ffi::MDBX_txn) -> Self { + pub(crate) fn new_from_ptr(env: &'env Environment, txn: *mut ffi::MDBX_txn) -> Self { let inner = TransactionInner { txn: TransactionPtr::new(txn), primed_dbis: Mutex::new(IndexSet::new()), committed: AtomicBool::new(false), env, - _marker: PhantomData, + _marker: Default::default(), }; Self { inner: Arc::new(inner) } } @@ -118,7 +115,7 @@ where } /// Returns a raw pointer to the MDBX environment. 
- pub fn env(&self) -> &Environment { + pub fn env(&self) -> &Environment { self.inner.env } @@ -253,10 +250,9 @@ where } /// Internals of a transaction. -struct TransactionInner<'env, K, E> +struct TransactionInner<'env, K> where K: TransactionKind, - E: EnvironmentKind, { /// The transaction pointer itself. txn: TransactionPtr, @@ -264,14 +260,13 @@ where primed_dbis: Mutex>, /// Whether the transaction has committed. committed: AtomicBool, - env: &'env Environment, - _marker: PhantomData, + env: &'env Environment, + _marker: std::marker::PhantomData, } -impl<'env, K, E> TransactionInner<'env, K, E> +impl<'env, K> TransactionInner<'env, K> where K: TransactionKind, - E: EnvironmentKind, { /// Marks the transaction as committed. fn set_committed(&self) { @@ -288,10 +283,9 @@ where } } -impl<'env, K, E> Drop for TransactionInner<'env, K, E> +impl<'env, K> Drop for TransactionInner<'env, K> where K: TransactionKind, - E: EnvironmentKind, { fn drop(&mut self) { self.txn_execute(|txn| { @@ -314,10 +308,7 @@ where } } -impl<'env, E> Transaction<'env, RW, E> -where - E: EnvironmentKind, -{ +impl<'env> Transaction<'env, RW> { fn open_db_with_flags(&self, name: Option<&str>, flags: DatabaseFlags) -> Result> { Database::new(self, name, flags.bits()) } @@ -451,10 +442,7 @@ where } } -impl<'env, E> Transaction<'env, RO, E> -where - E: EnvironmentKind, -{ +impl<'env> Transaction<'env, RO> { /// Closes the database handle. /// /// # Safety @@ -467,9 +455,12 @@ where } } -impl<'env> Transaction<'env, RW, NoWriteMap> { +impl<'env> Transaction<'env, RW> { /// Begins a new nested transaction inside of this transaction. - pub fn begin_nested_txn(&mut self) -> Result> { + pub fn begin_nested_txn(&mut self) -> Result> { + if self.inner.env.is_write_map() { + return Err(Error::NestedTransactionsUnsupportedWithWriteMap) + } self.txn_execute(|txn| { let (tx, rx) = sync_channel(0); self.env() @@ -487,10 +478,9 @@ impl<'env> Transaction<'env, RW, NoWriteMap> { } } -impl<'env, K, E> fmt::Debug for Transaction<'env, K, E> +impl<'env, K> fmt::Debug for Transaction<'env, K> where K: TransactionKind, - E: EnvironmentKind, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RoTransaction").finish_non_exhaustive() @@ -526,15 +516,12 @@ unsafe impl Sync for TransactionPtr {} #[cfg(test)] mod tests { use super::*; - use crate::WriteMap; fn assert_send_sync() {} #[allow(dead_code)] fn test_txn_send_sync() { - assert_send_sync::>(); - assert_send_sync::>(); - assert_send_sync::>(); - assert_send_sync::>(); + assert_send_sync::>(); + assert_send_sync::>(); } } diff --git a/crates/storage/libmdbx-rs/tests/cursor.rs b/crates/storage/libmdbx-rs/tests/cursor.rs index efa1d85f0cb69..0e2ce403edf7c 100644 --- a/crates/storage/libmdbx-rs/tests/cursor.rs +++ b/crates/storage/libmdbx-rs/tests/cursor.rs @@ -2,8 +2,6 @@ use reth_libmdbx::*; use std::borrow::Cow; use tempfile::tempdir; -type Environment = reth_libmdbx::Environment; - #[test] fn test_get() { let dir = tempdir().unwrap(); diff --git a/crates/storage/libmdbx-rs/tests/environment.rs b/crates/storage/libmdbx-rs/tests/environment.rs index 8dfb61ad4d7cc..85cf9a62a584c 100644 --- a/crates/storage/libmdbx-rs/tests/environment.rs +++ b/crates/storage/libmdbx-rs/tests/environment.rs @@ -2,8 +2,6 @@ use byteorder::{ByteOrder, LittleEndian}; use reth_libmdbx::*; use tempfile::tempdir; -type Environment = reth_libmdbx::Environment; - #[test] fn test_open() { let dir = tempdir().unwrap(); diff --git a/crates/storage/libmdbx-rs/tests/transaction.rs 
b/crates/storage/libmdbx-rs/tests/transaction.rs index c577017f4b5bb..361bca0e2d49b 100644 --- a/crates/storage/libmdbx-rs/tests/transaction.rs +++ b/crates/storage/libmdbx-rs/tests/transaction.rs @@ -7,8 +7,6 @@ use std::{ }; use tempfile::tempdir; -type Environment = reth_libmdbx::Environment; - #[test] fn test_put_get_del() { let dir = tempdir().unwrap(); From a389a2b42dd0427dd7dec43d1439d4fb722b93eb Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Wed, 15 Nov 2023 16:53:28 +0000 Subject: [PATCH 08/77] feat: search for a snapshot that fulfills a queried `BlockHash` or `TxHash` (#5373) Co-authored-by: Alexey Shekhirin Co-authored-by: Matthias Seitz --- bin/reth/src/db/snapshots/headers.rs | 33 +- bin/reth/src/db/snapshots/receipts.rs | 30 +- bin/reth/src/db/snapshots/transactions.rs | 27 +- crates/consensus/common/src/validation.rs | 4 +- crates/interfaces/src/provider.rs | 13 +- crates/primitives/src/snapshot/mod.rs | 31 +- crates/primitives/src/snapshot/segment.rs | 156 +++++++++- crates/snapshot/src/segments/mod.rs | 2 +- crates/snapshot/src/snapshotter.rs | 48 ++- .../src/providers/snapshot/manager.rs | 291 +++++++++++++++--- .../provider/src/providers/snapshot/mod.rs | 9 +- 11 files changed, 516 insertions(+), 128 deletions(-) diff --git a/bin/reth/src/db/snapshots/headers.rs b/bin/reth/src/db/snapshots/headers.rs index 8b9ea080aac29..e4537cd6c3da1 100644 --- a/bin/reth/src/db/snapshots/headers.rs +++ b/bin/reth/src/db/snapshots/headers.rs @@ -10,7 +10,8 @@ use reth_primitives::{ BlockHash, ChainSpec, Header, SnapshotSegment, }; use reth_provider::{ - providers::SnapshotProvider, DatabaseProviderRO, HeaderProvider, ProviderError, ProviderFactory, + providers::SnapshotProvider, DatabaseProviderRO, HeaderProvider, ProviderError, + ProviderFactory, TransactionsProviderExt, }; use reth_snapshot::{segments, segments::Segment}; use std::{ @@ -38,9 +39,15 @@ impl Command { segment.snapshot::(provider, PathBuf::default(), range.clone())?; // Default name doesn't have any configuration + let tx_range = provider.transaction_range_by_block_range(range.clone())?; reth_primitives::fs::rename( - SnapshotSegment::Headers.filename(&range), - SnapshotSegment::Headers.filename_with_configuration(filters, compression, &range), + SnapshotSegment::Headers.filename(&range, &tx_range), + SnapshotSegment::Headers.filename_with_configuration( + filters, + compression, + &range, + &tx_range, + ), )?; Ok(()) @@ -61,16 +68,24 @@ impl Command { Filters::WithoutFilters }; - let range = self.block_range(); + let block_range = self.block_range(); - let mut row_indexes = range.clone().collect::>(); + let mut row_indexes = block_range.clone().collect::>(); let mut rng = rand::thread_rng(); - let path = SnapshotSegment::Headers - .filename_with_configuration(filters, compression, &range) + + let tx_range = ProviderFactory::new(open_db_read_only(db_path, log_level)?, chain.clone()) + .provider()? 
+ .transaction_range_by_block_range(block_range.clone())?; + + let path: PathBuf = SnapshotSegment::Headers + .filename_with_configuration(filters, compression, &block_range, &tx_range) .into(); let provider = SnapshotProvider::default(); - let jar_provider = - provider.get_segment_provider(SnapshotSegment::Headers, self.from, Some(path))?; + let jar_provider = provider.get_segment_provider_from_block( + SnapshotSegment::Headers, + self.from, + Some(&path), + )?; let mut cursor = jar_provider.cursor()?; for bench_kind in [BenchKind::Walk, BenchKind::RandomAll] { diff --git a/bin/reth/src/db/snapshots/receipts.rs b/bin/reth/src/db/snapshots/receipts.rs index 84e47acf4ba86..dc8708ac04031 100644 --- a/bin/reth/src/db/snapshots/receipts.rs +++ b/bin/reth/src/db/snapshots/receipts.rs @@ -27,21 +27,26 @@ impl Command { inclusion_filter: InclusionFilter, phf: PerfectHashingFunction, ) -> eyre::Result<()> { - let range = self.block_range(); + let block_range = self.block_range(); let filters = if self.with_filters { Filters::WithFilters(inclusion_filter, phf) } else { Filters::WithoutFilters }; - let segment = segments::Receipts::new(compression, filters); - - segment.snapshot::(provider, PathBuf::default(), range.clone())?; + let segment: segments::Receipts = segments::Receipts::new(compression, filters); + segment.snapshot::(provider, PathBuf::default(), block_range.clone())?; // Default name doesn't have any configuration + let tx_range = provider.transaction_range_by_block_range(block_range.clone())?; reth_primitives::fs::rename( - SnapshotSegment::Receipts.filename(&range), - SnapshotSegment::Receipts.filename_with_configuration(filters, compression, &range), + SnapshotSegment::Receipts.filename(&block_range, &tx_range), + SnapshotSegment::Receipts.filename_with_configuration( + filters, + compression, + &block_range, + &tx_range, + ), )?; Ok(()) @@ -62,7 +67,7 @@ impl Command { Filters::WithoutFilters }; - let block_range = self.from..=(self.from + self.block_interval - 1); + let block_range = self.block_range(); let mut rng = rand::thread_rng(); @@ -72,13 +77,16 @@ impl Command { let mut row_indexes = tx_range.clone().collect::>(); - let path = SnapshotSegment::Receipts - .filename_with_configuration(filters, compression, &block_range) + let path: PathBuf = SnapshotSegment::Receipts + .filename_with_configuration(filters, compression, &block_range, &tx_range) .into(); let provider = SnapshotProvider::default(); - let jar_provider = - provider.get_segment_provider(SnapshotSegment::Receipts, self.from, Some(path))?; + let jar_provider = provider.get_segment_provider_from_block( + SnapshotSegment::Receipts, + self.from, + Some(&path), + )?; let mut cursor = jar_provider.cursor()?; for bench_kind in [BenchKind::Walk, BenchKind::RandomAll] { diff --git a/bin/reth/src/db/snapshots/transactions.rs b/bin/reth/src/db/snapshots/transactions.rs index 0b7d8b0163dbd..00c06102e8d7a 100644 --- a/bin/reth/src/db/snapshots/transactions.rs +++ b/bin/reth/src/db/snapshots/transactions.rs @@ -27,7 +27,7 @@ impl Command { inclusion_filter: InclusionFilter, phf: PerfectHashingFunction, ) -> eyre::Result<()> { - let range = self.block_range(); + let block_range = self.block_range(); let filters = if self.with_filters { Filters::WithFilters(inclusion_filter, phf) } else { @@ -36,12 +36,18 @@ impl Command { let segment = segments::Transactions::new(compression, filters); - segment.snapshot::(provider, PathBuf::default(), range.clone())?; + segment.snapshot::(provider, PathBuf::default(), block_range.clone())?; // 
Default name doesn't have any configuration + let tx_range = provider.transaction_range_by_block_range(block_range.clone())?; reth_primitives::fs::rename( - SnapshotSegment::Transactions.filename(&range), - SnapshotSegment::Transactions.filename_with_configuration(filters, compression, &range), + SnapshotSegment::Transactions.filename(&block_range, &tx_range), + SnapshotSegment::Transactions.filename_with_configuration( + filters, + compression, + &block_range, + &tx_range, + ), )?; Ok(()) @@ -62,7 +68,7 @@ impl Command { Filters::WithoutFilters }; - let block_range = self.from..=(self.from + self.block_interval - 1); + let block_range = self.block_range(); let mut rng = rand::thread_rng(); @@ -72,12 +78,15 @@ impl Command { let mut row_indexes = tx_range.clone().collect::>(); - let path = SnapshotSegment::Transactions - .filename_with_configuration(filters, compression, &block_range) + let path: PathBuf = SnapshotSegment::Transactions + .filename_with_configuration(filters, compression, &block_range, &tx_range) .into(); let provider = SnapshotProvider::default(); - let jar_provider = - provider.get_segment_provider(SnapshotSegment::Transactions, self.from, Some(path))?; + let jar_provider = provider.get_segment_provider_from_block( + SnapshotSegment::Transactions, + self.from, + Some(&path), + )?; let mut cursor = jar_provider.cursor()?; for bench_kind in [BenchKind::Walk, BenchKind::RandomAll] { diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index d85b2cc5e7fee..efc9b136b4b03 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -489,8 +489,8 @@ mod tests { }; use reth_primitives::{ constants::eip4844::DATA_GAS_PER_BLOB, hex_literal::hex, proofs, Account, Address, - BlockBody, BlockHash, BlockHashOrNumber, Bytes, ChainSpecBuilder, ForkCondition, Header, - Signature, TransactionKind, TransactionSigned, Withdrawal, MAINNET, U256, + BlockBody, BlockHash, BlockHashOrNumber, Bytes, ChainSpecBuilder, Header, Signature, + TransactionKind, TransactionSigned, Withdrawal, MAINNET, U256, }; use std::ops::RangeBounds; diff --git a/crates/interfaces/src/provider.rs b/crates/interfaces/src/provider.rs index da079fcdd1e23..a5a72c942ad67 100644 --- a/crates/interfaces/src/provider.rs +++ b/crates/interfaces/src/provider.rs @@ -1,6 +1,8 @@ use reth_primitives::{ - Address, BlockHash, BlockHashOrNumber, BlockNumber, GotExpected, TxHashOrNumber, TxNumber, B256, + Address, BlockHash, BlockHashOrNumber, BlockNumber, GotExpected, SnapshotSegment, + TxHashOrNumber, TxNumber, B256, }; +use std::path::PathBuf; use thiserror::Error; /// Bundled errors variants thrown by various providers. @@ -94,6 +96,15 @@ pub enum ProviderError { /// Provider does not support this particular request. #[error("this provider does not support this request")] UnsupportedProvider, + /// Snapshot file is not found at specified path. + #[error("not able to find {0} snapshot file at {1}")] + MissingSnapshotPath(SnapshotSegment, PathBuf), + /// Snapshot file is not found for requested block. + #[error("not able to find {0} snapshot file for block number {1}")] + MissingSnapshotBlock(SnapshotSegment, BlockNumber), + /// Snapshot file is not found for requested transaction. + #[error("not able to find {0} snapshot file for transaction id {1}")] + MissingSnapshotTx(SnapshotSegment, TxNumber), } /// A root mismatch error at a given block height. 
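The three new variants make snapshot misses distinguishable from other provider failures; a small hedged sketch of matching on them (not part of the patch itself):

    use reth_interfaces::provider::ProviderError;

    // Returns true for any of the snapshot-lookup failures added above.
    fn is_snapshot_miss(err: &ProviderError) -> bool {
        matches!(
            err,
            ProviderError::MissingSnapshotPath(_, _) |
                ProviderError::MissingSnapshotBlock(_, _) |
                ProviderError::MissingSnapshotTx(_, _)
        )
    }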
diff --git a/crates/primitives/src/snapshot/mod.rs b/crates/primitives/src/snapshot/mod.rs
index 8c595c75c1c7b..b8fe3a1e85cfe 100644
--- a/crates/primitives/src/snapshot/mod.rs
+++ b/crates/primitives/src/snapshot/mod.rs
@@ -4,11 +4,14 @@ mod compression;
 mod filters;
 mod segment;
 
-use alloy_primitives::BlockNumber;
+use alloy_primitives::{BlockNumber, TxNumber};
 pub use compression::Compression;
 pub use filters::{Filters, InclusionFilter, PerfectHashingFunction};
 pub use segment::{SegmentConfig, SegmentHeader, SnapshotSegment};
 
+use crate::fs::FsPathError;
+use std::{ops::RangeInclusive, path::Path};
+
 /// Default snapshot block count.
 pub const BLOCKS_PER_SNAPSHOT: u64 = 500_000;
 
@@ -35,4 +38,30 @@ impl HighestSnapshots {
             SnapshotSegment::Receipts => self.receipts,
         }
     }
+
+    /// Returns a mutable reference to a snapshot segment
+    pub fn as_mut(&mut self, segment: SnapshotSegment) -> &mut Option<BlockNumber> {
+        match segment {
+            SnapshotSegment::Headers => &mut self.headers,
+            SnapshotSegment::Transactions => &mut self.transactions,
+            SnapshotSegment::Receipts => &mut self.receipts,
+        }
+    }
 }
+
+/// Given the snapshots' directory path, returns an iterator over the existing snapshot files, as
+/// tuples of segment, block range and transaction range.
+pub fn iter_snapshots(
+    path: impl AsRef<Path>,
+) -> Result<
+    impl Iterator<Item = (SnapshotSegment, RangeInclusive<BlockNumber>, RangeInclusive<TxNumber>)>,
+    FsPathError,
+> {
+    let entries = crate::fs::read_dir(path.as_ref())?.filter_map(Result::ok);
+    Ok(entries.filter_map(|entry| {
+        if entry.metadata().map_or(false, |metadata| metadata.is_file()) {
+            return SnapshotSegment::parse_filename(&entry.file_name())
+        }
+        None
+    }))
+}
diff --git a/crates/primitives/src/snapshot/segment.rs b/crates/primitives/src/snapshot/segment.rs
index f016e3e85bff5..d8357fc169cbb 100644
--- a/crates/primitives/src/snapshot/segment.rs
+++ b/crates/primitives/src/snapshot/segment.rs
@@ -2,8 +2,9 @@ use crate::{
     snapshot::{Compression, Filters, InclusionFilter},
     BlockNumber, TxNumber,
 };
+use derive_more::Display;
 use serde::{Deserialize, Serialize};
-use std::{ops::RangeInclusive, str::FromStr};
+use std::{ffi::OsStr, ops::RangeInclusive, str::FromStr};
 use strum::{AsRefStr, EnumString};
 
 #[derive(
@@ -19,6 +20,7 @@
     Serialize,
     EnumString,
     AsRefStr,
+    Display,
 )]
 #[cfg_attr(feature = "clap", derive(clap::ValueEnum))]
 /// Segment of the data that can be snapshotted.
@@ -53,10 +55,21 @@ impl SnapshotSegment {
     }
 
     /// Returns the default file name for the provided segment and ranges.
-    pub fn filename(&self, range: &RangeInclusive<BlockNumber>) -> String {
+    pub fn filename(
+        &self,
+        block_range: &RangeInclusive<BlockNumber>,
+        tx_range: &RangeInclusive<TxNumber>,
+    ) -> String {
         // ATTENTION: if changing the name format, be sure to reflect those changes in
         // [`Self::parse_filename`].
-        format!("snapshot_{}_{}_{}", self.as_ref(), range.start(), range.end(),)
+        format!(
+            "snapshot_{}_{}_{}_{}_{}",
+            self.as_ref(),
+            block_range.start(),
+            block_range.end(),
+            tx_range.start(),
+            tx_range.end(),
+        )
     }
 
     /// Returns the file name for the provided segment and ranges, alongside filters and compression.
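Concretely, both ranges are flattened into the file name; a sketch that mirrors the first test vector added below:

    use reth_primitives::SnapshotSegment;

    // Blocks 2..=30 and transactions 0..=1.
    let name = SnapshotSegment::Headers.filename(&(2..=30), &(0..=1));
    assert_eq!(name, "snapshot_headers_2_30_0_1");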
@@ -64,9 +77,10 @@ impl SnapshotSegment { &self, filters: Filters, compression: Compression, - range: &RangeInclusive, + block_range: &RangeInclusive, + tx_range: &RangeInclusive, ) -> String { - let prefix = self.filename(range); + let prefix = self.filename(block_range, tx_range); let filters_name = match filters { Filters::WithFilters(inclusion_filter, phf) => { @@ -80,20 +94,41 @@ impl SnapshotSegment { format!("{prefix}_{}_{}", filters_name, compression.as_ref()) } - /// Takes a filename and parses the [`SnapshotSegment`] and its inclusive range. - pub fn parse_filename(name: &str) -> Option<(Self, RangeInclusive)> { - let parts: Vec<&str> = name.split('_').collect(); - if let (Ok(segment), true) = (Self::from_str(parts[1]), parts.len() >= 4) { - let start: u64 = parts[2].parse().unwrap_or(0); - let end: u64 = parts[3].parse().unwrap_or(0); + /// Parses a filename into a `SnapshotSegment` and its corresponding block and transaction + /// ranges. + /// + /// The filename is expected to follow the format: + /// "snapshot_{segment}_{block_start}_{block_end}_{tx_start}_{tx_end}". This function checks + /// for the correct prefix ("snapshot"), and then parses the segment and the inclusive + /// ranges for blocks and transactions. It ensures that the start of each range is less than the + /// end. + /// + /// # Returns + /// - `Some((segment, block_range, tx_range))` if parsing is successful and all conditions are + /// met. + /// - `None` if any condition fails, such as an incorrect prefix, parsing error, or invalid + /// range. + /// + /// # Note + /// This function is tightly coupled with the naming convention defined in [`Self::filename`]. + /// Any changes in the filename format in `filename` should be reflected here. + pub fn parse_filename( + name: &OsStr, + ) -> Option<(Self, RangeInclusive, RangeInclusive)> { + let mut parts = name.to_str()?.split('_'); + if parts.next() != Some("snapshot") { + return None; + } - if start <= end || parts[0] != "snapshot" { - return None - } + let segment = Self::from_str(parts.next()?).ok()?; + let (block_start, block_end) = (parts.next()?.parse().ok()?, parts.next()?.parse().ok()?); + let (tx_start, tx_end) = (parts.next()?.parse().ok()?, parts.next()?.parse().ok()?); - return Some((segment, start..=end)) + if block_start >= block_end || tx_start > tx_end { + return None; } - None + + Some((segment, block_start..=block_end, tx_start..=tx_end)) } } @@ -145,3 +180,92 @@ pub struct SegmentConfig { /// Compression used on the segment pub compression: Compression, } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_filename() { + let test_vectors = [ + (SnapshotSegment::Headers, 2..=30, 0..=1, "snapshot_headers_2_30_0_1", None), + ( + SnapshotSegment::Receipts, + 30..=300, + 110..=1000, + "snapshot_receipts_30_300_110_1000", + None, + ), + ( + SnapshotSegment::Transactions, + 1_123_233..=11_223_233, + 1_123_233..=2_123_233, + "snapshot_transactions_1123233_11223233_1123233_2123233", + None, + ), + ( + SnapshotSegment::Headers, + 2..=30, + 0..=1, + "snapshot_headers_2_30_0_1_cuckoo-fmph_lz4", + Some(( + Compression::Lz4, + Filters::WithFilters( + InclusionFilter::Cuckoo, + crate::snapshot::PerfectHashingFunction::Fmph, + ), + )), + ), + ( + SnapshotSegment::Headers, + 2..=30, + 0..=1, + "snapshot_headers_2_30_0_1_cuckoo-fmph_zstd", + Some(( + Compression::Zstd, + Filters::WithFilters( + InclusionFilter::Cuckoo, + crate::snapshot::PerfectHashingFunction::Fmph, + ), + )), + ), + ( + SnapshotSegment::Headers, + 2..=30, + 0..=1, + 
"snapshot_headers_2_30_0_1_cuckoo-fmph_zstd-dict", + Some(( + Compression::ZstdWithDictionary, + Filters::WithFilters( + InclusionFilter::Cuckoo, + crate::snapshot::PerfectHashingFunction::Fmph, + ), + )), + ), + ]; + + for (segment, block_range, tx_range, filename, configuration) in test_vectors { + if let Some((compression, filters)) = configuration { + assert_eq!( + segment.filename_with_configuration( + filters, + compression, + &block_range, + &tx_range + ), + filename + ); + } else { + assert_eq!(segment.filename(&block_range, &tx_range), filename); + } + + assert_eq!( + SnapshotSegment::parse_filename(OsStr::new(filename)), + Some((segment, block_range, tx_range)) + ); + } + + assert_eq!(SnapshotSegment::parse_filename(OsStr::new("snapshot_headers_2_30_3_2")), None); + assert_eq!(SnapshotSegment::parse_filename(OsStr::new("snapshot_headers_2_30_1")), None); + } +} diff --git a/crates/snapshot/src/segments/mod.rs b/crates/snapshot/src/segments/mod.rs index 37727953a59ef..0603dbeed4402 100644 --- a/crates/snapshot/src/segments/mod.rs +++ b/crates/snapshot/src/segments/mod.rs @@ -69,7 +69,7 @@ pub(crate) fn prepare_jar( let tx_range = provider.transaction_range_by_block_range(block_range.clone())?; let mut nippy_jar = NippyJar::new( COLUMNS, - &directory.as_ref().join(segment.filename(&block_range).as_str()), + &directory.as_ref().join(segment.filename(&block_range, &tx_range).as_str()), SegmentHeader::new(block_range, tx_range, segment), ); diff --git a/crates/snapshot/src/snapshotter.rs b/crates/snapshot/src/snapshotter.rs index cd583b5e14eb3..d9c1f6aeb003f 100644 --- a/crates/snapshot/src/snapshotter.rs +++ b/crates/snapshot/src/snapshotter.rs @@ -4,10 +4,16 @@ use crate::{segments, segments::Segment, SnapshotterError}; use reth_db::database::Database; use reth_interfaces::{RethError, RethResult}; use reth_primitives::{ - snapshot::HighestSnapshots, BlockNumber, ChainSpec, SnapshotSegment, TxNumber, + snapshot::{iter_snapshots, HighestSnapshots}, + BlockNumber, ChainSpec, TxNumber, +}; +use reth_provider::{BlockReader, DatabaseProviderRO, ProviderFactory, TransactionsProviderExt}; +use std::{ + collections::HashMap, + ops::RangeInclusive, + path::{Path, PathBuf}, + sync::Arc, }; -use reth_provider::{BlockReader, DatabaseProviderRO, ProviderFactory}; -use std::{collections::HashMap, ops::RangeInclusive, path::PathBuf, sync::Arc}; use tokio::sync::watch; use tracing::warn; @@ -89,7 +95,7 @@ impl Snapshotter { /// Creates a new [Snapshotter]. pub fn new( db: DB, - snapshots_path: PathBuf, + snapshots_path: impl AsRef, chain_spec: Arc, block_interval: u64, ) -> RethResult { @@ -97,8 +103,7 @@ impl Snapshotter { let mut snapshotter = Self { provider_factory: ProviderFactory::new(db, chain_spec), - snapshots_path, - // TODO(alexey): fill from on-disk snapshot data + snapshots_path: snapshots_path.as_ref().into(), highest_snapshots: HighestSnapshots::default(), highest_snapshots_notifier, highest_snapshots_tracker, @@ -152,23 +157,10 @@ impl Snapshotter { // It walks over the directory and parses the snapshot filenames extracting // `SnapshotSegment` and their inclusive range. It then takes the maximum block // number for each specific segment. - for (segment, range) in reth_primitives::fs::read_dir(&self.snapshots_path)? 
- .filter_map(Result::ok) - .filter_map(|entry| { - if let Ok(true) = entry.metadata().map(|metadata| metadata.is_file()) { - return SnapshotSegment::parse_filename(&entry.file_name().to_string_lossy()) - } - None - }) - { - let max_segment_block = match segment { - SnapshotSegment::Headers => &mut self.highest_snapshots.headers, - SnapshotSegment::Transactions => &mut self.highest_snapshots.transactions, - SnapshotSegment::Receipts => &mut self.highest_snapshots.receipts, - }; - - if max_segment_block.map_or(true, |block| block < *range.end()) { - *max_segment_block = Some(*range.end()); + for (segment, block_range, _) in iter_snapshots(&self.snapshots_path)? { + let max_segment_block = self.highest_snapshots.as_mut(segment); + if max_segment_block.map_or(true, |block| block < *block_range.end()) { + *max_segment_block = Some(*block_range.end()); } } @@ -218,13 +210,11 @@ impl Snapshotter { ) -> RethResult<()> { if let Some(block_range) = block_range { let temp = self.snapshots_path.join(TEMPORARY_SUBDIRECTORY); - let filename = S::segment().filename(&block_range); + let provider = self.provider_factory.provider()?; + let tx_range = provider.transaction_range_by_block_range(block_range.clone())?; + let filename = S::segment().filename(&block_range, &tx_range); - S::default().snapshot::( - &self.provider_factory.provider()?, - temp.clone(), - block_range.clone(), - )?; + S::default().snapshot::(&provider, temp.clone(), block_range)?; reth_primitives::fs::rename(temp.join(&filename), self.snapshots_path.join(filename))?; } diff --git a/crates/storage/provider/src/providers/snapshot/manager.rs b/crates/storage/provider/src/providers/snapshot/manager.rs index f75990059b322..7cdbdae316b15 100644 --- a/crates/storage/provider/src/providers/snapshot/manager.rs +++ b/crates/storage/provider/src/providers/snapshot/manager.rs @@ -1,23 +1,45 @@ use super::{LoadedJar, SnapshotJarProvider}; use crate::{BlockHashReader, BlockNumReader, HeaderProvider, TransactionsProvider}; use dashmap::DashMap; -use reth_interfaces::RethResult; +use parking_lot::RwLock; +use reth_db::{ + codecs::CompactU256, + snapshot::{HeaderMask, TransactionMask}, +}; +use reth_interfaces::{provider::ProviderError, RethResult}; use reth_nippy_jar::NippyJar; use reth_primitives::{ - snapshot::{HighestSnapshots, BLOCKS_PER_SNAPSHOT}, - Address, BlockHash, BlockHashOrNumber, BlockNumber, ChainInfo, Header, SealedHeader, - SnapshotSegment, TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, - B256, U256, + snapshot::HighestSnapshots, Address, BlockHash, BlockHashOrNumber, BlockNumber, ChainInfo, + Header, SealedHeader, SnapshotSegment, TransactionMeta, TransactionSigned, + TransactionSignedNoHash, TxHash, TxNumber, B256, U256, +}; +use revm::primitives::HashMap; +use std::{ + collections::BTreeMap, + ops::{RangeBounds, RangeInclusive}, + path::{Path, PathBuf}, }; -use std::{ops::RangeBounds, path::PathBuf}; use tokio::sync::watch; -/// SnapshotProvider +/// Alias type for a map that can be queried for transaction/block ranges from a block/transaction +/// segment respectively. It uses `BlockNumber` to represent the block end of a snapshot range or +/// `TxNumber` to represent the transaction end of a snapshot range. +/// +/// Can be in one of the two formats: +/// - `HashMap>>` +/// - `HashMap>>` +type SegmentRanges = HashMap>>; + +/// [`SnapshotProvider`] manages all existing [`SnapshotJarProvider`]. 
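A hedged usage sketch of the manager's read path, assuming its block index has been populated from the on-disk snapshot filenames (directory and height are illustrative):

    use reth_primitives::SnapshotSegment;
    use reth_provider::providers::SnapshotProvider;

    let provider = SnapshotProvider::new("/datadir/snapshots");

    // Block-indexed lookup; fails with `ProviderError::MissingSnapshotBlock`
    // if no snapshot range covers the requested height.
    let jar = provider.get_segment_provider_from_block(
        SnapshotSegment::Headers,
        1_000_000,
        None,
    )?;
    let header = jar.header_by_number(1_000_000)?;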
#[derive(Debug, Default)] pub struct SnapshotProvider { /// Maintains a map which allows for concurrent access to different `NippyJars`, over different /// segments and ranges. map: DashMap<(BlockNumber, SnapshotSegment), LoadedJar>, + /// Available snapshot ranges on disk indexed by max blocks. + snapshots_block_index: RwLock, + /// Available snapshot ranges on disk indexed by max transactions. + snapshots_tx_index: RwLock, /// Tracks the highest snapshot of every segment. highest_tracker: Option>>, /// Directory where snapshots are located @@ -26,8 +48,14 @@ pub struct SnapshotProvider { impl SnapshotProvider { /// Creates a new [`SnapshotProvider`]. - pub fn new(path: PathBuf) -> Self { - Self { map: Default::default(), highest_tracker: None, path } + pub fn new(path: impl AsRef) -> Self { + Self { + map: Default::default(), + snapshots_block_index: Default::default(), + snapshots_tx_index: Default::default(), + highest_tracker: None, + path: path.as_ref().to_path_buf(), + } } /// Adds a highest snapshot tracker to the provider @@ -39,30 +67,137 @@ impl SnapshotProvider { self } - /// Gets the provider of the requested segment and range. - pub fn get_segment_provider( + /// Gets the [`SnapshotJarProvider`] of the requested segment and block. + pub fn get_segment_provider_from_block( &self, segment: SnapshotSegment, block: BlockNumber, - mut path: Option, + path: Option<&Path>, ) -> RethResult> { - // TODO this invalidates custom length snapshots. - let snapshot = block / BLOCKS_PER_SNAPSHOT; - let key = (snapshot, segment); + self.get_segment_provider( + segment, + || self.get_segment_ranges_from_block(segment, block), + path, + )? + .ok_or_else(|| ProviderError::MissingSnapshotBlock(segment, block).into()) + } - if let Some(jar) = self.map.get(&key) { - return Ok(jar.into()) + /// Gets the [`SnapshotJarProvider`] of the requested segment and transaction. + pub fn get_segment_provider_from_transaction( + &self, + segment: SnapshotSegment, + tx: TxNumber, + path: Option<&Path>, + ) -> RethResult> { + self.get_segment_provider( + segment, + || self.get_segment_ranges_from_transaction(segment, tx), + path, + )? + .ok_or_else(|| ProviderError::MissingSnapshotTx(segment, tx).into()) + } + + /// Gets the [`SnapshotJarProvider`] of the requested segment and block or transaction. + pub fn get_segment_provider( + &self, + segment: SnapshotSegment, + fn_ranges: impl Fn() -> Option<(RangeInclusive, RangeInclusive)>, + path: Option<&Path>, + ) -> RethResult>> { + // If we have a path, then get the block range and transaction range from its name. + // Otherwise, check `self.available_snapshots` + let snapshot_ranges = match path { + Some(path) => { + SnapshotSegment::parse_filename(path.file_name().ok_or_else(|| { + ProviderError::MissingSnapshotPath(segment, path.to_path_buf()) + })?) + .and_then(|(parsed_segment, block_range, tx_range)| { + if parsed_segment == segment { + return Some((block_range, tx_range)); + } + None + }) + } + None => fn_ranges(), + }; + + // Return cached `LoadedJar` or insert it for the first time, and then, return it. + if let Some((block_range, tx_range)) = snapshot_ranges { + return Ok(Some(self.get_or_create_jar_provider(segment, &block_range, &tx_range)?)); } - if let Some(path) = &path { - self.map.insert(key, LoadedJar::new(NippyJar::load(path)?)?); + Ok(None) + } + + /// Given a segment, block range and transaction range it returns a cached + /// [`SnapshotJarProvider`]. TODO: we should check the size and pop N if there's too many. 
+ fn get_or_create_jar_provider( + &self, + segment: SnapshotSegment, + block_range: &RangeInclusive, + tx_range: &RangeInclusive, + ) -> Result, reth_interfaces::RethError> { + let key = (*block_range.end(), segment); + if let Some(jar) = self.map.get(&key) { + Ok(jar.into()) } else { - path = Some(self.path.join(segment.filename( - &((snapshot * BLOCKS_PER_SNAPSHOT)..=((snapshot + 1) * BLOCKS_PER_SNAPSHOT - 1)), - ))); + self.map.insert( + key, + LoadedJar::new(NippyJar::load( + &self.path.join(segment.filename(block_range, tx_range)), + )?)?, + ); + Ok(self.map.get(&key).expect("qed").into()) + } + } + + /// Gets a snapshot segment's block range and transaction range from the provider inner block + /// index. + fn get_segment_ranges_from_block( + &self, + segment: SnapshotSegment, + block: u64, + ) -> Option<(RangeInclusive, RangeInclusive)> { + let snapshots = self.snapshots_block_index.read(); + let segment_snapshots = snapshots.get(&segment)?; + + // It's more probable that the request comes from a newer block height, so we iterate + // the snapshots in reverse. + let mut snapshots_rev_iter = segment_snapshots.iter().rev().peekable(); + + while let Some((block_end, tx_range)) = snapshots_rev_iter.next() { + // `unwrap_or(0) is safe here as it sets block_start to 0 if the iterator is empty, + // indicating the lowest height snapshot has been reached. + let block_start = + snapshots_rev_iter.peek().map(|(block_end, _)| *block_end + 1).unwrap_or(0); + if block_start <= block { + return Some((block_start..=*block_end, tx_range.clone())); + } } + None + } - self.get_segment_provider(segment, block, path) + /// Gets a snapshot segment's block range and transaction range from the provider inner + /// transaction index. + fn get_segment_ranges_from_transaction( + &self, + segment: SnapshotSegment, + tx: u64, + ) -> Option<(RangeInclusive, RangeInclusive)> { + let snapshots = self.snapshots_tx_index.read(); + let segment_snapshots = snapshots.get(&segment)?; + + // It's more probable that the request comes from a newer tx height, so we iterate + // the snapshots in reverse. + let mut snapshots_rev_iter = segment_snapshots.iter().rev().peekable(); + + while let Some((tx_end, block_range)) = snapshots_rev_iter.next() { + let tx_start = snapshots_rev_iter.peek().map(|(tx_end, _)| *tx_end + 1).unwrap_or(0); + if tx_start <= tx { + return Some((block_range.clone(), tx_start..=*tx_end)); + } + } + None } /// Gets the highest snapshot if it exists for a snapshot segment. @@ -71,23 +206,72 @@ impl SnapshotProvider { .as_ref() .and_then(|tracker| tracker.borrow().and_then(|highest| highest.highest(segment))) } + + /// Iterates through segment snapshots in reverse order, executing a function until it returns + /// some object. Useful for finding objects by [`TxHash`] or [`BlockHash`]. + pub fn find_snapshot( + &self, + segment: SnapshotSegment, + func: impl Fn(SnapshotJarProvider<'_>) -> RethResult>, + ) -> RethResult> { + let snapshots = self.snapshots_block_index.read(); + if let Some(segment_snapshots) = snapshots.get(&segment) { + // It's more probable that the request comes from a newer block height, so we iterate + // the snapshots in reverse. + let mut snapshots_rev_iter = segment_snapshots.iter().rev().peekable(); + + while let Some((block_end, tx_range)) = snapshots_rev_iter.next() { + // `unwrap_or(0) is safe here as it sets block_start to 0 if the iterator + // is empty, indicating the lowest height snapshot has been reached. 
+ let block_start = + snapshots_rev_iter.peek().map(|(block_end, _)| *block_end + 1).unwrap_or(0); + + if let Some(res) = func(self.get_or_create_jar_provider( + segment, + &(block_start..=*block_end), + tx_range, + )?)? { + return Ok(Some(res)) + } + } + } + + Ok(None) + } } impl HeaderProvider for SnapshotProvider { - fn header(&self, _block_hash: &BlockHash) -> RethResult> { - todo!() + fn header(&self, block_hash: &BlockHash) -> RethResult> { + self.find_snapshot(SnapshotSegment::Headers, |jar_provider| { + Ok(jar_provider + .cursor()? + .get_two::>(block_hash.into())? + .and_then(|(header, hash)| { + if &hash == block_hash { + return Some(header) + } + None + })) + }) } fn header_by_number(&self, num: BlockNumber) -> RethResult> { - self.get_segment_provider(SnapshotSegment::Headers, num, None)?.header_by_number(num) + self.get_segment_provider_from_block(SnapshotSegment::Headers, num, None)? + .header_by_number(num) } - fn header_td(&self, _block_hash: &BlockHash) -> RethResult> { - todo!() + fn header_td(&self, block_hash: &BlockHash) -> RethResult> { + self.find_snapshot(SnapshotSegment::Headers, |jar_provider| { + Ok(jar_provider + .cursor()? + .get_two::>(block_hash.into())? + .and_then(|(td, hash)| (&hash == block_hash).then_some(td.0))) + }) } - fn header_td_by_number(&self, _number: BlockNumber) -> RethResult> { - todo!(); + fn header_td_by_number(&self, num: BlockNumber) -> RethResult> { + self.get_segment_provider_from_block(SnapshotSegment::Headers, num, None)? + .header_td_by_number(num) } fn headers_range(&self, _range: impl RangeBounds) -> RethResult> { @@ -101,14 +285,15 @@ impl HeaderProvider for SnapshotProvider { todo!(); } - fn sealed_header(&self, _number: BlockNumber) -> RethResult> { - todo!(); + fn sealed_header(&self, num: BlockNumber) -> RethResult> { + self.get_segment_provider_from_block(SnapshotSegment::Headers, num, None)? + .sealed_header(num) } } impl BlockHashReader for SnapshotProvider { - fn block_hash(&self, _number: u64) -> RethResult> { - todo!() + fn block_hash(&self, num: u64) -> RethResult> { + self.get_segment_provider_from_block(SnapshotSegment::Headers, num, None)?.block_hash(num) } fn canonical_hashes_range( @@ -139,26 +324,42 @@ impl BlockNumReader for SnapshotProvider { } impl TransactionsProvider for SnapshotProvider { - fn transaction_id(&self, _tx_hash: TxHash) -> RethResult> { - todo!() + fn transaction_id(&self, tx_hash: TxHash) -> RethResult> { + self.find_snapshot(SnapshotSegment::Transactions, |jar_provider| { + let mut cursor = jar_provider.cursor()?; + if cursor + .get_one::>((&tx_hash).into())? + .and_then(|tx| (tx.hash() == tx_hash).then_some(tx)) + .is_some() + { + Ok(Some(cursor.number())) + } else { + Ok(None) + } + }) } fn transaction_by_id(&self, num: TxNumber) -> RethResult> { - // TODO `num` is provided after checking the index - let block_num = num; - self.get_segment_provider(SnapshotSegment::Transactions, block_num, None)? + self.get_segment_provider_from_transaction(SnapshotSegment::Transactions, num, None)? .transaction_by_id(num) } fn transaction_by_id_no_hash( &self, - _id: TxNumber, + num: TxNumber, ) -> RethResult> { - todo!() + self.get_segment_provider_from_transaction(SnapshotSegment::Transactions, num, None)? + .transaction_by_id_no_hash(num) } - fn transaction_by_hash(&self, _hash: TxHash) -> RethResult> { - todo!() + fn transaction_by_hash(&self, hash: TxHash) -> RethResult> { + self.find_snapshot(SnapshotSegment::Transactions, |jar_provider| { + Ok(jar_provider + .cursor()? + .get_one::>((&hash).into())? 
+ .map(|tx| tx.with_hash()) + .and_then(|tx| (tx.hash_ref() == &hash).then_some(tx))) + }) } fn transaction_by_hash_with_meta( @@ -197,7 +398,7 @@ impl TransactionsProvider for SnapshotProvider { todo!() } - fn transaction_sender(&self, _id: TxNumber) -> RethResult> { - todo!() + fn transaction_sender(&self, id: TxNumber) -> RethResult> { + Ok(self.transaction_by_id_no_hash(id)?.and_then(|tx| tx.recover_signer())) } } diff --git a/crates/storage/provider/src/providers/snapshot/mod.rs b/crates/storage/provider/src/providers/snapshot/mod.rs index f7fed480f4b9f..cde7dbfc1ac83 100644 --- a/crates/storage/provider/src/providers/snapshot/mod.rs +++ b/crates/storage/provider/src/providers/snapshot/mod.rs @@ -66,7 +66,8 @@ mod test { // Data sources let db = create_test_rw_db(); let factory = ProviderFactory::new(&db, MAINNET.clone()); - let snap_file = tempfile::NamedTempFile::new().unwrap(); + let snap_path = tempfile::tempdir().unwrap(); + let snap_file = snap_path.path().join(SnapshotSegment::Headers.filename(&range, &range)); // Setup data let mut headers = random_header_range( @@ -96,7 +97,7 @@ mod test { let with_compression = true; let with_filter = true; - let mut nippy_jar = NippyJar::new(3, snap_file.path(), segment_header); + let mut nippy_jar = NippyJar::new(3, snap_file.as_path(), segment_header); if with_compression { nippy_jar = nippy_jar.with_zstd(false, 0); @@ -134,9 +135,9 @@ mod test { // Use providers to query Header data and compare if it matches { let db_provider = factory.provider().unwrap(); - let manager = SnapshotProvider::default(); + let manager = SnapshotProvider::new(snap_path.path()); let jar_provider = manager - .get_segment_provider(SnapshotSegment::Headers, 0, Some(snap_file.path().into())) + .get_segment_provider_from_block(SnapshotSegment::Headers, 0, Some(&snap_file)) .unwrap(); assert!(!headers.is_empty()); From 0583a967df14d8c3a101db4f3a2a7a8f2008b51e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 15 Nov 2023 20:01:31 +0100 Subject: [PATCH 09/77] chore: make it harder to misuse raw pointers (#5447) --- .../storage/libmdbx-rs/benches/transaction.rs | 60 +++++++++--------- crates/storage/libmdbx-rs/src/environment.rs | 61 +++++++++++++++---- crates/storage/libmdbx-rs/src/error.rs | 1 + crates/storage/libmdbx-rs/src/transaction.rs | 35 +++++++++-- 4 files changed, 110 insertions(+), 47 deletions(-) diff --git a/crates/storage/libmdbx-rs/benches/transaction.rs b/crates/storage/libmdbx-rs/benches/transaction.rs index 43991f3d0ba77..01fb8ebdf0f3f 100644 --- a/crates/storage/libmdbx-rs/benches/transaction.rs +++ b/crates/storage/libmdbx-rs/benches/transaction.rs @@ -32,30 +32,31 @@ fn bench_get_rand(c: &mut Criterion) { fn bench_get_rand_raw(c: &mut Criterion) { let n = 100u32; let (_dir, env) = setup_bench_db(n); - let _txn = env.begin_ro_txn().unwrap(); - let db = _txn.open_db(None).unwrap(); + let txn = env.begin_ro_txn().unwrap(); + let db = txn.open_db(None).unwrap(); let mut keys: Vec = (0..n).map(get_key).collect(); keys.shuffle(&mut XorShiftRng::from_seed(Default::default())); let dbi = db.dbi(); - let txn = _txn.txn(); let mut key_val: MDBX_val = MDBX_val { iov_len: 0, iov_base: ptr::null_mut() }; let mut data_val: MDBX_val = MDBX_val { iov_len: 0, iov_base: ptr::null_mut() }; c.bench_function("bench_get_rand_raw", |b| { b.iter(|| unsafe { - let mut i: size_t = 0; - for key in &keys { - key_val.iov_len = key.len() as size_t; - key_val.iov_base = key.as_bytes().as_ptr() as *mut _; - - mdbx_get(txn, dbi, &key_val, &mut data_val); - - i += 
key_val.iov_len; - } - black_box(i); + txn.with_raw_tx_ptr(|txn| { + let mut i: size_t = 0; + for key in &keys { + key_val.iov_len = key.len() as size_t; + key_val.iov_base = key.as_bytes().as_ptr() as *mut _; + + mdbx_get(txn, dbi, &key_val, &mut data_val); + + i += key_val.iov_len; + } + black_box(i); + }); }) }); } @@ -84,13 +85,12 @@ fn bench_put_rand(c: &mut Criterion) { fn bench_put_rand_raw(c: &mut Criterion) { let n = 100u32; - let (_dir, _env) = setup_bench_db(0); + let (_dir, env) = setup_bench_db(0); let mut items: Vec<(String, String)> = (0..n).map(|n| (get_key(n), get_data(n))).collect(); items.shuffle(&mut XorShiftRng::from_seed(Default::default())); - let dbi = _env.begin_ro_txn().unwrap().open_db(None).unwrap().dbi(); - let env = _env.env(); + let dbi = env.begin_ro_txn().unwrap().open_db(None).unwrap().dbi(); let mut key_val: MDBX_val = MDBX_val { iov_len: 0, iov_base: ptr::null_mut() }; let mut data_val: MDBX_val = MDBX_val { iov_len: 0, iov_base: ptr::null_mut() }; @@ -98,19 +98,21 @@ fn bench_put_rand_raw(c: &mut Criterion) { c.bench_function("bench_put_rand_raw", |b| { b.iter(|| unsafe { let mut txn: *mut MDBX_txn = ptr::null_mut(); - mdbx_txn_begin_ex(env, ptr::null_mut(), 0, &mut txn, ptr::null_mut()); - - let mut i: ::libc::c_int = 0; - for (key, data) in items.iter() { - key_val.iov_len = key.len() as size_t; - key_val.iov_base = key.as_bytes().as_ptr() as *mut _; - data_val.iov_len = data.len() as size_t; - data_val.iov_base = data.as_bytes().as_ptr() as *mut _; - - i += mdbx_put(txn, dbi, &key_val, &mut data_val, 0); - } - assert_eq!(0, i); - mdbx_txn_abort(txn); + env.with_raw_env_ptr(|env| { + mdbx_txn_begin_ex(env, ptr::null_mut(), 0, &mut txn, ptr::null_mut()); + + let mut i: ::libc::c_int = 0; + for (key, data) in items.iter() { + key_val.iov_len = key.len() as size_t; + key_val.iov_base = key.as_bytes().as_ptr() as *mut _; + data_val.iov_len = data.len() as size_t; + data_val.iov_base = data.as_bytes().as_ptr() as *mut _; + + i += mdbx_put(txn, dbi, &key_val, &mut data_val, 0); + } + assert_eq!(0, i); + mdbx_txn_abort(txn); + }); }) }); } diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs index b0338826b0f52..83860c18c7238 100644 --- a/crates/storage/libmdbx-rs/src/environment.rs +++ b/crates/storage/libmdbx-rs/src/environment.rs @@ -98,15 +98,29 @@ impl Environment { } /// Returns true if the environment was opened as WRITEMAP. + #[inline] pub fn is_write_map(&self) -> bool { self.inner.env_kind.is_write_map() } /// Returns the kind of the environment. + #[inline] pub fn env_kind(&self) -> EnvironmentKind { self.inner.env_kind } + /// Returns true if the environment was opened in [Mode::ReadWrite] mode. + #[inline] + pub fn is_read_write(&self) -> bool { + self.inner.txn_manager.is_some() + } + + /// Returns true if the environment was opened in [Mode::ReadOnly] mode. + #[inline] + pub fn is_read_only(&self) -> bool { + self.inner.txn_manager.is_none() + } + /// Returns the manager that handles transaction messages. /// /// Requires [Mode::ReadWrite] and returns None otherwise. @@ -115,15 +129,6 @@ impl Environment { self.inner.txn_manager.as_ref() } - /// Returns a raw pointer to the underlying MDBX environment. - /// - /// The caller **must** ensure that the pointer is not dereferenced after the lifetime of the - /// environment. - #[inline] - pub fn env(&self) -> *mut ffi::MDBX_env { - self.inner.env - } - /// Create a read-only transaction for use with the environment. 
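     ///
     /// Read-only transactions use MDBX's MVCC snapshots: multiple readers can be open
     /// concurrently, and they do not block the single writer.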
#[inline] pub fn begin_ro_txn(&self) -> Result> { @@ -133,7 +138,7 @@ impl Environment { /// Create a read-write transaction for use with the environment. This method will block while /// there are any other read-write transactions open on the environment. pub fn begin_rw_txn(&self) -> Result> { - let sender = self.txn_manager().ok_or(Error::Access)?; + let sender = self.txn_manager().ok_or(Error::WriteTransactionUnsupportedInReadOnlyMode)?; let txn = loop { let (tx, rx) = sync_channel(0); sender @@ -154,9 +159,32 @@ impl Environment { Ok(Transaction::new_from_ptr(self, txn.0)) } + /// Returns a raw pointer to the underlying MDBX environment. + /// + /// The caller **must** ensure that the pointer is never dereferenced after the environment has + /// been dropped. + #[inline] + pub(crate) fn env_ptr(&self) -> *mut ffi::MDBX_env { + self.inner.env + } + + /// Executes the given closure once + /// + /// This is only intended to be used when accessing mdbx ffi functions directly is required. + /// + /// The caller **must** ensure that the pointer is only used within the closure. + #[inline] + #[doc(hidden)] + pub fn with_raw_env_ptr(&self, f: F) -> T + where + F: FnOnce(*mut ffi::MDBX_env) -> T, + { + (f)(self.env_ptr()) + } + /// Flush the environment data buffers to disk. pub fn sync(&self, force: bool) -> Result { - mdbx_result(unsafe { ffi::mdbx_env_sync_ex(self.env(), force, false) }) + mdbx_result(unsafe { ffi::mdbx_env_sync_ex(self.env_ptr(), force, false) }) } /// Retrieves statistics about this environment. @@ -164,7 +192,7 @@ impl Environment { unsafe { let mut stat = Stat::new(); mdbx_result(ffi::mdbx_env_stat_ex( - self.env(), + self.env_ptr(), ptr::null(), stat.mdb_stat(), size_of::(), @@ -178,7 +206,7 @@ impl Environment { unsafe { let mut info = Info(mem::zeroed()); mdbx_result(ffi::mdbx_env_info_ex( - self.env(), + self.env_ptr(), ptr::null(), &mut info.0, size_of::(), @@ -237,8 +265,15 @@ impl Environment { /// This holds the raw pointer to the MDBX environment and the transaction manager. /// The env is opened via [mdbx_env_create](ffi::mdbx_env_create) and closed when this type drops. struct EnvironmentInner { + /// The raw pointer to the MDBX environment. + /// + /// Accessing the environment is thread-safe as long as long as this type exists. env: *mut ffi::MDBX_env, + /// Whether the environment was opened as WRITEMAP. env_kind: EnvironmentKind, + /// the sender half of the transaction manager channel + /// + /// Only set if the environment was opened in [Mode::ReadWrite] mode. txn_manager: Option>, } diff --git a/crates/storage/libmdbx-rs/src/error.rs b/crates/storage/libmdbx-rs/src/error.rs index 274fcc47f7a6c..250ca23cada17 100644 --- a/crates/storage/libmdbx-rs/src/error.rs +++ b/crates/storage/libmdbx-rs/src/error.rs @@ -56,6 +56,7 @@ pub enum Error { TooLarge, DecodeErrorLenDiff, NestedTransactionsUnsupportedWithWriteMap, + WriteTransactionUnsupportedInReadOnlyMode, Other(i32), } diff --git a/crates/storage/libmdbx-rs/src/transaction.rs b/crates/storage/libmdbx-rs/src/transaction.rs index ddea959125bf8..6d3615b208662 100644 --- a/crates/storage/libmdbx-rs/src/transaction.rs +++ b/crates/storage/libmdbx-rs/src/transaction.rs @@ -74,7 +74,7 @@ where let mut txn: *mut ffi::MDBX_txn = ptr::null_mut(); unsafe { mdbx_result(ffi::mdbx_txn_begin_ex( - env.env(), + env.env_ptr(), ptr::null_mut(), K::OPEN_FLAGS, &mut txn, @@ -100,10 +100,14 @@ where /// The caller **must** ensure that the pointer is not used after the /// lifetime of the transaction. 
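     ///
     /// The internal transaction lock is held while `f` runs, so the raw pointer remains
     /// valid for the duration of the closure.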
#[inline] - pub(crate) fn txn_execute T, T>(&self, f: F) -> T { + pub(crate) fn txn_execute(&self, f: F) -> T + where + F: FnOnce(*mut ffi::MDBX_txn) -> T, + { self.inner.txn_execute(f) } + /// Returns a copy of the pointer to the underlying MDBX transaction. pub(crate) fn txn_ptr(&self) -> TransactionPtr { self.inner.txn.clone() } @@ -114,6 +118,21 @@ where self.inner.txn.txn } + /// Executes the given closure once + /// + /// This is only intended to be used when accessing mdbx ffi functions directly is required. + /// + /// The caller **must** ensure that the pointer is only used within the closure. + #[inline] + #[doc(hidden)] + pub fn with_raw_tx_ptr(&self, f: F) -> T + where + F: FnOnce(*mut ffi::MDBX_txn) -> T, + { + let _lock = self.inner.txn.lock.lock(); + f(self.inner.txn.txn) + } + /// Returns a raw pointer to the MDBX environment. pub fn env(&self) -> &Environment { self.inner.env @@ -278,7 +297,10 @@ where } #[inline] - fn txn_execute T, T>(&self, f: F) -> T { + fn txn_execute(&self, f: F) -> T + where + F: FnOnce(*mut ffi::MDBX_txn) -> T, + { self.txn.txn_execute(f) } } @@ -449,7 +471,7 @@ impl<'env> Transaction<'env, RO> { /// Caller must close ALL other [Database] and [Cursor] instances pointing to the same dbi /// BEFORE calling this function. pub unsafe fn close_db(&self, db: Database<'_>) -> Result<()> { - mdbx_result(ffi::mdbx_dbi_close(self.env().env(), db.dbi()))?; + mdbx_result(ffi::mdbx_dbi_close(self.env().env_ptr(), db.dbi()))?; Ok(()) } @@ -501,7 +523,10 @@ impl TransactionPtr { /// Executes the given closure once the lock on the transaction is acquired. #[inline] - pub(crate) fn txn_execute T, T>(&self, f: F) -> T { + pub(crate) fn txn_execute(&self, f: F) -> T + where + F: FnOnce(*mut ffi::MDBX_txn) -> T, + { let _lck = self.lock.lock(); (f)(self.txn) } From 49f4606583ee8ee94d6a11145ffcb42c381ceec9 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Wed, 15 Nov 2023 19:03:34 +0000 Subject: [PATCH 10/77] fix(net): check bounds on message logging (#5448) --- crates/net/eth-wire/src/ethstream.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/crates/net/eth-wire/src/ethstream.rs b/crates/net/eth-wire/src/ethstream.rs index a574b180f1b9a..23f64040a4383 100644 --- a/crates/net/eth-wire/src/ethstream.rs +++ b/crates/net/eth-wire/src/ethstream.rs @@ -242,9 +242,14 @@ where let msg = match ProtocolMessage::decode_message(*this.version, &mut bytes.as_ref()) { Ok(m) => m, Err(err) => { + let msg = if bytes.len() > 50 { + format!("{:02x?}...{:x?}", &bytes[..10], &bytes[bytes.len() - 10..]) + } else { + format!("{:02x?}", bytes) + }; debug!( version=?this.version, - msg=format!("{:02x?}...{:x?}", &bytes[..10], &bytes[bytes.len() - 10..]), + %msg, "failed to decode protocol message" ); return Poll::Ready(Some(Err(err))) From 8459fb0ada0ee9a7bf97e1c8f4a6fcd40e80d195 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 15 Nov 2023 20:03:54 +0100 Subject: [PATCH 11/77] chore: make inner private (#5445) --- crates/storage/db/src/implementation/mdbx/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index 161d87e415ed3..8a9475b40bc4e 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -31,11 +31,11 @@ pub enum DatabaseEnvKind { RW, } -/// Wrapper for the libmdbx environment. 
+/// Wrapper for the libmdbx environment: [Environment] #[derive(Debug)] pub struct DatabaseEnv { /// Libmdbx-sys environment. - pub inner: Environment, + inner: Environment, /// Whether to record metrics or not. with_metrics: bool, } From 187f6faa0966e721e9fb76c728786c0e2c40607a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 15 Nov 2023 20:04:03 +0100 Subject: [PATCH 12/77] chore: some blob reinject improvements (#5441) --- crates/transaction-pool/src/blobstore/disk.rs | 15 +++++++++++++++ crates/transaction-pool/src/blobstore/mem.rs | 5 +++++ crates/transaction-pool/src/blobstore/mod.rs | 3 +++ crates/transaction-pool/src/blobstore/noop.rs | 4 ++++ crates/transaction-pool/src/blobstore/tracker.rs | 11 +++++++++-- crates/transaction-pool/src/lib.rs | 3 +++ crates/transaction-pool/src/maintain.rs | 5 +++-- crates/transaction-pool/src/validate/eth.rs | 6 +++++- 8 files changed, 47 insertions(+), 5 deletions(-) diff --git a/crates/transaction-pool/src/blobstore/disk.rs b/crates/transaction-pool/src/blobstore/disk.rs index 1dcc9abf13b1d..43caf112b7939 100644 --- a/crates/transaction-pool/src/blobstore/disk.rs +++ b/crates/transaction-pool/src/blobstore/disk.rs @@ -75,6 +75,10 @@ impl BlobStore for DiskFileBlobStore { self.inner.get_one(tx) } + fn contains(&self, tx: B256) -> Result { + self.inner.contains(tx) + } + fn get_all( &self, txs: Vec, @@ -183,6 +187,15 @@ impl DiskFileBlobStoreInner { Ok(()) } + /// Returns true if the blob for the given transaction hash is in the blob cache or on disk. + fn contains(&self, tx: B256) -> Result { + if self.blob_cache.lock().get(&tx).is_some() { + return Ok(true) + } + // we only check if the file exists and assume it's valid + Ok(self.blob_disk_file(tx).is_file()) + } + /// Retrieves the blob for the given transaction hash from the blob cache or disk. fn get_one(&self, tx: B256) -> Result, BlobStoreError> { if let Some(blob) = self.blob_cache.lock().get(&tx) { @@ -438,6 +451,7 @@ mod tests { assert!(blobs.contains(&(tx, blob)), "missing blob {:?}", tx); } + assert!(store.contains(all_hashes[0]).unwrap()); store.delete_all(all_hashes.clone()).unwrap(); store.clear_cache(); @@ -446,6 +460,7 @@ mod tests { let all = store.get_all(all_hashes.clone()).unwrap(); assert!(all.is_empty()); + assert!(!store.contains(all_hashes[0]).unwrap()); assert!(store.get_exact(all_hashes).is_err()); } } diff --git a/crates/transaction-pool/src/blobstore/mem.rs b/crates/transaction-pool/src/blobstore/mem.rs index af2f4e746d04f..568fa5ec69a0a 100644 --- a/crates/transaction-pool/src/blobstore/mem.rs +++ b/crates/transaction-pool/src/blobstore/mem.rs @@ -67,6 +67,11 @@ impl BlobStore for InMemoryBlobStore { Ok(store.get(&tx).cloned()) } + fn contains(&self, tx: B256) -> Result { + let store = self.inner.store.read(); + Ok(store.contains_key(&tx)) + } + fn get_all( &self, txs: Vec, diff --git a/crates/transaction-pool/src/blobstore/mod.rs b/crates/transaction-pool/src/blobstore/mod.rs index 0fffdb16d4bd8..6dee69d4b33fa 100644 --- a/crates/transaction-pool/src/blobstore/mod.rs +++ b/crates/transaction-pool/src/blobstore/mod.rs @@ -34,6 +34,9 @@ pub trait BlobStore: fmt::Debug + Send + Sync + 'static { /// Retrieves the decoded blob data for the given transaction hash. fn get(&self, tx: B256) -> Result, BlobStoreError>; + /// Checks if the given transaction hash is in the blob store. + fn contains(&self, tx: B256) -> Result; + /// Retrieves all decoded blob data for the given transaction hashes. /// /// This only returns the blobs that were found in the store. 
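
A minimal sketch of how a caller might pair the new `contains` check with `get`, using only the trait methods visible in this hunk; `fetch_if_present` is a hypothetical helper, not part of the patch:

```rust
use reth_primitives::{BlobTransactionSidecar, B256};
use reth_transaction_pool::blobstore::{BlobStore, BlobStoreError};

/// Hypothetical helper: consult the cheap `contains` check before paying for a
/// full `get`, e.g. when deciding whether a re-injected blob transaction can
/// be revalidated (see the validator change later in this patch).
fn fetch_if_present<S: BlobStore>(
    store: &S,
    tx: B256,
) -> Result<Option<BlobTransactionSidecar>, BlobStoreError> {
    if store.contains(tx)? {
        // The blob may still be evicted between the two calls, so the `get`
        // result is checked rather than assumed to be `Some`.
        return store.get(tx)
    }
    Ok(None)
}
```

Splitting the existence check from the fetch keeps the hot path cheap for stores like the disk-backed implementation, where `contains` only probes the cache and file metadata.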
diff --git a/crates/transaction-pool/src/blobstore/noop.rs b/crates/transaction-pool/src/blobstore/noop.rs index b3d4915dd1361..81b844d48c806 100644 --- a/crates/transaction-pool/src/blobstore/noop.rs +++ b/crates/transaction-pool/src/blobstore/noop.rs @@ -27,6 +27,10 @@ impl BlobStore for NoopBlobStore { Ok(None) } + fn contains(&self, _tx: B256) -> Result { + Ok(false) + } + fn get_all( &self, _txs: Vec, diff --git a/crates/transaction-pool/src/blobstore/tracker.rs b/crates/transaction-pool/src/blobstore/tracker.rs index a3dd30bca04fa..c9221002ad46c 100644 --- a/crates/transaction-pool/src/blobstore/tracker.rs +++ b/crates/transaction-pool/src/blobstore/tracker.rs @@ -22,6 +22,8 @@ impl BlobStoreCanonTracker { } /// Adds all blocks to the tracked list of blocks. + /// + /// Replaces any previously tracked blocks with the set of transactions. pub fn add_blocks( &mut self, blocks: impl IntoIterator)>, @@ -32,6 +34,9 @@ impl BlobStoreCanonTracker { } /// Adds all blob transactions from the given chain to the tracker. + /// + /// Note: In case this is a chain that's part of a reorg, this replaces previously tracked + /// blocks. pub fn add_new_chain_blocks(&mut self, blocks: &ChainBlocks<'_>) { let blob_txs = blocks.iter().map(|(num, blocks)| { let iter = @@ -42,10 +47,12 @@ impl BlobStoreCanonTracker { } /// Invoked when a block is finalized. - pub fn on_finalized_block(&mut self, number: BlockNumber) -> BlobStoreUpdates { + /// + /// This returns all blob transactions that were included in blocks that are now finalized. + pub fn on_finalized_block(&mut self, finalized_block: BlockNumber) -> BlobStoreUpdates { let mut finalized = Vec::new(); while let Some(entry) = self.blob_txs_in_blocks.first_entry() { - if *entry.key() <= number { + if *entry.key() <= finalized_block { finalized.extend(entry.remove_entry().1); } else { break diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index f0ab1124bde71..a1d8b4ba7aa7f 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -351,6 +351,9 @@ where origin: TransactionOrigin, transactions: Vec, ) -> PoolResult>> { + if transactions.is_empty() { + return Ok(Vec::new()) + } let validated = self.validate_all(origin, transactions).await?; let transactions = diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 4da9987b721f9..19c3d6d513d0d 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -304,11 +304,12 @@ pub async fn maintain_transaction_pool( // to be re-injected // // Note: we no longer know if the tx was local or external + // Because the transactions are not finalized, the corresponding blobs are still in + // blob store (if we previously received them from the network) metrics.inc_reinserted_transactions(pruned_old_transactions.len()); let _ = pool.add_external_transactions(pruned_old_transactions).await; - // keep track of mined blob transactions - // TODO(mattsse): handle reorged transactions + // keep track of new mined blob transactions blob_store_tracker.add_new_chain_blocks(&new_blocks); } CanonStateNotification::Commit { new } => { diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 51c6470bc10b0..c8ca6891d767e 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -307,7 +307,11 @@ where ) } EthBlobTransactionSidecar::Missing => { - if let Ok(Some(_)) = 
self.blob_store.get(*transaction.hash()) { + // This can happen for re-injected blob transactions (on re-org), since the blob + // is stripped from the transaction and not included in a block. + // check if the blob is in the store, if it's included we previously validated + // it and inserted it + if let Ok(true) = self.blob_store.contains(*transaction.hash()) { // validated transaction is already in the store } else { return TransactionValidationOutcome::Invalid( From 820740135958dc00998d71ac7fa5485a02b9c499 Mon Sep 17 00:00:00 2001 From: DoTheBestToGetTheBest <146037313+DoTheBestToGetTheBest@users.noreply.github.com> Date: Wed, 15 Nov 2023 11:43:45 -0800 Subject: [PATCH 13/77] feat(bin) : refactor auth_jwt_secret function (#5451) --- bin/reth/src/args/rpc_server_args.rs | 11 ++--------- bin/reth/src/utils.rs | 14 +++++++++++++- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/bin/reth/src/args/rpc_server_args.rs b/bin/reth/src/args/rpc_server_args.rs index 0a1ec745cb4af..32005b1638f24 100644 --- a/bin/reth/src/args/rpc_server_args.rs +++ b/bin/reth/src/args/rpc_server_args.rs @@ -10,6 +10,7 @@ use crate::{ config::RethRpcConfig, ext::RethNodeCommandConfig, }, + utils::get_or_create_jwt_secret_from_path, }; use clap::{ builder::{PossibleValue, RangedU64ValueParser, TypedValueParser}, @@ -449,15 +450,7 @@ impl RethRpcConfig for RpcServerArgs { debug!(target: "reth::cli", user_path=?fpath, "Reading JWT auth secret file"); JwtSecret::from_file(fpath) } - None => { - if default_jwt_path.exists() { - debug!(target: "reth::cli", ?default_jwt_path, "Reading JWT auth secret file"); - JwtSecret::from_file(&default_jwt_path) - } else { - info!(target: "reth::cli", ?default_jwt_path, "Creating JWT auth secret file"); - JwtSecret::try_create(&default_jwt_path) - } - } + None => get_or_create_jwt_secret_from_path(&default_jwt_path), } } diff --git a/bin/reth/src/utils.rs b/bin/reth/src/utils.rs index e2c74384e4ed9..6994fd81e8c1b 100644 --- a/bin/reth/src/utils.rs +++ b/bin/reth/src/utils.rs @@ -18,13 +18,14 @@ use reth_interfaces::p2p::{ use reth_primitives::{ fs, BlockHashOrNumber, ChainSpec, HeadersDirection, SealedBlock, SealedHeader, }; +use reth_rpc::{JwtError, JwtSecret}; use std::{ env::VarError, path::{Path, PathBuf}, rc::Rc, sync::Arc, }; -use tracing::info; +use tracing::{debug, info}; /// Exposing `open_db_read_only` function pub mod db { @@ -247,3 +248,14 @@ impl ListFilter { self.len = len; } } +/// Attempts to retrieve or create a JWT secret from the specified path. + +pub fn get_or_create_jwt_secret_from_path(path: &Path) -> Result { + if path.exists() { + debug!(target: "reth::cli", ?path, "Reading JWT auth secret file"); + JwtSecret::from_file(path) + } else { + info!(target: "reth::cli", ?path, "Creating JWT auth secret file"); + JwtSecret::try_create(path) + } +} From f9725a4f88c44d419be35927a48a46a25e1879d2 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 15 Nov 2023 21:25:54 +0100 Subject: [PATCH 14/77] feat: add blobstore file path functions (#5442) --- bin/reth/src/dirs.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/bin/reth/src/dirs.rs b/bin/reth/src/dirs.rs index 9d1a1f552c34f..064b18e7f462b 100644 --- a/bin/reth/src/dirs.rs +++ b/bin/reth/src/dirs.rs @@ -285,6 +285,14 @@ impl ChainPath { self.0.join("known-peers.json").into() } + /// Returns the path to the blobstore directory for this chain where blobs of unfinalized + /// transactions are stored. 
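+    /// Blobs are kept here until their transactions are finalized, so they can be re-read
+    /// when a reorged blob transaction has to be re-injected into the pool.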
+ /// + /// `//blobstore` + pub fn blobstore_path(&self) -> PathBuf { + self.0.join("blobstore").into() + } + /// Returns the path to the config file for this chain. /// /// `//reth.toml` From 5e605d20a4241d3dd19e293d2f30b61ae421bc83 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 15 Nov 2023 22:16:26 +0100 Subject: [PATCH 15/77] feat: add rpc-api trait for call bundle (#5449) --- crates/rpc/rpc-api/src/bundle.rs | 29 +++++++++++++++++++++-------- crates/rpc/rpc-api/src/lib.rs | 4 ++-- crates/rpc/rpc/src/eth/bundle.rs | 12 ++++++++++++ 3 files changed, 35 insertions(+), 10 deletions(-) diff --git a/crates/rpc/rpc-api/src/bundle.rs b/crates/rpc/rpc-api/src/bundle.rs index b4f76c9e845e8..6493edeafd751 100644 --- a/crates/rpc/rpc-api/src/bundle.rs +++ b/crates/rpc/rpc-api/src/bundle.rs @@ -1,15 +1,28 @@ +//! Additional `eth_` functions for bundles +//! +//! See also use jsonrpsee::proc_macros::rpc; use reth_primitives::{Bytes, B256}; use reth_rpc_types::{ - CancelBundleRequest, CancelPrivateTransactionRequest, EthBundleHash, EthCallBundleResponse, - EthCallBundleTransactionResult, EthSendBundle, PrivateTransactionRequest, + CancelBundleRequest, CancelPrivateTransactionRequest, EthBundleHash, EthCallBundle, + EthCallBundleResponse, EthSendBundle, PrivateTransactionRequest, }; -/// Eth bundle rpc interface. -/// -/// See also +/// A subset of the [EthBundleApi] API interface that only supports `eth_callBundle`. +#[cfg_attr(not(feature = "client"), rpc(server, namespace = "eth"))] +#[cfg_attr(feature = "client", rpc(server, client, namespace = "eth"))] +#[async_trait::async_trait] +pub trait EthCallBundleApi { + /// `eth_callBundle` can be used to simulate a bundle against a specific block number, + /// including simulating a bundle at the top of the next block. + #[method(name = "callBundle")] + async fn call_bundle( + &self, + request: EthCallBundle, + ) -> jsonrpsee::core::RpcResult; +} -/// Eth bundle rpc interface. +/// The __full__ Eth bundle rpc interface. /// /// See also #[cfg_attr(not(feature = "client"), rpc(server, namespace = "eth"))] @@ -26,8 +39,8 @@ pub trait EthBundleApi { #[method(name = "callBundle")] async fn call_bundle( &self, - request: EthCallBundleResponse, - ) -> jsonrpsee::core::RpcResult; + request: EthCallBundle, + ) -> jsonrpsee::core::RpcResult; /// `eth_cancelBundle` is used to prevent a submitted bundle from being included on-chain. See [bundle cancellations](https://docs.flashbots.net/flashbots-auction/searchers/advanced/bundle-cancellations) for more information. 
#[method(name = "cancelBundle")] diff --git a/crates/rpc/rpc-api/src/lib.rs b/crates/rpc/rpc-api/src/lib.rs index 57583080a9388..5fbd9e557241b 100644 --- a/crates/rpc/rpc-api/src/lib.rs +++ b/crates/rpc/rpc-api/src/lib.rs @@ -38,7 +38,7 @@ pub use servers::*; pub mod servers { pub use crate::{ admin::AdminApiServer, - bundle::EthBundleApiServer, + bundle::{EthBundleApiServer, EthCallBundleApiServer}, debug::DebugApiServer, engine::{EngineApiServer, EngineEthApiServer}, eth::EthApiServer, @@ -64,7 +64,7 @@ pub use clients::*; pub mod clients { pub use crate::{ admin::AdminApiClient, - bundle::EthBundleApiClient, + bundle::{EthBundleApiClient, EthCallBundleApiClient}, debug::DebugApiClient, engine::{EngineApiClient, EngineEthApiClient}, eth::EthApiClient, diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index 4f52e0579d776..c60f77d3f9b3a 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -9,12 +9,14 @@ use crate::{ }, BlockingTaskGuard, }; +use jsonrpsee::core::RpcResult; use reth_primitives::{ keccak256, revm_primitives::db::{DatabaseCommit, DatabaseRef}, U256, }; use reth_revm::database::StateProviderDatabase; +use reth_rpc_api::EthCallBundleApiServer; use reth_rpc_types::{EthCallBundle, EthCallBundleResponse, EthCallBundleTransactionResult}; use revm::{ db::CacheDB, @@ -175,6 +177,16 @@ where } } +#[async_trait::async_trait] +impl EthCallBundleApiServer for EthBundle +where + Eth: EthTransactions + 'static, +{ + async fn call_bundle(&self, request: EthCallBundle) -> RpcResult { + Ok(EthBundle::call_bundle(self, request).await?) + } +} + /// Container type for `EthBundle` internals #[derive(Debug)] struct EthBundleInner { From 0b46e16dcdc9012810726bee410047fea8be7cf9 Mon Sep 17 00:00:00 2001 From: DoTheBestToGetTheBest <146037313+DoTheBestToGetTheBest@users.noreply.github.com> Date: Wed, 15 Nov 2023 15:31:45 -0800 Subject: [PATCH 16/77] feat(eth-wire): update docs (#5450) --- crates/net/eth-wire/src/errors/eth.rs | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/crates/net/eth-wire/src/errors/eth.rs b/crates/net/eth-wire/src/errors/eth.rs index 6839bccac3e4a..c941b787d8872 100644 --- a/crates/net/eth-wire/src/errors/eth.rs +++ b/crates/net/eth-wire/src/errors/eth.rs @@ -56,24 +56,37 @@ impl From for EthStreamError { } } -/// Error variants that can occur during the `eth` sub-protocol handshake. +/// Error that can occur during the `eth` sub-protocol handshake. #[derive(thiserror::Error, Debug)] -#[allow(missing_docs)] + pub enum EthHandshakeError { + /// Status message received or sent outside of the handshake process. #[error("status message can only be recv/sent in handshake")] StatusNotInHandshake, + /// Receiving a non-status message during the handshake phase. #[error("received non-status message when trying to handshake")] NonStatusMessageInHandshake, #[error("no response received when sending out handshake")] + /// No response received during the handshake process. NoResponse, #[error(transparent)] + /// Invalid fork data. InvalidFork(#[from] ValidationError), #[error("mismatched genesis in status message: {0}")] + /// Mismatch in the genesis block during status exchange. MismatchedGenesis(GotExpectedBoxed), #[error("mismatched protocol version in status message: {0}")] + /// Mismatched protocol versions in status messages. MismatchedProtocolVersion(GotExpected), #[error("mismatched chain in status message: {0}")] + /// Mismatch in chain details in status messages. 
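+    /// The [GotExpected] payload carries the peer's advertised chain and the locally
+    /// configured chain.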
MismatchedChain(GotExpected), #[error("total difficulty bitlen is too large: got {got}, maximum {maximum}")] - TotalDifficultyBitLenTooLarge { got: usize, maximum: usize }, + /// Excessively large total difficulty bit lengths. + TotalDifficultyBitLenTooLarge { + /// The actual bit length of the total difficulty. + got: usize, + /// The maximum allowed bit length for the total difficulty. + maximum: usize, + }, } From e5362882e0da7e39912210152eebc4b08b0e6c7c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 16 Nov 2023 03:17:06 +0100 Subject: [PATCH 17/77] chore: add recovery test (#5453) --- crates/primitives/src/transaction/mod.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 2d539b1f36f9e..968336c873b70 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1543,6 +1543,18 @@ mod tests { assert_eq!(tx_bytes, encoded[..]); } + #[test] + fn test_decode_recover_mainnet_tx() { + // random mainnet tx + let tx_bytes = hex!("02f872018307910d808507204d2cb1827d0094388c818ca8b9251b393131c08a736a67ccb19297880320d04823e2701c80c001a0cf024f4815304df2867a1a74e9d2707b6abda0337d2d54a4438d453f4160f190a07ac0e6b3bc9395b5b9c8b9e6d77204a236577a5b18467b9175c01de4faa208d9"); + + let decoded = TransactionSigned::decode_enveloped(tx_bytes[..].to_vec().into()).unwrap(); + assert_eq!( + decoded.recover_signer(), + Some(Address::from_str("0x95222290DD7278Aa3Ddd389Cc1E1d165CC4BAfe5").unwrap()) + ); + } + #[test] fn decode_transaction_consumes_buffer() { let bytes = &mut &hex!("b87502f872041a8459682f008459682f0d8252089461815774383099e24810ab832a5b2a5425c154d58829a2241af62c000080c001a059e6b67f48fb32e7e570dfb11e042b5ad2e55e3ce3ce9cd989c7e06e07feeafda0016b83f4f980694ed2eee4d10667242b1f40dc406901b34125b008d334d47469")[..]; From 1b39096edda7fb0fb685076cb94baa53b09b538a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 16 Nov 2023 15:12:40 +0100 Subject: [PATCH 18/77] chore: more libmdbx cleanup (#5455) --- crates/storage/libmdbx-rs/src/environment.rs | 10 ++++- crates/storage/libmdbx-rs/src/error.rs | 43 ++++++++++++++------ crates/storage/libmdbx-rs/src/transaction.rs | 6 +-- 3 files changed, 42 insertions(+), 17 deletions(-) diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs index 83860c18c7238..2728fad4bbf4d 100644 --- a/crates/storage/libmdbx-rs/src/environment.rs +++ b/crates/storage/libmdbx-rs/src/environment.rs @@ -129,6 +129,14 @@ impl Environment { self.inner.txn_manager.as_ref() } + /// Returns the manager that handles transaction messages. + /// + /// Requires [Mode::ReadWrite] and returns None otherwise. + #[inline] + pub(crate) fn ensure_txn_manager(&self) -> Result<&SyncSender> { + self.txn_manager().ok_or(Error::WriteTransactionUnsupportedInReadOnlyMode) + } + /// Create a read-only transaction for use with the environment. #[inline] pub fn begin_ro_txn(&self) -> Result> { @@ -138,7 +146,7 @@ impl Environment { /// Create a read-write transaction for use with the environment. This method will block while /// there are any other read-write transactions open on the environment. 
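     ///
     /// Fails with [Error::WriteTransactionUnsupportedInReadOnlyMode] if the environment
     /// was opened in read-only mode.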
pub fn begin_rw_txn(&self) -> Result> { - let sender = self.txn_manager().ok_or(Error::WriteTransactionUnsupportedInReadOnlyMode)?; + let sender = self.ensure_txn_manager()?; let txn = loop { let (tx, rx) = sync_channel(0); sender diff --git a/crates/storage/libmdbx-rs/src/error.rs b/crates/storage/libmdbx-rs/src/error.rs index 250ca23cada17..3b33caa865d0b 100644 --- a/crates/storage/libmdbx-rs/src/error.rs +++ b/crates/storage/libmdbx-rs/src/error.rs @@ -1,6 +1,9 @@ use libc::c_int; use std::{ffi::CStr, fmt, result, str}; +/// An MDBX result. +pub type Result = result::Result; + /// An MDBX error kind. #[derive(Debug, thiserror::Error, Clone, PartialEq, Eq)] pub enum Error { @@ -55,7 +58,12 @@ pub enum Error { Access, TooLarge, DecodeErrorLenDiff, + /// If the [Environment](crate::Environment) was opened with + /// [EnvironmentKind::WriteMap](crate::EnvironmentKind::WriteMap) flag, nested transactions are + /// not supported. NestedTransactionsUnsupportedWithWriteMap, + /// If the [Environment](crate::Environment) was opened with in read-only mode + /// [Mode::ReadOnly](crate::flags::Mode::ReadOnly), write transactions can't be opened.. WriteTransactionUnsupportedInReadOnlyMode, Other(i32), } @@ -125,12 +133,30 @@ impl Error { Error::Multival => ffi::MDBX_EMULTIVAL, Error::WannaRecovery => ffi::MDBX_WANNA_RECOVERY, Error::KeyMismatch => ffi::MDBX_EKEYMISMATCH, - Error::DecodeError => ffi::MDBX_EINVAL, + Error::DecodeErrorLenDiff | Error::DecodeError => ffi::MDBX_EINVAL, Error::Access => ffi::MDBX_EACCESS, Error::TooLarge => ffi::MDBX_TOO_LARGE, Error::BadSignature => ffi::MDBX_EBADSIGN, + Error::WriteTransactionUnsupportedInReadOnlyMode => ffi::MDBX_EACCESS, + Error::NestedTransactionsUnsupportedWithWriteMap => ffi::MDBX_EACCESS, Error::Other(err_code) => *err_code, - _ => unreachable!(), + } + } + + /// Returns the message for this error + pub fn as_str(&self) -> &str { + match self { + Self::DecodeErrorLenDiff => "mismatched data length", + Self::NestedTransactionsUnsupportedWithWriteMap => { + "nested transactions are not supported on an environment with writemap" + } + Self::WriteTransactionUnsupportedInReadOnlyMode => { + "write transactions are not supported on an environment opened in read-only mode" + } + _ => unsafe { + let err = ffi::mdbx_strerror(self.to_err_code()); + str::from_utf8_unchecked(CStr::from_ptr(err).to_bytes()) + }, } } } @@ -143,20 +169,11 @@ impl From for i32 { impl fmt::Display for Error { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - let value = match self { - Self::DecodeErrorLenDiff => "Mismatched data length", - _ => unsafe { - let err = ffi::mdbx_strerror(self.to_err_code()); - str::from_utf8_unchecked(CStr::from_ptr(err).to_bytes()) - }, - }; - write!(fmt, "{value}") + write!(fmt, "{}", self.as_str()) } } -/// An MDBX result. 
-pub type Result = result::Result; - +#[inline] pub fn mdbx_result(err_code: c_int) -> Result { match err_code { ffi::MDBX_SUCCESS => Ok(false), diff --git a/crates/storage/libmdbx-rs/src/transaction.rs b/crates/storage/libmdbx-rs/src/transaction.rs index 6d3615b208662..60df280e69ad0 100644 --- a/crates/storage/libmdbx-rs/src/transaction.rs +++ b/crates/storage/libmdbx-rs/src/transaction.rs @@ -189,7 +189,7 @@ where } else { let (sender, rx) = sync_channel(0); self.env() - .txn_manager() + .ensure_txn_manager() .unwrap() .send(TxnManagerMessage::Commit { tx: TxnPtr(txn), sender }) .unwrap(); @@ -319,7 +319,7 @@ where } else { let (sender, rx) = sync_channel(0); self.env - .txn_manager() + .ensure_txn_manager() .unwrap() .send(TxnManagerMessage::Abort { tx: TxnPtr(txn), sender }) .unwrap(); @@ -486,7 +486,7 @@ impl<'env> Transaction<'env, RW> { self.txn_execute(|txn| { let (tx, rx) = sync_channel(0); self.env() - .txn_manager() + .ensure_txn_manager() .unwrap() .send(TxnManagerMessage::Begin { parent: TxnPtr(txn), From fb981cec4d14b356da894b0f6692414cac8f292e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 16 Nov 2023 15:13:24 +0100 Subject: [PATCH 19/77] chore: move criterion and pprof to workspace (#5461) --- Cargo.lock | 4 ++-- Cargo.toml | 3 ++- crates/primitives/Cargo.toml | 4 ++-- crates/stages/Cargo.toml | 4 ++-- crates/storage/db/Cargo.toml | 4 ++-- crates/storage/libmdbx-rs/Cargo.toml | 4 ++-- crates/transaction-pool/Cargo.toml | 2 +- crates/trie/Cargo.toml | 2 +- 8 files changed, 14 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 934b8fb06e75e..e1f81c2c2c593 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5058,9 +5058,9 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "pprof" -version = "0.12.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978385d59daf9269189d052ca8a84c1acfd0715c0599a5d5188d4acc078ca46a" +checksum = "ef5c97c51bd34c7e742402e216abdeb44d415fbe6ae41d56b114723e953711cb" dependencies = [ "backtrace", "cfg-if", diff --git a/Cargo.toml b/Cargo.toml index d003e1e511902..d0bbf106e8794 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -214,7 +214,8 @@ toml = "0.8" arbitrary = "1.1" assert_matches = "1.5.0" tempfile = "3.8" - +criterion = "0.5" +pprof = "0.13" proptest = "1.0" proptest-derive = "0.4" serial_test = "2" diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 9bd00d6eeaaaa..4a9cfcedc4ab0 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -81,8 +81,8 @@ hash-db = "~0.15" # necessary so we don't hit a "undeclared 'std'": # https://github.com/paradigmxyz/reth/pull/177#discussion_r1021172198 secp256k1.workspace = true -criterion = "0.5" -pprof = { version = "0.12", features = ["flamegraph", "frame-pointer", "criterion"] } +criterion.workspace = true +pprof = { workspace = true, features = ["flamegraph", "frame-pointer", "criterion"] } [features] default = ["c-kzg"] diff --git a/crates/stages/Cargo.toml b/crates/stages/Cargo.toml index 08e04fdaf6d32..6da02ad00a702 100644 --- a/crates/stages/Cargo.toml +++ b/crates/stages/Cargo.toml @@ -70,8 +70,8 @@ rand.workspace = true paste = "1.0" # Stage benchmarks -pprof = { version = "0.12", features = ["flamegraph", "frame-pointer", "criterion"] } -criterion = { version = "0.5", features = ["async_futures"] } +pprof = { workspace = true, features = ["flamegraph", "frame-pointer", "criterion"] } +criterion = { workspace = true, 
features = ["async_futures"] } # io serde_json.workspace = true diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index c6103d7329546..60e13ad2f67f6 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -62,8 +62,8 @@ reth-interfaces.workspace = true tempfile.workspace = true test-fuzz = "4" -pprof = { version = "0.12", features = ["flamegraph", "frame-pointer", "criterion"] } -criterion = "0.5" +pprof = { workspace = true, features = ["flamegraph", "frame-pointer", "criterion"] } +criterion.workspace = true iai = "0.1.1" tokio = { workspace = true, features = ["full"] } diff --git a/crates/storage/libmdbx-rs/Cargo.toml b/crates/storage/libmdbx-rs/Cargo.toml index 094724f588e93..dc65f34faae22 100644 --- a/crates/storage/libmdbx-rs/Cargo.toml +++ b/crates/storage/libmdbx-rs/Cargo.toml @@ -29,8 +29,8 @@ default = [] return-borrowed = [] [dev-dependencies] -pprof = { version = "0.12", features = ["flamegraph", "frame-pointer", "criterion"] } -criterion = "0.5" +pprof = { workspace = true, features = ["flamegraph", "frame-pointer", "criterion"] } +criterion.workspace = true rand.workspace = true rand_xorshift = "0.3" tempfile.workspace = true diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index f654fe9a6da93..93548f83f4a40 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -56,7 +56,7 @@ reth-provider = { workspace = true, features = ["test-utils"] } paste = "1.0" rand = "0.8" proptest.workspace = true -criterion = "0.5" +criterion.workspace = true assert_matches.workspace = true tempfile.workspace = true diff --git a/crates/trie/Cargo.toml b/crates/trie/Cargo.toml index f4cd70f21158e..1aaf2be53be9c 100644 --- a/crates/trie/Cargo.toml +++ b/crates/trie/Cargo.toml @@ -46,7 +46,7 @@ tokio-stream.workspace = true once_cell.workspace = true serde_json.workspace = true pretty_assertions = "1.3.0" -criterion = "0.5" +criterion.workspace = true [features] test-utils = ["triehash"] From 460b5f6fda19ee39c35c867bf84a71bd8cd3ef09 Mon Sep 17 00:00:00 2001 From: clabby Date: Thu, 16 Nov 2023 10:32:53 -0500 Subject: [PATCH 20/77] feat(book): `op-reth` runbook (#5440) --- book/SUMMARY.md | 1 + book/run/optimism.md | 125 +++++++++++++++++++++++++++++++++++++++++ book/run/run-a-node.md | 1 + 3 files changed, 127 insertions(+) create mode 100644 book/run/optimism.md diff --git a/book/SUMMARY.md b/book/SUMMARY.md index d207b9a817ac7..1c77704a950be 100644 --- a/book/SUMMARY.md +++ b/book/SUMMARY.md @@ -9,6 +9,7 @@ 1. [Update Priorities](./installation/priorities.md) 1. [Run a Node](./run/run-a-node.md) 1. [Mainnet or official testnets](./run/mainnet.md) + 1. [OP Stack](./run/optimism.md) 1. [Private testnet](./run/private-testnet.md) 1. [Metrics](./run/observability.md) 1. [Configuring Reth](./run/config.md) diff --git a/book/run/optimism.md b/book/run/optimism.md new file mode 100644 index 0000000000000..1e6a884af8fef --- /dev/null +++ b/book/run/optimism.md @@ -0,0 +1,125 @@ +# Running Reth on OP Stack chains + +`reth` ships with the `optimism` feature flag in several crates, including the binary, enabling support for OP Stack chains out of the box. Optimism has a small diff from the [L1 EELS][l1-el-spec], +comprising of the following key changes: +1. A new transaction type, [`0x7E (Deposit)`][deposit-spec], which is used to deposit funds from L1 to L2. +1. 
Modifications to the `PayloadAttributes` that allow the [sequencer][sequencer] to submit transactions to the EL through the Engine API. Payloads will be built with deposit transactions at the top of the block, + with the first deposit transaction always being the "L1 Info Transaction." +1. EIP-1559 denominator and elasticity parameters have been adjusted to account for the lower block time (2s) on L2. Otherwise, the 1559 formula remains the same. +1. Network fees are distributed to the various [fee vaults][l2-el-spec]. +1. ... and some other minor changes. + +For a more in-depth list of changes and their rationale, as well as specifics about the OP Stack specification such as transaction ordering and more, see the documented [`op-geth` diff][op-geth-forkdiff], +the [L2 EL specification][l2-el-spec], and the [OP Stack specification][op-stack-spec]. + +## Running on Optimism + +> `op-reth` is currently in the *alpha* stage of development. It is not yet ready for production use, and therefore does not have a stable release. To run it, you must build the `op-reth` binary from source. +> If you do encounter any bugs during this early stage of development, please report them in an issue on the [GitHub repository][reth]. +> +> `op-reth` also does **not** currently support OP Stack chains with legacy, pre-Bedrock state, i.e. `Optimism Mainnet` and `Optimism Goerli`. This will be possible once a database migration tool for pre-Bedrock +> state is released, with the capability to extract the legacy state from the old `l2geth` LevelDB datadir and transplant it into Reth's MDBX database. + +You will need three things to run `op-reth`: +1. An archival L1 node, synced to the settlement layer of the OP Stack chain you want to sync (e.g. `reth`, `geth`, `besu`, `nethermind`, etc.) +1. A rollup node (e.g. `op-node`, `magi`, `hildr`, etc.) +1. An instance of `op-reth`. + +For this example, we'll start a `Base Mainnet` node. + +### Installing `op-reth` + +To run Reth on Optimism, first install `op-reth` via the `Makefile` in the workspace root: + +```sh +git clone git@github.com:paradigmxyz/reth.git && \ + cd reth && \ + make install-op +``` + +This will install the `op-reth` binary to `~/.cargo/bin/op-reth`. + +### Installing a Rollup Node + +Next, you'll need to install a [Rollup Node][rollup-node-spec], which is the equivalent to the Consensus Client on the OP Stack. Available options include: +1. [`op-node`][op-node] +1. [`magi`][magi] +1. [`hildr`][hildr] + +For the sake of this tutorial, we'll use the reference implementation of the Rollup Node maintained by OP Labs, the `op-node`. The `op-node` can be built from source, or pulled from a [Docker image available on Google Cloud][op-node-docker]. + +**`rethdb` build tag** +The `op-node` also comes with an experimental `rethdb` build tag, which allows it to read receipts directly from an L1 `reth` database during [derivation][derivation-spec]. This can speed up sync times, but it is not required if you do not +have access to the L1 archive node on the same machine as your L2 node. + +To build the `op-node` with the `rethdb` build tag enabled: +```sh +git clone git@github.com:ethereum-optimism/optimism.git && \ + (cd optimism/op-service/rethdb-reader && cargo build --release) && \ + cd optimism/op-node && \ + go build -v -tags rethdb -o ./bin/op-node ./cmd/main.go && \ + mv bin/op-node /usr/bin/op-node +``` +This will build the `rethdb-reader` dylib and instruct the `op-node` build to statically link this dylib into the binary. 
The `op-node` binary will be installed to `/usr/bin/op-node`.
+
+### Running `op-reth`
+
+The `optimism` feature flag in `op-reth` adds several new CLI flags to the `reth` binary:
+1. `--rollup.sequencer-http <uri>` - The sequencer endpoint to connect to. Transactions sent to the `op-reth` EL are also forwarded to this sequencer endpoint for inclusion, as the sequencer is the entity that builds blocks on OP Stack chains.
+1. `--rollup.disable-tx-pool-gossip` - Disables gossiping of transactions in the mempool to peers. This can be omitted for personal nodes, though providers should always opt to enable this flag.
+1. `--rollup.enable-genesis-walkback` - Disables setting the forkchoice status to tip on startup, making the `op-node` walk back to genesis and verify the integrity of the chain before starting to sync. This can be omitted unless a corruption of local chainstate is suspected.
+
+Base's `rollup.json` files, which contain various configuration fields for the rollup, can be found in their [node][base-node] repository, under the respective L1 settlement layer's directory (`mainnet`, `goerli`, & `sepolia`).
+
+First, ensure that your L1 archival node is running and synced to tip. Then, start `op-reth` with the `--rollup.sequencer-http` flag set to the `Base Mainnet` sequencer endpoint:
+```sh
+op-reth node \
+    --chain base \
+    --rollup.sequencer-http https://sequencer.base.org \
+    --http \
+    --ws \
+    --authrpc.port 9551 \
+    --authrpc.jwtsecret /path/to/jwt.hex
+```
+
+Then, once `op-reth` has been started, start up the `op-node`:
+```sh
+op-node \
+    --l1=<your-l1-rpc-endpoint> \
+    --l2=http://localhost:9551 \
+    --rollup.config=/path/to/rollup.json \
+    --l2.jwt-secret=/path/to/jwt.hex \
+    --rpc.addr=0.0.0.0 \
+    --rpc.port=7000 \
+    --l1.trustrpc
+```
+
+If you opted to build the `op-node` with the `rethdb` build tag, the `reth_db` RPC kind can be enabled by appending two extra flags to the `op-node` invocation:
+
+> Note, the `reth_db_path` is the path to the `db` folder inside the reth datadir, not the `mdbx.dat` file itself. This can be fetched from `op-reth db path [--chain <chain>]`, or, if you are using a custom datadir location via the `--datadir` flag,
+> by appending `/db` to the end of that path.
+
+```sh
+op-node \
+    # ...
+    --l1.rpckind=reth_db \
+    --l1.rethdb=<reth_db_path>
+```
+
+[l1-el-spec]: https://github.com/ethereum/execution-specs
+[rollup-node-spec]: https://github.com/ethereum-optimism/optimism/blob/develop/specs/rollup-node.md
+[op-geth-forkdiff]: https://op-geth.optimism.io
+[sequencer]: https://github.com/ethereum-optimism/optimism/blob/develop/specs/introduction.md#sequencers
+[op-stack-spec]: https://github.com/ethereum-optimism/optimism/tree/develop/specs
+[l2-el-spec]: https://github.com/ethereum-optimism/optimism/blob/develop/specs/exec-engine.md
+[deposit-spec]: https://github.com/ethereum-optimism/optimism/blob/develop/specs/deposits.md
+[derivation-spec]: https://github.com/ethereum-optimism/optimism/blob/develop/specs/derivation.md
+
+[op-node-docker]: https://console.cloud.google.com/artifacts/docker/oplabs-tools-artifacts/us/images/op-node
+
+[reth]: https://github.com/paradigmxyz/reth
+[op-node]: https://github.com/ethereum-optimism/optimism/tree/develop/op-node
+[magi]: https://github.com/a16z/magi
+[hildr]: https://github.com/optimism-java/hildr
+
+[base-node]: https://github.com/base-org/node/tree/main
diff --git a/book/run/run-a-node.md b/book/run/run-a-node.md
index 67d73614a2f5c..a836c9ecf1d54 100644
--- a/book/run/run-a-node.md
+++ b/book/run/run-a-node.md
@@ -4,6 +4,7 @@ Congratulations, now that you have installed Reth, it's time to run it!
 In this chapter we'll go through a few different topics you'll encounter when running Reth, including:
 1. [Running on mainnet or official testnets](./mainnet.md)
+1. [Running on OP Stack chains](./optimism.md)
 1. [Logs and Observability](./observability.md)
 1. [Configuring reth.toml](./config.md)
 1. [Transaction types](./transactions.md)
From ec3dbfe9959c3487e10bd6e68939e3798dc00c93 Mon Sep 17 00:00:00 2001
From: clabby
Date: Thu, 16 Nov 2023 12:49:02 -0500
Subject: [PATCH 21/77] chore(op-reth): Add temporary `Canyon` warning (#5462)

---
 bin/reth/src/optimism.rs | 13 +++++++++++++
 book/run/optimism.md     |  3 +++
 2 files changed, 16 insertions(+)

diff --git a/bin/reth/src/optimism.rs b/bin/reth/src/optimism.rs
index 554b91a3ae280..61f6bf2220811 100644
--- a/bin/reth/src/optimism.rs
+++ b/bin/reth/src/optimism.rs
@@ -8,8 +8,21 @@ compile_error!("Cannot build the `op-reth` binary with the `optimism` feature fl

 #[cfg(feature = "optimism")]
 fn main() {
+    print_canyon_warning();
     if let Err(err) = reth::cli::run() {
         eprintln!("Error: {err:?}");
         std::process::exit(1);
     }
 }
+
+#[inline]
+fn print_canyon_warning() {
+    println!("---------------------- [ WARNING! ] ----------------------");
+    println!("`op-reth` does not currently support the Canyon Hardfork,");
+    println!("which went live on 2023-11-14 12PM EST on Sepolia and Goerli.");
+    println!("The node will cease to sync at that blocktime (1699981200).");
+    println!("Please consult the Canyon Hardfork tracking issue to follow");
+    println!("along with the progress of the hardfork implementation:");
+    println!("https://github.com/paradigmxyz/reth/issues/5210");
+    println!("----------------------------------------------------------\n");
+}
diff --git a/book/run/optimism.md b/book/run/optimism.md
index 1e6a884af8fef..a623f4a471f06 100644
--- a/book/run/optimism.md
+++ b/book/run/optimism.md
@@ -1,5 +1,8 @@
 # Running Reth on OP Stack chains

+> **Note**: `op-reth` does not currently support the Canyon Hardfork, which went live on 2023-11-14 12PM EST on Sepolia and Goerli.
If the network being synced has enabled Canyon, `op-reth` will cease to sync at that blocktime (`1699981200`). +> Please consult the [Canyon Hardfork tracking issue](https://github.com/paradigmxyz/reth/issues/5210) to follow along with the progress of the hardfork implementation. + `reth` ships with the `optimism` feature flag in several crates, including the binary, enabling support for OP Stack chains out of the box. Optimism has a small diff from the [L1 EELS][l1-el-spec], comprising of the following key changes: 1. A new transaction type, [`0x7E (Deposit)`][deposit-spec], which is used to deposit funds from L1 to L2. From 2b4eb8438c6b3fffe99682e49d0e8abf6f1cbd8d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 16 Nov 2023 19:54:07 +0100 Subject: [PATCH 22/77] fix: fetch 4844 blob before reinjected reorged blob txs (#5460) --- crates/net/network/src/transactions.rs | 2 +- crates/primitives/src/transaction/mod.rs | 2 +- crates/primitives/src/transaction/pooled.rs | 41 ++++++++++++++++++- crates/rpc/rpc/src/eth/api/transactions.rs | 5 ++- crates/transaction-pool/src/maintain.rs | 29 ++++++++++++- .../transaction-pool/src/test_utils/mock.rs | 2 +- crates/transaction-pool/src/traits.rs | 10 ++--- crates/transaction-pool/src/validate/eth.rs | 5 ++- 8 files changed, 80 insertions(+), 16 deletions(-) diff --git a/crates/net/network/src/transactions.rs b/crates/net/network/src/transactions.rs index 7b35e846890f7..24f6f8ffb24ac 100644 --- a/crates/net/network/src/transactions.rs +++ b/crates/net/network/src/transactions.rs @@ -734,7 +734,7 @@ where } Entry::Vacant(entry) => { // this is a new transaction that should be imported into the pool - let pool_transaction = ::from_recovered_transaction(tx); + let pool_transaction = ::from_recovered_pooled_transaction(tx); let pool = self.pool.clone(); diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 968336c873b70..91cbb3c830dbd 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1484,7 +1484,7 @@ impl FromRecoveredTransaction for TransactionSignedEcRecovered { #[cfg(feature = "c-kzg")] pub trait FromRecoveredPooledTransaction { /// Converts to this type from the given [`PooledTransactionsElementEcRecovered`]. 
-    fn from_recovered_transaction(tx: PooledTransactionsElementEcRecovered) -> Self;
+    fn from_recovered_pooled_transaction(tx: PooledTransactionsElementEcRecovered) -> Self;
 }

 /// The inverse of [`FromRecoveredTransaction`] that ensures the transaction can be sent over the
diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs
index 55302c497599f..493c723a8342f 100644
--- a/crates/primitives/src/transaction/pooled.rs
+++ b/crates/primitives/src/transaction/pooled.rs
@@ -4,8 +4,9 @@
 #![cfg_attr(docsrs, doc(cfg(feature = "c-kzg")))]

 use crate::{
-    Address, BlobTransaction, Bytes, Signature, Transaction, TransactionSigned,
-    TransactionSignedEcRecovered, TxEip1559, TxEip2930, TxHash, TxLegacy, B256, EIP4844_TX_TYPE_ID,
+    Address, BlobTransaction, BlobTransactionSidecar, Bytes, Signature, Transaction,
+    TransactionSigned, TransactionSignedEcRecovered, TxEip1559, TxEip2930, TxHash, TxLegacy, B256,
+    EIP4844_TX_TYPE_ID,
 };
 use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header, EMPTY_LIST_CODE};
 use bytes::Buf;
@@ -71,6 +72,27 @@ impl PooledTransactionsElement {
         Ok(tx.into())
     }

+    /// Converts from an EIP-4844 [TransactionSigned] to a
+    /// [PooledTransactionsElement] with the given sidecar.
+    ///
+    /// Returns an error if the transaction is not an EIP-4844 transaction.
+    pub fn try_from_blob_transaction(
+        tx: TransactionSigned,
+        sidecar: BlobTransactionSidecar,
+    ) -> Result<Self, TransactionSigned> {
+        let TransactionSigned { transaction, signature, hash } = tx;
+        if let Transaction::Eip4844(tx) = transaction {
+            Ok(PooledTransactionsElement::BlobTransaction(BlobTransaction {
+                transaction: tx,
+                signature,
+                hash,
+                sidecar,
+            }))
+        } else {
+            Err(TransactionSigned { transaction, signature, hash })
+        }
+    }
+
     /// Heavy operation that returns signature hash over rlp encoded transaction.
     /// It is only for signature signing or signer recovery.
     pub fn signature_hash(&self) -> B256 {
@@ -575,6 +597,21 @@ impl PooledTransactionsElementEcRecovered {
     ) -> Self {
         Self { transaction, signer }
     }
+
+    /// Converts from an EIP-4844 [TransactionSignedEcRecovered] to a
+    /// [PooledTransactionsElementEcRecovered] with the given sidecar.
+    ///
+    /// Returns an error if the transaction is not an EIP-4844 transaction.
+ pub fn try_from_blob_transaction( + tx: TransactionSignedEcRecovered, + sidecar: BlobTransactionSidecar, + ) -> Result { + let TransactionSignedEcRecovered { signer, signed_transaction } = tx; + let transaction = + PooledTransactionsElement::try_from_blob_transaction(signed_transaction, sidecar) + .map_err(|tx| TransactionSignedEcRecovered { signer, signed_transaction: tx })?; + Ok(Self { transaction, signer }) + } } impl From for PooledTransactionsElementEcRecovered { diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 4989be4219707..78307ee333726 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -491,7 +491,7 @@ where self.forward_to_sequencer(&tx).await?; let recovered = recover_raw_transaction(tx)?; - let pool_transaction = ::from_recovered_transaction(recovered); + let pool_transaction = ::from_recovered_pooled_transaction(recovered); // submit the transaction to the pool with a `Local` origin let hash = self.pool().add_transaction(TransactionOrigin::Local, pool_transaction).await?; @@ -578,7 +578,8 @@ where let recovered = signed_tx.into_ecrecovered().ok_or(EthApiError::InvalidTransactionSignature)?; - let pool_transaction = ::from_recovered_transaction(recovered.into()); + let pool_transaction = + ::from_recovered_pooled_transaction(recovered.into()); // submit the transaction to the pool with a `Local` origin let hash = self.pool().add_transaction(TransactionOrigin::Local, pool_transaction).await?; diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 19c3d6d513d0d..692a5177bb24c 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -12,7 +12,8 @@ use futures_util::{ }; use reth_interfaces::RethError; use reth_primitives::{ - Address, BlockHash, BlockNumber, BlockNumberOrTag, FromRecoveredTransaction, + Address, BlockHash, BlockNumber, BlockNumberOrTag, FromRecoveredPooledTransaction, + FromRecoveredTransaction, PooledTransactionsElementEcRecovered, }; use reth_provider::{ BlockReaderIdExt, BundleStateWithReceipts, CanonStateNotification, ChainSpecProvider, @@ -286,7 +287,31 @@ pub async fn maintain_transaction_pool( let pruned_old_transactions = old_blocks .transactions_ecrecovered() .filter(|tx| !new_mined_transactions.contains(&tx.hash)) - .map(
<P as TransactionPool>
::Transaction::from_recovered_transaction) + .filter_map(|tx| { + if tx.is_eip4844() { + // reorged blobs no longer include the blob, which is necessary for + // validating the transaction. Even though the transaction could have + // been validated previously, we still need the blob in order to + // accurately set the transaction's + // encoded-length which is propagated over the network. + pool.get_blob(tx.hash) + .ok() + .flatten() + .and_then(|sidecar| { + PooledTransactionsElementEcRecovered::try_from_blob_transaction( + tx, sidecar, + ) + .ok() + }) + .map( +
<P as TransactionPool>
::Transaction::from_recovered_pooled_transaction, + ) + } else { + Some(
<P as TransactionPool>
::Transaction::from_recovered_transaction( + tx, + )) + } + }) .collect::>(); // update the pool first diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index a59f173ad6358..d289597d2b652 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -778,7 +778,7 @@ impl FromRecoveredTransaction for MockTransaction { } impl FromRecoveredPooledTransaction for MockTransaction { - fn from_recovered_transaction(tx: PooledTransactionsElementEcRecovered) -> Self { + fn from_recovered_pooled_transaction(tx: PooledTransactionsElementEcRecovered) -> Self { FromRecoveredTransaction::from_recovered_transaction(tx.into_ecrecovered_transaction()) } } diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 177f462cff609..91455df3d8668 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -921,6 +921,10 @@ impl PoolTransaction for EthPooledTransaction { } } + fn access_list(&self) -> Option<&AccessList> { + self.transaction.access_list() + } + /// Returns the EIP-1559 Priority fee the caller is paying to the block author. /// /// This will return `None` for non-EIP1559 transactions @@ -939,10 +943,6 @@ impl PoolTransaction for EthPooledTransaction { self.transaction.max_fee_per_blob_gas() } - fn access_list(&self) -> Option<&AccessList> { - self.transaction.access_list() - } - /// Returns the effective tip for this transaction. /// /// For EIP-1559 transactions: `min(max_fee_per_gas - base_fee, max_priority_fee_per_gas)`. @@ -1029,7 +1029,7 @@ impl FromRecoveredTransaction for EthPooledTransaction { } impl FromRecoveredPooledTransaction for EthPooledTransaction { - fn from_recovered_transaction(tx: PooledTransactionsElementEcRecovered) -> Self { + fn from_recovered_pooled_transaction(tx: PooledTransactionsElementEcRecovered) -> Self { EthPooledTransaction::from(tx) } } diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index c8ca6891d767e..169f17263e4af 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -771,8 +771,9 @@ mod tests { let data = hex::decode(raw).unwrap(); let tx = PooledTransactionsElement::decode_enveloped(data.into()).unwrap(); - let transaction = - EthPooledTransaction::from_recovered_transaction(tx.try_into_ecrecovered().unwrap()); + let transaction = EthPooledTransaction::from_recovered_pooled_transaction( + tx.try_into_ecrecovered().unwrap(), + ); let res = ensure_intrinsic_gas(&transaction, false); assert!(res.is_ok()); let res = ensure_intrinsic_gas(&transaction, true); From 64500e93a2be0d3ada3a104e332dbed3b5e3ea81 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 16 Nov 2023 19:56:39 +0100 Subject: [PATCH 23/77] chore: make decode_enveloped args consistent with other fns (#5456) --- crates/payload/builder/src/payload.rs | 2 +- crates/primitives/src/transaction/mod.rs | 12 +++++------- crates/primitives/src/transaction/optimism.rs | 2 +- crates/revm/src/optimism/mod.rs | 2 +- crates/rpc/rpc-types-compat/src/engine/payload.rs | 2 +- 5 files changed, 9 insertions(+), 11 deletions(-) diff --git a/crates/payload/builder/src/payload.rs b/crates/payload/builder/src/payload.rs index e360cfefaa243..919c0705afcde 100644 --- a/crates/payload/builder/src/payload.rs +++ b/crates/payload/builder/src/payload.rs @@ -173,7 +173,7 @@ impl PayloadBuilderAttributes { .as_deref() 
.unwrap_or(&[]) .iter() - .map(|tx| TransactionSigned::decode_enveloped(tx.clone())) + .map(|tx| TransactionSigned::decode_enveloped(&mut tx.as_ref())) .collect::>()?; (payload_id(&parent, &attributes, &transactions), transactions) }; diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 91cbb3c830dbd..cf7169a01867e 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1241,9 +1241,7 @@ impl TransactionSigned { /// /// To decode EIP-4844 transactions in `eth_sendRawTransaction`, use /// [PooledTransactionsElement::decode_enveloped]. - pub fn decode_enveloped(tx: Bytes) -> alloy_rlp::Result { - let mut data = tx.as_ref(); - + pub fn decode_enveloped(data: &mut &[u8]) -> alloy_rlp::Result { if data.is_empty() { return Err(RlpError::InputTooShort) } @@ -1251,9 +1249,9 @@ impl TransactionSigned { // Check if the tx is a list if data[0] >= EMPTY_LIST_CODE { // decode as legacy transaction - TransactionSigned::decode_rlp_legacy_transaction(&mut data) + TransactionSigned::decode_rlp_legacy_transaction(data) } else { - TransactionSigned::decode_enveloped_typed_transaction(&mut data) + TransactionSigned::decode_enveloped_typed_transaction(data) } } @@ -1548,7 +1546,7 @@ mod tests { // random mainnet tx let tx_bytes = hex!("02f872018307910d808507204d2cb1827d0094388c818ca8b9251b393131c08a736a67ccb19297880320d04823e2701c80c001a0cf024f4815304df2867a1a74e9d2707b6abda0337d2d54a4438d453f4160f190a07ac0e6b3bc9395b5b9c8b9e6d77204a236577a5b18467b9175c01de4faa208d9"); - let decoded = TransactionSigned::decode_enveloped(tx_bytes[..].to_vec().into()).unwrap(); + let decoded = TransactionSigned::decode_enveloped(&mut &tx_bytes[..]).unwrap(); assert_eq!( decoded.recover_signer(), Some(Address::from_str("0x95222290DD7278Aa3Ddd389Cc1E1d165CC4BAfe5").unwrap()) @@ -1728,7 +1726,7 @@ mod tests { fn test_envelop_decode() { // random tx: let input = bytes!("02f871018302a90f808504890aef60826b6c94ddf4c5025d1a5742cf12f74eec246d4432c295e487e09c3bbcc12b2b80c080a0f21a4eacd0bf8fea9c5105c543be5a1d8c796516875710fafafdf16d16d8ee23a001280915021bb446d1973501a67f93d2b38894a514b976e7b46dc2fe54598d76"); - let decoded = TransactionSigned::decode_enveloped(input.clone()).unwrap(); + let decoded = TransactionSigned::decode_enveloped(&mut input.as_ref()).unwrap(); let encoded = decoded.envelope_encoded(); assert_eq!(encoded, input); diff --git a/crates/primitives/src/transaction/optimism.rs b/crates/primitives/src/transaction/optimism.rs index 133a507b3b9a2..498e9b6873ce6 100644 --- a/crates/primitives/src/transaction/optimism.rs +++ b/crates/primitives/src/transaction/optimism.rs @@ -155,7 +155,7 @@ mod tests { fn test_rlp_roundtrip() { let bytes = Bytes::from_static(&hex!("7ef9015aa044bae9d41b8380d781187b426c6fe43df5fb2fb57bd4466ef6a701e1f01e015694deaddeaddeaddeaddeaddeaddeaddeaddead000194420000000000000000000000000000000000001580808408f0d18001b90104015d8eb900000000000000000000000000000000000000000000000000000000008057650000000000000000000000000000000000000000000000000000000063d96d10000000000000000000000000000000000000000000000000000000000009f35273d89754a1e0387b89520d989d3be9c37c1f32495a88faf1ea05c61121ab0d1900000000000000000000000000000000000000000000000000000000000000010000000000000000000000002d679b567db6187c0c8323fa982cfb88b74dbcc7000000000000000000000000000000000000000000000000000000000000083400000000000000000000000000000000000000000000000000000000000f4240")); - let tx_a = TransactionSigned::decode_enveloped(bytes.clone()).unwrap(); + 
let tx_a = TransactionSigned::decode_enveloped(&mut bytes.as_ref()).unwrap(); let tx_b = TransactionSigned::decode(&mut &bytes[..]).unwrap(); let mut buf_a = BytesMut::default(); diff --git a/crates/revm/src/optimism/mod.rs b/crates/revm/src/optimism/mod.rs index 715d43be7d2b3..ced44bd91ca96 100644 --- a/crates/revm/src/optimism/mod.rs +++ b/crates/revm/src/optimism/mod.rs @@ -159,7 +159,7 @@ mod test_l1_fee { use reth_primitives::{hex_literal::hex, Bytes, Header, TransactionSigned}; let bytes = Bytes::from_static(&hex!("7ef9015aa044bae9d41b8380d781187b426c6fe43df5fb2fb57bd4466ef6a701e1f01e015694deaddeaddeaddeaddeaddeaddeaddeaddead000194420000000000000000000000000000000000001580808408f0d18001b90104015d8eb900000000000000000000000000000000000000000000000000000000008057650000000000000000000000000000000000000000000000000000000063d96d10000000000000000000000000000000000000000000000000000000000009f35273d89754a1e0387b89520d989d3be9c37c1f32495a88faf1ea05c61121ab0d1900000000000000000000000000000000000000000000000000000000000000010000000000000000000000002d679b567db6187c0c8323fa982cfb88b74dbcc7000000000000000000000000000000000000000000000000000000000000083400000000000000000000000000000000000000000000000000000000000f4240")); - let l1_info_tx = TransactionSigned::decode_enveloped(bytes).unwrap(); + let l1_info_tx = TransactionSigned::decode_enveloped(&mut bytes.as_ref()).unwrap(); let mock_block = Block { header: Header::default(), body: vec![l1_info_tx], diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index adb7c6cd0308e..b007accc5b3f0 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -23,7 +23,7 @@ pub fn try_payload_v1_to_block(payload: ExecutionPayloadV1) -> Result, _>>()?; let transactions_root = proofs::calculate_transaction_root(&transactions); From baf407f5edba76140ce53b7aee0bc0fbb5653dd0 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 16 Nov 2023 20:06:29 +0100 Subject: [PATCH 24/77] feat: add eth-call-bundle module (#5465) --- crates/rpc/rpc-builder/src/lib.rs | 103 +++++++++++++++++++++--------- 1 file changed, 74 insertions(+), 29 deletions(-) diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index e69103628074b..b23029b75fc2e 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -135,19 +135,32 @@ #![warn(missing_debug_implementations, missing_docs, unreachable_pub, rustdoc::all)] #![deny(unused_must_use, rust_2018_idioms)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use crate::{ - auth::AuthRpcModule, error::WsHttpSamePortError, metrics::RpcServerMetrics, - RpcModuleSelection::Selection, + +use std::{ + collections::{HashMap, HashSet}, + fmt, + net::{Ipv4Addr, SocketAddr, SocketAddrV4}, + str::FromStr, + time::{Duration, SystemTime, UNIX_EPOCH}, }; -use constants::*; -use error::{RpcError, ServerKind}; + use hyper::{header::AUTHORIZATION, HeaderMap}; +pub use jsonrpsee::server::ServerBuilder; use jsonrpsee::{ server::{IdProvider, Server, ServerHandle}, Methods, RpcModule, }; +use serde::{Deserialize, Serialize, Serializer}; +use strum::{AsRefStr, EnumVariantNames, ParseError, VariantNames}; +use tower::layer::util::{Identity, Stack}; +use tower_http::cors::CorsLayer; +use tracing::{instrument, trace}; + +use constants::*; +use error::{RpcError, ServerKind}; use reth_ipc::server::IpcServer; -use reth_network_api::{NetworkInfo, Peers}; +pub use 
reth_ipc::server::{Builder as IpcServerBuilder, Endpoint}; +use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; use reth_provider::{ AccountReader, BlockReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, StateProviderFactory, @@ -156,6 +169,7 @@ use reth_rpc::{ eth::{ cache::{cache_new_blocks_task, EthStateCache}, gas_oracle::GasPriceOracle, + EthBundle, }, AdminApi, AuthLayer, BlockingTaskGuard, BlockingTaskPool, Claims, DebugApi, EngineEthApi, EthApi, EthFilter, EthPubSub, EthSubscriptionIdProvider, JwtAuthValidator, JwtSecret, NetApi, @@ -163,19 +177,14 @@ use reth_rpc::{ }; use reth_rpc_api::{servers::*, EngineApiServer}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; -use reth_transaction_pool::TransactionPool; -use serde::{Deserialize, Serialize, Serializer}; -use std::{ - collections::{HashMap, HashSet}, - fmt, - net::{Ipv4Addr, SocketAddr, SocketAddrV4}, - str::FromStr, - time::{Duration, SystemTime, UNIX_EPOCH}, +use reth_transaction_pool::{noop::NoopTransactionPool, TransactionPool}; + +use crate::{ + auth::AuthRpcModule, error::WsHttpSamePortError, metrics::RpcServerMetrics, + RpcModuleSelection::Selection, }; -use strum::{AsRefStr, EnumString, EnumVariantNames, ParseError, VariantNames}; -use tower::layer::util::{Identity, Stack}; -use tower_http::cors::CorsLayer; -use tracing::{instrument, trace}; +// re-export for convenience +pub use crate::eth::{EthConfig, EthHandlers}; /// Auth server utilities. pub mod auth; @@ -195,14 +204,6 @@ pub mod constants; // Rpc server metrics mod metrics; -// re-export for convenience -pub use crate::eth::{EthConfig, EthHandlers}; -pub use jsonrpsee::server::ServerBuilder; -pub use reth_ipc::server::{Builder as IpcServerBuilder, Endpoint}; -use reth_network_api::noop::NoopNetwork; -use reth_rpc::eth::EthBundle; -use reth_transaction_pool::noop::NoopTransactionPool; - /// Convenience function for starting a server in one step. pub async fn launch( provider: Provider, @@ -706,9 +707,7 @@ impl fmt::Display for RpcModuleSelection { } /// Represents RPC modules that are supported by reth -#[derive( - Debug, Clone, Copy, Eq, PartialEq, Hash, AsRefStr, EnumVariantNames, EnumString, Deserialize, -)] +#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, AsRefStr, EnumVariantNames, Deserialize)] #[serde(rename_all = "snake_case")] #[strum(serialize_all = "kebab-case")] pub enum RethRpcModule { @@ -732,6 +731,11 @@ pub enum RethRpcModule { Reth, /// `ots_` module Ots, + /// For single non-standard `eth_` namespace call `eth_callBundle` + /// + /// This is separate from [RethRpcModule::Eth] because it is a non standardized call that + /// should be opt-in. 
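// Editorial note: the wiring added later in this patch constructs the handler
// as `EthBundle::new(eth_api.clone(), blocking_pool_guard.clone()).into_rpc()`,
// and the module name parses from either `eth-call-bundle` or `eth_callBundle`
// (see the `FromStr` impl and parse test below), so it can be selected
// alongside the standard namespaces.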
+ EthCallBundle, } // === impl RethRpcModule === @@ -743,6 +747,34 @@ impl RethRpcModule { } } +impl FromStr for RethRpcModule { + type Err = ParseError; + + fn from_str(s: &str) -> Result { + Ok(match s { + "admin" => RethRpcModule::Admin, + "debug" => RethRpcModule::Debug, + "eth" => RethRpcModule::Eth, + "net" => RethRpcModule::Net, + "trace" => RethRpcModule::Trace, + "txpool" => RethRpcModule::Txpool, + "web3" => RethRpcModule::Web3, + "rpc" => RethRpcModule::Rpc, + "reth" => RethRpcModule::Reth, + "ots" => RethRpcModule::Ots, + "eth-call-bundle" | "eth_callBundle" => RethRpcModule::EthCallBundle, + _ => return Err(ParseError::VariantNotFound), + }) + } +} + +impl TryFrom<&str> for RethRpcModule { + type Error = ParseError; + fn try_from(s: &str) -> Result>::Error> { + FromStr::from_str(s) + } +} + impl fmt::Display for RethRpcModule { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.pad(self.as_ref()) @@ -1038,6 +1070,11 @@ where .into_rpc() .into() } + RethRpcModule::EthCallBundle => { + EthBundle::new(eth_api.clone(), self.blocking_pool_guard.clone()) + .into_rpc() + .into() + } }) .clone() }) @@ -2036,6 +2073,14 @@ impl fmt::Debug for RpcServerHandle { mod tests { use super::*; + #[test] + fn parse_eth_call_bundle() { + let selection = "eth-call-bundle".parse::().unwrap(); + assert_eq!(selection, RethRpcModule::EthCallBundle); + let selection = "eth_callBundle".parse::().unwrap(); + assert_eq!(selection, RethRpcModule::EthCallBundle); + } + #[test] fn parse_rpc_module_selection() { let selection = "all".parse::().unwrap(); From b4cbd0b04f7c50be78ec3c0ebec09a7f435808d0 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Thu, 16 Nov 2023 16:53:44 -0500 Subject: [PATCH 25/77] chore: use i64 for blob priority function (#5466) --- crates/transaction-pool/src/pool/blob.rs | 44 ++++++++++++++++++++---- 1 file changed, 37 insertions(+), 7 deletions(-) diff --git a/crates/transaction-pool/src/pool/blob.rs b/crates/transaction-pool/src/pool/blob.rs index 15f337ecc8929..15677bf55dfd2 100644 --- a/crates/transaction-pool/src/pool/blob.rs +++ b/crates/transaction-pool/src/pool/blob.rs @@ -284,12 +284,19 @@ impl Ord for BlobTransaction { /// /// This is supposed to get the number of fee jumps required to get from the current fee to the fee /// cap, or where the transaction would not be executable any more. -fn fee_delta(max_tx_fee: u128, current_fee: u128) -> f64 { +fn fee_delta(max_tx_fee: u128, current_fee: u128) -> i64 { // jumps = log1.125(txfee) - log1.125(basefee) - // TODO: should we do this without f64? let jumps = (max_tx_fee as f64).log(1.125) - (current_fee as f64).log(1.125); + // delta = sign(jumps) * log(abs(jumps)) - jumps.signum() * jumps.abs().log2() + match (jumps as i64).cmp(&0) { + Ordering::Equal => { + // can't take ilog2 of 0 + 0 + } + Ordering::Greater => (jumps.ceil() as i64).ilog2() as i64, + Ordering::Less => -((-jumps.floor() as i64).ilog2() as i64), + } } /// Returns the priority for the transaction, based on the "delta" blob fee and priority fee. 
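Between these hunks, a worked instance of the new integer fee-jump math may help. This is an editorial sketch that mirrors the `(7, 17_200_000_000, 7)` test vector added at the end of this patch; `ilog2` is the integer method stabilized in Rust 1.67.

```rust
fn main() {
    // Number of 12.5% fee "jumps" between a current fee of 7 and a max fee
    // of 17.2 gwei: log_1.125(17_200_000_000) - log_1.125(7) ≈ 183.6.
    let jumps = (17_200_000_000u128 as f64).log(1.125) - (7u128 as f64).log(1.125);
    assert!((183.0..184.0).contains(&jumps));

    // fee_delta then takes sign(jumps) * ilog2(ceil(|jumps|)):
    // ilog2(184) = 7, since 2^7 = 128 <= 184 < 256 = 2^8.
    assert_eq!((jumps.ceil() as i64).ilog2() as i64, 7);
}
```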
@@ -298,12 +305,12 @@ fn blob_tx_priority( blob_fee: u128, max_priority_fee: u128, base_fee: u128, -) -> f64 { +) -> i64 { let delta_blob_fee = fee_delta(blob_fee_cap, blob_fee); let delta_priority_fee = fee_delta(max_priority_fee, base_fee); // priority = min(delta-basefee, delta-blobfee, 0) - delta_blob_fee.min(delta_priority_fee).min(0.0) + delta_blob_fee.min(delta_priority_fee).min(0) } #[derive(Debug, Clone)] @@ -312,7 +319,7 @@ struct BlobOrd { pub(crate) submission_id: u64, // The priority for this transaction, calculated using the [`blob_tx_priority`] function, // taking into account both the blob and priority fee. - pub(crate) priority: f64, + pub(crate) priority: i64, } impl Eq for BlobOrd {} @@ -331,7 +338,7 @@ impl PartialOrd for BlobOrd { impl Ord for BlobOrd { fn cmp(&self, other: &Self) -> Ordering { - let ord = other.priority.total_cmp(&self.priority); + let ord = other.priority.cmp(&self.priority); // use submission_id to break ties if ord == Ordering::Equal { @@ -565,4 +572,27 @@ mod tests { ); } } + + #[test] + fn priority_tests() { + // Test vectors from: + // + let vectors = vec![ + (7u128, 10u128, 2i64), + (17_200_000_000, 17_200_000_000, 0), + (9_853_941_692, 11_085_092_510, 0), + (11_544_106_391, 10_356_781_100, 0), + (17_200_000_000, 7, -7), + (7, 17_200_000_000, 7), + ]; + + for (base_fee, tx_fee, expected) in vectors { + let actual = fee_delta(tx_fee, base_fee); + assert_eq!( + actual, expected, + "fee_delta({}, {}) = {}, expected: {}", + tx_fee, base_fee, actual, expected + ); + } + } } From 7312ada852c1482e47d4b0401e526fadff4665fb Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 17 Nov 2023 11:22:07 +0100 Subject: [PATCH 26/77] chore: add optimism_deposit_tx_signature function (#5457) --- crates/primitives/src/transaction/mod.rs | 11 +++++------ crates/primitives/src/transaction/signature.rs | 9 +++++++++ 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index cf7169a01867e..47a262705d4b1 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1209,7 +1209,7 @@ impl TransactionSigned { #[cfg(feature = "optimism")] let signature = if tx_type == DEPOSIT_TX_TYPE_ID { - Signature::default() + Signature::optimism_deposit_tx_signature() } else { Signature::decode(data)? }; @@ -1358,11 +1358,10 @@ impl proptest::arbitrary::Arbitrary for TransactionSigned { } #[cfg(feature = "optimism")] - let sig = if transaction.is_deposit() { - Signature { r: crate::U256::ZERO, s: crate::U256::ZERO, odd_y_parity: false } - } else { - sig - }; + let sig = transaction + .is_deposit() + .then(Signature::optimism_deposit_tx_signature) + .unwrap_or(sig); let mut tx = TransactionSigned { hash: Default::default(), signature: sig, transaction }; diff --git a/crates/primitives/src/transaction/signature.rs b/crates/primitives/src/transaction/signature.rs index aaf2e0a869e89..76c4a893f6f38 100644 --- a/crates/primitives/src/transaction/signature.rs +++ b/crates/primitives/src/transaction/signature.rs @@ -19,6 +19,15 @@ pub struct Signature { pub odd_y_parity: bool, } +impl Signature { + /// Returns the signature for the optimism deposit transactions, which don't include a + /// signature. 
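// Editorial note: deposit transactions (type 0x7E) are derived from L1 and
// carry no ECDSA signature, so this zeroed placeholder stands in wherever a
// signature is structurally required; both the decode path and the proptest
// generator changed above now reuse this single constructor instead of
// building the placeholder ad hoc.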
+ #[cfg(feature = "optimism")] + pub(crate) const fn optimism_deposit_tx_signature() -> Self { + Signature { r: U256::ZERO, s: U256::ZERO, odd_y_parity: false } + } +} + impl Compact for Signature { fn to_compact(self, buf: &mut B) -> usize where From eadbe5dce967666c9d900bb0730c15e166fe60fe Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Fri, 17 Nov 2023 02:21:35 -0800 Subject: [PATCH 27/77] feat(provider): `HeaderProvider::sealed_headers_while` (#5470) --- crates/consensus/common/src/validation.rs | 11 +++--- .../provider/src/providers/database/mod.rs | 12 +++++-- .../src/providers/database/provider.rs | 35 ++++++++++--------- crates/storage/provider/src/providers/mod.rs | 12 +++++-- .../provider/src/providers/snapshot/jar.rs | 23 +++++++----- .../src/providers/snapshot/manager.rs | 23 ++++++------ .../storage/provider/src/test_utils/mock.rs | 18 ++++++---- .../storage/provider/src/test_utils/noop.rs | 11 +++--- crates/storage/provider/src/traits/header.rs | 17 ++++++--- 9 files changed, 102 insertions(+), 60 deletions(-) diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index efc9b136b4b03..5af6f36a46b3c 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -567,16 +567,17 @@ mod tests { Ok(vec![]) } - fn sealed_headers_range( + fn sealed_header(&self, _block_number: BlockNumber) -> RethResult> { + Ok(None) + } + + fn sealed_headers_while( &self, _range: impl RangeBounds, + _predicate: impl FnMut(&SealedHeader) -> bool, ) -> RethResult> { Ok(vec![]) } - - fn sealed_header(&self, _block_number: BlockNumber) -> RethResult> { - Ok(None) - } } impl WithdrawalsProvider for Provider { diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 4e4f14de82ef6..357a0919674d7 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -214,6 +214,10 @@ impl HeaderProvider for ProviderFactory { self.provider()?.headers_range(range) } + fn sealed_header(&self, number: BlockNumber) -> RethResult> { + self.provider()?.sealed_header(number) + } + fn sealed_headers_range( &self, range: impl RangeBounds, @@ -221,8 +225,12 @@ impl HeaderProvider for ProviderFactory { self.provider()?.sealed_headers_range(range) } - fn sealed_header(&self, number: BlockNumber) -> RethResult> { - self.provider()?.sealed_header(number) + fn sealed_headers_while( + &self, + range: impl RangeBounds, + predicate: impl FnMut(&SealedHeader) -> bool, + ) -> RethResult> { + self.provider()?.sealed_headers_while(range, predicate) } } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 8da8df8396656..cf29d76ec547b 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -911,9 +911,21 @@ impl HeaderProvider for DatabaseProvider { .collect::>>() } - fn sealed_headers_range( + fn sealed_header(&self, number: BlockNumber) -> RethResult> { + if let Some(header) = self.header_by_number(number)? { + let hash = self + .block_hash(number)? 
+ .ok_or_else(|| ProviderError::HeaderNotFound(number.into()))?; + Ok(Some(header.seal(hash))) + } else { + Ok(None) + } + } + + fn sealed_headers_while( &self, range: impl RangeBounds, + mut predicate: impl FnMut(&SealedHeader) -> bool, ) -> RethResult> { let mut headers = vec![]; for entry in self.tx.cursor_read::()?.walk_range(range)? { @@ -921,21 +933,14 @@ impl HeaderProvider for DatabaseProvider { let hash = self .block_hash(number)? .ok_or_else(|| ProviderError::HeaderNotFound(number.into()))?; - headers.push(header.seal(hash)); + let sealed = header.seal(hash); + if !predicate(&sealed) { + break + } + headers.push(sealed); } Ok(headers) } - - fn sealed_header(&self, number: BlockNumber) -> RethResult> { - if let Some(header) = self.header_by_number(number)? { - let hash = self - .block_hash(number)? - .ok_or_else(|| ProviderError::HeaderNotFound(number.into()))?; - Ok(Some(header.seal(hash))) - } else { - Ok(None) - } - } } impl BlockHashReader for DatabaseProvider { @@ -1055,9 +1060,7 @@ impl BlockReader for DatabaseProvider { id: BlockHashOrNumber, transaction_kind: TransactionVariant, ) -> RethResult> { - let Some(block_number) = self.convert_hash_or_number(id)? else { - return Ok(None); - }; + let Some(block_number) = self.convert_hash_or_number(id)? else { return Ok(None) }; let Some(header) = self.header_by_number(block_number)? else { return Ok(None) }; diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index f3800a86cf031..43380487c1bb2 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -140,6 +140,10 @@ where self.database.provider()?.headers_range(range) } + fn sealed_header(&self, number: BlockNumber) -> RethResult> { + self.database.provider()?.sealed_header(number) + } + fn sealed_headers_range( &self, range: impl RangeBounds, @@ -147,8 +151,12 @@ where self.database.provider()?.sealed_headers_range(range) } - fn sealed_header(&self, number: BlockNumber) -> RethResult> { - self.database.provider()?.sealed_header(number) + fn sealed_headers_while( + &self, + range: impl RangeBounds, + predicate: impl FnMut(&SealedHeader) -> bool, + ) -> RethResult> { + self.database.provider()?.sealed_headers_while(range, predicate) } } diff --git a/crates/storage/provider/src/providers/snapshot/jar.rs b/crates/storage/provider/src/providers/snapshot/jar.rs index 1e04c8003c2ff..60d59a3f9bb0e 100644 --- a/crates/storage/provider/src/providers/snapshot/jar.rs +++ b/crates/storage/provider/src/providers/snapshot/jar.rs @@ -95,9 +95,17 @@ impl<'a> HeaderProvider for SnapshotJarProvider<'a> { Ok(headers) } - fn sealed_headers_range( + fn sealed_header(&self, number: BlockNumber) -> RethResult> { + Ok(self + .cursor()? + .get_two::>(number.into())? + .map(|(header, hash)| header.seal(hash))) + } + + fn sealed_headers_while( &self, range: impl RangeBounds, + mut predicate: impl FnMut(&SealedHeader) -> bool, ) -> RethResult> { let range = to_range(range); @@ -108,18 +116,15 @@ impl<'a> HeaderProvider for SnapshotJarProvider<'a> { if let Some((header, hash)) = cursor.get_two::>(number.into())? { - headers.push(header.seal(hash)) + let sealed = header.seal(hash); + if !predicate(&sealed) { + break + } + headers.push(sealed); } } Ok(headers) } - - fn sealed_header(&self, number: BlockNumber) -> RethResult> { - Ok(self - .cursor()? - .get_two::>(number.into())? 
- .map(|(header, hash)| header.seal(hash))) - } } impl<'a> BlockHashReader for SnapshotJarProvider<'a> { diff --git a/crates/storage/provider/src/providers/snapshot/manager.rs b/crates/storage/provider/src/providers/snapshot/manager.rs index 7cdbdae316b15..d4124f78518f0 100644 --- a/crates/storage/provider/src/providers/snapshot/manager.rs +++ b/crates/storage/provider/src/providers/snapshot/manager.rs @@ -113,7 +113,7 @@ impl SnapshotProvider { })?) .and_then(|(parsed_segment, block_range, tx_range)| { if parsed_segment == segment { - return Some((block_range, tx_range)); + return Some((block_range, tx_range)) } None }) @@ -123,7 +123,7 @@ impl SnapshotProvider { // Return cached `LoadedJar` or insert it for the first time, and then, return it. if let Some((block_range, tx_range)) = snapshot_ranges { - return Ok(Some(self.get_or_create_jar_provider(segment, &block_range, &tx_range)?)); + return Ok(Some(self.get_or_create_jar_provider(segment, &block_range, &tx_range)?)) } Ok(None) @@ -171,7 +171,7 @@ impl SnapshotProvider { let block_start = snapshots_rev_iter.peek().map(|(block_end, _)| *block_end + 1).unwrap_or(0); if block_start <= block { - return Some((block_start..=*block_end, tx_range.clone())); + return Some((block_start..=*block_end, tx_range.clone())) } } None @@ -194,7 +194,7 @@ impl SnapshotProvider { while let Some((tx_end, block_range)) = snapshots_rev_iter.next() { let tx_start = snapshots_rev_iter.peek().map(|(tx_end, _)| *tx_end + 1).unwrap_or(0); if tx_start <= tx { - return Some((block_range.clone(), tx_start..=*tx_end)); + return Some((block_range.clone(), tx_start..=*tx_end)) } } None @@ -278,17 +278,18 @@ impl HeaderProvider for SnapshotProvider { todo!(); } - fn sealed_headers_range( - &self, - _range: impl RangeBounds, - ) -> RethResult> { - todo!(); - } - fn sealed_header(&self, num: BlockNumber) -> RethResult> { self.get_segment_provider_from_block(SnapshotSegment::Headers, num, None)? .sealed_header(num) } + + fn sealed_headers_while( + &self, + _range: impl RangeBounds, + _predicate: impl FnMut(&SealedHeader) -> bool, + ) -> RethResult> { + todo!() + } } impl BlockHashReader for SnapshotProvider { diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 5c794c77a82a1..8f18732f44aac 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -162,15 +162,21 @@ impl HeaderProvider for MockEthProvider { Ok(headers) } - fn sealed_headers_range( + fn sealed_header(&self, number: BlockNumber) -> RethResult> { + Ok(self.header_by_number(number)?.map(|h| h.seal_slow())) + } + + fn sealed_headers_while( &self, range: impl RangeBounds, + mut predicate: impl FnMut(&SealedHeader) -> bool, ) -> RethResult> { - Ok(self.headers_range(range)?.into_iter().map(|h| h.seal_slow()).collect()) - } - - fn sealed_header(&self, number: BlockNumber) -> RethResult> { - Ok(self.header_by_number(number)?.map(|h| h.seal_slow())) + Ok(self + .headers_range(range)? 
+ .into_iter() + .map(|h| h.seal_slow()) + .take_while(|h| predicate(h)) + .collect()) } } diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index dabe5702ce421..6780959c120b9 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -235,16 +235,17 @@ impl HeaderProvider for NoopProvider { Ok(vec![]) } - fn sealed_headers_range( + fn sealed_header(&self, _number: BlockNumber) -> RethResult> { + Ok(None) + } + + fn sealed_headers_while( &self, _range: impl RangeBounds, + _predicate: impl FnMut(&SealedHeader) -> bool, ) -> RethResult> { Ok(vec![]) } - - fn sealed_header(&self, _number: BlockNumber) -> RethResult> { - Ok(None) - } } impl AccountReader for NoopProvider { diff --git a/crates/storage/provider/src/traits/header.rs b/crates/storage/provider/src/traits/header.rs index a553b79bb8ca8..3beebd1706a4e 100644 --- a/crates/storage/provider/src/traits/header.rs +++ b/crates/storage/provider/src/traits/header.rs @@ -37,12 +37,21 @@ pub trait HeaderProvider: Send + Sync { /// Get headers in range of block numbers fn headers_range(&self, range: impl RangeBounds) -> RethResult>; - /// Get headers in range of block numbers + /// Get a single sealed header by block number. + fn sealed_header(&self, number: BlockNumber) -> RethResult>; + + /// Get headers in range of block numbers. fn sealed_headers_range( &self, range: impl RangeBounds, - ) -> RethResult>; + ) -> RethResult> { + self.sealed_headers_while(range, |_| true) + } - /// Get a single sealed header by block number - fn sealed_header(&self, number: BlockNumber) -> RethResult>; + /// Get sealed headers while `predicate` returns `true` or the range is exhausted. + fn sealed_headers_while( + &self, + range: impl RangeBounds, + predicate: impl FnMut(&SealedHeader) -> bool, + ) -> RethResult>; } From c0fe64700a4afb15171286b912f003b3ff065239 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 17 Nov 2023 11:48:24 +0100 Subject: [PATCH 28/77] chore: add missing helper fns (#5471) --- crates/primitives/src/block.rs | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 2d820734d50d1..a432cdbc20896 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -102,6 +102,34 @@ impl BlockWithSenders { pub fn into_components(self) -> (Block, Vec
<Address>
) { (self.block, self.senders) } + + /// Returns an iterator over all transactions in the block. + #[inline] + pub fn transactions(&self) -> impl Iterator + '_ { + self.block.body.iter() + } + + /// Returns an iterator over all transactions and their sender. + #[inline] + pub fn transactions_with_sender( + &self, + ) -> impl Iterator + '_ { + self.senders.iter().zip(self.block.body.iter()) + } + + /// Consumes the block and returns the transactions of the block. + #[inline] + pub fn into_transactions(self) -> Vec { + self.block.body + } + + /// Returns an iterator over all transactions in the chain. + #[inline] + pub fn into_transactions_ecrecovered( + self, + ) -> impl Iterator { + self.block.body.into_iter().zip(self.senders).map(|(tx, sender)| tx.with_signer(sender)) + } } impl Deref for BlockWithSenders { From b03d0106ae9f49eebe7356927cfad6b15256ecc5 Mon Sep 17 00:00:00 2001 From: DoTheBestToGetTheBest <146037313+DoTheBestToGetTheBest@users.noreply.github.com> Date: Fri, 17 Nov 2023 04:57:07 -0800 Subject: [PATCH 29/77] feat(eth-wire) add missing docs (#5467) --- crates/net/eth-wire/src/errors/eth.rs | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/crates/net/eth-wire/src/errors/eth.rs b/crates/net/eth-wire/src/errors/eth.rs index c941b787d8872..21645def4a625 100644 --- a/crates/net/eth-wire/src/errors/eth.rs +++ b/crates/net/eth-wire/src/errors/eth.rs @@ -7,20 +7,33 @@ use std::io; /// Errors when sending/receiving messages #[derive(thiserror::Error, Debug)] -#[allow(missing_docs)] + pub enum EthStreamError { #[error(transparent)] + /// Error of the underlying P2P connection. P2PStreamError(#[from] P2PStreamError), #[error(transparent)] + /// Failed to parse peer's version. ParseVersionError(#[from] ParseVersionError), #[error(transparent)] + /// Failed Ethereum handshake. EthHandshakeError(#[from] EthHandshakeError), #[error("message id {1:?} is invalid for version {0:?}")] + /// Flags an unrecognized message ID for a given protocol version. EthInvalidMessageError(EthVersion, EthMessageID), #[error("message size ({0}) exceeds max length (10MB)")] + /// Received a message whose size exceeds the standard limit. MessageTooBig(usize), #[error("TransactionHashes invalid len of fields: hashes_len={hashes_len} types_len={types_len} sizes_len={sizes_len}")] - TransactionHashesInvalidLenOfFields { hashes_len: usize, types_len: usize, sizes_len: usize }, + /// Received malformed transaction hashes message with discrepancies in field lengths. + TransactionHashesInvalidLenOfFields { + /// The number of transaction hashes. + hashes_len: usize, + /// The number of transaction types. + types_len: usize, + /// The number of transaction sizes. 
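// Editorial note: this variant guards the eth/68 invariant that a
// `NewPooledTransactionHashes` announcement carries exactly one type byte and
// one size entry per hash; mismatched vector lengths mean the announcement is
// malformed.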
+ sizes_len: usize, + }, } // === impl EthStreamError === From 30dfc070e181cd82a63643590ddab153165dded0 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Fri, 17 Nov 2023 06:07:28 -0800 Subject: [PATCH 30/77] chore(provider): migrate providers to `ProviderError` (#5473) --- bin/reth/src/chain/import.rs | 2 +- bin/reth/src/debug_cmd/execution.rs | 7 +- bin/reth/src/debug_cmd/in_memory_merkle.rs | 2 +- bin/reth/src/debug_cmd/merkle.rs | 4 +- bin/reth/src/init.rs | 24 +- bin/reth/src/node/mod.rs | 3 +- bin/reth/src/stage/run.rs | 8 +- crates/consensus/beacon/src/engine/mod.rs | 14 +- crates/consensus/common/src/validation.rs | 34 +-- .../interfaces/src/blockchain_tree/error.rs | 6 +- crates/interfaces/src/error.rs | 6 - crates/interfaces/src/provider.rs | 15 ++ crates/payload/builder/src/error.rs | 8 +- crates/prune/src/segments/mod.rs | 4 +- crates/prune/src/segments/receipts.rs | 4 +- crates/revm/src/processor.rs | 19 +- crates/rpc/rpc/src/eth/api/mod.rs | 6 +- crates/rpc/rpc/src/eth/cache/mod.rs | 38 +-- crates/rpc/rpc/src/eth/filter.rs | 7 +- crates/rpc/rpc/src/result.rs | 1 + crates/snapshot/src/segments/headers.rs | 4 +- crates/snapshot/src/segments/mod.rs | 10 +- crates/snapshot/src/segments/receipts.rs | 4 +- crates/snapshot/src/segments/transactions.rs | 4 +- crates/stages/src/error.rs | 4 +- crates/stages/src/pipeline/mod.rs | 11 +- crates/stages/src/stages/hashing_storage.rs | 2 +- crates/stages/src/test_utils/runner.rs | 6 +- crates/storage/db/src/snapshot/cursor.rs | 15 +- crates/storage/db/src/snapshot/generation.rs | 5 +- crates/storage/nippy-jar/src/error.rs | 2 +- .../src/providers/bundle_state_provider.rs | 18 +- .../provider/src/providers/database/mod.rs | 121 +++++----- .../src/providers/database/provider.rs | 227 +++++++++--------- crates/storage/provider/src/providers/mod.rs | 190 ++++++++------- .../provider/src/providers/snapshot/jar.rs | 85 ++++--- .../src/providers/snapshot/manager.rs | 69 +++--- .../provider/src/providers/snapshot/mod.rs | 4 +- .../src/providers/state/historical.rs | 36 +-- .../provider/src/providers/state/latest.rs | 20 +- .../provider/src/providers/state/macros.rs | 14 +- .../storage/provider/src/test_utils/mock.rs | 160 ++++++------ .../storage/provider/src/test_utils/noop.rs | 147 ++++++------ crates/storage/provider/src/traits/account.rs | 12 +- crates/storage/provider/src/traits/block.rs | 56 ++--- .../storage/provider/src/traits/block_hash.rs | 16 +- .../storage/provider/src/traits/block_id.rs | 38 +-- crates/storage/provider/src/traits/evm_env.rs | 20 +- crates/storage/provider/src/traits/hashing.rs | 12 +- crates/storage/provider/src/traits/header.rs | 22 +- crates/storage/provider/src/traits/history.rs | 16 +- .../provider/src/traits/prune_checkpoint.rs | 9 +- .../storage/provider/src/traits/receipts.rs | 12 +- .../provider/src/traits/stage_checkpoint.rs | 17 +- crates/storage/provider/src/traits/state.rs | 48 ++-- crates/storage/provider/src/traits/storage.rs | 10 +- .../provider/src/traits/transactions.rs | 31 +-- .../provider/src/traits/withdrawals.rs | 6 +- crates/transaction-pool/src/maintain.rs | 5 +- testing/ef-tests/src/cases/blockchain_test.rs | 27 ++- 60 files changed, 923 insertions(+), 804 deletions(-) diff --git a/bin/reth/src/chain/import.rs b/bin/reth/src/chain/import.rs index c0cb2543b835d..984a34f8cf443 100644 --- a/bin/reth/src/chain/import.rs +++ b/bin/reth/src/chain/import.rs @@ -114,7 +114,7 @@ impl ImportCommand { debug!(target: "reth::cli", ?tip, "Tip manually set"); let factory = ProviderFactory::new(&db, 
self.chain.clone()); - let provider = factory.provider().map_err(PipelineError::Interface)?; + let provider = factory.provider()?; let latest_block_number = provider.get_stage_checkpoint(StageId::Finish)?.map(|ch| ch.block_number); diff --git a/bin/reth/src/debug_cmd/execution.rs b/bin/reth/src/debug_cmd/execution.rs index ad74f0bd8bba3..fee6390d2f9eb 100644 --- a/bin/reth/src/debug_cmd/execution.rs +++ b/bin/reth/src/debug_cmd/execution.rs @@ -34,7 +34,7 @@ use reth_stages::{ ExecutionStage, ExecutionStageThresholds, HeaderSyncMode, SenderRecoveryStage, TotalDifficultyStage, }, - Pipeline, PipelineError, StageSet, + Pipeline, StageSet, }; use reth_tasks::TaskExecutor; use std::{ @@ -234,7 +234,7 @@ impl Command { )?; let factory = ProviderFactory::new(&db, self.chain.clone()); - let provider = factory.provider().map_err(PipelineError::Interface)?; + let provider = factory.provider()?; let latest_block_number = provider.get_stage_checkpoint(StageId::Finish)?.map(|ch| ch.block_number); @@ -269,8 +269,7 @@ impl Command { // Unwind the pipeline without committing. { factory - .provider_rw() - .map_err(PipelineError::Interface)? + .provider_rw()? .take_block_and_execution_range(&self.chain, next_block..=target_block)?; } diff --git a/bin/reth/src/debug_cmd/in_memory_merkle.rs b/bin/reth/src/debug_cmd/in_memory_merkle.rs index 5ab78d9d90b55..81db51c5ce635 100644 --- a/bin/reth/src/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/debug_cmd/in_memory_merkle.rs @@ -194,7 +194,7 @@ impl Command { provider_rw.insert_block(block.clone(), None, None)?; block_state.write_to_db(provider_rw.tx_ref(), OriginalValuesKnown::No)?; let storage_lists = provider_rw.changed_storages_with_range(block.number..=block.number)?; - let storages = provider_rw.plainstate_storages(storage_lists)?; + let storages = provider_rw.plain_state_storages(storage_lists)?; provider_rw.insert_storage_for_hashing(storages)?; let account_lists = provider_rw.changed_accounts_with_range(block.number..=block.number)?; let accounts = provider_rw.basic_accounts(account_lists)?; diff --git a/bin/reth/src/debug_cmd/merkle.rs b/bin/reth/src/debug_cmd/merkle.rs index 4a982e8160642..dc5f98e59eb31 100644 --- a/bin/reth/src/debug_cmd/merkle.rs +++ b/bin/reth/src/debug_cmd/merkle.rs @@ -28,7 +28,7 @@ use reth_stages::{ AccountHashingStage, ExecutionStage, ExecutionStageThresholds, MerkleStage, StorageHashingStage, MERKLE_STAGE_DEFAULT_CLEAN_THRESHOLD, }, - ExecInput, PipelineError, Stage, + ExecInput, Stage, }; use reth_tasks::TaskExecutor; use std::{ @@ -121,7 +121,7 @@ impl Command { // initialize the database let db = Arc::new(init_db(db_path, self.db.log_level)?); let factory = ProviderFactory::new(&db, self.chain.clone()); - let provider_rw = factory.provider_rw().map_err(PipelineError::Interface)?; + let provider_rw = factory.provider_rw()?; // Configure and build network let network_secret_path = diff --git a/bin/reth/src/init.rs b/bin/reth/src/init.rs index 3ebc5d4073649..6b3d638040edb 100644 --- a/bin/reth/src/init.rs +++ b/bin/reth/src/init.rs @@ -5,14 +5,14 @@ use reth_db::{ tables, transaction::{DbTx, DbTxMut}, }; -use reth_interfaces::{db::DatabaseError, RethError}; +use reth_interfaces::{db::DatabaseError, provider::ProviderResult}; use reth_primitives::{ stage::StageId, Account, Bytecode, ChainSpec, Receipts, StorageEntry, B256, U256, }; use reth_provider::{ bundle_state::{BundleStateInit, RevertsInit}, BundleStateWithReceipts, DatabaseProviderRW, HashingWriter, HistoryWriter, OriginalValuesKnown, - ProviderFactory, + 
ProviderError, ProviderFactory, }; use std::{ collections::{BTreeMap, HashMap}, @@ -33,13 +33,15 @@ pub enum InitDatabaseError { database_hash: B256, }, - /// Low-level database error. + /// Provider error. #[error(transparent)] - DBError(#[from] DatabaseError), + Provider(#[from] ProviderError), +} - /// Internal error. - #[error(transparent)] - InternalError(#[from] RethError), +impl From for InitDatabaseError { + fn from(error: DatabaseError) -> Self { + Self::Provider(ProviderError::Database(error)) + } } /// Write the genesis block if it has not already been written @@ -94,7 +96,7 @@ pub fn init_genesis( pub fn insert_genesis_state( tx: &>::TXMut, genesis: &reth_primitives::Genesis, -) -> Result<(), InitDatabaseError> { +) -> ProviderResult<()> { let mut state_init: BundleStateInit = HashMap::new(); let mut reverts_init = HashMap::new(); let mut contracts: HashMap = HashMap::new(); @@ -160,7 +162,7 @@ pub fn insert_genesis_state( pub fn insert_genesis_hashes( provider: &DatabaseProviderRW<'_, &DB>, genesis: &reth_primitives::Genesis, -) -> Result<(), InitDatabaseError> { +) -> ProviderResult<()> { // insert and hash accounts to hashing table let alloc_accounts = genesis.alloc.clone().into_iter().map(|(addr, account)| (addr, Some(account.into()))); @@ -184,7 +186,7 @@ pub fn insert_genesis_hashes( pub fn insert_genesis_history( provider: &DatabaseProviderRW<'_, &DB>, genesis: &reth_primitives::Genesis, -) -> Result<(), InitDatabaseError> { +) -> ProviderResult<()> { let account_transitions = genesis.alloc.keys().map(|addr| (*addr, vec![0])).collect::>(); provider.insert_account_history_index(account_transitions)?; @@ -204,7 +206,7 @@ pub fn insert_genesis_history( pub fn insert_genesis_header( tx: &>::TXMut, chain: Arc, -) -> Result<(), InitDatabaseError> { +) -> ProviderResult<()> { let header = chain.sealed_genesis_header(); tx.put::(0, header.hash)?; diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index 3e6b9cd6a76d1..3db510564a2ae 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -1048,7 +1048,6 @@ mod tests { use super::*; use crate::args::utils::SUPPORTED_CHAINS; use reth_discv4::DEFAULT_DISCOVERY_PORT; - use reth_primitives::DEV; use std::{ net::{IpAddr, Ipv4Addr}, path::Path, @@ -1154,7 +1153,7 @@ mod tests { #[cfg(not(feature = "optimism"))] // dev mode not yet supported in op-reth fn parse_dev() { let cmd = NodeCommand::<()>::parse_from(["reth", "--dev"]); - let chain = DEV.clone(); + let chain = reth_primitives::DEV.clone(); assert_eq!(cmd.chain.chain, chain.chain); assert_eq!(cmd.chain.genesis_hash, chain.genesis_hash); assert_eq!( diff --git a/bin/reth/src/stage/run.rs b/bin/reth/src/stage/run.rs index af49f3e4f8fa9..c66792668371f 100644 --- a/bin/reth/src/stage/run.rs +++ b/bin/reth/src/stage/run.rs @@ -24,7 +24,7 @@ use reth_stages::{ IndexAccountHistoryStage, IndexStorageHistoryStage, MerkleStage, SenderRecoveryStage, StorageHashingStage, TransactionLookupStage, }, - ExecInput, ExecOutput, PipelineError, Stage, UnwindInput, + ExecInput, ExecOutput, Stage, UnwindInput, }; use std::{any::Any, net::SocketAddr, path::PathBuf, sync::Arc}; use tracing::*; @@ -125,7 +125,7 @@ impl Command { info!(target: "reth::cli", "Database opened"); let factory = ProviderFactory::new(&db, self.chain.clone()); - let mut provider_rw = factory.provider_rw().map_err(PipelineError::Interface)?; + let mut provider_rw = factory.provider_rw()?; if let Some(listen_addr) = self.metrics { info!(target: "reth::cli", "Starting metrics endpoint at {}", 
listen_addr); @@ -247,7 +247,7 @@ impl Command { if self.commit { provider_rw.commit()?; - provider_rw = factory.provider_rw().map_err(PipelineError::Interface)?; + provider_rw = factory.provider_rw()?; } } } @@ -264,7 +264,7 @@ impl Command { if self.commit { provider_rw.commit()?; - provider_rw = factory.provider_rw().map_err(PipelineError::Interface)?; + provider_rw = factory.provider_rw()?; } } diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 7497044cfa2da..b7242f3358a51 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -355,7 +355,7 @@ where inconsistent_stage_checkpoint = stage_checkpoint, "Pipeline sync progress is inconsistent" ); - return self.blockchain.block_hash(first_stage_checkpoint) + return Ok(self.blockchain.block_hash(first_stage_checkpoint)?) } } @@ -1670,7 +1670,7 @@ where }, Err(error) => { error!(target: "consensus::engine", ?error, "Error getting canonical header for continuous sync"); - return Some(Err(error.into())) + return Some(Err(RethError::Provider(error).into())) } }; self.blockchain.set_canonical_head(max_header); @@ -1836,7 +1836,10 @@ where cx, EngineContext { tip_block_number: this.blockchain.canonical_tip().number, - finalized_block_number: this.blockchain.finalized_block_number()?, + finalized_block_number: this + .blockchain + .finalized_block_number() + .map_err(RethError::Provider)?, }, )? { this.on_hook_result(result)?; @@ -1908,7 +1911,10 @@ where cx, EngineContext { tip_block_number: this.blockchain.canonical_tip().number, - finalized_block_number: this.blockchain.finalized_block_number()?, + finalized_block_number: this + .blockchain + .finalized_block_number() + .map_err(RethError::Provider)?, }, this.sync.is_pipeline_active(), )? 
{ diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 5af6f36a46b3c..8ba4e535fa1ee 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -484,8 +484,8 @@ mod tests { use super::*; use mockall::mock; use reth_interfaces::{ + provider::ProviderResult, test_utils::generators::{self, Rng}, - RethResult, }; use reth_primitives::{ constants::eip4844::DATA_GAS_PER_BLOB, hex_literal::hex, proofs, Account, Address, @@ -498,13 +498,13 @@ mod tests { WithdrawalsProvider {} impl WithdrawalsProvider for WithdrawalsProvider { - fn latest_withdrawal(&self) -> RethResult> ; + fn latest_withdrawal(&self) -> ProviderResult> ; fn withdrawals_by_block( &self, _id: BlockHashOrNumber, _timestamp: u64, - ) -> RethResult>> ; + ) -> ProviderResult>> ; } } @@ -537,37 +537,43 @@ mod tests { } impl AccountReader for Provider { - fn basic_account(&self, _address: Address) -> RethResult> { + fn basic_account(&self, _address: Address) -> ProviderResult> { Ok(self.account) } } impl HeaderProvider for Provider { - fn is_known(&self, _block_hash: &BlockHash) -> RethResult { + fn is_known(&self, _block_hash: &BlockHash) -> ProviderResult { Ok(self.is_known) } - fn header(&self, _block_number: &BlockHash) -> RethResult> { + fn header(&self, _block_number: &BlockHash) -> ProviderResult> { Ok(self.parent.clone()) } - fn header_by_number(&self, _num: u64) -> RethResult> { + fn header_by_number(&self, _num: u64) -> ProviderResult> { Ok(self.parent.clone()) } - fn header_td(&self, _hash: &BlockHash) -> RethResult> { + fn header_td(&self, _hash: &BlockHash) -> ProviderResult> { Ok(None) } - fn header_td_by_number(&self, _number: BlockNumber) -> RethResult> { + fn header_td_by_number(&self, _number: BlockNumber) -> ProviderResult> { Ok(None) } - fn headers_range(&self, _range: impl RangeBounds) -> RethResult> { + fn headers_range( + &self, + _range: impl RangeBounds, + ) -> ProviderResult> { Ok(vec![]) } - fn sealed_header(&self, _block_number: BlockNumber) -> RethResult> { + fn sealed_header( + &self, + _block_number: BlockNumber, + ) -> ProviderResult> { Ok(None) } @@ -575,7 +581,7 @@ mod tests { &self, _range: impl RangeBounds, _predicate: impl FnMut(&SealedHeader) -> bool, - ) -> RethResult> { + ) -> ProviderResult> { Ok(vec![]) } } @@ -585,11 +591,11 @@ mod tests { &self, _id: BlockHashOrNumber, _timestamp: u64, - ) -> RethResult>> { + ) -> ProviderResult>> { self.withdrawals_provider.withdrawals_by_block(_id, _timestamp) } - fn latest_withdrawal(&self) -> RethResult> { + fn latest_withdrawal(&self) -> ProviderResult> { self.withdrawals_provider.latest_withdrawal() } } diff --git a/crates/interfaces/src/blockchain_tree/error.rs b/crates/interfaces/src/blockchain_tree/error.rs index 695233d2a6ea4..ac7d4552770d5 100644 --- a/crates/interfaces/src/blockchain_tree/error.rs +++ b/crates/interfaces/src/blockchain_tree/error.rs @@ -3,6 +3,7 @@ use crate::{ consensus::ConsensusError, executor::{BlockExecutionError, BlockValidationError}, + provider::ProviderError, }; use reth_primitives::{BlockHash, BlockNumber, SealedBlock}; @@ -201,6 +202,9 @@ pub enum InsertBlockErrorKind { /// Block violated tree invariants. #[error(transparent)] Tree(#[from] BlockchainTreeError), + /// Provider error. + #[error(transparent)] + Provider(#[from] ProviderError), /// An internal error occurred, like interacting with the database. 
#[error(transparent)] Internal(#[from] Box), @@ -260,7 +264,7 @@ impl InsertBlockErrorKind { BlockchainTreeError::BlockBufferingFailed { .. } => false, } } - InsertBlockErrorKind::Internal(_) => { + InsertBlockErrorKind::Provider(_) | InsertBlockErrorKind::Internal(_) => { // any other error, such as database errors, are considered internal errors false } diff --git a/crates/interfaces/src/error.rs b/crates/interfaces/src/error.rs index e40a1abd5f676..ef14d1211eb4f 100644 --- a/crates/interfaces/src/error.rs +++ b/crates/interfaces/src/error.rs @@ -33,12 +33,6 @@ impl From for RethError { } } -impl From for RethError { - fn from(err: reth_nippy_jar::NippyJarError) -> Self { - RethError::Custom(err.to_string()) - } -} - impl From for RethError { fn from(err: reth_primitives::fs::FsPathError) -> Self { RethError::Custom(err.to_string()) diff --git a/crates/interfaces/src/provider.rs b/crates/interfaces/src/provider.rs index a5a72c942ad67..f5f0a7fccf6b7 100644 --- a/crates/interfaces/src/provider.rs +++ b/crates/interfaces/src/provider.rs @@ -5,12 +5,21 @@ use reth_primitives::{ use std::path::PathBuf; use thiserror::Error; +/// Provider result type. +pub type ProviderResult = Result; + /// Bundled errors variants thrown by various providers. #[derive(Clone, Debug, Error, PartialEq, Eq)] pub enum ProviderError { /// Database error. #[error(transparent)] Database(#[from] crate::db::DatabaseError), + /// Nippy jar error. + #[error("nippy jar error: {0}")] + NippyJar(String), + /// Error when recovering the sender for a transaction + #[error("failed to recover sender for transaction")] + SenderRecoveryError, /// The header number was not found for the given block hash. #[error("block hash {0} does not exist in Headers table")] BlockHashNotFound(BlockHash), @@ -107,6 +116,12 @@ pub enum ProviderError { MissingSnapshotTx(SnapshotSegment, TxNumber), } +impl From for ProviderError { + fn from(err: reth_nippy_jar::NippyJarError) -> Self { + ProviderError::NippyJar(err.to_string()) + } +} + /// A root mismatch error at a given block height. #[derive(Clone, Debug, Error, PartialEq, Eq)] #[error("root mismatch at #{block_number} ({block_hash}): {root}")] diff --git a/crates/payload/builder/src/error.rs b/crates/payload/builder/src/error.rs index 7d8360864800a..d42366d121ccb 100644 --- a/crates/payload/builder/src/error.rs +++ b/crates/payload/builder/src/error.rs @@ -1,6 +1,6 @@ //! Error types emitted by types or implementations of this crate. -use reth_interfaces::RethError; +use reth_interfaces::{provider::ProviderError, RethError}; use reth_primitives::{revm_primitives::EVMError, B256}; use reth_transaction_pool::BlobStoreError; use tokio::sync::oneshot; @@ -32,6 +32,12 @@ pub enum PayloadBuilderError { Optimism(#[from] OptimismPayloadBuilderError), } +impl From for PayloadBuilderError { + fn from(error: ProviderError) -> Self { + PayloadBuilderError::Internal(RethError::Provider(error)) + } +} + /// Optimism specific payload building errors. 
#[derive(Debug, thiserror::Error)] pub enum OptimismPayloadBuilderError { diff --git a/crates/prune/src/segments/mod.rs b/crates/prune/src/segments/mod.rs index 82a44f6b08ef2..62fda61958647 100644 --- a/crates/prune/src/segments/mod.rs +++ b/crates/prune/src/segments/mod.rs @@ -22,7 +22,7 @@ pub use transactions::Transactions; use crate::PrunerError; use reth_db::database::Database; -use reth_interfaces::RethResult; +use reth_interfaces::{provider::ProviderResult, RethResult}; use reth_primitives::{BlockNumber, PruneCheckpoint, PruneMode, PruneSegment, TxNumber}; use reth_provider::{BlockReader, DatabaseProviderRW, PruneCheckpointWriter}; use std::ops::RangeInclusive; @@ -54,7 +54,7 @@ pub trait Segment: Debug + Send + Sync { &self, provider: &DatabaseProviderRW<'_, DB>, checkpoint: PruneCheckpoint, - ) -> RethResult<()> { + ) -> ProviderResult<()> { provider.save_prune_checkpoint(self.segment(), checkpoint) } } diff --git a/crates/prune/src/segments/receipts.rs b/crates/prune/src/segments/receipts.rs index b260cf78b38ed..fb97897e0cd47 100644 --- a/crates/prune/src/segments/receipts.rs +++ b/crates/prune/src/segments/receipts.rs @@ -3,7 +3,7 @@ use crate::{ PrunerError, }; use reth_db::{database::Database, tables}; -use reth_interfaces::RethResult; +use reth_interfaces::provider::ProviderResult; use reth_primitives::{PruneCheckpoint, PruneMode, PruneSegment}; use reth_provider::{DatabaseProviderRW, PruneCheckpointWriter, TransactionsProvider}; use tracing::{instrument, trace}; @@ -73,7 +73,7 @@ impl Segment for Receipts { &self, provider: &DatabaseProviderRW<'_, DB>, checkpoint: PruneCheckpoint, - ) -> RethResult<()> { + ) -> ProviderResult<()> { provider.save_prune_checkpoint(PruneSegment::Receipts, checkpoint)?; // `PruneSegment::Receipts` overrides `PruneSegment::ContractLogs`, so we can preemptively diff --git a/crates/revm/src/processor.rs b/crates/revm/src/processor.rs index baf8b1595b822..f78c0411703fa 100644 --- a/crates/revm/src/processor.rs +++ b/crates/revm/src/processor.rs @@ -557,7 +557,7 @@ pub fn verify_receipt<'a>( #[cfg(test)] mod tests { use super::*; - use reth_interfaces::RethResult; + use reth_interfaces::provider::ProviderResult; use reth_primitives::{ bytes, constants::{BEACON_ROOTS_ADDRESS, SYSTEM_ADDRESS}, @@ -599,14 +599,13 @@ mod tests { } impl AccountReader for StateProviderTest { - fn basic_account(&self, address: Address) -> RethResult> { - let ret = Ok(self.accounts.get(&address).map(|(_, acc)| *acc)); - ret + fn basic_account(&self, address: Address) -> ProviderResult> { + Ok(self.accounts.get(&address).map(|(_, acc)| *acc)) } } impl BlockHashReader for StateProviderTest { - fn block_hash(&self, number: u64) -> RethResult> { + fn block_hash(&self, number: u64) -> ProviderResult> { Ok(self.block_hash.get(&number).cloned()) } @@ -614,7 +613,7 @@ mod tests { &self, start: BlockNumber, end: BlockNumber, - ) -> RethResult> { + ) -> ProviderResult> { let range = start..end; Ok(self .block_hash @@ -625,7 +624,7 @@ mod tests { } impl StateRootProvider for StateProviderTest { - fn state_root(&self, _bundle_state: &BundleStateWithReceipts) -> RethResult { + fn state_root(&self, _bundle_state: &BundleStateWithReceipts) -> ProviderResult { unimplemented!("state root computation is not supported") } } @@ -635,18 +634,18 @@ mod tests { &self, account: Address, storage_key: StorageKey, - ) -> RethResult> { + ) -> ProviderResult> { Ok(self .accounts .get(&account) .and_then(|(storage, _)| storage.get(&storage_key).cloned())) } - fn bytecode_by_hash(&self, code_hash: B256) 
-> RethResult> { + fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult> { Ok(self.contracts.get(&code_hash).cloned()) } - fn proof(&self, _address: Address, _keys: &[B256]) -> RethResult { + fn proof(&self, _address: Address, _keys: &[B256]) -> ProviderResult { unimplemented!("proof generation is not supported") } } diff --git a/crates/rpc/rpc/src/eth/api/mod.rs b/crates/rpc/rpc/src/eth/api/mod.rs index 39b241beba15b..b30b4db562db8 100644 --- a/crates/rpc/rpc/src/eth/api/mod.rs +++ b/crates/rpc/rpc/src/eth/api/mod.rs @@ -226,12 +226,12 @@ where /// Returns the state at the given block number pub fn state_at_hash(&self, block_hash: B256) -> RethResult> { - self.provider().history_by_block_hash(block_hash) + Ok(self.provider().history_by_block_hash(block_hash)?) } /// Returns the _latest_ state pub fn latest_state(&self) -> RethResult> { - self.provider().latest() + Ok(self.provider().latest()?) } } @@ -364,7 +364,7 @@ where /// Returns the current info for the chain fn chain_info(&self) -> RethResult { - self.provider().chain_info() + Ok(self.provider().chain_info()?) } fn accounts(&self) -> Vec
{
diff --git a/crates/rpc/rpc/src/eth/cache/mod.rs b/crates/rpc/rpc/src/eth/cache/mod.rs
index f5c061b7143f7..fb1f65d1db594 100644
--- a/crates/rpc/rpc/src/eth/cache/mod.rs
+++ b/crates/rpc/rpc/src/eth/cache/mod.rs
@@ -1,7 +1,7 @@
 //! Async caching support for eth RPC
 
 use futures::{future::Either, Stream, StreamExt};
-use reth_interfaces::{provider::ProviderError, RethResult};
+use reth_interfaces::provider::{ProviderError, ProviderResult};
 use reth_primitives::{Block, Receipt, SealedBlock, TransactionSigned, B256};
 use reth_provider::{
     BlockReader, BlockSource, CanonStateNotification, EvmEnvProvider, StateProviderFactory,
@@ -30,16 +30,17 @@ mod multi_consumer;
 pub use multi_consumer::MultiConsumerLruCache;
 
 /// The type that can send the response to a requested [Block]
-type BlockResponseSender = oneshot::Sender<RethResult<Option<Block>>>;
+type BlockResponseSender = oneshot::Sender<ProviderResult<Option<Block>>>;
 
 /// The type that can send the response to a requested block's transactions
-type BlockTransactionsResponseSender = oneshot::Sender<RethResult<Option<Vec<TransactionSigned>>>>;
+type BlockTransactionsResponseSender =
+    oneshot::Sender<ProviderResult<Option<Vec<TransactionSigned>>>>;
 
 /// The type that can send the response to the requested receipts of a block.
-type ReceiptsResponseSender = oneshot::Sender<RethResult<Option<Vec<Receipt>>>>;
+type ReceiptsResponseSender = oneshot::Sender<ProviderResult<Option<Vec<Receipt>>>>;
 
 /// The type that can send the response to a requested env
-type EnvResponseSender = oneshot::Sender<RethResult<(CfgEnv, BlockEnv)>>;
+type EnvResponseSender = oneshot::Sender<ProviderResult<(CfgEnv, BlockEnv)>>;
 
 type BlockLruCache = MultiConsumerLruCache<
     B256,
@@ -127,7 +128,7 @@ impl EthStateCache {
     /// Requests the [Block] for the block hash
     ///
     /// Returns `None` if the block does not exist.
-    pub(crate) async fn get_block(&self, block_hash: B256) -> RethResult<Option<Block>> {
+    pub(crate) async fn get_block(&self, block_hash: B256) -> ProviderResult<Option<Block>> {
         let (response_tx, rx) = oneshot::channel();
         let _ = self.to_service.send(CacheAction::GetBlock { block_hash, response_tx });
         rx.await.map_err(|_| ProviderError::CacheServiceUnavailable)?
@@ -139,7 +140,7 @@ impl EthStateCache {
     pub(crate) async fn get_sealed_block(
         &self,
         block_hash: B256,
-    ) -> RethResult<Option<SealedBlock>> {
+    ) -> ProviderResult<Option<SealedBlock>> {
         Ok(self.get_block(block_hash).await?.map(|block| block.seal(block_hash)))
     }
 
@@ -149,7 +150,7 @@ impl EthStateCache {
     pub(crate) async fn get_block_transactions(
         &self,
         block_hash: B256,
-    ) -> RethResult<Option<Vec<TransactionSigned>>> {
+    ) -> ProviderResult<Option<Vec<TransactionSigned>>> {
         let (response_tx, rx) = oneshot::channel();
         let _ = self.to_service.send(CacheAction::GetBlockTransactions { block_hash, response_tx });
         rx.await.map_err(|_| ProviderError::CacheServiceUnavailable)?
@@ -159,7 +160,7 @@ impl EthStateCache {
     pub(crate) async fn get_transactions_and_receipts(
         &self,
         block_hash: B256,
-    ) -> RethResult<Option<(Vec<TransactionSigned>, Vec<Receipt>)>> {
+    ) -> ProviderResult<Option<(Vec<TransactionSigned>, Vec<Receipt>)>> {
         let transactions = self.get_block_transactions(block_hash);
         let receipts = self.get_receipts(block_hash);
 
@@ -171,7 +172,10 @@ impl EthStateCache {
     /// Requests the [Receipt] for the block hash
     ///
     /// Returns `None` if the block was not found.
-    pub(crate) async fn get_receipts(&self, block_hash: B256) -> RethResult<Option<Vec<Receipt>>> {
+    pub(crate) async fn get_receipts(
+        &self,
+        block_hash: B256,
+    ) -> ProviderResult<Option<Vec<Receipt>>> {
         let (response_tx, rx) = oneshot::channel();
         let _ = self.to_service.send(CacheAction::GetReceipts { block_hash, response_tx });
         rx.await.map_err(|_| ProviderError::CacheServiceUnavailable)?
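Every accessor above follows the same request/response shape: the caller opens a oneshot channel, ships the sender half to the cache service together with the request, and awaits the receiver; if the service task has shut down, the dropped sender surfaces as `ProviderError::CacheServiceUnavailable`. Below is a minimal self-contained sketch of that pattern, assuming a hypothetical `Request`/`FetchError` pair in place of reth's cache actions and provider errors:

    // Sketch of the oneshot request/response pattern used by EthStateCache.
    // `Request` and `FetchError` are illustrative stand-ins, not reth types.
    use tokio::sync::{mpsc, oneshot};

    #[derive(Debug)]
    enum FetchError {
        ServiceUnavailable,
    }

    struct Request {
        key: u64,
        // The service answers by completing this oneshot sender.
        response_tx: oneshot::Sender<Result<Option<String>, FetchError>>,
    }

    async fn get(
        to_service: &mpsc::UnboundedSender<Request>,
        key: u64,
    ) -> Result<Option<String>, FetchError> {
        let (response_tx, rx) = oneshot::channel();
        // Ignore send errors here; a dead service is detected on `rx.await`.
        let _ = to_service.send(Request { key, response_tx });
        // A dropped sender (service shut down) becomes ServiceUnavailable,
        // mirroring ProviderError::CacheServiceUnavailable above.
        rx.await.map_err(|_| FetchError::ServiceUnavailable)?
    }

    #[tokio::main]
    async fn main() {
        let (tx, mut rx) = mpsc::unbounded_channel::<Request>();
        // A toy service task that fulfills every request immediately.
        tokio::spawn(async move {
            while let Some(req) = rx.recv().await {
                let _ = req.response_tx.send(Ok(Some(format!("value-{}", req.key))));
            }
        });
        println!("{:?}", get(&tx, 7).await);
    }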
@@ -181,7 +185,7 @@ impl EthStateCache { pub(crate) async fn get_block_and_receipts( &self, block_hash: B256, - ) -> RethResult)>> { + ) -> ProviderResult)>> { let block = self.get_sealed_block(block_hash); let receipts = self.get_receipts(block_hash); @@ -194,7 +198,7 @@ impl EthStateCache { /// /// Returns an error if the corresponding header (required for populating the envs) was not /// found. - pub(crate) async fn get_evm_env(&self, block_hash: B256) -> RethResult<(CfgEnv, BlockEnv)> { + pub(crate) async fn get_evm_env(&self, block_hash: B256) -> ProviderResult<(CfgEnv, BlockEnv)> { let (response_tx, rx) = oneshot::channel(); let _ = self.to_service.send(CacheAction::GetEnv { block_hash, response_tx }); rx.await.map_err(|_| ProviderError::CacheServiceUnavailable)? @@ -251,7 +255,7 @@ where Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, Tasks: TaskSpawner + Clone + 'static, { - fn on_new_block(&mut self, block_hash: B256, res: RethResult>) { + fn on_new_block(&mut self, block_hash: B256, res: ProviderResult>) { if let Some(queued) = self.full_block_cache.remove(&block_hash) { // send the response to queued senders for tx in queued { @@ -274,7 +278,7 @@ where } } - fn on_new_receipts(&mut self, block_hash: B256, res: RethResult>>) { + fn on_new_receipts(&mut self, block_hash: B256, res: ProviderResult>>) { if let Some(queued) = self.receipts_cache.remove(&block_hash) { // send the response to queued senders for tx in queued { @@ -457,9 +461,9 @@ enum CacheAction { GetBlockTransactions { block_hash: B256, response_tx: BlockTransactionsResponseSender }, GetEnv { block_hash: B256, response_tx: EnvResponseSender }, GetReceipts { block_hash: B256, response_tx: ReceiptsResponseSender }, - BlockResult { block_hash: B256, res: RethResult> }, - ReceiptsResult { block_hash: B256, res: RethResult>> }, - EnvResult { block_hash: B256, res: Box> }, + BlockResult { block_hash: B256, res: ProviderResult> }, + ReceiptsResult { block_hash: B256, res: ProviderResult>> }, + EnvResult { block_hash: B256, res: Box> }, CacheNewCanonicalChain { blocks: Vec, receipts: Vec }, } diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index 3427a06fe3017..96cb687e6375b 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -11,9 +11,8 @@ use core::fmt; use async_trait::async_trait; use jsonrpsee::{core::RpcResult, server::IdProvider}; -use reth_interfaces::RethError; use reth_primitives::{BlockHashOrNumber, IntoRecoveredTransaction, Receipt, SealedBlock, TxHash}; -use reth_provider::{BlockIdReader, BlockReader, EvmEnvProvider}; +use reth_provider::{BlockIdReader, BlockReader, EvmEnvProvider, ProviderError}; use reth_rpc_api::EthFilterApiServer; use reth_rpc_types::{ Filter, FilterBlockOption, FilterChanges, FilterId, FilteredParams, Log, @@ -690,8 +689,8 @@ impl From for jsonrpsee::types::error::ErrorObject<'static> { } } -impl From for FilterError { - fn from(err: RethError) -> Self { +impl From for FilterError { + fn from(err: ProviderError) -> Self { FilterError::EthAPIError(err.into()) } } diff --git a/crates/rpc/rpc/src/result.rs b/crates/rpc/rpc/src/result.rs index d187590115da1..43ceb5d949453 100644 --- a/crates/rpc/rpc/src/result.rs +++ b/crates/rpc/rpc/src/result.rs @@ -104,6 +104,7 @@ macro_rules! 
impl_to_rpc_result {
 impl_to_rpc_result!(PayloadError);
 impl_to_rpc_result!(reth_interfaces::RethError);
+impl_to_rpc_result!(reth_interfaces::provider::ProviderError);
 impl_to_rpc_result!(reth_network_api::NetworkError);
 
 /// An extension trait used to apply error conversions to various result types
diff --git a/crates/snapshot/src/segments/headers.rs b/crates/snapshot/src/segments/headers.rs
index 89fc009f9fd77..d6852c73dec34 100644
--- a/crates/snapshot/src/segments/headers.rs
+++ b/crates/snapshot/src/segments/headers.rs
@@ -3,7 +3,7 @@ use reth_db::{
     cursor::DbCursorRO, database::Database, snapshot::create_snapshot_T1_T2_T3, tables,
     transaction::DbTx, RawKey, RawTable,
 };
-use reth_interfaces::RethResult;
+use reth_interfaces::provider::ProviderResult;
 use reth_primitives::{
     snapshot::{Compression, Filters, SegmentConfig},
     BlockNumber, SnapshotSegment,
@@ -40,7 +40,7 @@ impl Segment for Headers {
         provider: &DatabaseProviderRO<'_, DB>,
         directory: impl AsRef<Path>,
         range: RangeInclusive<BlockNumber>,
-    ) -> RethResult<()> {
+    ) -> ProviderResult<()> {
         let range_len = range.clone().count();
         let mut jar = prepare_jar::(
             provider,
diff --git a/crates/snapshot/src/segments/mod.rs b/crates/snapshot/src/segments/mod.rs
index 0603dbeed4402..ec9061ebcb196 100644
--- a/crates/snapshot/src/segments/mod.rs
+++ b/crates/snapshot/src/segments/mod.rs
@@ -12,7 +12,7 @@ pub use receipts::Receipts;
 use reth_db::{
     cursor::DbCursorRO, database::Database, table::Table, transaction::DbTx, RawKey, RawTable,
 };
-use reth_interfaces::RethResult;
+use reth_interfaces::provider::ProviderResult;
 use reth_nippy_jar::NippyJar;
 use reth_primitives::{
     snapshot::{
@@ -34,7 +34,7 @@ pub trait Segment: Default {
         provider: &DatabaseProviderRO<'_, DB>,
         directory: impl AsRef<Path>,
         range: RangeInclusive<BlockNumber>,
-    ) -> RethResult<()>;
+    ) -> ProviderResult<()>;
 
     /// Returns this struct's [`SnapshotSegment`].
     fn segment() -> SnapshotSegment;
@@ -45,7 +45,7 @@ pub trait Segment: Default {
         provider: &DatabaseProviderRO<'_, DB>,
         range: &RangeInclusive<BlockNumber>,
         range_len: usize,
-    ) -> RethResult>> {
+    ) -> ProviderResult>> {
         let mut cursor = provider.tx_ref().cursor_read::>()?;
         Ok(cursor
             .walk_back(Some(RawKey::from(*range.end())))?
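The one-line addition of `impl_to_rpc_result!(reth_interfaces::provider::ProviderError);` above works because the surrounding declarative macro stamps out the same conversion impl for every listed error type. A rough, self-contained sketch of that macro pattern, using a simplified `ToRpcResult` trait and a stand-in error type rather than reth's actual definitions:

    // Sketch of the impl_to_rpc_result! pattern: one macro, many error types.
    use std::fmt;

    // Simplified stand-in for the RPC conversion trait.
    trait ToRpcResult<T> {
        fn to_rpc_result(self) -> Result<T, String>;
    }

    macro_rules! impl_to_rpc_result {
        ($err:ty) => {
            impl<T> ToRpcResult<T> for Result<T, $err> {
                fn to_rpc_result(self) -> Result<T, String> {
                    // A real impl would build a jsonrpsee error object;
                    // stringifying keeps this sketch dependency-free.
                    self.map_err(|e| e.to_string())
                }
            }
        };
    }

    #[derive(Debug)]
    struct ProviderError(String);

    impl fmt::Display for ProviderError {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, "provider error: {}", self.0)
        }
    }

    // Supporting a new error type is a single macro invocation.
    impl_to_rpc_result!(ProviderError);

    fn main() {
        let res: Result<u32, ProviderError> = Err(ProviderError("missing header".into()));
        println!("{:?}", res.to_rpc_result());
    }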
@@ -64,8 +64,8 @@ pub(crate) fn prepare_jar( segment_config: SegmentConfig, block_range: RangeInclusive, total_rows: usize, - prepare_compression: impl Fn() -> RethResult>, -) -> RethResult> { + prepare_compression: impl Fn() -> ProviderResult>, +) -> ProviderResult> { let tx_range = provider.transaction_range_by_block_range(block_range.clone())?; let mut nippy_jar = NippyJar::new( COLUMNS, diff --git a/crates/snapshot/src/segments/receipts.rs b/crates/snapshot/src/segments/receipts.rs index 75e8aaa8995d2..c40949a0dd65f 100644 --- a/crates/snapshot/src/segments/receipts.rs +++ b/crates/snapshot/src/segments/receipts.rs @@ -1,6 +1,6 @@ use crate::segments::{prepare_jar, Segment}; use reth_db::{database::Database, snapshot::create_snapshot_T1, tables}; -use reth_interfaces::RethResult; +use reth_interfaces::provider::ProviderResult; use reth_primitives::{ snapshot::{Compression, Filters, SegmentConfig, SegmentHeader}, BlockNumber, SnapshotSegment, TxNumber, @@ -37,7 +37,7 @@ impl Segment for Receipts { provider: &DatabaseProviderRO<'_, DB>, directory: impl AsRef, block_range: RangeInclusive, - ) -> RethResult<()> { + ) -> ProviderResult<()> { let tx_range = provider.transaction_range_by_block_range(block_range.clone())?; let tx_range_len = tx_range.clone().count(); diff --git a/crates/snapshot/src/segments/transactions.rs b/crates/snapshot/src/segments/transactions.rs index b9cccfd20d377..4367f1ce0a7fc 100644 --- a/crates/snapshot/src/segments/transactions.rs +++ b/crates/snapshot/src/segments/transactions.rs @@ -1,6 +1,6 @@ use crate::segments::{prepare_jar, Segment}; use reth_db::{database::Database, snapshot::create_snapshot_T1, tables}; -use reth_interfaces::RethResult; +use reth_interfaces::provider::ProviderResult; use reth_primitives::{ snapshot::{Compression, Filters, SegmentConfig, SegmentHeader}, BlockNumber, SnapshotSegment, TxNumber, @@ -37,7 +37,7 @@ impl Segment for Transactions { provider: &DatabaseProviderRO<'_, DB>, directory: impl AsRef, block_range: RangeInclusive, - ) -> RethResult<()> { + ) -> ProviderResult<()> { let tx_range = provider.transaction_range_by_block_range(block_range.clone())?; let tx_range_len = tx_range.clone().count(); diff --git a/crates/stages/src/error.rs b/crates/stages/src/error.rs index 598e229a80406..180a8ca5ae24a 100644 --- a/crates/stages/src/error.rs +++ b/crates/stages/src/error.rs @@ -109,9 +109,9 @@ pub enum PipelineError { /// The pipeline encountered a database error. #[error(transparent)] Database(#[from] DbError), - /// The pipeline encountered an irrecoverable error in one of the stages. + /// Provider error. #[error(transparent)] - Interface(#[from] RethError), + Provider(#[from] ProviderError), /// The pipeline encountered an error while trying to send an event. 
#[error("pipeline encountered an error while trying to send an event")] Channel(#[from] SendError), diff --git a/crates/stages/src/pipeline/mod.rs b/crates/stages/src/pipeline/mod.rs index 4b58b69c87d23..f5955a5dffbc9 100644 --- a/crates/stages/src/pipeline/mod.rs +++ b/crates/stages/src/pipeline/mod.rs @@ -263,7 +263,7 @@ where let unwind_pipeline = self.stages.iter_mut().rev(); let factory = ProviderFactory::new(&self.db, self.chain_spec.clone()); - let mut provider_rw = factory.provider_rw().map_err(PipelineError::Interface)?; + let mut provider_rw = factory.provider_rw()?; for stage in unwind_pipeline { let stage_id = stage.id(); @@ -320,7 +320,7 @@ where .notify(PipelineEvent::Unwound { stage_id, result: unwind_output }); provider_rw.commit()?; - provider_rw = factory.provider_rw().map_err(PipelineError::Interface)?; + provider_rw = factory.provider_rw()?; } Err(err) => { self.listeners.notify(PipelineEvent::Error { stage_id }); @@ -346,7 +346,7 @@ where let target = self.max_block.or(previous_stage); let factory = ProviderFactory::new(&self.db, self.chain_spec.clone()); - let mut provider_rw = factory.provider_rw().map_err(PipelineError::Interface)?; + let mut provider_rw = factory.provider_rw()?; loop { let prev_checkpoint = provider_rw.get_stage_checkpoint(stage_id)?; @@ -427,7 +427,7 @@ where // TODO: Make the commit interval configurable provider_rw.commit()?; - provider_rw = factory.provider_rw().map_err(PipelineError::Interface)?; + provider_rw = factory.provider_rw()?; if done { let block_number = checkpoint.block_number; @@ -465,8 +465,7 @@ where // stage not clearing its checkpoint, and // restarting from an invalid place. drop(provider_rw); - provider_rw = - factory.provider_rw().map_err(PipelineError::Interface)?; + provider_rw = factory.provider_rw()?; provider_rw.save_stage_checkpoint_progress( StageId::MerkleExecute, vec![], diff --git a/crates/stages/src/stages/hashing_storage.rs b/crates/stages/src/stages/hashing_storage.rs index 62bd020acd7a2..2580b58c9783b 100644 --- a/crates/stages/src/stages/hashing_storage.rs +++ b/crates/stages/src/stages/hashing_storage.rs @@ -175,7 +175,7 @@ impl Stage for StorageHashingStage { // iterate over plain state and get newest storage value. // Assumption we are okay with is that plain state represent // `previous_stage_progress` state. - let storages = provider.plainstate_storages(lists)?; + let storages = provider.plain_state_storages(lists)?; provider.insert_storage_for_hashing(storages)?; } diff --git a/crates/stages/src/test_utils/runner.rs b/crates/stages/src/test_utils/runner.rs index ab8e231363deb..9bc08638d34ff 100644 --- a/crates/stages/src/test_utils/runner.rs +++ b/crates/stages/src/test_utils/runner.rs @@ -1,9 +1,9 @@ use super::TestTransaction; use crate::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; use reth_db::DatabaseEnv; -use reth_interfaces::{db::DatabaseError, RethError}; +use reth_interfaces::db::DatabaseError; use reth_primitives::MAINNET; -use reth_provider::ProviderFactory; +use reth_provider::{ProviderError, ProviderFactory}; use std::{borrow::Borrow, sync::Arc}; use tokio::sync::oneshot; @@ -14,7 +14,7 @@ pub(crate) enum TestRunnerError { #[error(transparent)] Internal(#[from] Box), #[error(transparent)] - Interface(#[from] RethError), + Provider(#[from] ProviderError), } /// A generic test runner for stages. 
diff --git a/crates/storage/db/src/snapshot/cursor.rs b/crates/storage/db/src/snapshot/cursor.rs index 403183f930a12..0f5d3e946632b 100644 --- a/crates/storage/db/src/snapshot/cursor.rs +++ b/crates/storage/db/src/snapshot/cursor.rs @@ -1,7 +1,7 @@ use super::mask::{ColumnSelectorOne, ColumnSelectorThree, ColumnSelectorTwo}; use crate::table::Decompress; use derive_more::{Deref, DerefMut}; -use reth_interfaces::{RethError, RethResult}; +use reth_interfaces::provider::ProviderResult; use reth_nippy_jar::{MmapHandle, NippyJar, NippyJarCursor}; use reth_primitives::{snapshot::SegmentHeader, B256}; @@ -11,10 +11,7 @@ pub struct SnapshotCursor<'a>(NippyJarCursor<'a, SegmentHeader>); impl<'a> SnapshotCursor<'a> { /// Returns a new [`SnapshotCursor`]. - pub fn new( - jar: &'a NippyJar, - mmap_handle: MmapHandle, - ) -> Result { + pub fn new(jar: &'a NippyJar, mmap_handle: MmapHandle) -> ProviderResult { Ok(Self(NippyJarCursor::with_handle(jar, mmap_handle)?)) } @@ -29,7 +26,7 @@ impl<'a> SnapshotCursor<'a> { &mut self, key_or_num: KeyOrNumber<'_>, mask: usize, - ) -> RethResult>> { + ) -> ProviderResult>> { let row = match key_or_num { KeyOrNumber::Key(k) => self.row_by_key_with_cols(k, mask), KeyOrNumber::Number(n) => { @@ -48,7 +45,7 @@ impl<'a> SnapshotCursor<'a> { pub fn get_one( &mut self, key_or_num: KeyOrNumber<'_>, - ) -> RethResult> { + ) -> ProviderResult> { let row = self.get(key_or_num, M::MASK)?; match row { @@ -61,7 +58,7 @@ impl<'a> SnapshotCursor<'a> { pub fn get_two( &mut self, key_or_num: KeyOrNumber<'_>, - ) -> RethResult> { + ) -> ProviderResult> { let row = self.get(key_or_num, M::MASK)?; match row { @@ -75,7 +72,7 @@ impl<'a> SnapshotCursor<'a> { pub fn get_three( &mut self, key_or_num: KeyOrNumber<'_>, - ) -> RethResult> { + ) -> ProviderResult> { let row = self.get(key_or_num, M::MASK)?; match row { diff --git a/crates/storage/db/src/snapshot/generation.rs b/crates/storage/db/src/snapshot/generation.rs index 5a2088ed60c4a..df6a68a1e3650 100644 --- a/crates/storage/db/src/snapshot/generation.rs +++ b/crates/storage/db/src/snapshot/generation.rs @@ -5,9 +5,8 @@ use crate::{ RawKey, RawTable, }; -use reth_interfaces::RethResult; +use reth_interfaces::provider::ProviderResult; use reth_nippy_jar::{ColumnResult, NippyJar, PHFKey}; - use reth_tracing::tracing::*; use serde::{Deserialize, Serialize}; use std::{error::Error as StdError, ops::RangeInclusive}; @@ -46,7 +45,7 @@ macro_rules! generate_snapshot_func { keys: Option>>, row_count: usize, nippy_jar: &mut NippyJar - ) -> RethResult<()> + ) -> ProviderResult<()> where K: Key + Copy { let additional = additional.unwrap_or_default(); diff --git a/crates/storage/nippy-jar/src/error.rs b/crates/storage/nippy-jar/src/error.rs index dbb37c1f868d4..16a01ed05294a 100644 --- a/crates/storage/nippy-jar/src/error.rs +++ b/crates/storage/nippy-jar/src/error.rs @@ -1,7 +1,7 @@ use thiserror::Error; /// Errors associated with [`crate::NippyJar`]. 
-#[derive(Debug, Error)] +#[derive(Error, Debug)] pub enum NippyJarError { #[error(transparent)] Internal(#[from] Box), diff --git a/crates/storage/provider/src/providers/bundle_state_provider.rs b/crates/storage/provider/src/providers/bundle_state_provider.rs index 8c83ebe15cab2..46d9ae702ffd7 100644 --- a/crates/storage/provider/src/providers/bundle_state_provider.rs +++ b/crates/storage/provider/src/providers/bundle_state_provider.rs @@ -2,7 +2,7 @@ use crate::{ bundle_state::BundleStateWithReceipts, AccountReader, BlockHashReader, BundleStateDataProvider, StateProvider, StateRootProvider, }; -use reth_interfaces::{provider::ProviderError, RethResult}; +use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{trie::AccountProof, Account, Address, BlockNumber, Bytecode, B256}; /// A state provider that either resolves to data in a wrapped [`crate::BundleStateWithReceipts`], @@ -27,7 +27,7 @@ impl BundleStateProvider BlockHashReader for BundleStateProvider { - fn block_hash(&self, block_number: BlockNumber) -> RethResult> { + fn block_hash(&self, block_number: BlockNumber) -> ProviderResult> { let block_hash = self.post_state_data_provider.block_hash(block_number); if block_hash.is_some() { return Ok(block_hash) @@ -39,7 +39,7 @@ impl BlockHashReader &self, _start: BlockNumber, _end: BlockNumber, - ) -> RethResult> { + ) -> ProviderResult> { unimplemented!() } } @@ -47,7 +47,7 @@ impl BlockHashReader impl AccountReader for BundleStateProvider { - fn basic_account(&self, address: Address) -> RethResult> { + fn basic_account(&self, address: Address) -> ProviderResult> { if let Some(account) = self.post_state_data_provider.state().account(&address) { Ok(account) } else { @@ -59,7 +59,7 @@ impl AccountReader impl StateRootProvider for BundleStateProvider { - fn state_root(&self, post_state: &BundleStateWithReceipts) -> RethResult { + fn state_root(&self, post_state: &BundleStateWithReceipts) -> ProviderResult { let mut state = self.post_state_data_provider.state().clone(); state.extend(post_state.clone()); self.state_provider.state_root(&state) @@ -73,7 +73,7 @@ impl StateProvider &self, account: Address, storage_key: reth_primitives::StorageKey, - ) -> RethResult> { + ) -> ProviderResult> { let u256_storage_key = storage_key.into(); if let Some(value) = self.post_state_data_provider.state().storage(&account, u256_storage_key) @@ -84,7 +84,7 @@ impl StateProvider self.state_provider.storage(account, storage_key) } - fn bytecode_by_hash(&self, code_hash: B256) -> RethResult> { + fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult> { if let Some(bytecode) = self.post_state_data_provider.state().bytecode(&code_hash) { return Ok(Some(bytecode)) } @@ -92,7 +92,7 @@ impl StateProvider self.state_provider.bytecode_by_hash(code_hash) } - fn proof(&self, _address: Address, _keys: &[B256]) -> RethResult { - Err(ProviderError::StateRootNotAvailableForHistoricalBlock.into()) + fn proof(&self, _address: Address, _keys: &[B256]) -> ProviderResult { + Err(ProviderError::StateRootNotAvailableForHistoricalBlock) } } diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 357a0919674d7..38b4be901d27c 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -9,7 +9,7 @@ use crate::{ TransactionVariant, TransactionsProvider, WithdrawalsProvider, }; use reth_db::{database::Database, init_db, models::StoredBlockBodyIndices, 
DatabaseEnv}; -use reth_interfaces::{db::LogLevel, RethError, RethResult}; +use reth_interfaces::{db::LogLevel, provider::ProviderResult, RethError, RethResult}; use reth_primitives::{ snapshot::HighestSnapshots, stage::{StageCheckpoint, StageId}, @@ -49,7 +49,7 @@ impl ProviderFactory { /// Returns a provider with a created `DbTx` inside, which allows fetching data from the /// database using different types of providers. Example: [`HeaderProvider`] /// [`BlockHashReader`]. This may fail if the inner read database transaction fails to open. - pub fn provider(&self) -> RethResult> { + pub fn provider(&self) -> ProviderResult> { let mut provider = DatabaseProvider::new(self.db.tx()?, self.chain_spec.clone()); if let Some(snapshot_provider) = &self.snapshot_provider { @@ -63,7 +63,7 @@ impl ProviderFactory { /// data from the database using different types of providers. Example: [`HeaderProvider`] /// [`BlockHashReader`]. This may fail if the inner read/write database transaction fails to /// open. - pub fn provider_rw(&self) -> RethResult> { + pub fn provider_rw(&self) -> ProviderResult> { let mut provider = DatabaseProvider::new_rw(self.db.tx_mut()?, self.chain_spec.clone()); if let Some(snapshot_provider) = &self.snapshot_provider { @@ -122,7 +122,7 @@ impl Clone for ProviderFactory { impl ProviderFactory { /// Storage provider for latest block - pub fn latest(&self) -> RethResult> { + pub fn latest(&self) -> ProviderResult> { trace!(target: "providers::db", "Returning latest state provider"); Ok(Box::new(LatestStateProvider::new(self.db.tx()?))) } @@ -131,7 +131,7 @@ impl ProviderFactory { fn state_provider_by_block_number( &self, mut block_number: BlockNumber, - ) -> RethResult> { + ) -> ProviderResult> { let provider = self.provider()?; if block_number == provider.best_block_number().unwrap_or_default() && @@ -174,14 +174,17 @@ impl ProviderFactory { pub fn history_by_block_number( &self, block_number: BlockNumber, - ) -> RethResult> { + ) -> ProviderResult> { let state_provider = self.state_provider_by_block_number(block_number)?; trace!(target: "providers::db", ?block_number, "Returning historical state provider for block number"); Ok(state_provider) } /// Storage provider for state at that given block hash - pub fn history_by_block_hash(&self, block_hash: BlockHash) -> RethResult> { + pub fn history_by_block_hash( + &self, + block_hash: BlockHash, + ) -> ProviderResult> { let block_number = self .provider()? .block_number(block_hash)? 
@@ -194,34 +197,34 @@ impl ProviderFactory { } impl HeaderProvider for ProviderFactory { - fn header(&self, block_hash: &BlockHash) -> RethResult> { + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { self.provider()?.header(block_hash) } - fn header_by_number(&self, num: BlockNumber) -> RethResult> { + fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { self.provider()?.header_by_number(num) } - fn header_td(&self, hash: &BlockHash) -> RethResult> { + fn header_td(&self, hash: &BlockHash) -> ProviderResult> { self.provider()?.header_td(hash) } - fn header_td_by_number(&self, number: BlockNumber) -> RethResult> { + fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult> { self.provider()?.header_td_by_number(number) } - fn headers_range(&self, range: impl RangeBounds) -> RethResult> { + fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { self.provider()?.headers_range(range) } - fn sealed_header(&self, number: BlockNumber) -> RethResult> { + fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { self.provider()?.sealed_header(number) } fn sealed_headers_range( &self, range: impl RangeBounds, - ) -> RethResult> { + ) -> ProviderResult> { self.provider()?.sealed_headers_range(range) } @@ -229,13 +232,13 @@ impl HeaderProvider for ProviderFactory { &self, range: impl RangeBounds, predicate: impl FnMut(&SealedHeader) -> bool, - ) -> RethResult> { + ) -> ProviderResult> { self.provider()?.sealed_headers_while(range, predicate) } } impl BlockHashReader for ProviderFactory { - fn block_hash(&self, number: u64) -> RethResult> { + fn block_hash(&self, number: u64) -> ProviderResult> { self.provider()?.block_hash(number) } @@ -243,54 +246,54 @@ impl BlockHashReader for ProviderFactory { &self, start: BlockNumber, end: BlockNumber, - ) -> RethResult> { + ) -> ProviderResult> { self.provider()?.canonical_hashes_range(start, end) } } impl BlockNumReader for ProviderFactory { - fn chain_info(&self) -> RethResult { + fn chain_info(&self) -> ProviderResult { self.provider()?.chain_info() } - fn best_block_number(&self) -> RethResult { + fn best_block_number(&self) -> ProviderResult { self.provider()?.best_block_number() } - fn last_block_number(&self) -> RethResult { + fn last_block_number(&self) -> ProviderResult { self.provider()?.last_block_number() } - fn block_number(&self, hash: B256) -> RethResult> { + fn block_number(&self, hash: B256) -> ProviderResult> { self.provider()?.block_number(hash) } } impl BlockReader for ProviderFactory { - fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> RethResult> { + fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { self.provider()?.find_block_by_hash(hash, source) } - fn block(&self, id: BlockHashOrNumber) -> RethResult> { + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { self.provider()?.block(id) } - fn pending_block(&self) -> RethResult> { + fn pending_block(&self) -> ProviderResult> { self.provider()?.pending_block() } - fn pending_block_and_receipts(&self) -> RethResult)>> { + fn pending_block_and_receipts(&self) -> ProviderResult)>> { self.provider()?.pending_block_and_receipts() } - fn ommers(&self, id: BlockHashOrNumber) -> RethResult>> { + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { self.provider()?.ommers(id) } fn block_body_indices( &self, number: BlockNumber, - ) -> RethResult> { + ) -> ProviderResult> { self.provider()?.block_body_indices(number) } @@ -298,86 +301,89 @@ impl BlockReader for ProviderFactory 
{ &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> RethResult> { + ) -> ProviderResult> { self.provider()?.block_with_senders(id, transaction_kind) } - fn block_range(&self, range: RangeInclusive) -> RethResult> { + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { self.provider()?.block_range(range) } } impl TransactionsProvider for ProviderFactory { - fn transaction_id(&self, tx_hash: TxHash) -> RethResult> { + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { self.provider()?.transaction_id(tx_hash) } - fn transaction_by_id(&self, id: TxNumber) -> RethResult> { + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { self.provider()?.transaction_by_id(id) } fn transaction_by_id_no_hash( &self, id: TxNumber, - ) -> RethResult> { + ) -> ProviderResult> { self.provider()?.transaction_by_id_no_hash(id) } - fn transaction_by_hash(&self, hash: TxHash) -> RethResult> { + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { self.provider()?.transaction_by_hash(hash) } fn transaction_by_hash_with_meta( &self, tx_hash: TxHash, - ) -> RethResult> { + ) -> ProviderResult> { self.provider()?.transaction_by_hash_with_meta(tx_hash) } - fn transaction_block(&self, id: TxNumber) -> RethResult> { + fn transaction_block(&self, id: TxNumber) -> ProviderResult> { self.provider()?.transaction_block(id) } fn transactions_by_block( &self, id: BlockHashOrNumber, - ) -> RethResult>> { + ) -> ProviderResult>> { self.provider()?.transactions_by_block(id) } fn transactions_by_block_range( &self, range: impl RangeBounds, - ) -> RethResult>> { + ) -> ProviderResult>> { self.provider()?.transactions_by_block_range(range) } fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> RethResult> { + ) -> ProviderResult> { self.provider()?.transactions_by_tx_range(range) } - fn senders_by_tx_range(&self, range: impl RangeBounds) -> RethResult> { + fn senders_by_tx_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { self.provider()?.senders_by_tx_range(range) } - fn transaction_sender(&self, id: TxNumber) -> RethResult> { + fn transaction_sender(&self, id: TxNumber) -> ProviderResult> { self.provider()?.transaction_sender(id) } } impl ReceiptProvider for ProviderFactory { - fn receipt(&self, id: TxNumber) -> RethResult> { + fn receipt(&self, id: TxNumber) -> ProviderResult> { self.provider()?.receipt(id) } - fn receipt_by_hash(&self, hash: TxHash) -> RethResult> { + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { self.provider()?.receipt_by_hash(hash) } - fn receipts_by_block(&self, block: BlockHashOrNumber) -> RethResult>> { + fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>> { self.provider()?.receipts_by_block(block) } } @@ -387,21 +393,21 @@ impl WithdrawalsProvider for ProviderFactory { &self, id: BlockHashOrNumber, timestamp: u64, - ) -> RethResult>> { + ) -> ProviderResult>> { self.provider()?.withdrawals_by_block(id, timestamp) } - fn latest_withdrawal(&self) -> RethResult> { + fn latest_withdrawal(&self) -> ProviderResult> { self.provider()?.latest_withdrawal() } } impl StageCheckpointReader for ProviderFactory { - fn get_stage_checkpoint(&self, id: StageId) -> RethResult> { + fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { self.provider()?.get_stage_checkpoint(id) } - fn get_stage_checkpoint_progress(&self, id: StageId) -> RethResult>> { + fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult>> { 
self.provider()?.get_stage_checkpoint_progress(id) } } @@ -412,7 +418,7 @@ impl EvmEnvProvider for ProviderFactory { cfg: &mut CfgEnv, block_env: &mut BlockEnv, at: BlockHashOrNumber, - ) -> RethResult<()> { + ) -> ProviderResult<()> { self.provider()?.fill_env_at(cfg, block_env, at) } @@ -421,11 +427,15 @@ impl EvmEnvProvider for ProviderFactory { cfg: &mut CfgEnv, block_env: &mut BlockEnv, header: &Header, - ) -> RethResult<()> { + ) -> ProviderResult<()> { self.provider()?.fill_env_with_header(cfg, block_env, header) } - fn fill_block_env_at(&self, block_env: &mut BlockEnv, at: BlockHashOrNumber) -> RethResult<()> { + fn fill_block_env_at( + &self, + block_env: &mut BlockEnv, + at: BlockHashOrNumber, + ) -> ProviderResult<()> { self.provider()?.fill_block_env_at(block_env, at) } @@ -433,15 +443,15 @@ impl EvmEnvProvider for ProviderFactory { &self, block_env: &mut BlockEnv, header: &Header, - ) -> RethResult<()> { + ) -> ProviderResult<()> { self.provider()?.fill_block_env_with_header(block_env, header) } - fn fill_cfg_env_at(&self, cfg: &mut CfgEnv, at: BlockHashOrNumber) -> RethResult<()> { + fn fill_cfg_env_at(&self, cfg: &mut CfgEnv, at: BlockHashOrNumber) -> ProviderResult<()> { self.provider()?.fill_cfg_env_at(cfg, at) } - fn fill_cfg_env_with_header(&self, cfg: &mut CfgEnv, header: &Header) -> RethResult<()> { + fn fill_cfg_env_with_header(&self, cfg: &mut CfgEnv, header: &Header) -> ProviderResult<()> { self.provider()?.fill_cfg_env_with_header(cfg, header) } } @@ -456,7 +466,10 @@ where } impl PruneCheckpointReader for ProviderFactory { - fn get_prune_checkpoint(&self, segment: PruneSegment) -> RethResult> { + fn get_prune_checkpoint( + &self, + segment: PruneSegment, + ) -> ProviderResult> { self.provider()?.get_prune_checkpoint(segment) } } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index cf29d76ec547b..198aeb5533acf 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -24,11 +24,7 @@ use reth_db::{ transaction::{DbTx, DbTxMut}, BlockNumberList, DatabaseError, }; -use reth_interfaces::{ - executor::{BlockExecutionError, BlockValidationError}, - provider::RootMismatch, - RethError, RethResult, -}; +use reth_interfaces::provider::{ProviderResult, RootMismatch}; use reth_primitives::{ keccak256, revm::{ @@ -82,7 +78,7 @@ impl DerefMut for DatabaseProviderRW<'_, DB> { impl<'this, DB: Database> DatabaseProviderRW<'this, DB> { /// Commit database transaction - pub fn commit(self) -> RethResult { + pub fn commit(self) -> ProviderResult { self.0.commit() } @@ -129,7 +125,7 @@ fn unwind_history_shards( start_key: T::Key, block_number: BlockNumber, mut shard_belongs_to_key: impl FnMut(&T::Key) -> bool, -) -> RethResult> +) -> ProviderResult> where T: Table, T::Key: AsRef>, @@ -201,7 +197,7 @@ impl DatabaseProvider { impl DatabaseProvider { /// Commit database transaction. - pub fn commit(self) -> RethResult { + pub fn commit(self) -> ProviderResult { Ok(self.tx.commit()?) 
}
@@ -234,7 +230,7 @@ impl DatabaseProvider {
     fn unwind_or_peek_state(
         &self,
         range: RangeInclusive<BlockNumber>,
-    ) -> RethResult<BundleStateWithReceipts> {
+    ) -> ProviderResult<BundleStateWithReceipts> {
         if range.is_empty() {
             return Ok(BundleStateWithReceipts::default())
         }
@@ -409,7 +405,7 @@ impl DatabaseProvider {
     pub(crate) fn get_take_block_transaction_range(
         &self,
         range: impl RangeBounds<BlockNumber> + Clone,
-    ) -> RethResult)>> {
+    ) -> ProviderResult)>> {
         // Read the range of block bodies to get all transaction ids in this range.
         let block_bodies = self.get_or_take::(range)?;
@@ -477,7 +473,7 @@ impl DatabaseProvider {
             missing_senders.iter().map(|(_, _, tx)| *tx).collect::<Vec<_>>(),
             missing_senders.len(),
         )
-        .ok_or(BlockExecutionError::Validation(BlockValidationError::SenderRecoveryError))?;
+        .ok_or(ProviderError::SenderRecoveryError)?;
 
         // Insert recovered senders along with tx numbers at the corresponding indexes to the
         // original `senders` vector
@@ -547,7 +543,7 @@ impl DatabaseProvider {
         &self,
         chain_spec: &ChainSpec,
         range: impl RangeBounds<BlockNumber> + Clone,
-    ) -> RethResult> {
+    ) -> ProviderResult> {
         // For blocks we need the Headers, Bodies, Uncles, Withdrawals, Transactions, and Signers
         let block_headers = self.get_or_take::(range.clone())?;
@@ -746,7 +742,7 @@ impl DatabaseProvider {
     /// Load shard and remove it. If list is empty, last shard was full or
     /// there are no shards at all.
-    fn take_shard(&self, key: T::Key) -> RethResult>
+    fn take_shard(&self, key: T::Key) -> ProviderResult>
     where
         T: Table,
     {
@@ -772,7 +768,7 @@ impl DatabaseProvider {
         &self,
         index_updates: BTreeMap>,
         mut sharded_key_factory: impl FnMut(P, BlockNumber) -> T::Key,
-    ) -> RethResult<()>
+    ) -> ProviderResult<()>
     where
         P: Copy,
         T: Table,
     {
@@ -806,7 +802,7 @@ impl DatabaseProvider {
 }
 
 impl AccountReader for DatabaseProvider {
-    fn basic_account(&self, address: Address) -> RethResult<Option<Account>> {
+    fn basic_account(&self, address: Address) -> ProviderResult<Option<Account>> {
         Ok(self.tx.get::(address)?)
     }
 }
@@ -815,7 +811,7 @@ impl AccountExtReader for DatabaseProvider {
     fn changed_accounts_with_range(
         &self,
         range: impl RangeBounds<BlockNumber>,
-    ) -> RethResult> {
+    ) -> ProviderResult> {
         self.tx
             .cursor_read::()?
             .walk_range(range)?
@@ -828,7 +824,7 @@ impl AccountExtReader for DatabaseProvider {
     fn basic_accounts(
         &self,
         iter: impl IntoIterator<Item = Address>,
-    ) -> RethResult<Vec<(Address, Option<Account>)>> {
+    ) -> ProviderResult<Vec<(Address, Option<Account>)>> {
         let mut plain_accounts = self.tx.cursor_read::()?;
         Ok(iter
             .into_iter()
@@ -839,12 +835,12 @@ impl AccountExtReader for DatabaseProvider {
     fn changed_accounts_and_blocks_with_range(
         &self,
         range: RangeInclusive<BlockNumber>,
-    ) -> RethResult<BTreeMap<Address, Vec<u64>>> {
+    ) -> ProviderResult<BTreeMap<Address, Vec<u64>>> {
         let mut changeset_cursor = self.tx.cursor_read::()?;
 
         let account_transitions = changeset_cursor.walk_range(range)?.try_fold(
             BTreeMap::new(),
-            |mut accounts: BTreeMap<Address, Vec<u64>>, entry| -> RethResult<_> {
+            |mut accounts: BTreeMap<Address, Vec<u64>>, entry| -> ProviderResult<_> {
                 let (index, account) = entry?;
                 accounts.entry(account.address).or_default().push(index);
                 Ok(accounts)
@@ -859,12 +855,12 @@ impl ChangeSetReader for DatabaseProvider {
     fn account_block_changeset(
         &self,
         block_number: BlockNumber,
-    ) -> RethResult<Vec<AccountBeforeTx>> {
+    ) -> ProviderResult<Vec<AccountBeforeTx>> {
         let range = block_number..=block_number;
         self.tx
             .cursor_read::()?
             .walk_range(range)?
- .map(|result| -> RethResult<_> { + .map(|result| -> ProviderResult<_> { let (_, account_before) = result?; Ok(account_before) }) @@ -873,7 +869,7 @@ impl ChangeSetReader for DatabaseProvider { } impl HeaderProvider for DatabaseProvider { - fn header(&self, block_hash: &BlockHash) -> RethResult> { + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { if let Some(num) = self.block_number(*block_hash)? { Ok(self.header_by_number(num)?) } else { @@ -881,11 +877,11 @@ impl HeaderProvider for DatabaseProvider { } } - fn header_by_number(&self, num: BlockNumber) -> RethResult> { + fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { Ok(self.tx.get::(num)?) } - fn header_td(&self, block_hash: &BlockHash) -> RethResult> { + fn header_td(&self, block_hash: &BlockHash) -> ProviderResult> { if let Some(num) = self.block_number(*block_hash)? { self.header_td_by_number(num) } else { @@ -893,7 +889,7 @@ impl HeaderProvider for DatabaseProvider { } } - fn header_td_by_number(&self, number: BlockNumber) -> RethResult> { + fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult> { if let Some(td) = self.chain_spec.final_paris_total_difficulty(number) { // if this block is higher than the final paris(merge) block, return the final paris // difficulty @@ -903,15 +899,15 @@ impl HeaderProvider for DatabaseProvider { Ok(self.tx.get::(number)?.map(|td| td.0)) } - fn headers_range(&self, range: impl RangeBounds) -> RethResult> { + fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { let mut cursor = self.tx.cursor_read::()?; cursor .walk_range(range)? .map(|result| result.map(|(_, header)| header).map_err(Into::into)) - .collect::>>() + .collect::>>() } - fn sealed_header(&self, number: BlockNumber) -> RethResult> { + fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { if let Some(header) = self.header_by_number(number)? { let hash = self .block_hash(number)? @@ -926,7 +922,7 @@ impl HeaderProvider for DatabaseProvider { &self, range: impl RangeBounds, mut predicate: impl FnMut(&SealedHeader) -> bool, - ) -> RethResult> { + ) -> ProviderResult> { let mut headers = vec![]; for entry in self.tx.cursor_read::()?.walk_range(range)? { let (number, header) = entry?; @@ -944,7 +940,7 @@ impl HeaderProvider for DatabaseProvider { } impl BlockHashReader for DatabaseProvider { - fn block_hash(&self, number: u64) -> RethResult> { + fn block_hash(&self, number: u64) -> ProviderResult> { Ok(self.tx.get::(number)?) } @@ -952,41 +948,41 @@ impl BlockHashReader for DatabaseProvider { &self, start: BlockNumber, end: BlockNumber, - ) -> RethResult> { + ) -> ProviderResult> { let range = start..end; let mut cursor = self.tx.cursor_read::()?; cursor .walk_range(range)? .map(|result| result.map(|(_, hash)| hash).map_err(Into::into)) - .collect::>>() + .collect::>>() } } impl BlockNumReader for DatabaseProvider { - fn chain_info(&self) -> RethResult { + fn chain_info(&self) -> ProviderResult { let best_number = self.best_block_number()?; let best_hash = self.block_hash(best_number)?.unwrap_or_default(); Ok(ChainInfo { best_hash, best_number }) } - fn best_block_number(&self) -> RethResult { + fn best_block_number(&self) -> ProviderResult { Ok(self .get_stage_checkpoint(StageId::Finish)? 
.map(|checkpoint| checkpoint.block_number) .unwrap_or_default()) } - fn last_block_number(&self) -> RethResult { + fn last_block_number(&self) -> ProviderResult { Ok(self.tx.cursor_read::()?.last()?.unwrap_or_default().0) } - fn block_number(&self, hash: B256) -> RethResult> { + fn block_number(&self, hash: B256) -> ProviderResult> { Ok(self.tx.get::(hash)?) } } impl BlockReader for DatabaseProvider { - fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> RethResult> { + fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { if source.is_database() { self.block(hash.into()) } else { @@ -999,7 +995,7 @@ impl BlockReader for DatabaseProvider { /// If the header for this block is not found, this returns `None`. /// If the header is found, but the transactions either do not exist, or are not indexed, this /// will return None. - fn block(&self, id: BlockHashOrNumber) -> RethResult> { + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { if let Some(number) = self.convert_hash_or_number(id)? { if let Some(header) = self.header_by_number(number)? { let withdrawals = self.withdrawals_by_block(number.into(), header.timestamp)?; @@ -1020,15 +1016,15 @@ impl BlockReader for DatabaseProvider { Ok(None) } - fn pending_block(&self) -> RethResult> { + fn pending_block(&self) -> ProviderResult> { Ok(None) } - fn pending_block_and_receipts(&self) -> RethResult)>> { + fn pending_block_and_receipts(&self) -> ProviderResult)>> { Ok(None) } - fn ommers(&self, id: BlockHashOrNumber) -> RethResult>> { + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { if let Some(number) = self.convert_hash_or_number(id)? { // If the Paris (Merge) hardfork block is known and block is after it, return empty // ommers. @@ -1043,7 +1039,7 @@ impl BlockReader for DatabaseProvider { Ok(None) } - fn block_body_indices(&self, num: u64) -> RethResult> { + fn block_body_indices(&self, num: u64) -> ProviderResult> { Ok(self.tx.get::(num)?) } @@ -1059,7 +1055,7 @@ impl BlockReader for DatabaseProvider { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> RethResult> { + ) -> ProviderResult> { let Some(block_number) = self.convert_hash_or_number(id)? else { return Ok(None) }; let Some(header) = self.header_by_number(block_number)? 
else { return Ok(None) }; @@ -1102,7 +1098,7 @@ impl BlockReader for DatabaseProvider { Ok(Some(Block { header, body, ommers, withdrawals }.with_senders(senders))) } - fn block_range(&self, range: RangeInclusive) -> RethResult> { + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { if range.is_empty() { return Ok(Vec::new()) } @@ -1167,7 +1163,7 @@ impl TransactionsProviderExt for DatabaseProvider { fn transaction_hashes_by_range( &self, tx_range: Range, - ) -> RethResult> { + ) -> ProviderResult> { let mut tx_cursor = self.tx.cursor_read::()?; let tx_range_size = tx_range.clone().count(); let tx_walker = tx_cursor.walk_range(tx_range)?; @@ -1180,7 +1176,7 @@ impl TransactionsProviderExt for DatabaseProvider { fn calculate_hash( entry: Result<(TxNumber, TransactionSignedNoHash), DatabaseError>, rlp_buf: &mut Vec, - ) -> Result<(B256, TxNumber), Box> { + ) -> Result<(B256, TxNumber), Box> { let (tx_id, tx) = entry.map_err(|e| Box::new(e.into()))?; tx.transaction.encode_with_signature(&tx.signature, rlp_buf, false); Ok((keccak256(rlp_buf), tx_id)) @@ -1221,22 +1217,22 @@ impl TransactionsProviderExt for DatabaseProvider { /// Calculates the hash of the given transaction impl TransactionsProvider for DatabaseProvider { - fn transaction_id(&self, tx_hash: TxHash) -> RethResult> { + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { Ok(self.tx.get::(tx_hash)?) } - fn transaction_by_id(&self, id: TxNumber) -> RethResult> { + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { Ok(self.tx.get::(id)?.map(Into::into)) } fn transaction_by_id_no_hash( &self, id: TxNumber, - ) -> RethResult> { + ) -> ProviderResult> { Ok(self.tx.get::(id)?) } - fn transaction_by_hash(&self, hash: TxHash) -> RethResult> { + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { if let Some(id) = self.transaction_id(hash)? { Ok(self.transaction_by_id_no_hash(id)?.map(|tx| TransactionSigned { hash, @@ -1252,7 +1248,7 @@ impl TransactionsProvider for DatabaseProvider { fn transaction_by_hash_with_meta( &self, tx_hash: TxHash, - ) -> RethResult> { + ) -> ProviderResult> { let mut transaction_cursor = self.tx.cursor_read::()?; if let Some(transaction_id) = self.transaction_id(tx_hash)? { if let Some(tx) = self.transaction_by_id_no_hash(transaction_id)? { @@ -1292,7 +1288,7 @@ impl TransactionsProvider for DatabaseProvider { Ok(None) } - fn transaction_block(&self, id: TxNumber) -> RethResult> { + fn transaction_block(&self, id: TxNumber) -> ProviderResult> { let mut cursor = self.tx.cursor_read::()?; Ok(cursor.seek(id)?.map(|(_, bn)| bn)) } @@ -1300,7 +1296,7 @@ impl TransactionsProvider for DatabaseProvider { fn transactions_by_block( &self, id: BlockHashOrNumber, - ) -> RethResult>> { + ) -> ProviderResult>> { let mut tx_cursor = self.tx.cursor_read::()?; if let Some(block_number) = self.convert_hash_or_number(id)? { if let Some(body) = self.block_body_indices(block_number)? { @@ -1322,7 +1318,7 @@ impl TransactionsProvider for DatabaseProvider { fn transactions_by_block_range( &self, range: impl RangeBounds, - ) -> RethResult>> { + ) -> ProviderResult>> { let mut results = Vec::new(); let mut body_cursor = self.tx.cursor_read::()?; let mut tx_cursor = self.tx.cursor_read::()?; @@ -1346,7 +1342,7 @@ impl TransactionsProvider for DatabaseProvider { fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> RethResult> { + ) -> ProviderResult> { Ok(self .tx .cursor_read::()? @@ -1355,7 +1351,10 @@ impl TransactionsProvider for DatabaseProvider { .collect::, _>>()?) 
} - fn senders_by_tx_range(&self, range: impl RangeBounds) -> RethResult> { + fn senders_by_tx_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { Ok(self .tx .cursor_read::()? @@ -1364,17 +1363,17 @@ impl TransactionsProvider for DatabaseProvider { .collect::, _>>()?) } - fn transaction_sender(&self, id: TxNumber) -> RethResult> { + fn transaction_sender(&self, id: TxNumber) -> ProviderResult> { Ok(self.tx.get::(id)?) } } impl ReceiptProvider for DatabaseProvider { - fn receipt(&self, id: TxNumber) -> RethResult> { + fn receipt(&self, id: TxNumber) -> ProviderResult> { Ok(self.tx.get::(id)?) } - fn receipt_by_hash(&self, hash: TxHash) -> RethResult> { + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { if let Some(id) = self.transaction_id(hash)? { self.receipt(id) } else { @@ -1382,7 +1381,7 @@ impl ReceiptProvider for DatabaseProvider { } } - fn receipts_by_block(&self, block: BlockHashOrNumber) -> RethResult>> { + fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>> { if let Some(number) = self.convert_hash_or_number(block)? { if let Some(body) = self.block_body_indices(number)? { let tx_range = body.tx_num_range(); @@ -1407,7 +1406,7 @@ impl WithdrawalsProvider for DatabaseProvider { &self, id: BlockHashOrNumber, timestamp: u64, - ) -> RethResult>> { + ) -> ProviderResult>> { if self.chain_spec.is_shanghai_active_at_timestamp(timestamp) { if let Some(number) = self.convert_hash_or_number(id)? { // If we are past shanghai, then all blocks should have a withdrawal list, even if @@ -1423,7 +1422,7 @@ impl WithdrawalsProvider for DatabaseProvider { Ok(None) } - fn latest_withdrawal(&self) -> RethResult> { + fn latest_withdrawal(&self) -> ProviderResult> { let latest_block_withdrawal = self.tx.cursor_read::()?.last()?; Ok(latest_block_withdrawal .and_then(|(_, mut block_withdrawal)| block_withdrawal.withdrawals.pop())) @@ -1436,7 +1435,7 @@ impl EvmEnvProvider for DatabaseProvider { cfg: &mut CfgEnv, block_env: &mut BlockEnv, at: BlockHashOrNumber, - ) -> RethResult<()> { + ) -> ProviderResult<()> { let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?; let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?; self.fill_env_with_header(cfg, block_env, &header) @@ -1447,7 +1446,7 @@ impl EvmEnvProvider for DatabaseProvider { cfg: &mut CfgEnv, block_env: &mut BlockEnv, header: &Header, - ) -> RethResult<()> { + ) -> ProviderResult<()> { let total_difficulty = self .header_td_by_number(header.number)? .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?; @@ -1455,7 +1454,11 @@ impl EvmEnvProvider for DatabaseProvider { Ok(()) } - fn fill_block_env_at(&self, block_env: &mut BlockEnv, at: BlockHashOrNumber) -> RethResult<()> { + fn fill_block_env_at( + &self, + block_env: &mut BlockEnv, + at: BlockHashOrNumber, + ) -> ProviderResult<()> { let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?; let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?; @@ -1466,7 +1469,7 @@ impl EvmEnvProvider for DatabaseProvider { &self, block_env: &mut BlockEnv, header: &Header, - ) -> RethResult<()> { + ) -> ProviderResult<()> { let total_difficulty = self .header_td_by_number(header.number)? 
.ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?; @@ -1486,13 +1489,13 @@ impl EvmEnvProvider for DatabaseProvider { Ok(()) } - fn fill_cfg_env_at(&self, cfg: &mut CfgEnv, at: BlockHashOrNumber) -> RethResult<()> { + fn fill_cfg_env_at(&self, cfg: &mut CfgEnv, at: BlockHashOrNumber) -> ProviderResult<()> { let hash = self.convert_number(at)?.ok_or(ProviderError::HeaderNotFound(at))?; let header = self.header(&hash)?.ok_or(ProviderError::HeaderNotFound(at))?; self.fill_cfg_env_with_header(cfg, &header) } - fn fill_cfg_env_with_header(&self, cfg: &mut CfgEnv, header: &Header) -> RethResult<()> { + fn fill_cfg_env_with_header(&self, cfg: &mut CfgEnv, header: &Header) -> ProviderResult<()> { let total_difficulty = self .header_td_by_number(header.number)? .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?; @@ -1502,24 +1505,32 @@ impl EvmEnvProvider for DatabaseProvider { } impl StageCheckpointReader for DatabaseProvider { - fn get_stage_checkpoint(&self, id: StageId) -> RethResult> { + fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { Ok(self.tx.get::(id.to_string())?) } /// Get stage checkpoint progress. - fn get_stage_checkpoint_progress(&self, id: StageId) -> RethResult>> { + fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult>> { Ok(self.tx.get::(id.to_string())?) } } impl StageCheckpointWriter for DatabaseProvider { /// Save stage checkpoint progress. - fn save_stage_checkpoint_progress(&self, id: StageId, checkpoint: Vec) -> RethResult<()> { + fn save_stage_checkpoint_progress( + &self, + id: StageId, + checkpoint: Vec, + ) -> ProviderResult<()> { Ok(self.tx.put::(id.to_string(), checkpoint)?) } /// Save stage checkpoint. - fn save_stage_checkpoint(&self, id: StageId, checkpoint: StageCheckpoint) -> RethResult<()> { + fn save_stage_checkpoint( + &self, + id: StageId, + checkpoint: StageCheckpoint, + ) -> ProviderResult<()> { Ok(self.tx.put::(id.to_string(), checkpoint)?) } @@ -1527,7 +1538,7 @@ impl StageCheckpointWriter for DatabaseProvider { &self, block_number: BlockNumber, drop_stage_checkpoint: bool, - ) -> RethResult<()> { + ) -> ProviderResult<()> { // iterate over all existing stages in the table and update its progress. let mut cursor = self.tx.cursor_write::()?; for stage_id in StageId::ALL { @@ -1546,10 +1557,10 @@ impl StageCheckpointWriter for DatabaseProvider { } impl StorageReader for DatabaseProvider { - fn plainstate_storages( + fn plain_state_storages( &self, addresses_with_keys: impl IntoIterator)>, - ) -> RethResult)>> { + ) -> ProviderResult)>> { let mut plain_storage = self.tx.cursor_dup_read::()?; addresses_with_keys @@ -1557,22 +1568,22 @@ impl StorageReader for DatabaseProvider { .map(|(address, storage)| { storage .into_iter() - .map(|key| -> RethResult<_> { + .map(|key| -> ProviderResult<_> { Ok(plain_storage .seek_by_key_subkey(address, key)? .filter(|v| v.key == key) .unwrap_or_else(|| StorageEntry { key, value: Default::default() })) }) - .collect::>>() + .collect::>>() .map(|storage| (address, storage)) }) - .collect::>>() + .collect::>>() } fn changed_storages_with_range( &self, range: RangeInclusive, - ) -> RethResult>> { + ) -> ProviderResult>> { self.tx .cursor_read::()? .walk_range(BlockNumberAddress::range(range))? 
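`changed_accounts_and_blocks_with_range` above and the storage variant that follows both use the same shape: walk a changeset cursor and `try_fold` the fallible entries into a `BTreeMap`, so the first cursor error aborts the whole walk. A simplified, self-contained sketch of that grouping pattern, with plain tuples standing in for cursor entries:

    // Sketch of fallible changeset grouping via try_fold.
    // Entries are (block, address) pairs; errors are plain Strings.
    use std::collections::BTreeMap;

    fn group_changes(
        entries: impl IntoIterator<Item = Result<(u64, &'static str), String>>,
    ) -> Result<BTreeMap<&'static str, Vec<u64>>, String> {
        entries.into_iter().try_fold(
            BTreeMap::new(),
            |mut map: BTreeMap<&'static str, Vec<u64>>, entry| {
                // `?` propagates the first failed entry and stops the fold.
                let (block, address) = entry?;
                map.entry(address).or_default().push(block);
                Ok(map)
            },
        )
    }

    fn main() {
        let entries = vec![Ok((1, "alice")), Ok((2, "alice")), Ok((2, "bob"))];
        println!("{:?}", group_changes(entries));
    }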
@@ -1588,13 +1599,13 @@ impl StorageReader for DatabaseProvider { fn changed_storages_and_blocks_with_range( &self, range: RangeInclusive, - ) -> RethResult>> { + ) -> ProviderResult>> { let mut changeset_cursor = self.tx.cursor_read::()?; let storage_changeset_lists = changeset_cursor.walk_range(BlockNumberAddress::range(range))?.try_fold( BTreeMap::new(), - |mut storages: BTreeMap<(Address, B256), Vec>, entry| -> RethResult<_> { + |mut storages: BTreeMap<(Address, B256), Vec>, entry| -> ProviderResult<_> { let (index, storage) = entry?; storages .entry((index.address(), storage.key)) @@ -1614,7 +1625,7 @@ impl HashingWriter for DatabaseProvider { range: RangeInclusive, end_block_hash: B256, expected_state_root: B256, - ) -> RethResult<()> { + ) -> ProviderResult<()> { // Initialize prefix sets. let mut account_prefix_set = PrefixSetMut::default(); let mut storage_prefix_set: HashMap = HashMap::default(); @@ -1625,7 +1636,7 @@ impl HashingWriter for DatabaseProvider { // storage hashing stage { let lists = self.changed_storages_with_range(range.clone())?; - let storages = self.plainstate_storages(lists)?; + let storages = self.plain_state_storages(lists)?; let storage_entries = self.insert_storage_for_hashing(storages)?; for (hashed_address, hashed_slots) in storage_entries { account_prefix_set.insert(Nibbles::unpack(hashed_address)); @@ -1670,8 +1681,7 @@ impl HashingWriter for DatabaseProvider { root: GotExpected { got: state_root, expected: expected_state_root }, block_number: *range.end(), block_hash: end_block_hash, - })) - .into()) + }))) } trie_updates.flush(&self.tx)?; } @@ -1685,7 +1695,7 @@ impl HashingWriter for DatabaseProvider { fn unwind_storage_hashing( &self, range: Range, - ) -> RethResult>> { + ) -> ProviderResult>> { let mut hashed_storage = self.tx.cursor_dup_write::()?; // Aggregate all block changesets and make list of accounts that have been changed. @@ -1719,7 +1729,7 @@ impl HashingWriter for DatabaseProvider { hashed_storages .into_iter() // Apply values to HashedStorage (if Value is zero just remove it); - .try_for_each(|((hashed_address, key), value)| -> RethResult<()> { + .try_for_each(|((hashed_address, key), value)| -> ProviderResult<()> { if hashed_storage .seek_by_key_subkey(hashed_address, key)? .filter(|entry| entry.key == key) @@ -1740,7 +1750,7 @@ impl HashingWriter for DatabaseProvider { fn insert_storage_for_hashing( &self, storages: impl IntoIterator)>, - ) -> RethResult>> { + ) -> ProviderResult>> { // hash values let hashed_storages = storages.into_iter().fold(BTreeMap::new(), |mut map, (address, storage)| { @@ -1761,7 +1771,7 @@ impl HashingWriter for DatabaseProvider { // Hash the address and key and apply them to HashedStorage (if Storage is None // just remove it); hashed_storages.into_iter().try_for_each(|(hashed_address, storage)| { - storage.into_iter().try_for_each(|(key, value)| -> RethResult<()> { + storage.into_iter().try_for_each(|(key, value)| -> ProviderResult<()> { if hashed_storage_cursor .seek_by_key_subkey(hashed_address, key)? .filter(|entry| entry.key == key) @@ -1783,7 +1793,7 @@ impl HashingWriter for DatabaseProvider { fn unwind_account_hashing( &self, range: RangeInclusive, - ) -> RethResult>> { + ) -> ProviderResult>> { let mut hashed_accounts_cursor = self.tx.cursor_write::()?; // Aggregate all block changesets and make a list of accounts that have been changed. 
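`insert_storage_for_hashing` and its account counterpart above reconcile the hashed tables with a `try_for_each` over `(key, Option<value>)` pairs: `Some` upserts the latest value, `None` deletes a previously stored entry. A hedged sketch of that upsert-or-delete step, with a `HashMap` standing in for the database cursor and a placeholder error type:

    // Sketch of the upsert-or-delete reconciliation over hashed state.
    // A HashMap stands in for the cursor; the real cursor ops are fallible.
    use std::collections::HashMap;

    fn apply_hashed_state(
        table: &mut HashMap<[u8; 32], u64>,
        updates: impl IntoIterator<Item = ([u8; 32], Option<u64>)>,
    ) -> Result<(), String> {
        updates.into_iter().try_for_each(|(hashed_address, account)| {
            match account {
                // Mirrors cursor.upsert: insert or overwrite the row.
                Some(account) => {
                    table.insert(hashed_address, account);
                }
                // Mirrors seek_exact + delete_current: drop the row if present.
                None => {
                    table.remove(&hashed_address);
                }
            }
            Ok(())
        })
    }

    fn main() {
        let mut table = HashMap::new();
        apply_hashed_state(&mut table, [([0u8; 32], Some(100)), ([0u8; 32], None)]).unwrap();
        println!("{} rows left", table.len());
    }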
@@ -1811,7 +1821,7 @@ impl HashingWriter for DatabaseProvider { hashed_accounts .iter() // Apply values to HashedState (if Account is None remove it); - .try_for_each(|(hashed_address, account)| -> RethResult<()> { + .try_for_each(|(hashed_address, account)| -> ProviderResult<()> { if let Some(account) = account { hashed_accounts_cursor.upsert(*hashed_address, *account)?; } else if hashed_accounts_cursor.seek_exact(*hashed_address)?.is_some() { @@ -1826,7 +1836,7 @@ impl HashingWriter for DatabaseProvider { fn insert_account_for_hashing( &self, accounts: impl IntoIterator)>, - ) -> RethResult>> { + ) -> ProviderResult>> { let mut hashed_accounts_cursor = self.tx.cursor_write::()?; let hashed_accounts = accounts.into_iter().fold( @@ -1837,7 +1847,7 @@ impl HashingWriter for DatabaseProvider { }, ); - hashed_accounts.iter().try_for_each(|(hashed_address, account)| -> RethResult<()> { + hashed_accounts.iter().try_for_each(|(hashed_address, account)| -> ProviderResult<()> { if let Some(account) = account { hashed_accounts_cursor.upsert(*hashed_address, *account)? } else if hashed_accounts_cursor.seek_exact(*hashed_address)?.is_some() { @@ -1851,7 +1861,7 @@ impl HashingWriter for DatabaseProvider { } impl HistoryWriter for DatabaseProvider { - fn update_history_indices(&self, range: RangeInclusive) -> RethResult<()> { + fn update_history_indices(&self, range: RangeInclusive) -> ProviderResult<()> { // account history stage { let indices = self.changed_accounts_and_blocks_with_range(range.clone())?; @@ -1870,7 +1880,7 @@ impl HistoryWriter for DatabaseProvider { fn insert_storage_history_index( &self, storage_transitions: BTreeMap<(Address, B256), Vec>, - ) -> RethResult<()> { + ) -> ProviderResult<()> { self.append_history_index::<_, tables::StorageHistory>( storage_transitions, |(address, storage_key), highest_block_number| { @@ -1882,14 +1892,14 @@ impl HistoryWriter for DatabaseProvider { fn insert_account_history_index( &self, account_transitions: BTreeMap>, - ) -> RethResult<()> { + ) -> ProviderResult<()> { self.append_history_index::<_, tables::AccountHistory>(account_transitions, ShardedKey::new) } fn unwind_storage_history_indices( &self, range: Range, - ) -> RethResult { + ) -> ProviderResult { let storage_changesets = self .tx .cursor_read::()? @@ -1939,7 +1949,7 @@ impl HistoryWriter for DatabaseProvider { fn unwind_account_history_indices( &self, range: RangeInclusive, - ) -> RethResult { + ) -> ProviderResult { let account_changeset = self .tx .cursor_read::()? 
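The history writers above all consume the same shape of input: a map from a changed key to the ascending list of block numbers that touched it, which the changeset readers build by folding a changeset walk into a `BTreeMap`. A toy model of that fold, with string keys standing in for `Address`/`B256` (this is not the real reth code):

    use std::collections::BTreeMap;

    fn main() {
        // (block_number, changed_key) pairs, in the order a changeset walk yields them.
        let changes = [(100u64, "alice"), (101, "bob"), (103, "alice")];

        // Fold into key -> ascending list of blocks that changed it: the shape
        // the insert_*_history_index writers expect.
        let mut transitions: BTreeMap<&str, Vec<u64>> = BTreeMap::new();
        for (block, key) in changes {
            transitions.entry(key).or_default().push(block);
        }

        assert_eq!(transitions["alice"], vec![100, 103]);
        assert_eq!(transitions["bob"], vec![101]);
    }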
@@ -1988,7 +1998,7 @@ impl BlockExecutionWriter for DatabaseProvider { &self, chain_spec: &ChainSpec, range: RangeInclusive, - ) -> RethResult { + ) -> ProviderResult { if TAKE { let storage_range = BlockNumberAddress::range(range.clone()); @@ -2053,8 +2063,7 @@ impl BlockExecutionWriter for DatabaseProvider { root: GotExpected { got: new_state_root, expected: parent_state_root }, block_number: parent_number, block_hash: parent_hash, - })) - .into()) + }))) } trie_updates.flush(&self.tx)?; } @@ -2086,7 +2095,7 @@ impl BlockWriter for DatabaseProvider { block: SealedBlock, senders: Option>, prune_modes: Option<&PruneModes>, - ) -> RethResult { + ) -> ProviderResult { let block_number = block.number; let mut durations_recorder = metrics::DurationsRecorder::default(); @@ -2138,9 +2147,8 @@ impl BlockWriter for DatabaseProvider { let tx_iter = if Some(block.body.len()) == senders_len { block.body.into_iter().zip(senders.unwrap()).collect::>() } else { - let senders = TransactionSigned::recover_signers(&block.body, block.body.len()).ok_or( - BlockExecutionError::Validation(BlockValidationError::SenderRecoveryError), - )?; + let senders = TransactionSigned::recover_signers(&block.body, block.body.len()) + .ok_or(ProviderError::SenderRecoveryError)?; durations_recorder.record_relative(metrics::Action::RecoverSigners); debug_assert_eq!(senders.len(), block.body.len(), "missing one or more senders"); block.body.into_iter().zip(senders).collect() @@ -2228,7 +2236,7 @@ impl BlockWriter for DatabaseProvider { blocks: Vec, state: BundleStateWithReceipts, prune_modes: Option<&PruneModes>, - ) -> RethResult<()> { + ) -> ProviderResult<()> { if blocks.is_empty() { return Ok(()) } @@ -2273,7 +2281,10 @@ impl BlockWriter for DatabaseProvider { } impl PruneCheckpointReader for DatabaseProvider { - fn get_prune_checkpoint(&self, segment: PruneSegment) -> RethResult> { + fn get_prune_checkpoint( + &self, + segment: PruneSegment, + ) -> ProviderResult> { Ok(self.tx.get::(segment)?) } } @@ -2283,7 +2294,7 @@ impl PruneCheckpointWriter for DatabaseProvider { &self, segment: PruneSegment, checkpoint: PruneCheckpoint, - ) -> RethResult<()> { + ) -> ProviderResult<()> { Ok(self.tx.put::(segment, checkpoint)?) } } diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 43380487c1bb2..898b5a39c065f 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -10,6 +10,7 @@ use reth_db::{database::Database, models::StoredBlockBodyIndices}; use reth_interfaces::{ blockchain_tree::{BlockchainTreeEngine, BlockchainTreeViewer}, consensus::ForkchoiceState, + provider::ProviderResult, RethError, RethResult, }; use reth_primitives::{ @@ -76,7 +77,7 @@ where { /// Create a new provider using only the database and the tree, fetching the latest header from /// the database to initialize the provider. - pub fn new(database: ProviderFactory, tree: Tree) -> RethResult { + pub fn new(database: ProviderFactory, tree: Tree) -> ProviderResult { let provider = database.provider()?; let best: ChainInfo = provider.chain_info()?; match provider.header_by_number(best.best_number)? 
{ @@ -84,9 +85,7 @@ where drop(provider); Ok(Self::with_latest(database, tree, header.seal(best.best_hash))) } - None => { - Err(RethError::Provider(ProviderError::HeaderNotFound(best.best_number.into()))) - } + None => Err(ProviderError::HeaderNotFound(best.best_number.into())), } } } @@ -105,10 +104,10 @@ where /// Instead, we ensure that the `block_number` is within the range of the /// [Self::best_block_number] which is updated when a block is synced. #[inline] - fn ensure_canonical_block(&self, block_number: BlockNumber) -> RethResult<()> { + fn ensure_canonical_block(&self, block_number: BlockNumber) -> ProviderResult<()> { let latest = self.best_block_number()?; if block_number > latest { - Err(ProviderError::HeaderNotFound(block_number.into()).into()) + Err(ProviderError::HeaderNotFound(block_number.into())) } else { Ok(()) } @@ -120,34 +119,34 @@ where DB: Database, Tree: Send + Sync, { - fn header(&self, block_hash: &BlockHash) -> RethResult> { + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { self.database.provider()?.header(block_hash) } - fn header_by_number(&self, num: BlockNumber) -> RethResult> { + fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { self.database.provider()?.header_by_number(num) } - fn header_td(&self, hash: &BlockHash) -> RethResult> { + fn header_td(&self, hash: &BlockHash) -> ProviderResult> { self.database.provider()?.header_td(hash) } - fn header_td_by_number(&self, number: BlockNumber) -> RethResult> { + fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult> { self.database.provider()?.header_td_by_number(number) } - fn headers_range(&self, range: impl RangeBounds) -> RethResult> { + fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { self.database.provider()?.headers_range(range) } - fn sealed_header(&self, number: BlockNumber) -> RethResult> { + fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { self.database.provider()?.sealed_header(number) } fn sealed_headers_range( &self, range: impl RangeBounds, - ) -> RethResult> { + ) -> ProviderResult> { self.database.provider()?.sealed_headers_range(range) } @@ -155,7 +154,7 @@ where &self, range: impl RangeBounds, predicate: impl FnMut(&SealedHeader) -> bool, - ) -> RethResult> { + ) -> ProviderResult> { self.database.provider()?.sealed_headers_while(range, predicate) } } @@ -165,7 +164,7 @@ where DB: Database, Tree: Send + Sync, { - fn block_hash(&self, number: u64) -> RethResult> { + fn block_hash(&self, number: u64) -> ProviderResult> { self.database.provider()?.block_hash(number) } @@ -173,7 +172,7 @@ where &self, start: BlockNumber, end: BlockNumber, - ) -> RethResult> { + ) -> ProviderResult> { self.database.provider()?.canonical_hashes_range(start, end) } } @@ -183,19 +182,19 @@ where DB: Database, Tree: BlockchainTreeViewer + Send + Sync, { - fn chain_info(&self) -> RethResult { + fn chain_info(&self) -> ProviderResult { Ok(self.chain_info.chain_info()) } - fn best_block_number(&self) -> RethResult { + fn best_block_number(&self) -> ProviderResult { Ok(self.chain_info.get_canonical_block_number()) } - fn last_block_number(&self) -> RethResult { + fn last_block_number(&self) -> ProviderResult { self.database.provider()?.last_block_number() } - fn block_number(&self, hash: B256) -> RethResult> { + fn block_number(&self, hash: B256) -> ProviderResult> { self.database.provider()?.block_number(hash) } } @@ -205,15 +204,15 @@ where DB: Database, Tree: BlockchainTreeViewer + Send + Sync, { - fn pending_block_num_hash(&self) -> 
RethResult> { + fn pending_block_num_hash(&self) -> ProviderResult> { Ok(self.tree.pending_block_num_hash()) } - fn safe_block_num_hash(&self) -> RethResult> { + fn safe_block_num_hash(&self) -> ProviderResult> { Ok(self.chain_info.get_safe_num_hash()) } - fn finalized_block_num_hash(&self) -> RethResult> { + fn finalized_block_num_hash(&self) -> ProviderResult> { Ok(self.chain_info.get_finalized_num_hash()) } } @@ -223,7 +222,7 @@ where DB: Database, Tree: BlockchainTreeViewer + Send + Sync, { - fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> RethResult> { + fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult> { let block = match source { BlockSource::Any => { // check database first @@ -242,29 +241,29 @@ where Ok(block) } - fn block(&self, id: BlockHashOrNumber) -> RethResult> { + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { match id { BlockHashOrNumber::Hash(hash) => self.find_block_by_hash(hash, BlockSource::Any), BlockHashOrNumber::Number(num) => self.database.provider()?.block_by_number(num), } } - fn pending_block(&self) -> RethResult> { + fn pending_block(&self) -> ProviderResult> { Ok(self.tree.pending_block()) } - fn pending_block_and_receipts(&self) -> RethResult)>> { + fn pending_block_and_receipts(&self) -> ProviderResult)>> { Ok(self.tree.pending_block_and_receipts()) } - fn ommers(&self, id: BlockHashOrNumber) -> RethResult>> { + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { self.database.provider()?.ommers(id) } fn block_body_indices( &self, number: BlockNumber, - ) -> RethResult> { + ) -> ProviderResult> { self.database.provider()?.block_body_indices(number) } @@ -278,11 +277,11 @@ where &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> RethResult> { + ) -> ProviderResult> { self.database.provider()?.block_with_senders(id, transaction_kind) } - fn block_range(&self, range: RangeInclusive) -> RethResult> { + fn block_range(&self, range: RangeInclusive) -> ProviderResult> { self.database.provider()?.block_range(range) } } @@ -292,62 +291,65 @@ where DB: Database, Tree: BlockchainTreeViewer + Send + Sync, { - fn transaction_id(&self, tx_hash: TxHash) -> RethResult> { + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { self.database.provider()?.transaction_id(tx_hash) } - fn transaction_by_id(&self, id: TxNumber) -> RethResult> { + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { self.database.provider()?.transaction_by_id(id) } fn transaction_by_id_no_hash( &self, id: TxNumber, - ) -> RethResult> { + ) -> ProviderResult> { self.database.provider()?.transaction_by_id_no_hash(id) } - fn transaction_by_hash(&self, hash: TxHash) -> RethResult> { + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { self.database.provider()?.transaction_by_hash(hash) } fn transaction_by_hash_with_meta( &self, tx_hash: TxHash, - ) -> RethResult> { + ) -> ProviderResult> { self.database.provider()?.transaction_by_hash_with_meta(tx_hash) } - fn transaction_block(&self, id: TxNumber) -> RethResult> { + fn transaction_block(&self, id: TxNumber) -> ProviderResult> { self.database.provider()?.transaction_block(id) } fn transactions_by_block( &self, id: BlockHashOrNumber, - ) -> RethResult>> { + ) -> ProviderResult>> { self.database.provider()?.transactions_by_block(id) } fn transactions_by_block_range( &self, range: impl RangeBounds, - ) -> RethResult>> { + ) -> ProviderResult>> { self.database.provider()?.transactions_by_block_range(range) } fn 
transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> RethResult> { + ) -> ProviderResult> { self.database.provider()?.transactions_by_tx_range(range) } - fn senders_by_tx_range(&self, range: impl RangeBounds) -> RethResult> { + fn senders_by_tx_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { self.database.provider()?.senders_by_tx_range(range) } - fn transaction_sender(&self, id: TxNumber) -> RethResult> { + fn transaction_sender(&self, id: TxNumber) -> ProviderResult> { self.database.provider()?.transaction_sender(id) } } @@ -357,15 +359,15 @@ where DB: Database, Tree: Send + Sync, { - fn receipt(&self, id: TxNumber) -> RethResult> { + fn receipt(&self, id: TxNumber) -> ProviderResult> { self.database.provider()?.receipt(id) } - fn receipt_by_hash(&self, hash: TxHash) -> RethResult> { + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { self.database.provider()?.receipt_by_hash(hash) } - fn receipts_by_block(&self, block: BlockHashOrNumber) -> RethResult>> { + fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>> { self.database.provider()?.receipts_by_block(block) } } @@ -374,7 +376,7 @@ where DB: Database, Tree: BlockchainTreeViewer + Send + Sync, { - fn receipts_by_block_id(&self, block: BlockId) -> RethResult>> { + fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { match block { BlockId::Hash(rpc_block_hash) => { let mut receipts = self.receipts_by_block(rpc_block_hash.block_hash.into())?; @@ -406,11 +408,11 @@ where &self, id: BlockHashOrNumber, timestamp: u64, - ) -> RethResult>> { + ) -> ProviderResult>> { self.database.provider()?.withdrawals_by_block(id, timestamp) } - fn latest_withdrawal(&self) -> RethResult> { + fn latest_withdrawal(&self) -> ProviderResult> { self.database.provider()?.latest_withdrawal() } } @@ -420,11 +422,11 @@ where DB: Database, Tree: Send + Sync, { - fn get_stage_checkpoint(&self, id: StageId) -> RethResult> { + fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { self.database.provider()?.get_stage_checkpoint(id) } - fn get_stage_checkpoint_progress(&self, id: StageId) -> RethResult>> { + fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult>> { self.database.provider()?.get_stage_checkpoint_progress(id) } } @@ -439,7 +441,7 @@ where cfg: &mut CfgEnv, block_env: &mut BlockEnv, at: BlockHashOrNumber, - ) -> RethResult<()> { + ) -> ProviderResult<()> { self.database.provider()?.fill_env_at(cfg, block_env, at) } @@ -448,11 +450,15 @@ where cfg: &mut CfgEnv, block_env: &mut BlockEnv, header: &Header, - ) -> RethResult<()> { + ) -> ProviderResult<()> { self.database.provider()?.fill_env_with_header(cfg, block_env, header) } - fn fill_block_env_at(&self, block_env: &mut BlockEnv, at: BlockHashOrNumber) -> RethResult<()> { + fn fill_block_env_at( + &self, + block_env: &mut BlockEnv, + at: BlockHashOrNumber, + ) -> ProviderResult<()> { self.database.provider()?.fill_block_env_at(block_env, at) } @@ -460,15 +466,15 @@ where &self, block_env: &mut BlockEnv, header: &Header, - ) -> RethResult<()> { + ) -> ProviderResult<()> { self.database.provider()?.fill_block_env_with_header(block_env, header) } - fn fill_cfg_env_at(&self, cfg: &mut CfgEnv, at: BlockHashOrNumber) -> RethResult<()> { + fn fill_cfg_env_at(&self, cfg: &mut CfgEnv, at: BlockHashOrNumber) -> ProviderResult<()> { self.database.provider()?.fill_cfg_env_at(cfg, at) } - fn fill_cfg_env_with_header(&self, cfg: &mut CfgEnv, header: &Header) -> RethResult<()> { + fn fill_cfg_env_with_header(&self, 
cfg: &mut CfgEnv, header: &Header) -> ProviderResult<()> { self.database.provider()?.fill_cfg_env_with_header(cfg, header) } } @@ -478,7 +484,10 @@ where DB: Database, Tree: Send + Sync, { - fn get_prune_checkpoint(&self, segment: PruneSegment) -> RethResult> { + fn get_prune_checkpoint( + &self, + segment: PruneSegment, + ) -> ProviderResult> { self.database.provider()?.get_prune_checkpoint(segment) } } @@ -499,7 +508,7 @@ where Tree: BlockchainTreePendingStateProvider + BlockchainTreeViewer, { /// Storage provider for latest block - fn latest(&self) -> RethResult> { + fn latest(&self) -> ProviderResult> { trace!(target: "providers::blockchain", "Getting latest block state provider"); self.database.latest() } @@ -507,18 +516,18 @@ where fn history_by_block_number( &self, block_number: BlockNumber, - ) -> RethResult> { + ) -> ProviderResult> { trace!(target: "providers::blockchain", ?block_number, "Getting history by block number"); self.ensure_canonical_block(block_number)?; self.database.history_by_block_number(block_number) } - fn history_by_block_hash(&self, block_hash: BlockHash) -> RethResult> { + fn history_by_block_hash(&self, block_hash: BlockHash) -> ProviderResult> { trace!(target: "providers::blockchain", ?block_hash, "Getting history by block hash"); self.database.history_by_block_hash(block_hash) } - fn state_by_block_hash(&self, block: BlockHash) -> RethResult> { + fn state_by_block_hash(&self, block: BlockHash) -> ProviderResult> { trace!(target: "providers::blockchain", ?block, "Getting state by block hash"); let mut state = self.history_by_block_hash(block); @@ -537,7 +546,7 @@ where /// /// If there's no pending block available then the latest state provider is returned: /// [Self::latest] - fn pending(&self) -> RethResult> { + fn pending(&self) -> ProviderResult> { trace!(target: "providers::blockchain", "Getting provider for pending state"); if let Some(block) = self.tree.pending_block_num_hash() { @@ -550,7 +559,10 @@ where self.latest() } - fn pending_state_by_hash(&self, block_hash: B256) -> RethResult>> { + fn pending_state_by_hash( + &self, + block_hash: B256, + ) -> ProviderResult>> { if let Some(state) = self.tree.find_pending_state_provider(block_hash) { return Ok(Some(self.pending_with_provider(state)?)) } @@ -560,7 +572,7 @@ where fn pending_with_provider( &self, post_state_data: Box, - ) -> RethResult> { + ) -> ProviderResult> { let canonical_fork = post_state_data.canonical_fork(); trace!(target: "providers::blockchain", ?canonical_fork, "Returning post state provider"); @@ -714,7 +726,7 @@ where Self: BlockReader + BlockIdReader + ReceiptProviderIdExt, Tree: BlockchainTreeEngine, { - fn block_by_id(&self, id: BlockId) -> RethResult> { + fn block_by_id(&self, id: BlockId) -> ProviderResult> { match id { BlockId::Number(num) => self.block_by_number_or_tag(num), BlockId::Hash(hash) => { @@ -732,23 +744,23 @@ where } } - fn header_by_number_or_tag(&self, id: BlockNumberOrTag) -> RethResult> { - match id { - BlockNumberOrTag::Latest => Ok(Some(self.chain_info.get_canonical_head().unseal())), + fn header_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { + Ok(match id { + BlockNumberOrTag::Latest => Some(self.chain_info.get_canonical_head().unseal()), BlockNumberOrTag::Finalized => { - Ok(self.chain_info.get_finalized_header().map(|h| h.unseal())) + self.chain_info.get_finalized_header().map(|h| h.unseal()) } - BlockNumberOrTag::Safe => Ok(self.chain_info.get_safe_header().map(|h| h.unseal())), - BlockNumberOrTag::Earliest => 
self.header_by_number(0), - BlockNumberOrTag::Pending => Ok(self.tree.pending_header().map(|h| h.unseal())), - BlockNumberOrTag::Number(num) => self.header_by_number(num), - } + BlockNumberOrTag::Safe => self.chain_info.get_safe_header().map(|h| h.unseal()), + BlockNumberOrTag::Earliest => self.header_by_number(0)?, + BlockNumberOrTag::Pending => self.tree.pending_header().map(|h| h.unseal()), + BlockNumberOrTag::Number(num) => self.header_by_number(num)?, + }) } fn sealed_header_by_number_or_tag( &self, id: BlockNumberOrTag, - ) -> RethResult> { + ) -> ProviderResult> { match id { BlockNumberOrTag::Latest => Ok(Some(self.chain_info.get_canonical_head())), BlockNumberOrTag::Finalized => Ok(self.chain_info.get_finalized_header()), @@ -763,23 +775,21 @@ where } } - fn sealed_header_by_id(&self, id: BlockId) -> RethResult> { - match id { - BlockId::Number(num) => self.sealed_header_by_number_or_tag(num), - BlockId::Hash(hash) => { - self.header(&hash.block_hash)?.map_or_else(|| Ok(None), |h| Ok(Some(h.seal_slow()))) - } - } + fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult> { + Ok(match id { + BlockId::Number(num) => self.sealed_header_by_number_or_tag(num)?, + BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(|h| h.seal_slow()), + }) } - fn header_by_id(&self, id: BlockId) -> RethResult> { - match id { - BlockId::Number(num) => self.header_by_number_or_tag(num), - BlockId::Hash(hash) => self.header(&hash.block_hash), - } + fn header_by_id(&self, id: BlockId) -> ProviderResult> { + Ok(match id { + BlockId::Number(num) => self.header_by_number_or_tag(num)?, + BlockId::Hash(hash) => self.header(&hash.block_hash)?, + }) } - fn ommers_by_id(&self, id: BlockId) -> RethResult>> { + fn ommers_by_id(&self, id: BlockId) -> ProviderResult>> { match id { BlockId::Number(num) => self.ommers_by_number_or_tag(num), BlockId::Hash(hash) => { @@ -822,7 +832,7 @@ where fn account_block_changeset( &self, block_number: BlockNumber, - ) -> RethResult> { + ) -> ProviderResult> { self.database.provider()?.account_block_changeset(block_number) } } @@ -833,7 +843,7 @@ where Tree: Sync + Send, { /// Get basic account information. - fn basic_account(&self, address: Address) -> RethResult> { + fn basic_account(&self, address: Address) -> ProviderResult> { self.database.provider()?.basic_account(address) } } diff --git a/crates/storage/provider/src/providers/snapshot/jar.rs b/crates/storage/provider/src/providers/snapshot/jar.rs index 60d59a3f9bb0e..9d996d9e1798e 100644 --- a/crates/storage/provider/src/providers/snapshot/jar.rs +++ b/crates/storage/provider/src/providers/snapshot/jar.rs @@ -6,11 +6,7 @@ use reth_db::{ codecs::CompactU256, snapshot::{HeaderMask, ReceiptMask, SnapshotCursor, TransactionMask}, }; -use reth_interfaces::{ - executor::{BlockExecutionError, BlockValidationError}, - provider::ProviderError, - RethResult, -}; +use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{ Address, BlockHash, BlockHashOrNumber, BlockNumber, ChainInfo, Header, Receipt, SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, B256, U256, @@ -41,7 +37,7 @@ impl<'a> From> for SnapshotJarProvider<'a> { impl<'a> SnapshotJarProvider<'a> { /// Provides a cursor for more granular data access. 
- pub fn cursor<'b>(&'b self) -> RethResult> + pub fn cursor<'b>(&'b self) -> ProviderResult> where 'b: 'a, { @@ -56,7 +52,7 @@ impl<'a> SnapshotJarProvider<'a> { } impl<'a> HeaderProvider for SnapshotJarProvider<'a> { - fn header(&self, block_hash: &BlockHash) -> RethResult> { + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { Ok(self .cursor()? .get_two::>(block_hash.into())? @@ -64,11 +60,11 @@ impl<'a> HeaderProvider for SnapshotJarProvider<'a> { .map(|(header, _)| header)) } - fn header_by_number(&self, num: BlockNumber) -> RethResult> { + fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { self.cursor()?.get_one::>(num.into()) } - fn header_td(&self, block_hash: &BlockHash) -> RethResult> { + fn header_td(&self, block_hash: &BlockHash) -> ProviderResult> { Ok(self .cursor()? .get_two::>(block_hash.into())? @@ -76,11 +72,11 @@ impl<'a> HeaderProvider for SnapshotJarProvider<'a> { .map(|(td, _)| td.into())) } - fn header_td_by_number(&self, num: BlockNumber) -> RethResult> { + fn header_td_by_number(&self, num: BlockNumber) -> ProviderResult> { Ok(self.cursor()?.get_one::>(num.into())?.map(Into::into)) } - fn headers_range(&self, range: impl RangeBounds) -> RethResult> { + fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { let range = to_range(range); let mut cursor = self.cursor()?; @@ -95,7 +91,7 @@ impl<'a> HeaderProvider for SnapshotJarProvider<'a> { Ok(headers) } - fn sealed_header(&self, number: BlockNumber) -> RethResult> { + fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { Ok(self .cursor()? .get_two::>(number.into())? @@ -106,7 +102,7 @@ impl<'a> HeaderProvider for SnapshotJarProvider<'a> { &self, range: impl RangeBounds, mut predicate: impl FnMut(&SealedHeader) -> bool, - ) -> RethResult> { + ) -> ProviderResult> { let range = to_range(range); let mut cursor = self.cursor()?; @@ -128,7 +124,7 @@ impl<'a> HeaderProvider for SnapshotJarProvider<'a> { } impl<'a> BlockHashReader for SnapshotJarProvider<'a> { - fn block_hash(&self, number: u64) -> RethResult> { + fn block_hash(&self, number: u64) -> ProviderResult> { self.cursor()?.get_one::>(number.into()) } @@ -136,7 +132,7 @@ impl<'a> BlockHashReader for SnapshotJarProvider<'a> { &self, start: BlockNumber, end: BlockNumber, - ) -> RethResult> { + ) -> ProviderResult> { let mut cursor = self.cursor()?; let mut hashes = Vec::with_capacity((end - start) as usize); @@ -150,22 +146,22 @@ impl<'a> BlockHashReader for SnapshotJarProvider<'a> { } impl<'a> BlockNumReader for SnapshotJarProvider<'a> { - fn chain_info(&self) -> RethResult { + fn chain_info(&self) -> ProviderResult { // Information on live database - Err(ProviderError::UnsupportedProvider.into()) + Err(ProviderError::UnsupportedProvider) } - fn best_block_number(&self) -> RethResult { + fn best_block_number(&self) -> ProviderResult { // Information on live database - Err(ProviderError::UnsupportedProvider.into()) + Err(ProviderError::UnsupportedProvider) } - fn last_block_number(&self) -> RethResult { + fn last_block_number(&self) -> ProviderResult { // Information on live database - Err(ProviderError::UnsupportedProvider.into()) + Err(ProviderError::UnsupportedProvider) } - fn block_number(&self, hash: B256) -> RethResult> { + fn block_number(&self, hash: B256) -> ProviderResult> { let mut cursor = self.cursor()?; Ok(cursor @@ -175,7 +171,7 @@ impl<'a> BlockNumReader for SnapshotJarProvider<'a> { } impl<'a> TransactionsProvider for SnapshotJarProvider<'a> { - fn transaction_id(&self, hash: TxHash) -> 
RethResult> { + fn transaction_id(&self, hash: TxHash) -> ProviderResult> { let mut cursor = self.cursor()?; Ok(cursor @@ -183,7 +179,7 @@ impl<'a> TransactionsProvider for SnapshotJarProvider<'a> { .and_then(|res| (res.hash() == hash).then(|| cursor.number()))) } - fn transaction_by_id(&self, num: TxNumber) -> RethResult> { + fn transaction_by_id(&self, num: TxNumber) -> ProviderResult> { Ok(self .cursor()? .get_one::>(num.into())? @@ -193,11 +189,11 @@ impl<'a> TransactionsProvider for SnapshotJarProvider<'a> { fn transaction_by_id_no_hash( &self, num: TxNumber, - ) -> RethResult> { + ) -> ProviderResult> { self.cursor()?.get_one::>(num.into()) } - fn transaction_by_hash(&self, hash: TxHash) -> RethResult> { + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { Ok(self .cursor()? .get_one::>((&hash).into())? @@ -207,44 +203,47 @@ impl<'a> TransactionsProvider for SnapshotJarProvider<'a> { fn transaction_by_hash_with_meta( &self, _hash: TxHash, - ) -> RethResult> { + ) -> ProviderResult> { // Information required on indexing table [`tables::TransactionBlock`] - Err(ProviderError::UnsupportedProvider.into()) + Err(ProviderError::UnsupportedProvider) } - fn transaction_block(&self, _id: TxNumber) -> RethResult> { + fn transaction_block(&self, _id: TxNumber) -> ProviderResult> { // Information on indexing table [`tables::TransactionBlock`] - Err(ProviderError::UnsupportedProvider.into()) + Err(ProviderError::UnsupportedProvider) } fn transactions_by_block( &self, _block_id: BlockHashOrNumber, - ) -> RethResult>> { + ) -> ProviderResult>> { // Related to indexing tables. Live database should get the tx_range and call snapshot // provider with `transactions_by_tx_range` instead. - Err(ProviderError::UnsupportedProvider.into()) + Err(ProviderError::UnsupportedProvider) } fn transactions_by_block_range( &self, _range: impl RangeBounds, - ) -> RethResult>> { + ) -> ProviderResult>> { // Related to indexing tables. Live database should get the tx_range and call snapshot // provider with `transactions_by_tx_range` instead. - Err(ProviderError::UnsupportedProvider.into()) + Err(ProviderError::UnsupportedProvider) } - fn senders_by_tx_range(&self, range: impl RangeBounds) -> RethResult> { + fn senders_by_tx_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { let txs = self.transactions_by_tx_range(range)?; - Ok(TransactionSignedNoHash::recover_signers(&txs, txs.len()) - .ok_or(BlockExecutionError::Validation(BlockValidationError::SenderRecoveryError))?) + TransactionSignedNoHash::recover_signers(&txs, txs.len()) + .ok_or(ProviderError::SenderRecoveryError) } fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> RethResult> { + ) -> ProviderResult> { let range = to_range(range); let mut cursor = self.cursor()?; let mut txes = Vec::with_capacity((range.end - range.start) as usize); @@ -259,7 +258,7 @@ impl<'a> TransactionsProvider for SnapshotJarProvider<'a> { Ok(txes) } - fn transaction_sender(&self, num: TxNumber) -> RethResult> { + fn transaction_sender(&self, num: TxNumber) -> ProviderResult> { Ok(self .cursor()? .get_one::>(num.into())? 
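The `senders_by_tx_range` change above also swaps the old `BlockExecutionError::Validation(BlockValidationError::SenderRecoveryError)` wrapping for a direct `ProviderError::SenderRecoveryError`; the Option-to-result step is plain `ok_or`. A self-contained toy of that conversion (the recovery stand-in is invented for illustration):

    // Toy model of the Option -> ProviderResult step used above.
    #[derive(Debug)]
    enum ProviderError {
        SenderRecoveryError,
    }

    type ProviderResult<T> = Result<T, ProviderError>;

    // Stand-in for TransactionSignedNoHash::recover_signers: None on any failure.
    fn recover_signers(txs: &[u8]) -> Option<Vec<u64>> {
        if txs.is_empty() {
            return None;
        }
        Some(txs.iter().map(|b| u64::from(*b)).collect())
    }

    fn senders_by_tx_range(txs: &[u8]) -> ProviderResult<Vec<u64>> {
        // ok_or maps the recovery failure straight onto the provider error,
        // replacing the old block-execution error wrapping.
        recover_signers(txs).ok_or(ProviderError::SenderRecoveryError)
    }

    fn main() {
        assert!(senders_by_tx_range(&[]).is_err());
        assert_eq!(senders_by_tx_range(&[1, 2]).unwrap(), vec![1, 2]);
    }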
@@ -268,11 +267,11 @@ impl<'a> TransactionsProvider for SnapshotJarProvider<'a> { } impl<'a> ReceiptProvider for SnapshotJarProvider<'a> { - fn receipt(&self, num: TxNumber) -> RethResult> { + fn receipt(&self, num: TxNumber) -> ProviderResult> { self.cursor()?.get_one::>(num.into()) } - fn receipt_by_hash(&self, hash: TxHash) -> RethResult> { + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { if let Some(tx_snapshot) = &self.auxiliar_jar { if let Some(num) = tx_snapshot.transaction_id(hash)? { return self.receipt(num) @@ -281,10 +280,10 @@ impl<'a> ReceiptProvider for SnapshotJarProvider<'a> { Ok(None) } - fn receipts_by_block(&self, _block: BlockHashOrNumber) -> RethResult>> { + fn receipts_by_block(&self, _block: BlockHashOrNumber) -> ProviderResult>> { // Related to indexing tables. Snapshot should get the tx_range and call snapshot // provider with `receipt()` instead for each - Err(ProviderError::UnsupportedProvider.into()) + Err(ProviderError::UnsupportedProvider) } } diff --git a/crates/storage/provider/src/providers/snapshot/manager.rs b/crates/storage/provider/src/providers/snapshot/manager.rs index d4124f78518f0..972df40408a53 100644 --- a/crates/storage/provider/src/providers/snapshot/manager.rs +++ b/crates/storage/provider/src/providers/snapshot/manager.rs @@ -6,7 +6,7 @@ use reth_db::{ codecs::CompactU256, snapshot::{HeaderMask, TransactionMask}, }; -use reth_interfaces::{provider::ProviderError, RethResult}; +use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_nippy_jar::NippyJar; use reth_primitives::{ snapshot::HighestSnapshots, Address, BlockHash, BlockHashOrNumber, BlockNumber, ChainInfo, @@ -73,13 +73,13 @@ impl SnapshotProvider { segment: SnapshotSegment, block: BlockNumber, path: Option<&Path>, - ) -> RethResult> { + ) -> ProviderResult> { self.get_segment_provider( segment, || self.get_segment_ranges_from_block(segment, block), path, )? - .ok_or_else(|| ProviderError::MissingSnapshotBlock(segment, block).into()) + .ok_or_else(|| ProviderError::MissingSnapshotBlock(segment, block)) } /// Gets the [`SnapshotJarProvider`] of the requested segment and transaction. @@ -88,13 +88,13 @@ impl SnapshotProvider { segment: SnapshotSegment, tx: TxNumber, path: Option<&Path>, - ) -> RethResult> { + ) -> ProviderResult> { self.get_segment_provider( segment, || self.get_segment_ranges_from_transaction(segment, tx), path, )? - .ok_or_else(|| ProviderError::MissingSnapshotTx(segment, tx).into()) + .ok_or_else(|| ProviderError::MissingSnapshotTx(segment, tx)) } /// Gets the [`SnapshotJarProvider`] of the requested segment and block or transaction. @@ -103,7 +103,7 @@ impl SnapshotProvider { segment: SnapshotSegment, fn_ranges: impl Fn() -> Option<(RangeInclusive, RangeInclusive)>, path: Option<&Path>, - ) -> RethResult>> { + ) -> ProviderResult>> { // If we have a path, then get the block range and transaction range from its name. 
// Otherwise, check `self.available_snapshots` let snapshot_ranges = match path { @@ -136,7 +136,7 @@ impl SnapshotProvider { segment: SnapshotSegment, block_range: &RangeInclusive, tx_range: &RangeInclusive, - ) -> Result, reth_interfaces::RethError> { + ) -> ProviderResult> { let key = (*block_range.end(), segment); if let Some(jar) = self.map.get(&key) { Ok(jar.into()) @@ -212,8 +212,8 @@ impl SnapshotProvider { pub fn find_snapshot( &self, segment: SnapshotSegment, - func: impl Fn(SnapshotJarProvider<'_>) -> RethResult>, - ) -> RethResult> { + func: impl Fn(SnapshotJarProvider<'_>) -> ProviderResult>, + ) -> ProviderResult> { let snapshots = self.snapshots_block_index.read(); if let Some(segment_snapshots) = snapshots.get(&segment) { // It's more probable that the request comes from a newer block height, so we iterate @@ -241,7 +241,7 @@ impl SnapshotProvider { } impl HeaderProvider for SnapshotProvider { - fn header(&self, block_hash: &BlockHash) -> RethResult> { + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { self.find_snapshot(SnapshotSegment::Headers, |jar_provider| { Ok(jar_provider .cursor()? @@ -255,12 +255,12 @@ impl HeaderProvider for SnapshotProvider { }) } - fn header_by_number(&self, num: BlockNumber) -> RethResult> { + fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { self.get_segment_provider_from_block(SnapshotSegment::Headers, num, None)? .header_by_number(num) } - fn header_td(&self, block_hash: &BlockHash) -> RethResult> { + fn header_td(&self, block_hash: &BlockHash) -> ProviderResult> { self.find_snapshot(SnapshotSegment::Headers, |jar_provider| { Ok(jar_provider .cursor()? @@ -269,16 +269,16 @@ impl HeaderProvider for SnapshotProvider { }) } - fn header_td_by_number(&self, num: BlockNumber) -> RethResult> { + fn header_td_by_number(&self, num: BlockNumber) -> ProviderResult> { self.get_segment_provider_from_block(SnapshotSegment::Headers, num, None)? .header_td_by_number(num) } - fn headers_range(&self, _range: impl RangeBounds) -> RethResult> { + fn headers_range(&self, _range: impl RangeBounds) -> ProviderResult> { todo!(); } - fn sealed_header(&self, num: BlockNumber) -> RethResult> { + fn sealed_header(&self, num: BlockNumber) -> ProviderResult> { self.get_segment_provider_from_block(SnapshotSegment::Headers, num, None)? 
.sealed_header(num) } @@ -287,13 +287,13 @@ impl HeaderProvider for SnapshotProvider { &self, _range: impl RangeBounds, _predicate: impl FnMut(&SealedHeader) -> bool, - ) -> RethResult> { + ) -> ProviderResult> { todo!() } } impl BlockHashReader for SnapshotProvider { - fn block_hash(&self, num: u64) -> RethResult> { + fn block_hash(&self, num: u64) -> ProviderResult> { self.get_segment_provider_from_block(SnapshotSegment::Headers, num, None)?.block_hash(num) } @@ -301,31 +301,31 @@ impl BlockHashReader for SnapshotProvider { &self, _start: BlockNumber, _end: BlockNumber, - ) -> RethResult> { + ) -> ProviderResult> { todo!() } } impl BlockNumReader for SnapshotProvider { - fn chain_info(&self) -> RethResult { + fn chain_info(&self) -> ProviderResult { todo!() } - fn best_block_number(&self) -> RethResult { + fn best_block_number(&self) -> ProviderResult { todo!() } - fn last_block_number(&self) -> RethResult { + fn last_block_number(&self) -> ProviderResult { todo!() } - fn block_number(&self, _hash: B256) -> RethResult> { + fn block_number(&self, _hash: B256) -> ProviderResult> { todo!() } } impl TransactionsProvider for SnapshotProvider { - fn transaction_id(&self, tx_hash: TxHash) -> RethResult> { + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { self.find_snapshot(SnapshotSegment::Transactions, |jar_provider| { let mut cursor = jar_provider.cursor()?; if cursor @@ -340,7 +340,7 @@ impl TransactionsProvider for SnapshotProvider { }) } - fn transaction_by_id(&self, num: TxNumber) -> RethResult> { + fn transaction_by_id(&self, num: TxNumber) -> ProviderResult> { self.get_segment_provider_from_transaction(SnapshotSegment::Transactions, num, None)? .transaction_by_id(num) } @@ -348,12 +348,12 @@ impl TransactionsProvider for SnapshotProvider { fn transaction_by_id_no_hash( &self, num: TxNumber, - ) -> RethResult> { + ) -> ProviderResult> { self.get_segment_provider_from_transaction(SnapshotSegment::Transactions, num, None)? .transaction_by_id_no_hash(num) } - fn transaction_by_hash(&self, hash: TxHash) -> RethResult> { + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { self.find_snapshot(SnapshotSegment::Transactions, |jar_provider| { Ok(jar_provider .cursor()? 
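Several lookups above (`header`, `header_td`, `transaction_id`, `transaction_by_hash`) go through `find_snapshot`, which walks the per-segment snapshot index and applies a fallible closure to each jar until one produces a hit, iterating from newer jars first per the comment in the manager.rs hunk. A minimal model of that control flow under toy types (every name below is a stand-in):

    // Toy model of find_snapshot's "first hit wins" scan.
    type ProviderResult<T> = Result<T, String>;

    fn find_snapshot<T>(
        jars_oldest_first: &[u64],
        func: impl Fn(u64) -> ProviderResult<Option<T>>,
    ) -> ProviderResult<Option<T>> {
        // Newer block heights are the likelier targets, so scan the index
        // from the newest jar backwards.
        for jar_end_block in jars_oldest_first.iter().rev() {
            if let Some(found) = func(*jar_end_block)? {
                return Ok(Some(found));
            }
        }
        Ok(None)
    }

    fn main() {
        // Jars keyed by the highest block they contain.
        let jars = [100u64, 200, 300];
        let hit = find_snapshot(&jars, |end| Ok((end == 200).then_some(end)));
        assert_eq!(hit, Ok(Some(200)));
    }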
@@ -366,40 +366,43 @@ impl TransactionsProvider for SnapshotProvider { fn transaction_by_hash_with_meta( &self, _hash: TxHash, - ) -> RethResult> { + ) -> ProviderResult> { todo!() } - fn transaction_block(&self, _id: TxNumber) -> RethResult> { + fn transaction_block(&self, _id: TxNumber) -> ProviderResult> { todo!() } fn transactions_by_block( &self, _block_id: BlockHashOrNumber, - ) -> RethResult>> { + ) -> ProviderResult>> { todo!() } fn transactions_by_block_range( &self, _range: impl RangeBounds, - ) -> RethResult>> { + ) -> ProviderResult>> { todo!() } - fn senders_by_tx_range(&self, _range: impl RangeBounds) -> RethResult> { + fn senders_by_tx_range( + &self, + _range: impl RangeBounds, + ) -> ProviderResult> { todo!() } fn transactions_by_tx_range( &self, _range: impl RangeBounds, - ) -> RethResult> { + ) -> ProviderResult> { todo!() } - fn transaction_sender(&self, id: TxNumber) -> RethResult> { + fn transaction_sender(&self, id: TxNumber) -> ProviderResult> { Ok(self.transaction_by_id_no_hash(id)?.and_then(|tx| tx.recover_signer())) } } diff --git a/crates/storage/provider/src/providers/snapshot/mod.rs b/crates/storage/provider/src/providers/snapshot/mod.rs index cde7dbfc1ac83..a5244c78e8910 100644 --- a/crates/storage/provider/src/providers/snapshot/mod.rs +++ b/crates/storage/provider/src/providers/snapshot/mod.rs @@ -4,7 +4,7 @@ pub use manager::SnapshotProvider; mod jar; pub use jar::SnapshotJarProvider; -use reth_interfaces::RethResult; +use reth_interfaces::provider::ProviderResult; use reth_nippy_jar::NippyJar; use reth_primitives::{snapshot::SegmentHeader, SnapshotSegment}; use std::ops::Deref; @@ -20,7 +20,7 @@ pub struct LoadedJar { } impl LoadedJar { - fn new(jar: NippyJar) -> RethResult { + fn new(jar: NippyJar) -> ProviderResult { let mmap_handle = jar.open_data()?; Ok(Self { jar, mmap_handle }) } diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index d91d2f5898962..c76ea75d53e93 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -10,7 +10,7 @@ use reth_db::{ transaction::DbTx, BlockNumberList, }; -use reth_interfaces::RethResult; +use reth_interfaces::provider::ProviderResult; use reth_primitives::{ trie::AccountProof, Account, Address, BlockNumber, Bytecode, StorageKey, StorageValue, B256, }; @@ -61,9 +61,9 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { } /// Lookup an account in the AccountHistory table - pub fn account_history_lookup(&self, address: Address) -> RethResult { + pub fn account_history_lookup(&self, address: Address) -> ProviderResult { if !self.lowest_available_blocks.is_account_history_available(self.block_number) { - return Err(ProviderError::StateAtBlockPruned(self.block_number).into()) + return Err(ProviderError::StateAtBlockPruned(self.block_number)) } // history key to search IntegerList of block number changesets. @@ -80,9 +80,9 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { &self, address: Address, storage_key: StorageKey, - ) -> RethResult { + ) -> ProviderResult { if !self.lowest_available_blocks.is_storage_history_available(self.block_number) { - return Err(ProviderError::StateAtBlockPruned(self.block_number).into()) + return Err(ProviderError::StateAtBlockPruned(self.block_number)) } // history key to search IntegerList of block number changesets. 
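The historical.rs guards rewritten above follow one rule: a history lookup below the lowest retained block must fail with `StateAtBlockPruned`, returned directly now that the functions yield `ProviderResult`. A compact model of that guard; `LowestAvailableBlocks` and `is_account_history_available` mirror names in the hunk, while the fields and everything else are simplified stand-ins:

    #[derive(Debug, PartialEq)]
    enum ProviderError {
        StateAtBlockPruned(u64),
    }

    type ProviderResult<T> = Result<T, ProviderError>;

    struct LowestAvailableBlocks {
        account_history_block_number: Option<u64>,
    }

    impl LowestAvailableBlocks {
        // History is available unless pruning raised the floor above the target.
        fn is_account_history_available(&self, at: u64) -> bool {
            self.account_history_block_number.map_or(true, |lowest| at >= lowest)
        }
    }

    fn account_history_lookup(
        lowest: &LowestAvailableBlocks,
        block_number: u64,
    ) -> ProviderResult<()> {
        if !lowest.is_account_history_available(block_number) {
            // Returned directly: the fn's error type is already ProviderError.
            return Err(ProviderError::StateAtBlockPruned(block_number));
        }
        Ok(())
    }

    fn main() {
        let lowest = LowestAvailableBlocks { account_history_block_number: Some(100) };
        assert_eq!(
            account_history_lookup(&lowest, 50),
            Err(ProviderError::StateAtBlockPruned(50))
        );
        assert!(account_history_lookup(&lowest, 150).is_ok());
    }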
@@ -99,7 +99,7 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { key: K, key_filter: impl Fn(&K) -> bool, lowest_available_block_number: Option, - ) -> RethResult + ) -> ProviderResult where T: Table, { @@ -153,7 +153,7 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { impl<'b, TX: DbTx> AccountReader for HistoricalStateProviderRef<'b, TX> { /// Get basic account information. - fn basic_account(&self, address: Address) -> RethResult> { + fn basic_account(&self, address: Address) -> ProviderResult> { match self.account_history_lookup(address)? { HistoryInfo::NotYetWritten => Ok(None), HistoryInfo::InChangeset(changeset_block_number) => Ok(self @@ -175,7 +175,7 @@ impl<'b, TX: DbTx> AccountReader for HistoricalStateProviderRef<'b, TX> { impl<'b, TX: DbTx> BlockHashReader for HistoricalStateProviderRef<'b, TX> { /// Get block hash by number. - fn block_hash(&self, number: u64) -> RethResult> { + fn block_hash(&self, number: u64) -> ProviderResult> { self.tx.get::(number).map_err(Into::into) } @@ -183,7 +183,7 @@ impl<'b, TX: DbTx> BlockHashReader for HistoricalStateProviderRef<'b, TX> { &self, start: BlockNumber, end: BlockNumber, - ) -> RethResult> { + ) -> ProviderResult> { let range = start..end; self.tx .cursor_read::() @@ -191,15 +191,15 @@ impl<'b, TX: DbTx> BlockHashReader for HistoricalStateProviderRef<'b, TX> { cursor .walk_range(range)? .map(|result| result.map(|(_, hash)| hash).map_err(Into::into)) - .collect::>>() + .collect::>>() })? .map_err(Into::into) } } impl<'b, TX: DbTx> StateRootProvider for HistoricalStateProviderRef<'b, TX> { - fn state_root(&self, _post_state: &BundleStateWithReceipts) -> RethResult { - Err(ProviderError::StateRootNotAvailableForHistoricalBlock.into()) + fn state_root(&self, _post_state: &BundleStateWithReceipts) -> ProviderResult { + Err(ProviderError::StateRootNotAvailableForHistoricalBlock) } } @@ -209,7 +209,7 @@ impl<'b, TX: DbTx> StateProvider for HistoricalStateProviderRef<'b, TX> { &self, address: Address, storage_key: StorageKey, - ) -> RethResult> { + ) -> ProviderResult> { match self.storage_history_lookup(address, storage_key)? { HistoryInfo::NotYetWritten => Ok(None), HistoryInfo::InChangeset(changeset_block_number) => Ok(Some( @@ -235,13 +235,13 @@ impl<'b, TX: DbTx> StateProvider for HistoricalStateProviderRef<'b, TX> { } /// Get account code by its hash - fn bytecode_by_hash(&self, code_hash: B256) -> RethResult> { + fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult> { self.tx.get::(code_hash).map_err(Into::into) } /// Get account and storage proofs. 
- fn proof(&self, _address: Address, _keys: &[B256]) -> RethResult { - Err(ProviderError::StateRootNotAvailableForHistoricalBlock.into()) + fn proof(&self, _address: Address, _keys: &[B256]) -> ProviderResult { + Err(ProviderError::StateRootNotAvailableForHistoricalBlock) } } @@ -566,11 +566,11 @@ mod tests { ); assert_eq!( provider.account_history_lookup(ADDRESS), - Err(ProviderError::StateAtBlockPruned(provider.block_number).into()) + Err(ProviderError::StateAtBlockPruned(provider.block_number)) ); assert_eq!( provider.storage_history_lookup(ADDRESS, STORAGE), - Err(ProviderError::StateAtBlockPruned(provider.block_number).into()) + Err(ProviderError::StateAtBlockPruned(provider.block_number)) ); // provider block_number == lowest available block number, diff --git a/crates/storage/provider/src/providers/state/latest.rs b/crates/storage/provider/src/providers/state/latest.rs index be70c19455088..1b45555fc2183 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -7,7 +7,7 @@ use reth_db::{ tables, transaction::DbTx, }; -use reth_interfaces::{provider::ProviderError, RethError, RethResult}; +use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{ keccak256, trie::AccountProof, Account, Address, BlockNumber, Bytecode, StorageKey, StorageValue, B256, @@ -29,14 +29,14 @@ impl<'b, TX: DbTx> LatestStateProviderRef<'b, TX> { impl<'b, TX: DbTx> AccountReader for LatestStateProviderRef<'b, TX> { /// Get basic account information. - fn basic_account(&self, address: Address) -> RethResult> { + fn basic_account(&self, address: Address) -> ProviderResult> { self.db.get::(address).map_err(Into::into) } } impl<'b, TX: DbTx> BlockHashReader for LatestStateProviderRef<'b, TX> { /// Get block hash by number. - fn block_hash(&self, number: u64) -> RethResult> { + fn block_hash(&self, number: u64) -> ProviderResult> { self.db.get::(number).map_err(Into::into) } @@ -44,7 +44,7 @@ impl<'b, TX: DbTx> BlockHashReader for LatestStateProviderRef<'b, TX> { &self, start: BlockNumber, end: BlockNumber, - ) -> RethResult> { + ) -> ProviderResult> { let range = start..end; self.db .cursor_read::() @@ -52,15 +52,15 @@ impl<'b, TX: DbTx> BlockHashReader for LatestStateProviderRef<'b, TX> { cursor .walk_range(range)? .map(|result| result.map(|(_, hash)| hash).map_err(Into::into)) - .collect::>>() + .collect::>>() })? .map_err(Into::into) } } impl<'b, TX: DbTx> StateRootProvider for LatestStateProviderRef<'b, TX> { - fn state_root(&self, bundle_state: &BundleStateWithReceipts) -> RethResult { - bundle_state.state_root_slow(self.db).map_err(|err| RethError::Database(err.into())) + fn state_root(&self, bundle_state: &BundleStateWithReceipts) -> ProviderResult { + bundle_state.state_root_slow(self.db).map_err(|err| ProviderError::Database(err.into())) } } @@ -70,7 +70,7 @@ impl<'b, TX: DbTx> StateProvider for LatestStateProviderRef<'b, TX> { &self, account: Address, storage_key: StorageKey, - ) -> RethResult> { + ) -> ProviderResult> { let mut cursor = self.db.cursor_dup_read::()?; if let Some(entry) = cursor.seek_by_key_subkey(account, storage_key)? 
{ if entry.key == storage_key { @@ -81,11 +81,11 @@ impl<'b, TX: DbTx> StateProvider for LatestStateProviderRef<'b, TX> { } /// Get account code by its hash - fn bytecode_by_hash(&self, code_hash: B256) -> RethResult> { + fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult> { self.db.get::(code_hash).map_err(Into::into) } - fn proof(&self, address: Address, _keys: &[B256]) -> RethResult { + fn proof(&self, address: Address, _keys: &[B256]) -> ProviderResult { let _hashed_address = keccak256(address); let _root = self .db diff --git a/crates/storage/provider/src/providers/state/macros.rs b/crates/storage/provider/src/providers/state/macros.rs index d14c7235f3936..67b3c33f50af1 100644 --- a/crates/storage/provider/src/providers/state/macros.rs +++ b/crates/storage/provider/src/providers/state/macros.rs @@ -31,19 +31,19 @@ macro_rules! delegate_provider_impls { $crate::providers::state::macros::delegate_impls_to_as_ref!( for $target => StateRootProvider $(where [$($generics)*])? { - fn state_root(&self, state: &crate::BundleStateWithReceipts) -> reth_interfaces::RethResult; + fn state_root(&self, state: &crate::BundleStateWithReceipts) -> reth_interfaces::provider::ProviderResult; } AccountReader $(where [$($generics)*])? { - fn basic_account(&self, address: reth_primitives::Address) -> reth_interfaces::RethResult>; + fn basic_account(&self, address: reth_primitives::Address) -> reth_interfaces::provider::ProviderResult>; } BlockHashReader $(where [$($generics)*])? { - fn block_hash(&self, number: u64) -> reth_interfaces::RethResult>; - fn canonical_hashes_range(&self, start: reth_primitives::BlockNumber, end: reth_primitives::BlockNumber) -> reth_interfaces::RethResult>; + fn block_hash(&self, number: u64) -> reth_interfaces::provider::ProviderResult>; + fn canonical_hashes_range(&self, start: reth_primitives::BlockNumber, end: reth_primitives::BlockNumber) -> reth_interfaces::provider::ProviderResult>; } StateProvider $(where [$($generics)*])?{ - fn storage(&self, account: reth_primitives::Address, storage_key: reth_primitives::StorageKey) -> reth_interfaces::RethResult>; - fn proof(&self, address: reth_primitives::Address, keys: &[reth_primitives::B256]) -> reth_interfaces::RethResult; - fn bytecode_by_hash(&self, code_hash: reth_primitives::B256) -> reth_interfaces::RethResult>; + fn storage(&self, account: reth_primitives::Address, storage_key: reth_primitives::StorageKey) -> reth_interfaces::provider::ProviderResult>; + fn proof(&self, address: reth_primitives::Address, keys: &[reth_primitives::B256]) -> reth_interfaces::provider::ProviderResult; + fn bytecode_by_hash(&self, code_hash: reth_primitives::B256) -> reth_interfaces::provider::ProviderResult>; } ); } diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 8f18732f44aac..c4689ac57234d 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -8,7 +8,7 @@ use crate::{ }; use parking_lot::Mutex; use reth_db::models::{AccountBeforeTx, StoredBlockBodyIndices}; -use reth_interfaces::{provider::ProviderError, RethResult}; +use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{ keccak256, trie::AccountProof, Account, Address, Block, BlockHash, BlockHashOrNumber, BlockId, BlockNumber, BlockWithSenders, Bytecode, Bytes, ChainInfo, ChainSpec, Header, Receipt, @@ -124,17 +124,17 @@ impl MockEthProvider { } impl HeaderProvider for MockEthProvider { - fn header(&self, block_hash: 
&BlockHash) -> RethResult> { + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { let lock = self.headers.lock(); Ok(lock.get(block_hash).cloned()) } - fn header_by_number(&self, num: u64) -> RethResult> { + fn header_by_number(&self, num: u64) -> ProviderResult> { let lock = self.headers.lock(); Ok(lock.values().find(|h| h.number == num).cloned()) } - fn header_td(&self, hash: &BlockHash) -> RethResult> { + fn header_td(&self, hash: &BlockHash) -> ProviderResult> { let lock = self.headers.lock(); Ok(lock.get(hash).map(|target| { lock.values() @@ -143,7 +143,7 @@ impl HeaderProvider for MockEthProvider { })) } - fn header_td_by_number(&self, number: BlockNumber) -> RethResult> { + fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult> { let lock = self.headers.lock(); let sum = lock .values() @@ -152,7 +152,7 @@ impl HeaderProvider for MockEthProvider { Ok(Some(sum)) } - fn headers_range(&self, range: impl RangeBounds) -> RethResult> { + fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { let lock = self.headers.lock(); let mut headers: Vec<_> = @@ -162,7 +162,7 @@ impl HeaderProvider for MockEthProvider { Ok(headers) } - fn sealed_header(&self, number: BlockNumber) -> RethResult> { + fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { Ok(self.header_by_number(number)?.map(|h| h.seal_slow())) } @@ -170,7 +170,7 @@ impl HeaderProvider for MockEthProvider { &self, range: impl RangeBounds, mut predicate: impl FnMut(&SealedHeader) -> bool, - ) -> RethResult> { + ) -> ProviderResult> { Ok(self .headers_range(range)? .into_iter() @@ -187,7 +187,7 @@ impl ChainSpecProvider for MockEthProvider { } impl TransactionsProvider for MockEthProvider { - fn transaction_id(&self, tx_hash: TxHash) -> RethResult> { + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { let lock = self.blocks.lock(); let tx_number = lock .values() @@ -198,7 +198,7 @@ impl TransactionsProvider for MockEthProvider { Ok(tx_number) } - fn transaction_by_id(&self, id: TxNumber) -> RethResult> { + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { let lock = self.blocks.lock(); let transaction = lock.values().flat_map(|block| &block.body).nth(id as usize).cloned(); @@ -208,7 +208,7 @@ impl TransactionsProvider for MockEthProvider { fn transaction_by_id_no_hash( &self, id: TxNumber, - ) -> RethResult> { + ) -> ProviderResult> { let lock = self.blocks.lock(); let transaction = lock .values() @@ -219,7 +219,7 @@ impl TransactionsProvider for MockEthProvider { Ok(transaction) } - fn transaction_by_hash(&self, hash: TxHash) -> RethResult> { + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { Ok(self .blocks .lock() @@ -230,7 +230,7 @@ impl TransactionsProvider for MockEthProvider { fn transaction_by_hash_with_meta( &self, hash: TxHash, - ) -> RethResult> { + ) -> ProviderResult> { let lock = self.blocks.lock(); for (block_hash, block) in lock.iter() { for (index, tx) in block.body.iter().enumerate() { @@ -250,7 +250,7 @@ impl TransactionsProvider for MockEthProvider { Ok(None) } - fn transaction_block(&self, id: TxNumber) -> RethResult> { + fn transaction_block(&self, id: TxNumber) -> ProviderResult> { let lock = self.blocks.lock(); let mut current_tx_number: TxNumber = 0; for block in lock.values() { @@ -265,14 +265,14 @@ impl TransactionsProvider for MockEthProvider { fn transactions_by_block( &self, id: BlockHashOrNumber, - ) -> RethResult>> { + ) -> ProviderResult>> { Ok(self.block(id)?.map(|b| b.body)) } fn transactions_by_block_range( 
&self, range: impl RangeBounds, - ) -> RethResult>> { + ) -> ProviderResult>> { // init btreemap so we can return in order let mut map = BTreeMap::new(); for (_, block) in self.blocks.lock().iter() { @@ -287,7 +287,7 @@ impl TransactionsProvider for MockEthProvider { fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> RethResult> { + ) -> ProviderResult> { let lock = self.blocks.lock(); let transactions = lock .values() @@ -305,7 +305,10 @@ impl TransactionsProvider for MockEthProvider { Ok(transactions) } - fn senders_by_tx_range(&self, range: impl RangeBounds) -> RethResult> { + fn senders_by_tx_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult> { let lock = self.blocks.lock(); let transactions = lock .values() @@ -323,21 +326,21 @@ impl TransactionsProvider for MockEthProvider { Ok(transactions) } - fn transaction_sender(&self, id: TxNumber) -> RethResult> { + fn transaction_sender(&self, id: TxNumber) -> ProviderResult> { self.transaction_by_id(id).map(|tx_option| tx_option.map(|tx| tx.recover_signer().unwrap())) } } impl ReceiptProvider for MockEthProvider { - fn receipt(&self, _id: TxNumber) -> RethResult> { + fn receipt(&self, _id: TxNumber) -> ProviderResult> { Ok(None) } - fn receipt_by_hash(&self, _hash: TxHash) -> RethResult> { + fn receipt_by_hash(&self, _hash: TxHash) -> ProviderResult> { Ok(None) } - fn receipts_by_block(&self, _block: BlockHashOrNumber) -> RethResult>> { + fn receipts_by_block(&self, _block: BlockHashOrNumber) -> ProviderResult>> { Ok(None) } } @@ -345,7 +348,7 @@ impl ReceiptProvider for MockEthProvider { impl ReceiptProviderIdExt for MockEthProvider {} impl BlockHashReader for MockEthProvider { - fn block_hash(&self, number: u64) -> RethResult> { + fn block_hash(&self, number: u64) -> ProviderResult> { let lock = self.blocks.lock(); let hash = lock.iter().find_map(|(hash, b)| (b.number == number).then_some(*hash)); @@ -356,7 +359,7 @@ impl BlockHashReader for MockEthProvider { &self, start: BlockNumber, end: BlockNumber, - ) -> RethResult> { + ) -> ProviderResult> { let range = start..end; let lock = self.blocks.lock(); @@ -369,7 +372,7 @@ impl BlockHashReader for MockEthProvider { } impl BlockNumReader for MockEthProvider { - fn chain_info(&self) -> RethResult { + fn chain_info(&self) -> ProviderResult { let best_block_number = self.best_block_number()?; let lock = self.headers.lock(); @@ -380,20 +383,19 @@ impl BlockNumReader for MockEthProvider { .unwrap_or_default()) } - fn best_block_number(&self) -> RethResult { + fn best_block_number(&self) -> ProviderResult { let lock = self.headers.lock(); - Ok(lock - .iter() + lock.iter() .max_by_key(|h| h.1.number) .map(|(_, header)| header.number) - .ok_or(ProviderError::BestBlockNotFound)?) 
+ .ok_or(ProviderError::BestBlockNotFound) } - fn last_block_number(&self) -> RethResult { + fn last_block_number(&self) -> ProviderResult { self.best_block_number() } - fn block_number(&self, hash: B256) -> RethResult> { + fn block_number(&self, hash: B256) -> ProviderResult> { let lock = self.blocks.lock(); let num = lock.iter().find_map(|(h, b)| (*h == hash).then_some(b.number)); Ok(num) @@ -401,25 +403,29 @@ impl BlockNumReader for MockEthProvider { } impl BlockIdReader for MockEthProvider { - fn pending_block_num_hash(&self) -> RethResult> { + fn pending_block_num_hash(&self) -> ProviderResult> { Ok(None) } - fn safe_block_num_hash(&self) -> RethResult> { + fn safe_block_num_hash(&self) -> ProviderResult> { Ok(None) } - fn finalized_block_num_hash(&self) -> RethResult> { + fn finalized_block_num_hash(&self) -> ProviderResult> { Ok(None) } } impl BlockReader for MockEthProvider { - fn find_block_by_hash(&self, hash: B256, _source: BlockSource) -> RethResult> { + fn find_block_by_hash( + &self, + hash: B256, + _source: BlockSource, + ) -> ProviderResult> { self.block(hash.into()) } - fn block(&self, id: BlockHashOrNumber) -> RethResult> { + fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { let lock = self.blocks.lock(); match id { BlockHashOrNumber::Hash(hash) => Ok(lock.get(&hash).cloned()), @@ -427,19 +433,19 @@ impl BlockReader for MockEthProvider { } } - fn pending_block(&self) -> RethResult> { + fn pending_block(&self) -> ProviderResult> { Ok(None) } - fn pending_block_and_receipts(&self) -> RethResult)>> { + fn pending_block_and_receipts(&self) -> ProviderResult)>> { Ok(None) } - fn ommers(&self, _id: BlockHashOrNumber) -> RethResult>> { + fn ommers(&self, _id: BlockHashOrNumber) -> ProviderResult>> { Ok(None) } - fn block_body_indices(&self, _num: u64) -> RethResult> { + fn block_body_indices(&self, _num: u64) -> ProviderResult> { Ok(None) } @@ -447,35 +453,35 @@ impl BlockReader for MockEthProvider { &self, _id: BlockHashOrNumber, _transaction_kind: TransactionVariant, - ) -> RethResult> { + ) -> ProviderResult> { Ok(None) } - fn block_range(&self, _range: RangeInclusive) -> RethResult> { + fn block_range(&self, _range: RangeInclusive) -> ProviderResult> { Ok(vec![]) } } impl BlockReaderIdExt for MockEthProvider { - fn block_by_id(&self, id: BlockId) -> RethResult> { + fn block_by_id(&self, id: BlockId) -> ProviderResult> { match id { BlockId::Number(num) => self.block_by_number_or_tag(num), BlockId::Hash(hash) => self.block_by_hash(hash.block_hash), } } - fn sealed_header_by_id(&self, id: BlockId) -> RethResult> { + fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult> { self.header_by_id(id)?.map_or_else(|| Ok(None), |h| Ok(Some(h.seal_slow()))) } - fn header_by_id(&self, id: BlockId) -> RethResult> { + fn header_by_id(&self, id: BlockId) -> ProviderResult> { match self.block_by_id(id)? 
{ None => Ok(None), Some(block) => Ok(Some(block.header)), } } - fn ommers_by_id(&self, id: BlockId) -> RethResult>> { + fn ommers_by_id(&self, id: BlockId) -> ProviderResult>> { match id { BlockId::Number(num) => self.ommers_by_number_or_tag(num), BlockId::Hash(hash) => self.ommers(BlockHashOrNumber::Hash(hash.block_hash)), @@ -484,13 +490,13 @@ impl BlockReaderIdExt for MockEthProvider { } impl AccountReader for MockEthProvider { - fn basic_account(&self, address: Address) -> RethResult> { + fn basic_account(&self, address: Address) -> ProviderResult> { Ok(self.accounts.lock().get(&address).cloned().map(|a| a.account)) } } impl StateRootProvider for MockEthProvider { - fn state_root(&self, _state: &BundleStateWithReceipts) -> RethResult { + fn state_root(&self, _state: &BundleStateWithReceipts) -> ProviderResult { todo!() } } @@ -500,12 +506,12 @@ impl StateProvider for MockEthProvider { &self, account: Address, storage_key: StorageKey, - ) -> RethResult> { + ) -> ProviderResult> { let lock = self.accounts.lock(); Ok(lock.get(&account).and_then(|account| account.storage.get(&storage_key)).cloned()) } - fn bytecode_by_hash(&self, code_hash: B256) -> RethResult> { + fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult> { let lock = self.accounts.lock(); Ok(lock.values().find_map(|account| { match (account.account.bytecode_hash.as_ref(), account.bytecode.as_ref()) { @@ -517,7 +523,7 @@ impl StateProvider for MockEthProvider { })) } - fn proof(&self, _address: Address, _keys: &[B256]) -> RethResult { + fn proof(&self, _address: Address, _keys: &[B256]) -> ProviderResult { todo!() } } @@ -528,7 +534,7 @@ impl EvmEnvProvider for MockEthProvider { _cfg: &mut CfgEnv, _block_env: &mut BlockEnv, _at: BlockHashOrNumber, - ) -> RethResult<()> { + ) -> ProviderResult<()> { Ok(()) } @@ -537,7 +543,7 @@ impl EvmEnvProvider for MockEthProvider { _cfg: &mut CfgEnv, _block_env: &mut BlockEnv, _header: &Header, - ) -> RethResult<()> { + ) -> ProviderResult<()> { Ok(()) } @@ -545,7 +551,7 @@ impl EvmEnvProvider for MockEthProvider { &self, _block_env: &mut BlockEnv, _at: BlockHashOrNumber, - ) -> RethResult<()> { + ) -> ProviderResult<()> { Ok(()) } @@ -553,94 +559,100 @@ impl EvmEnvProvider for MockEthProvider { &self, _block_env: &mut BlockEnv, _header: &Header, - ) -> RethResult<()> { + ) -> ProviderResult<()> { Ok(()) } - fn fill_cfg_env_at(&self, _cfg: &mut CfgEnv, _at: BlockHashOrNumber) -> RethResult<()> { + fn fill_cfg_env_at(&self, _cfg: &mut CfgEnv, _at: BlockHashOrNumber) -> ProviderResult<()> { Ok(()) } - fn fill_cfg_env_with_header(&self, _cfg: &mut CfgEnv, _header: &Header) -> RethResult<()> { + fn fill_cfg_env_with_header(&self, _cfg: &mut CfgEnv, _header: &Header) -> ProviderResult<()> { Ok(()) } } impl StateProviderFactory for MockEthProvider { - fn latest(&self) -> RethResult> { + fn latest(&self) -> ProviderResult> { Ok(Box::new(self.clone())) } - fn history_by_block_number(&self, _block: BlockNumber) -> RethResult> { + fn history_by_block_number(&self, _block: BlockNumber) -> ProviderResult> { Ok(Box::new(self.clone())) } - fn history_by_block_hash(&self, _block: BlockHash) -> RethResult> { + fn history_by_block_hash(&self, _block: BlockHash) -> ProviderResult> { Ok(Box::new(self.clone())) } - fn state_by_block_hash(&self, _block: BlockHash) -> RethResult> { + fn state_by_block_hash(&self, _block: BlockHash) -> ProviderResult> { Ok(Box::new(self.clone())) } - fn pending(&self) -> RethResult> { + fn pending(&self) -> ProviderResult> { Ok(Box::new(self.clone())) } - fn 
pending_state_by_hash(&self, _block_hash: B256) -> RethResult>> { + fn pending_state_by_hash( + &self, + _block_hash: B256, + ) -> ProviderResult>> { Ok(Some(Box::new(self.clone()))) } fn pending_with_provider<'a>( &'a self, _post_state_data: Box, - ) -> RethResult> { + ) -> ProviderResult> { Ok(Box::new(self.clone())) } } impl StateProviderFactory for Arc { - fn latest(&self) -> RethResult> { + fn latest(&self) -> ProviderResult> { Ok(Box::new(self.clone())) } - fn history_by_block_number(&self, _block: BlockNumber) -> RethResult> { + fn history_by_block_number(&self, _block: BlockNumber) -> ProviderResult> { Ok(Box::new(self.clone())) } - fn history_by_block_hash(&self, _block: BlockHash) -> RethResult> { + fn history_by_block_hash(&self, _block: BlockHash) -> ProviderResult> { Ok(Box::new(self.clone())) } - fn state_by_block_hash(&self, _block: BlockHash) -> RethResult> { + fn state_by_block_hash(&self, _block: BlockHash) -> ProviderResult> { Ok(Box::new(self.clone())) } - fn pending(&self) -> RethResult> { + fn pending(&self) -> ProviderResult> { Ok(Box::new(self.clone())) } - fn pending_state_by_hash(&self, _block_hash: B256) -> RethResult>> { + fn pending_state_by_hash( + &self, + _block_hash: B256, + ) -> ProviderResult>> { Ok(Some(Box::new(self.clone()))) } fn pending_with_provider<'a>( &'a self, _post_state_data: Box, - ) -> RethResult> { + ) -> ProviderResult> { Ok(Box::new(self.clone())) } } impl WithdrawalsProvider for MockEthProvider { - fn latest_withdrawal(&self) -> RethResult> { + fn latest_withdrawal(&self) -> ProviderResult> { Ok(None) } fn withdrawals_by_block( &self, _id: BlockHashOrNumber, _timestamp: u64, - ) -> RethResult>> { + ) -> ProviderResult>> { Ok(None) } } @@ -649,7 +661,7 @@ impl ChangeSetReader for MockEthProvider { fn account_block_changeset( &self, _block_number: BlockNumber, - ) -> RethResult> { + ) -> ProviderResult> { Ok(Vec::default()) } } diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 6780959c120b9..45258bc692635 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -8,7 +8,7 @@ use crate::{ WithdrawalsProvider, }; use reth_db::models::{AccountBeforeTx, StoredBlockBodyIndices}; -use reth_interfaces::RethResult; +use reth_interfaces::provider::ProviderResult; use reth_primitives::{ stage::{StageCheckpoint, StageId}, trie::AccountProof, @@ -36,7 +36,7 @@ impl ChainSpecProvider for NoopProvider { /// Noop implementation for testing purposes impl BlockHashReader for NoopProvider { - fn block_hash(&self, _number: u64) -> RethResult> { + fn block_hash(&self, _number: u64) -> ProviderResult> { Ok(None) } @@ -44,51 +44,55 @@ impl BlockHashReader for NoopProvider { &self, _start: BlockNumber, _end: BlockNumber, - ) -> RethResult> { + ) -> ProviderResult> { Ok(vec![]) } } impl BlockNumReader for NoopProvider { - fn chain_info(&self) -> RethResult { + fn chain_info(&self) -> ProviderResult { Ok(ChainInfo::default()) } - fn best_block_number(&self) -> RethResult { + fn best_block_number(&self) -> ProviderResult { Ok(0) } - fn last_block_number(&self) -> RethResult { + fn last_block_number(&self) -> ProviderResult { Ok(0) } - fn block_number(&self, _hash: B256) -> RethResult> { + fn block_number(&self, _hash: B256) -> ProviderResult> { Ok(None) } } impl BlockReader for NoopProvider { - fn find_block_by_hash(&self, hash: B256, _source: BlockSource) -> RethResult> { + fn find_block_by_hash( + &self, + hash: B256, + _source: BlockSource, 
+ ) -> ProviderResult> { self.block(hash.into()) } - fn block(&self, _id: BlockHashOrNumber) -> RethResult> { + fn block(&self, _id: BlockHashOrNumber) -> ProviderResult> { Ok(None) } - fn pending_block(&self) -> RethResult> { + fn pending_block(&self) -> ProviderResult> { Ok(None) } - fn pending_block_and_receipts(&self) -> RethResult)>> { + fn pending_block_and_receipts(&self) -> ProviderResult)>> { Ok(None) } - fn ommers(&self, _id: BlockHashOrNumber) -> RethResult>> { + fn ommers(&self, _id: BlockHashOrNumber) -> ProviderResult>> { Ok(None) } - fn block_body_indices(&self, _num: u64) -> RethResult> { + fn block_body_indices(&self, _num: u64) -> ProviderResult> { Ok(None) } @@ -96,118 +100,121 @@ impl BlockReader for NoopProvider { &self, _id: BlockHashOrNumber, _transaction_kind: TransactionVariant, - ) -> RethResult> { + ) -> ProviderResult> { Ok(None) } - fn block_range(&self, _range: RangeInclusive) -> RethResult> { + fn block_range(&self, _range: RangeInclusive) -> ProviderResult> { Ok(vec![]) } } impl BlockReaderIdExt for NoopProvider { - fn block_by_id(&self, _id: BlockId) -> RethResult> { + fn block_by_id(&self, _id: BlockId) -> ProviderResult> { Ok(None) } - fn sealed_header_by_id(&self, _id: BlockId) -> RethResult> { + fn sealed_header_by_id(&self, _id: BlockId) -> ProviderResult> { Ok(None) } - fn header_by_id(&self, _id: BlockId) -> RethResult> { + fn header_by_id(&self, _id: BlockId) -> ProviderResult> { Ok(None) } - fn ommers_by_id(&self, _id: BlockId) -> RethResult>> { + fn ommers_by_id(&self, _id: BlockId) -> ProviderResult>> { Ok(None) } } impl BlockIdReader for NoopProvider { - fn pending_block_num_hash(&self) -> RethResult> { + fn pending_block_num_hash(&self) -> ProviderResult> { Ok(None) } - fn safe_block_num_hash(&self) -> RethResult> { + fn safe_block_num_hash(&self) -> ProviderResult> { Ok(None) } - fn finalized_block_num_hash(&self) -> RethResult> { + fn finalized_block_num_hash(&self) -> ProviderResult> { Ok(None) } } impl TransactionsProvider for NoopProvider { - fn transaction_id(&self, _tx_hash: TxHash) -> RethResult> { + fn transaction_id(&self, _tx_hash: TxHash) -> ProviderResult> { Ok(None) } - fn transaction_by_id(&self, _id: TxNumber) -> RethResult> { + fn transaction_by_id(&self, _id: TxNumber) -> ProviderResult> { Ok(None) } fn transaction_by_id_no_hash( &self, _id: TxNumber, - ) -> RethResult> { + ) -> ProviderResult> { Ok(None) } - fn transaction_by_hash(&self, _hash: TxHash) -> RethResult> { + fn transaction_by_hash(&self, _hash: TxHash) -> ProviderResult> { Ok(None) } fn transaction_by_hash_with_meta( &self, _hash: TxHash, - ) -> RethResult> { + ) -> ProviderResult> { Ok(None) } - fn transaction_block(&self, _id: TxNumber) -> RethResult> { + fn transaction_block(&self, _id: TxNumber) -> ProviderResult> { todo!() } fn transactions_by_block( &self, _block_id: BlockHashOrNumber, - ) -> RethResult>> { + ) -> ProviderResult>> { Ok(None) } fn transactions_by_block_range( &self, _range: impl RangeBounds, - ) -> RethResult>> { + ) -> ProviderResult>> { Ok(Vec::default()) } - fn senders_by_tx_range(&self, _range: impl RangeBounds) -> RethResult> { + fn senders_by_tx_range( + &self, + _range: impl RangeBounds, + ) -> ProviderResult> { Ok(Vec::default()) } fn transactions_by_tx_range( &self, _range: impl RangeBounds, - ) -> RethResult> { + ) -> ProviderResult> { Ok(Vec::default()) } - fn transaction_sender(&self, _id: TxNumber) -> RethResult> { + fn transaction_sender(&self, _id: TxNumber) -> ProviderResult> { Ok(None) } } impl ReceiptProvider for 
NoopProvider { - fn receipt(&self, _id: TxNumber) -> RethResult> { + fn receipt(&self, _id: TxNumber) -> ProviderResult> { Ok(None) } - fn receipt_by_hash(&self, _hash: TxHash) -> RethResult> { + fn receipt_by_hash(&self, _hash: TxHash) -> ProviderResult> { Ok(None) } - fn receipts_by_block(&self, _block: BlockHashOrNumber) -> RethResult>> { + fn receipts_by_block(&self, _block: BlockHashOrNumber) -> ProviderResult>> { Ok(None) } } @@ -215,27 +222,27 @@ impl ReceiptProvider for NoopProvider { impl ReceiptProviderIdExt for NoopProvider {} impl HeaderProvider for NoopProvider { - fn header(&self, _block_hash: &BlockHash) -> RethResult> { + fn header(&self, _block_hash: &BlockHash) -> ProviderResult> { Ok(None) } - fn header_by_number(&self, _num: u64) -> RethResult> { + fn header_by_number(&self, _num: u64) -> ProviderResult> { Ok(None) } - fn header_td(&self, _hash: &BlockHash) -> RethResult> { + fn header_td(&self, _hash: &BlockHash) -> ProviderResult> { Ok(None) } - fn header_td_by_number(&self, _number: BlockNumber) -> RethResult> { + fn header_td_by_number(&self, _number: BlockNumber) -> ProviderResult> { Ok(None) } - fn headers_range(&self, _range: impl RangeBounds) -> RethResult> { + fn headers_range(&self, _range: impl RangeBounds) -> ProviderResult> { Ok(vec![]) } - fn sealed_header(&self, _number: BlockNumber) -> RethResult> { + fn sealed_header(&self, _number: BlockNumber) -> ProviderResult> { Ok(None) } @@ -243,13 +250,13 @@ impl HeaderProvider for NoopProvider { &self, _range: impl RangeBounds, _predicate: impl FnMut(&SealedHeader) -> bool, - ) -> RethResult> { + ) -> ProviderResult> { Ok(vec![]) } } impl AccountReader for NoopProvider { - fn basic_account(&self, _address: Address) -> RethResult> { + fn basic_account(&self, _address: Address) -> ProviderResult> { Ok(None) } } @@ -258,13 +265,13 @@ impl ChangeSetReader for NoopProvider { fn account_block_changeset( &self, _block_number: BlockNumber, - ) -> RethResult> { + ) -> ProviderResult> { Ok(Vec::default()) } } impl StateRootProvider for NoopProvider { - fn state_root(&self, _state: &BundleStateWithReceipts) -> RethResult { + fn state_root(&self, _state: &BundleStateWithReceipts) -> ProviderResult { Ok(B256::default()) } } @@ -274,15 +281,15 @@ impl StateProvider for NoopProvider { &self, _account: Address, _storage_key: StorageKey, - ) -> RethResult> { + ) -> ProviderResult> { Ok(None) } - fn bytecode_by_hash(&self, _code_hash: B256) -> RethResult> { + fn bytecode_by_hash(&self, _code_hash: B256) -> ProviderResult> { Ok(None) } - fn proof(&self, _address: Address, _keys: &[B256]) -> RethResult { + fn proof(&self, _address: Address, _keys: &[B256]) -> ProviderResult { Ok(AccountProof::default()) } } @@ -293,7 +300,7 @@ impl EvmEnvProvider for NoopProvider { _cfg: &mut CfgEnv, _block_env: &mut BlockEnv, _at: BlockHashOrNumber, - ) -> RethResult<()> { + ) -> ProviderResult<()> { Ok(()) } @@ -302,7 +309,7 @@ impl EvmEnvProvider for NoopProvider { _cfg: &mut CfgEnv, _block_env: &mut BlockEnv, _header: &Header, - ) -> RethResult<()> { + ) -> ProviderResult<()> { Ok(()) } @@ -310,7 +317,7 @@ impl EvmEnvProvider for NoopProvider { &self, _block_env: &mut BlockEnv, _at: BlockHashOrNumber, - ) -> RethResult<()> { + ) -> ProviderResult<()> { Ok(()) } @@ -318,77 +325,83 @@ impl EvmEnvProvider for NoopProvider { &self, _block_env: &mut BlockEnv, _header: &Header, - ) -> RethResult<()> { + ) -> ProviderResult<()> { Ok(()) } - fn fill_cfg_env_at(&self, _cfg: &mut CfgEnv, _at: BlockHashOrNumber) -> RethResult<()> { + fn 
fill_cfg_env_at(&self, _cfg: &mut CfgEnv, _at: BlockHashOrNumber) -> ProviderResult<()> { Ok(()) } - fn fill_cfg_env_with_header(&self, _cfg: &mut CfgEnv, _header: &Header) -> RethResult<()> { + fn fill_cfg_env_with_header(&self, _cfg: &mut CfgEnv, _header: &Header) -> ProviderResult<()> { Ok(()) } } impl StateProviderFactory for NoopProvider { - fn latest(&self) -> RethResult> { + fn latest(&self) -> ProviderResult> { Ok(Box::new(*self)) } - fn history_by_block_number(&self, _block: BlockNumber) -> RethResult> { + fn history_by_block_number(&self, _block: BlockNumber) -> ProviderResult> { Ok(Box::new(*self)) } - fn history_by_block_hash(&self, _block: BlockHash) -> RethResult> { + fn history_by_block_hash(&self, _block: BlockHash) -> ProviderResult> { Ok(Box::new(*self)) } - fn state_by_block_hash(&self, _block: BlockHash) -> RethResult> { + fn state_by_block_hash(&self, _block: BlockHash) -> ProviderResult> { Ok(Box::new(*self)) } - fn pending(&self) -> RethResult> { + fn pending(&self) -> ProviderResult> { Ok(Box::new(*self)) } - fn pending_state_by_hash(&self, _block_hash: B256) -> RethResult>> { + fn pending_state_by_hash( + &self, + _block_hash: B256, + ) -> ProviderResult>> { Ok(Some(Box::new(*self))) } fn pending_with_provider<'a>( &'a self, _post_state_data: Box, - ) -> RethResult> { + ) -> ProviderResult> { Ok(Box::new(*self)) } } impl StageCheckpointReader for NoopProvider { - fn get_stage_checkpoint(&self, _id: StageId) -> RethResult> { + fn get_stage_checkpoint(&self, _id: StageId) -> ProviderResult> { Ok(None) } - fn get_stage_checkpoint_progress(&self, _id: StageId) -> RethResult>> { + fn get_stage_checkpoint_progress(&self, _id: StageId) -> ProviderResult>> { Ok(None) } } impl WithdrawalsProvider for NoopProvider { - fn latest_withdrawal(&self) -> RethResult> { + fn latest_withdrawal(&self) -> ProviderResult> { Ok(None) } fn withdrawals_by_block( &self, _id: BlockHashOrNumber, _timestamp: u64, - ) -> RethResult>> { + ) -> ProviderResult>> { Ok(None) } } impl PruneCheckpointReader for NoopProvider { - fn get_prune_checkpoint(&self, _segment: PruneSegment) -> RethResult> { + fn get_prune_checkpoint( + &self, + _segment: PruneSegment, + ) -> ProviderResult> { Ok(None) } } diff --git a/crates/storage/provider/src/traits/account.rs b/crates/storage/provider/src/traits/account.rs index 57243c698823b..16042bce122f6 100644 --- a/crates/storage/provider/src/traits/account.rs +++ b/crates/storage/provider/src/traits/account.rs @@ -1,6 +1,6 @@ use auto_impl::auto_impl; use reth_db::models::AccountBeforeTx; -use reth_interfaces::RethResult; +use reth_interfaces::provider::ProviderResult; use reth_primitives::{Account, Address, BlockNumber}; use std::{ collections::{BTreeMap, BTreeSet}, @@ -13,7 +13,7 @@ pub trait AccountReader: Send + Sync { /// Get basic account information. /// /// Returns `None` if the account doesn't exist. - fn basic_account(&self, address: Address) -> RethResult>; + fn basic_account(&self, address: Address) -> ProviderResult>; } /// Account reader @@ -23,7 +23,7 @@ pub trait AccountExtReader: Send + Sync { fn changed_accounts_with_range( &self, _range: impl RangeBounds, - ) -> RethResult>; + ) -> ProviderResult>; /// Get basic account information for multiple accounts. A more efficient version than calling /// [`AccountReader::basic_account`] repeatedly. 
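Stepping back from the mechanical churn: every hunk in these provider traits swaps the broad RethResult for a storage-specific ProviderResult. A minimal sketch of the pattern, assuming a reduced error enum (the real ProviderError in reth_interfaces carries many more variants):

use std::fmt;

// Illustrative subset of the storage-level error the provider traits now
// return; the real `ProviderError` has many more variants.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ProviderError {
    BestBlockNotFound,
    SafeBlockNotFound,
    FinalizedBlockNotFound,
}

impl fmt::Display for ProviderError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{:?}", self)
    }
}

impl std::error::Error for ProviderError {}

/// The alias every signature in this diff migrates to.
pub type ProviderResult<T> = Result<T, ProviderError>;

// Callers that still work in terms of the broader error convert at the
// boundary, so `?` keeps working outside the storage crates.
#[derive(Debug)]
pub enum RethError {
    Provider(ProviderError),
}

impl From<ProviderError> for RethError {
    fn from(err: ProviderError) -> Self {
        RethError::Provider(err)
    }
}

With the From impl in place, call sites outside the storage layer keep compiling: a `?` on a ProviderResult inside a function that returns the broader result converts automatically, which is why most hunks in this series only touch signatures.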
@@ -32,7 +32,7 @@ pub trait AccountExtReader: Send + Sync { fn basic_accounts( &self, _iter: impl IntoIterator, - ) -> RethResult)>>; + ) -> ProviderResult)>>; /// Iterate over account changesets and return all account addresses that were changed alongside /// each specific set of blocks. @@ -41,7 +41,7 @@ pub trait AccountExtReader: Send + Sync { fn changed_accounts_and_blocks_with_range( &self, range: RangeInclusive, - ) -> RethResult>>; + ) -> ProviderResult>>; } /// AccountChange reader @@ -51,5 +51,5 @@ pub trait ChangeSetReader: Send + Sync { fn account_block_changeset( &self, block_number: BlockNumber, - ) -> RethResult>; + ) -> ProviderResult>; } diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index e5ea94641396a..76690580620b1 100644 --- a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -4,7 +4,7 @@ use crate::{ }; use auto_impl::auto_impl; use reth_db::models::StoredBlockBodyIndices; -use reth_interfaces::RethResult; +use reth_interfaces::provider::ProviderResult; use reth_primitives::{ Address, Block, BlockHashOrNumber, BlockId, BlockNumber, BlockNumberOrTag, BlockWithSenders, ChainSpec, Header, PruneModes, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, @@ -72,45 +72,45 @@ pub trait BlockReader: /// Note: this only operates on the hash because the number might be ambiguous. /// /// Returns `None` if block is not found. - fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> RethResult>; + fn find_block_by_hash(&self, hash: B256, source: BlockSource) -> ProviderResult>; /// Returns the block with given id from the database. /// /// Returns `None` if block is not found. - fn block(&self, id: BlockHashOrNumber) -> RethResult>; + fn block(&self, id: BlockHashOrNumber) -> ProviderResult>; /// Returns the pending block if available /// /// Note: This returns a [SealedBlock] because it's expected that this is sealed by the provider /// and the caller does not know the hash. - fn pending_block(&self) -> RethResult>; + fn pending_block(&self) -> ProviderResult>; /// Returns the pending block and receipts if available. - fn pending_block_and_receipts(&self) -> RethResult)>>; + fn pending_block_and_receipts(&self) -> ProviderResult)>>; /// Returns the ommers/uncle headers of the given block from the database. /// /// Returns `None` if block is not found. - fn ommers(&self, id: BlockHashOrNumber) -> RethResult>>; + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>>; /// Returns the block with matching hash from the database. /// /// Returns `None` if block is not found. - fn block_by_hash(&self, hash: B256) -> RethResult> { + fn block_by_hash(&self, hash: B256) -> ProviderResult> { self.block(hash.into()) } /// Returns the block with matching number from database. /// /// Returns `None` if block is not found. - fn block_by_number(&self, num: u64) -> RethResult> { + fn block_by_number(&self, num: u64) -> ProviderResult> { self.block(num.into()) } /// Returns the block body indices with matching number from database. /// /// Returns `None` if block is not found. - fn block_body_indices(&self, num: u64) -> RethResult>; + fn block_body_indices(&self, num: u64) -> ProviderResult>; /// Returns the block with senders with matching number or hash from database. 
/// @@ -121,12 +121,12 @@ pub trait BlockReader: &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> RethResult>; + ) -> ProviderResult>; /// Returns all blocks in the given inclusive range. /// /// Note: returns only available blocks - fn block_range(&self, range: RangeInclusive) -> RethResult>; + fn block_range(&self, range: RangeInclusive) -> ProviderResult>; } /// Trait extension for `BlockReader`, for types that implement `BlockId` conversion. @@ -144,7 +144,7 @@ pub trait BlockReaderIdExt: BlockReader + BlockIdReader + ReceiptProviderIdExt { /// Returns the block with matching tag from the database /// /// Returns `None` if block is not found. - fn block_by_number_or_tag(&self, id: BlockNumberOrTag) -> RethResult> { + fn block_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { self.convert_block_number(id)?.map_or_else(|| Ok(None), |num| self.block(num.into())) } @@ -152,7 +152,7 @@ pub trait BlockReaderIdExt: BlockReader + BlockIdReader + ReceiptProviderIdExt { /// /// Note: This returns a [SealedHeader] because it's expected that this is sealed by the /// provider and the caller does not know the hash. - fn pending_header(&self) -> RethResult> { + fn pending_header(&self) -> ProviderResult> { self.sealed_header_by_id(BlockNumberOrTag::Pending.into()) } @@ -160,7 +160,7 @@ pub trait BlockReaderIdExt: BlockReader + BlockIdReader + ReceiptProviderIdExt { /// /// Note: This returns a [SealedHeader] because it's expected that this is sealed by the /// provider and the caller does not know the hash. - fn latest_header(&self) -> RethResult> { + fn latest_header(&self) -> ProviderResult> { self.sealed_header_by_id(BlockNumberOrTag::Latest.into()) } @@ -168,7 +168,7 @@ pub trait BlockReaderIdExt: BlockReader + BlockIdReader + ReceiptProviderIdExt { /// /// Note: This returns a [SealedHeader] because it's expected that this is sealed by the /// provider and the caller does not know the hash. - fn safe_header(&self) -> RethResult> { + fn safe_header(&self) -> ProviderResult> { self.sealed_header_by_id(BlockNumberOrTag::Safe.into()) } @@ -176,19 +176,19 @@ pub trait BlockReaderIdExt: BlockReader + BlockIdReader + ReceiptProviderIdExt { /// /// Note: This returns a [SealedHeader] because it's expected that this is sealed by the /// provider and the caller does not know the hash. - fn finalized_header(&self) -> RethResult> { + fn finalized_header(&self) -> ProviderResult> { self.sealed_header_by_id(BlockNumberOrTag::Finalized.into()) } /// Returns the block with the matching `BlockId` from the database. /// /// Returns `None` if block is not found. - fn block_by_id(&self, id: BlockId) -> RethResult>; + fn block_by_id(&self, id: BlockId) -> ProviderResult>; /// Returns the header with matching tag from the database /// /// Returns `None` if header is not found. - fn header_by_number_or_tag(&self, id: BlockNumberOrTag) -> RethResult> { + fn header_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult> { self.convert_block_number(id)? .map_or_else(|| Ok(None), |num| self.header_by_hash_or_number(num.into())) } @@ -199,7 +199,7 @@ pub trait BlockReaderIdExt: BlockReader + BlockIdReader + ReceiptProviderIdExt { fn sealed_header_by_number_or_tag( &self, id: BlockNumberOrTag, - ) -> RethResult> { + ) -> ProviderResult> { self.convert_block_number(id)? .map_or_else(|| Ok(None), |num| self.header_by_hash_or_number(num.into()))? 
.map_or_else(|| Ok(None), |h| Ok(Some(h.seal_slow()))) @@ -208,22 +208,22 @@ pub trait BlockReaderIdExt: BlockReader + BlockIdReader + ReceiptProviderIdExt { /// Returns the sealed header with the matching `BlockId` from the database. /// /// Returns `None` if header is not found. - fn sealed_header_by_id(&self, id: BlockId) -> RethResult>; + fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult>; /// Returns the header with the matching `BlockId` from the database. /// /// Returns `None` if header is not found. - fn header_by_id(&self, id: BlockId) -> RethResult>; + fn header_by_id(&self, id: BlockId) -> ProviderResult>; /// Returns the ommers with the matching tag from the database. - fn ommers_by_number_or_tag(&self, id: BlockNumberOrTag) -> RethResult>> { + fn ommers_by_number_or_tag(&self, id: BlockNumberOrTag) -> ProviderResult>> { self.convert_block_number(id)?.map_or_else(|| Ok(None), |num| self.ommers(num.into())) } /// Returns the ommers with the matching `BlockId` from the database. /// /// Returns `None` if block is not found. - fn ommers_by_id(&self, id: BlockId) -> RethResult>>; + fn ommers_by_id(&self, id: BlockId) -> ProviderResult>>; } /// BlockExecution Writer @@ -234,7 +234,7 @@ pub trait BlockExecutionWriter: BlockWriter + BlockReader + Send + Sync { &self, chain_spec: &ChainSpec, range: RangeInclusive, - ) -> RethResult { + ) -> ProviderResult { self.get_or_take_block_and_execution_range::(chain_spec, range) } @@ -243,7 +243,7 @@ pub trait BlockExecutionWriter: BlockWriter + BlockReader + Send + Sync { &self, chain_spec: &ChainSpec, range: RangeInclusive, - ) -> RethResult { + ) -> ProviderResult { self.get_or_take_block_and_execution_range::(chain_spec, range) } @@ -252,7 +252,7 @@ pub trait BlockExecutionWriter: BlockWriter + BlockReader + Send + Sync { &self, chain_spec: &ChainSpec, range: RangeInclusive, - ) -> RethResult; + ) -> ProviderResult; } /// Block Writer @@ -268,7 +268,7 @@ pub trait BlockWriter: Send + Sync { block: SealedBlock, senders: Option>, prune_modes: Option<&PruneModes>, - ) -> RethResult; + ) -> ProviderResult; /// Appends a batch of sealed blocks to the blockchain, including sender information, and /// updates the post-state. @@ -290,5 +290,5 @@ pub trait BlockWriter: Send + Sync { blocks: Vec, state: BundleStateWithReceipts, prune_modes: Option<&PruneModes>, - ) -> RethResult<()>; + ) -> ProviderResult<()>; } diff --git a/crates/storage/provider/src/traits/block_hash.rs b/crates/storage/provider/src/traits/block_hash.rs index e7b9e91c039bb..8bb334c8b8460 100644 --- a/crates/storage/provider/src/traits/block_hash.rs +++ b/crates/storage/provider/src/traits/block_hash.rs @@ -1,5 +1,5 @@ use auto_impl::auto_impl; -use reth_interfaces::RethResult; +use reth_interfaces::provider::ProviderResult; use reth_primitives::{BlockHashOrNumber, BlockNumber, B256}; /// Client trait for fetching block hashes by number. @@ -7,11 +7,14 @@ use reth_primitives::{BlockHashOrNumber, BlockNumber, B256}; pub trait BlockHashReader: Send + Sync { /// Get the hash of the block with the given number. Returns `None` if no block with this number /// exists. - fn block_hash(&self, number: BlockNumber) -> RethResult>; + fn block_hash(&self, number: BlockNumber) -> ProviderResult>; /// Get the hash of the block with the given number. Returns `None` if no block with this number /// exists. 
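The convert_block_hash reformat just below is a clean example of the required-plus-provided method layout these reader traits follow: implement the one lookup, get the conversion for free. A self-contained sketch with toy types (fixed-size arrays standing in for B256, and the error elided):

pub type ProviderResult<T> = Result<T, std::convert::Infallible>;

/// Toy mirror of the hash-or-number input type.
#[derive(Clone, Copy)]
pub enum BlockHashOrNumber {
    Hash([u8; 32]),
    Number(u64),
}

pub trait BlockHashReader: Send + Sync {
    /// Required: canonical hash lookup by block number.
    fn block_hash(&self, number: u64) -> ProviderResult<Option<[u8; 32]>>;

    /// Provided: a hash input is already the answer; a number needs a lookup.
    fn convert_block_hash(
        &self,
        hash_or_number: BlockHashOrNumber,
    ) -> ProviderResult<Option<[u8; 32]>> {
        match hash_or_number {
            BlockHashOrNumber::Hash(hash) => Ok(Some(hash)),
            BlockHashOrNumber::Number(num) => self.block_hash(num),
        }
    }
}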
-    fn convert_block_hash(&self, hash_or_number: BlockHashOrNumber) -> RethResult<Option<B256>> {
+    fn convert_block_hash(
+        &self,
+        hash_or_number: BlockHashOrNumber,
+    ) -> ProviderResult<Option<B256>> {
         match hash_or_number {
             BlockHashOrNumber::Hash(hash) => Ok(Some(hash)),
             BlockHashOrNumber::Number(num) => self.block_hash(num),
@@ -23,6 +26,9 @@ pub trait BlockHashReader: Send + Sync {
     /// Returns the available hashes of that range.
     ///
     /// Note: The range is `start..end`, so the expected result is `[start..end)`
-    fn canonical_hashes_range(&self, start: BlockNumber, end: BlockNumber)
-        -> RethResult<Vec<B256>>;
+    fn canonical_hashes_range(
+        &self,
+        start: BlockNumber,
+        end: BlockNumber,
+    ) -> ProviderResult<Vec<B256>>;
 }
diff --git a/crates/storage/provider/src/traits/block_id.rs b/crates/storage/provider/src/traits/block_id.rs
index ee7f548807845..fd52f6c326b2d 100644
--- a/crates/storage/provider/src/traits/block_id.rs
+++ b/crates/storage/provider/src/traits/block_id.rs
@@ -1,5 +1,5 @@
 use super::BlockHashReader;
-use reth_interfaces::{provider::ProviderError, RethResult};
+use reth_interfaces::provider::{ProviderError, ProviderResult};
 use reth_primitives::{BlockHashOrNumber, BlockId, BlockNumber, BlockNumberOrTag, ChainInfo, B256};

 /// Client trait for getting important block numbers (such as the latest block number), converting
@@ -9,20 +9,20 @@ use reth_primitives::{BlockHashOrNumber, BlockId, BlockNumber, BlockNumberOrTag,
 #[auto_impl::auto_impl(&, Arc)]
 pub trait BlockNumReader: BlockHashReader + Send + Sync {
     /// Returns the current info for the chain.
-    fn chain_info(&self) -> RethResult<ChainInfo>;
+    fn chain_info(&self) -> ProviderResult<ChainInfo>;

     /// Returns the best block number in the chain.
-    fn best_block_number(&self) -> RethResult<BlockNumber>;
+    fn best_block_number(&self) -> ProviderResult<BlockNumber>;

     /// Returns the last block number associated with the last canonical header in the database.
-    fn last_block_number(&self) -> RethResult<BlockNumber>;
+    fn last_block_number(&self) -> ProviderResult<BlockNumber>;

     /// Gets the `BlockNumber` for the given hash. Returns `None` if no block with this hash exists.
-    fn block_number(&self, hash: B256) -> RethResult<Option<BlockNumber>>;
+    fn block_number(&self, hash: B256) -> ProviderResult<Option<BlockNumber>>;

     /// Gets the block number for the given `BlockHashOrNumber`. Returns `None` if no block with
     /// this hash exists. If the `BlockHashOrNumber` is a `Number`, it is returned as is.
-    fn convert_hash_or_number(&self, id: BlockHashOrNumber) -> RethResult<Option<BlockNumber>> {
+    fn convert_hash_or_number(&self, id: BlockHashOrNumber) -> ProviderResult<Option<BlockNumber>> {
         match id {
             BlockHashOrNumber::Hash(hash) => self.block_number(hash),
             BlockHashOrNumber::Number(num) => Ok(Some(num)),
@@ -31,7 +31,7 @@ pub trait BlockNumReader: BlockHashReader + Send + Sync {

     /// Gets the block hash for the given `BlockHashOrNumber`. Returns `None` if no block with this
     /// number exists. If the `BlockHashOrNumber` is a `Hash`, it is returned as is.
-    fn convert_number(&self, id: BlockHashOrNumber) -> RethResult<Option<B256>> {
+    fn convert_number(&self, id: BlockHashOrNumber) -> ProviderResult<Option<B256>> {
         match id {
             BlockHashOrNumber::Hash(hash) => Ok(Some(hash)),
             BlockHashOrNumber::Number(num) => self.block_hash(num),
@@ -51,7 +51,7 @@ pub trait BlockNumReader: BlockHashReader + Send + Sync {
 #[auto_impl::auto_impl(&, Arc)]
 pub trait BlockIdReader: BlockNumReader + Send + Sync {
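In convert_block_number, just below, the narrowed return type also removes a conversion: ProviderError values used to need .into() to fit the wider error position, and can now be returned directly. In miniature, under the same reduced error enum as the earlier sketch:

#[derive(Debug)]
pub enum ProviderError {
    FinalizedBlockNotFound,
}

pub type ProviderResult<T> = Result<T, ProviderError>;

fn finalized_block_number() -> ProviderResult<Option<u64>> {
    Ok(None) // stand-in for a real lookup
}

fn resolve_finalized() -> ProviderResult<u64> {
    match finalized_block_number()? {
        Some(number) => Ok(number),
        // Before the migration this needed `.into()` to become the broader
        // error; with `ProviderResult` the variant is returned as-is.
        None => Err(ProviderError::FinalizedBlockNotFound),
    }
}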
     /// Converts the `BlockNumberOrTag` variants to a block number.
-    fn convert_block_number(&self, num: BlockNumberOrTag) -> RethResult<Option<BlockNumber>> {
+    fn convert_block_number(&self, num: BlockNumberOrTag) -> ProviderResult<Option<BlockNumber>> {
         let num = match num {
             BlockNumberOrTag::Latest => self.best_block_number()?,
             BlockNumberOrTag::Earliest => 0,
@@ -63,18 +63,18 @@ pub trait BlockIdReader: BlockNumReader + Send + Sync {
             BlockNumberOrTag::Number(num) => num,
             BlockNumberOrTag::Finalized => match self.finalized_block_number()? {
                 Some(block_number) => block_number,
-                None => return Err(ProviderError::FinalizedBlockNotFound.into()),
+                None => return Err(ProviderError::FinalizedBlockNotFound),
             },
             BlockNumberOrTag::Safe => match self.safe_block_number()? {
                 Some(block_number) => block_number,
-                None => return Err(ProviderError::SafeBlockNotFound.into()),
+                None => return Err(ProviderError::SafeBlockNotFound),
             },
         };
         Ok(Some(num))
     }

     /// Get the hash of the block by matching the given id.
-    fn block_hash_for_id(&self, block_id: BlockId) -> RethResult<Option<B256>> {
+    fn block_hash_for_id(&self, block_id: BlockId) -> ProviderResult<Option<B256>> {
         match block_id {
             BlockId::Hash(hash) => Ok(Some(hash.into())),
             BlockId::Number(num) => {
@@ -97,7 +97,7 @@ pub trait BlockIdReader: BlockNumReader + Send + Sync {
     }

     /// Get the number of the block by matching the given id.
-    fn block_number_for_id(&self, block_id: BlockId) -> RethResult<Option<BlockNumber>> {
+    fn block_number_for_id(&self, block_id: BlockId) -> ProviderResult<Option<BlockNumber>> {
         match block_id {
             BlockId::Hash(hash) => self.block_number(hash.into()),
             BlockId::Number(num) => self.convert_block_number(num),
@@ -105,31 +105,31 @@ pub trait BlockIdReader: BlockNumReader + Send + Sync {
     }

     /// Get the current pending block number and hash.
-    fn pending_block_num_hash(&self) -> RethResult<Option<BlockNumHash>>;
+    fn pending_block_num_hash(&self) -> ProviderResult<Option<BlockNumHash>>;

     /// Get the current safe block number and hash.
-    fn safe_block_num_hash(&self) -> RethResult<Option<BlockNumHash>>;
+    fn safe_block_num_hash(&self) -> ProviderResult<Option<BlockNumHash>>;

     /// Get the current finalized block number and hash.
-    fn finalized_block_num_hash(&self) -> RethResult<Option<BlockNumHash>>;
+    fn finalized_block_num_hash(&self) -> ProviderResult<Option<BlockNumHash>>;

     /// Get the safe block number.
-    fn safe_block_number(&self) -> RethResult<Option<BlockNumber>> {
+    fn safe_block_number(&self) -> ProviderResult<Option<BlockNumber>> {
         self.safe_block_num_hash().map(|res_opt| res_opt.map(|num_hash| num_hash.number))
     }

     /// Get the finalized block number.
-    fn finalized_block_number(&self) -> RethResult<Option<BlockNumber>> {
+    fn finalized_block_number(&self) -> ProviderResult<Option<BlockNumber>> {
         self.finalized_block_num_hash().map(|res_opt| res_opt.map(|num_hash| num_hash.number))
     }

     /// Get the safe block hash.
-    fn safe_block_hash(&self) -> RethResult<Option<B256>> {
+    fn safe_block_hash(&self) -> ProviderResult<Option<B256>> {
         self.safe_block_num_hash().map(|res_opt| res_opt.map(|num_hash| num_hash.hash))
     }

     /// Get the finalized block hash.
- fn finalized_block_hash(&self) -> RethResult> { + fn finalized_block_hash(&self) -> ProviderResult> { self.finalized_block_num_hash().map(|res_opt| res_opt.map(|num_hash| num_hash.hash)) } } diff --git a/crates/storage/provider/src/traits/evm_env.rs b/crates/storage/provider/src/traits/evm_env.rs index 23c13440bcf35..4aa5400789421 100644 --- a/crates/storage/provider/src/traits/evm_env.rs +++ b/crates/storage/provider/src/traits/evm_env.rs @@ -1,4 +1,4 @@ -use reth_interfaces::RethResult; +use reth_interfaces::provider::ProviderResult; use reth_primitives::{BlockHashOrNumber, Header}; use revm::primitives::{BlockEnv, CfgEnv}; @@ -15,10 +15,10 @@ pub trait EvmEnvProvider: Send + Sync { cfg: &mut CfgEnv, block_env: &mut BlockEnv, at: BlockHashOrNumber, - ) -> RethResult<()>; + ) -> ProviderResult<()>; /// Fills the default [CfgEnv] and [BlockEnv] fields with values specific to the given [Header]. - fn env_with_header(&self, header: &Header) -> RethResult<(CfgEnv, BlockEnv)> { + fn env_with_header(&self, header: &Header) -> ProviderResult<(CfgEnv, BlockEnv)> { let mut cfg = CfgEnv::default(); let mut block_env = BlockEnv::default(); self.fill_env_with_header(&mut cfg, &mut block_env, header)?; @@ -31,21 +31,25 @@ pub trait EvmEnvProvider: Send + Sync { cfg: &mut CfgEnv, block_env: &mut BlockEnv, header: &Header, - ) -> RethResult<()>; + ) -> ProviderResult<()>; /// Fills the [BlockEnv] fields with values specific to the given [BlockHashOrNumber]. - fn fill_block_env_at(&self, block_env: &mut BlockEnv, at: BlockHashOrNumber) -> RethResult<()>; + fn fill_block_env_at( + &self, + block_env: &mut BlockEnv, + at: BlockHashOrNumber, + ) -> ProviderResult<()>; /// Fills the [BlockEnv] fields with values specific to the given [Header]. fn fill_block_env_with_header( &self, block_env: &mut BlockEnv, header: &Header, - ) -> RethResult<()>; + ) -> ProviderResult<()>; /// Fills the [CfgEnv] fields with values specific to the given [BlockHashOrNumber]. - fn fill_cfg_env_at(&self, cfg: &mut CfgEnv, at: BlockHashOrNumber) -> RethResult<()>; + fn fill_cfg_env_at(&self, cfg: &mut CfgEnv, at: BlockHashOrNumber) -> ProviderResult<()>; /// Fills the [CfgEnv] fields with values specific to the given [Header]. - fn fill_cfg_env_with_header(&self, cfg: &mut CfgEnv, header: &Header) -> RethResult<()>; + fn fill_cfg_env_with_header(&self, cfg: &mut CfgEnv, header: &Header) -> ProviderResult<()>; } diff --git a/crates/storage/provider/src/traits/hashing.rs b/crates/storage/provider/src/traits/hashing.rs index dfeeabd186200..b5a77247ed1d7 100644 --- a/crates/storage/provider/src/traits/hashing.rs +++ b/crates/storage/provider/src/traits/hashing.rs @@ -1,6 +1,6 @@ use auto_impl::auto_impl; use reth_db::models::BlockNumberAddress; -use reth_interfaces::RethResult; +use reth_interfaces::provider::ProviderResult; use reth_primitives::{Account, Address, BlockNumber, StorageEntry, B256}; use std::{ collections::{BTreeMap, BTreeSet, HashMap}, @@ -18,7 +18,7 @@ pub trait HashingWriter: Send + Sync { fn unwind_account_hashing( &self, range: RangeInclusive, - ) -> RethResult>>; + ) -> ProviderResult>>; /// Inserts all accounts into [reth_db::tables::AccountHistory] table. 
/// @@ -28,7 +28,7 @@ pub trait HashingWriter: Send + Sync { fn insert_account_for_hashing( &self, accounts: impl IntoIterator)>, - ) -> RethResult>>; + ) -> ProviderResult>>; /// Unwind and clear storage hashing /// @@ -38,7 +38,7 @@ pub trait HashingWriter: Send + Sync { fn unwind_storage_hashing( &self, range: Range, - ) -> RethResult>>; + ) -> ProviderResult>>; /// Iterates over storages and inserts them to hashing table. /// @@ -48,7 +48,7 @@ pub trait HashingWriter: Send + Sync { fn insert_storage_for_hashing( &self, storages: impl IntoIterator)>, - ) -> RethResult>>; + ) -> ProviderResult>>; /// Calculate the hashes of all changed accounts and storages, and finally calculate the state /// root. @@ -61,5 +61,5 @@ pub trait HashingWriter: Send + Sync { range: RangeInclusive, end_block_hash: B256, expected_state_root: B256, - ) -> RethResult<()>; + ) -> ProviderResult<()>; } diff --git a/crates/storage/provider/src/traits/header.rs b/crates/storage/provider/src/traits/header.rs index 3beebd1706a4e..ad04f52ac957f 100644 --- a/crates/storage/provider/src/traits/header.rs +++ b/crates/storage/provider/src/traits/header.rs @@ -1,5 +1,5 @@ use auto_impl::auto_impl; -use reth_interfaces::RethResult; +use reth_interfaces::provider::ProviderResult; use reth_primitives::{BlockHash, BlockHashOrNumber, BlockNumber, Header, SealedHeader, U256}; use std::ops::RangeBounds; @@ -7,21 +7,21 @@ use std::ops::RangeBounds; #[auto_impl(&, Arc)] pub trait HeaderProvider: Send + Sync { /// Check if block is known - fn is_known(&self, block_hash: &BlockHash) -> RethResult { + fn is_known(&self, block_hash: &BlockHash) -> ProviderResult { self.header(block_hash).map(|header| header.is_some()) } /// Get header by block hash - fn header(&self, block_hash: &BlockHash) -> RethResult>; + fn header(&self, block_hash: &BlockHash) -> ProviderResult>; /// Get header by block number - fn header_by_number(&self, num: u64) -> RethResult>; + fn header_by_number(&self, num: u64) -> ProviderResult>; /// Get header by block number or hash fn header_by_hash_or_number( &self, hash_or_num: BlockHashOrNumber, - ) -> RethResult> { + ) -> ProviderResult> { match hash_or_num { BlockHashOrNumber::Hash(hash) => self.header(&hash), BlockHashOrNumber::Number(num) => self.header_by_number(num), @@ -29,22 +29,22 @@ pub trait HeaderProvider: Send + Sync { } /// Get total difficulty by block hash. - fn header_td(&self, hash: &BlockHash) -> RethResult>; + fn header_td(&self, hash: &BlockHash) -> ProviderResult>; /// Get total difficulty by block number. - fn header_td_by_number(&self, number: BlockNumber) -> RethResult>; + fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult>; /// Get headers in range of block numbers - fn headers_range(&self, range: impl RangeBounds) -> RethResult>; + fn headers_range(&self, range: impl RangeBounds) -> ProviderResult>; /// Get a single sealed header by block number. - fn sealed_header(&self, number: BlockNumber) -> RethResult>; + fn sealed_header(&self, number: BlockNumber) -> ProviderResult>; /// Get headers in range of block numbers. 
fn sealed_headers_range( &self, range: impl RangeBounds, - ) -> RethResult> { + ) -> ProviderResult> { self.sealed_headers_while(range, |_| true) } @@ -53,5 +53,5 @@ pub trait HeaderProvider: Send + Sync { &self, range: impl RangeBounds, predicate: impl FnMut(&SealedHeader) -> bool, - ) -> RethResult>; + ) -> ProviderResult>; } diff --git a/crates/storage/provider/src/traits/history.rs b/crates/storage/provider/src/traits/history.rs index b9c59cdb76e18..daef02b9f36b2 100644 --- a/crates/storage/provider/src/traits/history.rs +++ b/crates/storage/provider/src/traits/history.rs @@ -1,6 +1,6 @@ use auto_impl::auto_impl; use reth_db::models::BlockNumberAddress; -use reth_interfaces::RethResult; +use reth_interfaces::provider::ProviderResult; use reth_primitives::{Address, BlockNumber, B256}; use std::{ collections::BTreeMap, @@ -16,26 +16,28 @@ pub trait HistoryWriter: Send + Sync { fn unwind_account_history_indices( &self, range: RangeInclusive, - ) -> RethResult; + ) -> ProviderResult; /// Insert account change index to database. Used inside AccountHistoryIndex stage fn insert_account_history_index( &self, account_transitions: BTreeMap>, - ) -> RethResult<()>; + ) -> ProviderResult<()>; /// Unwind and clear storage history indices. /// /// Returns number of changesets walked. - fn unwind_storage_history_indices(&self, range: Range) - -> RethResult; + fn unwind_storage_history_indices( + &self, + range: Range, + ) -> ProviderResult; /// Insert storage change index to database. Used inside StorageHistoryIndex stage fn insert_storage_history_index( &self, storage_transitions: BTreeMap<(Address, B256), Vec>, - ) -> RethResult<()>; + ) -> ProviderResult<()>; /// Read account/storage changesets and update account/storage history indices. - fn update_history_indices(&self, range: RangeInclusive) -> RethResult<()>; + fn update_history_indices(&self, range: RangeInclusive) -> ProviderResult<()>; } diff --git a/crates/storage/provider/src/traits/prune_checkpoint.rs b/crates/storage/provider/src/traits/prune_checkpoint.rs index f44a1c00ef8ec..60470bfecde02 100644 --- a/crates/storage/provider/src/traits/prune_checkpoint.rs +++ b/crates/storage/provider/src/traits/prune_checkpoint.rs @@ -1,11 +1,14 @@ -use reth_interfaces::RethResult; +use reth_interfaces::provider::ProviderResult; use reth_primitives::{PruneCheckpoint, PruneSegment}; /// The trait for fetching prune checkpoint related data. #[auto_impl::auto_impl(&, Arc)] pub trait PruneCheckpointReader: Send + Sync { /// Fetch the checkpoint for the given prune segment. - fn get_prune_checkpoint(&self, segment: PruneSegment) -> RethResult>; + fn get_prune_checkpoint( + &self, + segment: PruneSegment, + ) -> ProviderResult>; } /// The trait for updating prune checkpoint related data. 
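Several of these traits are annotated #[auto_impl::auto_impl(&, Arc)], which is why every reformatted signature stays a plain &self method: the macro derives forwarding implementations for references and Arcs. Roughly what it expands to, hand-written for a one-method stand-in trait (a sketch of the idea, not the macro's literal output):

use std::sync::Arc;

pub trait CheckpointReader: Send + Sync {
    fn checkpoint(&self, id: u32) -> Option<u64>;
}

// Forwarding impl for shared references, so `&T` works wherever the
// trait bound is required.
impl<T: CheckpointReader + ?Sized> CheckpointReader for &T {
    fn checkpoint(&self, id: u32) -> Option<u64> {
        (**self).checkpoint(id)
    }
}

// The same forwarding for `Arc<T>`, covering shared-ownership callers.
impl<T: CheckpointReader + ?Sized> CheckpointReader for Arc<T> {
    fn checkpoint(&self, id: u32) -> Option<u64> {
        (**self).checkpoint(id)
    }
}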
@@ -16,5 +19,5 @@ pub trait PruneCheckpointWriter: Send + Sync { &self, segment: PruneSegment, checkpoint: PruneCheckpoint, - ) -> RethResult<()>; + ) -> ProviderResult<()>; } diff --git a/crates/storage/provider/src/traits/receipts.rs b/crates/storage/provider/src/traits/receipts.rs index ccb1b7dc80ffe..defb69faf83bc 100644 --- a/crates/storage/provider/src/traits/receipts.rs +++ b/crates/storage/provider/src/traits/receipts.rs @@ -1,4 +1,4 @@ -use reth_interfaces::RethResult; +use reth_interfaces::provider::ProviderResult; use reth_primitives::{BlockHashOrNumber, BlockId, BlockNumberOrTag, Receipt, TxHash, TxNumber}; use crate::BlockIdReader; @@ -9,17 +9,17 @@ pub trait ReceiptProvider: Send + Sync { /// Get receipt by transaction number /// /// Returns `None` if the transaction is not found. - fn receipt(&self, id: TxNumber) -> RethResult>; + fn receipt(&self, id: TxNumber) -> ProviderResult>; /// Get receipt by transaction hash. /// /// Returns `None` if the transaction is not found. - fn receipt_by_hash(&self, hash: TxHash) -> RethResult>; + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult>; /// Get receipts by block num or hash. /// /// Returns `None` if the block is not found. - fn receipts_by_block(&self, block: BlockHashOrNumber) -> RethResult>>; + fn receipts_by_block(&self, block: BlockHashOrNumber) -> ProviderResult>>; } /// Trait extension for `ReceiptProvider`, for types that implement `BlockId` conversion. @@ -34,7 +34,7 @@ pub trait ReceiptProvider: Send + Sync { /// retrieving the receipts should be done using the type's `ReceiptProvider` methods. pub trait ReceiptProviderIdExt: ReceiptProvider + BlockIdReader { /// Get receipt by block id - fn receipts_by_block_id(&self, block: BlockId) -> RethResult>> { + fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { let id = match block { BlockId::Hash(hash) => BlockHashOrNumber::Hash(hash.block_hash), BlockId::Number(num_tag) => { @@ -55,7 +55,7 @@ pub trait ReceiptProviderIdExt: ReceiptProvider + BlockIdReader { fn receipts_by_number_or_tag( &self, number_or_tag: BlockNumberOrTag, - ) -> RethResult>> { + ) -> ProviderResult>> { self.receipts_by_block_id(number_or_tag.into()) } } diff --git a/crates/storage/provider/src/traits/stage_checkpoint.rs b/crates/storage/provider/src/traits/stage_checkpoint.rs index 11db878fa200a..ff58fa3eafbf3 100644 --- a/crates/storage/provider/src/traits/stage_checkpoint.rs +++ b/crates/storage/provider/src/traits/stage_checkpoint.rs @@ -1,4 +1,4 @@ -use reth_interfaces::RethResult; +use reth_interfaces::provider::ProviderResult; use reth_primitives::{ stage::{StageCheckpoint, StageId}, BlockNumber, @@ -8,25 +8,30 @@ use reth_primitives::{ #[auto_impl::auto_impl(&, Arc)] pub trait StageCheckpointReader: Send + Sync { /// Fetch the checkpoint for the given stage. - fn get_stage_checkpoint(&self, id: StageId) -> RethResult>; + fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult>; /// Get stage checkpoint progress. - fn get_stage_checkpoint_progress(&self, id: StageId) -> RethResult>>; + fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult>>; } /// The trait for updating stage checkpoint related data. #[auto_impl::auto_impl(&, Arc)] pub trait StageCheckpointWriter: Send + Sync { /// Save stage checkpoint. - fn save_stage_checkpoint(&self, id: StageId, checkpoint: StageCheckpoint) -> RethResult<()>; + fn save_stage_checkpoint(&self, id: StageId, checkpoint: StageCheckpoint) + -> ProviderResult<()>; /// Save stage checkpoint progress. 
- fn save_stage_checkpoint_progress(&self, id: StageId, checkpoint: Vec) -> RethResult<()>; + fn save_stage_checkpoint_progress( + &self, + id: StageId, + checkpoint: Vec, + ) -> ProviderResult<()>; /// Update all pipeline sync stage progress. fn update_pipeline_stages( &self, block_number: BlockNumber, drop_stage_checkpoint: bool, - ) -> RethResult<()>; + ) -> ProviderResult<()>; } diff --git a/crates/storage/provider/src/traits/state.rs b/crates/storage/provider/src/traits/state.rs index 9204ff86e4189..6175504411770 100644 --- a/crates/storage/provider/src/traits/state.rs +++ b/crates/storage/provider/src/traits/state.rs @@ -1,7 +1,7 @@ use super::AccountReader; use crate::{BlockHashReader, BlockIdReader, BundleStateWithReceipts}; use auto_impl::auto_impl; -use reth_interfaces::{provider::ProviderError, RethResult}; +use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{ trie::AccountProof, Address, BlockHash, BlockId, BlockNumHash, BlockNumber, BlockNumberOrTag, Bytecode, StorageKey, StorageValue, B256, KECCAK_EMPTY, U256, @@ -18,18 +18,18 @@ pub trait StateProvider: BlockHashReader + AccountReader + StateRootProvider + S &self, account: Address, storage_key: StorageKey, - ) -> RethResult>; + ) -> ProviderResult>; /// Get account code by its hash - fn bytecode_by_hash(&self, code_hash: B256) -> RethResult>; + fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult>; /// Get account and storage proofs. - fn proof(&self, address: Address, keys: &[B256]) -> RethResult; + fn proof(&self, address: Address, keys: &[B256]) -> ProviderResult; /// Get account code by its address. /// /// Returns `None` if the account doesn't exist or account is not a contract - fn account_code(&self, addr: Address) -> RethResult> { + fn account_code(&self, addr: Address) -> ProviderResult> { // Get basic account information // Returns None if acc doesn't exist let acc = match self.basic_account(addr)? { @@ -52,7 +52,7 @@ pub trait StateProvider: BlockHashReader + AccountReader + StateRootProvider + S /// Get account balance by its address. /// /// Returns `None` if the account doesn't exist - fn account_balance(&self, addr: Address) -> RethResult> { + fn account_balance(&self, addr: Address) -> ProviderResult> { // Get basic account information // Returns None if acc doesn't exist match self.basic_account(addr)? { @@ -64,7 +64,7 @@ pub trait StateProvider: BlockHashReader + AccountReader + StateRootProvider + S /// Get account nonce by its address. /// /// Returns `None` if the account doesn't exist - fn account_nonce(&self, addr: Address) -> RethResult> { + fn account_nonce(&self, addr: Address) -> ProviderResult> { // Get basic account information // Returns None if acc doesn't exist match self.basic_account(addr)? { @@ -99,13 +99,13 @@ pub trait StateProvider: BlockHashReader + AccountReader + StateRootProvider + S /// to be used, since block `n` was executed on its parent block's state. pub trait StateProviderFactory: BlockIdReader + Send + Sync { /// Storage provider for latest block. - fn latest(&self) -> RethResult>; + fn latest(&self) -> ProviderResult>; /// Returns a [StateProvider] indexed by the given [BlockId]. /// /// Note: if a number or hash is provided this will __only__ look at historical(canonical) /// state. 
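The StateProviderFactory methods below all hand back boxed trait objects, which is exactly what lets the mock and noop providers earlier in this patch satisfy the interface by boxing a clone of themselves. A reduced sketch of that shape (StateProviderBox here is a simplified stand-in for reth's alias, which also carries a lifetime):

pub type ProviderResult<T> = Result<T, std::convert::Infallible>;

pub trait StateProvider: Send + Sync {}

// Simplified stand-in for the boxed-provider alias used by the real trait.
pub type StateProviderBox = Box<dyn StateProvider>;

pub trait StateProviderFactory: Send + Sync {
    fn latest(&self) -> ProviderResult<StateProviderBox>;
}

#[derive(Clone)]
pub struct MockProvider;

impl StateProvider for MockProvider {}

impl StateProviderFactory for MockProvider {
    // The mock is its own state provider, so a boxed clone satisfies
    // every factory method uniformly.
    fn latest(&self) -> ProviderResult<StateProviderBox> {
        Ok(Box::new(self.clone()))
    }
}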
- fn state_by_block_id(&self, block_id: BlockId) -> RethResult> { + fn state_by_block_id(&self, block_id: BlockId) -> ProviderResult> { match block_id { BlockId::Number(block_number) => self.state_by_block_number_or_tag(block_number), BlockId::Hash(block_hash) => self.history_by_block_hash(block_hash.into()), @@ -118,14 +118,14 @@ pub trait StateProviderFactory: BlockIdReader + Send + Sync { fn state_by_block_number_or_tag( &self, number_or_tag: BlockNumberOrTag, - ) -> RethResult> { + ) -> ProviderResult> { match number_or_tag { BlockNumberOrTag::Latest => self.latest(), BlockNumberOrTag::Finalized => { // we can only get the finalized state by hash, not by num let hash = match self.finalized_block_hash()? { Some(hash) => hash, - None => return Err(ProviderError::FinalizedBlockNotFound.into()), + None => return Err(ProviderError::FinalizedBlockNotFound), }; // only look at historical state self.history_by_block_hash(hash) @@ -134,7 +134,7 @@ pub trait StateProviderFactory: BlockIdReader + Send + Sync { // we can only get the safe state by hash, not by num let hash = match self.safe_block_hash()? { Some(hash) => hash, - None => return Err(ProviderError::SafeBlockNotFound.into()), + None => return Err(ProviderError::SafeBlockNotFound), }; self.history_by_block_hash(hash) @@ -152,37 +152,40 @@ pub trait StateProviderFactory: BlockIdReader + Send + Sync { /// /// /// Note: this only looks at historical blocks, not pending blocks. - fn history_by_block_number(&self, block: BlockNumber) -> RethResult>; + fn history_by_block_number(&self, block: BlockNumber) -> ProviderResult>; /// Returns a historical [StateProvider] indexed by the given block hash. /// /// Note: this only looks at historical blocks, not pending blocks. - fn history_by_block_hash(&self, block: BlockHash) -> RethResult>; + fn history_by_block_hash(&self, block: BlockHash) -> ProviderResult>; /// Returns _any_[StateProvider] with matching block hash. /// /// This will return a [StateProvider] for either a historical or pending block. - fn state_by_block_hash(&self, block: BlockHash) -> RethResult>; + fn state_by_block_hash(&self, block: BlockHash) -> ProviderResult>; /// Storage provider for pending state. /// /// Represents the state at the block that extends the canonical chain by one. /// If there's no `pending` block, then this is equal to [StateProviderFactory::latest] - fn pending(&self) -> RethResult>; + fn pending(&self) -> ProviderResult>; /// Storage provider for pending state for the given block hash. /// /// Represents the state at the block that extends the canonical chain. /// /// If the block couldn't be found, returns `None`. - fn pending_state_by_hash(&self, block_hash: B256) -> RethResult>>; + fn pending_state_by_hash( + &self, + block_hash: B256, + ) -> ProviderResult>>; /// Return a [StateProvider] that contains post state data provider. /// Used to inspect or execute transaction on the pending state. fn pending_with_provider( &self, post_state_data: Box, - ) -> RethResult>; + ) -> ProviderResult>; } /// Blockchain trait provider that gives access to the blockchain state that is not yet committed @@ -195,10 +198,9 @@ pub trait BlockchainTreePendingStateProvider: Send + Sync { fn pending_state_provider( &self, block_hash: BlockHash, - ) -> RethResult> { - Ok(self - .find_pending_state_provider(block_hash) - .ok_or(ProviderError::StateForHashNotFound(block_hash))?) 
+ ) -> ProviderResult> { + self.find_pending_state_provider(block_hash) + .ok_or(ProviderError::StateForHashNotFound(block_hash)) } /// Returns state provider if a matching block exists. @@ -231,5 +233,5 @@ pub trait BundleStateDataProvider: Send + Sync { #[auto_impl[Box,&, Arc]] pub trait StateRootProvider: Send + Sync { /// Returns the state root of the BundleState on top of the current state. - fn state_root(&self, post_state: &BundleStateWithReceipts) -> RethResult; + fn state_root(&self, post_state: &BundleStateWithReceipts) -> ProviderResult; } diff --git a/crates/storage/provider/src/traits/storage.rs b/crates/storage/provider/src/traits/storage.rs index 764ea1c01771a..302acad8b1873 100644 --- a/crates/storage/provider/src/traits/storage.rs +++ b/crates/storage/provider/src/traits/storage.rs @@ -4,23 +4,23 @@ use std::{ }; use auto_impl::auto_impl; -use reth_interfaces::RethResult; +use reth_interfaces::provider::ProviderResult; use reth_primitives::{Address, BlockNumber, StorageEntry, B256}; /// Storage reader #[auto_impl(&, Arc, Box)] pub trait StorageReader: Send + Sync { /// Get plainstate storages for addresses and storage keys. - fn plainstate_storages( + fn plain_state_storages( &self, addresses_with_keys: impl IntoIterator)>, - ) -> RethResult)>>; + ) -> ProviderResult)>>; /// Iterate over storage changesets and return all storage slots that were changed. fn changed_storages_with_range( &self, range: RangeInclusive, - ) -> RethResult>>; + ) -> ProviderResult>>; /// Iterate over storage changesets and return all storage slots that were changed alongside /// each specific set of blocks. @@ -29,5 +29,5 @@ pub trait StorageReader: Send + Sync { fn changed_storages_and_blocks_with_range( &self, range: RangeInclusive, - ) -> RethResult>>; + ) -> ProviderResult>>; } diff --git a/crates/storage/provider/src/traits/transactions.rs b/crates/storage/provider/src/traits/transactions.rs index 2f9c72ed191dc..9041593b552ef 100644 --- a/crates/storage/provider/src/traits/transactions.rs +++ b/crates/storage/provider/src/traits/transactions.rs @@ -1,5 +1,5 @@ use crate::{BlockNumReader, BlockReader}; -use reth_interfaces::{provider::ProviderError, RethResult}; +use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{ Address, BlockHashOrNumber, BlockNumber, TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, @@ -13,55 +13,58 @@ pub trait TransactionsProvider: BlockNumReader + Send + Sync { /// /// This is the inverse of [TransactionsProvider::transaction_by_id]. /// Returns None if the transaction is not found. - fn transaction_id(&self, tx_hash: TxHash) -> RethResult>; + fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult>; /// Get transaction by id, computes hash everytime so more expensive. - fn transaction_by_id(&self, id: TxNumber) -> RethResult>; + fn transaction_by_id(&self, id: TxNumber) -> ProviderResult>; /// Get transaction by id without computing the hash. fn transaction_by_id_no_hash( &self, id: TxNumber, - ) -> RethResult>; + ) -> ProviderResult>; /// Get transaction by transaction hash. 
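The pending_state_provider rewrite a few hunks above shows the other recurring cleanup in this series: once the error types line up, Ok(expr.ok_or(err)?) is a pointless round-trip through ? and re-wrapping, and collapses to expr.ok_or(err). In miniature, with toy types rather than the real signatures:

#[derive(Debug)]
pub enum ProviderError {
    StateForHashNotFound([u8; 32]),
}

pub type ProviderResult<T> = Result<T, ProviderError>;

fn find_pending_state_provider(hash: [u8; 32]) -> Option<String> {
    let _ = hash;
    None // stand-in for a real lookup
}

// Before: Ok(find_pending_state_provider(hash)
//     .ok_or(ProviderError::StateForHashNotFound(hash))?)
// After: `ok_or` already produces exactly the Result we want to return.
fn pending_state_provider(hash: [u8; 32]) -> ProviderResult<String> {
    find_pending_state_provider(hash).ok_or(ProviderError::StateForHashNotFound(hash))
}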
- fn transaction_by_hash(&self, hash: TxHash) -> RethResult>; + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult>; /// Get transaction by transaction hash and additional metadata of the block the transaction was /// mined in fn transaction_by_hash_with_meta( &self, hash: TxHash, - ) -> RethResult>; + ) -> ProviderResult>; /// Get transaction block number - fn transaction_block(&self, id: TxNumber) -> RethResult>; + fn transaction_block(&self, id: TxNumber) -> ProviderResult>; /// Get transactions by block id. fn transactions_by_block( &self, block: BlockHashOrNumber, - ) -> RethResult>>; + ) -> ProviderResult>>; /// Get transactions by block range. fn transactions_by_block_range( &self, range: impl RangeBounds, - ) -> RethResult>>; + ) -> ProviderResult>>; /// Get transactions by tx range. fn transactions_by_tx_range( &self, range: impl RangeBounds, - ) -> RethResult>; + ) -> ProviderResult>; /// Get Senders from a tx range. - fn senders_by_tx_range(&self, range: impl RangeBounds) -> RethResult>; + fn senders_by_tx_range( + &self, + range: impl RangeBounds, + ) -> ProviderResult>; /// Get transaction sender. /// /// Returns None if the transaction is not found. - fn transaction_sender(&self, id: TxNumber) -> RethResult>; + fn transaction_sender(&self, id: TxNumber) -> ProviderResult>; } /// Client trait for fetching additional [TransactionSigned] related data. @@ -71,7 +74,7 @@ pub trait TransactionsProviderExt: BlockReader + Send + Sync { fn transaction_range_by_block_range( &self, block_range: RangeInclusive, - ) -> RethResult> { + ) -> ProviderResult> { let from = self .block_body_indices(*block_range.start())? .ok_or(ProviderError::BlockBodyIndicesNotFound(*block_range.start()))? @@ -89,5 +92,5 @@ pub trait TransactionsProviderExt: BlockReader + Send + Sync { fn transaction_hashes_by_range( &self, tx_range: Range, - ) -> RethResult>; + ) -> ProviderResult>; } diff --git a/crates/storage/provider/src/traits/withdrawals.rs b/crates/storage/provider/src/traits/withdrawals.rs index 6361e6509e799..419c1453d27f5 100644 --- a/crates/storage/provider/src/traits/withdrawals.rs +++ b/crates/storage/provider/src/traits/withdrawals.rs @@ -1,4 +1,4 @@ -use reth_interfaces::RethResult; +use reth_interfaces::provider::ProviderResult; use reth_primitives::{BlockHashOrNumber, Withdrawal}; /// Client trait for fetching [Withdrawal] related data. @@ -9,8 +9,8 @@ pub trait WithdrawalsProvider: Send + Sync { &self, id: BlockHashOrNumber, timestamp: u64, - ) -> RethResult>>; + ) -> ProviderResult>>; /// Get latest withdrawal from this block or earlier . 
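The provided transaction_range_by_block_range method in the hunk below reduces a block range to the transaction-number range it spans via the block body indices. A compact sketch of that arithmetic; first_tx_num and last_tx_num mirror the accessors the real StoredBlockBodyIndices exposes, but the struct here is a simplified assumption:

use std::ops::RangeInclusive;

/// Toy stand-in for stored block body indices.
pub struct BodyIndices {
    pub first_tx_num: u64,
    pub tx_count: u64,
}

impl BodyIndices {
    fn first_tx_num(&self) -> u64 {
        self.first_tx_num
    }

    /// Number of the last transaction in the block; saturating so an
    /// empty block clamps instead of underflowing.
    fn last_tx_num(&self) -> u64 {
        self.first_tx_num + self.tx_count.saturating_sub(1)
    }
}

/// Inclusive block range in, inclusive tx-number range out: the first tx
/// of the first block through the last tx of the last block.
fn transaction_range_by_block_range(
    first_block: &BodyIndices,
    last_block: &BodyIndices,
) -> RangeInclusive<u64> {
    first_block.first_tx_num()..=last_block.last_tx_num()
}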
- fn latest_withdrawal(&self) -> RethResult>; + fn latest_withdrawal(&self) -> ProviderResult>; } diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 692a5177bb24c..4f834de4de3ba 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -10,14 +10,13 @@ use futures_util::{ future::{BoxFuture, Fuse, FusedFuture}, FutureExt, Stream, StreamExt, }; -use reth_interfaces::RethError; use reth_primitives::{ Address, BlockHash, BlockNumber, BlockNumberOrTag, FromRecoveredPooledTransaction, FromRecoveredTransaction, PooledTransactionsElementEcRecovered, }; use reth_provider::{ BlockReaderIdExt, BundleStateWithReceipts, CanonStateNotification, ChainSpecProvider, - StateProviderFactory, + ProviderError, StateProviderFactory, }; use reth_tasks::TaskSpawner; use std::{ @@ -495,7 +494,7 @@ fn load_accounts( client: Client, at: BlockHash, addresses: I, -) -> Result, RethError)>> +) -> Result, ProviderError)>> where I: Iterator, diff --git a/testing/ef-tests/src/cases/blockchain_test.rs b/testing/ef-tests/src/cases/blockchain_test.rs index 6b2b08da11ba7..5d9a4bf868d40 100644 --- a/testing/ef-tests/src/cases/blockchain_test.rs +++ b/testing/ef-tests/src/cases/blockchain_test.rs @@ -78,17 +78,24 @@ impl Case for BlockchainTestCase { let provider = factory.provider_rw().unwrap(); // Insert test state - provider.insert_block( - SealedBlock::new(case.genesis_block_header.clone().into(), BlockBody::default()), - None, - None, - )?; + provider + .insert_block( + SealedBlock::new( + case.genesis_block_header.clone().into(), + BlockBody::default(), + ), + None, + None, + ) + .map_err(|err| Error::RethError(err.into()))?; case.pre.write_to_db(provider.tx_ref())?; let mut last_block = None; for block in case.blocks.iter() { let decoded = SealedBlock::decode(&mut block.rlp.as_ref())?; - provider.insert_block(decoded.clone(), None, None)?; + provider + .insert_block(decoded.clone(), None, None) + .map_err(|err| Error::RethError(err.into()))?; last_block = Some(decoded); } @@ -118,11 +125,9 @@ impl Case for BlockchainTestCase { // `insert_hashes` will insert hashed data, compute the state root and match it to // expected internally let last_block = last_block.unwrap_or_default(); - provider.insert_hashes( - 0..=last_block.number, - last_block.hash, - expected_state_root, - )?; + provider + .insert_hashes(0..=last_block.number, last_block.hash, expected_state_root) + .map_err(|err| Error::RethError(err.into()))?; } else { return Err(Error::MissingPostState) } From 3553cd50765af8aee5d6d9799698fa60f7b7b7d1 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 17 Nov 2023 15:07:46 +0100 Subject: [PATCH 31/77] chore: remove cursed env lifetime (#5474) --- .../storage/db/src/implementation/mdbx/mod.rs | 4 +- .../storage/db/src/implementation/mdbx/tx.rs | 30 ++-- crates/storage/libmdbx-rs/src/cursor.rs | 2 +- crates/storage/libmdbx-rs/src/database.rs | 28 ++-- crates/storage/libmdbx-rs/src/environment.rs | 135 ++++++++++-------- crates/storage/libmdbx-rs/src/transaction.rs | 69 +++++---- 6 files changed, 136 insertions(+), 132 deletions(-) diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index 8a9475b40bc4e..e895b4015ca01 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -41,8 +41,8 @@ pub struct DatabaseEnv { } impl<'a> DatabaseGAT<'a> for DatabaseEnv { - type TX = tx::Tx<'a, RO>; - type TXMut = 
tx::Tx<'a, RW>; + type TX = tx::Tx; + type TXMut = tx::Tx; } impl Database for DatabaseEnv { diff --git a/crates/storage/db/src/implementation/mdbx/tx.rs b/crates/storage/db/src/implementation/mdbx/tx.rs index 78c52172d5915..3798587d8280a 100644 --- a/crates/storage/db/src/implementation/mdbx/tx.rs +++ b/crates/storage/db/src/implementation/mdbx/tx.rs @@ -17,9 +17,9 @@ use std::{marker::PhantomData, str::FromStr, sync::Arc, time::Instant}; /// Wrapper for the libmdbx transaction. #[derive(Debug)] -pub struct Tx<'a, K: TransactionKind> { +pub struct Tx { /// Libmdbx-sys transaction. - pub inner: Transaction<'a, K>, + pub inner: Transaction, /// Database table handle cache. pub(crate) db_handles: Arc; NUM_TABLES]>>, /// Handler for metrics with its own [Drop] implementation for cases when the transaction isn't @@ -29,20 +29,14 @@ pub struct Tx<'a, K: TransactionKind> { metrics_handler: Option>, } -impl<'env, K: TransactionKind> Tx<'env, K> { +impl Tx { /// Creates new `Tx` object with a `RO` or `RW` transaction. - pub fn new<'a>(inner: Transaction<'a, K>) -> Self - where - 'a: 'env, - { + pub fn new(inner: Transaction) -> Self { Self { inner, db_handles: Default::default(), metrics_handler: None } } /// Creates new `Tx` object with a `RO` or `RW` transaction and optionally enables metrics. - pub fn new_with_metrics<'a>(inner: Transaction<'a, K>, with_metrics: bool) -> Self - where - 'a: 'env, - { + pub fn new_with_metrics(inner: Transaction, with_metrics: bool) -> Self { let metrics_handler = with_metrics.then(|| { let handler = MetricsHandler:: { txn_id: inner.id(), @@ -81,7 +75,7 @@ impl<'env, K: TransactionKind> Tx<'env, K> { } /// Create db Cursor - pub fn new_cursor(&self) -> Result, DatabaseError> { + pub fn new_cursor(&self) -> Result, DatabaseError> { let inner = self .inner .cursor_with_dbi(self.get_dbi::()?) 
@@ -128,7 +122,7 @@ impl<'env, K: TransactionKind> Tx<'env, K> { &self, operation: Operation, value_size: Option, - f: impl FnOnce(&Transaction<'_, K>) -> R, + f: impl FnOnce(&Transaction) -> R, ) -> R { if self.metrics_handler.is_some() { OperationMetrics::record(T::NAME, operation, value_size, || f(&self.inner)) @@ -173,19 +167,19 @@ impl Drop for MetricsHandler { } } -impl<'a, K: TransactionKind> DbTxGAT<'a> for Tx<'_, K> { +impl<'a, K: TransactionKind> DbTxGAT<'a> for Tx { type Cursor = Cursor<'a, K, T>; type DupCursor = Cursor<'a, K, T>; } -impl<'a, K: TransactionKind> DbTxMutGAT<'a> for Tx<'_, K> { +impl<'a, K: TransactionKind> DbTxMutGAT<'a> for Tx { type CursorMut = Cursor<'a, RW, T>; type DupCursorMut = Cursor<'a, RW, T>; } -impl TableImporter for Tx<'_, RW> {} +impl TableImporter for Tx {} -impl DbTx for Tx<'_, K> { +impl DbTx for Tx { fn get(&self, key: T::Key) -> Result::Value>, DatabaseError> { self.execute_with_operation_metric::(Operation::Get, None, |tx| { tx.get(self.get_dbi::()?, key.encode().as_ref()) @@ -229,7 +223,7 @@ impl DbTx for Tx<'_, K> { } } -impl DbTxMut for Tx<'_, RW> { +impl DbTxMut for Tx { fn put(&self, key: T::Key, value: T::Value) -> Result<(), DatabaseError> { let key = key.encode(); let value = value.compress(); diff --git a/crates/storage/libmdbx-rs/src/cursor.rs b/crates/storage/libmdbx-rs/src/cursor.rs index dfbdcc52c9890..dd242586e0737 100644 --- a/crates/storage/libmdbx-rs/src/cursor.rs +++ b/crates/storage/libmdbx-rs/src/cursor.rs @@ -28,7 +28,7 @@ impl<'txn, K> Cursor<'txn, K> where K: TransactionKind, { - pub(crate) fn new(txn: &'txn Transaction<'_, K>, dbi: ffi::MDBX_dbi) -> Result { + pub(crate) fn new(txn: &'txn Transaction, dbi: ffi::MDBX_dbi) -> Result { let mut cursor: *mut ffi::MDBX_cursor = ptr::null_mut(); let txn = txn.txn_ptr(); unsafe { diff --git a/crates/storage/libmdbx-rs/src/database.rs b/crates/storage/libmdbx-rs/src/database.rs index f0f48951aff10..db8124dbd6139 100644 --- a/crates/storage/libmdbx-rs/src/database.rs +++ b/crates/storage/libmdbx-rs/src/database.rs @@ -1,27 +1,29 @@ use crate::{ error::{mdbx_result, Result}, transaction::TransactionKind, - Transaction, + Environment, Transaction, }; use ffi::MDBX_db_flags_t; -use std::{ffi::CString, marker::PhantomData, ptr}; +use std::{ffi::CString, ptr}; /// A handle to an individual database in an environment. /// /// A database handle denotes the name and parameters of a database in an environment. #[derive(Debug)] -pub struct Database<'txn> { +pub struct Database { dbi: ffi::MDBX_dbi, - _marker: PhantomData<&'txn ()>, + /// The environment that this database belongs to keeps it alive as long as the database + /// instance exists. + _env: Option, } -impl<'txn> Database<'txn> { +impl Database { /// Opens a new database handle in the given transaction. /// /// Prefer using `Environment::open_db`, `Environment::create_db`, `TransactionExt::open_db`, /// or `RwTransaction::create_db`. 
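// A minimal sketch, assuming it sits next to the `Tx` type above with the
// `Database` trait in scope: with the `'env` lifetime gone, a read transaction
// can be held in an owned struct and moved across threads, which the old
// `Tx<'env, K>` could not express.
struct Snapshot {
    tx: Tx<RO>,
}

impl Snapshot {
    fn open(env: &DatabaseEnv) -> Result<Self, DatabaseError> {
        // `Database::tx()` yields a `tx::Tx<RO>` per the `DatabaseGAT` impl above.
        Ok(Self { tx: env.tx()? })
    }
}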
- pub(crate) fn new<'env, K: TransactionKind>( - txn: &'txn Transaction<'env, K>, + pub(crate) fn new( + txn: &Transaction, name: Option<&str>, flags: MDBX_db_flags_t, ) -> Result { @@ -31,16 +33,16 @@ impl<'txn> Database<'txn> { mdbx_result( txn.txn_execute(|txn| unsafe { ffi::mdbx_dbi_open(txn, name_ptr, flags, &mut dbi) }), )?; - Ok(Self::new_from_ptr(dbi)) + Ok(Self::new_from_ptr(dbi, txn.env().clone())) } - pub(crate) fn new_from_ptr(dbi: ffi::MDBX_dbi) -> Self { - Self { dbi, _marker: PhantomData } + pub(crate) fn new_from_ptr(dbi: ffi::MDBX_dbi, env: Environment) -> Self { + Self { dbi, _env: Some(env) } } /// Opens the freelist database with DBI `0`. pub fn freelist_db() -> Self { - Database { dbi: 0, _marker: PhantomData } + Database { dbi: 0, _env: None } } /// Returns the underlying MDBX database handle. @@ -52,5 +54,5 @@ impl<'txn> Database<'txn> { } } -unsafe impl<'txn> Send for Database<'txn> {} -unsafe impl<'txn> Sync for Database<'txn> {} +unsafe impl Send for Database {} +unsafe impl Sync for Database {} diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs index 2728fad4bbf4d..80d63cc1b2417 100644 --- a/crates/storage/libmdbx-rs/src/environment.rs +++ b/crates/storage/libmdbx-rs/src/environment.rs @@ -15,67 +15,21 @@ use std::{ ops::{Bound, RangeBounds}, path::Path, ptr, - sync::mpsc::{sync_channel, SyncSender}, + sync::{ + mpsc::{sync_channel, SyncSender}, + Arc, + }, thread::sleep, time::Duration, }; -/// Determines how data is mapped into memory -/// -/// It only takes affect when the environment is opened. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] -pub enum EnvironmentKind { - /// Open the environment in default mode, without WRITEMAP. - #[default] - Default, - /// Open the environment as mdbx-WRITEMAP. - /// Use a writeable memory map unless the environment is opened as MDBX_RDONLY - /// ([Mode::ReadOnly]). - /// - /// All data will be mapped into memory in the read-write mode [Mode::ReadWrite]. This offers a - /// significant performance benefit, since the data will be modified directly in mapped - /// memory and then flushed to disk by single system call, without any memory management - /// nor copying. - /// - /// This mode is incompatible with nested transactions. - WriteMap, -} - -impl EnvironmentKind { - /// Returns true if the environment was opened as WRITEMAP. - #[inline] - pub const fn is_write_map(&self) -> bool { - matches!(self, EnvironmentKind::WriteMap) - } - - /// Additional flags required when opening the environment. - pub(crate) fn extra_flags(&self) -> ffi::MDBX_env_flags_t { - match self { - EnvironmentKind::Default => ffi::MDBX_ENV_DEFAULTS, - EnvironmentKind::WriteMap => ffi::MDBX_WRITEMAP, - } - } -} - -#[derive(Copy, Clone, Debug)] -pub(crate) struct TxnPtr(pub *mut ffi::MDBX_txn); -unsafe impl Send for TxnPtr {} -unsafe impl Sync for TxnPtr {} - -#[derive(Copy, Clone, Debug)] -pub(crate) struct EnvPtr(pub *mut ffi::MDBX_env); -unsafe impl Send for EnvPtr {} -unsafe impl Sync for EnvPtr {} - -pub(crate) enum TxnManagerMessage { - Begin { parent: TxnPtr, flags: ffi::MDBX_txn_flags_t, sender: SyncSender> }, - Abort { tx: TxnPtr, sender: SyncSender> }, - Commit { tx: TxnPtr, sender: SyncSender> }, -} - /// An environment supports multiple databases, all residing in the same shared-memory map. +/// +/// Accessing the environment is thread-safe. +/// The environment will be closed when the last instance of this type is dropped. 
+#[derive(Clone)] pub struct Environment { - inner: EnvironmentInner, + inner: Arc, } impl Environment { @@ -139,13 +93,13 @@ impl Environment { /// Create a read-only transaction for use with the environment. #[inline] - pub fn begin_ro_txn(&self) -> Result> { - Transaction::new(self) + pub fn begin_ro_txn(&self) -> Result> { + Transaction::new(self.clone()) } /// Create a read-write transaction for use with the environment. This method will block while /// there are any other read-write transactions open on the environment. - pub fn begin_rw_txn(&self) -> Result> { + pub fn begin_rw_txn(&self) -> Result> { let sender = self.ensure_txn_manager()?; let txn = loop { let (tx, rx) = sync_channel(0); @@ -164,7 +118,7 @@ impl Environment { break res }?; - Ok(Transaction::new_from_ptr(self, txn.0)) + Ok(Transaction::new_from_ptr(self.clone(), txn.0)) } /// Returns a raw pointer to the underlying MDBX environment. @@ -294,6 +248,64 @@ impl Drop for EnvironmentInner { } } +// SAFETY: internal type, only used inside [Environment]. Accessing the environment pointer is +// thread-safe +unsafe impl Send for EnvironmentInner {} +unsafe impl Sync for EnvironmentInner {} + +/// Determines how data is mapped into memory +/// +/// It only takes affect when the environment is opened. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] +pub enum EnvironmentKind { + /// Open the environment in default mode, without WRITEMAP. + #[default] + Default, + /// Open the environment as mdbx-WRITEMAP. + /// Use a writeable memory map unless the environment is opened as MDBX_RDONLY + /// ([Mode::ReadOnly]). + /// + /// All data will be mapped into memory in the read-write mode [Mode::ReadWrite]. This offers a + /// significant performance benefit, since the data will be modified directly in mapped + /// memory and then flushed to disk by single system call, without any memory management + /// nor copying. + /// + /// This mode is incompatible with nested transactions. + WriteMap, +} + +impl EnvironmentKind { + /// Returns true if the environment was opened as WRITEMAP. + #[inline] + pub const fn is_write_map(&self) -> bool { + matches!(self, EnvironmentKind::WriteMap) + } + + /// Additional flags required when opening the environment. + pub(crate) fn extra_flags(&self) -> ffi::MDBX_env_flags_t { + match self { + EnvironmentKind::Default => ffi::MDBX_ENV_DEFAULTS, + EnvironmentKind::WriteMap => ffi::MDBX_WRITEMAP, + } + } +} + +#[derive(Copy, Clone, Debug)] +pub(crate) struct TxnPtr(pub *mut ffi::MDBX_txn); +unsafe impl Send for TxnPtr {} +unsafe impl Sync for TxnPtr {} + +#[derive(Copy, Clone, Debug)] +pub(crate) struct EnvPtr(pub *mut ffi::MDBX_env); +unsafe impl Send for EnvPtr {} +unsafe impl Sync for EnvPtr {} + +pub(crate) enum TxnManagerMessage { + Begin { parent: TxnPtr, flags: ffi::MDBX_txn_flags_t, sender: SyncSender> }, + Abort { tx: TxnPtr, sender: SyncSender> }, + Commit { tx: TxnPtr, sender: SyncSender> }, +} + /// Environment statistics. /// /// Contains information about the size and layout of an MDBX environment or database. 
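// Sketch of the new handle semantics, written as if inside this crate:
// `Environment` is now a cheaply cloneable, `Arc`-backed handle, so a reader
// thread clones it instead of borrowing it for some `'env` lifetime.
fn spawn_reader(env: &Environment) -> std::thread::JoinHandle<Result<()>> {
    let env = env.clone(); // bumps the refcount; same underlying MDBX env
    std::thread::spawn(move || {
        let txn = env.begin_ro_txn()?; // the transaction owns its handle
        // ... perform reads with `txn` ...
        drop(txn);
        Ok(())
    })
}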
@@ -401,9 +413,6 @@ impl Info { } } -unsafe impl Send for Environment {} -unsafe impl Sync for Environment {} - impl fmt::Debug for Environment { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Environment").field("kind", &self.inner.env_kind).finish_non_exhaustive() @@ -607,7 +616,7 @@ impl EnvironmentBuilder { env.txn_manager = Some(tx); } - Ok(Environment { inner: env }) + Ok(Environment { inner: Arc::new(env) }) } /// Configures how this environment will be opened. diff --git a/crates/storage/libmdbx-rs/src/transaction.rs b/crates/storage/libmdbx-rs/src/transaction.rs index 60df280e69ad0..c75168e0452b4 100644 --- a/crates/storage/libmdbx-rs/src/transaction.rs +++ b/crates/storage/libmdbx-rs/src/transaction.rs @@ -59,18 +59,18 @@ impl TransactionKind for RW { /// An MDBX transaction. /// /// All database operations require a transaction. -pub struct Transaction<'env, K> +pub struct Transaction where K: TransactionKind, { - inner: Arc>, + inner: Arc>, } -impl<'env, K> Transaction<'env, K> +impl Transaction where K: TransactionKind, { - pub(crate) fn new(env: &'env Environment) -> Result { + pub(crate) fn new(env: Environment) -> Result { let mut txn: *mut ffi::MDBX_txn = ptr::null_mut(); unsafe { mdbx_result(ffi::mdbx_txn_begin_ex( @@ -84,7 +84,7 @@ where } } - pub(crate) fn new_from_ptr(env: &'env Environment, txn: *mut ffi::MDBX_txn) -> Self { + pub(crate) fn new_from_ptr(env: Environment, txn: *mut ffi::MDBX_txn) -> Self { let inner = TransactionInner { txn: TransactionPtr::new(txn), primed_dbis: Mutex::new(IndexSet::new()), @@ -135,7 +135,7 @@ where /// Returns a raw pointer to the MDBX environment. pub fn env(&self) -> &Environment { - self.inner.env + &self.inner.env } /// Returns the transaction id. @@ -175,13 +175,12 @@ where self.commit_and_rebind_open_dbs().map(|v| v.0) } - pub fn prime_for_permaopen(&self, db: Database<'_>) { + pub fn prime_for_permaopen(&self, db: Database) { self.inner.primed_dbis.lock().insert(db.dbi()); } - /// Commits the transaction and returns table handles permanently open for the lifetime of - /// `Environment`. - pub fn commit_and_rebind_open_dbs(self) -> Result<(bool, Vec>)> { + /// Commits the transaction and returns table handles permanently open until dropped. + pub fn commit_and_rebind_open_dbs(self) -> Result<(bool, Vec)> { let result = { let result = self.txn_execute(|txn| { if K::ONLY_CLEAN { @@ -206,7 +205,7 @@ where .primed_dbis .lock() .iter() - .map(|&dbi| Database::new_from_ptr(dbi)) + .map(|&dbi| Database::new_from_ptr(dbi, self.env().clone())) .collect(), ) }) @@ -223,12 +222,12 @@ where /// The returned database handle may be shared among any transaction in the environment. /// /// The database name may not contain the null character. - pub fn open_db(&self, name: Option<&str>) -> Result> { + pub fn open_db(&self, name: Option<&str>) -> Result { Database::new(self, name, 0) } /// Gets the option flags for the given database in the transaction. - pub fn db_flags<'txn>(&'txn self, db: &Database<'txn>) -> Result { + pub fn db_flags(&self, db: &Database) -> Result { let mut flags: c_uint = 0; unsafe { mdbx_result(self.txn_execute(|txn| { @@ -242,7 +241,7 @@ where } /// Retrieves database statistics. - pub fn db_stat<'txn>(&'txn self, db: &Database<'txn>) -> Result { + pub fn db_stat(&self, db: &Database) -> Result { self.db_stat_with_dbi(db.dbi()) } @@ -258,7 +257,7 @@ where } /// Open a new cursor on the given database. 
- pub fn cursor<'txn>(&'txn self, db: &Database<'txn>) -> Result> { + pub fn cursor(&self, db: &Database) -> Result> { Cursor::new(self, db.dbi()) } @@ -269,7 +268,7 @@ where } /// Internals of a transaction. -struct TransactionInner<'env, K> +struct TransactionInner where K: TransactionKind, { @@ -279,11 +278,11 @@ where primed_dbis: Mutex>, /// Whether the transaction has committed. committed: AtomicBool, - env: &'env Environment, + env: Environment, _marker: std::marker::PhantomData, } -impl<'env, K> TransactionInner<'env, K> +impl TransactionInner where K: TransactionKind, { @@ -305,7 +304,7 @@ where } } -impl<'env, K> Drop for TransactionInner<'env, K> +impl Drop for TransactionInner where K: TransactionKind, { @@ -330,8 +329,8 @@ where } } -impl<'env> Transaction<'env, RW> { - fn open_db_with_flags(&self, name: Option<&str>, flags: DatabaseFlags) -> Result> { +impl Transaction { + fn open_db_with_flags(&self, name: Option<&str>, flags: DatabaseFlags) -> Result { Database::new(self, name, flags.bits()) } @@ -347,7 +346,7 @@ impl<'env> Transaction<'env, RW> { /// /// This function will fail with [Error::BadRslot] if called by a thread with an open /// transaction. - pub fn create_db(&self, name: Option<&str>, flags: DatabaseFlags) -> Result> { + pub fn create_db(&self, name: Option<&str>, flags: DatabaseFlags) -> Result { self.open_db_with_flags(name, flags | DatabaseFlags::CREATE) } @@ -380,13 +379,13 @@ impl<'env> Transaction<'env, RW> { /// Returns a buffer which can be used to write a value into the item at the /// given key and with the given length. The buffer must be completely /// filled by the caller. - pub fn reserve<'txn>( - &'txn self, - db: &Database<'txn>, + pub fn reserve( + &self, + db: &Database, key: impl AsRef<[u8]>, len: usize, flags: WriteFlags, - ) -> Result<&'txn mut [u8]> { + ) -> Result<&mut [u8]> { let key = key.as_ref(); let key_val: ffi::MDBX_val = ffi::MDBX_val { iov_len: key.len(), iov_base: key.as_ptr() as *mut c_void }; @@ -457,29 +456,29 @@ impl<'env> Transaction<'env, RW> { /// # Safety /// Caller must close ALL other [Database] and [Cursor] instances pointing to the same dbi /// BEFORE calling this function. - pub unsafe fn drop_db<'txn>(&'txn self, db: Database<'txn>) -> Result<()> { + pub unsafe fn drop_db(&self, db: Database) -> Result<()> { mdbx_result(self.txn_execute(|txn| ffi::mdbx_drop(txn, db.dbi(), true)))?; Ok(()) } } -impl<'env> Transaction<'env, RO> { +impl Transaction { /// Closes the database handle. /// /// # Safety /// Caller must close ALL other [Database] and [Cursor] instances pointing to the same dbi /// BEFORE calling this function. - pub unsafe fn close_db(&self, db: Database<'_>) -> Result<()> { + pub unsafe fn close_db(&self, db: Database) -> Result<()> { mdbx_result(ffi::mdbx_dbi_close(self.env().env_ptr(), db.dbi()))?; Ok(()) } } -impl<'env> Transaction<'env, RW> { +impl Transaction { /// Begins a new nested transaction inside of this transaction. 
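// Sketch of the permaopen flow under the new ownership model (usage assumed,
// names taken from the diff): handles returned by `commit_and_rebind_open_dbs`
// stay open for as long as the returned `Database` values are held, because
// each one keeps the `Environment` alive.
fn open_permanent(env: &Environment) -> Result<Database> {
    let txn = env.begin_rw_txn()?;
    let db = txn.create_db(Some("table"), DatabaseFlags::empty())?;
    txn.prime_for_permaopen(db);
    let (_committed, mut dbs) = txn.commit_and_rebind_open_dbs()?;
    Ok(dbs.remove(0))
}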
- pub fn begin_nested_txn(&mut self) -> Result> { + pub fn begin_nested_txn(&mut self) -> Result> { if self.inner.env.is_write_map() { return Err(Error::NestedTransactionsUnsupportedWithWriteMap) } @@ -495,12 +494,12 @@ impl<'env> Transaction<'env, RW> { }) .unwrap(); - rx.recv().unwrap().map(|ptr| Transaction::new_from_ptr(self.env(), ptr.0)) + rx.recv().unwrap().map(|ptr| Transaction::new_from_ptr(self.env().clone(), ptr.0)) }) } } -impl<'env, K> fmt::Debug for Transaction<'env, K> +impl fmt::Debug for Transaction where K: TransactionKind, { @@ -546,7 +545,7 @@ mod tests { #[allow(dead_code)] fn test_txn_send_sync() { - assert_send_sync::>(); - assert_send_sync::>(); + assert_send_sync::>(); + assert_send_sync::>(); } } From f33ef66cc135d237f5b065e5d4d28ce6a53fb90a Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Fri, 17 Nov 2023 15:27:40 +0000 Subject: [PATCH 32/77] chore: `make maxperf`, enable GC profiling on MDBX debug (#5459) --- Makefile | 4 ++++ crates/storage/libmdbx-rs/mdbx-sys/build.rs | 5 ++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index fb5502821fe97..9ce4eb7d17e82 100644 --- a/Makefile +++ b/Makefile @@ -228,3 +228,7 @@ update-book-cli: ## Update book cli documentation. cargo build --bin reth --features "$(FEATURES)" --profile "$(PROFILE)" @echo "Updating book cli doc..." @./book/cli/update.sh $(BUILD_PATH) + +.PHONY: maxperf +maxperf: + RUSTFLAGS="-C target-cpu=native" cargo build --profile maxperf --features jemalloc diff --git a/crates/storage/libmdbx-rs/mdbx-sys/build.rs b/crates/storage/libmdbx-rs/mdbx-sys/build.rs index 194ffaa33712a..f4b3e1ea52b8f 100644 --- a/crates/storage/libmdbx-rs/mdbx-sys/build.rs +++ b/crates/storage/libmdbx-rs/mdbx-sys/build.rs @@ -91,7 +91,10 @@ fn main() { // Enable debugging on debug builds #[cfg(debug_assertions)] - cc_builder.define("MDBX_DEBUG", "1"); + { + cc_builder.define("MDBX_DEBUG", "1"); + cc_builder.define("MDBX_ENABLE_PROFGC", "1"); + } // Disables debug logging on optimized builds #[cfg(not(debug_assertions))] From 9b1416b4f4a4fe805fa4b386e6329368ab4bf66b Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Fri, 17 Nov 2023 14:09:06 -0500 Subject: [PATCH 33/77] feat: add benchmark for blob_tx_priority and fee_delta (#5468) --- crates/transaction-pool/Cargo.toml | 5 ++ crates/transaction-pool/benches/priority.rs | 70 +++++++++++++++++++++ crates/transaction-pool/src/lib.rs | 4 +- crates/transaction-pool/src/pool/blob.rs | 28 ++++++++- crates/transaction-pool/src/pool/mod.rs | 1 + 5 files changed, 103 insertions(+), 5 deletions(-) create mode 100644 crates/transaction-pool/benches/priority.rs diff --git a/crates/transaction-pool/Cargo.toml b/crates/transaction-pool/Cargo.toml index 93548f83f4a40..52ceaf937447e 100644 --- a/crates/transaction-pool/Cargo.toml +++ b/crates/transaction-pool/Cargo.toml @@ -78,3 +78,8 @@ optimism = [ name = "reorder" required-features = ["test-utils", "arbitrary"] harness = false + +[[bench]] +name = "priority" +required-features = ["arbitrary"] +harness = false diff --git a/crates/transaction-pool/benches/priority.rs b/crates/transaction-pool/benches/priority.rs new file mode 100644 index 0000000000000..65218c4a2f245 --- /dev/null +++ b/crates/transaction-pool/benches/priority.rs @@ -0,0 +1,70 @@ +use criterion::{ + black_box, criterion_group, criterion_main, measurement::WallTime, BenchmarkGroup, Criterion, +}; +use proptest::{ + prelude::*, + strategy::{Strategy, ValueTree}, + test_runner::TestRunner, +}; +use 
reth_transaction_pool::{blob_tx_priority, fee_delta}; + +fn generate_test_data_fee_delta() -> (u128, u128) { + let config = ProptestConfig::default(); + let mut runner = TestRunner::new(config); + prop::arbitrary::any::<(u128, u128)>().new_tree(&mut runner).unwrap().current() +} + +fn generate_test_data_priority() -> (u128, u128, u128, u128) { + let config = ProptestConfig::default(); + let mut runner = TestRunner::new(config); + prop::arbitrary::any::<(u128, u128, u128, u128)>().new_tree(&mut runner).unwrap().current() +} + +fn priority_bench( + group: &mut BenchmarkGroup, + description: &str, + input_data: (u128, u128, u128, u128), +) { + let group_id = format!("txpool | {}", description); + + group.bench_function(group_id, |b| { + b.iter(|| { + black_box(blob_tx_priority( + black_box(input_data.0), + black_box(input_data.1), + black_box(input_data.2), + black_box(input_data.3), + )); + }); + }); +} + +fn fee_jump_bench( + group: &mut BenchmarkGroup, + description: &str, + input_data: (u128, u128), +) { + let group_id = format!("txpool | {}", description); + + group.bench_function(group_id, |b| { + b.iter(|| { + black_box(fee_delta(black_box(input_data.0), black_box(input_data.1))); + }); + }); +} + +pub fn blob_priority_calculation(c: &mut Criterion) { + let mut group = c.benchmark_group("Blob priority calculation"); + let fee_jump_input = generate_test_data_fee_delta(); + + // Unstable sorting of unsorted collection + fee_jump_bench(&mut group, "BenchmarkDynamicFeeJumpCalculation", fee_jump_input); + + let blob_priority_input = generate_test_data_priority(); + + // BinaryHeap that is resorted on each update + priority_bench(&mut group, "BenchmarkPriorityCalculation", blob_priority_input); +} + +criterion_group!(priority, blob_priority_calculation); +criterion_main!(priority); diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index a1d8b4ba7aa7f..de60ab7c5fffb 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -162,8 +162,8 @@ pub use crate::{ error::PoolResult, ordering::{CoinbaseTipOrdering, Priority, TransactionOrdering}, pool::{ - state::SubPool, AllTransactionsEvents, FullTransactionEvent, TransactionEvent, - TransactionEvents, + blob_tx_priority, fee_delta, state::SubPool, AllTransactionsEvents, FullTransactionEvent, + TransactionEvent, TransactionEvents, }, traits::*, validate::{ diff --git a/crates/transaction-pool/src/pool/blob.rs b/crates/transaction-pool/src/pool/blob.rs index 15677bf55dfd2..24c1fa6cade46 100644 --- a/crates/transaction-pool/src/pool/blob.rs +++ b/crates/transaction-pool/src/pool/blob.rs @@ -272,6 +272,9 @@ impl Ord for BlobTransaction { } } +/// This is the log base 2 of 1.125, which we'll use to calculate the priority +const LOG_2_1_125: f64 = 0.16992500144231237; + /// The blob step function, attempting to compute the delta given the `max_tx_fee`, and /// `current_fee`. /// @@ -284,9 +287,28 @@ impl Ord for BlobTransaction { /// /// This is supposed to get the number of fee jumps required to get from the current fee to the fee /// cap, or where the transaction would not be executable any more. 
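// A worked check of the jump math used below, self-contained and std-only.
// The number of 12.5% fee "jumps" between two fees is
// log_1.125(max_tx_fee) - log_1.125(current_fee), and by change of base
// log_1.125(x) = log2(x) / log2(1.125). `LOG_2_1_125` below is log2(1.125);
// the rewrite trades the old `f64::log(1.125)` for integer `ilog2`, giving up
// sub-jump precision to avoid float logs over the full `u128` range.
#[test]
fn fee_jump_change_of_base() {
    let x = 2f64;
    let via_log = x.log(1.125); // ≈ 5.885 jumps from a fee of 1 to a fee of 2
    let via_log2 = x.log2() / 0.16992500144231237;
    assert!((via_log - via_log2).abs() < 1e-9);
}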
-fn fee_delta(max_tx_fee: u128, current_fee: u128) -> i64 { +pub fn fee_delta(max_tx_fee: u128, current_fee: u128) -> i64 { + if max_tx_fee == current_fee { + // if these are equal, then there's no fee jump + return 0; + } + + let max_tx_fee_jumps = if max_tx_fee == 0 { + // we can't take log2 of 0, so we set this to zero here + 0f64 + } else { + (max_tx_fee.ilog2() as f64) / LOG_2_1_125 + }; + + let current_fee_jumps = if current_fee == 0 { + // we can't take log2 of 0, so we set this to zero here + 0f64 + } else { + (current_fee.ilog2() as f64) / LOG_2_1_125 + }; + // jumps = log1.125(txfee) - log1.125(basefee) - let jumps = (max_tx_fee as f64).log(1.125) - (current_fee as f64).log(1.125); + let jumps = max_tx_fee_jumps - current_fee_jumps; // delta = sign(jumps) * log(abs(jumps)) match (jumps as i64).cmp(&0) { @@ -300,7 +322,7 @@ fn fee_delta(max_tx_fee: u128, current_fee: u128) -> i64 { } /// Returns the priority for the transaction, based on the "delta" blob fee and priority fee. -fn blob_tx_priority( +pub fn blob_tx_priority( blob_fee_cap: u128, blob_fee: u128, max_priority_fee: u128, diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 55f4cbb1baf77..7242099c1588b 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -110,6 +110,7 @@ pub use listener::{AllTransactionsEvents, TransactionEvents}; mod best; mod blob; +pub use blob::{blob_tx_priority, fee_delta}; mod parked; pub(crate) mod pending; pub(crate) mod size; From 7f9ce6f7c06b8e733931f3babf5b5ff24488def5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 17 Nov 2023 21:37:34 +0100 Subject: [PATCH 34/77] feat: get rid of cursor lifetime (#5476) --- Cargo.lock | 10 - crates/stages/src/stages/mod.rs | 2 +- .../db/src/implementation/mdbx/cursor.rs | 23 +- .../storage/db/src/implementation/mdbx/tx.rs | 10 +- crates/storage/libmdbx-rs/Cargo.toml | 2 - crates/storage/libmdbx-rs/benches/cursor.rs | 2 +- crates/storage/libmdbx-rs/src/codec.rs | 27 +- crates/storage/libmdbx-rs/src/cursor.rs | 285 +++++++++--------- crates/storage/libmdbx-rs/src/environment.rs | 2 +- crates/storage/libmdbx-rs/src/transaction.rs | 44 +-- 10 files changed, 194 insertions(+), 213 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e1f81c2c2c593..124e6245b2d2b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4007,15 +4007,6 @@ dependencies = [ "redox_syscall 0.4.1", ] -[[package]] -name = "lifetimed-bytes" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c970c8ea4c7b023a41cfa4af4c785a16694604c2f2a3b0d1f20a9bcb73fa550" -dependencies = [ - "bytes", -] - [[package]] name = "linked-hash-map" version = "0.5.6" @@ -6027,7 +6018,6 @@ dependencies = [ "derive_more", "indexmap 2.1.0", "libc", - "lifetimed-bytes", "parking_lot 0.12.1", "pprof", "rand 0.8.5", diff --git a/crates/stages/src/stages/mod.rs b/crates/stages/src/stages/mod.rs index c0173747a8ec2..d4eeaf2d3be35 100644 --- a/crates/stages/src/stages/mod.rs +++ b/crates/stages/src/stages/mod.rs @@ -166,7 +166,7 @@ mod tests { assert!(acc_indexing_stage.execute(&provider, input).await.is_err()); } else { acc_indexing_stage.execute(&provider, input).await.unwrap(); - let mut account_history: Cursor<'_, RW, AccountHistory> = + let mut account_history: Cursor = provider.tx_ref().cursor_read::().unwrap(); assert_eq!(account_history.walk(None).unwrap().count(), expect_num_acc_changesets); } diff --git a/crates/storage/db/src/implementation/mdbx/cursor.rs 
b/crates/storage/db/src/implementation/mdbx/cursor.rs index 181e13e05421c..3b14f0f427f61 100644 --- a/crates/storage/db/src/implementation/mdbx/cursor.rs +++ b/crates/storage/db/src/implementation/mdbx/cursor.rs @@ -17,15 +17,15 @@ use crate::{ use reth_libmdbx::{self, Error as MDBXError, TransactionKind, WriteFlags, RO, RW}; /// Read only Cursor. -pub type CursorRO<'tx, T> = Cursor<'tx, RO, T>; +pub type CursorRO = Cursor; /// Read write cursor. -pub type CursorRW<'tx, T> = Cursor<'tx, RW, T>; +pub type CursorRW = Cursor; /// Cursor wrapper to access KV items. #[derive(Debug)] -pub struct Cursor<'tx, K: TransactionKind, T: Table> { +pub struct Cursor { /// Inner `libmdbx` cursor. - pub(crate) inner: reth_libmdbx::Cursor<'tx, K>, + pub(crate) inner: reth_libmdbx::Cursor, /// Cache buffer that receives compressed values. buf: Vec, /// Whether to record metrics or not. @@ -34,11 +34,8 @@ pub struct Cursor<'tx, K: TransactionKind, T: Table> { _dbi: PhantomData, } -impl<'tx, K: TransactionKind, T: Table> Cursor<'tx, K, T> { - pub(crate) fn new_with_metrics( - inner: reth_libmdbx::Cursor<'tx, K>, - with_metrics: bool, - ) -> Self { +impl Cursor { + pub(crate) fn new_with_metrics(inner: reth_libmdbx::Cursor, with_metrics: bool) -> Self { Self { inner, buf: Vec::new(), with_metrics, _dbi: PhantomData } } @@ -81,7 +78,7 @@ macro_rules! compress_to_buf_or_ref { }; } -impl DbCursorRO for Cursor<'_, K, T> { +impl DbCursorRO for Cursor { fn first(&mut self) -> PairResult { decode!(self.inner.first()) } @@ -164,7 +161,7 @@ impl DbCursorRO for Cursor<'_, K, T> { } } -impl DbDupCursorRO for Cursor<'_, K, T> { +impl DbDupCursorRO for Cursor { /// Returns the next `(key, value)` pair of a DUPSORT table. fn next_dup(&mut self) -> PairResult { decode!(self.inner.next_dup()) @@ -245,7 +242,7 @@ impl DbDupCursorRO for Cursor<'_, K, T> { } } -impl DbCursorRW for Cursor<'_, RW, T> { +impl DbCursorRW for Cursor { /// Database operation that will update an existing row if a specified value already /// exists in a table, and insert a new row if the specified value doesn't already exist /// @@ -328,7 +325,7 @@ impl DbCursorRW for Cursor<'_, RW, T> { } } -impl DbDupCursorRW for Cursor<'_, RW, T> { +impl DbDupCursorRW for Cursor { fn delete_current_duplicates(&mut self) -> Result<(), DatabaseError> { self.execute_with_operation_metric(Operation::CursorDeleteCurrentDuplicates, None, |this| { this.inner.del(WriteFlags::NO_DUP_DATA).map_err(|e| DatabaseError::Delete(e.into())) diff --git a/crates/storage/db/src/implementation/mdbx/tx.rs b/crates/storage/db/src/implementation/mdbx/tx.rs index 3798587d8280a..c62a54e6724ad 100644 --- a/crates/storage/db/src/implementation/mdbx/tx.rs +++ b/crates/storage/db/src/implementation/mdbx/tx.rs @@ -75,7 +75,7 @@ impl Tx { } /// Create db Cursor - pub fn new_cursor(&self) -> Result, DatabaseError> { + pub fn new_cursor(&self) -> Result, DatabaseError> { let inner = self .inner .cursor_with_dbi(self.get_dbi::()?) 
@@ -168,13 +168,13 @@ impl Drop for MetricsHandler { } impl<'a, K: TransactionKind> DbTxGAT<'a> for Tx { - type Cursor = Cursor<'a, K, T>; - type DupCursor = Cursor<'a, K, T>; + type Cursor = Cursor; + type DupCursor = Cursor; } impl<'a, K: TransactionKind> DbTxMutGAT<'a> for Tx { - type CursorMut = Cursor<'a, RW, T>; - type DupCursorMut = Cursor<'a, RW, T>; + type CursorMut = Cursor; + type DupCursorMut = Cursor; } impl TableImporter for Tx {} diff --git a/crates/storage/libmdbx-rs/Cargo.toml b/crates/storage/libmdbx-rs/Cargo.toml index dc65f34faae22..7acda0894fc4a 100644 --- a/crates/storage/libmdbx-rs/Cargo.toml +++ b/crates/storage/libmdbx-rs/Cargo.toml @@ -22,8 +22,6 @@ thiserror.workspace = true ffi = { package = "reth-mdbx-sys", path = "./mdbx-sys" } -lifetimed-bytes = { version = "0.1", optional = true } - [features] default = [] return-borrowed = [] diff --git a/crates/storage/libmdbx-rs/benches/cursor.rs b/crates/storage/libmdbx-rs/benches/cursor.rs index 78044e45b9fe4..89c87c6f417a5 100644 --- a/crates/storage/libmdbx-rs/benches/cursor.rs +++ b/crates/storage/libmdbx-rs/benches/cursor.rs @@ -33,7 +33,7 @@ fn bench_get_seq_iter(c: &mut Criterion) { count += 1; } - fn iterate(cursor: &mut Cursor<'_, K>) -> Result<()> { + fn iterate(cursor: &mut Cursor) -> Result<()> { let mut i = 0; for result in cursor.iter::() { let (key_len, data_len) = result?; diff --git a/crates/storage/libmdbx-rs/src/codec.rs b/crates/storage/libmdbx-rs/src/codec.rs index f313492d7ac47..40509f533d85f 100644 --- a/crates/storage/libmdbx-rs/src/codec.rs +++ b/crates/storage/libmdbx-rs/src/codec.rs @@ -3,7 +3,7 @@ use derive_more::*; use std::{borrow::Cow, slice}; /// Implement this to be able to decode data values -pub trait TableObject<'tx> { +pub trait TableObject { /// Decodes the object from the given bytes. 
fn decode(data_val: &[u8]) -> Result where @@ -28,7 +28,7 @@ pub trait TableObject<'tx> { } } -impl<'tx> TableObject<'tx> for Cow<'tx, [u8]> { +impl<'tx> TableObject for Cow<'tx, [u8]> { fn decode(_: &[u8]) -> Result { unreachable!() } @@ -55,22 +55,7 @@ impl<'tx> TableObject<'tx> for Cow<'tx, [u8]> { } } -#[cfg(feature = "lifetimed-bytes")] -impl<'tx> TableObject<'tx> for lifetimed_bytes::Bytes<'tx> { - fn decode(_: &[u8]) -> Result { - unreachable!() - } - - #[doc(hidden)] - unsafe fn decode_val( - txn: *const ffi::MDBX_txn, - data_val: &ffi::MDBX_val, - ) -> Result { - Cow::<'tx, [u8]>::decode_val::(txn, data_val).map(From::from) - } -} - -impl<'tx> TableObject<'tx> for Vec { +impl TableObject for Vec { fn decode(data_val: &[u8]) -> Result where Self: Sized, @@ -79,7 +64,7 @@ impl<'tx> TableObject<'tx> for Vec { } } -impl<'tx> TableObject<'tx> for () { +impl TableObject for () { fn decode(_: &[u8]) -> Result { Ok(()) } @@ -96,7 +81,7 @@ impl<'tx> TableObject<'tx> for () { #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Deref, DerefMut)] pub struct ObjectLength(pub usize); -impl<'tx> TableObject<'tx> for ObjectLength { +impl TableObject for ObjectLength { fn decode(data_val: &[u8]) -> Result where Self: Sized, @@ -105,7 +90,7 @@ impl<'tx> TableObject<'tx> for ObjectLength { } } -impl<'tx, const LEN: usize> TableObject<'tx> for [u8; LEN] { +impl TableObject for [u8; LEN] { fn decode(data_val: &[u8]) -> Result where Self: Sized, diff --git a/crates/storage/libmdbx-rs/src/cursor.rs b/crates/storage/libmdbx-rs/src/cursor.rs index dd242586e0737..30765bc93e85c 100644 --- a/crates/storage/libmdbx-rs/src/cursor.rs +++ b/crates/storage/libmdbx-rs/src/cursor.rs @@ -1,40 +1,41 @@ -use crate::{ - error::{mdbx_result, Error, Result}, - flags::*, - mdbx_try_optional, - transaction::{TransactionKind, TransactionPtr, RW}, - TableObject, Transaction, -}; +use std::{borrow::Cow, fmt, marker::PhantomData, mem, ptr}; + +use libc::c_void; + use ffi::{ MDBX_cursor_op, MDBX_FIRST, MDBX_FIRST_DUP, MDBX_GET_BOTH, MDBX_GET_BOTH_RANGE, MDBX_GET_CURRENT, MDBX_GET_MULTIPLE, MDBX_LAST, MDBX_LAST_DUP, MDBX_NEXT, MDBX_NEXT_DUP, MDBX_NEXT_MULTIPLE, MDBX_NEXT_NODUP, MDBX_PREV, MDBX_PREV_DUP, MDBX_PREV_MULTIPLE, MDBX_PREV_NODUP, MDBX_SET, MDBX_SET_KEY, MDBX_SET_LOWERBOUND, MDBX_SET_RANGE, }; -use libc::c_void; -use std::{borrow::Cow, fmt, marker::PhantomData, mem, ptr}; + +use crate::{ + error::{mdbx_result, Error, Result}, + flags::*, + mdbx_try_optional, + transaction::{TransactionKind, RW}, + TableObject, Transaction, +}; /// A cursor for navigating the items within a database. 
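// Sketch of a custom decoder, assuming it lives alongside the `TableObject`
// trait in `codec.rs` above: with the `'tx` lifetime gone, an owned type needs
// no lifetime plumbing. `U64Be` is a hypothetical example type; the length
// check is delegated to the `[u8; LEN]` impl shown earlier.
struct U64Be(u64);

impl TableObject for U64Be {
    fn decode(data_val: &[u8]) -> Result<Self> {
        Ok(U64Be(u64::from_be_bytes(<[u8; 8]>::decode(data_val)?)))
    }
}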
-pub struct Cursor<'txn, K> +pub struct Cursor where K: TransactionKind, { - txn: TransactionPtr, + txn: Transaction, cursor: *mut ffi::MDBX_cursor, - _marker: PhantomData, } -impl<'txn, K> Cursor<'txn, K> +impl Cursor where K: TransactionKind, { - pub(crate) fn new(txn: &'txn Transaction, dbi: ffi::MDBX_dbi) -> Result { + pub(crate) fn new(txn: Transaction, dbi: ffi::MDBX_dbi) -> Result { let mut cursor: *mut ffi::MDBX_cursor = ptr::null_mut(); - let txn = txn.txn_ptr(); unsafe { mdbx_result(txn.txn_execute(|txn| ffi::mdbx_cursor_open(txn, dbi, &mut cursor)))?; } - Ok(Self { txn, cursor, _marker: PhantomData }) + Ok(Self { txn, cursor }) } fn new_at_position(other: &Self) -> Result { @@ -43,7 +44,7 @@ where let res = ffi::mdbx_cursor_copy(other.cursor(), cursor); - let s = Self { txn: other.txn.clone(), cursor, _marker: PhantomData }; + let s = Self { txn: other.txn.clone(), cursor }; mdbx_result(res)?; @@ -59,6 +60,24 @@ where self.cursor } + /// Returns an Iterator over the raw key value slices + /// + /// Note: The lifetime ensures that the transaction is kept alive while entries are used + pub fn into_iter_slices<'cur>(self) -> IntoIter<'cur, K, Cow<'cur, [u8]>, Cow<'cur, [u8]>> { + self.into_iter() + } + /// Returns an Iterator over key value pairs of the cursor + /// + /// Note: The lifetime ensures that the transaction is kept alive while entries are used + #[allow(clippy::should_implement_trait)] + pub fn into_iter<'cur, Key, Value>(self) -> IntoIter<'cur, K, Key, Value> + where + Key: TableObject, + Value: TableObject, + { + IntoIter::new(self, MDBX_NEXT, MDBX_NEXT) + } + /// Retrieves a key/data pair from the cursor. Depending on the cursor op, /// the current key may be returned. fn get( @@ -68,8 +87,8 @@ where op: MDBX_cursor_op, ) -> Result<(Option, Value, bool)> where - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { unsafe { let mut key_val = slice_to_val(key); @@ -105,7 +124,7 @@ where op: MDBX_cursor_op, ) -> Result> where - Value: TableObject<'txn>, + Value: TableObject, { let (_, v, _) = mdbx_try_optional!(self.get::<(), Value>(key, data, op)); @@ -119,8 +138,8 @@ where op: MDBX_cursor_op, ) -> Result> where - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { let (k, v, _) = mdbx_try_optional!(self.get(key, data, op)); @@ -130,8 +149,8 @@ where /// Position at first key/data item. pub fn first(&mut self) -> Result> where - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { self.get_full(None, None, MDBX_FIRST) } @@ -139,7 +158,7 @@ where /// [DatabaseFlags::DUP_SORT]-only: Position at first data item of current key. pub fn first_dup(&mut self) -> Result> where - Value: TableObject<'txn>, + Value: TableObject, { self.get_value(None, None, MDBX_FIRST_DUP) } @@ -147,7 +166,7 @@ where /// [DatabaseFlags::DUP_SORT]-only: Position at key/data pair. pub fn get_both(&mut self, k: &[u8], v: &[u8]) -> Result> where - Value: TableObject<'txn>, + Value: TableObject, { self.get_value(Some(k), Some(v), MDBX_GET_BOTH) } @@ -156,7 +175,7 @@ where /// equal to specified data. pub fn get_both_range(&mut self, k: &[u8], v: &[u8]) -> Result> where - Value: TableObject<'txn>, + Value: TableObject, { self.get_value(Some(k), Some(v), MDBX_GET_BOTH_RANGE) } @@ -164,8 +183,8 @@ where /// Return key/data at current cursor position. 
pub fn get_current(&mut self) -> Result> where - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { self.get_full(None, None, MDBX_GET_CURRENT) } @@ -174,7 +193,7 @@ where /// Move cursor to prepare for [Self::next_multiple()]. pub fn get_multiple(&mut self) -> Result> where - Value: TableObject<'txn>, + Value: TableObject, { self.get_value(None, None, MDBX_GET_MULTIPLE) } @@ -182,8 +201,8 @@ where /// Position at last key/data item. pub fn last(&mut self) -> Result> where - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { self.get_full(None, None, MDBX_LAST) } @@ -191,7 +210,7 @@ where /// DupSort-only: Position at last data item of current key. pub fn last_dup(&mut self) -> Result> where - Value: TableObject<'txn>, + Value: TableObject, { self.get_value(None, None, MDBX_LAST_DUP) } @@ -200,8 +219,8 @@ where #[allow(clippy::should_implement_trait)] pub fn next(&mut self) -> Result> where - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { self.get_full(None, None, MDBX_NEXT) } @@ -209,8 +228,8 @@ where /// [DatabaseFlags::DUP_SORT]-only: Position at next data item of current key. pub fn next_dup(&mut self) -> Result> where - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { self.get_full(None, None, MDBX_NEXT_DUP) } @@ -219,8 +238,8 @@ where /// cursor position. Move cursor to prepare for MDBX_NEXT_MULTIPLE. pub fn next_multiple(&mut self) -> Result> where - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { self.get_full(None, None, MDBX_NEXT_MULTIPLE) } @@ -228,8 +247,8 @@ where /// Position at first data item of next key. pub fn next_nodup(&mut self) -> Result> where - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { self.get_full(None, None, MDBX_NEXT_NODUP) } @@ -237,8 +256,8 @@ where /// Position at previous data item. pub fn prev(&mut self) -> Result> where - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { self.get_full(None, None, MDBX_PREV) } @@ -246,8 +265,8 @@ where /// [DatabaseFlags::DUP_SORT]-only: Position at previous data item of current key. pub fn prev_dup(&mut self) -> Result> where - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { self.get_full(None, None, MDBX_PREV_DUP) } @@ -255,8 +274,8 @@ where /// Position at last data item of previous key. pub fn prev_nodup(&mut self) -> Result> where - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { self.get_full(None, None, MDBX_PREV_NODUP) } @@ -264,7 +283,7 @@ where /// Position at specified key. pub fn set(&mut self, key: &[u8]) -> Result> where - Value: TableObject<'txn>, + Value: TableObject, { self.get_value(Some(key), None, MDBX_SET) } @@ -272,8 +291,8 @@ where /// Position at specified key, return both key and data. pub fn set_key(&mut self, key: &[u8]) -> Result> where - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { self.get_full(Some(key), None, MDBX_SET_KEY) } @@ -281,8 +300,8 @@ where /// Position at first key greater than or equal to specified key. 
pub fn set_range(&mut self, key: &[u8]) -> Result> where - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { self.get_full(Some(key), None, MDBX_SET_RANGE) } @@ -291,8 +310,8 @@ where /// duplicate data items. pub fn prev_multiple(&mut self) -> Result> where - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { self.get_full(None, None, MDBX_PREV_MULTIPLE) } @@ -308,8 +327,8 @@ where /// exactly and [true] if the next pair was returned. pub fn set_lowerbound(&mut self, key: &[u8]) -> Result> where - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { let (k, v, found) = mdbx_try_optional!(self.get(Some(key), None, MDBX_SET_LOWERBOUND)); @@ -323,11 +342,11 @@ where /// For databases with duplicate data items ([DatabaseFlags::DUP_SORT]), the /// duplicate data items of each key will be returned before moving on to /// the next key. - pub fn iter(&mut self) -> Iter<'txn, '_, K, Key, Value> + pub fn iter(&mut self) -> Iter<'_, K, Key, Value> where Self: Sized, - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { Iter::new(self, ffi::MDBX_NEXT, ffi::MDBX_NEXT) } @@ -337,11 +356,11 @@ where /// For databases with duplicate data items ([DatabaseFlags::DUP_SORT]), the /// duplicate data items of each key will be returned before moving on to /// the next key. - pub fn iter_start(&mut self) -> Iter<'txn, '_, K, Key, Value> + pub fn iter_start(&mut self) -> Iter<'_, K, Key, Value> where Self: Sized, - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { Iter::new(self, ffi::MDBX_FIRST, ffi::MDBX_NEXT) } @@ -351,10 +370,10 @@ where /// For databases with duplicate data items ([DatabaseFlags::DUP_SORT]), the /// duplicate data items of each key will be returned before moving on to /// the next key. - pub fn iter_from(&mut self, key: &[u8]) -> Iter<'txn, '_, K, Key, Value> + pub fn iter_from(&mut self, key: &[u8]) -> Iter<'_, K, Key, Value> where - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { let res: Result> = self.set_range(key); if let Err(error) = res { @@ -366,30 +385,30 @@ where /// Iterate over duplicate database items. The iterator will begin with the /// item next after the cursor, and continue until the end of the database. /// Each item will be returned as an iterator of its duplicates. - pub fn iter_dup(&mut self) -> IterDup<'txn, '_, K, Key, Value> + pub fn iter_dup(&mut self) -> IterDup<'_, K, Key, Value> where - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { IterDup::new(self, ffi::MDBX_NEXT) } /// Iterate over duplicate database items starting from the beginning of the /// database. Each item will be returned as an iterator of its duplicates. - pub fn iter_dup_start(&mut self) -> IterDup<'txn, '_, K, Key, Value> + pub fn iter_dup_start(&mut self) -> IterDup<'_, K, Key, Value> where - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { IterDup::new(self, ffi::MDBX_FIRST) } /// Iterate over duplicate items in the database starting from the given /// key. Each item will be returned as an iterator of its duplicates. 
- pub fn iter_dup_from(&mut self, key: &[u8]) -> IterDup<'txn, '_, K, Key, Value> + pub fn iter_dup_from(&mut self, key: &[u8]) -> IterDup<'_, K, Key, Value> where - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { let res: Result> = self.set_range(key); if let Err(error) = res { @@ -399,10 +418,10 @@ where } /// Iterate over the duplicates of the item in the database with the given key. - pub fn iter_dup_of(&mut self, key: &[u8]) -> Iter<'txn, '_, K, Key, Value> + pub fn iter_dup_of(&mut self, key: &[u8]) -> Iter<'_, K, Key, Value> where - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { let res: Result> = self.set(key); match res { @@ -417,7 +436,7 @@ where } } -impl<'txn> Cursor<'txn, RW> { +impl Cursor { /// Puts a key/data pair into the database. The cursor will be positioned at /// the new data item, or on failure usually near it. pub fn put(&mut self, key: &[u8], data: &[u8], flags: WriteFlags) -> Result<()> { @@ -449,7 +468,7 @@ impl<'txn> Cursor<'txn, RW> { } } -impl<'txn, K> Clone for Cursor<'txn, K> +impl Clone for Cursor where K: TransactionKind, { @@ -458,7 +477,7 @@ where } } -impl<'txn, K> fmt::Debug for Cursor<'txn, K> +impl fmt::Debug for Cursor where K: TransactionKind, { @@ -467,7 +486,7 @@ where } } -impl<'txn, K> Drop for Cursor<'txn, K> +impl Drop for Cursor where K: TransactionKind, { @@ -485,28 +504,16 @@ unsafe fn slice_to_val(slice: Option<&[u8]>) -> ffi::MDBX_val { } } -unsafe impl<'txn, K> Send for Cursor<'txn, K> where K: TransactionKind {} -unsafe impl<'txn, K> Sync for Cursor<'txn, K> where K: TransactionKind {} - -impl<'txn, K> IntoIterator for Cursor<'txn, K> -where - K: TransactionKind, -{ - type Item = Result<(Cow<'txn, [u8]>, Cow<'txn, [u8]>)>; - type IntoIter = IntoIter<'txn, K, Cow<'txn, [u8]>, Cow<'txn, [u8]>>; - - fn into_iter(self) -> Self::IntoIter { - IntoIter::new(self, MDBX_NEXT, MDBX_NEXT) - } -} +unsafe impl Send for Cursor where K: TransactionKind {} +unsafe impl Sync for Cursor where K: TransactionKind {} /// An iterator over the key/value pairs in an MDBX database. #[derive(Debug)] -pub enum IntoIter<'txn, K, Key, Value> +pub enum IntoIter<'cur, K, Key, Value> where K: TransactionKind, - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { /// An iterator that returns an error on every call to [Iter::next()]. /// Cursor.iter*() creates an Iter of this type when MDBX returns an error @@ -521,7 +528,7 @@ where /// fails for some reason. Ok { /// The MDBX cursor with which to iterate. - cursor: Cursor<'txn, K>, + cursor: Cursor, /// The first operation to perform when the consumer calls [Iter::next()]. op: ffi::MDBX_cursor_op, @@ -529,33 +536,33 @@ where /// The next and subsequent operations to perform. next_op: ffi::MDBX_cursor_op, - _marker: PhantomData, + _marker: PhantomData, }, } -impl<'txn, K, Key, Value> IntoIter<'txn, K, Key, Value> +impl<'cur, K, Key, Value> IntoIter<'cur, K, Key, Value> where K: TransactionKind, - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { /// Creates a new iterator backed by the given cursor. 
- fn new(cursor: Cursor<'txn, K>, op: ffi::MDBX_cursor_op, next_op: ffi::MDBX_cursor_op) -> Self { - IntoIter::Ok { cursor, op, next_op, _marker: PhantomData } + fn new(cursor: Cursor, op: ffi::MDBX_cursor_op, next_op: ffi::MDBX_cursor_op) -> Self { + IntoIter::Ok { cursor, op, next_op, _marker: Default::default() } } } -impl<'txn, K, Key, Value> Iterator for IntoIter<'txn, K, Key, Value> +impl<'cur, K, Key, Value> Iterator for IntoIter<'cur, K, Key, Value> where K: TransactionKind, - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { type Item = Result<(Key, Value)>; fn next(&mut self) -> Option { match self { - Self::Ok { cursor, op, next_op, _marker } => { + Self::Ok { cursor, op, next_op, .. } => { let mut key = ffi::MDBX_val { iov_len: 0, iov_base: ptr::null_mut() }; let mut data = ffi::MDBX_val { iov_len: 0, iov_base: ptr::null_mut() }; let op = mem::replace(op, *next_op); @@ -589,11 +596,11 @@ where /// An iterator over the key/value pairs in an MDBX database. #[derive(Debug)] -pub enum Iter<'txn, 'cur, K, Key, Value> +pub enum Iter<'cur, K, Key, Value> where K: TransactionKind, - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { /// An iterator that returns an error on every call to [Iter::next()]. /// Cursor.iter*() creates an Iter of this type when MDBX returns an error @@ -608,7 +615,7 @@ where /// fails for some reason. Ok { /// The MDBX cursor with which to iterate. - cursor: &'cur mut Cursor<'txn, K>, + cursor: &'cur mut Cursor, /// The first operation to perform when the consumer calls [Iter::next()]. op: ffi::MDBX_cursor_op, @@ -616,31 +623,31 @@ where /// The next and subsequent operations to perform. next_op: ffi::MDBX_cursor_op, - _marker: PhantomData, + _marker: PhantomData, }, } -impl<'txn, 'cur, K, Key, Value> Iter<'txn, 'cur, K, Key, Value> +impl<'cur, K, Key, Value> Iter<'cur, K, Key, Value> where K: TransactionKind, - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { /// Creates a new iterator backed by the given cursor. fn new( - cursor: &'cur mut Cursor<'txn, K>, + cursor: &'cur mut Cursor, op: ffi::MDBX_cursor_op, next_op: ffi::MDBX_cursor_op, ) -> Self { - Iter::Ok { cursor, op, next_op, _marker: PhantomData } + Iter::Ok { cursor, op, next_op, _marker: Default::default() } } } -impl<'txn, 'cur, K, Key, Value> Iterator for Iter<'txn, 'cur, K, Key, Value> +impl<'cur, K, Key, Value> Iterator for Iter<'cur, K, Key, Value> where K: TransactionKind, - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { type Item = Result<(Key, Value)>; @@ -682,11 +689,11 @@ where /// /// The yielded items of the iterator are themselves iterators over the duplicate values for a /// specific key. -pub enum IterDup<'txn, 'cur, K, Key, Value> +pub enum IterDup<'cur, K, Key, Value> where K: TransactionKind, - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { /// An iterator that returns an error on every call to Iter.next(). /// Cursor.iter*() creates an Iter of this type when MDBX returns an error @@ -701,45 +708,45 @@ where /// fails for some reason. Ok { /// The MDBX cursor with which to iterate. - cursor: &'cur mut Cursor<'txn, K>, + cursor: &'cur mut Cursor, /// The first operation to perform when the consumer calls Iter.next(). 
op: MDBX_cursor_op, - _marker: PhantomData, + _marker: PhantomData, }, } -impl<'txn, 'cur, K, Key, Value> IterDup<'txn, 'cur, K, Key, Value> +impl<'cur, K, Key, Value> IterDup<'cur, K, Key, Value> where K: TransactionKind, - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { /// Creates a new iterator backed by the given cursor. - fn new(cursor: &'cur mut Cursor<'txn, K>, op: MDBX_cursor_op) -> Self { - IterDup::Ok { cursor, op, _marker: PhantomData } + fn new(cursor: &'cur mut Cursor, op: MDBX_cursor_op) -> Self { + IterDup::Ok { cursor, op, _marker: Default::default() } } } -impl<'txn, 'cur, K, Key, Value> fmt::Debug for IterDup<'txn, 'cur, K, Key, Value> +impl<'cur, K, Key, Value> fmt::Debug for IterDup<'cur, K, Key, Value> where K: TransactionKind, - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("IterDup").finish() } } -impl<'txn, 'cur, K, Key, Value> Iterator for IterDup<'txn, 'cur, K, Key, Value> +impl<'cur, K, Key, Value> Iterator for IterDup<'cur, K, Key, Value> where K: TransactionKind, - Key: TableObject<'txn>, - Value: TableObject<'txn>, + Key: TableObject, + Value: TableObject, { - type Item = IntoIter<'txn, K, Key, Value>; + type Item = IntoIter<'cur, K, Key, Value>; fn next(&mut self) -> Option { match self { diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs index 80d63cc1b2417..2342373a27c60 100644 --- a/crates/storage/libmdbx-rs/src/environment.rs +++ b/crates/storage/libmdbx-rs/src/environment.rs @@ -208,7 +208,7 @@ impl Environment { let db = Database::freelist_db(); let cursor = txn.cursor(&db)?; - for result in cursor { + for result in cursor.into_iter_slices() { let (_key, value) = result?; if value.len() < size_of::() { return Err(Error::Corrupted) diff --git a/crates/storage/libmdbx-rs/src/transaction.rs b/crates/storage/libmdbx-rs/src/transaction.rs index c75168e0452b4..1b102ad4ffe69 100644 --- a/crates/storage/libmdbx-rs/src/transaction.rs +++ b/crates/storage/libmdbx-rs/src/transaction.rs @@ -107,11 +107,6 @@ where self.inner.txn_execute(f) } - /// Returns a copy of the pointer to the underlying MDBX transaction. - pub(crate) fn txn_ptr(&self) -> TransactionPtr { - self.inner.txn.clone() - } - /// Returns a copy of the raw pointer to the underlying MDBX transaction. #[doc(hidden)] pub fn txn(&self) -> *mut ffi::MDBX_txn { @@ -151,9 +146,9 @@ where /// returned. Retrieval of other items requires the use of /// [Cursor]. If the item is not in the database, then /// [None] will be returned. - pub fn get<'txn, Key>(&'txn self, dbi: ffi::MDBX_dbi, key: &[u8]) -> Result> + pub fn get(&self, dbi: ffi::MDBX_dbi, key: &[u8]) -> Result> where - Key: TableObject<'txn>, + Key: TableObject, { let key_val: ffi::MDBX_val = ffi::MDBX_val { iov_len: key.len(), iov_base: key.as_ptr() as *mut c_void }; @@ -257,13 +252,31 @@ where } /// Open a new cursor on the given database. - pub fn cursor(&self, db: &Database) -> Result> { - Cursor::new(self, db.dbi()) + pub fn cursor(&self, db: &Database) -> Result> { + Cursor::new(self.clone(), db.dbi()) } /// Open a new cursor on the given dbi. 
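// Sketch of the owning-iterator flow, written as if inside this crate:
// `into_iter_slices` consumes the cursor, and the cursor's cloned
// `Transaction` keeps the underlying MDBX transaction alive while entries
// are yielded.
fn count_entries(env: &Environment) -> Result<usize> {
    let txn = env.begin_ro_txn()?;
    let db = txn.open_db(None)?;
    let cursor = txn.cursor(&db)?;
    let mut n = 0;
    for kv in cursor.into_iter_slices() {
        let (_key, _value) = kv?; // Cow<'_, [u8]> key and value
        n += 1;
    }
    Ok(n)
}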
- pub fn cursor_with_dbi(&self, dbi: ffi::MDBX_dbi) -> Result> { - Cursor::new(self, dbi) + pub fn cursor_with_dbi(&self, dbi: ffi::MDBX_dbi) -> Result> { + Cursor::new(self.clone(), dbi) + } +} + +impl Clone for Transaction +where + K: TransactionKind, +{ + fn clone(&self) -> Self { + Self { inner: Arc::clone(&self.inner) } + } +} + +impl fmt::Debug for Transaction +where + K: TransactionKind, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RoTransaction").finish_non_exhaustive() } } @@ -499,15 +512,6 @@ impl Transaction { } } -impl fmt::Debug for Transaction -where - K: TransactionKind, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RoTransaction").finish_non_exhaustive() - } -} - /// A shareable pointer to an MDBX transaction. #[derive(Clone)] pub(crate) struct TransactionPtr { From db5d01e328b2cabc37837c508665b454bafb04fc Mon Sep 17 00:00:00 2001 From: Bjerg Date: Fri, 17 Nov 2023 22:12:12 +0100 Subject: [PATCH 35/77] refactor: split async/sync work in stages (#4636) Co-authored-by: Roman Krasiuk --- bin/reth/src/chain/import.rs | 17 +- bin/reth/src/debug_cmd/execution.rs | 8 +- bin/reth/src/debug_cmd/merkle.rs | 57 ++-- bin/reth/src/node/mod.rs | 9 +- bin/reth/src/stage/dump/execution.rs | 33 +- bin/reth/src/stage/dump/hashing_account.rs | 44 ++- bin/reth/src/stage/dump/hashing_storage.rs | 44 ++- bin/reth/src/stage/dump/merkle.rs | 57 ++-- bin/reth/src/stage/run.rs | 27 +- .../consensus/beacon/src/engine/test_utils.rs | 9 +- crates/interfaces/src/provider.rs | 3 + crates/stages/benches/criterion.rs | 7 +- crates/stages/benches/setup/mod.rs | 23 +- crates/stages/src/error.rs | 4 + crates/stages/src/lib.rs | 10 +- crates/stages/src/pipeline/mod.rs | 203 ++++++------ crates/stages/src/sets.rs | 50 +-- crates/stages/src/stage.rs | 41 ++- crates/stages/src/stages/bodies.rs | 88 ++--- crates/stages/src/stages/execution.rs | 14 +- crates/stages/src/stages/finish.rs | 5 +- crates/stages/src/stages/hashing_account.rs | 13 +- crates/stages/src/stages/hashing_storage.rs | 5 +- crates/stages/src/stages/headers.rs | 302 +++++------------- .../src/stages/index_account_history.rs | 41 ++- .../src/stages/index_storage_history.rs | 41 ++- crates/stages/src/stages/merkle.rs | 5 +- crates/stages/src/stages/mod.rs | 10 +- crates/stages/src/stages/sender_recovery.rs | 14 +- crates/stages/src/stages/total_difficulty.rs | 5 +- crates/stages/src/stages/tx_lookup.rs | 5 +- crates/stages/src/test_utils/runner.rs | 13 +- crates/stages/src/test_utils/stage.rs | 5 +- crates/storage/provider/src/lib.rs | 10 +- .../provider/src/providers/database/mod.rs | 101 +++++- .../src/providers/database/provider.rs | 65 +++- .../provider/src/traits/header_sync_gap.rs | 50 +++ crates/storage/provider/src/traits/mod.rs | 3 + testing/ef-tests/src/cases/blockchain_test.rs | 3 +- 39 files changed, 769 insertions(+), 675 deletions(-) create mode 100644 crates/storage/provider/src/traits/header_sync_gap.rs diff --git a/bin/reth/src/chain/import.rs b/bin/reth/src/chain/import.rs index 984a34f8cf443..572f8c0ee2f8b 100644 --- a/bin/reth/src/chain/import.rs +++ b/bin/reth/src/chain/import.rs @@ -1,4 +1,8 @@ use crate::{ + args::{ + utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, + DatabaseArgs, + }, dirs::{DataDirPath, MaybePlatformPath}, init::init_genesis, node::events::{handle_events, NodeEvent}, @@ -8,12 +12,6 @@ use clap::Parser; use eyre::Context; use futures::{Stream, StreamExt}; use reth_beacon_consensus::BeaconConsensus; -use 
reth_provider::{ProviderFactory, StageCheckpointReader}; - -use crate::args::{ - utils::{chain_help, genesis_value_parser, SUPPORTED_CHAINS}, - DatabaseArgs, -}; use reth_config::Config; use reth_db::{database::Database, init_db}; use reth_downloaders::{ @@ -22,12 +20,10 @@ use reth_downloaders::{ }; use reth_interfaces::consensus::Consensus; use reth_primitives::{stage::StageId, ChainSpec, B256}; +use reth_provider::{HeaderSyncMode, ProviderFactory, StageCheckpointReader}; use reth_stages::{ prelude::*, - stages::{ - ExecutionStage, ExecutionStageThresholds, HeaderSyncMode, SenderRecoveryStage, - TotalDifficultyStage, - }, + stages::{ExecutionStage, ExecutionStageThresholds, SenderRecoveryStage, TotalDifficultyStage}, }; use std::{path::PathBuf, sync::Arc}; use tokio::sync::watch; @@ -164,6 +160,7 @@ impl ImportCommand { .with_max_block(max_block) .add_stages( DefaultStages::new( + ProviderFactory::new(db.clone(), self.chain.clone()), HeaderSyncMode::Tip(tip_rx), consensus.clone(), header_downloader, diff --git a/bin/reth/src/debug_cmd/execution.rs b/bin/reth/src/debug_cmd/execution.rs index fee6390d2f9eb..83c5549451ca7 100644 --- a/bin/reth/src/debug_cmd/execution.rs +++ b/bin/reth/src/debug_cmd/execution.rs @@ -27,13 +27,10 @@ use reth_interfaces::{ use reth_network::{NetworkEvents, NetworkHandle}; use reth_network_api::NetworkInfo; use reth_primitives::{fs, stage::StageId, BlockHashOrNumber, BlockNumber, ChainSpec, B256}; -use reth_provider::{BlockExecutionWriter, ProviderFactory, StageCheckpointReader}; +use reth_provider::{BlockExecutionWriter, HeaderSyncMode, ProviderFactory, StageCheckpointReader}; use reth_stages::{ sets::DefaultStages, - stages::{ - ExecutionStage, ExecutionStageThresholds, HeaderSyncMode, SenderRecoveryStage, - TotalDifficultyStage, - }, + stages::{ExecutionStage, ExecutionStageThresholds, SenderRecoveryStage, TotalDifficultyStage}, Pipeline, StageSet, }; use reth_tasks::TaskExecutor; @@ -118,6 +115,7 @@ impl Command { .with_tip_sender(tip_tx) .add_stages( DefaultStages::new( + ProviderFactory::new(db.clone(), self.chain.clone()), header_mode, Arc::clone(&consensus), header_downloader, diff --git a/bin/reth/src/debug_cmd/merkle.rs b/bin/reth/src/debug_cmd/merkle.rs index dc5f98e59eb31..765d1f866132e 100644 --- a/bin/reth/src/debug_cmd/merkle.rs +++ b/bin/reth/src/debug_cmd/merkle.rs @@ -222,53 +222,42 @@ impl Command { None }; - execution_stage - .execute( + execution_stage.execute( + &provider_rw, + ExecInput { + target: Some(block), + checkpoint: block.checked_sub(1).map(StageCheckpoint::new), + }, + )?; + + let mut account_hashing_done = false; + while !account_hashing_done { + let output = account_hashing_stage.execute( &provider_rw, ExecInput { target: Some(block), - checkpoint: block.checked_sub(1).map(StageCheckpoint::new), + checkpoint: progress.map(StageCheckpoint::new), }, - ) - .await?; - - let mut account_hashing_done = false; - while !account_hashing_done { - let output = account_hashing_stage - .execute( - &provider_rw, - ExecInput { - target: Some(block), - checkpoint: progress.map(StageCheckpoint::new), - }, - ) - .await?; + )?; account_hashing_done = output.done; } let mut storage_hashing_done = false; while !storage_hashing_done { - let output = storage_hashing_stage - .execute( - &provider_rw, - ExecInput { - target: Some(block), - checkpoint: progress.map(StageCheckpoint::new), - }, - ) - .await?; - storage_hashing_done = output.done; - } - - let incremental_result = merkle_stage - .execute( + let output = storage_hashing_stage.execute( 
&provider_rw, ExecInput { target: Some(block), checkpoint: progress.map(StageCheckpoint::new), }, - ) - .await; + )?; + storage_hashing_done = output.done; + } + + let incremental_result = merkle_stage.execute( + &provider_rw, + ExecInput { target: Some(block), checkpoint: progress.map(StageCheckpoint::new) }, + ); if incremental_result.is_err() { tracing::warn!(target: "reth::cli", block, "Incremental calculation failed, retrying from scratch"); @@ -285,7 +274,7 @@ impl Command { let clean_input = ExecInput { target: Some(block), checkpoint: None }; loop { - let clean_result = merkle_stage.execute(&provider_rw, clean_input).await; + let clean_result = merkle_stage.execute(&provider_rw, clean_input); assert!(clean_result.is_ok(), "Clean state root calculation failed"); if clean_result.unwrap().done { break diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index 3db510564a2ae..a144a3bcea662 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -61,7 +61,7 @@ use reth_primitives::{ }; use reth_provider::{ providers::BlockchainProvider, BlockHashReader, BlockReader, CanonStateSubscriptions, - HeaderProvider, ProviderFactory, StageCheckpointReader, + HeaderProvider, HeaderSyncMode, ProviderFactory, StageCheckpointReader, }; use reth_prune::{segments::SegmentSet, Pruner}; use reth_revm::Factory; @@ -71,9 +71,9 @@ use reth_snapshot::HighestSnapshotsTracker; use reth_stages::{ prelude::*, stages::{ - AccountHashingStage, ExecutionStage, ExecutionStageThresholds, HeaderSyncMode, - IndexAccountHistoryStage, IndexStorageHistoryStage, MerkleStage, SenderRecoveryStage, - StorageHashingStage, TotalDifficultyStage, TransactionLookupStage, + AccountHashingStage, ExecutionStage, ExecutionStageThresholds, IndexAccountHistoryStage, + IndexStorageHistoryStage, MerkleStage, SenderRecoveryStage, StorageHashingStage, + TotalDifficultyStage, TransactionLookupStage, }, }; use reth_tasks::TaskExecutor; @@ -896,6 +896,7 @@ impl NodeCommand { .with_metrics_tx(metrics_tx.clone()) .add_stages( DefaultStages::new( + ProviderFactory::new(db.clone(), self.chain.clone()), header_mode, Arc::clone(&consensus), header_downloader, diff --git a/bin/reth/src/stage/dump/execution.rs b/bin/reth/src/stage/dump/execution.rs index 67eda8033cc77..5bc301bf8101e 100644 --- a/bin/reth/src/stage/dump/execution.rs +++ b/bin/reth/src/stage/dump/execution.rs @@ -100,16 +100,14 @@ async fn unwind_and_copy( let mut exec_stage = ExecutionStage::new_with_factory(Factory::new(db_tool.chain.clone())); - exec_stage - .unwind( - &provider, - UnwindInput { - unwind_to: from, - checkpoint: StageCheckpoint::new(tip_block_number), - bad_block: None, - }, - ) - .await?; + exec_stage.unwind( + &provider, + UnwindInput { + unwind_to: from, + checkpoint: StageCheckpoint::new(tip_block_number), + bad_block: None, + }, + )?; let unwind_inner_tx = provider.into_tx(); @@ -131,20 +129,13 @@ async fn dry_run( info!(target: "reth::cli", "Executing stage. 
[dry-run]"); let factory = ProviderFactory::new(&output_db, chain.clone()); - let provider = factory.provider_rw()?; let mut exec_stage = ExecutionStage::new_with_factory(Factory::new(chain.clone())); - exec_stage - .execute( - &provider, - reth_stages::ExecInput { - target: Some(to), - checkpoint: Some(StageCheckpoint::new(from)), - }, - ) - .await?; + let input = + reth_stages::ExecInput { target: Some(to), checkpoint: Some(StageCheckpoint::new(from)) }; + exec_stage.execute(&factory.provider_rw()?, input)?; - info!(target: "reth::cli", "Success."); + info!(target: "reth::cli", "Success"); Ok(()) } diff --git a/bin/reth/src/stage/dump/hashing_account.rs b/bin/reth/src/stage/dump/hashing_account.rs index 2a947d013e63d..7fe723257f695 100644 --- a/bin/reth/src/stage/dump/hashing_account.rs +++ b/bin/reth/src/stage/dump/hashing_account.rs @@ -22,7 +22,7 @@ pub(crate) async fn dump_hashing_account_stage( tx.import_table_with_range::(&db_tool.db.tx()?, Some(from), to) })??; - unwind_and_copy(db_tool, from, tip_block_number, &output_db).await?; + unwind_and_copy(db_tool, from, tip_block_number, &output_db)?; if should_run { dry_run(db_tool.chain.clone(), output_db, to, from).await?; @@ -32,7 +32,7 @@ pub(crate) async fn dump_hashing_account_stage( } /// Dry-run an unwind to FROM block and copy the necessary table data to the new database. -async fn unwind_and_copy( +fn unwind_and_copy( db_tool: &DbTool<'_, DB>, from: u64, tip_block_number: u64, @@ -42,16 +42,14 @@ async fn unwind_and_copy( let provider = factory.provider_rw()?; let mut exec_stage = AccountHashingStage::default(); - exec_stage - .unwind( - &provider, - UnwindInput { - unwind_to: from, - checkpoint: StageCheckpoint::new(tip_block_number), - bad_block: None, - }, - ) - .await?; + exec_stage.unwind( + &provider, + UnwindInput { + unwind_to: from, + checkpoint: StageCheckpoint::new(tip_block_number), + bad_block: None, + }, + )?; let unwind_inner_tx = provider.into_tx(); output_db.update(|tx| tx.import_table::(&unwind_inner_tx))??; @@ -70,23 +68,19 @@ async fn dry_run( let factory = ProviderFactory::new(&output_db, chain); let provider = factory.provider_rw()?; - let mut exec_stage = AccountHashingStage { + let mut stage = AccountHashingStage { clean_threshold: 1, // Forces hashing from scratch ..Default::default() }; - let mut exec_output = false; - while !exec_output { - exec_output = exec_stage - .execute( - &provider, - reth_stages::ExecInput { - target: Some(to), - checkpoint: Some(StageCheckpoint::new(from)), - }, - ) - .await? - .done; + loop { + let input = reth_stages::ExecInput { + target: Some(to), + checkpoint: Some(StageCheckpoint::new(from)), + }; + if stage.execute(&provider, input)?.done { + break + } } info!(target: "reth::cli", "Success."); diff --git a/bin/reth/src/stage/dump/hashing_storage.rs b/bin/reth/src/stage/dump/hashing_storage.rs index 0a8df0a6e44ae..3738180725291 100644 --- a/bin/reth/src/stage/dump/hashing_storage.rs +++ b/bin/reth/src/stage/dump/hashing_storage.rs @@ -17,7 +17,7 @@ pub(crate) async fn dump_hashing_storage_stage( ) -> Result<()> { let (output_db, tip_block_number) = setup(from, to, output_db, db_tool)?; - unwind_and_copy(db_tool, from, tip_block_number, &output_db).await?; + unwind_and_copy(db_tool, from, tip_block_number, &output_db)?; if should_run { dry_run(db_tool.chain.clone(), output_db, to, from).await?; @@ -27,7 +27,7 @@ pub(crate) async fn dump_hashing_storage_stage( } /// Dry-run an unwind to FROM block and copy the necessary table data to the new database. 
-async fn unwind_and_copy( +fn unwind_and_copy( db_tool: &DbTool<'_, DB>, from: u64, tip_block_number: u64, @@ -38,16 +38,14 @@ async fn unwind_and_copy( let mut exec_stage = StorageHashingStage::default(); - exec_stage - .unwind( - &provider, - UnwindInput { - unwind_to: from, - checkpoint: StageCheckpoint::new(tip_block_number), - bad_block: None, - }, - ) - .await?; + exec_stage.unwind( + &provider, + UnwindInput { + unwind_to: from, + checkpoint: StageCheckpoint::new(tip_block_number), + bad_block: None, + }, + )?; let unwind_inner_tx = provider.into_tx(); // TODO optimize we can actually just get the entries we need for both these tables @@ -69,23 +67,19 @@ async fn dry_run( let factory = ProviderFactory::new(&output_db, chain); let provider = factory.provider_rw()?; - let mut exec_stage = StorageHashingStage { + let mut stage = StorageHashingStage { clean_threshold: 1, // Forces hashing from scratch ..Default::default() }; - let mut exec_output = false; - while !exec_output { - exec_output = exec_stage - .execute( - &provider, - reth_stages::ExecInput { - target: Some(to), - checkpoint: Some(StageCheckpoint::new(from)), - }, - ) - .await? - .done; + loop { + let input = reth_stages::ExecInput { + target: Some(to), + checkpoint: Some(StageCheckpoint::new(from)), + }; + if stage.execute(&provider, input)?.done { + break + } } info!(target: "reth::cli", "Success."); diff --git a/bin/reth/src/stage/dump/merkle.rs b/bin/reth/src/stage/dump/merkle.rs index 55eef819f1c5f..4615b884c35ea 100644 --- a/bin/reth/src/stage/dump/merkle.rs +++ b/bin/reth/src/stage/dump/merkle.rs @@ -61,10 +61,10 @@ async fn unwind_and_copy( // Unwind hashes all the way to FROM - StorageHashingStage::default().unwind(&provider, unwind).await.unwrap(); - AccountHashingStage::default().unwind(&provider, unwind).await.unwrap(); + StorageHashingStage::default().unwind(&provider, unwind).unwrap(); + AccountHashingStage::default().unwind(&provider, unwind).unwrap(); - MerkleStage::default_unwind().unwind(&provider, unwind).await?; + MerkleStage::default_unwind().unwind(&provider, unwind)?; // Bring Plainstate to TO (hashing stage execution requires it) let mut exec_stage = ExecutionStage::new( @@ -78,26 +78,21 @@ async fn unwind_and_copy( PruneModes::all(), ); - exec_stage - .unwind( - &provider, - UnwindInput { - unwind_to: to, - checkpoint: StageCheckpoint::new(tip_block_number), - bad_block: None, - }, - ) - .await?; + exec_stage.unwind( + &provider, + UnwindInput { + unwind_to: to, + checkpoint: StageCheckpoint::new(tip_block_number), + bad_block: None, + }, + )?; // Bring hashes to TO - AccountHashingStage { clean_threshold: u64::MAX, commit_threshold: u64::MAX } .execute(&provider, execute_input) - .await .unwrap(); StorageHashingStage { clean_threshold: u64::MAX, commit_threshold: u64::MAX } .execute(&provider, execute_input) - .await .unwrap(); let unwind_inner_tx = provider.into_tx(); @@ -123,25 +118,23 @@ async fn dry_run( info!(target: "reth::cli", "Executing stage."); let factory = ProviderFactory::new(&output_db, chain); let provider = factory.provider_rw()?; - let mut exec_output = false; - while !exec_output { - exec_output = MerkleStage::Execution { - clean_threshold: u64::MAX, /* Forces updating the root instead of calculating - * from - * scratch */ + + let mut stage = MerkleStage::Execution { + // Forces updating the root instead of calculating from scratch + clean_threshold: u64::MAX, + }; + + loop { + let input = reth_stages::ExecInput { + target: Some(to), + checkpoint: 
Some(StageCheckpoint::new(from)), + }; + if stage.execute(&provider, input)?.done { + break } - .execute( - &provider, - reth_stages::ExecInput { - target: Some(to), - checkpoint: Some(StageCheckpoint::new(from)), - }, - ) - .await? - .done; } - info!(target: "reth::cli", "Success."); + info!(target: "reth::cli", "Success"); Ok(()) } diff --git a/bin/reth/src/stage/run.rs b/bin/reth/src/stage/run.rs index c66792668371f..5eaeaf361ad8a 100644 --- a/bin/reth/src/stage/run.rs +++ b/bin/reth/src/stage/run.rs @@ -12,6 +12,7 @@ use crate::{ version::SHORT_VERSION, }; use clap::Parser; +use futures::future::poll_fn; use reth_beacon_consensus::BeaconConsensus; use reth_config::Config; use reth_db::init_db; @@ -24,7 +25,7 @@ use reth_stages::{ IndexAccountHistoryStage, IndexStorageHistoryStage, MerkleStage, SenderRecoveryStage, StorageHashingStage, TransactionLookupStage, }, - ExecInput, ExecOutput, Stage, UnwindInput, + ExecInput, Stage, UnwindInput, }; use std::{any::Any, net::SocketAddr, path::PathBuf, sync::Arc}; use tracing::*; @@ -175,8 +176,8 @@ impl Command { .await?; let fetch_client = Arc::new(network.fetch_client().await?); - let stage = BodyStage { - downloader: BodiesDownloaderBuilder::default() + let stage = BodyStage::new( + BodiesDownloaderBuilder::default() .with_stream_batch_size(batch_size as usize) .with_request_limit(config.stages.bodies.downloader_request_limit) .with_max_buffered_blocks_size_bytes( @@ -187,8 +188,7 @@ impl Command { config.stages.bodies.downloader_max_concurrent_requests, ) .build(fetch_client, consensus.clone(), db.clone()), - consensus: consensus.clone(), - }; + ); (Box::new(stage), None) } @@ -242,7 +242,7 @@ impl Command { if !self.skip_unwind { while unwind.checkpoint.block_number > self.from { - let unwind_output = unwind_stage.unwind(&provider_rw, unwind).await?; + let unwind_output = unwind_stage.unwind(&provider_rw, unwind)?; unwind.checkpoint = unwind_output.checkpoint; if self.commit { @@ -257,19 +257,20 @@ impl Command { checkpoint: Some(checkpoint.with_block_number(self.from)), }; - while let ExecOutput { checkpoint: stage_progress, done: false } = - exec_stage.execute(&provider_rw, input).await? 
- { - input.checkpoint = Some(stage_progress); + loop { + poll_fn(|cx| exec_stage.poll_execute_ready(cx, input)).await?; + let output = exec_stage.execute(&provider_rw, input)?; + + input.checkpoint = Some(output.checkpoint); if self.commit { provider_rw.commit()?; provider_rw = factory.provider_rw()?; } - } - if self.commit { - provider_rw.commit()?; + if output.done { + break + } } Ok(()) diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index 092ce9f5e2186..f58ebf0133a54 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -26,17 +26,15 @@ use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_primitives::{BlockNumber, ChainSpec, PruneModes, Receipt, B256, U256}; use reth_provider::{ providers::BlockchainProvider, test_utils::TestExecutorFactory, BlockExecutor, - BundleStateWithReceipts, ExecutorFactory, ProviderFactory, PrunableBlockExecutor, + BundleStateWithReceipts, ExecutorFactory, HeaderSyncMode, ProviderFactory, + PrunableBlockExecutor, }; use reth_prune::Pruner; use reth_revm::Factory; use reth_rpc_types::engine::{ CancunPayloadFields, ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, }; -use reth_stages::{ - sets::DefaultStages, stages::HeaderSyncMode, test_utils::TestStages, ExecOutput, Pipeline, - StageError, -}; +use reth_stages::{sets::DefaultStages, test_utils::TestStages, ExecOutput, Pipeline, StageError}; use reth_tasks::TokioTaskExecutor; use std::{collections::VecDeque, sync::Arc}; use tokio::sync::{oneshot, watch}; @@ -502,6 +500,7 @@ where .into_task(); Pipeline::builder().add_stages(DefaultStages::new( + ProviderFactory::new(db.clone(), self.base_config.chain_spec.clone()), HeaderSyncMode::Tip(tip_rx.clone()), Arc::clone(&consensus), header_downloader, diff --git a/crates/interfaces/src/provider.rs b/crates/interfaces/src/provider.rs index f5f0a7fccf6b7..c2137b4b7134f 100644 --- a/crates/interfaces/src/provider.rs +++ b/crates/interfaces/src/provider.rs @@ -20,6 +20,9 @@ pub enum ProviderError { /// Error when recovering the sender for a transaction #[error("failed to recover sender for transaction")] SenderRecoveryError, + /// Inconsistent header gap. + #[error("inconsistent header gap in the database")] + InconsistentHeaderGap, /// The header number was not found for the given block hash. 
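
// A toy illustration of the driving pattern used above: await readiness with
// `poll_fn`, then run the synchronous `execute`, committing between iterations,
// until the stage reports `done`. These are hypothetical simplified types (the
// real `Stage` trait also receives a database provider and `ExecInput`), and the
// example assumes the tokio runtime the binaries already use:

use std::future::poll_fn;
use std::task::{Context, Poll};

struct ExecOutput {
    checkpoint: u64,
    done: bool,
}

struct ToyStage {
    progress: u64,
    target: u64,
}

impl ToyStage {
    /// Async half: e.g. wait on a downloader. Always ready in this toy.
    fn poll_execute_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), &'static str>> {
        Poll::Ready(Ok(()))
    }

    /// Sync half: all database writes would happen here.
    fn execute(&mut self) -> Result<ExecOutput, &'static str> {
        self.progress = (self.progress + 10).min(self.target);
        Ok(ExecOutput { checkpoint: self.progress, done: self.progress == self.target })
    }
}

#[tokio::main]
async fn main() -> Result<(), &'static str> {
    let mut stage = ToyStage { progress: 0, target: 30 };
    let mut checkpoint = 0;
    loop {
        poll_fn(|cx| stage.poll_execute_ready(cx)).await?;
        let output = stage.execute()?;
        checkpoint = output.checkpoint; // a real pipeline commits the provider here
        if output.done {
            break
        }
    }
    assert_eq!(checkpoint, 30);
    Ok(())
}
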
#[error("block hash {0} does not exist in Headers table")] BlockHashNotFound(BlockHash), diff --git a/crates/stages/benches/criterion.rs b/crates/stages/benches/criterion.rs index 9e55781b7e749..ad210165cbd87 100644 --- a/crates/stages/benches/criterion.rs +++ b/crates/stages/benches/criterion.rs @@ -12,7 +12,7 @@ use reth_stages::{ test_utils::TestTransaction, ExecInput, Stage, UnwindInput, }; -use std::{path::PathBuf, sync::Arc}; +use std::{future::poll_fn, path::PathBuf, sync::Arc}; mod setup; use setup::StageRange; @@ -138,7 +138,10 @@ fn measure_stage_with_path( let mut stage = stage.clone(); let factory = ProviderFactory::new(tx.tx.db(), MAINNET.clone()); let provider = factory.provider_rw().unwrap(); - stage.execute(&provider, input).await.unwrap(); + poll_fn(|cx| stage.poll_execute_ready(cx, input)) + .await + .and_then(|_| stage.execute(&provider, input)) + .unwrap(); provider.commit().unwrap(); }, ) diff --git a/crates/stages/benches/setup/mod.rs b/crates/stages/benches/setup/mod.rs index f5c45be9b96e6..806f2d78fe49e 100644 --- a/crates/stages/benches/setup/mod.rs +++ b/crates/stages/benches/setup/mod.rs @@ -47,7 +47,6 @@ pub(crate) fn stage_unwind>( // Clear previous run stage .unwind(&provider, unwind) - .await .map_err(|e| { format!( "{e}\nMake sure your test database at `{}` isn't too old and incompatible with newer stage changes.", @@ -67,22 +66,20 @@ pub(crate) fn unwind_hashes>( ) { let (input, unwind) = range; - tokio::runtime::Runtime::new().unwrap().block_on(async { - let mut stage = stage.clone(); - let factory = ProviderFactory::new(tx.tx.db(), MAINNET.clone()); - let provider = factory.provider_rw().unwrap(); + let mut stage = stage.clone(); + let factory = ProviderFactory::new(tx.tx.db(), MAINNET.clone()); + let provider = factory.provider_rw().unwrap(); - StorageHashingStage::default().unwind(&provider, unwind).await.unwrap(); - AccountHashingStage::default().unwind(&provider, unwind).await.unwrap(); + StorageHashingStage::default().unwind(&provider, unwind).unwrap(); + AccountHashingStage::default().unwind(&provider, unwind).unwrap(); - // Clear previous run - stage.unwind(&provider, unwind).await.unwrap(); + // Clear previous run + stage.unwind(&provider, unwind).unwrap(); - AccountHashingStage::default().execute(&provider, input).await.unwrap(); - StorageHashingStage::default().execute(&provider, input).await.unwrap(); + AccountHashingStage::default().execute(&provider, input).unwrap(); + StorageHashingStage::default().execute(&provider, input).unwrap(); - provider.commit().unwrap(); - }); + provider.commit().unwrap(); } // Helper for generating testdata for the benchmarks. diff --git a/crates/stages/src/error.rs b/crates/stages/src/error.rs index 180a8ca5ae24a..8795868d080cb 100644 --- a/crates/stages/src/error.rs +++ b/crates/stages/src/error.rs @@ -50,6 +50,9 @@ pub enum StageError { #[source] error: Box, }, + /// The headers stage is missing sync gap. + #[error("missing sync gap")] + MissingSyncGap, /// The stage encountered a database error. #[error("internal database error occurred: {0}")] Database(#[from] DbError), @@ -94,6 +97,7 @@ impl StageError { StageError::Download(_) | StageError::DatabaseIntegrity(_) | StageError::StageCheckpoint(_) | + StageError::MissingSyncGap | StageError::ChannelClosed | StageError::Fatal(_) ) diff --git a/crates/stages/src/lib.rs b/crates/stages/src/lib.rs index f30471182c8a6..bf9ba9e8dd946 100644 --- a/crates/stages/src/lib.rs +++ b/crates/stages/src/lib.rs @@ -22,8 +22,11 @@ //! 
# use reth_primitives::{PeerId, MAINNET, B256}; //! # use reth_stages::Pipeline; //! # use reth_stages::sets::DefaultStages; -//! # use reth_stages::stages::HeaderSyncMode; //! # use tokio::sync::watch; +//! # use reth_provider::ProviderFactory; +//! # use reth_provider::HeaderSyncMode; +//! # +//! # let chain_spec = MAINNET.clone(); //! # let consensus: Arc = Arc::new(TestConsensus::default()); //! # let headers_downloader = ReverseHeadersDownloaderBuilder::default().build( //! # Arc::new(TestHeadersClient::default()), @@ -36,19 +39,20 @@ //! # db.clone() //! # ); //! # let (tip_tx, tip_rx) = watch::channel(B256::default()); -//! # let factory = Factory::new(MAINNET.clone()); +//! # let factory = Factory::new(chain_spec.clone()); //! // Create a pipeline that can fully sync //! # let pipeline = //! Pipeline::builder() //! .with_tip_sender(tip_tx) //! .add_stages(DefaultStages::new( +//! ProviderFactory::new(db.clone(), chain_spec.clone()), //! HeaderSyncMode::Tip(tip_rx), //! consensus, //! headers_downloader, //! bodies_downloader, //! factory, //! )) -//! .build(db, MAINNET.clone()); +//! .build(db, chain_spec.clone()); //! ``` //! //! ## Feature Flags diff --git a/crates/stages/src/pipeline/mod.rs b/crates/stages/src/pipeline/mod.rs index f5955a5dffbc9..718809abc6e9e 100644 --- a/crates/stages/src/pipeline/mod.rs +++ b/crates/stages/src/pipeline/mod.rs @@ -5,11 +5,13 @@ use crate::{ use futures_util::Future; use reth_db::database::Database; use reth_primitives::{ - constants::BEACON_CONSENSUS_REORG_UNWIND_DEPTH, stage::StageId, BlockNumber, ChainSpec, B256, + constants::BEACON_CONSENSUS_REORG_UNWIND_DEPTH, + stage::{StageCheckpoint, StageId}, + BlockNumber, ChainSpec, B256, }; use reth_provider::{ProviderFactory, StageCheckpointReader, StageCheckpointWriter}; use reth_tokio_util::EventListeners; -use std::{pin::Pin, sync::Arc}; +use std::{future::poll_fn, pin::Pin, sync::Arc}; use tokio::sync::watch; use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::*; @@ -217,10 +219,7 @@ where let stage_id = stage.id(); trace!(target: "sync::pipeline", stage = %stage_id, "Executing stage"); - let next = self - .execute_stage_to_completion(previous_stage, stage_index) - .instrument(info_span!("execute", stage = %stage_id)) - .await?; + let next = self.execute_stage_to_completion(previous_stage, stage_index).await?; trace!(target: "sync::pipeline", stage = %stage_id, ?next, "Completed stage"); @@ -232,7 +231,7 @@ where } ControlFlow::Continue { block_number } => self.progress.update(block_number), ControlFlow::Unwind { target, bad_block } => { - self.unwind(target, Some(bad_block.number)).await?; + self.unwind(target, Some(bad_block.number))?; return Ok(ControlFlow::Unwind { target, bad_block }) } } @@ -254,7 +253,7 @@ where /// Unwind the stages to the target block. /// /// If the unwind is due to a bad block the number of that block should be specified. 
- pub async fn unwind( + pub fn unwind( &mut self, to: BlockNumber, bad_block: Option, @@ -293,7 +292,7 @@ where let input = UnwindInput { checkpoint, unwind_to: to, bad_block }; self.listeners.notify(PipelineEvent::Unwinding { stage_id, input }); - let output = stage.unwind(&provider_rw, input).await; + let output = stage.unwind(&provider_rw, input); match output { Ok(unwind_output) => { checkpoint = unwind_output.checkpoint; @@ -346,10 +345,9 @@ where let target = self.max_block.or(previous_stage); let factory = ProviderFactory::new(&self.db, self.chain_spec.clone()); - let mut provider_rw = factory.provider_rw()?; loop { - let prev_checkpoint = provider_rw.get_stage_checkpoint(stage_id)?; + let prev_checkpoint = factory.get_stage_checkpoint(stage_id)?; let stage_reached_max_block = prev_checkpoint .zip(self.max_block) @@ -370,6 +368,16 @@ where }) } + let exec_input = ExecInput { target, checkpoint: prev_checkpoint }; + + if let Err(err) = poll_fn(|cx| stage.poll_execute_ready(cx, exec_input)).await { + self.listeners.notify(PipelineEvent::Error { stage_id }); + match on_stage_error(&factory, stage_id, prev_checkpoint, err)? { + Some(ctrl) => return Ok(ctrl), + None => continue, + }; + } + self.listeners.notify(PipelineEvent::Running { pipeline_stages_progress: event::PipelineStagesProgress { current: stage_index + 1, @@ -379,10 +387,8 @@ where checkpoint: prev_checkpoint, }); - match stage - .execute(&provider_rw, ExecInput { target, checkpoint: prev_checkpoint }) - .await - { + let provider_rw = factory.provider_rw()?; + match stage.execute(&provider_rw, exec_input) { Ok(out @ ExecOutput { checkpoint, done }) => { made_progress |= checkpoint.block_number != prev_checkpoint.unwrap_or_default().block_number; @@ -425,9 +431,7 @@ where result: out.clone(), }); - // TODO: Make the commit interval configurable provider_rw.commit()?; - provider_rw = factory.provider_rw()?; if done { let block_number = checkpoint.block_number; @@ -439,94 +443,93 @@ where } } Err(err) => { + drop(provider_rw); self.listeners.notify(PipelineEvent::Error { stage_id }); - - let out = if let StageError::DetachedHead { local_head, header, error } = err { - warn!(target: "sync::pipeline", stage = %stage_id, ?local_head, ?header, ?error, "Stage encountered detached head"); - - // We unwind because of a detached head. - let unwind_to = local_head - .number - .saturating_sub(BEACON_CONSENSUS_REORG_UNWIND_DEPTH) - .max(1); - Ok(ControlFlow::Unwind { target: unwind_to, bad_block: local_head }) - } else if let StageError::Block { block, error } = err { - match error { - BlockErrorKind::Validation(validation_error) => { - error!( - target: "sync::pipeline", - stage = %stage_id, - bad_block = %block.number, - "Stage encountered a validation error: {validation_error}" - ); - - // FIXME: When handling errors, we do not commit the database - // transaction. This leads to the Merkle - // stage not clearing its checkpoint, and - // restarting from an invalid place. - drop(provider_rw); - provider_rw = factory.provider_rw()?; - provider_rw.save_stage_checkpoint_progress( - StageId::MerkleExecute, - vec![], - )?; - provider_rw.save_stage_checkpoint( - StageId::MerkleExecute, - prev_checkpoint.unwrap_or_default(), - )?; - provider_rw.commit()?; - - // We unwind because of a validation error. If the unwind itself - // fails, we bail entirely, - // otherwise we restart the execution loop from the - // beginning. 
- Ok(ControlFlow::Unwind { - target: prev_checkpoint.unwrap_or_default().block_number, - bad_block: block, - }) - } - BlockErrorKind::Execution(execution_error) => { - error!( - target: "sync::pipeline", - stage = %stage_id, - bad_block = %block.number, - "Stage encountered an execution error: {execution_error}" - ); - - // We unwind because of an execution error. If the unwind itself - // fails, we bail entirely, - // otherwise we restart - // the execution loop from the beginning. - Ok(ControlFlow::Unwind { - target: prev_checkpoint.unwrap_or_default().block_number, - bad_block: block, - }) - } - } - } else if err.is_fatal() { - error!( - target: "sync::pipeline", - stage = %stage_id, - "Stage encountered a fatal error: {err}." - ); - Err(err.into()) - } else { - // On other errors we assume they are recoverable if we discard the - // transaction and run the stage again. - warn!( - target: "sync::pipeline", - stage = %stage_id, - "Stage encountered a non-fatal error: {err}. Retrying..." - ); - continue - }; - return out + if let Some(ctrl) = on_stage_error(&factory, stage_id, prev_checkpoint, err)? { + return Ok(ctrl) + } } } } } } +fn on_stage_error( + factory: &ProviderFactory, + stage_id: StageId, + prev_checkpoint: Option, + err: StageError, +) -> Result, PipelineError> { + if let StageError::DetachedHead { local_head, header, error } = err { + warn!(target: "sync::pipeline", stage = %stage_id, ?local_head, ?header, ?error, "Stage encountered detached head"); + + // We unwind because of a detached head. + let unwind_to = + local_head.number.saturating_sub(BEACON_CONSENSUS_REORG_UNWIND_DEPTH).max(1); + Ok(Some(ControlFlow::Unwind { target: unwind_to, bad_block: local_head })) + } else if let StageError::Block { block, error } = err { + match error { + BlockErrorKind::Validation(validation_error) => { + error!( + target: "sync::pipeline", + stage = %stage_id, + bad_block = %block.number, + "Stage encountered a validation error: {validation_error}" + ); + + // FIXME: When handling errors, we do not commit the database transaction. This + // leads to the Merkle stage not clearing its checkpoint, and restarting from an + // invalid place. + let provider_rw = factory.provider_rw()?; + provider_rw.save_stage_checkpoint_progress(StageId::MerkleExecute, vec![])?; + provider_rw.save_stage_checkpoint( + StageId::MerkleExecute, + prev_checkpoint.unwrap_or_default(), + )?; + provider_rw.commit()?; + + // We unwind because of a validation error. If the unwind itself + // fails, we bail entirely, + // otherwise we restart the execution loop from the + // beginning. + Ok(Some(ControlFlow::Unwind { + target: prev_checkpoint.unwrap_or_default().block_number, + bad_block: block, + })) + } + BlockErrorKind::Execution(execution_error) => { + error!( + target: "sync::pipeline", + stage = %stage_id, + bad_block = %block.number, + "Stage encountered an execution error: {execution_error}" + ); + + // We unwind because of an execution error. If the unwind itself + // fails, we bail entirely, + // otherwise we restart + // the execution loop from the beginning. + Ok(Some(ControlFlow::Unwind { + target: prev_checkpoint.unwrap_or_default().block_number, + bad_block: block, + })) + } + } + } else if err.is_fatal() { + error!(target: "sync::pipeline", stage = %stage_id, "Stage encountered a fatal error: {err}"); + Err(err.into()) + } else { + // On other errors we assume they are recoverable if we discard the + // transaction and run the stage again. 
+ warn!( + target: "sync::pipeline", + stage = %stage_id, + "Stage encountered a non-fatal error: {err}. Retrying..." + ); + Ok(None) + } +} + impl std::fmt::Debug for Pipeline { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("Pipeline") @@ -660,7 +663,7 @@ mod tests { pipeline.run().await.expect("Could not run pipeline"); // Unwind - pipeline.unwind(1, None).await.expect("Could not unwind pipeline"); + pipeline.unwind(1, None).expect("Could not unwind pipeline"); }); // Check that the stages were unwound in reverse order @@ -764,7 +767,7 @@ mod tests { pipeline.run().await.expect("Could not run pipeline"); // Unwind - pipeline.unwind(50, None).await.expect("Could not unwind pipeline"); + pipeline.unwind(50, None).expect("Could not unwind pipeline"); }); // Check that the stages were unwound in reverse order diff --git a/crates/stages/src/sets.rs b/crates/stages/src/sets.rs index f49714e0133e0..5a9ac7942449c 100644 --- a/crates/stages/src/sets.rs +++ b/crates/stages/src/sets.rs @@ -38,7 +38,7 @@ //! ``` use crate::{ stages::{ - AccountHashingStage, BodyStage, ExecutionStage, FinishStage, HeaderStage, HeaderSyncMode, + AccountHashingStage, BodyStage, ExecutionStage, FinishStage, HeaderStage, IndexAccountHistoryStage, IndexStorageHistoryStage, MerkleStage, SenderRecoveryStage, StorageHashingStage, TotalDifficultyStage, TransactionLookupStage, }, @@ -49,7 +49,7 @@ use reth_interfaces::{ consensus::Consensus, p2p::{bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader}, }; -use reth_provider::ExecutorFactory; +use reth_provider::{ExecutorFactory, HeaderSyncGapProvider, HeaderSyncMode}; use std::sync::Arc; /// A set containing all stages to run a fully syncing instance of reth. @@ -75,16 +75,17 @@ use std::sync::Arc; /// - [`IndexAccountHistoryStage`] /// - [`FinishStage`] #[derive(Debug)] -pub struct DefaultStages { +pub struct DefaultStages { /// Configuration for the online stages - online: OnlineStages, + online: OnlineStages, /// Executor factory needs for execution stage executor_factory: EF, } -impl DefaultStages { +impl DefaultStages { /// Create a new set of default stages with default values. pub fn new( + provider: Provider, header_mode: HeaderSyncMode, consensus: Arc, header_downloader: H, @@ -95,13 +96,19 @@ impl DefaultStages { EF: ExecutorFactory, { Self { - online: OnlineStages::new(header_mode, consensus, header_downloader, body_downloader), + online: OnlineStages::new( + provider, + header_mode, + consensus, + header_downloader, + body_downloader, + ), executor_factory, } } } -impl DefaultStages +impl DefaultStages where EF: ExecutorFactory, { @@ -114,9 +121,10 @@ where } } -impl StageSet for DefaultStages +impl StageSet for DefaultStages where DB: Database, + Provider: HeaderSyncGapProvider + 'static, H: HeaderDownloader + 'static, B: BodyDownloader + 'static, EF: ExecutorFactory, @@ -131,7 +139,9 @@ where /// These stages *can* be run without network access if the specified downloaders are /// themselves offline. #[derive(Debug)] -pub struct OnlineStages { +pub struct OnlineStages { + /// Sync gap provider for the headers stage. + provider: Provider, /// The sync mode for the headers stage. header_mode: HeaderSyncMode, /// The consensus engine used to validate incoming data. @@ -142,60 +152,64 @@ pub struct OnlineStages { body_downloader: B, } -impl OnlineStages { +impl OnlineStages { /// Create a new set of online stages with default values. 
pub fn new( + provider: Provider, header_mode: HeaderSyncMode, consensus: Arc, header_downloader: H, body_downloader: B, ) -> Self { - Self { header_mode, consensus, header_downloader, body_downloader } + Self { provider, header_mode, consensus, header_downloader, body_downloader } } } -impl OnlineStages +impl OnlineStages where + Provider: HeaderSyncGapProvider + 'static, H: HeaderDownloader + 'static, B: BodyDownloader + 'static, { /// Create a new builder using the given headers stage. pub fn builder_with_headers( - headers: HeaderStage, + headers: HeaderStage, body_downloader: B, consensus: Arc, ) -> StageSetBuilder { StageSetBuilder::default() .add_stage(headers) .add_stage(TotalDifficultyStage::new(consensus.clone())) - .add_stage(BodyStage { downloader: body_downloader, consensus }) + .add_stage(BodyStage::new(body_downloader)) } /// Create a new builder using the given bodies stage. pub fn builder_with_bodies( bodies: BodyStage, + provider: Provider, mode: HeaderSyncMode, header_downloader: H, consensus: Arc, ) -> StageSetBuilder { StageSetBuilder::default() - .add_stage(HeaderStage::new(header_downloader, mode)) + .add_stage(HeaderStage::new(provider, header_downloader, mode)) .add_stage(TotalDifficultyStage::new(consensus.clone())) .add_stage(bodies) } } -impl StageSet for OnlineStages +impl StageSet for OnlineStages where DB: Database, + Provider: HeaderSyncGapProvider + 'static, H: HeaderDownloader + 'static, B: BodyDownloader + 'static, { fn builder(self) -> StageSetBuilder { StageSetBuilder::default() - .add_stage(HeaderStage::new(self.header_downloader, self.header_mode)) + .add_stage(HeaderStage::new(self.provider, self.header_downloader, self.header_mode)) .add_stage(TotalDifficultyStage::new(self.consensus.clone())) - .add_stage(BodyStage { downloader: self.body_downloader, consensus: self.consensus }) + .add_stage(BodyStage::new(self.body_downloader)) } } diff --git a/crates/stages/src/stage.rs b/crates/stages/src/stage.rs index 95e397cbe8a18..55a491a83c9d5 100644 --- a/crates/stages/src/stage.rs +++ b/crates/stages/src/stage.rs @@ -1,5 +1,4 @@ use crate::error::StageError; -use async_trait::async_trait; use reth_db::database::Database; use reth_primitives::{ stage::{StageCheckpoint, StageId}, @@ -9,6 +8,7 @@ use reth_provider::{BlockReader, DatabaseProviderRW, ProviderError, Transactions use std::{ cmp::{max, min}, ops::{Range, RangeInclusive}, + task::{Context, Poll}, }; /// Stage execution input, see [Stage::execute]. @@ -189,22 +189,55 @@ pub struct UnwindOutput { /// Stages are executed as part of a pipeline where they are executed serially. /// /// Stages receive [`DatabaseProviderRW`]. -#[async_trait] pub trait Stage: Send + Sync { /// Get the ID of the stage. /// /// Stage IDs must be unique. fn id(&self) -> StageId; + /// Returns `Poll::Ready(Ok(()))` when the stage is ready to execute the given range. + /// + /// This method is heavily inspired by [tower](https://crates.io/crates/tower)'s `Service` trait. + /// Any asynchronous tasks or communication should be handled in `poll_ready`, e.g. moving + /// downloaded items from downloaders to an internal buffer in the stage. + /// + /// If the stage has any pending external state, then `Poll::Pending` is returned. + /// + /// If `Poll::Ready(Err(_))` is returned, the stage may not be able to execute anymore + /// depending on the specific error. In that case, an unwind must be issued instead. + /// + /// Once `Poll::Ready(Ok(()))` is returned, the stage may be executed once using `execute`. 
+ /// Until the stage has been executed, repeated calls to `poll_ready` must return either + /// `Poll::Ready(Ok(()))` or `Poll::Ready(Err(_))`. + /// + /// Note that `poll_ready` may reserve shared resources that are consumed in a subsequent call + /// of `execute`, e.g. internal buffers. It is crucial for implementations to not assume that + /// `execute` will always be invoked and to ensure that those resources are appropriately + /// released if the stage is dropped before `execute` is called. + /// + /// For the same reason, it is also important that any shared resources do not exhibit + /// unbounded growth on repeated calls to `poll_ready`. + /// + /// Unwinds may happen without consulting `poll_ready` first. + fn poll_execute_ready( + &mut self, + _cx: &mut Context<'_>, + _input: ExecInput, + ) -> Poll> { + Poll::Ready(Ok(())) + } + /// Execute the stage. - async fn execute( + /// It is expected that the stage will write all necessary data to the database + /// upon invoking this method. + fn execute( &mut self, provider: &DatabaseProviderRW<'_, &DB>, input: ExecInput, ) -> Result; /// Unwind the stage. - async fn unwind( + fn unwind( &mut self, provider: &DatabaseProviderRW<'_, &DB>, input: UnwindInput, diff --git a/crates/stages/src/stages/bodies.rs b/crates/stages/src/stages/bodies.rs index 8da7e6511ed3a..cb908ebf95a9e 100644 --- a/crates/stages/src/stages/bodies.rs +++ b/crates/stages/src/stages/bodies.rs @@ -8,13 +8,10 @@ use reth_db::{ transaction::{DbTx, DbTxMut}, DatabaseError, }; -use reth_interfaces::{ - consensus::Consensus, - p2p::bodies::{downloader::BodyDownloader, response::BlockResponse}, -}; +use reth_interfaces::p2p::bodies::{downloader::BodyDownloader, response::BlockResponse}; use reth_primitives::stage::{EntitiesCheckpoint, StageCheckpoint, StageId}; use reth_provider::DatabaseProviderRW; -use std::sync::Arc; +use std::task::{ready, Context, Poll}; use tracing::*; // TODO(onbjerg): Metrics and events (gradual status for e.g. CLI) @@ -51,21 +48,55 @@ use tracing::*; #[derive(Debug)] pub struct BodyStage { /// The body downloader. - pub downloader: D, - /// The consensus engine. - pub consensus: Arc, + downloader: D, + /// Block response buffer. + buffer: Vec, +} + +impl BodyStage { + /// Create new bodies stage from downloader. + pub fn new(downloader: D) -> Self { + Self { downloader, buffer: Vec::new() } + } } -#[async_trait::async_trait] impl Stage for BodyStage { /// Return the id of the stage fn id(&self) -> StageId { StageId::Bodies } + fn poll_execute_ready( + &mut self, + cx: &mut Context<'_>, + input: ExecInput, + ) -> Poll> { + if input.target_reached() || !self.buffer.is_empty() { + return Poll::Ready(Ok(())) + } + + // Update the header range on the downloader + self.downloader.set_download_range(input.next_block_range())?; + + // Poll next downloader item. + let maybe_next_result = ready!(self.downloader.try_poll_next_unpin(cx)); + + // Task downloader can return `None` only if the response relaying channel was closed. This + // is a fatal error to prevent the pipeline from running forever. + let response = match maybe_next_result { + Some(Ok(downloaded)) => { + self.buffer.extend(downloaded); + Ok(()) + } + Some(Err(err)) => Err(err.into()), + None => Err(StageError::ChannelClosed), + }; + Poll::Ready(response) + } + /// Download block bodies from the last checkpoint for this stage up until the latest synced /// header, limited by the stage's batch size. 
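
// The buffering contract described in the `poll_execute_ready` docs, in
// miniature: the async half fills an internal buffer from a downloader and the
// sync half drains it. These are hypothetical stand-ins for the downloader and
// block types; the real `BodyStage` above follows this same shape:

use std::collections::VecDeque;
use std::task::{ready, Context, Poll};

struct Downloader {
    pending: VecDeque<Vec<u64>>,
}

impl Downloader {
    fn poll_next(&mut self, _cx: &mut Context<'_>) -> Poll<Option<Vec<u64>>> {
        Poll::Ready(self.pending.pop_front())
    }
}

struct BufferedStage {
    downloader: Downloader,
    buffer: Vec<u64>,
}

impl BufferedStage {
    fn poll_execute_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), &'static str>> {
        // Items buffered by an earlier poll are reused, so repeated calls do
        // not grow the buffer without bound.
        if !self.buffer.is_empty() {
            return Poll::Ready(Ok(()))
        }
        match ready!(self.downloader.poll_next(cx)) {
            Some(batch) => {
                self.buffer.extend(batch);
                Poll::Ready(Ok(()))
            }
            // A closed channel is fatal: the pipeline must not spin forever.
            None => Poll::Ready(Err("channel closed")),
        }
    }

    /// Drains the buffer; all database writes would happen here.
    fn execute(&mut self) -> usize {
        self.buffer.drain(..).count()
    }
}

#[tokio::main]
async fn main() -> Result<(), &'static str> {
    let mut stage = BufferedStage {
        downloader: Downloader { pending: VecDeque::from([vec![1, 2, 3]]) },
        buffer: Vec::new(),
    };
    std::future::poll_fn(|cx| stage.poll_execute_ready(cx)).await?;
    assert_eq!(stage.execute(), 3);
    Ok(())
}
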
- async fn execute( + fn execute( &mut self, provider: &DatabaseProviderRW<'_, &DB>, input: ExecInput, @@ -73,11 +104,7 @@ impl Stage for BodyStage { if input.target_reached() { return Ok(ExecOutput::done(input.checkpoint())) } - - let range = input.next_block_range(); - // Update the header range on the downloader - self.downloader.set_download_range(range.clone())?; - let (from_block, to_block) = range.into_inner(); + let (from_block, to_block) = input.next_block_range().into_inner(); // Cursors used to write bodies, ommers and transactions let tx = provider.tx_ref(); @@ -91,16 +118,9 @@ impl Stage for BodyStage { let mut next_tx_num = tx_cursor.last()?.map(|(id, _)| id + 1).unwrap_or_default(); debug!(target: "sync::stages::bodies", stage_progress = from_block, target = to_block, start_tx_id = next_tx_num, "Commencing sync"); - - // Task downloader can return `None` only if the response relaying channel was closed. This - // is a fatal error to prevent the pipeline from running forever. - let downloaded_bodies = - self.downloader.try_next().await?.ok_or(StageError::ChannelClosed)?; - - trace!(target: "sync::stages::bodies", bodies_len = downloaded_bodies.len(), "Writing blocks"); - + trace!(target: "sync::stages::bodies", bodies_len = self.buffer.len(), "Writing blocks"); let mut highest_block = from_block; - for response in downloaded_bodies { + for response in self.buffer.drain(..) { // Write block let block_number = response.block_number(); @@ -161,11 +181,13 @@ impl Stage for BodyStage { } /// Unwind the stage. - async fn unwind( + fn unwind( &mut self, provider: &DatabaseProviderRW<'_, &DB>, input: UnwindInput, ) -> Result { + self.buffer.clear(); + let tx = provider.tx_ref(); // Cursors to unwind bodies, ommers let mut body_cursor = tx.cursor_write::()?; @@ -476,7 +498,6 @@ mod tests { test_utils::{ generators, generators::{random_block_range, random_signed_tx}, - TestConsensus, }, }; use reth_primitives::{BlockBody, BlockNumber, SealedBlock, SealedHeader, TxNumber, B256}; @@ -505,7 +526,6 @@ mod tests { /// A helper struct for running the [BodyStage]. pub(crate) struct BodyTestRunner { - pub(crate) consensus: Arc, responses: HashMap, tx: TestTransaction, batch_size: u64, @@ -514,7 +534,6 @@ mod tests { impl Default for BodyTestRunner { fn default() -> Self { Self { - consensus: Arc::new(TestConsensus::default()), responses: HashMap::default(), tx: TestTransaction::default(), batch_size: 1000, @@ -540,14 +559,11 @@ mod tests { } fn stage(&self) -> Self::S { - BodyStage { - downloader: TestBodyDownloader::new( - self.tx.inner_raw(), - self.responses.clone(), - self.batch_size, - ), - consensus: self.consensus.clone(), - } + BodyStage::new(TestBodyDownloader::new( + self.tx.inner_raw(), + self.responses.clone(), + self.batch_size, + )) } } diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index a53bef0702119..d6ffc67df32d1 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -331,7 +331,6 @@ fn calculate_gas_used_from_headers( Ok(gas_total) } -#[async_trait::async_trait] impl Stage for ExecutionStage { /// Return the id of the stage fn id(&self) -> StageId { @@ -339,7 +338,7 @@ impl Stage for ExecutionStage { } /// Execute the stage - async fn execute( + fn execute( &mut self, provider: &DatabaseProviderRW<'_, &DB>, input: ExecInput, @@ -348,7 +347,7 @@ impl Stage for ExecutionStage { } /// Unwind the stage. 
- async fn unwind( + fn unwind( &mut self, provider: &DatabaseProviderRW<'_, &DB>, input: UnwindInput, @@ -685,8 +684,8 @@ mod tests { provider.commit().unwrap(); let provider = factory.provider_rw().unwrap(); - let mut execution_stage = stage(); - let output = execution_stage.execute(&provider, input).await.unwrap(); + let mut execution_stage: ExecutionStage = stage(); + let output = execution_stage.execute(&provider, input).unwrap(); provider.commit().unwrap(); assert_matches!(output, ExecOutput { checkpoint: StageCheckpoint { @@ -787,7 +786,7 @@ mod tests { // execute let provider = factory.provider_rw().unwrap(); let mut execution_stage = stage(); - let result = execution_stage.execute(&provider, input).await.unwrap(); + let result = execution_stage.execute(&provider, input).unwrap(); provider.commit().unwrap(); let provider = factory.provider_rw().unwrap(); @@ -797,7 +796,6 @@ mod tests { &provider, UnwindInput { checkpoint: result.checkpoint, unwind_to: 0, bad_block: None }, ) - .await .unwrap(); assert_matches!(result, UnwindOutput { @@ -886,7 +884,7 @@ mod tests { // execute let provider = factory.provider_rw().unwrap(); let mut execution_stage = stage(); - let _ = execution_stage.execute(&provider, input).await.unwrap(); + let _ = execution_stage.execute(&provider, input).unwrap(); provider.commit().unwrap(); // assert unwind stage diff --git a/crates/stages/src/stages/finish.rs b/crates/stages/src/stages/finish.rs index 751c4e37bfe13..d0a63e8905711 100644 --- a/crates/stages/src/stages/finish.rs +++ b/crates/stages/src/stages/finish.rs @@ -11,13 +11,12 @@ use reth_provider::DatabaseProviderRW; #[non_exhaustive] pub struct FinishStage; -#[async_trait::async_trait] impl Stage for FinishStage { fn id(&self) -> StageId { StageId::Finish } - async fn execute( + fn execute( &mut self, _provider: &DatabaseProviderRW<'_, &DB>, input: ExecInput, @@ -25,7 +24,7 @@ impl Stage for FinishStage { Ok(ExecOutput { checkpoint: StageCheckpoint::new(input.target()), done: true }) } - async fn unwind( + fn unwind( &mut self, _provider: &DatabaseProviderRW<'_, &DB>, input: UnwindInput, diff --git a/crates/stages/src/stages/hashing_account.rs b/crates/stages/src/stages/hashing_account.rs index 896bfc9762b11..4eab05e0941b4 100644 --- a/crates/stages/src/stages/hashing_account.rs +++ b/crates/stages/src/stages/hashing_account.rs @@ -21,8 +21,8 @@ use std::{ cmp::max, fmt::Debug, ops::{Range, RangeInclusive}, + sync::mpsc, }; -use tokio::sync::mpsc; use tracing::*; /// Account hashing stage hashes plain account. @@ -125,7 +125,6 @@ impl AccountHashingStage { } } -#[async_trait::async_trait] impl Stage for AccountHashingStage { /// Return the id of the stage fn id(&self) -> StageId { @@ -133,7 +132,7 @@ impl Stage for AccountHashingStage { } /// Execute the stage. - async fn execute( + fn execute( &mut self, provider: &DatabaseProviderRW<'_, &DB>, input: ExecInput, @@ -190,7 +189,7 @@ impl Stage for AccountHashingStage { ) { // An _unordered_ channel to receive results from a rayon job - let (tx, rx) = mpsc::unbounded_channel(); + let (tx, rx) = mpsc::channel(); channels.push(rx); let chunk = chunk.collect::, _>>()?; @@ -205,8 +204,8 @@ impl Stage for AccountHashingStage { let mut hashed_batch = Vec::with_capacity(self.commit_threshold as usize); // Iterate over channels and append the hashed accounts. 
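
// The move from `tokio::sync::mpsc` to `std::sync::mpsc` below works because the
// collecting side no longer needs an async runtime: a blocking `recv()` loop
// drains each worker's channel. A small sketch of that fan-out/collect shape,
// using `std::thread` where the stage dispatches rayon jobs:

use std::sync::mpsc;
use std::thread;

fn main() {
    let mut channels = Vec::new();

    for chunk in [vec![1u64, 2], vec![3, 4]] {
        // An _unordered_ channel to receive results from one worker.
        let (tx, rx) = mpsc::channel();
        channels.push(rx);
        thread::spawn(move || {
            for value in chunk {
                // Stand-in for hashing one account.
                let _ = tx.send(value.wrapping_mul(31));
            }
        });
    }

    // Iterate over channels and append the hashed values; `recv` returns `Err`
    // once a worker drops its sender, ending that inner loop.
    let mut hashed_batch = Vec::new();
    for channel in channels {
        while let Ok(hashed) = channel.recv() {
            hashed_batch.push(hashed);
        }
    }
    assert_eq!(hashed_batch.len(), 4);
}
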
- for mut channel in channels { - while let Some(hashed) = channel.recv().await { + for channel in channels { + while let Ok(hashed) = channel.recv() { hashed_batch.push(hashed); } } @@ -265,7 +264,7 @@ impl Stage for AccountHashingStage { } /// Unwind the stage. - async fn unwind( + fn unwind( &mut self, provider: &DatabaseProviderRW<'_, &DB>, input: UnwindInput, diff --git a/crates/stages/src/stages/hashing_storage.rs b/crates/stages/src/stages/hashing_storage.rs index 2580b58c9783b..da2fd38aced2e 100644 --- a/crates/stages/src/stages/hashing_storage.rs +++ b/crates/stages/src/stages/hashing_storage.rs @@ -44,7 +44,6 @@ impl Default for StorageHashingStage { } } -#[async_trait::async_trait] impl Stage for StorageHashingStage { /// Return the id of the stage fn id(&self) -> StageId { @@ -52,7 +51,7 @@ impl Stage for StorageHashingStage { } /// Execute the stage. - async fn execute( + fn execute( &mut self, provider: &DatabaseProviderRW<'_, &DB>, input: ExecInput, @@ -191,7 +190,7 @@ impl Stage for StorageHashingStage { } /// Unwind the stage. - async fn unwind( + fn unwind( &mut self, provider: &DatabaseProviderRW<'_, &DB>, input: UnwindInput, diff --git a/crates/stages/src/stages/headers.rs b/crates/stages/src/stages/headers.rs index e57b736d61e6d..9ad06a198fc5a 100644 --- a/crates/stages/src/stages/headers.rs +++ b/crates/stages/src/stages/headers.rs @@ -2,38 +2,24 @@ use crate::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput} use futures_util::StreamExt; use reth_db::{ cursor::{DbCursorRO, DbCursorRW}, - database::Database, + database::{Database, DatabaseGAT}, tables, transaction::{DbTx, DbTxMut}, }; use reth_interfaces::{ - p2p::headers::{ - downloader::{HeaderDownloader, SyncTarget}, - error::HeadersDownloaderError, - }, + p2p::headers::{downloader::HeaderDownloader, error::HeadersDownloaderError}, provider::ProviderError, }; use reth_primitives::{ stage::{ CheckpointBlockRange, EntitiesCheckpoint, HeadersCheckpoint, StageCheckpoint, StageId, }, - BlockHashOrNumber, BlockNumber, SealedHeader, B256, + BlockHashOrNumber, BlockNumber, SealedHeader, }; -use reth_provider::DatabaseProviderRW; -use tokio::sync::watch; +use reth_provider::{DatabaseProviderRW, HeaderSyncGap, HeaderSyncGapProvider, HeaderSyncMode}; +use std::task::{ready, Context, Poll}; use tracing::*; -/// The header sync mode. -#[derive(Debug)] -pub enum HeaderSyncMode { - /// A sync mode in which the stage continuously requests the downloader for - /// next blocks. - Continuous, - /// A sync mode in which the stage polls the receiver for the next tip - /// to download from. - Tip(watch::Receiver), -} - /// The headers stage. /// /// The headers stage downloads all block headers from the highest block in the local database to @@ -48,27 +34,33 @@ pub enum HeaderSyncMode { /// NOTE: This stage downloads headers in reverse. Upon returning the control flow to the pipeline, /// the stage checkpoint is not updated until this stage is done. #[derive(Debug)] -pub struct HeaderStage { +pub struct HeaderStage { + /// Database handle. + provider: Provider, /// Strategy for downloading the headers - downloader: D, + downloader: Downloader, /// The sync mode for the stage. mode: HeaderSyncMode, + /// Current sync gap. + sync_gap: Option, + /// Header buffer. 
+ buffer: Vec, } // === impl HeaderStage === -impl HeaderStage +impl HeaderStage where - D: HeaderDownloader, + Downloader: HeaderDownloader, { /// Create a new header stage - pub fn new(downloader: D, mode: HeaderSyncMode) -> Self { - Self { downloader, mode } + pub fn new(database: Provider, downloader: Downloader, mode: HeaderSyncMode) -> Self { + Self { provider: database, downloader, mode, sync_gap: None, buffer: Vec::new() } } fn is_stage_done( &self, - tx: &>::TXMut, + tx: &>::TXMut, checkpoint: u64, ) -> Result { let mut header_cursor = tx.cursor_read::()?; @@ -79,75 +71,12 @@ where Ok(header_cursor.next()?.map(|(next_num, _)| head_num + 1 == next_num).unwrap_or_default()) } - /// Get the head and tip of the range we need to sync - /// - /// See also [SyncTarget] - async fn get_sync_gap( - &mut self, - provider: &DatabaseProviderRW<'_, &DB>, - checkpoint: u64, - ) -> Result { - // Create a cursor over canonical header hashes - let mut cursor = provider.tx_ref().cursor_read::()?; - let mut header_cursor = provider.tx_ref().cursor_read::()?; - - // Get head hash and reposition the cursor - let (head_num, head_hash) = cursor - .seek_exact(checkpoint)? - .ok_or_else(|| ProviderError::HeaderNotFound(checkpoint.into()))?; - - // Construct head - let (_, head) = header_cursor - .seek_exact(head_num)? - .ok_or_else(|| ProviderError::HeaderNotFound(head_num.into()))?; - let local_head = head.seal(head_hash); - - // Look up the next header - let next_header = cursor - .next()? - .map(|(next_num, next_hash)| -> Result { - let (_, next) = header_cursor - .seek_exact(next_num)? - .ok_or_else(|| ProviderError::HeaderNotFound(next_num.into()))?; - Ok(next.seal(next_hash)) - }) - .transpose()?; - - // Decide the tip or error out on invalid input. - // If the next element found in the cursor is not the "expected" next block per our current - // checkpoint, then there is a gap in the database and we should start downloading in - // reverse from there. Else, it should use whatever the forkchoice state reports. - let target = match next_header { - Some(header) if checkpoint + 1 != header.number => SyncTarget::Gap(header), - None => self - .next_sync_target(head_num) - .await - .ok_or(StageError::StageCheckpoint(checkpoint))?, - _ => return Err(StageError::StageCheckpoint(checkpoint)), - }; - - Ok(SyncGap { local_head, target }) - } - - async fn next_sync_target(&mut self, head: BlockNumber) -> Option { - match self.mode { - HeaderSyncMode::Tip(ref mut rx) => { - let tip = rx.wait_for(|tip| !tip.is_zero()).await.ok()?; - Some(SyncTarget::Tip(*tip)) - } - HeaderSyncMode::Continuous => { - trace!(target: "sync::stages::headers", head, "No next header found, using continuous sync strategy"); - Some(SyncTarget::TipNum(head + 1)) - } - } - } - /// Write downloaded headers to the given transaction /// /// Note: this writes the headers with rising block numbers. 
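
// The gap lookup that `get_sync_gap` used to do inside the stage now sits behind
// a provider trait, so the stage's poll only talks to the downloader. An
// illustrative outline of the trait's shape as used here, with simplified
// signatures and types rather than the exact reth definitions:

#[derive(Clone)]
enum HeaderSyncMode {
    /// Continuously request the next block.
    Continuous,
    /// Download toward a tip hash (the real variant holds a watch::Receiver).
    Tip([u8; 32]),
}

struct SealedHeader {
    number: u64,
}

enum SyncTarget {
    Tip([u8; 32]),
    TipNum(u64),
}

struct HeaderSyncGap {
    local_head: SealedHeader,
    target: SyncTarget,
}

trait HeaderSyncGapProvider {
    /// Resolve the head and tip of the range that still needs syncing.
    fn sync_gap(
        &self,
        mode: HeaderSyncMode,
        highest_uninterrupted_block: u64,
    ) -> Result<HeaderSyncGap, &'static str>;
}

/// A provider backed by an in-memory head, for illustration only.
struct MemoryProvider {
    head: u64,
}

impl HeaderSyncGapProvider for MemoryProvider {
    fn sync_gap(
        &self,
        mode: HeaderSyncMode,
        highest_uninterrupted_block: u64,
    ) -> Result<HeaderSyncGap, &'static str> {
        let target = match mode {
            HeaderSyncMode::Tip(hash) => SyncTarget::Tip(hash),
            // With no tip known, keep requesting the next block number.
            HeaderSyncMode::Continuous => SyncTarget::TipNum(highest_uninterrupted_block + 1),
        };
        Ok(HeaderSyncGap { local_head: SealedHeader { number: self.head }, target })
    }
}

fn main() {
    let provider = MemoryProvider { head: 100 };
    let gap = provider.sync_gap(HeaderSyncMode::Continuous, 100).unwrap();
    assert_eq!(gap.local_head.number, 100);
    assert!(matches!(gap.target, SyncTarget::TipNum(101)));
}
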
fn write_headers( &self, - tx: &>::TXMut, + tx: &>::TXMut, headers: Vec, ) -> Result, StageError> { trace!(target: "sync::stages::headers", len = headers.len(), "writing headers"); @@ -178,10 +107,10 @@ where } } -#[async_trait::async_trait] -impl Stage for HeaderStage +impl Stage for HeaderStage where DB: Database, + Provider: HeaderSyncGapProvider, D: HeaderDownloader, { /// Return the id of the stage @@ -189,20 +118,27 @@ where StageId::Headers } - /// Download the headers in reverse order (falling block numbers) - /// starting from the tip of the chain - async fn execute( + fn poll_execute_ready( &mut self, - provider: &DatabaseProviderRW<'_, &DB>, + cx: &mut Context<'_>, input: ExecInput, - ) -> Result { - let tx = provider.tx_ref(); + ) -> Poll> { let current_checkpoint = input.checkpoint(); + // Return if buffer already has some items. + if !self.buffer.is_empty() { + trace!( + target: "sync::stages::headers", + checkpoint = %current_checkpoint.block_number, + "Buffer is not empty" + ); + return Poll::Ready(Ok(())) + } + // Lookup the head and tip of the sync range - let gap = self.get_sync_gap(provider, current_checkpoint.block_number).await?; - let local_head = gap.local_head.number; + let gap = self.provider.sync_gap(self.mode.clone(), current_checkpoint.block_number)?; let tip = gap.target.tip(); + self.sync_gap = Some(gap.clone()); // Nothing to sync if gap.is_closed() { @@ -212,7 +148,7 @@ where target = ?tip, "Target block already reached" ); - return Ok(ExecOutput::done(current_checkpoint)) + return Poll::Ready(Ok(())) } debug!(target: "sync::stages::headers", ?tip, head = ?gap.local_head.hash(), "Commencing sync"); @@ -220,31 +156,44 @@ where // let the downloader know what to sync self.downloader.update_sync_gap(gap.local_head, gap.target); - // The downloader returns the headers in descending order starting from the tip - // down to the local head (latest block in db). - // Task downloader can return `None` only if the response relaying channel was closed. This - // is a fatal error to prevent the pipeline from running forever. 
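
// The gap lookup is now injected as a capability rather than computed by the
// stage itself. A minimal sketch of code written against the new seam,
// assuming only the traits introduced by this patch; `lookup_gap` is an
// illustrative helper, not part of the change:
fn lookup_gap<P: HeaderSyncGapProvider>(
    provider: &P,
    mode: HeaderSyncMode,
    checkpoint: u64,
) -> RethResult<HeaderSyncGap> {
    // Any provider (a ProviderFactory, a DatabaseProvider, or a test double)
    // can answer this question now.
    provider.sync_gap(mode, checkpoint)
}
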
- let downloaded_headers = match self.downloader.next().await { - Some(Ok(headers)) => headers, + let result = match ready!(self.downloader.poll_next_unpin(cx)) { + Some(Ok(headers)) => { + info!(target: "sync::stages::headers", len = headers.len(), "Received headers"); + self.buffer.extend(headers); + Ok(()) + } Some(Err(HeadersDownloaderError::DetachedHead { local_head, header, error })) => { error!(target: "sync::stages::headers", ?error, "Cannot attach header to head"); - return Err(StageError::DetachedHead { local_head, header, error }) + Err(StageError::DetachedHead { local_head, header, error }) } - None => return Err(StageError::ChannelClosed), + None => Err(StageError::ChannelClosed), }; + Poll::Ready(result) + } - info!(target: "sync::stages::headers", len = downloaded_headers.len(), "Received headers"); + /// Download the headers in reverse order (falling block numbers) + /// starting from the tip of the chain + fn execute( + &mut self, + provider: &DatabaseProviderRW<'_, &DB>, + input: ExecInput, + ) -> Result { + let current_checkpoint = input.checkpoint(); + if self.buffer.is_empty() { + return Ok(ExecOutput::done(current_checkpoint)) + } + + let gap = self.sync_gap.clone().ok_or(StageError::MissingSyncGap)?; + let local_head = gap.local_head.number; + let tip = gap.target.tip(); + let downloaded_headers = std::mem::take(&mut self.buffer); let tip_block_number = match tip { // If tip is hash and it equals to the first downloaded header's hash, we can use // the block number of this header as tip. - BlockHashOrNumber::Hash(hash) => downloaded_headers.first().and_then(|header| { - if header.hash == hash { - Some(header.number) - } else { - None - } - }), + BlockHashOrNumber::Hash(hash) => downloaded_headers + .first() + .and_then(|header| (header.hash == hash).then_some(header.number)), // If tip is number, we can just grab it and not resolve using downloaded headers. BlockHashOrNumber::Number(number) => Some(number), }; @@ -254,13 +203,14 @@ where // syncing towards, we need to take into account already synced headers from the database. // It is `None`, if tip didn't change and we're still downloading headers for previously // calculated gap. + let tx = provider.tx_ref(); let target_block_number = if let Some(tip_block_number) = tip_block_number { let local_max_block_number = tx .cursor_read::()? .last()? .map(|(canonical_block, _)| canonical_block); - Some(tip_block_number.max(local_max_block_number.unwrap_or(tip_block_number))) + Some(tip_block_number.max(local_max_block_number.unwrap_or_default())) } else { None }; @@ -278,18 +228,17 @@ where // `target_block_number` is guaranteed to be `Some`, because on the first iteration // we download the header for missing tip and use its block number. _ => { + let target = target_block_number.expect("No downloaded header for tip found"); HeadersCheckpoint { block_range: CheckpointBlockRange { from: input.checkpoint().block_number, - to: target_block_number.expect("No downloaded header for tip found"), + to: target, }, progress: EntitiesCheckpoint { // Set processed to the local head block number + number // of block already filled in the gap. - processed: local_head + - (target_block_number.unwrap_or_default() - - tip_block_number.unwrap_or_default()), - total: target_block_number.expect("No downloaded header for tip found"), + processed: local_head + (target - tip_block_number.unwrap_or_default()), + total: target, }, } } @@ -326,12 +275,14 @@ where } /// Unwind the stage. 
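
// How a caller drives the split API: a sketch assuming an async context and
// the `stage`/`factory`/`input` names used by the test runner later in this
// patch. The async half downloads headers into the buffer; the sync half
// drains the buffer inside a single database transaction.
use std::future::poll_fn;

poll_fn(|cx| stage.poll_execute_ready(cx, input)).await?;
let provider_rw = factory.provider_rw()?;
let output = stage.execute(&provider_rw, input)?;
provider_rw.commit()?;
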
- async fn unwind( + fn unwind( &mut self, provider: &DatabaseProviderRW<'_, &DB>, input: UnwindInput, ) -> Result { - // TODO: handle bad block + self.buffer.clear(); + self.sync_gap.take(); + provider.unwind_table_by_walker::( input.unwind_to + 1, )?; @@ -359,46 +310,22 @@ where } } -/// Represents a gap to sync: from `local_head` to `target` -#[derive(Debug)] -pub struct SyncGap { - /// The local head block. Represents lower bound of sync range. - pub local_head: SealedHeader, - - /// The sync target. Represents upper bound of sync range. - pub target: SyncTarget, -} - -// === impl SyncGap === - -impl SyncGap { - /// Returns `true` if the gap from the head to the target was closed - #[inline] - pub fn is_closed(&self) -> bool { - match self.target.tip() { - BlockHashOrNumber::Hash(hash) => self.local_head.hash() == hash, - BlockHashOrNumber::Number(num) => self.local_head.number == num, - } - } -} - #[cfg(test)] mod tests { - use super::*; use crate::test_utils::{ stage_test_suite, ExecuteStageTestRunner, StageTestRunner, UnwindStageTestRunner, }; use assert_matches::assert_matches; - use rand::Rng; - use reth_interfaces::test_utils::{generators, generators::random_header}; - use reth_primitives::{stage::StageUnitCheckpoint, B256, MAINNET}; + use reth_interfaces::test_utils::generators::random_header; + use reth_primitives::{stage::StageUnitCheckpoint, B256}; use reth_provider::ProviderFactory; use test_runner::HeadersTestRunner; mod test_runner { use super::*; use crate::test_utils::{TestRunnerError, TestTransaction}; + use reth_db::{test_utils::TempDatabase, DatabaseEnv}; use reth_downloaders::headers::reverse_headers::{ ReverseHeadersDownloader, ReverseHeadersDownloaderBuilder, }; @@ -409,6 +336,7 @@ mod tests { use reth_primitives::U256; use reth_provider::{BlockHashReader, BlockNumReader, HeaderProvider}; use std::sync::Arc; + use tokio::sync::watch; pub(crate) struct HeadersTestRunner { pub(crate) client: TestHeadersClient, @@ -437,17 +365,18 @@ mod tests { } impl StageTestRunner for HeadersTestRunner { - type S = HeaderStage; + type S = HeaderStage>>, D>; fn tx(&self) -> &TestTransaction { &self.tx } fn stage(&self) -> Self::S { - HeaderStage { - mode: HeaderSyncMode::Tip(self.channel.1.clone()), - downloader: (*self.downloader_factory)(), - } + HeaderStage::new( + self.tx.factory.clone(), + (*self.downloader_factory)(), + HeaderSyncMode::Tip(self.channel.1.clone()), + ) } } @@ -599,65 +528,6 @@ mod tests { assert!(runner.validate_execution(input, result.ok()).is_ok(), "validation failed"); } - /// Test the head and tip range lookup - #[tokio::test] - async fn head_and_tip_lookup() { - let runner = HeadersTestRunner::default(); - let factory = ProviderFactory::new(runner.tx().tx.as_ref(), MAINNET.clone()); - let provider = factory.provider_rw().unwrap(); - let tx = provider.tx_ref(); - let mut stage = runner.stage(); - - let mut rng = generators::rng(); - - let consensus_tip = rng.gen(); - runner.send_tip(consensus_tip); - - // Genesis - let checkpoint = 0; - let head = random_header(&mut rng, 0, None); - let gap_fill = random_header(&mut rng, 1, Some(head.hash())); - let gap_tip = random_header(&mut rng, 2, Some(gap_fill.hash())); - - // Empty database - assert_matches!( - stage.get_sync_gap(&provider, checkpoint).await, - Err(StageError::DatabaseIntegrity(ProviderError::HeaderNotFound(block_number))) - if block_number.as_number().unwrap() == checkpoint - ); - - // Checkpoint and no gap - tx.put::(head.number, head.hash()) - .expect("failed to write canonical"); - 
tx.put::(head.number, head.clone().unseal()) - .expect("failed to write header"); - - let gap = stage.get_sync_gap(&provider, checkpoint).await.unwrap(); - assert_eq!(gap.local_head, head); - assert_eq!(gap.target.tip(), consensus_tip.into()); - - // Checkpoint and gap - tx.put::(gap_tip.number, gap_tip.hash()) - .expect("failed to write canonical"); - tx.put::(gap_tip.number, gap_tip.clone().unseal()) - .expect("failed to write header"); - - let gap = stage.get_sync_gap(&provider, checkpoint).await.unwrap(); - assert_eq!(gap.local_head, head); - assert_eq!(gap.target.tip(), gap_tip.parent_hash.into()); - - // Checkpoint and gap closed - tx.put::(gap_fill.number, gap_fill.hash()) - .expect("failed to write canonical"); - tx.put::(gap_fill.number, gap_fill.clone().unseal()) - .expect("failed to write header"); - - assert_matches!( - stage.get_sync_gap(&provider, checkpoint).await, - Err(StageError::StageCheckpoint(_checkpoint)) if _checkpoint == checkpoint - ); - } - /// Execute the stage in two steps #[tokio::test] async fn execute_from_previous_checkpoint() { diff --git a/crates/stages/src/stages/index_account_history.rs b/crates/stages/src/stages/index_account_history.rs index 0945538c3dcbc..b1e7721dcfe0f 100644 --- a/crates/stages/src/stages/index_account_history.rs +++ b/crates/stages/src/stages/index_account_history.rs @@ -35,7 +35,6 @@ impl Default for IndexAccountHistoryStage { } } -#[async_trait::async_trait] impl Stage for IndexAccountHistoryStage { /// Return the id of the stage fn id(&self) -> StageId { @@ -43,7 +42,7 @@ impl Stage for IndexAccountHistoryStage { } /// Execute the stage. - async fn execute( + fn execute( &mut self, provider: &DatabaseProviderRW<'_, &DB>, mut input: ExecInput, @@ -86,7 +85,7 @@ impl Stage for IndexAccountHistoryStage { } /// Unwind the stage. 
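
// The three outcomes the removed `head_and_tip_lookup` test exercised survive
// in the provider implementation later in this patch. A condensed sketch of
// the body of `sync_gap`, assuming `local_head` and `next_header` were
// already read from the database and `rx` is the tip watch receiver:
let target = match next_header {
    // A stored header that does not attach to the checkpoint: a database
    // gap, so download in reverse towards it.
    Some(header) if local_head.number + 1 != header.number => SyncTarget::Gap(header),
    // No next header: fall back to the forkchoice tip, now read without
    // blocking via `rx.borrow()` instead of the old awaited `wait_for`.
    None => SyncTarget::Tip(*rx.borrow()),
    // A header that attaches means the "gap" is already filled.
    _ => return Err(ProviderError::InconsistentHeaderGap.into()),
};
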
- async fn unwind( + fn unwind( &mut self, provider: &DatabaseProviderRW<'_, &DB>, input: UnwindInput, @@ -178,17 +177,17 @@ mod tests { .unwrap() } - async fn run(tx: &TestTransaction, run_to: u64) { + fn run(tx: &TestTransaction, run_to: u64) { let input = ExecInput { target: Some(run_to), ..Default::default() }; let mut stage = IndexAccountHistoryStage::default(); let factory = ProviderFactory::new(tx.tx.as_ref(), MAINNET.clone()); let provider = factory.provider_rw().unwrap(); - let out = stage.execute(&provider, input).await.unwrap(); + let out = stage.execute(&provider, input).unwrap(); assert_eq!(out, ExecOutput { checkpoint: StageCheckpoint::new(5), done: true }); provider.commit().unwrap(); } - async fn unwind(tx: &TestTransaction, unwind_from: u64, unwind_to: u64) { + fn unwind(tx: &TestTransaction, unwind_from: u64, unwind_to: u64) { let input = UnwindInput { checkpoint: StageCheckpoint::new(unwind_from), unwind_to, @@ -197,7 +196,7 @@ mod tests { let mut stage = IndexAccountHistoryStage::default(); let factory = ProviderFactory::new(tx.tx.as_ref(), MAINNET.clone()); let provider = factory.provider_rw().unwrap(); - let out = stage.unwind(&provider, input).await.unwrap(); + let out = stage.unwind(&provider, input).unwrap(); assert_eq!(out, UnwindOutput { checkpoint: StageCheckpoint::new(unwind_to) }); provider.commit().unwrap(); } @@ -211,14 +210,14 @@ mod tests { partial_setup(&tx); // run - run(&tx, 5).await; + run(&tx, 5); // verify let table = cast(tx.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![4, 5])])); // unwind - unwind(&tx, 5, 0).await; + unwind(&tx, 5, 0); // verify initial state let table = tx.table::().unwrap(); @@ -239,14 +238,14 @@ mod tests { .unwrap(); // run - run(&tx, 5).await; + run(&tx, 5); // verify let table = cast(tx.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![1, 2, 3, 4, 5]),])); // unwind - unwind(&tx, 5, 0).await; + unwind(&tx, 5, 0); // verify initial state let table = cast(tx.table::().unwrap()); @@ -268,7 +267,7 @@ mod tests { .unwrap(); // run - run(&tx, 5).await; + run(&tx, 5); // verify let table = cast(tx.table::().unwrap()); @@ -278,7 +277,7 @@ mod tests { ); // unwind - unwind(&tx, 5, 0).await; + unwind(&tx, 5, 0); // verify initial state let table = cast(tx.table::().unwrap()); @@ -300,7 +299,7 @@ mod tests { .unwrap(); // run - run(&tx, 5).await; + run(&tx, 5); // verify close_full_list.push(4); @@ -309,7 +308,7 @@ mod tests { assert_eq!(table, BTreeMap::from([(shard(u64::MAX), close_full_list.clone()),])); // unwind - unwind(&tx, 5, 0).await; + unwind(&tx, 5, 0); // verify initial state close_full_list.pop(); @@ -335,7 +334,7 @@ mod tests { .unwrap(); // run - run(&tx, 5).await; + run(&tx, 5); // verify close_full_list.push(4); @@ -346,7 +345,7 @@ mod tests { ); // unwind - unwind(&tx, 5, 0).await; + unwind(&tx, 5, 0); // verify initial state close_full_list.pop(); @@ -370,7 +369,7 @@ mod tests { }) .unwrap(); - run(&tx, 5).await; + run(&tx, 5); // verify let table = cast(tx.table::().unwrap()); @@ -384,7 +383,7 @@ mod tests { ); // unwind - unwind(&tx, 5, 0).await; + unwind(&tx, 5, 0); // verify initial state let table = cast(tx.table::().unwrap()); @@ -434,7 +433,7 @@ mod tests { }; let factory = ProviderFactory::new(tx.tx.as_ref(), MAINNET.clone()); let provider = factory.provider_rw().unwrap(); - let out = stage.execute(&provider, input).await.unwrap(); + let out = stage.execute(&provider, input).unwrap(); assert_eq!(out, ExecOutput { checkpoint: 
StageCheckpoint::new(20000), done: true }); provider.commit().unwrap(); @@ -443,7 +442,7 @@ mod tests { assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![36, 100])])); // unwind - unwind(&tx, 20000, 0).await; + unwind(&tx, 20000, 0); // verify initial state let table = tx.table::().unwrap(); diff --git a/crates/stages/src/stages/index_storage_history.rs b/crates/stages/src/stages/index_storage_history.rs index b1e27aed18098..f9896fb4f1955 100644 --- a/crates/stages/src/stages/index_storage_history.rs +++ b/crates/stages/src/stages/index_storage_history.rs @@ -34,7 +34,6 @@ impl Default for IndexStorageHistoryStage { } } -#[async_trait::async_trait] impl Stage for IndexStorageHistoryStage { /// Return the id of the stage fn id(&self) -> StageId { @@ -42,7 +41,7 @@ impl Stage for IndexStorageHistoryStage { } /// Execute the stage. - async fn execute( + fn execute( &mut self, provider: &DatabaseProviderRW<'_, &DB>, mut input: ExecInput, @@ -84,7 +83,7 @@ impl Stage for IndexStorageHistoryStage { } /// Unwind the stage. - async fn unwind( + fn unwind( &mut self, provider: &DatabaseProviderRW<'_, &DB>, input: UnwindInput, @@ -188,17 +187,17 @@ mod tests { .unwrap() } - async fn run(tx: &TestTransaction, run_to: u64) { + fn run(tx: &TestTransaction, run_to: u64) { let input = ExecInput { target: Some(run_to), ..Default::default() }; let mut stage = IndexStorageHistoryStage::default(); let factory = ProviderFactory::new(tx.tx.as_ref(), MAINNET.clone()); let provider = factory.provider_rw().unwrap(); - let out = stage.execute(&provider, input).await.unwrap(); + let out = stage.execute(&provider, input).unwrap(); assert_eq!(out, ExecOutput { checkpoint: StageCheckpoint::new(5), done: true }); provider.commit().unwrap(); } - async fn unwind(tx: &TestTransaction, unwind_from: u64, unwind_to: u64) { + fn unwind(tx: &TestTransaction, unwind_from: u64, unwind_to: u64) { let input = UnwindInput { checkpoint: StageCheckpoint::new(unwind_from), unwind_to, @@ -207,7 +206,7 @@ mod tests { let mut stage = IndexStorageHistoryStage::default(); let factory = ProviderFactory::new(tx.tx.as_ref(), MAINNET.clone()); let provider = factory.provider_rw().unwrap(); - let out = stage.unwind(&provider, input).await.unwrap(); + let out = stage.unwind(&provider, input).unwrap(); assert_eq!(out, UnwindOutput { checkpoint: StageCheckpoint::new(unwind_to) }); provider.commit().unwrap(); } @@ -221,14 +220,14 @@ mod tests { partial_setup(&tx); // run - run(&tx, 5).await; + run(&tx, 5); // verify let table = cast(tx.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![4, 5]),])); // unwind - unwind(&tx, 5, 0).await; + unwind(&tx, 5, 0); // verify initial state let table = tx.table::().unwrap(); @@ -249,14 +248,14 @@ mod tests { .unwrap(); // run - run(&tx, 5).await; + run(&tx, 5); // verify let table = cast(tx.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![1, 2, 3, 4, 5]),])); // unwind - unwind(&tx, 5, 0).await; + unwind(&tx, 5, 0); // verify initial state let table = cast(tx.table::().unwrap()); @@ -281,7 +280,7 @@ mod tests { .unwrap(); // run - run(&tx, 5).await; + run(&tx, 5); // verify let table = cast(tx.table::().unwrap()); @@ -291,7 +290,7 @@ mod tests { ); // unwind - unwind(&tx, 5, 0).await; + unwind(&tx, 5, 0); // verify initial state let table = cast(tx.table::().unwrap()); @@ -313,7 +312,7 @@ mod tests { .unwrap(); // run - run(&tx, 5).await; + run(&tx, 5); // verify close_full_list.push(4); @@ -322,7 +321,7 @@ mod tests { assert_eq!(table, 
BTreeMap::from([(shard(u64::MAX), close_full_list.clone()),])); // unwind - unwind(&tx, 5, 0).await; + unwind(&tx, 5, 0); // verify initial state close_full_list.pop(); @@ -348,7 +347,7 @@ mod tests { .unwrap(); // run - run(&tx, 5).await; + run(&tx, 5); // verify close_full_list.push(4); @@ -359,7 +358,7 @@ mod tests { ); // unwind - unwind(&tx, 5, 0).await; + unwind(&tx, 5, 0); // verify initial state close_full_list.pop(); @@ -383,7 +382,7 @@ mod tests { }) .unwrap(); - run(&tx, 5).await; + run(&tx, 5); // verify let table = cast(tx.table::().unwrap()); @@ -397,7 +396,7 @@ mod tests { ); // unwind - unwind(&tx, 5, 0).await; + unwind(&tx, 5, 0); // verify initial state let table = cast(tx.table::().unwrap()); @@ -447,7 +446,7 @@ mod tests { }; let factory = ProviderFactory::new(tx.tx.as_ref(), MAINNET.clone()); let provider = factory.provider_rw().unwrap(); - let out = stage.execute(&provider, input).await.unwrap(); + let out = stage.execute(&provider, input).unwrap(); assert_eq!(out, ExecOutput { checkpoint: StageCheckpoint::new(20000), done: true }); provider.commit().unwrap(); @@ -456,7 +455,7 @@ mod tests { assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![36, 100]),])); // unwind - unwind(&tx, 20000, 0).await; + unwind(&tx, 20000, 0); // verify initial state let table = tx.table::().unwrap(); diff --git a/crates/stages/src/stages/merkle.rs b/crates/stages/src/stages/merkle.rs index cd02696cee152..4354b5628b36e 100644 --- a/crates/stages/src/stages/merkle.rs +++ b/crates/stages/src/stages/merkle.rs @@ -113,7 +113,6 @@ impl MerkleStage { } } -#[async_trait::async_trait] impl Stage for MerkleStage { /// Return the id of the stage fn id(&self) -> StageId { @@ -126,7 +125,7 @@ impl Stage for MerkleStage { } /// Execute the stage. - async fn execute( + fn execute( &mut self, provider: &DatabaseProviderRW<'_, &DB>, input: ExecInput, @@ -260,7 +259,7 @@ impl Stage for MerkleStage { } /// Unwind the stage. 
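
// With `execute`/`unwind` synchronous, the surrounding index-history helpers
// compose into a plain round trip; a sketch in the style of these tests,
// assuming their `partial_setup`, `run` and `unwind` helpers:
let tx = TestTransaction::default();
partial_setup(&tx);
run(&tx, 5); // no .await anywhere
unwind(&tx, 5, 0);
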
- async fn unwind( + fn unwind( &mut self, provider: &DatabaseProviderRW<'_, &DB>, input: UnwindInput, diff --git a/crates/stages/src/stages/mod.rs b/crates/stages/src/stages/mod.rs index d4eeaf2d3be35..771de3586bef9 100644 --- a/crates/stages/src/stages/mod.rs +++ b/crates/stages/src/stages/mod.rs @@ -139,7 +139,7 @@ mod tests { prune_modes.clone(), ); - execution_stage.execute(&provider, input).await.unwrap(); + execution_stage.execute(&provider, input).unwrap(); assert_eq!( provider.receipts_by_block(1.into()).unwrap().unwrap().len(), expect_num_receipts @@ -163,9 +163,9 @@ mod tests { if let Some(PruneMode::Full) = prune_modes.account_history { // Full is not supported - assert!(acc_indexing_stage.execute(&provider, input).await.is_err()); + assert!(acc_indexing_stage.execute(&provider, input).is_err()); } else { - acc_indexing_stage.execute(&provider, input).await.unwrap(); + acc_indexing_stage.execute(&provider, input).unwrap(); let mut account_history: Cursor = provider.tx_ref().cursor_read::().unwrap(); assert_eq!(account_history.walk(None).unwrap().count(), expect_num_acc_changesets); @@ -179,9 +179,9 @@ mod tests { if let Some(PruneMode::Full) = prune_modes.storage_history { // Full is not supported - assert!(acc_indexing_stage.execute(&provider, input).await.is_err()); + assert!(acc_indexing_stage.execute(&provider, input).is_err()); } else { - storage_indexing_stage.execute(&provider, input).await.unwrap(); + storage_indexing_stage.execute(&provider, input).unwrap(); let mut storage_history = provider.tx_ref().cursor_read::().unwrap(); diff --git a/crates/stages/src/stages/sender_recovery.rs b/crates/stages/src/stages/sender_recovery.rs index 80ffb040a057a..cdafd9e6275e0 100644 --- a/crates/stages/src/stages/sender_recovery.rs +++ b/crates/stages/src/stages/sender_recovery.rs @@ -16,9 +16,8 @@ use reth_primitives::{ use reth_provider::{ BlockReader, DatabaseProviderRW, HeaderProvider, ProviderError, PruneCheckpointReader, }; -use std::fmt::Debug; +use std::{fmt::Debug, sync::mpsc}; use thiserror::Error; -use tokio::sync::mpsc; use tracing::*; /// The sender recovery stage iterates over existing transactions, @@ -44,7 +43,6 @@ impl Default for SenderRecoveryStage { } } -#[async_trait::async_trait] impl Stage for SenderRecoveryStage { /// Return the id of the stage fn id(&self) -> StageId { @@ -56,7 +54,7 @@ impl Stage for SenderRecoveryStage { /// collect transactions within that range, /// recover signer for each transaction and store entries in /// the [`TxSenders`][reth_db::tables::TxSenders] table. - async fn execute( + fn execute( &mut self, provider: &DatabaseProviderRW<'_, &DB>, input: ExecInput, @@ -110,7 +108,7 @@ impl Stage for SenderRecoveryStage { for chunk in &tx_walker.chunks(chunk_size) { // An _unordered_ channel to receive results from a rayon job - let (recovered_senders_tx, recovered_senders_rx) = mpsc::unbounded_channel(); + let (recovered_senders_tx, recovered_senders_rx) = mpsc::channel(); channels.push(recovered_senders_rx); // Note: Unfortunate side-effect of how chunk is designed in itertools (it is not Send) let chunk: Vec<_> = chunk.collect(); @@ -128,8 +126,8 @@ impl Stage for SenderRecoveryStage { } // Iterate over channels and append the sender in the order that they are received. 
- for mut channel in channels { - while let Some(recovered) = channel.recv().await { + for channel in channels { + while let Ok(recovered) = channel.recv() { let (tx_id, sender) = match recovered { Ok(result) => result, Err(error) => { @@ -168,7 +166,7 @@ impl Stage for SenderRecoveryStage { } /// Unwind the stage. - async fn unwind( + fn unwind( &mut self, provider: &DatabaseProviderRW<'_, &DB>, input: UnwindInput, diff --git a/crates/stages/src/stages/total_difficulty.rs b/crates/stages/src/stages/total_difficulty.rs index ea1e20630d4a8..1cdaa971cf379 100644 --- a/crates/stages/src/stages/total_difficulty.rs +++ b/crates/stages/src/stages/total_difficulty.rs @@ -41,7 +41,6 @@ impl TotalDifficultyStage { } } -#[async_trait::async_trait] impl Stage for TotalDifficultyStage { /// Return the id of the stage fn id(&self) -> StageId { @@ -49,7 +48,7 @@ impl Stage for TotalDifficultyStage { } /// Write total difficulty entries - async fn execute( + fn execute( &mut self, provider: &DatabaseProviderRW<'_, &DB>, input: ExecInput, @@ -99,7 +98,7 @@ impl Stage for TotalDifficultyStage { } /// Unwind the stage. - async fn unwind( + fn unwind( &mut self, provider: &DatabaseProviderRW<'_, &DB>, input: UnwindInput, diff --git a/crates/stages/src/stages/tx_lookup.rs b/crates/stages/src/stages/tx_lookup.rs index 758fa403320b3..0de9ce74b6c14 100644 --- a/crates/stages/src/stages/tx_lookup.rs +++ b/crates/stages/src/stages/tx_lookup.rs @@ -42,7 +42,6 @@ impl TransactionLookupStage { } } -#[async_trait::async_trait] impl Stage for TransactionLookupStage { /// Return the id of the stage fn id(&self) -> StageId { @@ -50,7 +49,7 @@ impl Stage for TransactionLookupStage { } /// Write transaction hash -> id entries - async fn execute( + fn execute( &mut self, provider: &DatabaseProviderRW<'_, &DB>, mut input: ExecInput, @@ -128,7 +127,7 @@ impl Stage for TransactionLookupStage { } /// Unwind the stage. 
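
// Sender recovery runs on rayon, so a std channel drained with blocking
// `recv` fits better than an awaited tokio one. A minimal sketch of the
// pattern, assuming the rayon crate; `recover_one` is an illustrative
// CPU-bound job, not part of the change:
use std::sync::mpsc;

let (result_tx, result_rx) = mpsc::channel();
rayon::spawn(move || {
    // CPU-bound work stays entirely off the async runtime.
    let _ = result_tx.send(recover_one());
});
// `recv` returns Err once every sender is dropped, ending the loop.
while let Ok(recovered) = result_rx.recv() {
    // Append results in the order they arrive.
    let _ = recovered;
}
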
- async fn unwind( + fn unwind( &mut self, provider: &DatabaseProviderRW<'_, &DB>, input: UnwindInput, diff --git a/crates/stages/src/test_utils/runner.rs b/crates/stages/src/test_utils/runner.rs index 9bc08638d34ff..96c44cacb4cf0 100644 --- a/crates/stages/src/test_utils/runner.rs +++ b/crates/stages/src/test_utils/runner.rs @@ -4,7 +4,7 @@ use reth_db::DatabaseEnv; use reth_interfaces::db::DatabaseError; use reth_primitives::MAINNET; use reth_provider::{ProviderError, ProviderFactory}; -use std::{borrow::Borrow, sync::Arc}; +use std::{borrow::Borrow, future::poll_fn, sync::Arc}; use tokio::sync::oneshot; #[derive(thiserror::Error, Debug)] @@ -48,10 +48,13 @@ pub(crate) trait ExecuteStageTestRunner: StageTestRunner { let (db, mut stage) = (self.tx().inner_raw(), self.stage()); tokio::spawn(async move { let factory = ProviderFactory::new(db.db(), MAINNET.clone()); - let provider = factory.provider_rw().unwrap(); - let result = stage.execute(&provider, input).await; - provider.commit().expect("failed to commit"); + let result = poll_fn(|cx| stage.poll_execute_ready(cx, input)).await.and_then(|_| { + let provider_rw = factory.provider_rw().unwrap(); + let result = stage.execute(&provider_rw, input); + provider_rw.commit().expect("failed to commit"); + result + }); tx.send(result).expect("failed to send message") }); rx @@ -76,7 +79,7 @@ pub(crate) trait UnwindStageTestRunner: StageTestRunner { let factory = ProviderFactory::new(db.db(), MAINNET.clone()); let provider = factory.provider_rw().unwrap(); - let result = stage.unwind(&provider, input).await; + let result = stage.unwind(&provider, input); provider.commit().expect("failed to commit"); tx.send(result).expect("failed to send result"); }); diff --git a/crates/stages/src/test_utils/stage.rs b/crates/stages/src/test_utils/stage.rs index 65ea51362dfb7..85e88841ba8c9 100644 --- a/crates/stages/src/test_utils/stage.rs +++ b/crates/stages/src/test_utils/stage.rs @@ -40,13 +40,12 @@ impl TestStage { } } -#[async_trait::async_trait] impl Stage for TestStage { fn id(&self) -> StageId { self.id } - async fn execute( + fn execute( &mut self, _: &DatabaseProviderRW<'_, &DB>, _input: ExecInput, @@ -56,7 +55,7 @@ impl Stage for TestStage { .unwrap_or_else(|| panic!("Test stage {} executed too many times.", self.id)) } - async fn unwind( + fn unwind( &mut self, _: &DatabaseProviderRW<'_, &DB>, _input: UnwindInput, diff --git a/crates/storage/provider/src/lib.rs b/crates/storage/provider/src/lib.rs index 87118a6351c1e..194c60d500c5b 100644 --- a/crates/storage/provider/src/lib.rs +++ b/crates/storage/provider/src/lib.rs @@ -21,11 +21,11 @@ pub use traits::{ BlockWriter, BlockchainTreePendingStateProvider, BundleStateDataProvider, CanonChainTracker, CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, EvmEnvProvider, ExecutorFactory, - HashingWriter, HeaderProvider, HistoryWriter, PrunableBlockExecutor, PruneCheckpointReader, - PruneCheckpointWriter, ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, - StageCheckpointWriter, StateProvider, StateProviderBox, StateProviderFactory, - StateRootProvider, StorageReader, TransactionVariant, TransactionsProvider, - TransactionsProviderExt, WithdrawalsProvider, + HashingWriter, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, HeaderSyncMode, + HistoryWriter, PrunableBlockExecutor, PruneCheckpointReader, PruneCheckpointWriter, + ReceiptProvider, ReceiptProviderIdExt, StageCheckpointReader, 
StageCheckpointWriter, + StateProvider, StateProviderBox, StateProviderFactory, StateRootProvider, StorageReader, + TransactionVariant, TransactionsProvider, TransactionsProviderExt, WithdrawalsProvider, }; /// Provider trait implementations. diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 38b4be901d27c..c21cbdd686c77 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -5,8 +5,9 @@ use crate::{ }, traits::{BlockSource, ReceiptProvider}, BlockHashReader, BlockNumReader, BlockReader, ChainSpecProvider, EvmEnvProvider, - HeaderProvider, ProviderError, PruneCheckpointReader, StageCheckpointReader, StateProviderBox, - TransactionVariant, TransactionsProvider, WithdrawalsProvider, + HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, HeaderSyncMode, ProviderError, + PruneCheckpointReader, StageCheckpointReader, StateProviderBox, TransactionVariant, + TransactionsProvider, WithdrawalsProvider, }; use reth_db::{database::Database, init_db, models::StoredBlockBodyIndices, DatabaseEnv}; use reth_interfaces::{db::LogLevel, provider::ProviderResult, RethError, RethResult}; @@ -196,6 +197,16 @@ impl ProviderFactory { } } +impl HeaderSyncGapProvider for ProviderFactory { + fn sync_gap( + &self, + mode: HeaderSyncMode, + highest_uninterrupted_block: BlockNumber, + ) -> RethResult { + self.provider()?.sync_gap(mode, highest_uninterrupted_block) + } +} + impl HeaderProvider for ProviderFactory { fn header(&self, block_hash: &BlockHash) -> ProviderResult> { self.provider()?.header(block_hash) @@ -477,19 +488,32 @@ impl PruneCheckpointReader for ProviderFactory { #[cfg(test)] mod tests { use super::ProviderFactory; - use crate::{BlockHashReader, BlockNumReader, BlockWriter, TransactionsProvider}; + use crate::{ + BlockHashReader, BlockNumReader, BlockWriter, HeaderSyncGapProvider, HeaderSyncMode, + TransactionsProvider, + }; use alloy_rlp::Decodable; use assert_matches::assert_matches; + use rand::Rng; use reth_db::{ tables, test_utils::{create_test_rw_db, ERROR_TEMPDIR}, + transaction::DbTxMut, DatabaseEnv, }; - use reth_interfaces::test_utils::{generators, generators::random_block}; + use reth_interfaces::{ + provider::ProviderError, + test_utils::{ + generators, + generators::{random_block, random_header}, + }, + RethError, + }; use reth_primitives::{ hex_literal::hex, ChainSpecBuilder, PruneMode, PruneModes, SealedBlock, TxNumber, B256, }; use std::{ops::RangeInclusive, sync::Arc}; + use tokio::sync::watch; #[test] fn common_history_provider() { @@ -618,4 +642,73 @@ mod tests { ) } } + + #[test] + fn header_sync_gap_lookup() { + let mut rng = generators::rng(); + let chain_spec = ChainSpecBuilder::mainnet().build(); + let db = create_test_rw_db(); + let factory = ProviderFactory::new(db, Arc::new(chain_spec)); + let provider = factory.provider_rw().unwrap(); + + let consensus_tip = rng.gen(); + let (_tip_tx, tip_rx) = watch::channel(consensus_tip); + let mode = HeaderSyncMode::Tip(tip_rx); + + // Genesis + let checkpoint = 0; + let head = random_header(&mut rng, 0, None); + let gap_fill = random_header(&mut rng, 1, Some(head.hash())); + let gap_tip = random_header(&mut rng, 2, Some(gap_fill.hash())); + + // Empty database + assert_matches!( + provider.sync_gap(mode.clone(), checkpoint), + Err(RethError::Provider(ProviderError::HeaderNotFound(block_number))) + if block_number.as_number().unwrap() == checkpoint + ); + + // Checkpoint and no gap + 
provider + .tx_ref() + .put::(head.number, head.hash()) + .expect("failed to write canonical"); + provider + .tx_ref() + .put::(head.number, head.clone().unseal()) + .expect("failed to write header"); + + let gap = provider.sync_gap(mode.clone(), checkpoint).unwrap(); + assert_eq!(gap.local_head, head); + assert_eq!(gap.target.tip(), consensus_tip.into()); + + // Checkpoint and gap + provider + .tx_ref() + .put::(gap_tip.number, gap_tip.hash()) + .expect("failed to write canonical"); + provider + .tx_ref() + .put::(gap_tip.number, gap_tip.clone().unseal()) + .expect("failed to write header"); + + let gap = provider.sync_gap(mode.clone(), checkpoint).unwrap(); + assert_eq!(gap.local_head, head); + assert_eq!(gap.target.tip(), gap_tip.parent_hash.into()); + + // Checkpoint and gap closed + provider + .tx_ref() + .put::(gap_fill.number, gap_fill.hash()) + .expect("failed to write canonical"); + provider + .tx_ref() + .put::(gap_fill.number, gap_fill.clone().unseal()) + .expect("failed to write header"); + + assert_matches!( + provider.sync_gap(mode, checkpoint), + Err(RethError::Provider(ProviderError::InconsistentHeaderGap)) + ); + } } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 198aeb5533acf..ad289f19883af 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -5,10 +5,10 @@ use crate::{ AccountExtReader, BlockSource, ChangeSetReader, ReceiptProvider, StageCheckpointWriter, }, AccountReader, BlockExecutionWriter, BlockHashReader, BlockNumReader, BlockReader, BlockWriter, - Chain, EvmEnvProvider, HashingWriter, HeaderProvider, HistoryWriter, OriginalValuesKnown, - ProviderError, PruneCheckpointReader, PruneCheckpointWriter, StageCheckpointReader, - StorageReader, TransactionVariant, TransactionsProvider, TransactionsProviderExt, - WithdrawalsProvider, + Chain, EvmEnvProvider, HashingWriter, HeaderProvider, HeaderSyncGap, HeaderSyncGapProvider, + HeaderSyncMode, HistoryWriter, OriginalValuesKnown, ProviderError, PruneCheckpointReader, + PruneCheckpointWriter, StageCheckpointReader, StorageReader, TransactionVariant, + TransactionsProvider, TransactionsProviderExt, WithdrawalsProvider, }; use itertools::{izip, Itertools}; use reth_db::{ @@ -24,7 +24,11 @@ use reth_db::{ transaction::{DbTx, DbTxMut}, BlockNumberList, DatabaseError, }; -use reth_interfaces::provider::{ProviderResult, RootMismatch}; +use reth_interfaces::{ + p2p::headers::downloader::SyncTarget, + provider::{ProviderResult, RootMismatch}, + RethError, RethResult, +}; use reth_primitives::{ keccak256, revm::{ @@ -868,6 +872,57 @@ impl ChangeSetReader for DatabaseProvider { } } +impl HeaderSyncGapProvider for DatabaseProvider { + fn sync_gap( + &self, + mode: HeaderSyncMode, + highest_uninterrupted_block: BlockNumber, + ) -> RethResult { + // Create a cursor over canonical header hashes + let mut cursor = self.tx.cursor_read::()?; + let mut header_cursor = self.tx.cursor_read::()?; + + // Get head hash and reposition the cursor + let (head_num, head_hash) = cursor + .seek_exact(highest_uninterrupted_block)? + .ok_or_else(|| ProviderError::HeaderNotFound(highest_uninterrupted_block.into()))?; + + // Construct head + let (_, head) = header_cursor + .seek_exact(head_num)? + .ok_or_else(|| ProviderError::HeaderNotFound(head_num.into()))?; + let local_head = head.seal(head_hash); + + // Look up the next header + let next_header = cursor + .next()? 
+ .map(|(next_num, next_hash)| -> Result { + let (_, next) = header_cursor + .seek_exact(next_num)? + .ok_or_else(|| ProviderError::HeaderNotFound(next_num.into()))?; + Ok(next.seal(next_hash)) + }) + .transpose()?; + + // Decide the tip or error out on invalid input. + // If the next element found in the cursor is not the "expected" next block per our current + // checkpoint, then there is a gap in the database and we should start downloading in + // reverse from there. Else, it should use whatever the forkchoice state reports. + let target = match next_header { + Some(header) if highest_uninterrupted_block + 1 != header.number => { + SyncTarget::Gap(header) + } + None => match mode { + HeaderSyncMode::Tip(rx) => SyncTarget::Tip(*rx.borrow()), + HeaderSyncMode::Continuous => SyncTarget::TipNum(head_num + 1), + }, + _ => return Err(ProviderError::InconsistentHeaderGap.into()), + }; + + Ok(HeaderSyncGap { local_head, target }) + } +} + impl HeaderProvider for DatabaseProvider { fn header(&self, block_hash: &BlockHash) -> ProviderResult> { if let Some(num) = self.block_number(*block_hash)? { diff --git a/crates/storage/provider/src/traits/header_sync_gap.rs b/crates/storage/provider/src/traits/header_sync_gap.rs new file mode 100644 index 0000000000000..576a26a9e8c74 --- /dev/null +++ b/crates/storage/provider/src/traits/header_sync_gap.rs @@ -0,0 +1,50 @@ +use auto_impl::auto_impl; +use reth_interfaces::{p2p::headers::downloader::SyncTarget, RethResult}; +use reth_primitives::{BlockHashOrNumber, BlockNumber, SealedHeader, B256}; +use tokio::sync::watch; + +/// The header sync mode. +#[derive(Clone, Debug)] +pub enum HeaderSyncMode { + /// A sync mode in which the stage continuously requests the downloader for + /// next blocks. + Continuous, + /// A sync mode in which the stage polls the receiver for the next tip + /// to download from. + Tip(watch::Receiver), +} + +/// Represents a gap to sync: from `local_head` to `target` +#[derive(Clone, Debug)] +pub struct HeaderSyncGap { + /// The local head block. Represents lower bound of sync range. + pub local_head: SealedHeader, + + /// The sync target. Represents upper bound of sync range. + pub target: SyncTarget, +} + +impl HeaderSyncGap { + /// Returns `true` if the gap from the head to the target was closed + #[inline] + pub fn is_closed(&self) -> bool { + match self.target.tip() { + BlockHashOrNumber::Hash(hash) => self.local_head.hash() == hash, + BlockHashOrNumber::Number(num) => self.local_head.number == num, + } + } +} + +/// Client trait for determining the current headers sync gap. +#[auto_impl(&, Arc)] +pub trait HeaderSyncGapProvider: Send + Sync { + /// Find a current sync gap for the headers depending on the [HeaderSyncMode] and the last + /// uninterrupted block number. Last uninterrupted block represents the block number before + /// which there are no gaps. It's up to the caller to ensure that last uninterrupted block is + /// determined correctly. 
+ fn sync_gap( + &self, + mode: HeaderSyncMode, + highest_uninterrupted_block: BlockNumber, + ) -> RethResult; +} diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index 8134a19613af6..64f806f5f2b20 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -27,6 +27,9 @@ pub use chain_info::CanonChainTracker; mod header; pub use header::HeaderProvider; +mod header_sync_gap; +pub use header_sync_gap::{HeaderSyncGap, HeaderSyncGapProvider, HeaderSyncMode}; + mod receipts; pub use receipts::{ReceiptProvider, ReceiptProviderIdExt}; diff --git a/testing/ef-tests/src/cases/blockchain_test.rs b/testing/ef-tests/src/cases/blockchain_test.rs index 5d9a4bf868d40..d775550295f44 100644 --- a/testing/ef-tests/src/cases/blockchain_test.rs +++ b/testing/ef-tests/src/cases/blockchain_test.rs @@ -111,8 +111,7 @@ impl Case for BlockchainTestCase { .expect("Could not build tokio RT") .block_on(async { // ignore error - let _ = - stage.execute(&provider, ExecInput { target, checkpoint: None }).await; + let _ = stage.execute(&provider, ExecInput { target, checkpoint: None }); }); } From 716222894d95a1a7c9773baabead99679fc73c21 Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Sat, 18 Nov 2023 00:23:09 +0100 Subject: [PATCH 36/77] chore: make Cursor iterators depend on the cursor's lifetime (#5479) --- crates/storage/libmdbx-rs/src/codec.rs | 30 +++------- crates/storage/libmdbx-rs/src/cursor.rs | 58 +++++++++----------- crates/storage/libmdbx-rs/src/environment.rs | 2 +- crates/storage/libmdbx-rs/src/transaction.rs | 2 +- 4 files changed, 37 insertions(+), 55 deletions(-) diff --git a/crates/storage/libmdbx-rs/src/codec.rs b/crates/storage/libmdbx-rs/src/codec.rs index 40509f533d85f..fc9f48856ec8d 100644 --- a/crates/storage/libmdbx-rs/src/codec.rs +++ b/crates/storage/libmdbx-rs/src/codec.rs @@ -3,11 +3,9 @@ use derive_more::*; use std::{borrow::Cow, slice}; /// Implement this to be able to decode data values -pub trait TableObject { +pub trait TableObject: Sized { /// Decodes the object from the given bytes. - fn decode(data_val: &[u8]) -> Result - where - Self: Sized; + fn decode(data_val: &[u8]) -> Result; /// Decodes the value directly from the given MDBX_val pointer. 
/// @@ -17,14 +15,13 @@ pub trait TableObject { #[doc(hidden)] unsafe fn decode_val( _: *const ffi::MDBX_txn, - data_val: &ffi::MDBX_val, + data_val: ffi::MDBX_val, ) -> Result where Self: Sized, { let s = slice::from_raw_parts(data_val.iov_base as *const u8, data_val.iov_len); - - TableObject::decode(s) + Self::decode(s) } } @@ -36,7 +33,7 @@ impl<'tx> TableObject for Cow<'tx, [u8]> { #[doc(hidden)] unsafe fn decode_val( _txn: *const ffi::MDBX_txn, - data_val: &ffi::MDBX_val, + data_val: ffi::MDBX_val, ) -> Result { let s = slice::from_raw_parts(data_val.iov_base as *const u8, data_val.iov_len); @@ -56,10 +53,7 @@ impl<'tx> TableObject for Cow<'tx, [u8]> { } impl TableObject for Vec { - fn decode(data_val: &[u8]) -> Result - where - Self: Sized, - { + fn decode(data_val: &[u8]) -> Result { Ok(data_val.to_vec()) } } @@ -71,7 +65,7 @@ impl TableObject for () { unsafe fn decode_val( _: *const ffi::MDBX_txn, - _: &ffi::MDBX_val, + _: ffi::MDBX_val, ) -> Result { Ok(()) } @@ -82,19 +76,13 @@ impl TableObject for () { pub struct ObjectLength(pub usize); impl TableObject for ObjectLength { - fn decode(data_val: &[u8]) -> Result - where - Self: Sized, - { + fn decode(data_val: &[u8]) -> Result { Ok(Self(data_val.len())) } } impl TableObject for [u8; LEN] { - fn decode(data_val: &[u8]) -> Result - where - Self: Sized, - { + fn decode(data_val: &[u8]) -> Result { if data_val.len() != LEN { return Err(Error::DecodeErrorLenDiff) } diff --git a/crates/storage/libmdbx-rs/src/cursor.rs b/crates/storage/libmdbx-rs/src/cursor.rs index 30765bc93e85c..a5cb2a3830a80 100644 --- a/crates/storage/libmdbx-rs/src/cursor.rs +++ b/crates/storage/libmdbx-rs/src/cursor.rs @@ -1,14 +1,3 @@ -use std::{borrow::Cow, fmt, marker::PhantomData, mem, ptr}; - -use libc::c_void; - -use ffi::{ - MDBX_cursor_op, MDBX_FIRST, MDBX_FIRST_DUP, MDBX_GET_BOTH, MDBX_GET_BOTH_RANGE, - MDBX_GET_CURRENT, MDBX_GET_MULTIPLE, MDBX_LAST, MDBX_LAST_DUP, MDBX_NEXT, MDBX_NEXT_DUP, - MDBX_NEXT_MULTIPLE, MDBX_NEXT_NODUP, MDBX_PREV, MDBX_PREV_DUP, MDBX_PREV_MULTIPLE, - MDBX_PREV_NODUP, MDBX_SET, MDBX_SET_KEY, MDBX_SET_LOWERBOUND, MDBX_SET_RANGE, -}; - use crate::{ error::{mdbx_result, Error, Result}, flags::*, @@ -16,6 +5,14 @@ use crate::{ transaction::{TransactionKind, RW}, TableObject, Transaction, }; +use ffi::{ + MDBX_cursor_op, MDBX_FIRST, MDBX_FIRST_DUP, MDBX_GET_BOTH, MDBX_GET_BOTH_RANGE, + MDBX_GET_CURRENT, MDBX_GET_MULTIPLE, MDBX_LAST, MDBX_LAST_DUP, MDBX_NEXT, MDBX_NEXT_DUP, + MDBX_NEXT_MULTIPLE, MDBX_NEXT_NODUP, MDBX_PREV, MDBX_PREV_DUP, MDBX_PREV_MULTIPLE, + MDBX_PREV_NODUP, MDBX_SET, MDBX_SET_KEY, MDBX_SET_LOWERBOUND, MDBX_SET_RANGE, +}; +use libc::c_void; +use std::{borrow::Cow, fmt, marker::PhantomData, mem, ptr}; /// A cursor for navigating the items within a database. pub struct Cursor @@ -60,22 +57,20 @@ where self.cursor } - /// Returns an Iterator over the raw key value slices - /// - /// Note: The lifetime ensures that the transaction is kept alive while entries are used - pub fn into_iter_slices<'cur>(self) -> IntoIter<'cur, K, Cow<'cur, [u8]>, Cow<'cur, [u8]>> { + /// Returns an iterator over the raw key value slices. + #[allow(clippy::needless_lifetimes)] + pub fn iter_slices<'a>(&'a self) -> IntoIter<'a, K, Cow<'a, [u8]>, Cow<'a, [u8]>> { self.into_iter() } - /// Returns an Iterator over key value pairs of the cursor - /// - /// Note: The lifetime ensures that the transaction is kept alive while entries are used + + /// Returns an iterator over database items. 
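
// With the bound moved to `TableObject: Sized`, implementations only write
// the slice decoder. A toy sketch for an assumed newtype, reusing the
// crate's `Error::DecodeErrorLenDiff` as the fixed-size array impl above does:
struct Counter(u64);

impl TableObject for Counter {
    fn decode(data_val: &[u8]) -> Result<Self, Error> {
        let bytes: [u8; 8] =
            data_val.try_into().map_err(|_| Error::DecodeErrorLenDiff)?;
        Ok(Self(u64::from_be_bytes(bytes)))
    }
    // `decode_val` keeps its default body, now taking `MDBX_val` by value.
}
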
#[allow(clippy::should_implement_trait)] - pub fn into_iter<'cur, Key, Value>(self) -> IntoIter<'cur, K, Key, Value> + pub fn into_iter(&self) -> IntoIter<'_, K, Key, Value> where Key: TableObject, Value: TableObject, { - IntoIter::new(self, MDBX_NEXT, MDBX_NEXT) + IntoIter::new(self.clone(), MDBX_NEXT, MDBX_NEXT) } /// Retrieves a key/data pair from the cursor. Depending on the cursor op, @@ -106,12 +101,12 @@ where let key_out = { // MDBX wrote in new key if key_ptr != key_val.iov_base { - Some(Key::decode_val::(txn, &key_val)?) + Some(Key::decode_val::(txn, key_val)?) } else { None } }; - let data_out = Value::decode_val::(txn, &data_val)?; + let data_out = Value::decode_val::(txn, data_val)?; Ok((key_out, data_out, v)) }) } @@ -335,16 +330,16 @@ where Ok(Some((found, k.unwrap(), v))) } - /// Iterate over database items. The iterator will begin with item next - /// after the cursor, and continue until the end of the database. For new - /// cursors, the iterator will begin with the first item in the database. + /// Returns an iterator over database items. + /// + /// The iterator will begin with item next after the cursor, and continue until the end of the + /// database. For new cursors, the iterator will begin with the first item in the database. /// /// For databases with duplicate data items ([DatabaseFlags::DUP_SORT]), the /// duplicate data items of each key will be returned before moving on to /// the next key. pub fn iter(&mut self) -> Iter<'_, K, Key, Value> where - Self: Sized, Key: TableObject, Value: TableObject, { @@ -358,7 +353,6 @@ where /// the next key. pub fn iter_start(&mut self) -> Iter<'_, K, Key, Value> where - Self: Sized, Key: TableObject, Value: TableObject, { @@ -536,7 +530,7 @@ where /// The next and subsequent operations to perform. 
next_op: ffi::MDBX_cursor_op, - _marker: PhantomData, + _marker: PhantomData<(&'cur (), Key, Value)>, }, } @@ -570,11 +564,11 @@ where cursor.txn.txn_execute(|txn| { match ffi::mdbx_cursor_get(cursor.cursor(), &mut key, &mut data, op) { ffi::MDBX_SUCCESS => { - let key = match Key::decode_val::(txn, &key) { + let key = match Key::decode_val::(txn, key) { Ok(v) => v, Err(e) => return Some(Err(e)), }; - let data = match Value::decode_val::(txn, &data) { + let data = match Value::decode_val::(txn, data) { Ok(v) => v, Err(e) => return Some(Err(e)), }; @@ -661,11 +655,11 @@ where cursor.txn.txn_execute(|txn| { match ffi::mdbx_cursor_get(cursor.cursor(), &mut key, &mut data, op) { ffi::MDBX_SUCCESS => { - let key = match Key::decode_val::(txn, &key) { + let key = match Key::decode_val::(txn, key) { Ok(v) => v, Err(e) => return Some(Err(e)), }; - let data = match Value::decode_val::(txn, &data) { + let data = match Value::decode_val::(txn, data) { Ok(v) => v, Err(e) => return Some(Err(e)), }; diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs index 2342373a27c60..0b83a243c0f48 100644 --- a/crates/storage/libmdbx-rs/src/environment.rs +++ b/crates/storage/libmdbx-rs/src/environment.rs @@ -208,7 +208,7 @@ impl Environment { let db = Database::freelist_db(); let cursor = txn.cursor(&db)?; - for result in cursor.into_iter_slices() { + for result in cursor.iter_slices() { let (_key, value) = result?; if value.len() < size_of::() { return Err(Error::Corrupted) diff --git a/crates/storage/libmdbx-rs/src/transaction.rs b/crates/storage/libmdbx-rs/src/transaction.rs index 1b102ad4ffe69..2330c3a3f8919 100644 --- a/crates/storage/libmdbx-rs/src/transaction.rs +++ b/crates/storage/libmdbx-rs/src/transaction.rs @@ -156,7 +156,7 @@ where self.txn_execute(|txn| unsafe { match ffi::mdbx_get(txn, dbi, &key_val, &mut data_val) { - ffi::MDBX_SUCCESS => Key::decode_val::(txn, &data_val).map(Some), + ffi::MDBX_SUCCESS => Key::decode_val::(txn, data_val).map(Some), ffi::MDBX_NOTFOUND => Ok(None), err_code => Err(Error::from_err_code(err_code)), } From 6ae86d5a96c0e9e99fc6ed7cce31d30997857fd5 Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Sat, 18 Nov 2023 00:55:30 +0100 Subject: [PATCH 37/77] chore: shrink ProviderError size (#5482) --- crates/interfaces/src/error.rs | 58 ++++++++++++------- crates/interfaces/src/provider.rs | 10 ++-- .../src/providers/state/historical.rs | 4 +- 3 files changed, 46 insertions(+), 26 deletions(-) diff --git a/crates/interfaces/src/error.rs b/crates/interfaces/src/error.rs index ef14d1211eb4f..58bef122b424f 100644 --- a/crates/interfaces/src/error.rs +++ b/crates/interfaces/src/error.rs @@ -1,3 +1,13 @@ +use crate::{ + blockchain_tree::error::{BlockchainTreeError, CanonicalError}, + consensus::ConsensusError, + db::DatabaseError, + executor::BlockExecutionError, + provider::ProviderError, +}; +use reth_network_api::NetworkError; +use reth_primitives::fs::FsPathError; + /// Result alias for [`RethError`]. 
pub type RethResult = Result; @@ -6,47 +16,55 @@ pub type RethResult = Result; #[allow(missing_docs)] pub enum RethError { #[error(transparent)] - Execution(#[from] crate::executor::BlockExecutionError), + Execution(#[from] BlockExecutionError), #[error(transparent)] - Consensus(#[from] crate::consensus::ConsensusError), + Consensus(#[from] ConsensusError), #[error(transparent)] - Database(#[from] crate::db::DatabaseError), + Database(#[from] DatabaseError), #[error(transparent)] - Provider(#[from] crate::provider::ProviderError), + Provider(#[from] ProviderError), #[error(transparent)] - Network(#[from] reth_network_api::NetworkError), + Network(#[from] NetworkError), #[error(transparent)] - Canonical(#[from] crate::blockchain_tree::error::CanonicalError), + Canonical(#[from] CanonicalError), #[error("{0}")] Custom(String), } -impl From for RethError { - fn from(error: crate::blockchain_tree::error::BlockchainTreeError) -> Self { - RethError::Canonical(error.into()) +impl From for RethError { + fn from(error: BlockchainTreeError) -> Self { + RethError::Canonical(CanonicalError::BlockchainTree(error)) } } -impl From for RethError { - fn from(err: reth_primitives::fs::FsPathError) -> Self { +impl From for RethError { + fn from(err: FsPathError) -> Self { RethError::Custom(err.to_string()) } } -// We don't want these types to be too large because they're used in a lot of places. -const _SIZE_ASSERTIONS: () = { - // Main error. - let _: [(); 64] = [(); std::mem::size_of::()]; +// Some types are used a lot. Make sure they don't unintentionally get bigger. +#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] +mod size_asserts { + use super::*; - // Biggest variant. - let _: [(); 64] = [(); std::mem::size_of::()]; + macro_rules! static_assert_size { + ($t:ty, $sz:expr) => { + const _: [(); $sz] = [(); std::mem::size_of::<$t>()]; + }; + } - // Other common types. - let _: [(); 16] = [(); std::mem::size_of::()]; -}; + static_assert_size!(RethError, 56); + static_assert_size!(BlockExecutionError, 48); + static_assert_size!(ConsensusError, 48); + static_assert_size!(DatabaseError, 16); + static_assert_size!(ProviderError, 48); + static_assert_size!(NetworkError, 0); + static_assert_size!(CanonicalError, 48); +} diff --git a/crates/interfaces/src/provider.rs b/crates/interfaces/src/provider.rs index c2137b4b7134f..9fad40efd8b30 100644 --- a/crates/interfaces/src/provider.rs +++ b/crates/interfaces/src/provider.rs @@ -29,19 +29,21 @@ pub enum ProviderError { /// A block body is missing. #[error("block meta not found for block #{0}")] BlockBodyIndicesNotFound(BlockNumber), - /// The transition id was found for the given address and storage key, but the changeset was + /// The transition ID was found for the given address and storage key, but the changeset was /// not found. - #[error("storage ChangeSet address: ({address} key: {storage_key:?}) for block #{block_number} does not exist")] + #[error("storage change set for address {address} and key {storage_key} at block #{block_number} does not exist")] StorageChangesetNotFound { /// The block number found for the address and storage key. block_number: BlockNumber, /// The account address. address: Address, /// The storage key. - storage_key: B256, + // NOTE: This is a Box only because otherwise this variant is 16 bytes larger than the + // second largest (which uses `BlockHashOrNumber`). + storage_key: Box, }, /// The block number was found for the given address, but the changeset was not found. 
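
// The same trick generalizes: box the one oversized field so the whole enum
// shrinks, then pin the result with a size assertion. A standalone sketch of
// the pattern, with sizes assuming a 64-bit target:
#[derive(Debug)]
enum SmallError {
    // Boxed: 8 bytes of pointer instead of 128 bytes inline.
    BigPayload(Box<[u8; 128]>),
    Code(u16),
}

#[cfg(target_pointer_width = "64")]
const _: [(); 16] = [(); std::mem::size_of::<SmallError>()];
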
- #[error("account {address} ChangeSet for block #{block_number} does not exist")] + #[error("account change set for address {address} at block #{block_number} does not exist")] AccountChangesetNotFound { /// Block number found for the address. block_number: BlockNumber, diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index c76ea75d53e93..dbdba8f988cae 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -217,10 +217,10 @@ impl<'b, TX: DbTx> StateProvider for HistoricalStateProviderRef<'b, TX> { .cursor_dup_read::()? .seek_by_key_subkey((changeset_block_number, address).into(), storage_key)? .filter(|entry| entry.key == storage_key) - .ok_or(ProviderError::StorageChangesetNotFound { + .ok_or_else(|| ProviderError::StorageChangesetNotFound { block_number: changeset_block_number, address, - storage_key, + storage_key: Box::new(storage_key), })? .value, )), From e34aec23caf9394e94ea104d73d03f2c2d040b0f Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Sat, 18 Nov 2023 03:16:31 +0100 Subject: [PATCH 38/77] chore: clean up `Self: Sized` bounds (#5481) --- crates/primitives/src/account.rs | 5 +--- crates/primitives/src/stage/checkpoints.rs | 10 ++------ crates/primitives/src/storage.rs | 5 +--- .../primitives/src/trie/hash_builder/state.rs | 5 +--- .../primitives/src/trie/hash_builder/value.rs | 5 +--- crates/primitives/src/trie/mask.rs | 5 +--- crates/primitives/src/trie/nibbles.rs | 5 +--- crates/primitives/src/trie/nodes/branch.rs | 5 +--- crates/primitives/src/trie/storage.rs | 5 +--- crates/primitives/src/trie/subnode.rs | 5 +--- crates/rpc/rpc/src/result.rs | 3 +-- crates/storage/codecs/src/lib.rs | 12 +++------- crates/storage/db/src/abstraction/mock.rs | 20 ++++------------ .../db/src/implementation/mdbx/cursor.rs | 24 +++++-------------- .../storage/db/src/tables/models/accounts.rs | 5 +--- crates/storage/libmdbx-rs/src/codec.rs | 5 +--- 16 files changed, 27 insertions(+), 97 deletions(-) diff --git a/crates/primitives/src/account.rs b/crates/primitives/src/account.rs index ab6761e415348..94d2458286977 100644 --- a/crates/primitives/src/account.rs +++ b/crates/primitives/src/account.rs @@ -98,10 +98,7 @@ impl Compact for Bytecode { len + self.0.bytecode.len() + 4 } - fn from_compact(mut buf: &[u8], _: usize) -> (Self, &[u8]) - where - Self: Sized, - { + fn from_compact(mut buf: &[u8], _: usize) -> (Self, &[u8]) { let len = buf.read_u32::().expect("could not read bytecode length"); let bytes = Bytes::from(buf.copy_to_bytes(len as usize)); let variant = buf.read_u8().expect("could not read bytecode variant"); diff --git a/crates/primitives/src/stage/checkpoints.rs b/crates/primitives/src/stage/checkpoints.rs index 21af9a21284ee..0304f0f727079 100644 --- a/crates/primitives/src/stage/checkpoints.rs +++ b/crates/primitives/src/stage/checkpoints.rs @@ -71,10 +71,7 @@ impl Compact for MerkleCheckpoint { len } - fn from_compact(mut buf: &[u8], _len: usize) -> (Self, &[u8]) - where - Self: Sized, - { + fn from_compact(mut buf: &[u8], _len: usize) -> (Self, &[u8]) { let target_block = buf.get_u64(); let last_account_key = B256::from_slice(&buf[..32]); @@ -286,10 +283,7 @@ macro_rules! 
stage_unit_checkpoints { } } - fn from_compact(buf: &[u8], _len: usize) -> (Self, &[u8]) - where - Self: Sized, - { + fn from_compact(buf: &[u8], _len: usize) -> (Self, &[u8]) { match buf[0] { $( $index => { diff --git a/crates/primitives/src/storage.rs b/crates/primitives/src/storage.rs index 91bdce470477b..1c9157fbd8889 100644 --- a/crates/primitives/src/storage.rs +++ b/crates/primitives/src/storage.rs @@ -40,10 +40,7 @@ impl Compact for StorageEntry { self.value.to_compact(buf) + 32 } - fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) - where - Self: Sized, - { + fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { let key = B256::from_slice(&buf[..32]); let (value, out) = U256::from_compact(&buf[32..], len - 32); (Self { key, value }, out) diff --git a/crates/primitives/src/trie/hash_builder/state.rs b/crates/primitives/src/trie/hash_builder/state.rs index fef54726bbae7..c714dedd0bb40 100644 --- a/crates/primitives/src/trie/hash_builder/state.rs +++ b/crates/primitives/src/trie/hash_builder/state.rs @@ -68,10 +68,7 @@ impl Compact for HashBuilderState { len } - fn from_compact(buf: &[u8], _len: usize) -> (Self, &[u8]) - where - Self: Sized, - { + fn from_compact(buf: &[u8], _len: usize) -> (Self, &[u8]) { let (key, mut buf) = Vec::from_compact(buf, 0); let stack_len = buf.get_u16() as usize; diff --git a/crates/primitives/src/trie/hash_builder/value.rs b/crates/primitives/src/trie/hash_builder/value.rs index 45d4c0ce1c9fb..fed85e680cf98 100644 --- a/crates/primitives/src/trie/hash_builder/value.rs +++ b/crates/primitives/src/trie/hash_builder/value.rs @@ -29,10 +29,7 @@ impl Compact for HashBuilderValue { } } - fn from_compact(buf: &[u8], _len: usize) -> (Self, &[u8]) - where - Self: Sized, - { + fn from_compact(buf: &[u8], _len: usize) -> (Self, &[u8]) { match buf[0] { 0 => { let (hash, buf) = B256::from_compact(&buf[1..], 32); diff --git a/crates/primitives/src/trie/mask.rs b/crates/primitives/src/trie/mask.rs index d54f239ad0a64..152be03c936d5 100644 --- a/crates/primitives/src/trie/mask.rs +++ b/crates/primitives/src/trie/mask.rs @@ -72,10 +72,7 @@ impl Compact for TrieMask { 2 } - fn from_compact(mut buf: &[u8], _len: usize) -> (Self, &[u8]) - where - Self: Sized, - { + fn from_compact(mut buf: &[u8], _len: usize) -> (Self, &[u8]) { let mask = buf.get_u16(); (Self(mask), buf) } diff --git a/crates/primitives/src/trie/nibbles.rs b/crates/primitives/src/trie/nibbles.rs index a6ad01b241ab1..876b9ff9a6525 100644 --- a/crates/primitives/src/trie/nibbles.rs +++ b/crates/primitives/src/trie/nibbles.rs @@ -41,10 +41,7 @@ impl Compact for StoredNibblesSubKey { 64 + 1 } - fn from_compact(buf: &[u8], _len: usize) -> (Self, &[u8]) - where - Self: Sized, - { + fn from_compact(buf: &[u8], _len: usize) -> (Self, &[u8]) { let len = buf[64] as usize; let inner = Vec::from(&buf[..len]).into(); (Self(StoredNibbles { inner }), &buf[65..]) diff --git a/crates/primitives/src/trie/nodes/branch.rs b/crates/primitives/src/trie/nodes/branch.rs index 073c2e125f69e..2771adfa40c8c 100644 --- a/crates/primitives/src/trie/nodes/branch.rs +++ b/crates/primitives/src/trie/nodes/branch.rs @@ -163,10 +163,7 @@ impl Compact for BranchNodeCompact { buf_size } - fn from_compact(buf: &[u8], _len: usize) -> (Self, &[u8]) - where - Self: Sized, - { + fn from_compact(buf: &[u8], _len: usize) -> (Self, &[u8]) { let hash_len = B256::len_bytes(); // Assert the buffer is long enough to contain the masks and the hashes. 
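
// Declaring the bound once on the trait is what lets every `from_compact`
// above drop its `where Self: Sized` clause. A minimal sketch of the
// pattern; the trade-off is that such a trait can no longer be used as a
// trait object, which these codec traits never were:
trait Decode: Sized {
    fn decode(buf: &[u8]) -> Option<Self>;

    // Default methods may now return `Self` without restating the bound.
    fn decode_or(buf: &[u8], fallback: Self) -> Self {
        Self::decode(buf).unwrap_or(fallback)
    }
}
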
diff --git a/crates/primitives/src/trie/storage.rs b/crates/primitives/src/trie/storage.rs index bbb5c5bc4206f..33f68fc05d1b9 100644 --- a/crates/primitives/src/trie/storage.rs +++ b/crates/primitives/src/trie/storage.rs @@ -24,10 +24,7 @@ impl Compact for StorageTrieEntry { nibbles_len + node_len } - fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) - where - Self: Sized, - { + fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { let (nibbles, buf) = StoredNibblesSubKey::from_compact(buf, 33); let (node, buf) = BranchNodeCompact::from_compact(buf, len - 33); let this = Self { nibbles, node }; diff --git a/crates/primitives/src/trie/subnode.rs b/crates/primitives/src/trie/subnode.rs index 232a67279220b..e6976cf13a2bc 100644 --- a/crates/primitives/src/trie/subnode.rs +++ b/crates/primitives/src/trie/subnode.rs @@ -46,10 +46,7 @@ impl Compact for StoredSubNode { len } - fn from_compact(mut buf: &[u8], _len: usize) -> (Self, &[u8]) - where - Self: Sized, - { + fn from_compact(mut buf: &[u8], _len: usize) -> (Self, &[u8]) { let key_len = buf.get_u16() as usize; let key = Vec::from(&buf[..key_len]); buf.advance(key_len); diff --git a/crates/rpc/rpc/src/result.rs b/crates/rpc/rpc/src/result.rs index 43ceb5d949453..c37ced80179e6 100644 --- a/crates/rpc/rpc/src/result.rs +++ b/crates/rpc/rpc/src/result.rs @@ -8,12 +8,11 @@ use reth_rpc_types::engine::PayloadError; use std::fmt::Display; /// Helper trait to easily convert various `Result` types into [`RpcResult`] -pub trait ToRpcResult { +pub trait ToRpcResult: Sized { /// Converts the error of the [Result] to an [RpcResult] via the `Err` [Display] impl. fn to_rpc_result(self) -> RpcResult where Err: Display, - Self: Sized, { self.map_internal_err(|err| err.to_string()) } diff --git a/crates/storage/codecs/src/lib.rs b/crates/storage/codecs/src/lib.rs index fee674a23abf1..5b7a5ee1610e2 100644 --- a/crates/storage/codecs/src/lib.rs +++ b/crates/storage/codecs/src/lib.rs @@ -31,7 +31,7 @@ use revm_primitives::{ /// Regarding the `specialized_to/from_compact` methods: Mainly used as a workaround for not being /// able to specialize an impl over certain types like `Vec`/`Option` where `T` is a fixed /// size array like `Vec`. -pub trait Compact { +pub trait Compact: Sized { /// Takes a buffer which can be written to. *Ideally*, it returns the length written to. fn to_compact(self, buf: &mut B) -> usize where @@ -43,24 +43,18 @@ pub trait Compact { /// `len` can either be the `buf` remaining length, or the length of the compacted type. /// /// It will panic, if `len` is smaller than `buf.len()`. - fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) - where - Self: Sized; + fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]); /// "Optional": If there's no good reason to use it, don't. fn specialized_to_compact(self, buf: &mut B) -> usize where B: bytes::BufMut + AsMut<[u8]>, - Self: Sized, { self.to_compact(buf) } /// "Optional": If there's no good reason to use it, don't. 
- fn specialized_from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) - where - Self: Sized, - { + fn specialized_from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { Self::from_compact(buf, len) } } diff --git a/crates/storage/db/src/abstraction/mock.rs b/crates/storage/db/src/abstraction/mock.rs index 7377970080853..c094eb944165c 100644 --- a/crates/storage/db/src/abstraction/mock.rs +++ b/crates/storage/db/src/abstraction/mock.rs @@ -147,30 +147,21 @@ impl DbCursorRO for CursorMock { todo!() } - fn walk(&mut self, _start_key: Option) -> Result, DatabaseError> - where - Self: Sized, - { + fn walk(&mut self, _start_key: Option) -> Result, DatabaseError> { todo!() } fn walk_range( &mut self, _range: impl RangeBounds, - ) -> Result, DatabaseError> - where - Self: Sized, - { + ) -> Result, DatabaseError> { todo!() } fn walk_back( &mut self, _start_key: Option, - ) -> Result, DatabaseError> - where - Self: Sized, - { + ) -> Result, DatabaseError> { todo!() } } @@ -200,10 +191,7 @@ impl DbDupCursorRO for CursorMock { &mut self, _key: Option<::Key>, _subkey: Option<::SubKey>, - ) -> Result, DatabaseError> - where - Self: Sized, - { + ) -> Result, DatabaseError> { todo!() } } diff --git a/crates/storage/db/src/implementation/mdbx/cursor.rs b/crates/storage/db/src/implementation/mdbx/cursor.rs index 3b14f0f427f61..63017be2d5242 100644 --- a/crates/storage/db/src/implementation/mdbx/cursor.rs +++ b/crates/storage/db/src/implementation/mdbx/cursor.rs @@ -107,10 +107,7 @@ impl DbCursorRO for Cursor { decode!(self.inner.get_current()) } - fn walk(&mut self, start_key: Option) -> Result, DatabaseError> - where - Self: Sized, - { + fn walk(&mut self, start_key: Option) -> Result, DatabaseError> { let start = if let Some(start_key) = start_key { self.inner .set_range(start_key.encode().as_ref()) @@ -126,10 +123,7 @@ impl DbCursorRO for Cursor { fn walk_range( &mut self, range: impl RangeBounds, - ) -> Result, DatabaseError> - where - Self: Sized, - { + ) -> Result, DatabaseError> { let start = match range.start_bound().cloned() { Bound::Included(key) => self.inner.set_range(key.encode().as_ref()), Bound::Excluded(_key) => { @@ -146,10 +140,7 @@ impl DbCursorRO for Cursor { fn walk_back( &mut self, start_key: Option, - ) -> Result, DatabaseError> - where - Self: Sized, - { + ) -> Result, DatabaseError> { let start = if let Some(start_key) = start_key { decode!(self.inner.set_range(start_key.encode().as_ref())) } else { @@ -207,16 +198,14 @@ impl DbDupCursorRO for Cursor { let start = match (key, subkey) { (Some(key), Some(subkey)) => { // encode key and decode it after. - let key = key.encode().as_ref().to_vec(); - + let key: Vec = key.encode().into(); self.inner .get_both_range(key.as_ref(), subkey.encode().as_ref()) .map_err(|e| DatabaseError::Read(e.into()))? .map(|val| decoder::((Cow::Owned(key), val))) } (Some(key), None) => { - let key = key.encode().as_ref().to_vec(); - + let key: Vec = key.encode().into(); self.inner .set(key.as_ref()) .map_err(|e| DatabaseError::Read(e.into()))? @@ -224,8 +213,7 @@ impl DbDupCursorRO for Cursor { } (None, Some(subkey)) => { if let Some((key, _)) = self.first()? { - let key = key.encode().as_ref().to_vec(); - + let key: Vec = key.encode().into(); self.inner .get_both_range(key.as_ref(), subkey.encode().as_ref()) .map_err(|e| DatabaseError::Read(e.into()))? 
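The mechanical cleanup in this patch is enabled by a single line in `crates/storage/codecs/src/lib.rs`: hoisting `Sized` from per-method `where` clauses onto the trait itself. A minimal before/after sketch, assuming nothing beyond the standard library:

```rust
// Before: every method that returns `Self` by value repeats the bound. This
// keeps the trait object-safe; the bounded methods are simply unavailable on
// `dyn CompactBefore`.
trait CompactBefore {
    fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8])
    where
        Self: Sized;
}

// After: the bound is stated once and every implementor drops the
// boilerplate. The trade-off is that the trait is no longer object-safe.
trait CompactAfter: Sized {
    fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]);
}
```

Since `Compact` is only ever implemented for concrete table value types and never used as a trait object, giving up object safety costs nothing here.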
diff --git a/crates/storage/db/src/tables/models/accounts.rs b/crates/storage/db/src/tables/models/accounts.rs index 57533f57783e4..3fe6122e23995 100644 --- a/crates/storage/db/src/tables/models/accounts.rs +++ b/crates/storage/db/src/tables/models/accounts.rs @@ -41,10 +41,7 @@ impl Compact for AccountBeforeTx { acc_len + 20 } - fn from_compact(mut buf: &[u8], len: usize) -> (Self, &[u8]) - where - Self: Sized, - { + fn from_compact(mut buf: &[u8], len: usize) -> (Self, &[u8]) { let address = Address::from_slice(&buf[..20]); buf.advance(20); diff --git a/crates/storage/libmdbx-rs/src/codec.rs b/crates/storage/libmdbx-rs/src/codec.rs index fc9f48856ec8d..024c869cfb63d 100644 --- a/crates/storage/libmdbx-rs/src/codec.rs +++ b/crates/storage/libmdbx-rs/src/codec.rs @@ -16,10 +16,7 @@ pub trait TableObject: Sized { unsafe fn decode_val( _: *const ffi::MDBX_txn, data_val: ffi::MDBX_val, - ) -> Result - where - Self: Sized, - { + ) -> Result { let s = slice::from_raw_parts(data_val.iov_base as *const u8, data_val.iov_len); Self::decode(s) } From 4555dc1fe282437dd593a7d5e373aeadd43f0c29 Mon Sep 17 00:00:00 2001 From: DoTheBestToGetTheBest <146037313+DoTheBestToGetTheBest@users.noreply.github.com> Date: Fri, 17 Nov 2023 23:35:02 -0800 Subject: [PATCH 39/77] feat(rpc-testing-utils) : eth node testing (#5475) --- crates/rpc/rpc-testing-util/tests/it/trace.rs | 27 ++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/crates/rpc/rpc-testing-util/tests/it/trace.rs b/crates/rpc/rpc-testing-util/tests/it/trace.rs index f3d4ee7e7b3c7..c2192b0f1e63d 100644 --- a/crates/rpc/rpc-testing-util/tests/it/trace.rs +++ b/crates/rpc/rpc-testing-util/tests/it/trace.rs @@ -1,7 +1,9 @@ use futures::StreamExt; use jsonrpsee::http_client::HttpClientBuilder; use reth_rpc_api_testing_util::{trace::TraceApiExt, utils::parse_env_url}; -use reth_rpc_types::trace::{filter::TraceFilter, parity::TraceType}; +use reth_rpc_types::trace::{ + filter::TraceFilter, parity::TraceType, tracerequest::TraceCallRequest, +}; use std::{collections::HashSet, time::Instant}; /// This is intended to be run locally against a running node. 
/// @@ -67,3 +69,26 @@ async fn trace_filters() { println!("Duration since test start: {:?}", start_time.elapsed()); } } + +#[tokio::test(flavor = "multi_thread")] +#[ignore] +async fn trace_call() { + let url = parse_env_url("RETH_RPC_TEST_NODE_URL").unwrap(); + let client = HttpClientBuilder::default().build(url).unwrap(); + let trace_call_request = TraceCallRequest::default(); + let mut stream = client.trace_call_stream(trace_call_request); + let start_time = Instant::now(); + + while let Some(result) = stream.next().await { + match result { + Ok(trace_result) => { + println!("Trace Result: {:?}", trace_result); + } + Err((error, request)) => { + eprintln!("Error for request {:?}: {:?}", request, error); + } + } + } + + println!("Completed in {:?}", start_time.elapsed()); +} From 6ded64355e7a45593a9f58cf5c3b1583ed8b73c0 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Sat, 18 Nov 2023 00:14:14 -0800 Subject: [PATCH 40/77] pipeline: error on missing buffer in online stages (#5480) --- crates/stages/src/error.rs | 5 +++++ crates/stages/src/stages/bodies.rs | 16 +++++++++------- crates/stages/src/stages/headers.rs | 18 ++++++++++-------- 3 files changed, 24 insertions(+), 15 deletions(-) diff --git a/crates/stages/src/error.rs b/crates/stages/src/error.rs index 8795868d080cb..4a4df0d269878 100644 --- a/crates/stages/src/error.rs +++ b/crates/stages/src/error.rs @@ -62,6 +62,10 @@ pub enum StageError { /// Invalid checkpoint passed to the stage #[error("invalid stage checkpoint: {0}")] StageCheckpoint(u64), + /// Missing download buffer on stage execution. + /// Returned if stage execution was called without polling for readiness. + #[error("missing download buffer")] + MissingDownloadBuffer, /// Download channel closed #[error("download channel closed")] ChannelClosed, @@ -97,6 +101,7 @@ impl StageError { StageError::Download(_) | StageError::DatabaseIntegrity(_) | StageError::StageCheckpoint(_) | + StageError::MissingDownloadBuffer | StageError::MissingSyncGap | StageError::ChannelClosed | StageError::Fatal(_) diff --git a/crates/stages/src/stages/bodies.rs b/crates/stages/src/stages/bodies.rs index cb908ebf95a9e..fe9f583b9aff6 100644 --- a/crates/stages/src/stages/bodies.rs +++ b/crates/stages/src/stages/bodies.rs @@ -50,13 +50,13 @@ pub struct BodyStage { /// The body downloader. downloader: D, /// Block response buffer. - buffer: Vec, + buffer: Option>, } impl BodyStage { /// Create new bodies stage from downloader. pub fn new(downloader: D) -> Self { - Self { downloader, buffer: Vec::new() } + Self { downloader, buffer: None } } } @@ -71,7 +71,7 @@ impl Stage for BodyStage { cx: &mut Context<'_>, input: ExecInput, ) -> Poll> { - if input.target_reached() || !self.buffer.is_empty() { + if input.target_reached() || self.buffer.is_some() { return Poll::Ready(Ok(())) } @@ -85,7 +85,7 @@ impl Stage for BodyStage { // is a fatal error to prevent the pipeline from running forever. 
let response = match maybe_next_result { Some(Ok(downloaded)) => { - self.buffer.extend(downloaded); + self.buffer = Some(downloaded); Ok(()) } Some(Err(err)) => Err(err.into()), @@ -118,9 +118,11 @@ impl Stage for BodyStage { let mut next_tx_num = tx_cursor.last()?.map(|(id, _)| id + 1).unwrap_or_default(); debug!(target: "sync::stages::bodies", stage_progress = from_block, target = to_block, start_tx_id = next_tx_num, "Commencing sync"); - trace!(target: "sync::stages::bodies", bodies_len = self.buffer.len(), "Writing blocks"); + + let buffer = self.buffer.take().ok_or(StageError::MissingDownloadBuffer)?; + trace!(target: "sync::stages::bodies", bodies_len = buffer.len(), "Writing blocks"); let mut highest_block = from_block; - for response in self.buffer.drain(..) { + for response in buffer { // Write block let block_number = response.block_number(); @@ -186,7 +188,7 @@ impl Stage for BodyStage { provider: &DatabaseProviderRW<'_, &DB>, input: UnwindInput, ) -> Result { - self.buffer.clear(); + self.buffer.take(); let tx = provider.tx_ref(); // Cursors to unwind bodies, ommers diff --git a/crates/stages/src/stages/headers.rs b/crates/stages/src/stages/headers.rs index 9ad06a198fc5a..40ffa8d9461ae 100644 --- a/crates/stages/src/stages/headers.rs +++ b/crates/stages/src/stages/headers.rs @@ -44,7 +44,7 @@ pub struct HeaderStage { /// Current sync gap. sync_gap: Option, /// Header buffer. - buffer: Vec, + buffer: Option>, } // === impl HeaderStage === @@ -55,7 +55,7 @@ where { /// Create a new header stage pub fn new(database: Provider, downloader: Downloader, mode: HeaderSyncMode) -> Self { - Self { provider: database, downloader, mode, sync_gap: None, buffer: Vec::new() } + Self { provider: database, downloader, mode, sync_gap: None, buffer: None } } fn is_stage_done( @@ -126,7 +126,8 @@ where let current_checkpoint = input.checkpoint(); // Return if buffer already has some items. - if !self.buffer.is_empty() { + if self.buffer.is_some() { + // TODO: review trace!( target: "sync::stages::headers", checkpoint = %current_checkpoint.block_number, @@ -159,7 +160,7 @@ where let result = match ready!(self.downloader.poll_next_unpin(cx)) { Some(Ok(headers)) => { info!(target: "sync::stages::headers", len = headers.len(), "Received headers"); - self.buffer.extend(headers); + self.buffer = Some(headers); Ok(()) } Some(Err(HeadersDownloaderError::DetachedHead { local_head, header, error })) => { @@ -179,15 +180,16 @@ where input: ExecInput, ) -> Result { let current_checkpoint = input.checkpoint(); - if self.buffer.is_empty() { + + let gap = self.sync_gap.clone().ok_or(StageError::MissingSyncGap)?; + if gap.is_closed() { return Ok(ExecOutput::done(current_checkpoint)) } - let gap = self.sync_gap.clone().ok_or(StageError::MissingSyncGap)?; let local_head = gap.local_head.number; let tip = gap.target.tip(); - let downloaded_headers = std::mem::take(&mut self.buffer); + let downloaded_headers = self.buffer.take().ok_or(StageError::MissingDownloadBuffer)?; let tip_block_number = match tip { // If tip is hash and it equals to the first downloaded header's hash, we can use // the block number of this header as tip. 
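Both stages now follow the same buffer protocol: readiness polling fills an `Option` at most once, and execution consumes it or fails loudly with the new `MissingDownloadBuffer` error instead of silently iterating an empty `Vec`. A minimal sketch of the pattern, detached from the stage traits:

```rust
struct DownloadStage<T> {
    buffer: Option<Vec<T>>,
}

impl<T> DownloadStage<T> {
    /// Called from readiness polling: store the batch, never extend it.
    fn on_downloaded(&mut self, batch: Vec<T>) {
        self.buffer = Some(batch);
    }

    /// Called from execution: take the batch or surface the protocol violation.
    fn take_buffer(&mut self) -> Result<Vec<T>, &'static str> {
        self.buffer.take().ok_or("missing download buffer")
    }
}
```

`Option::take` also makes unwinding a one-liner: `self.buffer.take();` resets the state with no separate `clear()` call, which is exactly what the unwind hunks above switch to.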
@@ -280,7 +282,7 @@ where provider: &DatabaseProviderRW<'_, &DB>, input: UnwindInput, ) -> Result { - self.buffer.clear(); + self.buffer.take(); self.sync_gap.take(); provider.unwind_table_by_walker::( From d05dda72dd910a95ca99c5624b548fd363dc62e5 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Sat, 18 Nov 2023 00:16:19 -0800 Subject: [PATCH 41/77] chore(downloader): replace database with header provider (#5472) --- Cargo.lock | 1 + bin/reth/src/chain/import.rs | 6 +- bin/reth/src/debug_cmd/execution.rs | 6 +- bin/reth/src/node/mod.rs | 6 +- bin/reth/src/stage/run.rs | 8 +- .../consensus/beacon/src/engine/test_utils.rs | 7 +- crates/interfaces/src/p2p/error.rs | 6 +- crates/net/downloaders/Cargo.toml | 5 +- crates/net/downloaders/src/bodies/bodies.rs | 122 +++++++++--------- crates/net/downloaders/src/bodies/task.rs | 15 ++- .../downloaders/src/test_utils/file_client.rs | 7 +- crates/stages/src/lib.rs | 2 +- crates/stages/src/stages/bodies.rs | 41 +++--- 13 files changed, 126 insertions(+), 106 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 124e6245b2d2b..463dccf88c0e7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5890,6 +5890,7 @@ dependencies = [ "reth-interfaces", "reth-metrics", "reth-primitives", + "reth-provider", "reth-tasks", "reth-tracing", "tempfile", diff --git a/bin/reth/src/chain/import.rs b/bin/reth/src/chain/import.rs index 572f8c0ee2f8b..7478aad47f174 100644 --- a/bin/reth/src/chain/import.rs +++ b/bin/reth/src/chain/import.rs @@ -147,7 +147,11 @@ impl ImportCommand { .into_task(); let body_downloader = BodiesDownloaderBuilder::from(config.stages.bodies) - .build(file_client.clone(), consensus.clone(), db.clone()) + .build( + file_client.clone(), + consensus.clone(), + ProviderFactory::new(db.clone(), self.chain.clone()), + ) .into_task(); let (tip_tx, tip_rx) = watch::channel(B256::ZERO); diff --git a/bin/reth/src/debug_cmd/execution.rs b/bin/reth/src/debug_cmd/execution.rs index 83c5549451ca7..c248819a0ea27 100644 --- a/bin/reth/src/debug_cmd/execution.rs +++ b/bin/reth/src/debug_cmd/execution.rs @@ -102,7 +102,11 @@ impl Command { .into_task_with(task_executor); let body_downloader = BodiesDownloaderBuilder::from(config.stages.bodies) - .build(client, Arc::clone(&consensus), db.clone()) + .build( + client, + Arc::clone(&consensus), + ProviderFactory::new(db.clone(), self.chain.clone()), + ) .into_task_with(task_executor); let stage_conf = &config.stages; diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index a144a3bcea662..da07c17f1e804 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -617,7 +617,11 @@ impl NodeCommand { .into_task_with(task_executor); let body_downloader = BodiesDownloaderBuilder::from(config.stages.bodies) - .build(client, Arc::clone(&consensus), db.clone()) + .build( + client, + Arc::clone(&consensus), + ProviderFactory::new(db.clone(), self.chain.clone()), + ) .into_task_with(task_executor); let pipeline = self diff --git a/bin/reth/src/stage/run.rs b/bin/reth/src/stage/run.rs index 5eaeaf361ad8a..589bcbf7d0290 100644 --- a/bin/reth/src/stage/run.rs +++ b/bin/reth/src/stage/run.rs @@ -163,6 +163,9 @@ impl Command { let default_peers_path = data_dir.known_peers_path(); + let provider_factory = + Arc::new(ProviderFactory::new(db.clone(), self.chain.clone())); + let network = self .network .network_config( @@ -171,7 +174,7 @@ impl Command { p2p_secret_key, default_peers_path, ) - .build(Arc::new(ProviderFactory::new(db.clone(), self.chain.clone()))) + .build(provider_factory.clone()) .start_network() 
.await?; let fetch_client = Arc::new(network.fetch_client().await?); @@ -187,9 +190,8 @@ impl Command { config.stages.bodies.downloader_min_concurrent_requests..= config.stages.bodies.downloader_max_concurrent_requests, ) - .build(fetch_client, consensus.clone(), db.clone()), + .build(fetch_client, consensus.clone(), provider_factory), ); - (Box::new(stage), None) } StageEnum::Senders => (Box::new(SenderRecoveryStage::new(batch_size)), None), diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index f58ebf0133a54..b916d3e89c857 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -455,6 +455,8 @@ where pub fn build(self) -> (TestBeaconConsensusEngine, TestEnv>) { reth_tracing::init_test_tracing(); let db = create_test_rw_db(); + let provider_factory = + ProviderFactory::new(db.clone(), self.base_config.chain_spec.clone()); let consensus: Arc = match self.base_config.consensus { TestConsensusConfig::Real => { @@ -496,7 +498,7 @@ where .into_task(); let body_downloader = BodiesDownloaderBuilder::default() - .build(client.clone(), consensus.clone(), db.clone()) + .build(client.clone(), consensus.clone(), provider_factory.clone()) .into_task(); Pipeline::builder().add_stages(DefaultStages::new( @@ -527,9 +529,8 @@ where let tree = ShareableBlockchainTree::new( BlockchainTree::new(externals, config, None).expect("failed to create tree"), ); - let shareable_db = ProviderFactory::new(db.clone(), self.base_config.chain_spec.clone()); let latest = self.base_config.chain_spec.genesis_header().seal_slow(); - let blockchain_provider = BlockchainProvider::with_latest(shareable_db, tree, latest); + let blockchain_provider = BlockchainProvider::with_latest(provider_factory, tree, latest); let pruner = Pruner::new( db.clone(), diff --git a/crates/interfaces/src/p2p/error.rs b/crates/interfaces/src/p2p/error.rs index 53cf68e4b4ded..9758c8ab2e434 100644 --- a/crates/interfaces/src/p2p/error.rs +++ b/crates/interfaces/src/p2p/error.rs @@ -1,5 +1,5 @@ use super::headers::client::HeadersRequest; -use crate::{consensus::ConsensusError, db}; +use crate::{consensus::ConsensusError, provider::ProviderError}; use reth_network_api::ReputationChangeKind; use reth_primitives::{ BlockHashOrNumber, BlockNumber, GotExpected, GotExpectedBoxed, Header, WithPeerId, B256, @@ -177,9 +177,9 @@ pub enum DownloadError { /// Error while executing the request. #[error(transparent)] RequestError(#[from] RequestError), - /// Error while reading data from database. + /// Provider error. 
#[error(transparent)] - DatabaseError(#[from] db::DatabaseError), + Provider(#[from] ProviderError), } #[cfg(test)] diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index cdb3317dbe848..3a50908b13d27 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -12,8 +12,8 @@ description = "Implementations of various block downloaders" # reth reth-interfaces.workspace = true reth-primitives.workspace = true -reth-db.workspace = true reth-tasks.workspace = true +reth-provider.workspace = true # async futures.workspace = true @@ -33,6 +33,7 @@ rayon.workspace = true thiserror.workspace = true # optional deps for the test-utils feature +reth-db = { workspace = true, optional = true } alloy-rlp = { workspace = true, optional = true } tempfile = { workspace = true, optional = true } itertools = { workspace = true, optional = true } @@ -50,4 +51,4 @@ itertools.workspace = true tempfile.workspace = true [features] -test-utils = ["dep:alloy-rlp", "dep:tempfile", "dep:itertools", "reth-interfaces/test-utils"] +test-utils = ["dep:alloy-rlp", "dep:tempfile", "dep:itertools", "reth-db/test-utils", "reth-interfaces/test-utils"] diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index a1451bb5b159a..b601865d3a6cb 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -2,7 +2,6 @@ use super::queue::BodiesRequestQueue; use crate::{bodies::task::TaskDownloader, metrics::BodyDownloaderMetrics}; use futures::Stream; use futures_util::StreamExt; -use reth_db::{cursor::DbCursorRO, database::Database, tables, transaction::DbTx}; use reth_interfaces::{ consensus::Consensus, p2p::{ @@ -15,6 +14,7 @@ use reth_interfaces::{ }, }; use reth_primitives::{BlockNumber, SealedHeader}; +use reth_provider::HeaderProvider; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use std::{ cmp::Ordering, @@ -27,22 +27,18 @@ use std::{ }; use tracing::info; -/// The scope for headers downloader metrics. -pub const BODIES_DOWNLOADER_SCOPE: &str = "downloaders.bodies"; - /// Downloads bodies in batches. /// /// All blocks in a batch are fetched at the same time. #[must_use = "Stream does nothing unless polled"] #[derive(Debug)] -pub struct BodiesDownloader { +pub struct BodiesDownloader { /// The bodies client client: Arc, /// The consensus client consensus: Arc, - // TODO: make this a [HeaderProvider] /// The database handle - db: DB, + provider: Provider, /// The maximum number of non-empty blocks per one request request_limit: u64, /// The maximum number of block bodies returned at once from the stream @@ -67,10 +63,10 @@ pub struct BodiesDownloader { metrics: BodyDownloaderMetrics, } -impl BodiesDownloader +impl BodiesDownloader where B: BodiesClient + 'static, - DB: Database + Unpin + 'static, + Provider: HeaderProvider + Unpin + 'static, { /// Returns the next contiguous request. fn next_headers_request(&mut self) -> DownloadResult>> { @@ -103,47 +99,29 @@ where return Ok(None) } - // Collection of results - let mut headers = Vec::new(); - - // Non empty headers count - let mut non_empty_headers = 0; - let mut current_block_num = *range.start(); - - // Acquire cursors over canonical and header tables - let tx = self.db.tx()?; - let mut canonical_cursor = tx.cursor_read::()?; - let mut header_cursor = tx.cursor_read::()?; - // Collect headers while // 1. Current block number is in range // 2. The number of non empty headers is less than maximum // 3. 
The total number of headers is less than the stream batch size (this is only - // relevant if the range consists entirely of empty headers) - while range.contains(¤t_block_num) && - non_empty_headers < max_non_empty && - headers.len() < self.stream_batch_size - { - // Find the block hash. - let (number, hash) = canonical_cursor - .seek_exact(current_block_num)? - .ok_or(DownloadError::MissingHeader { block_number: current_block_num })?; - // Find the block header. - let (_, header) = header_cursor - .seek_exact(number)? - .ok_or(DownloadError::MissingHeader { block_number: number })?; - - // If the header is not empty, increment the counter - if !header.is_empty() { - non_empty_headers += 1; + // relevant if the range consists entirely of empty headers) + let mut collected = 0; + let mut non_empty_headers = 0; + let headers = self.provider.sealed_headers_while(range.clone(), |header| { + let should_take = range.contains(&header.number) && + non_empty_headers < max_non_empty && + collected < self.stream_batch_size; + + if should_take { + collected += 1; + if !header.is_empty() { + non_empty_headers += 1; + } + true + } else { + false } + })?; - // Add header to the result collection - headers.push(header.seal(hash)); - - // Increment current block number - current_block_num += 1; - } Ok(Some(headers).filter(|h| !h.is_empty())) } @@ -286,10 +264,10 @@ where } } -impl BodiesDownloader +impl BodiesDownloader where B: BodiesClient + 'static, - DB: Database + Unpin + 'static, + Provider: HeaderProvider + Unpin + 'static, Self: BodyDownloader + 'static, { /// Spawns the downloader task via [tokio::task::spawn] @@ -306,10 +284,10 @@ where } } -impl BodyDownloader for BodiesDownloader +impl BodyDownloader for BodiesDownloader where B: BodiesClient + 'static, - DB: Database + Unpin + 'static, + Provider: HeaderProvider + Unpin + 'static, { /// Set a new download range (exclusive). /// @@ -354,10 +332,10 @@ where } } -impl Stream for BodiesDownloader +impl Stream for BodiesDownloader where B: BodiesClient + 'static, - DB: Database + Unpin + 'static, + Provider: HeaderProvider + Unpin + 'static, { type Item = BodyDownloaderResult; @@ -557,15 +535,15 @@ impl BodiesDownloaderBuilder { } /// Consume self and return the concurrent downloader. 
- pub fn build( + pub fn build( self, client: B, consensus: Arc, - db: DB, - ) -> BodiesDownloader + provider: Provider, + ) -> BodiesDownloader where B: BodiesClient + 'static, - DB: Database, + Provider: HeaderProvider, { let Self { request_limit, @@ -578,7 +556,7 @@ impl BodiesDownloaderBuilder { BodiesDownloader { client: Arc::new(client), consensus, - db, + provider, request_limit, stream_batch_size, max_buffered_blocks_size_bytes, @@ -605,7 +583,8 @@ mod tests { use futures_util::stream::StreamExt; use reth_db::test_utils::create_test_rw_db; use reth_interfaces::test_utils::{generators, generators::random_block_range, TestConsensus}; - use reth_primitives::{BlockBody, B256}; + use reth_primitives::{BlockBody, B256, MAINNET}; + use reth_provider::ProviderFactory; use std::{collections::HashMap, sync::Arc}; // Check that the blocks are emitted in order of block number, not in order of @@ -624,7 +603,7 @@ mod tests { let mut downloader = BodiesDownloaderBuilder::default().build( client.clone(), Arc::new(TestConsensus::default()), - db, + ProviderFactory::new(db, MAINNET.clone()), ); downloader.set_download_range(0..=19).expect("failed to set download range"); @@ -659,9 +638,12 @@ mod tests { let request_limit = 10; let client = Arc::new(TestBodiesClient::default().with_bodies(bodies.clone())); - let mut downloader = BodiesDownloaderBuilder::default() - .with_request_limit(request_limit) - .build(client.clone(), Arc::new(TestConsensus::default()), db); + let mut downloader = + BodiesDownloaderBuilder::default().with_request_limit(request_limit).build( + client.clone(), + Arc::new(TestConsensus::default()), + ProviderFactory::new(db, MAINNET.clone()), + ); downloader.set_download_range(0..=199).expect("failed to set download range"); let _ = downloader.collect::>().await; @@ -686,7 +668,11 @@ mod tests { let mut downloader = BodiesDownloaderBuilder::default() .with_stream_batch_size(stream_batch_size) .with_request_limit(request_limit) - .build(client.clone(), Arc::new(TestConsensus::default()), db); + .build( + client.clone(), + Arc::new(TestConsensus::default()), + ProviderFactory::new(db, MAINNET.clone()), + ); let mut range_start = 0; while range_start < 100 { @@ -715,7 +701,7 @@ mod tests { let mut downloader = BodiesDownloaderBuilder::default().with_stream_batch_size(100).build( client.clone(), Arc::new(TestConsensus::default()), - db, + ProviderFactory::new(db, MAINNET.clone()), ); // Set and download the first range @@ -752,7 +738,11 @@ mod tests { .with_stream_batch_size(10) .with_request_limit(1) .with_max_buffered_blocks_size_bytes(1) - .build(client.clone(), Arc::new(TestConsensus::default()), db); + .build( + client.clone(), + Arc::new(TestConsensus::default()), + ProviderFactory::new(db, MAINNET.clone()), + ); // Set and download the entire range downloader.set_download_range(0..=199).expect("failed to set download range"); @@ -779,7 +769,11 @@ mod tests { let mut downloader = BodiesDownloaderBuilder::default() .with_request_limit(3) .with_stream_batch_size(100) - .build(client.clone(), Arc::new(TestConsensus::default()), db); + .build( + client.clone(), + Arc::new(TestConsensus::default()), + ProviderFactory::new(db, MAINNET.clone()), + ); // Download the requested range downloader.set_download_range(0..=99).expect("failed to set download range"); diff --git a/crates/net/downloaders/src/bodies/task.rs b/crates/net/downloaders/src/bodies/task.rs index 97748f54f7e12..9a713a8539bcb 100644 --- a/crates/net/downloaders/src/bodies/task.rs +++ 
b/crates/net/downloaders/src/bodies/task.rs @@ -42,16 +42,17 @@ impl TaskDownloader { /// # Example /// /// ``` - /// use reth_db::database::Database; /// use reth_downloaders::bodies::{bodies::BodiesDownloaderBuilder, task::TaskDownloader}; /// use reth_interfaces::{consensus::Consensus, p2p::bodies::client::BodiesClient}; + /// use reth_provider::HeaderProvider; /// use std::sync::Arc; - /// fn t( + /// + /// fn t( /// client: Arc, /// consensus: Arc, - /// db: Arc, + /// provider: Provider, /// ) { - /// let downloader = BodiesDownloaderBuilder::default().build(client, consensus, db); + /// let downloader = BodiesDownloaderBuilder::default().build(client, consensus, provider); /// let downloader = TaskDownloader::spawn(downloader); /// } /// ``` @@ -170,6 +171,8 @@ mod tests { use assert_matches::assert_matches; use reth_db::test_utils::create_test_rw_db; use reth_interfaces::{p2p::error::DownloadError, test_utils::TestConsensus}; + use reth_primitives::MAINNET; + use reth_provider::ProviderFactory; use std::sync::Arc; #[tokio::test(flavor = "multi_thread")] @@ -187,7 +190,7 @@ mod tests { let downloader = BodiesDownloaderBuilder::default().build( client.clone(), Arc::new(TestConsensus::default()), - db, + ProviderFactory::new(db, MAINNET.clone()), ); let mut downloader = TaskDownloader::spawn(downloader); @@ -209,7 +212,7 @@ mod tests { let downloader = BodiesDownloaderBuilder::default().build( Arc::new(TestBodiesClient::default()), Arc::new(TestConsensus::default()), - db, + ProviderFactory::new(db, MAINNET.clone()), ); let mut downloader = TaskDownloader::spawn(downloader); diff --git a/crates/net/downloaders/src/test_utils/file_client.rs b/crates/net/downloaders/src/test_utils/file_client.rs index 45df474e1e90c..69320fe4b1714 100644 --- a/crates/net/downloaders/src/test_utils/file_client.rs +++ b/crates/net/downloaders/src/test_utils/file_client.rs @@ -267,7 +267,8 @@ mod tests { }, test_utils::TestConsensus, }; - use reth_primitives::SealedHeader; + use reth_primitives::{SealedHeader, MAINNET}; + use reth_provider::ProviderFactory; use std::{ io::{Read, Seek, SeekFrom, Write}, sync::Arc, @@ -291,7 +292,7 @@ mod tests { let mut downloader = BodiesDownloaderBuilder::default().build( client.clone(), Arc::new(TestConsensus::default()), - db, + ProviderFactory::new(db, MAINNET.clone()), ); downloader.set_download_range(0..=19).expect("failed to set download range"); @@ -373,7 +374,7 @@ mod tests { let mut downloader = BodiesDownloaderBuilder::default().build( client.clone(), Arc::new(TestConsensus::default()), - db, + ProviderFactory::new(db, MAINNET.clone()), ); downloader.set_download_range(0..=19).expect("failed to set download range"); diff --git a/crates/stages/src/lib.rs b/crates/stages/src/lib.rs index bf9ba9e8dd946..8651dce35194e 100644 --- a/crates/stages/src/lib.rs +++ b/crates/stages/src/lib.rs @@ -36,7 +36,7 @@ //! # let bodies_downloader = BodiesDownloaderBuilder::default().build( //! # Arc::new(TestBodiesClient { responder: |_| Ok((PeerId::ZERO, vec![]).into()) }), //! # consensus.clone(), -//! # db.clone() +//! # ProviderFactory::new(db.clone(), MAINNET.clone()) //! # ); //! # let (tip_tx, tip_rx) = watch::channel(B256::default()); //! 
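The thread running through all of these call sites is the same: the bodies downloader is now generic over any `HeaderProvider` rather than owning a raw `Database`, so production code passes a `ProviderFactory` while tests can pass any stub that serves headers. In the real patch the manual cursor walk is replaced by `sealed_headers_while`, which drives collection through a predicate; the sketch below reduces that to a filter with a batch cap, using a toy trait in place of `HeaderProvider`:

```rust
use std::ops::RangeInclusive;

// Toy stand-in for reth's HeaderProvider trait.
trait HeaderSource {
    fn header(&self, number: u64) -> Option<u64>;
}

// The downloader no longer names a database type anywhere.
struct BodiesDownloader<P> {
    provider: P,
}

impl<P: HeaderSource> BodiesDownloader<P> {
    fn next_headers(&self, range: RangeInclusive<u64>, batch: usize) -> Vec<u64> {
        range.filter_map(|n| self.provider.header(n)).take(batch).collect()
    }
}

// Tests satisfy the bound with a trivial stub instead of a temp database.
struct Stub;

impl HeaderSource for Stub {
    fn header(&self, number: u64) -> Option<u64> {
        Some(number)
    }
}
```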
# let factory = Factory::new(chain_spec.clone()); diff --git a/crates/stages/src/stages/bodies.rs b/crates/stages/src/stages/bodies.rs index fe9f583b9aff6..8dc6de073f052 100644 --- a/crates/stages/src/stages/bodies.rs +++ b/crates/stages/src/stages/bodies.rs @@ -484,7 +484,7 @@ mod tests { tables, test_utils::TempDatabase, transaction::{DbTx, DbTxMut}, - DatabaseEnv, + DatabaseEnv, DatabaseError, }; use reth_interfaces::{ p2p::{ @@ -494,7 +494,7 @@ mod tests { response::BlockResponse, }, download::DownloadClient, - error::DownloadResult, + error::{DownloadError, DownloadResult}, priority::Priority, }, test_utils::{ @@ -780,22 +780,27 @@ mod tests { &mut self, range: RangeInclusive, ) -> DownloadResult<()> { - self.headers = - VecDeque::from(self.db.view(|tx| -> DownloadResult> { - let mut header_cursor = tx.cursor_read::()?; - - let mut canonical_cursor = tx.cursor_read::()?; - let walker = canonical_cursor.walk_range(range)?; - - let mut headers = Vec::default(); - for entry in walker { - let (num, hash) = entry?; - let (_, header) = - header_cursor.seek_exact(num)?.expect("missing header"); - headers.push(header.seal(hash)); - } - Ok(headers) - })??); + self.headers = VecDeque::from( + self.db + .view(|tx| -> Result, DatabaseError> { + let mut header_cursor = tx.cursor_read::()?; + + let mut canonical_cursor = + tx.cursor_read::()?; + let walker = canonical_cursor.walk_range(range)?; + + let mut headers = Vec::default(); + for entry in walker { + let (num, hash) = entry?; + let (_, header) = + header_cursor.seek_exact(num)?.expect("missing header"); + headers.push(header.seal(hash)); + } + Ok(headers) + }) + .map_err(|err| DownloadError::Provider(err.into()))? + .map_err(|err| DownloadError::Provider(err.into()))?, + ); Ok(()) } } From f29e04dadc55ae69353fc160e342bc52141aac55 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 18 Nov 2023 09:34:57 +0100 Subject: [PATCH 42/77] fix: track actually requested transactions (#5483) --- crates/net/network/src/transactions.rs | 46 +++++++++++--------------- 1 file changed, 20 insertions(+), 26 deletions(-) diff --git a/crates/net/network/src/transactions.rs b/crates/net/network/src/transactions.rs index 24f6f8ffb24ac..f9ffc138deaeb 100644 --- a/crates/net/network/src/transactions.rs +++ b/crates/net/network/src/transactions.rs @@ -760,7 +760,7 @@ where } fn report_peer(&self, peer_id: PeerId, kind: ReputationChangeKind) { - trace!(target: "net::tx", ?peer_id, ?kind); + trace!(target: "net::tx", ?peer_id, ?kind, "reporting reputation change"); self.network.reputation_change(peer_id, kind); self.metrics.reported_bad_transactions.increment(1); } @@ -831,11 +831,10 @@ where while let Poll::Ready(fetch_event) = this.transaction_fetcher.poll(cx) { match fetch_event { FetchEvent::TransactionsFetched { peer_id, transactions } => { - if let Some(txns) = transactions { - this.import_transactions(peer_id, txns, TransactionSource::Response); - } + this.import_transactions(peer_id, transactions, TransactionSource::Response); } FetchEvent::FetchError { peer_id, error } => { + trace!(target: "net::tx", ?peer_id, ?error, "requesting transactions from peer failed"); this.on_request_error(peer_id, error); } } @@ -857,7 +856,7 @@ where // known that this transaction is bad. (e.g. 
consensus // rules) if err.is_bad_transaction() && !this.network.is_syncing() { - trace!(target: "net::tx", ?err, "Bad transaction import"); + trace!(target: "net::tx", ?err, "bad pool transaction import"); this.on_bad_import(err.hash); continue } @@ -1008,6 +1007,8 @@ impl TransactionSource { /// An inflight request for `PooledTransactions` from a peer struct GetPooledTxRequest { peer_id: PeerId, + /// Transaction hashes that were requested, for cleanup purposes + requested_hashes: Vec, response: oneshot::Receiver>, } @@ -1026,11 +1027,13 @@ struct GetPooledTxRequestFut { } impl GetPooledTxRequestFut { + #[inline] fn new( peer_id: PeerId, + requested_hashes: Vec, response: oneshot::Receiver>, ) -> Self { - Self { inner: Some(GetPooledTxRequest { peer_id, response }) } + Self { inner: Some(GetPooledTxRequest { peer_id, requested_hashes, response }) } } } @@ -1040,20 +1043,11 @@ impl Future for GetPooledTxRequestFut { fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let mut req = self.as_mut().project().inner.take().expect("polled after completion"); match req.response.poll_unpin(cx) { - Poll::Ready(result) => { - let request_hashes: Vec = match &result { - Ok(Ok(pooled_txs)) => { - pooled_txs.0.iter().map(|tx_elem| *tx_elem.hash()).collect() - } - _ => Vec::new(), - }; - - Poll::Ready(GetPooledTxResponse { - peer_id: req.peer_id, - requested_hashes: request_hashes, - result, - }) - } + Poll::Ready(result) => Poll::Ready(GetPooledTxResponse { + peer_id: req.peer_id, + requested_hashes: req.requested_hashes, + result, + }), Poll::Pending => { self.project().inner.set(Some(req)); Poll::Pending @@ -1108,16 +1102,16 @@ impl TransactionFetcher { self.inflight_requests.poll_next_unpin(cx) { return match result { - Ok(Ok(txs)) => { + Ok(Ok(transactions)) => { // clear received hashes - self.remove_inflight_hashes(txs.hashes()); + self.remove_inflight_hashes(transactions.hashes()); // TODO: re-request missing hashes, for now clear all of them self.remove_inflight_hashes(requested_hashes.iter()); Poll::Ready(FetchEvent::TransactionsFetched { peer_id, - transactions: Some(txs.0), + transactions: transactions.0, }) } Ok(Err(req_err)) => { @@ -1189,7 +1183,7 @@ impl TransactionFetcher { let (response, rx) = oneshot::channel(); let req: PeerRequest = PeerRequest::GetPooledTransactions { - request: GetPooledTransactions(announced_hashes), + request: GetPooledTransactions(announced_hashes.clone()), response, }; @@ -1210,7 +1204,7 @@ impl TransactionFetcher { return false } else { //create a new request for it, from that peer - self.inflight_requests.push(GetPooledTxRequestFut::new(peer_id, rx)) + self.inflight_requests.push(GetPooledTxRequestFut::new(peer_id, announced_hashes, rx)) } true @@ -1225,7 +1219,7 @@ enum FetchEvent { /// The ID of the peer from which transactions were fetched. peer_id: PeerId, /// The transactions that were fetched, if available. - transactions: Option>, + transactions: Vec, }, /// Triggered when there is an error in fetching transactions. 
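The behavioral fix in this patch is easy to miss: previously the "requested" hashes were reconstructed from whatever the peer answered with, so a partial or empty response looked complete and nothing could be re-requested. Recording the request payload inside the in-flight future keeps the source of truth on the requesting side. A reduced sketch of the idea, with `u64` standing in for `TxHash`:

```rust
type Hash = u64; // placeholder for the real TxHash type

struct InflightRequest {
    // Captured when the request is sent, never derived from the response.
    requested: Vec<Hash>,
}

fn missing_hashes(req: &InflightRequest, received: &[Hash]) -> Vec<Hash> {
    // Anything the peer failed to return is now detectable (and, per the
    // TODO in the patch, a candidate for re-requesting).
    req.requested.iter().copied().filter(|h| !received.contains(h)).collect()
}
```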
FetchError { From c7a57a703135582914cb77eeabdc3db179e0fbb2 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Sat, 18 Nov 2023 00:41:42 -0800 Subject: [PATCH 43/77] chore(pipeline): stage poll extension trait (#5484) --- Cargo.lock | 1 + bin/reth/src/stage/run.rs | 5 ++--- crates/stages/Cargo.toml | 1 + crates/stages/benches/criterion.rs | 7 ++++--- crates/stages/src/pipeline/mod.rs | 6 +++--- crates/stages/src/stage.rs | 14 ++++++++++++++ crates/stages/src/test_utils/runner.rs | 6 +++--- 7 files changed, 28 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 463dccf88c0e7..89e2cd91e4999 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6510,6 +6510,7 @@ dependencies = [ "aquamarine", "assert_matches", "async-trait", + "auto_impl", "criterion", "futures-util", "itertools 0.11.0", diff --git a/bin/reth/src/stage/run.rs b/bin/reth/src/stage/run.rs index 589bcbf7d0290..6c821451907e2 100644 --- a/bin/reth/src/stage/run.rs +++ b/bin/reth/src/stage/run.rs @@ -12,7 +12,6 @@ use crate::{ version::SHORT_VERSION, }; use clap::Parser; -use futures::future::poll_fn; use reth_beacon_consensus::BeaconConsensus; use reth_config::Config; use reth_db::init_db; @@ -25,7 +24,7 @@ use reth_stages::{ IndexAccountHistoryStage, IndexStorageHistoryStage, MerkleStage, SenderRecoveryStage, StorageHashingStage, TransactionLookupStage, }, - ExecInput, Stage, UnwindInput, + ExecInput, Stage, StageExt, UnwindInput, }; use std::{any::Any, net::SocketAddr, path::PathBuf, sync::Arc}; use tracing::*; @@ -260,7 +259,7 @@ impl Command { }; loop { - poll_fn(|cx| exec_stage.poll_execute_ready(cx, input)).await?; + exec_stage.execute_ready(input).await?; let output = exec_stage.execute(&provider_rw, input)?; input.checkpoint = Some(output.checkpoint); diff --git a/crates/stages/Cargo.toml b/crates/stages/Cargo.toml index 6da02ad00a702..890bc135ed027 100644 --- a/crates/stages/Cargo.toml +++ b/crates/stages/Cargo.toml @@ -50,6 +50,7 @@ aquamarine.workspace = true itertools.workspace = true rayon.workspace = true num-traits = "0.2.15" +auto_impl = "1" [dev-dependencies] # reth diff --git a/crates/stages/benches/criterion.rs b/crates/stages/benches/criterion.rs index ad210165cbd87..98979ca5a6f82 100644 --- a/crates/stages/benches/criterion.rs +++ b/crates/stages/benches/criterion.rs @@ -10,9 +10,9 @@ use reth_provider::ProviderFactory; use reth_stages::{ stages::{MerkleStage, SenderRecoveryStage, TotalDifficultyStage, TransactionLookupStage}, test_utils::TestTransaction, - ExecInput, Stage, UnwindInput, + ExecInput, Stage, StageExt, UnwindInput, }; -use std::{future::poll_fn, path::PathBuf, sync::Arc}; +use std::{path::PathBuf, sync::Arc}; mod setup; use setup::StageRange; @@ -138,7 +138,8 @@ fn measure_stage_with_path( let mut stage = stage.clone(); let factory = ProviderFactory::new(tx.tx.db(), MAINNET.clone()); let provider = factory.provider_rw().unwrap(); - poll_fn(|cx| stage.poll_execute_ready(cx, input)) + stage + .execute_ready(input) .await .and_then(|_| stage.execute(&provider, input)) .unwrap(); diff --git a/crates/stages/src/pipeline/mod.rs b/crates/stages/src/pipeline/mod.rs index 718809abc6e9e..06f487858d96c 100644 --- a/crates/stages/src/pipeline/mod.rs +++ b/crates/stages/src/pipeline/mod.rs @@ -1,6 +1,6 @@ use crate::{ error::*, BlockErrorKind, ExecInput, ExecOutput, MetricEvent, MetricEventsSender, Stage, - StageError, UnwindInput, + StageError, StageExt, UnwindInput, }; use futures_util::Future; use reth_db::database::Database; @@ -11,7 +11,7 @@ use reth_primitives::{ }; use 
reth_provider::{ProviderFactory, StageCheckpointReader, StageCheckpointWriter}; use reth_tokio_util::EventListeners; -use std::{future::poll_fn, pin::Pin, sync::Arc}; +use std::{pin::Pin, sync::Arc}; use tokio::sync::watch; use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::*; @@ -370,7 +370,7 @@ where let exec_input = ExecInput { target, checkpoint: prev_checkpoint }; - if let Err(err) = poll_fn(|cx| stage.poll_execute_ready(cx, exec_input)).await { + if let Err(err) = stage.execute_ready(exec_input).await { self.listeners.notify(PipelineEvent::Error { stage_id }); match on_stage_error(&factory, stage_id, prev_checkpoint, err)? { Some(ctrl) => return Ok(ctrl), diff --git a/crates/stages/src/stage.rs b/crates/stages/src/stage.rs index 55a491a83c9d5..1fc2b29c1d1e8 100644 --- a/crates/stages/src/stage.rs +++ b/crates/stages/src/stage.rs @@ -7,6 +7,7 @@ use reth_primitives::{ use reth_provider::{BlockReader, DatabaseProviderRW, ProviderError, TransactionsProvider}; use std::{ cmp::{max, min}, + future::poll_fn, ops::{Range, RangeInclusive}, task::{Context, Poll}, }; @@ -189,6 +190,7 @@ pub struct UnwindOutput { /// Stages are executed as part of a pipeline where they are executed serially. /// /// Stages receive [`DatabaseProviderRW`]. +#[auto_impl::auto_impl(Box)] pub trait Stage: Send + Sync { /// Get the ID of the stage. /// @@ -243,3 +245,15 @@ pub trait Stage: Send + Sync { input: UnwindInput, ) -> Result; } + +/// [Stage] trait extension. +#[async_trait::async_trait] +pub trait StageExt: Stage { + /// Utility extension for the `Stage` trait that invokes `Stage::poll_execute_ready` + /// with [poll_fn] context. For more information see [Stage::poll_execute_ready]. + async fn execute_ready(&mut self, input: ExecInput) -> Result<(), StageError> { + poll_fn(|cx| self.poll_execute_ready(cx, input)).await + } +} + +impl> StageExt for S {} diff --git a/crates/stages/src/test_utils/runner.rs b/crates/stages/src/test_utils/runner.rs index 96c44cacb4cf0..0be375edcd9f7 100644 --- a/crates/stages/src/test_utils/runner.rs +++ b/crates/stages/src/test_utils/runner.rs @@ -1,10 +1,10 @@ use super::TestTransaction; -use crate::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; +use crate::{ExecInput, ExecOutput, Stage, StageError, StageExt, UnwindInput, UnwindOutput}; use reth_db::DatabaseEnv; use reth_interfaces::db::DatabaseError; use reth_primitives::MAINNET; use reth_provider::{ProviderError, ProviderFactory}; -use std::{borrow::Borrow, future::poll_fn, sync::Arc}; +use std::{borrow::Borrow, sync::Arc}; use tokio::sync::oneshot; #[derive(thiserror::Error, Debug)] @@ -49,7 +49,7 @@ pub(crate) trait ExecuteStageTestRunner: StageTestRunner { tokio::spawn(async move { let factory = ProviderFactory::new(db.db(), MAINNET.clone()); - let result = poll_fn(|cx| stage.poll_execute_ready(cx, input)).await.and_then(|_| { + let result = stage.execute_ready(input).await.and_then(|_| { let provider_rw = factory.provider_rw().unwrap(); let result = stage.execute(&provider_rw, input); provider_rw.commit().expect("failed to commit"); From 6e6e873d88a2311c4edb3eb310be3ac2641c9354 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 18 Nov 2023 11:16:20 +0100 Subject: [PATCH 44/77] chore: add some capability helper fns (#5486) --- crates/net/eth-wire/src/capability.rs | 52 +++++++++++++++++++++++---- 1 file changed, 45 insertions(+), 7 deletions(-) diff --git a/crates/net/eth-wire/src/capability.rs b/crates/net/eth-wire/src/capability.rs index f33993a52a422..9270122a17a55 100644 
--- a/crates/net/eth-wire/src/capability.rs +++ b/crates/net/eth-wire/src/capability.rs @@ -236,10 +236,8 @@ pub enum SharedCapability { }, /// Any other unknown capability. UnknownCapability { - /// Name of the capability. - name: Cow<'static, str>, - /// (Highest) negotiated version of the eth capability. - version: u8, + /// Shared capability. + cap: Capability, /// The message ID offset for this capability. /// /// This represents the message ID offset for the first message of the eth capability in @@ -259,7 +257,10 @@ impl SharedCapability { match name { "eth" => Ok(Self::eth(EthVersion::try_from(version)?, offset)), - _ => Ok(Self::UnknownCapability { name: name.to_string().into(), version, offset }), + _ => Ok(Self::UnknownCapability { + cap: Capability::new(name.to_string(), version as usize), + offset, + }), } } @@ -268,12 +269,20 @@ impl SharedCapability { Self::Eth { version, offset } } + /// Returns the capability. + pub fn capability(&self) -> Cow<'_, Capability> { + match self { + SharedCapability::Eth { version, .. } => Cow::Owned(Capability::eth(*version)), + SharedCapability::UnknownCapability { cap, .. } => Cow::Borrowed(cap), + } + } + /// Returns the name of the capability. #[inline] pub fn name(&self) -> &str { match self { SharedCapability::Eth { .. } => "eth", - SharedCapability::UnknownCapability { name, .. } => name, + SharedCapability::UnknownCapability { cap, .. } => cap.name.as_ref(), } } @@ -287,7 +296,7 @@ impl SharedCapability { pub fn version(&self) -> u8 { match self { SharedCapability::Eth { version, .. } => *version as u8, - SharedCapability::UnknownCapability { version, .. } => *version, + SharedCapability::UnknownCapability { cap, .. } => cap.version as u8, } } @@ -348,9 +357,31 @@ impl SharedCapabilities { } /// Returns the negotiated eth version if it is shared. + #[inline] pub fn eth_version(&self) -> Result { self.eth().map(|cap| cap.version()) } + + /// Returns true if the shared capabilities contain the given capability. + #[inline] + pub fn contains(&self, cap: &Capability) -> bool { + self.find(cap).is_some() + } + + /// Returns the shared capability for the given capability. + #[inline] + pub fn find(&self, cap: &Capability) -> Option<&SharedCapability> { + self.0.iter().find(|c| c.version() == cap.version as u8 && c.name() == cap.name) + } + + /// Returns the shared capability for the given capability or an error if it's not compatible. + #[inline] + pub fn ensure_matching_capability( + &self, + cap: &Capability, + ) -> Result<&SharedCapability, UnsupportedCapabilityError> { + self.find(cap).ok_or_else(|| UnsupportedCapabilityError { capability: cap.clone() }) + } } /// Determines the offsets for each shared capability between the input list of peer @@ -452,6 +483,13 @@ pub enum SharedCapabilityError { ReservedMessageIdOffset(u8), } +/// An error thrown when capabilities mismatch. 
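The new helpers are thin linear searches over the negotiated capability list, which is small enough in practice (a handful of shared protocols) that anything fancier than `iter().find()` would be overkill. A standalone sketch of the lookup logic, with a simplified error type in place of the patch's `UnsupportedCapabilityError`:

```rust
#[derive(Clone, Debug, PartialEq, Eq)]
struct Capability {
    name: String,
    version: usize,
}

struct SharedCapabilities(Vec<Capability>);

impl SharedCapabilities {
    // Mirrors the patch: a match requires both name and version to agree.
    fn find(&self, cap: &Capability) -> Option<&Capability> {
        self.0.iter().find(|c| c.name == cap.name && c.version == cap.version)
    }

    fn contains(&self, cap: &Capability) -> bool {
        self.find(cap).is_some()
    }

    fn ensure_matching_capability(&self, cap: &Capability) -> Result<&Capability, String> {
        self.find(cap)
            .ok_or_else(|| format!("unsupported capability {}/{}", cap.name, cap.version))
    }
}
```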
+#[derive(Debug, thiserror::Error)] +#[error("unsupported capability {capability}")] +pub struct UnsupportedCapabilityError { + capability: Capability, +} + #[cfg(test)] mod tests { use super::*; From 49d69c66ccd4600ed81c1d9d2b52a3b6276234c8 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Sat, 18 Nov 2023 02:45:48 -0800 Subject: [PATCH 45/77] chore(tree): minor `Chain` modifications (#5487) --- crates/blockchain-tree/src/blockchain_tree.rs | 21 ++++---- crates/blockchain-tree/src/chain.rs | 14 ++--- crates/storage/provider/src/chain.rs | 54 ++++++++++--------- 3 files changed, 44 insertions(+), 45 deletions(-) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 565d6801aebdb..1d850933aaf1b 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -546,8 +546,9 @@ impl BlockchainTree { let Some(chain) = self.state.chains.get(&chain_id) else { return hashes }; hashes.extend(chain.blocks().values().map(|b| (b.number, b.hash()))); - let fork_block = chain.fork_block_hash(); - if let Some(next_chain_id) = self.block_indices().get_blocks_chain_id(&fork_block) { + let fork_block = chain.fork_block(); + if let Some(next_chain_id) = self.block_indices().get_blocks_chain_id(&fork_block.hash) + { chain_id = next_chain_id; } else { // if there is no fork block that point to other chains, break the loop. @@ -794,7 +795,7 @@ impl BlockchainTree { // check unconnected block buffer for childs of the chains let mut all_chain_blocks = Vec::new(); for (_, chain) in self.state.chains.iter() { - for (&number, blocks) in chain.blocks.iter() { + for (&number, blocks) in chain.blocks().iter() { all_chain_blocks.push(BlockNumHash { number, hash: blocks.hash }) } } @@ -946,18 +947,16 @@ impl BlockchainTree { let canonical = self.split_chain(chain_id, chain, ChainSplitTarget::Hash(*block_hash)); durations_recorder.record_relative(MakeCanonicalAction::SplitChain); - let mut block_fork = canonical.fork_block(); - let mut block_fork_number = canonical.fork_block_number(); + let mut fork_block = canonical.fork_block(); let mut chains_to_promote = vec![canonical]; // loop while fork blocks are found in Tree. - while let Some(chain_id) = self.block_indices().get_blocks_chain_id(&block_fork.hash) { - let chain = self.state.chains.remove(&chain_id).expect("To fork to be present"); - block_fork = chain.fork_block(); + while let Some(chain_id) = self.block_indices().get_blocks_chain_id(&fork_block.hash) { + let chain = self.state.chains.remove(&chain_id).expect("fork is present"); // canonical chain is lower part of the chain. 
let canonical = - self.split_chain(chain_id, chain, ChainSplitTarget::Number(block_fork_number)); - block_fork_number = canonical.fork_block_number(); + self.split_chain(chain_id, chain, ChainSplitTarget::Number(fork_block.number)); + fork_block = canonical.fork_block(); chains_to_promote.push(canonical); } durations_recorder.record_relative(MakeCanonicalAction::SplitChainForks); @@ -989,7 +988,7 @@ impl BlockchainTree { ); // if joins to the tip; - if new_canon_chain.fork_block_hash() == old_tip.hash { + if new_canon_chain.fork_block().hash == old_tip.hash { chain_notification = CanonStateNotification::Commit { new: Arc::new(new_canon_chain.clone()) }; // append to database diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index 36227f003b724..252b93606ec4d 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -152,7 +152,7 @@ impl AppendableChain { ) })?; - let mut state = self.state.clone(); + let mut state = self.state().clone(); // Revert state to the state after execution of the parent block state.revert_to(parent.number); @@ -169,11 +169,8 @@ impl AppendableChain { .map_err(|err| InsertBlockError::new(block.block.clone(), err.into()))?; state.extend(block_state); - let chain = - Self { chain: Chain { state, blocks: BTreeMap::from([(block.number, block)]) } }; - // If all is okay, return new chain back. Present chain is not modified. - Ok(chain) + Ok(Self { chain: Chain::from_block(block, state) }) } /// Validate and execute the given block that _extends the canonical chain_, validating its @@ -280,10 +277,10 @@ impl AppendableChain { DB: Database, EF: ExecutorFactory, { - let (_, parent_block) = self.blocks.last_key_value().expect("Chain has at least one block"); + let parent_block = self.chain.tip(); let post_state_data = BundleStateDataRef { - state: &self.state, + state: self.state(), sidechain_block_hashes: &side_chain_block_hashes, canonical_block_hashes, canonical_fork, @@ -299,8 +296,7 @@ impl AppendableChain { ) .map_err(|err| InsertBlockError::new(block.block.clone(), err.into()))?; // extend the state. - self.state.extend(block_state); - self.blocks.insert(block.number, block); + self.chain.append_block(block, block_state); Ok(()) } } diff --git a/crates/storage/provider/src/chain.rs b/crates/storage/provider/src/chain.rs index 53bc31a941ced..ea240dc6d8f61 100644 --- a/crates/storage/provider/src/chain.rs +++ b/crates/storage/provider/src/chain.rs @@ -16,16 +16,29 @@ use std::{borrow::Cow, collections::BTreeMap, fmt}; /// Used inside the BlockchainTree. #[derive(Clone, Debug, Default, PartialEq, Eq)] pub struct Chain { + /// All blocks in this chain. + blocks: BTreeMap, /// The state of all accounts after execution of the _all_ blocks in this chain's range from /// [Chain::first] to [Chain::tip], inclusive. /// /// This state also contains the individual changes that lead to the current state. - pub state: BundleStateWithReceipts, - /// All blocks in this chain. - pub blocks: BTreeMap, + state: BundleStateWithReceipts, } impl Chain { + /// Create new Chain from blocks and state. + pub fn new( + blocks: impl IntoIterator, + state: BundleStateWithReceipts, + ) -> Self { + Self { blocks: BTreeMap::from_iter(blocks.into_iter().map(|b| (b.number, b))), state } + } + + /// Create new Chain from a single block and its state. + pub fn from_block(block: SealedBlockWithSenders, state: BundleStateWithReceipts) -> Self { + Self::new([block], state) + } + /// Get the blocks in this chain. 
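These constructors exist so that `blocks` and `state` can become private: every way of building or growing a `Chain` now runs through code that keeps the two in lockstep, and the redundant `fork_block_number`/`fork_block_hash` accessors collapse into the single `fork_block()`. A minimal sketch of the encapsulation, with strings standing in for blocks and bundle state:

```rust
use std::collections::BTreeMap;

#[derive(Default)]
struct Chain {
    // Private: callers can no longer insert a block without its state.
    blocks: BTreeMap<u64, String>,
    state: Vec<String>,
}

impl Chain {
    fn from_block(number: u64, block: String, state: String) -> Self {
        let mut chain = Self::default();
        chain.append_block(number, block, state);
        chain
    }

    // Assumes attachment was already validated, as the patch's doc comment notes.
    fn append_block(&mut self, number: u64, block: String, state: String) {
        self.blocks.insert(number, block);
        self.state.push(state);
    }
}
```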
pub fn blocks(&self) -> &BTreeMap { &self.blocks @@ -96,18 +109,6 @@ impl Chain { ForkBlock { number: first.number.saturating_sub(1), hash: first.parent_hash } } - /// Get the block number at which this chain forked. - #[track_caller] - pub fn fork_block_number(&self) -> BlockNumber { - self.first().number.saturating_sub(1) - } - - /// Get the block hash at which this chain forked. - #[track_caller] - pub fn fork_block_hash(&self) -> BlockHash { - self.first().parent_hash - } - /// Get the first block in this chain. #[track_caller] pub fn first(&self) -> &SealedBlockWithSenders { @@ -124,11 +125,6 @@ impl Chain { self.blocks.last_key_value().expect("Chain should have at least one block").1 } - /// Create new chain with given blocks and post state. - pub fn new(blocks: Vec, state: BundleStateWithReceipts) -> Self { - Self { state, blocks: blocks.into_iter().map(|b| (b.number, b)).collect() } - } - /// Returns length of the chain. pub fn len(&self) -> usize { self.blocks.len() @@ -160,22 +156,30 @@ impl Chain { receipt_attch } + /// Append a single block with state to the chain. + /// This method assumes that blocks attachment to the chain has already been validated. + pub fn append_block(&mut self, block: SealedBlockWithSenders, state: BundleStateWithReceipts) { + self.blocks.insert(block.number, block); + self.state.extend(state); + } + /// Merge two chains by appending the given chain into the current one. /// /// The state of accounts for this chain is set to the state of the newest chain. - pub fn append_chain(&mut self, chain: Chain) -> RethResult<()> { + pub fn append_chain(&mut self, other: Chain) -> RethResult<()> { let chain_tip = self.tip(); - if chain_tip.hash != chain.fork_block_hash() { + let other_fork_block = other.fork_block(); + if chain_tip.hash != other_fork_block.hash { return Err(BlockExecutionError::AppendChainDoesntConnect { chain_tip: Box::new(chain_tip.num_hash()), - other_chain_fork: Box::new(chain.fork_block()), + other_chain_fork: Box::new(other_fork_block), } .into()) } // Insert blocks from other chain - self.blocks.extend(chain.blocks); - self.state.extend(chain.state); + self.blocks.extend(other.blocks); + self.state.extend(other.state); Ok(()) } From 14dd9e815028f3b9566c63a192efbef8cf1ebcb4 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Sun, 19 Nov 2023 06:33:11 -0800 Subject: [PATCH 46/77] feat(provider): `StateRootProvider::state_root_with_updates` (#5485) --- Cargo.lock | 1 + crates/blockchain-tree/src/blockchain_tree.rs | 4 +-- crates/blockchain-tree/src/chain.rs | 26 ++++++++------ crates/revm/Cargo.toml | 3 ++ crates/revm/src/processor.rs | 8 +++++ .../bundle_state_with_receipts.rs | 33 ++++++++++++++---- .../src/bundle_state/state_changes.rs | 14 ++++---- .../src/providers/bundle_state_provider.rs | 34 ++++++++++++------- crates/storage/provider/src/providers/mod.rs | 8 ++--- .../src/providers/state/historical.rs | 10 +++++- .../provider/src/providers/state/latest.rs | 10 ++++++ .../provider/src/providers/state/macros.rs | 1 + .../storage/provider/src/test_utils/mock.rs | 14 ++++++-- .../storage/provider/src/test_utils/noop.rs | 10 +++++- crates/storage/provider/src/traits/state.rs | 20 ++++++++--- 15 files changed, 144 insertions(+), 52 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 89e2cd91e4999..a6f9284f91aa8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6297,6 +6297,7 @@ dependencies = [ "reth-primitives", "reth-provider", "reth-revm-inspectors", + "reth-trie", "revm", "tracing", ] diff --git 
a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 1d850933aaf1b..f869eb4859566 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -104,8 +104,8 @@ impl BlockchainTree { externals.fetch_latest_canonical_hashes(config.num_of_canonical_hashes() as usize)?; // TODO(rakita) save last finalized block inside database but for now just take - // tip-max_reorg_depth - // task: https://github.com/paradigmxyz/reth/issues/1712 + // `tip - max_reorg_depth` + // https://github.com/paradigmxyz/reth/issues/1712 let last_finalized_block_number = if last_canonical_hashes.len() > max_reorg_depth { // we pick `Highest - max_reorg_depth` block as last finalized block. last_canonical_hashes.keys().nth_back(max_reorg_depth) diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index 252b93606ec4d..c52ccb3043f72 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -158,15 +158,19 @@ impl AppendableChain { state.revert_to(parent.number); // Revert changesets to get the state of the parent that we need to apply the change. - let post_state_data = BundleStateDataRef { + let bundle_state_data = BundleStateDataRef { state: &state, sidechain_block_hashes: &side_chain_block_hashes, canonical_block_hashes, canonical_fork, }; - let block_state = - Self::validate_and_execute_sidechain(block.clone(), parent, post_state_data, externals) - .map_err(|err| InsertBlockError::new(block.block.clone(), err.into()))?; + let block_state = Self::validate_and_execute_sidechain( + block.clone(), + parent, + bundle_state_data, + externals, + ) + .map_err(|err| InsertBlockError::new(block.block.clone(), err.into()))?; state.extend(block_state); // If all is okay, return new chain back. Present chain is not modified. @@ -185,7 +189,7 @@ impl AppendableChain { fn validate_and_execute( block: SealedBlockWithSenders, parent_block: &SealedHeader, - post_state_data_provider: BSDP, + bundle_state_data_provider: BSDP, externals: &TreeExternals, block_kind: BlockKind, block_validation_kind: BlockValidationKind, @@ -203,10 +207,10 @@ impl AppendableChain { // get the state provider. 
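        // Rough sketch of the layering set up below: the historical database state at the
        // canonical fork is the base, and `BundleStateProvider` resolves the in-memory
        // bundle state (the not-yet-persisted sidechain blocks) first, so execution sees
        // committed history plus pending changes.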
let db = externals.database(); - let canonical_fork = post_state_data_provider.canonical_fork(); + let canonical_fork = bundle_state_data_provider.canonical_fork(); let state_provider = db.history_by_block_number(canonical_fork.number)?; - let provider = BundleStateProvider::new(state_provider, post_state_data_provider); + let provider = BundleStateProvider::new(state_provider, bundle_state_data_provider); let mut executor = externals.executor_factory.with_state(&provider); executor.execute_and_verify_receipt(&block, U256::MAX, Some(senders))?; @@ -232,7 +236,7 @@ impl AppendableChain { fn validate_and_execute_sidechain( block: SealedBlockWithSenders, parent_block: &SealedHeader, - post_state_data_provider: BSDP, + bundle_state_data_provider: BSDP, externals: &TreeExternals, ) -> RethResult where @@ -243,7 +247,7 @@ impl AppendableChain { Self::validate_and_execute( block, parent_block, - post_state_data_provider, + bundle_state_data_provider, externals, BlockKind::ForksHistoricalBlock, BlockValidationKind::SkipStateRootValidation, @@ -279,7 +283,7 @@ impl AppendableChain { { let parent_block = self.chain.tip(); - let post_state_data = BundleStateDataRef { + let bundle_state_data = BundleStateDataRef { state: self.state(), sidechain_block_hashes: &side_chain_block_hashes, canonical_block_hashes, @@ -289,7 +293,7 @@ impl AppendableChain { let block_state = Self::validate_and_execute( block.clone(), parent_block, - post_state_data, + bundle_state_data, externals, block_kind, block_validation_kind, diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 30c6c351c5703..18f74ecf222f1 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -22,6 +22,9 @@ revm.workspace = true # common tracing.workspace = true +[dev-dependencies] +reth-trie.workspace = true + [features] optimism = [ "revm/optimism", diff --git a/crates/revm/src/processor.rs b/crates/revm/src/processor.rs index f78c0411703fa..1f30ef4dd9856 100644 --- a/crates/revm/src/processor.rs +++ b/crates/revm/src/processor.rs @@ -568,6 +568,7 @@ mod tests { use reth_provider::{ AccountReader, BlockHashReader, BundleStateWithReceipts, StateRootProvider, }; + use reth_trie::updates::TrieUpdates; use revm::{Database, TransitionState}; use std::collections::HashMap; @@ -627,6 +628,13 @@ mod tests { fn state_root(&self, _bundle_state: &BundleStateWithReceipts) -> ProviderResult { unimplemented!("state root computation is not supported") } + + fn state_root_with_updates( + &self, + _bundle_state: &BundleStateWithReceipts, + ) -> ProviderResult<(B256, TrieUpdates)> { + unimplemented!("state root computation is not supported") + } } impl StateProvider for StateProviderTest { diff --git a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs index c1ee9b65f4775..a2858142971c9 100644 --- a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs +++ b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs @@ -13,6 +13,7 @@ use reth_primitives::{ }; use reth_trie::{ hashed_cursor::{HashedPostState, HashedPostStateCursorFactory, HashedStorage}, + updates::TrieUpdates, StateRoot, StateRootError, }; use revm::{db::states::BundleState, primitives::AccountInfo}; @@ -154,6 +155,20 @@ impl BundleStateWithReceipts { hashed_state.sorted() } + /// Returns [StateRoot] calculator. 
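+    ///
+    /// The calculator overlays the in-memory [HashedPostState] through a hashed cursor
+    /// factory and narrows the trie walk to the changed account and storage prefixes, so
+    /// only the touched subtries are revisited (see the body below).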
+ fn state_root_calculator<'a, 'b, TX: DbTx>( + &self, + tx: &'a TX, + hashed_post_state: &'b HashedPostState, + ) -> StateRoot<'a, TX, HashedPostStateCursorFactory<'a, 'b, TX>> { + let (account_prefix_set, storage_prefix_set) = hashed_post_state.construct_prefix_sets(); + let hashed_cursor_factory = HashedPostStateCursorFactory::new(tx, hashed_post_state); + StateRoot::new(tx) + .with_hashed_cursor_factory(hashed_cursor_factory) + .with_changed_account_prefixes(account_prefix_set) + .with_changed_storage_prefixes(storage_prefix_set) + } + /// Calculate the state root for this [BundleState]. /// Internally, function calls [Self::hash_state_slow] to obtain the [HashedPostState]. /// Afterwards, it retrieves the prefixsets from the [HashedPostState] and uses them to @@ -196,13 +211,17 @@ impl BundleStateWithReceipts { /// The state root for this [BundleState]. pub fn state_root_slow(&self, tx: &TX) -> Result { let hashed_post_state = self.hash_state_slow(); - let (account_prefix_set, storage_prefix_set) = hashed_post_state.construct_prefix_sets(); - let hashed_cursor_factory = HashedPostStateCursorFactory::new(tx, &hashed_post_state); - StateRoot::new(tx) - .with_hashed_cursor_factory(hashed_cursor_factory) - .with_changed_account_prefixes(account_prefix_set) - .with_changed_storage_prefixes(storage_prefix_set) - .root() + self.state_root_calculator(tx, &hashed_post_state).root() + } + + /// Calculates the state root for this [BundleState] and returns it alongside trie updates. + /// See [Self::state_root_slow] for more info. + pub fn state_root_slow_with_updates( + &self, + tx: &TX, + ) -> Result<(B256, TrieUpdates), StateRootError> { + let hashed_post_state = self.hash_state_slow(); + self.state_root_calculator(tx, &hashed_post_state).root_with_updates() } /// Transform block number to the index of block. diff --git a/crates/storage/provider/src/bundle_state/state_changes.rs b/crates/storage/provider/src/bundle_state/state_changes.rs index 765fc0ee20c80..a62606dedebc3 100644 --- a/crates/storage/provider/src/bundle_state/state_changes.rs +++ b/crates/storage/provider/src/bundle_state/state_changes.rs @@ -19,7 +19,7 @@ impl From for StateChanges { } impl StateChanges { - /// Write the post state to the database. + /// Write the bundle state to the database. pub fn write_to_db(mut self, tx: &TX) -> Result<(), DatabaseError> { // sort all entries so they can be written to database in more performant way. // and take smaller memory footprint. @@ -28,28 +28,28 @@ impl StateChanges { self.0.contracts.par_sort_by_key(|a| a.0); // Write new account state - tracing::trace!(target: "provider::post_state", len = self.0.accounts.len(), "Writing new account state"); + tracing::trace!(target: "provider::bundle_state", len = self.0.accounts.len(), "Writing new account state"); let mut accounts_cursor = tx.cursor_write::()?; // write account to database. 
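        // `Some(account)` means the account exists after execution and is upserted;
        // `None` means it was destroyed, so any existing plain-state entry is deleted.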
for (address, account) in self.0.accounts.into_iter() { if let Some(account) = account { - tracing::trace!(target: "provider::post_state", ?address, "Updating plain state account"); + tracing::trace!(target: "provider::bundle_state", ?address, "Updating plain state account"); accounts_cursor.upsert(address, into_reth_acc(account))?; } else if accounts_cursor.seek_exact(address)?.is_some() { - tracing::trace!(target: "provider::post_state", ?address, "Deleting plain state account"); + tracing::trace!(target: "provider::bundle_state", ?address, "Deleting plain state account"); accounts_cursor.delete_current()?; } } // Write bytecode - tracing::trace!(target: "provider::post_state", len = self.0.contracts.len(), "Writing bytecodes"); + tracing::trace!(target: "provider::bundle_state", len = self.0.contracts.len(), "Writing bytecodes"); let mut bytecodes_cursor = tx.cursor_write::()?; for (hash, bytecode) in self.0.contracts.into_iter() { bytecodes_cursor.upsert(hash, Bytecode(bytecode))?; } // Write new storage state and wipe storage if needed. - tracing::trace!(target: "provider::post_state", len = self.0.storage.len(), "Writing new storage state"); + tracing::trace!(target: "provider::bundle_state", len = self.0.storage.len(), "Writing new storage state"); let mut storages_cursor = tx.cursor_dup_write::()?; for PlainStorageChangeset { address, wipe_storage, storage } in self.0.storage.into_iter() { // Wiping of storage. @@ -65,7 +65,7 @@ impl StateChanges { storage.par_sort_unstable_by_key(|a| a.key); for entry in storage.into_iter() { - tracing::trace!(target: "provider::post_state", ?address, ?entry.key, "Updating plain state storage"); + tracing::trace!(target: "provider::bundle_state", ?address, ?entry.key, "Updating plain state storage"); if let Some(db_entry) = storages_cursor.seek_by_key_subkey(address, entry.key)? { if db_entry.key == entry.key { storages_cursor.delete_current()?; diff --git a/crates/storage/provider/src/providers/bundle_state_provider.rs b/crates/storage/provider/src/providers/bundle_state_provider.rs index 46d9ae702ffd7..f2b0e5fdf468e 100644 --- a/crates/storage/provider/src/providers/bundle_state_provider.rs +++ b/crates/storage/provider/src/providers/bundle_state_provider.rs @@ -4,6 +4,7 @@ use crate::{ }; use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{trie::AccountProof, Account, Address, BlockNumber, Bytecode, B256}; +use reth_trie::updates::TrieUpdates; /// A state provider that either resolves to data in a wrapped [`crate::BundleStateWithReceipts`], /// or an underlying state provider. @@ -11,14 +12,14 @@ use reth_primitives::{trie::AccountProof, Account, Address, BlockNumber, Bytecod pub struct BundleStateProvider { /// The inner state provider. 
pub(crate) state_provider: SP, - /// Post state data, - pub(crate) post_state_data_provider: BSDP, + /// Bundle state data, + pub(crate) bundle_state_data_provider: BSDP, } impl BundleStateProvider { - /// Create new post-state provider - pub fn new(state_provider: SP, post_state_data_provider: BSDP) -> Self { - Self { state_provider, post_state_data_provider } + /// Create new bundle state provider + pub fn new(state_provider: SP, bundle_state_data_provider: BSDP) -> Self { + Self { state_provider, bundle_state_data_provider } } } @@ -28,7 +29,7 @@ impl BlockHashReader for BundleStateProvider { fn block_hash(&self, block_number: BlockNumber) -> ProviderResult> { - let block_hash = self.post_state_data_provider.block_hash(block_number); + let block_hash = self.bundle_state_data_provider.block_hash(block_number); if block_hash.is_some() { return Ok(block_hash) } @@ -48,7 +49,7 @@ impl AccountReader for BundleStateProvider { fn basic_account(&self, address: Address) -> ProviderResult> { - if let Some(account) = self.post_state_data_provider.state().account(&address) { + if let Some(account) = self.bundle_state_data_provider.state().account(&address) { Ok(account) } else { self.state_provider.basic_account(address) @@ -59,11 +60,20 @@ impl AccountReader impl StateRootProvider for BundleStateProvider { - fn state_root(&self, post_state: &BundleStateWithReceipts) -> ProviderResult { - let mut state = self.post_state_data_provider.state().clone(); - state.extend(post_state.clone()); + fn state_root(&self, bundle_state: &BundleStateWithReceipts) -> ProviderResult { + let mut state = self.bundle_state_data_provider.state().clone(); + state.extend(bundle_state.clone()); self.state_provider.state_root(&state) } + + fn state_root_with_updates( + &self, + bundle_state: &BundleStateWithReceipts, + ) -> ProviderResult<(B256, TrieUpdates)> { + let mut state = self.bundle_state_data_provider.state().clone(); + state.extend(bundle_state.clone()); + self.state_provider.state_root_with_updates(&state) + } } impl StateProvider @@ -76,7 +86,7 @@ impl StateProvider ) -> ProviderResult> { let u256_storage_key = storage_key.into(); if let Some(value) = - self.post_state_data_provider.state().storage(&account, u256_storage_key) + self.bundle_state_data_provider.state().storage(&account, u256_storage_key) { return Ok(Some(value)) } @@ -85,7 +95,7 @@ impl StateProvider } fn bytecode_by_hash(&self, code_hash: B256) -> ProviderResult> { - if let Some(bytecode) = self.post_state_data_provider.state().bytecode(&code_hash) { + if let Some(bytecode) = self.bundle_state_data_provider.state().bytecode(&code_hash) { return Ok(Some(bytecode)) } diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 898b5a39c065f..528577963e8fa 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -571,14 +571,14 @@ where fn pending_with_provider( &self, - post_state_data: Box, + bundle_state_data: Box, ) -> ProviderResult> { - let canonical_fork = post_state_data.canonical_fork(); + let canonical_fork = bundle_state_data.canonical_fork(); trace!(target: "providers::blockchain", ?canonical_fork, "Returning post state provider"); let state_provider = self.history_by_block_hash(canonical_fork.hash)?; - let post_state_provider = BundleStateProvider::new(state_provider, post_state_data); - Ok(Box::new(post_state_provider)) + let bundle_state_provider = BundleStateProvider::new(state_provider, bundle_state_data); + 
Ok(Box::new(bundle_state_provider)) } } diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index dbdba8f988cae..7d7fabe2018e8 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -14,6 +14,7 @@ use reth_interfaces::provider::ProviderResult; use reth_primitives::{ trie::AccountProof, Account, Address, BlockNumber, Bytecode, StorageKey, StorageValue, B256, }; +use reth_trie::updates::TrieUpdates; /// State provider for a given block number which takes a tx reference. /// @@ -198,7 +199,14 @@ impl<'b, TX: DbTx> BlockHashReader for HistoricalStateProviderRef<'b, TX> { } impl<'b, TX: DbTx> StateRootProvider for HistoricalStateProviderRef<'b, TX> { - fn state_root(&self, _post_state: &BundleStateWithReceipts) -> ProviderResult { + fn state_root(&self, _bundle_state: &BundleStateWithReceipts) -> ProviderResult { + Err(ProviderError::StateRootNotAvailableForHistoricalBlock) + } + + fn state_root_with_updates( + &self, + _bundle_state: &BundleStateWithReceipts, + ) -> ProviderResult<(B256, TrieUpdates)> { Err(ProviderError::StateRootNotAvailableForHistoricalBlock) } } diff --git a/crates/storage/provider/src/providers/state/latest.rs b/crates/storage/provider/src/providers/state/latest.rs index 1b45555fc2183..df515f78e202a 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -12,6 +12,7 @@ use reth_primitives::{ keccak256, trie::AccountProof, Account, Address, BlockNumber, Bytecode, StorageKey, StorageValue, B256, }; +use reth_trie::updates::TrieUpdates; /// State provider over latest state that takes tx reference. #[derive(Debug)] @@ -62,6 +63,15 @@ impl<'b, TX: DbTx> StateRootProvider for LatestStateProviderRef<'b, TX> { fn state_root(&self, bundle_state: &BundleStateWithReceipts) -> ProviderResult { bundle_state.state_root_slow(self.db).map_err(|err| ProviderError::Database(err.into())) } + + fn state_root_with_updates( + &self, + bundle_state: &BundleStateWithReceipts, + ) -> ProviderResult<(B256, TrieUpdates)> { + bundle_state + .state_root_slow_with_updates(self.db) + .map_err(|err| ProviderError::Database(err.into())) + } } impl<'b, TX: DbTx> StateProvider for LatestStateProviderRef<'b, TX> { diff --git a/crates/storage/provider/src/providers/state/macros.rs b/crates/storage/provider/src/providers/state/macros.rs index 67b3c33f50af1..300b2c2ec4f5e 100644 --- a/crates/storage/provider/src/providers/state/macros.rs +++ b/crates/storage/provider/src/providers/state/macros.rs @@ -32,6 +32,7 @@ macro_rules! delegate_provider_impls { for $target => StateRootProvider $(where [$($generics)*])? { fn state_root(&self, state: &crate::BundleStateWithReceipts) -> reth_interfaces::provider::ProviderResult; + fn state_root_with_updates(&self, state: &crate::BundleStateWithReceipts) -> reth_interfaces::provider::ProviderResult<(reth_primitives::B256, reth_trie::updates::TrieUpdates)>; } AccountReader $(where [$($generics)*])? 
{ fn basic_account(&self, address: reth_primitives::Address) -> reth_interfaces::provider::ProviderResult>; diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index c4689ac57234d..f32b4fd8132c4 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -15,6 +15,7 @@ use reth_primitives::{ SealedBlock, SealedHeader, StorageKey, StorageValue, TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, B256, U256, }; +use reth_trie::updates::TrieUpdates; use revm::primitives::{BlockEnv, CfgEnv}; use std::{ collections::{BTreeMap, HashMap}, @@ -496,7 +497,14 @@ impl AccountReader for MockEthProvider { } impl StateRootProvider for MockEthProvider { - fn state_root(&self, _state: &BundleStateWithReceipts) -> ProviderResult { + fn state_root(&self, _bundle_state: &BundleStateWithReceipts) -> ProviderResult { + todo!() + } + + fn state_root_with_updates( + &self, + _bundle_state: &BundleStateWithReceipts, + ) -> ProviderResult<(B256, TrieUpdates)> { todo!() } } @@ -602,7 +610,7 @@ impl StateProviderFactory for MockEthProvider { fn pending_with_provider<'a>( &'a self, - _post_state_data: Box, + _bundle_state_data: Box, ) -> ProviderResult> { Ok(Box::new(self.clone())) } @@ -638,7 +646,7 @@ impl StateProviderFactory for Arc { fn pending_with_provider<'a>( &'a self, - _post_state_data: Box, + _bundle_state_data: Box, ) -> ProviderResult> { Ok(Box::new(self.clone())) } diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 45258bc692635..7c2b761cef3dd 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -17,6 +17,7 @@ use reth_primitives::{ SealedHeader, StorageKey, StorageValue, TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, B256, MAINNET, U256, }; +use reth_trie::updates::TrieUpdates; use revm::primitives::{BlockEnv, CfgEnv}; use std::{ ops::{RangeBounds, RangeInclusive}, @@ -274,6 +275,13 @@ impl StateRootProvider for NoopProvider { fn state_root(&self, _state: &BundleStateWithReceipts) -> ProviderResult { Ok(B256::default()) } + + fn state_root_with_updates( + &self, + _bundle_state: &BundleStateWithReceipts, + ) -> ProviderResult<(B256, TrieUpdates)> { + Ok((B256::default(), TrieUpdates::default())) + } } impl StateProvider for NoopProvider { @@ -368,7 +376,7 @@ impl StateProviderFactory for NoopProvider { fn pending_with_provider<'a>( &'a self, - _post_state_data: Box, + _bundle_state_data: Box, ) -> ProviderResult> { Ok(Box::new(*self)) } diff --git a/crates/storage/provider/src/traits/state.rs b/crates/storage/provider/src/traits/state.rs index 6175504411770..158ec2bb4f594 100644 --- a/crates/storage/provider/src/traits/state.rs +++ b/crates/storage/provider/src/traits/state.rs @@ -6,6 +6,7 @@ use reth_primitives::{ trie::AccountProof, Address, BlockHash, BlockId, BlockNumHash, BlockNumber, BlockNumberOrTag, Bytecode, StorageKey, StorageValue, B256, KECCAK_EMPTY, U256, }; +use reth_trie::updates::TrieUpdates; /// Type alias of boxed [StateProvider]. pub type StateProviderBox<'a> = Box; @@ -180,11 +181,11 @@ pub trait StateProviderFactory: BlockIdReader + Send + Sync { block_hash: B256, ) -> ProviderResult>>; - /// Return a [StateProvider] that contains post state data provider. + /// Return a [StateProvider] that contains bundle state data provider. 
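+    /// (i.e. one layered over in-memory state for blocks that are not yet persisted to the
+    /// database).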
    /// Used to inspect or execute transactions on the pending state.
    fn pending_with_provider(
        &self,
-        post_state_data: Box<dyn BundleStateDataProvider>,
+        bundle_state_data: Box<dyn BundleStateDataProvider>,
    ) -> ProviderResult<StateProviderBox<'_>>;
}
@@ -232,6 +233,17 @@ pub trait BundleStateDataProvider: Send + Sync {
 /// A type that can compute the state root of a given post state.
 #[auto_impl[Box, &, Arc]]
 pub trait StateRootProvider: Send + Sync {
-    /// Returns the state root of the BundleState on top of the current state.
-    fn state_root(&self, post_state: &BundleStateWithReceipts) -> ProviderResult<B256>;
+    /// Returns the state root of the `BundleState` on top of the current state.
+    ///
+    /// NOTE: It is recommended to provide a different implementation from
+    /// `state_root_with_updates` since it affects the memory usage during state root
+    /// computation.
+    fn state_root(&self, bundle_state: &BundleStateWithReceipts) -> ProviderResult<B256>;
+
+    /// Returns the state root of the `BundleState` on top of the current state, along with the
+    /// trie updates to be committed to the database.
+    fn state_root_with_updates(
+        &self,
+        bundle_state: &BundleStateWithReceipts,
+    ) -> ProviderResult<(B256, TrieUpdates)>;
 }

From 631eb2b6249a8ee32a5e31a7f6c832ac0e0c9db2 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Sun, 19 Nov 2023 16:19:00 +0100
Subject: [PATCH 47/77] chore: add hash+signer fn (#5493)

---
 crates/primitives/src/transaction/variant.rs | 32 +++++++++++++++++++-
 1 file changed, 31 insertions(+), 1 deletion(-)

diff --git a/crates/primitives/src/transaction/variant.rs b/crates/primitives/src/transaction/variant.rs
index 2ab2667222c7a..b89aa6aa10e3c 100644
--- a/crates/primitives/src/transaction/variant.rs
+++ b/crates/primitives/src/transaction/variant.rs
@@ -1,8 +1,10 @@
 //! Helper enum functions for `Transaction`, `TransactionSigned` and
 //! `TransactionSignedEcRecovered`
 use crate::{
-    Transaction, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash,
+    Address, Transaction, TransactionSigned, TransactionSignedEcRecovered, TransactionSignedNoHash,
+    B256,
 };
+use std::ops::Deref;

 /// Represents various different transaction formats used in reth.
 ///
@@ -29,6 +31,26 @@ impl TransactionSignedVariant {
        }
    }

+    /// Returns the hash of the transaction
+    pub fn hash(&self) -> B256 {
+        match self {
+            TransactionSignedVariant::SignedNoHash(tx) => tx.hash(),
+            TransactionSignedVariant::Signed(tx) => tx.hash,
+            TransactionSignedVariant::SignedEcRecovered(tx) => tx.hash,
+        }
+    }
+
+    /// Returns the signer of the transaction.
+    ///
+    /// If the transaction is not of [TransactionSignedEcRecovered], the signer will be recovered.
+    pub fn signer(&self) -> Option<Address>
{ + match self { + TransactionSignedVariant::SignedNoHash(tx) => tx.recover_signer(), + TransactionSignedVariant::Signed(tx) => tx.recover_signer(), + TransactionSignedVariant::SignedEcRecovered(tx) => Some(tx.signer), + } + } + /// Returns [TransactionSigned] type /// else None pub fn as_signed(&self) -> Option<&TransactionSigned> { @@ -130,3 +152,11 @@ impl AsRef for TransactionSignedVariant { self.as_raw() } } + +impl Deref for TransactionSignedVariant { + type Target = Transaction; + + fn deref(&self) -> &Self::Target { + self.as_raw() + } +} From aea11405addcd82ce13507424aa66ebe8278d2e3 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 19 Nov 2023 16:21:50 +0100 Subject: [PATCH 48/77] feat: get rid of lifetime GATs (#5478) --- bin/reth/src/db/snapshots/bench.rs | 2 +- bin/reth/src/db/snapshots/headers.rs | 2 +- bin/reth/src/db/snapshots/receipts.rs | 2 +- bin/reth/src/db/snapshots/transactions.rs | 2 +- bin/reth/src/init.rs | 12 +-- crates/prune/src/segments/account_history.rs | 2 +- crates/prune/src/segments/headers.rs | 4 +- crates/prune/src/segments/history.rs | 2 +- crates/prune/src/segments/mod.rs | 6 +- crates/prune/src/segments/receipts.rs | 4 +- crates/prune/src/segments/receipts_by_logs.rs | 2 +- crates/prune/src/segments/sender_recovery.rs | 2 +- crates/prune/src/segments/storage_history.rs | 2 +- .../prune/src/segments/transaction_lookup.rs | 2 +- crates/prune/src/segments/transactions.rs | 2 +- crates/rpc/rpc/src/debug.rs | 2 +- crates/rpc/rpc/src/eth/api/mod.rs | 8 +- crates/rpc/rpc/src/eth/api/transactions.rs | 52 +++++------- crates/snapshot/src/segments/headers.rs | 2 +- crates/snapshot/src/segments/mod.rs | 6 +- crates/snapshot/src/segments/receipts.rs | 2 +- crates/snapshot/src/segments/transactions.rs | 2 +- crates/snapshot/src/snapshotter.rs | 2 +- crates/stages/src/stage.rs | 6 +- crates/stages/src/stages/bodies.rs | 6 +- crates/stages/src/stages/execution.rs | 12 +-- crates/stages/src/stages/finish.rs | 4 +- crates/stages/src/stages/hashing_account.rs | 8 +- crates/stages/src/stages/hashing_storage.rs | 6 +- crates/stages/src/stages/headers.rs | 10 +-- .../src/stages/index_account_history.rs | 4 +- .../src/stages/index_storage_history.rs | 4 +- crates/stages/src/stages/merkle.rs | 8 +- crates/stages/src/stages/mod.rs | 2 +- crates/stages/src/stages/sender_recovery.rs | 6 +- crates/stages/src/stages/total_difficulty.rs | 6 +- crates/stages/src/stages/tx_lookup.rs | 6 +- crates/stages/src/test_utils/stage.rs | 4 +- crates/stages/src/test_utils/test_db.rs | 12 +-- crates/storage/db/src/abstraction/common.rs | 23 ++++-- crates/storage/db/src/abstraction/database.rs | 53 +++++------- crates/storage/db/src/abstraction/mock.rs | 45 ++++------ .../storage/db/src/abstraction/transaction.rs | 57 +++++-------- .../storage/db/src/implementation/mdbx/mod.rs | 10 +-- .../storage/db/src/implementation/mdbx/tx.rs | 32 +++----- crates/storage/db/src/lib.rs | 13 ++- .../provider/src/providers/database/mod.rs | 15 ++-- .../src/providers/database/provider.rs | 18 ++-- crates/storage/provider/src/providers/mod.rs | 17 ++-- .../storage/provider/src/test_utils/blocks.rs | 2 +- .../storage/provider/src/test_utils/mock.rs | 34 ++++---- .../storage/provider/src/test_utils/noop.rs | 17 ++-- crates/storage/provider/src/traits/state.rs | 23 +++--- crates/trie/src/hashed_cursor/default.rs | 6 +- crates/trie/src/hashed_cursor/post_state.rs | 6 +- crates/trie/src/trie.rs | 4 +- docs/crates/db.md | 82 ++++++++----------- 57 files changed, 292 insertions(+), 393 deletions(-) diff --git 
a/bin/reth/src/db/snapshots/bench.rs b/bin/reth/src/db/snapshots/bench.rs index 2505b23d4015f..928898205f077 100644 --- a/bin/reth/src/db/snapshots/bench.rs +++ b/bin/reth/src/db/snapshots/bench.rs @@ -25,7 +25,7 @@ pub(crate) fn bench( ) -> eyre::Result<()> where F1: FnMut() -> eyre::Result, - F2: Fn(DatabaseProviderRO<'_, DatabaseEnv>) -> eyre::Result, + F2: Fn(DatabaseProviderRO) -> eyre::Result, R: Debug + PartialEq, { let (db, chain) = db; diff --git a/bin/reth/src/db/snapshots/headers.rs b/bin/reth/src/db/snapshots/headers.rs index e4537cd6c3da1..d05ff80c8c9d8 100644 --- a/bin/reth/src/db/snapshots/headers.rs +++ b/bin/reth/src/db/snapshots/headers.rs @@ -22,7 +22,7 @@ use std::{ impl Command { pub(crate) fn generate_headers_snapshot( &self, - provider: &DatabaseProviderRO<'_, DB>, + provider: &DatabaseProviderRO, compression: Compression, inclusion_filter: InclusionFilter, phf: PerfectHashingFunction, diff --git a/bin/reth/src/db/snapshots/receipts.rs b/bin/reth/src/db/snapshots/receipts.rs index dc8708ac04031..b24eccda51d83 100644 --- a/bin/reth/src/db/snapshots/receipts.rs +++ b/bin/reth/src/db/snapshots/receipts.rs @@ -22,7 +22,7 @@ use std::{ impl Command { pub(crate) fn generate_receipts_snapshot( &self, - provider: &DatabaseProviderRO<'_, DB>, + provider: &DatabaseProviderRO, compression: Compression, inclusion_filter: InclusionFilter, phf: PerfectHashingFunction, diff --git a/bin/reth/src/db/snapshots/transactions.rs b/bin/reth/src/db/snapshots/transactions.rs index 00c06102e8d7a..94a61d262a8e1 100644 --- a/bin/reth/src/db/snapshots/transactions.rs +++ b/bin/reth/src/db/snapshots/transactions.rs @@ -22,7 +22,7 @@ use std::{ impl Command { pub(crate) fn generate_transactions_snapshot( &self, - provider: &DatabaseProviderRO<'_, DB>, + provider: &DatabaseProviderRO, compression: Compression, inclusion_filter: InclusionFilter, phf: PerfectHashingFunction, diff --git a/bin/reth/src/init.rs b/bin/reth/src/init.rs index 6b3d638040edb..04b29036226fc 100644 --- a/bin/reth/src/init.rs +++ b/bin/reth/src/init.rs @@ -1,7 +1,7 @@ //! Reth genesis initialization utility functions. use reth_db::{ cursor::DbCursorRO, - database::{Database, DatabaseGAT}, + database::Database, tables, transaction::{DbTx, DbTxMut}, }; @@ -94,7 +94,7 @@ pub fn init_genesis( /// Inserts the genesis state into the database. pub fn insert_genesis_state( - tx: &>::TXMut, + tx: &::TXMut, genesis: &reth_primitives::Genesis, ) -> ProviderResult<()> { let mut state_init: BundleStateInit = HashMap::new(); @@ -160,7 +160,7 @@ pub fn insert_genesis_state( /// Inserts hashes for the genesis state. pub fn insert_genesis_hashes( - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, genesis: &reth_primitives::Genesis, ) -> ProviderResult<()> { // insert and hash accounts to hashing table @@ -184,7 +184,7 @@ pub fn insert_genesis_hashes( /// Inserts history indices for genesis accounts and storage. pub fn insert_genesis_history( - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, genesis: &reth_primitives::Genesis, ) -> ProviderResult<()> { let account_transitions = @@ -204,7 +204,7 @@ pub fn insert_genesis_history( /// Inserts header for the genesis state. 
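///
/// Beyond the header itself this records the canonical-hash, header-number, and
/// total-difficulty entries for block zero (a general summary; the body follows below).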
pub fn insert_genesis_header( - tx: &>::TXMut, + tx: &::TXMut, chain: Arc, ) -> ProviderResult<()> { let header = chain.sealed_genesis_header(); @@ -236,7 +236,7 @@ mod tests { #[allow(clippy::type_complexity)] fn collect_table_entries( - tx: &>::TX, + tx: &::TX, ) -> Result>, InitDatabaseError> where DB: Database, diff --git a/crates/prune/src/segments/account_history.rs b/crates/prune/src/segments/account_history.rs index c1b5ae682cfcf..d8d94764bd93a 100644 --- a/crates/prune/src/segments/account_history.rs +++ b/crates/prune/src/segments/account_history.rs @@ -32,7 +32,7 @@ impl Segment for AccountHistory { #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] fn prune( &self, - provider: &DatabaseProviderRW<'_, DB>, + provider: &DatabaseProviderRW, input: PruneInput, ) -> Result { let range = match input.get_next_block_range() { diff --git a/crates/prune/src/segments/headers.rs b/crates/prune/src/segments/headers.rs index 12ba19416af3d..a1e951665e1b0 100644 --- a/crates/prune/src/segments/headers.rs +++ b/crates/prune/src/segments/headers.rs @@ -33,7 +33,7 @@ impl Segment for Headers { #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] fn prune( &self, - provider: &DatabaseProviderRW<'_, DB>, + provider: &DatabaseProviderRW, input: PruneInput, ) -> Result { let block_range = match input.get_next_block_range() { @@ -91,7 +91,7 @@ impl Headers { /// Returns `done`, number of pruned rows and last pruned block number. fn prune_table>( &self, - provider: &DatabaseProviderRW<'_, DB>, + provider: &DatabaseProviderRW, range: RangeInclusive, delete_limit: usize, ) -> RethResult<(bool, usize, BlockNumber)> { diff --git a/crates/prune/src/segments/history.rs b/crates/prune/src/segments/history.rs index bb3352a396a0c..4836eeb841547 100644 --- a/crates/prune/src/segments/history.rs +++ b/crates/prune/src/segments/history.rs @@ -14,7 +14,7 @@ use reth_provider::DatabaseProviderRW; /// /// Returns total number of processed (walked) and deleted entities. pub(crate) fn prune_history_indices( - provider: &DatabaseProviderRW<'_, DB>, + provider: &DatabaseProviderRW, to_block: BlockNumber, key_matches: impl Fn(&T::Key, &T::Key) -> bool, last_key: impl Fn(&T::Key) -> T::Key, diff --git a/crates/prune/src/segments/mod.rs b/crates/prune/src/segments/mod.rs index 62fda61958647..339c4e0137456 100644 --- a/crates/prune/src/segments/mod.rs +++ b/crates/prune/src/segments/mod.rs @@ -45,14 +45,14 @@ pub trait Segment: Debug + Send + Sync { /// Prune data for [Self::segment] using the provided input. fn prune( &self, - provider: &DatabaseProviderRW<'_, DB>, + provider: &DatabaseProviderRW, input: PruneInput, ) -> Result; /// Save checkpoint for [Self::segment] to the database. fn save_checkpoint( &self, - provider: &DatabaseProviderRW<'_, DB>, + provider: &DatabaseProviderRW, checkpoint: PruneCheckpoint, ) -> ProviderResult<()> { provider.save_prune_checkpoint(self.segment(), checkpoint) @@ -80,7 +80,7 @@ impl PruneInput { /// To get the range end: get last tx number for `to_block`. 
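    /// For example (illustrative numbers): if the previous checkpoint recorded transaction
    /// 1000 as the highest pruned one, the next range is `1001..=last_tx_of(to_block)`,
    /// where `last_tx_of` stands in for the block-body lookup done in the body.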
pub(crate) fn get_next_tx_num_range( &self, - provider: &DatabaseProviderRW<'_, DB>, + provider: &DatabaseProviderRW, ) -> RethResult>> { let from_tx_number = self.previous_checkpoint // Checkpoint exists, prune from the next transaction after the highest pruned one diff --git a/crates/prune/src/segments/receipts.rs b/crates/prune/src/segments/receipts.rs index fb97897e0cd47..acbdd6829cf15 100644 --- a/crates/prune/src/segments/receipts.rs +++ b/crates/prune/src/segments/receipts.rs @@ -31,7 +31,7 @@ impl Segment for Receipts { #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] fn prune( &self, - provider: &DatabaseProviderRW<'_, DB>, + provider: &DatabaseProviderRW, input: PruneInput, ) -> Result { let tx_range = match input.get_next_tx_num_range(provider)? { @@ -71,7 +71,7 @@ impl Segment for Receipts { fn save_checkpoint( &self, - provider: &DatabaseProviderRW<'_, DB>, + provider: &DatabaseProviderRW, checkpoint: PruneCheckpoint, ) -> ProviderResult<()> { provider.save_prune_checkpoint(PruneSegment::Receipts, checkpoint)?; diff --git a/crates/prune/src/segments/receipts_by_logs.rs b/crates/prune/src/segments/receipts_by_logs.rs index 8f9faa4fd7b08..aec6d7a2cf661 100644 --- a/crates/prune/src/segments/receipts_by_logs.rs +++ b/crates/prune/src/segments/receipts_by_logs.rs @@ -32,7 +32,7 @@ impl Segment for ReceiptsByLogs { #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] fn prune( &self, - provider: &DatabaseProviderRW<'_, DB>, + provider: &DatabaseProviderRW, input: PruneInput, ) -> Result { // Contract log filtering removes every receipt possible except the ones in the list. So, diff --git a/crates/prune/src/segments/sender_recovery.rs b/crates/prune/src/segments/sender_recovery.rs index aa8d48a2ea4bc..14fcdfae3b7e0 100644 --- a/crates/prune/src/segments/sender_recovery.rs +++ b/crates/prune/src/segments/sender_recovery.rs @@ -30,7 +30,7 @@ impl Segment for SenderRecovery { #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] fn prune( &self, - provider: &DatabaseProviderRW<'_, DB>, + provider: &DatabaseProviderRW, input: PruneInput, ) -> Result { let tx_range = match input.get_next_tx_num_range(provider)? { diff --git a/crates/prune/src/segments/storage_history.rs b/crates/prune/src/segments/storage_history.rs index aa68eb7147947..1bf294a9736f4 100644 --- a/crates/prune/src/segments/storage_history.rs +++ b/crates/prune/src/segments/storage_history.rs @@ -36,7 +36,7 @@ impl Segment for StorageHistory { #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] fn prune( &self, - provider: &DatabaseProviderRW<'_, DB>, + provider: &DatabaseProviderRW, input: PruneInput, ) -> Result { let range = match input.get_next_block_range() { diff --git a/crates/prune/src/segments/transaction_lookup.rs b/crates/prune/src/segments/transaction_lookup.rs index 6785a22fc2f43..4a094f46018ca 100644 --- a/crates/prune/src/segments/transaction_lookup.rs +++ b/crates/prune/src/segments/transaction_lookup.rs @@ -31,7 +31,7 @@ impl Segment for TransactionLookup { #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] fn prune( &self, - provider: &DatabaseProviderRW<'_, DB>, + provider: &DatabaseProviderRW, input: PruneInput, ) -> Result { let (start, end) = match input.get_next_tx_num_range(provider)? 
{ diff --git a/crates/prune/src/segments/transactions.rs b/crates/prune/src/segments/transactions.rs index d06e97b65955a..c70fd1197a704 100644 --- a/crates/prune/src/segments/transactions.rs +++ b/crates/prune/src/segments/transactions.rs @@ -30,7 +30,7 @@ impl Segment for Transactions { #[instrument(level = "trace", target = "pruner", skip(self, provider), ret)] fn prune( &self, - provider: &DatabaseProviderRW<'_, DB>, + provider: &DatabaseProviderRW, input: PruneInput, ) -> Result { let tx_range = match input.get_next_tx_num_range(provider)? { diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index b8b313d804bfc..0dffc6661ef0b 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -458,7 +458,7 @@ where opts: GethDebugTracingOptions, env: Env, at: BlockId, - db: &mut SubState>, + db: &mut SubState, ) -> EthResult<(GethTrace, revm_primitives::State)> { let GethDebugTracingOptions { config, tracer, tracer_config, .. } = opts; diff --git a/crates/rpc/rpc/src/eth/api/mod.rs b/crates/rpc/rpc/src/eth/api/mod.rs index b30b4db562db8..5312c822921c5 100644 --- a/crates/rpc/rpc/src/eth/api/mod.rs +++ b/crates/rpc/rpc/src/eth/api/mod.rs @@ -206,7 +206,7 @@ where /// Returns the state at the given [BlockId] enum. /// /// Note: if not [BlockNumberOrTag::Pending] then this will only return canonical state. See also - pub fn state_at_block_id(&self, at: BlockId) -> EthResult> { + pub fn state_at_block_id(&self, at: BlockId) -> EthResult { Ok(self.provider().state_by_block_id(at)?) } @@ -216,7 +216,7 @@ where pub fn state_at_block_id_or_latest( &self, block_id: Option, - ) -> EthResult> { + ) -> EthResult { if let Some(block_id) = block_id { self.state_at_block_id(block_id) } else { @@ -225,12 +225,12 @@ where } /// Returns the state at the given block number - pub fn state_at_hash(&self, block_hash: B256) -> RethResult> { + pub fn state_at_hash(&self, block_hash: B256) -> RethResult { Ok(self.provider().history_by_block_hash(block_hash)?) } /// Returns the _latest_ state - pub fn latest_state(&self) -> RethResult> { + pub fn latest_state(&self) -> RethResult { Ok(self.provider().latest()?) } } diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index 78307ee333726..58af7c7697a92 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -51,7 +51,7 @@ use revm::L1BlockInfo; use std::ops::Div; /// Helper alias type for the state's [CacheDB] -pub(crate) type StateCacheDB<'r> = CacheDB>>; +pub(crate) type StateCacheDB = CacheDB>; /// Commonly used transaction related functions for the [EthApi] type in the `eth_` namespace. /// @@ -63,17 +63,17 @@ pub trait EthTransactions: Send + Sync { fn call_gas_limit(&self) -> u64; /// Returns the state at the given [BlockId] - fn state_at(&self, at: BlockId) -> EthResult>; + fn state_at(&self, at: BlockId) -> EthResult; /// Executes the closure with the state that corresponds to the given [BlockId]. 
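    ///
    /// Illustrative use (names assumed, not from this patch):
    /// `this.with_state_at_block(at, |state| Ok(state.account_balance(addr)?))`; the closure
    /// now receives an owned `StateProviderBox` with no lifetime parameter.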
fn with_state_at_block(&self, at: BlockId, f: F) -> EthResult where - F: FnOnce(StateProviderBox<'_>) -> EthResult; + F: FnOnce(StateProviderBox) -> EthResult; /// Executes the closure with the state that corresponds to the given [BlockId] on a new task async fn spawn_with_state_at_block(&self, at: BlockId, f: F) -> EthResult where - F: FnOnce(StateProviderBox<'_>) -> EthResult + Send + 'static, + F: FnOnce(StateProviderBox) -> EthResult + Send + 'static, T: Send + 'static; /// Returns the revm evm env for the requested [BlockId] @@ -154,7 +154,7 @@ pub trait EthTransactions: Send + Sync { f: F, ) -> EthResult where - F: for<'r> FnOnce(StateCacheDB<'r>, Env) -> EthResult + Send + 'static, + F: FnOnce(StateCacheDB, Env) -> EthResult + Send + 'static, R: Send + 'static; /// Executes the call request at the given [BlockId]. @@ -175,7 +175,7 @@ pub trait EthTransactions: Send + Sync { inspector: I, ) -> EthResult<(ResultAndState, Env)> where - I: for<'r> Inspector> + Send + 'static; + I: Inspector + Send + 'static; /// Executes the transaction on top of the given [BlockId] with a tracer configured by the /// config. @@ -209,9 +209,7 @@ pub trait EthTransactions: Send + Sync { f: F, ) -> EthResult where - F: for<'a> FnOnce(TracingInspector, ResultAndState, StateCacheDB<'a>) -> EthResult - + Send - + 'static, + F: FnOnce(TracingInspector, ResultAndState, StateCacheDB) -> EthResult + Send + 'static, R: Send + 'static; /// Fetches the transaction and the transaction's block @@ -236,12 +234,7 @@ pub trait EthTransactions: Send + Sync { f: F, ) -> EthResult> where - F: for<'a> FnOnce( - TransactionInfo, - TracingInspector, - ResultAndState, - StateCacheDB<'a>, - ) -> EthResult + F: FnOnce(TransactionInfo, TracingInspector, ResultAndState, StateCacheDB) -> EthResult + Send + 'static, R: Send + 'static; @@ -269,7 +262,7 @@ pub trait EthTransactions: Send + Sync { TracingInspector, ExecutionResult, &'a State, - &'a CacheDB>>, + &'a CacheDB>, ) -> EthResult + Send + 'static, @@ -293,7 +286,7 @@ pub trait EthTransactions: Send + Sync { TracingInspector, ExecutionResult, &'a State, - &'a CacheDB>>, + &'a CacheDB>, ) -> EthResult + Send + 'static, @@ -312,13 +305,13 @@ where self.inner.gas_cap } - fn state_at(&self, at: BlockId) -> EthResult> { + fn state_at(&self, at: BlockId) -> EthResult { self.state_at_block_id(at) } fn with_state_at_block(&self, at: BlockId, f: F) -> EthResult where - F: FnOnce(StateProviderBox<'_>) -> EthResult, + F: FnOnce(StateProviderBox) -> EthResult, { let state = self.state_at(at)?; f(state) @@ -326,7 +319,7 @@ where async fn spawn_with_state_at_block(&self, at: BlockId, f: F) -> EthResult where - F: FnOnce(StateProviderBox<'_>) -> EthResult + Send + 'static, + F: FnOnce(StateProviderBox) -> EthResult + Send + 'static, T: Send + 'static, { self.spawn_tracing_task_with(move |this| { @@ -595,7 +588,7 @@ where f: F, ) -> EthResult where - F: for<'r> FnOnce(StateCacheDB<'r>, Env) -> EthResult + Send + 'static, + F: FnOnce(StateCacheDB, Env) -> EthResult + Send + 'static, R: Send + 'static, { let (cfg, block_env, at) = self.evm_env_at(at).await?; @@ -638,7 +631,7 @@ where inspector: I, ) -> EthResult<(ResultAndState, Env)> where - I: for<'r> Inspector> + Send + 'static, + I: Inspector + Send + 'static, { self.spawn_with_call_at(request, at, overrides, move |db, env| inspect(db, env, inspector)) .await @@ -672,9 +665,7 @@ where f: F, ) -> EthResult where - F: for<'a> FnOnce(TracingInspector, ResultAndState, StateCacheDB<'a>) -> EthResult - + Send - + 'static, + F: 
FnOnce(TracingInspector, ResultAndState, StateCacheDB) -> EthResult + Send + 'static, R: Send + 'static, { self.spawn_with_state_at_block(at, move |state| { @@ -712,12 +703,7 @@ where f: F, ) -> EthResult> where - F: for<'a> FnOnce( - TransactionInfo, - TracingInspector, - ResultAndState, - StateCacheDB<'a>, - ) -> EthResult + F: FnOnce(TransactionInfo, TracingInspector, ResultAndState, StateCacheDB) -> EthResult + Send + 'static, R: Send + 'static, @@ -764,7 +750,7 @@ where TracingInspector, ExecutionResult, &'a State, - &'a CacheDB>>, + &'a CacheDB>, ) -> EthResult + Send + 'static, @@ -786,7 +772,7 @@ where TracingInspector, ExecutionResult, &'a State, - &'a CacheDB>>, + &'a CacheDB>, ) -> EthResult + Send + 'static, diff --git a/crates/snapshot/src/segments/headers.rs b/crates/snapshot/src/segments/headers.rs index d6852c73dec34..2bf73b2f7cf62 100644 --- a/crates/snapshot/src/segments/headers.rs +++ b/crates/snapshot/src/segments/headers.rs @@ -37,7 +37,7 @@ impl Segment for Headers { fn snapshot( &self, - provider: &DatabaseProviderRO<'_, DB>, + provider: &DatabaseProviderRO, directory: impl AsRef, range: RangeInclusive, ) -> ProviderResult<()> { diff --git a/crates/snapshot/src/segments/mod.rs b/crates/snapshot/src/segments/mod.rs index ec9061ebcb196..88cdc52ef6856 100644 --- a/crates/snapshot/src/segments/mod.rs +++ b/crates/snapshot/src/segments/mod.rs @@ -31,7 +31,7 @@ pub trait Segment: Default { /// file's save location. fn snapshot( &self, - provider: &DatabaseProviderRO<'_, DB>, + provider: &DatabaseProviderRO, directory: impl AsRef, range: RangeInclusive, ) -> ProviderResult<()>; @@ -42,7 +42,7 @@ pub trait Segment: Default { /// Generates the dataset to train a zstd dictionary with the most recent rows (at most 1000). fn dataset_for_compression>( &self, - provider: &DatabaseProviderRO<'_, DB>, + provider: &DatabaseProviderRO, range: &RangeInclusive, range_len: usize, ) -> ProviderResult>> { @@ -58,7 +58,7 @@ pub trait Segment: Default { /// Returns a [`NippyJar`] according to the desired configuration. The `directory` parameter /// determines the snapshot file's save location. 
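/// Broadly, the jar is keyed by the snapshot segment and block range and takes its
/// compression and filter settings from `segment_config` (a rough description; the body
/// follows).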
pub(crate) fn prepare_jar( - provider: &DatabaseProviderRO<'_, DB>, + provider: &DatabaseProviderRO, directory: impl AsRef, segment: SnapshotSegment, segment_config: SegmentConfig, diff --git a/crates/snapshot/src/segments/receipts.rs b/crates/snapshot/src/segments/receipts.rs index c40949a0dd65f..4b82a7133a4eb 100644 --- a/crates/snapshot/src/segments/receipts.rs +++ b/crates/snapshot/src/segments/receipts.rs @@ -34,7 +34,7 @@ impl Segment for Receipts { fn snapshot( &self, - provider: &DatabaseProviderRO<'_, DB>, + provider: &DatabaseProviderRO, directory: impl AsRef, block_range: RangeInclusive, ) -> ProviderResult<()> { diff --git a/crates/snapshot/src/segments/transactions.rs b/crates/snapshot/src/segments/transactions.rs index 4367f1ce0a7fc..585bc9625e42b 100644 --- a/crates/snapshot/src/segments/transactions.rs +++ b/crates/snapshot/src/segments/transactions.rs @@ -34,7 +34,7 @@ impl Segment for Transactions { fn snapshot( &self, - provider: &DatabaseProviderRO<'_, DB>, + provider: &DatabaseProviderRO, directory: impl AsRef, block_range: RangeInclusive, ) -> ProviderResult<()> { diff --git a/crates/snapshot/src/snapshotter.rs b/crates/snapshot/src/snapshotter.rs index d9c1f6aeb003f..030db93cea569 100644 --- a/crates/snapshot/src/snapshotter.rs +++ b/crates/snapshot/src/snapshotter.rs @@ -291,7 +291,7 @@ impl Snapshotter { fn get_snapshot_target_tx_range( &self, - provider: &DatabaseProviderRO<'_, DB>, + provider: &DatabaseProviderRO, block_to_tx_number_cache: &mut HashMap, highest_snapshot: Option, block_range: &RangeInclusive, diff --git a/crates/stages/src/stage.rs b/crates/stages/src/stage.rs index 1fc2b29c1d1e8..b165fd0709f7b 100644 --- a/crates/stages/src/stage.rs +++ b/crates/stages/src/stage.rs @@ -76,7 +76,7 @@ impl ExecInput { /// the number of transactions exceeds the threshold. pub fn next_block_range_with_transaction_threshold( &self, - provider: &DatabaseProviderRW<'_, DB>, + provider: &DatabaseProviderRW, tx_threshold: u64, ) -> Result<(Range, RangeInclusive, bool), StageError> { let start_block = self.next_block(); @@ -234,14 +234,14 @@ pub trait Stage: Send + Sync { /// upon invoking this method. fn execute( &mut self, - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, input: ExecInput, ) -> Result; /// Unwind the stage. fn unwind( &mut self, - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, input: UnwindInput, ) -> Result; } diff --git a/crates/stages/src/stages/bodies.rs b/crates/stages/src/stages/bodies.rs index 8dc6de073f052..cb57c44f81457 100644 --- a/crates/stages/src/stages/bodies.rs +++ b/crates/stages/src/stages/bodies.rs @@ -98,7 +98,7 @@ impl Stage for BodyStage { /// header, limited by the stage's batch size. fn execute( &mut self, - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, input: ExecInput, ) -> Result { if input.target_reached() { @@ -185,7 +185,7 @@ impl Stage for BodyStage { /// Unwind the stage. fn unwind( &mut self, - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, input: UnwindInput, ) -> Result { self.buffer.take(); @@ -245,7 +245,7 @@ impl Stage for BodyStage { // beforehand how many bytes we need to download. So the good solution would be to measure the // progress in gas as a proxy to size. Execution stage uses a similar approach. fn stage_checkpoint( - provider: &DatabaseProviderRW<'_, DB>, + provider: &DatabaseProviderRW, ) -> Result { Ok(EntitiesCheckpoint { processed: provider.tx_ref().entries::()? 
as u64, diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index d6ffc67df32d1..15a97cb19b623 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -110,7 +110,7 @@ impl ExecutionStage { /// Execute the stage. pub fn execute_inner( &mut self, - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, input: ExecInput, ) -> Result { if input.target_reached() { @@ -228,7 +228,7 @@ impl ExecutionStage { /// been previously executed. fn adjust_prune_modes( &self, - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, start_block: u64, max_block: u64, ) -> Result { @@ -247,7 +247,7 @@ impl ExecutionStage { } fn execution_checkpoint( - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, start_block: BlockNumber, max_block: BlockNumber, checkpoint: StageCheckpoint, @@ -314,7 +314,7 @@ fn execution_checkpoint( } fn calculate_gas_used_from_headers( - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, range: RangeInclusive, ) -> Result { let mut gas_total = 0; @@ -340,7 +340,7 @@ impl Stage for ExecutionStage { /// Execute the stage fn execute( &mut self, - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, input: ExecInput, ) -> Result { self.execute_inner(provider, input) @@ -349,7 +349,7 @@ impl Stage for ExecutionStage { /// Unwind the stage. fn unwind( &mut self, - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, input: UnwindInput, ) -> Result { let tx = provider.tx_ref(); diff --git a/crates/stages/src/stages/finish.rs b/crates/stages/src/stages/finish.rs index d0a63e8905711..26357aedc4058 100644 --- a/crates/stages/src/stages/finish.rs +++ b/crates/stages/src/stages/finish.rs @@ -18,7 +18,7 @@ impl Stage for FinishStage { fn execute( &mut self, - _provider: &DatabaseProviderRW<'_, &DB>, + _provider: &DatabaseProviderRW<&DB>, input: ExecInput, ) -> Result { Ok(ExecOutput { checkpoint: StageCheckpoint::new(input.target()), done: true }) @@ -26,7 +26,7 @@ impl Stage for FinishStage { fn unwind( &mut self, - _provider: &DatabaseProviderRW<'_, &DB>, + _provider: &DatabaseProviderRW<&DB>, input: UnwindInput, ) -> Result { Ok(UnwindOutput { checkpoint: StageCheckpoint::new(input.unwind_to) }) diff --git a/crates/stages/src/stages/hashing_account.rs b/crates/stages/src/stages/hashing_account.rs index 4eab05e0941b4..fc3424f2ea573 100644 --- a/crates/stages/src/stages/hashing_account.rs +++ b/crates/stages/src/stages/hashing_account.rs @@ -79,7 +79,7 @@ impl AccountHashingStage { /// Proceeds to go to the `BlockTransitionIndex` end, go back `transitions` and change the /// account state in the `AccountChangeSet` table. pub fn seed( - provider: &DatabaseProviderRW<'_, DB>, + provider: &DatabaseProviderRW, opts: SeedOpts, ) -> Result, StageError> { use reth_db::models::AccountBeforeTx; @@ -134,7 +134,7 @@ impl Stage for AccountHashingStage { /// Execute the stage. fn execute( &mut self, - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, input: ExecInput, ) -> Result { if input.target_reached() { @@ -266,7 +266,7 @@ impl Stage for AccountHashingStage { /// Unwind the stage. 
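    /// For account hashing this applies the account changesets over the unwound range,
    /// restoring the pre-change values in the hashed accounts table (a general description
    /// of the behavior; the body follows).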
fn unwind( &mut self, - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, input: UnwindInput, ) -> Result { let (range, unwind_progress, _) = @@ -288,7 +288,7 @@ impl Stage for AccountHashingStage { } fn stage_checkpoint_progress( - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, ) -> Result { Ok(EntitiesCheckpoint { processed: provider.tx_ref().entries::()? as u64, diff --git a/crates/stages/src/stages/hashing_storage.rs b/crates/stages/src/stages/hashing_storage.rs index da2fd38aced2e..73d6277a830d8 100644 --- a/crates/stages/src/stages/hashing_storage.rs +++ b/crates/stages/src/stages/hashing_storage.rs @@ -53,7 +53,7 @@ impl Stage for StorageHashingStage { /// Execute the stage. fn execute( &mut self, - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, input: ExecInput, ) -> Result { let tx = provider.tx_ref(); @@ -192,7 +192,7 @@ impl Stage for StorageHashingStage { /// Unwind the stage. fn unwind( &mut self, - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, input: UnwindInput, ) -> Result { let (range, unwind_progress, _) = @@ -213,7 +213,7 @@ impl Stage for StorageHashingStage { } fn stage_checkpoint_progress( - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, ) -> Result { Ok(EntitiesCheckpoint { processed: provider.tx_ref().entries::()? as u64, diff --git a/crates/stages/src/stages/headers.rs b/crates/stages/src/stages/headers.rs index 40ffa8d9461ae..a150415bc36ba 100644 --- a/crates/stages/src/stages/headers.rs +++ b/crates/stages/src/stages/headers.rs @@ -2,7 +2,7 @@ use crate::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput} use futures_util::StreamExt; use reth_db::{ cursor::{DbCursorRO, DbCursorRW}, - database::{Database, DatabaseGAT}, + database::Database, tables, transaction::{DbTx, DbTxMut}, }; @@ -60,7 +60,7 @@ where fn is_stage_done( &self, - tx: &>::TXMut, + tx: &::TXMut, checkpoint: u64, ) -> Result { let mut header_cursor = tx.cursor_read::()?; @@ -76,7 +76,7 @@ where /// Note: this writes the headers with rising block numbers. fn write_headers( &self, - tx: &>::TXMut, + tx: &::TXMut, headers: Vec, ) -> Result, StageError> { trace!(target: "sync::stages::headers", len = headers.len(), "writing headers"); @@ -176,7 +176,7 @@ where /// starting from the tip of the chain fn execute( &mut self, - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, input: ExecInput, ) -> Result { let current_checkpoint = input.checkpoint(); @@ -279,7 +279,7 @@ where /// Unwind the stage. fn unwind( &mut self, - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, input: UnwindInput, ) -> Result { self.buffer.take(); diff --git a/crates/stages/src/stages/index_account_history.rs b/crates/stages/src/stages/index_account_history.rs index b1e7721dcfe0f..4a0df0c29c8ed 100644 --- a/crates/stages/src/stages/index_account_history.rs +++ b/crates/stages/src/stages/index_account_history.rs @@ -44,7 +44,7 @@ impl Stage for IndexAccountHistoryStage { /// Execute the stage. fn execute( &mut self, - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, mut input: ExecInput, ) -> Result { if let Some((target_prunable_block, prune_mode)) = self @@ -87,7 +87,7 @@ impl Stage for IndexAccountHistoryStage { /// Unwind the stage. 
fn unwind( &mut self, - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, input: UnwindInput, ) -> Result { let (range, unwind_progress, _) = diff --git a/crates/stages/src/stages/index_storage_history.rs b/crates/stages/src/stages/index_storage_history.rs index f9896fb4f1955..b5ef6fda99bf1 100644 --- a/crates/stages/src/stages/index_storage_history.rs +++ b/crates/stages/src/stages/index_storage_history.rs @@ -43,7 +43,7 @@ impl Stage for IndexStorageHistoryStage { /// Execute the stage. fn execute( &mut self, - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, mut input: ExecInput, ) -> Result { if let Some((target_prunable_block, prune_mode)) = self @@ -85,7 +85,7 @@ impl Stage for IndexStorageHistoryStage { /// Unwind the stage. fn unwind( &mut self, - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, input: UnwindInput, ) -> Result { let (range, unwind_progress, _) = diff --git a/crates/stages/src/stages/merkle.rs b/crates/stages/src/stages/merkle.rs index 4354b5628b36e..602db57232efc 100644 --- a/crates/stages/src/stages/merkle.rs +++ b/crates/stages/src/stages/merkle.rs @@ -80,7 +80,7 @@ impl MerkleStage { /// Gets the hashing progress pub fn get_execution_checkpoint( &self, - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, ) -> Result, StageError> { let buf = provider.get_stage_checkpoint_progress(StageId::MerkleExecute)?.unwrap_or_default(); @@ -96,7 +96,7 @@ impl MerkleStage { /// Saves the hashing progress pub fn save_execution_checkpoint( &mut self, - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, checkpoint: Option, ) -> Result<(), StageError> { let mut buf = vec![]; @@ -127,7 +127,7 @@ impl Stage for MerkleStage { /// Execute the stage. fn execute( &mut self, - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, input: ExecInput, ) -> Result { let threshold = match self { @@ -261,7 +261,7 @@ impl Stage for MerkleStage { /// Unwind the stage. fn unwind( &mut self, - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, input: UnwindInput, ) -> Result { let tx = provider.tx_ref(); diff --git a/crates/stages/src/stages/mod.rs b/crates/stages/src/stages/mod.rs index 771de3586bef9..8197ec8340081 100644 --- a/crates/stages/src/stages/mod.rs +++ b/crates/stages/src/stages/mod.rs @@ -124,7 +124,7 @@ mod tests { expect_num_receipts: usize, expect_num_acc_changesets: usize, expect_num_storage_changesets: usize| async move { - let provider: DatabaseProviderRW<'_, &DatabaseEnv> = factory.provider_rw().unwrap(); + let provider: DatabaseProviderRW<&DatabaseEnv> = factory.provider_rw().unwrap(); // Check execution and create receipts and changesets according to the pruning // configuration diff --git a/crates/stages/src/stages/sender_recovery.rs b/crates/stages/src/stages/sender_recovery.rs index cdafd9e6275e0..551a70ccddfbf 100644 --- a/crates/stages/src/stages/sender_recovery.rs +++ b/crates/stages/src/stages/sender_recovery.rs @@ -56,7 +56,7 @@ impl Stage for SenderRecoveryStage { /// the [`TxSenders`][reth_db::tables::TxSenders] table. fn execute( &mut self, - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, input: ExecInput, ) -> Result { if input.target_reached() { @@ -168,7 +168,7 @@ impl Stage for SenderRecoveryStage { /// Unwind the stage. 
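    /// Sender recovery keeps no state that needs recomputing on unwind: the recovered
    /// senders above the unwind target are simply dropped (a general description of the
    /// behavior; the body follows).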
fn unwind( &mut self, - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, input: UnwindInput, ) -> Result { let (_, unwind_to, _) = input.unwind_block_range_with_threshold(self.commit_threshold); @@ -207,7 +207,7 @@ fn recover_sender( } fn stage_checkpoint( - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, ) -> Result { let pruned_entries = provider .get_prune_checkpoint(PruneSegment::SenderRecovery)? diff --git a/crates/stages/src/stages/total_difficulty.rs b/crates/stages/src/stages/total_difficulty.rs index 1cdaa971cf379..042f1b6a6c069 100644 --- a/crates/stages/src/stages/total_difficulty.rs +++ b/crates/stages/src/stages/total_difficulty.rs @@ -50,7 +50,7 @@ impl Stage for TotalDifficultyStage { /// Write total difficulty entries fn execute( &mut self, - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, input: ExecInput, ) -> Result { let tx = provider.tx_ref(); @@ -100,7 +100,7 @@ impl Stage for TotalDifficultyStage { /// Unwind the stage. fn unwind( &mut self, - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, input: UnwindInput, ) -> Result { let (_, unwind_to, _) = input.unwind_block_range_with_threshold(self.commit_threshold); @@ -115,7 +115,7 @@ impl Stage for TotalDifficultyStage { } fn stage_checkpoint( - provider: &DatabaseProviderRW<'_, DB>, + provider: &DatabaseProviderRW, ) -> Result { Ok(EntitiesCheckpoint { processed: provider.tx_ref().entries::()? as u64, diff --git a/crates/stages/src/stages/tx_lookup.rs b/crates/stages/src/stages/tx_lookup.rs index 0de9ce74b6c14..f6ef73a9af5e5 100644 --- a/crates/stages/src/stages/tx_lookup.rs +++ b/crates/stages/src/stages/tx_lookup.rs @@ -51,7 +51,7 @@ impl Stage for TransactionLookupStage { /// Write transaction hash -> id entries fn execute( &mut self, - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, mut input: ExecInput, ) -> Result { if let Some((target_prunable_block, prune_mode)) = self @@ -129,7 +129,7 @@ impl Stage for TransactionLookupStage { /// Unwind the stage. fn unwind( &mut self, - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, input: UnwindInput, ) -> Result { let tx = provider.tx_ref(); @@ -164,7 +164,7 @@ impl Stage for TransactionLookupStage { } fn stage_checkpoint( - provider: &DatabaseProviderRW<'_, &DB>, + provider: &DatabaseProviderRW<&DB>, ) -> Result { let pruned_entries = provider .get_prune_checkpoint(PruneSegment::TransactionLookup)? 
diff --git a/crates/stages/src/test_utils/stage.rs b/crates/stages/src/test_utils/stage.rs index 85e88841ba8c9..a73773e6b8b98 100644 --- a/crates/stages/src/test_utils/stage.rs +++ b/crates/stages/src/test_utils/stage.rs @@ -47,7 +47,7 @@ impl Stage for TestStage { fn execute( &mut self, - _: &DatabaseProviderRW<'_, &DB>, + _: &DatabaseProviderRW<&DB>, _input: ExecInput, ) -> Result { self.exec_outputs @@ -57,7 +57,7 @@ impl Stage for TestStage { fn unwind( &mut self, - _: &DatabaseProviderRW<'_, &DB>, + _: &DatabaseProviderRW<&DB>, _input: UnwindInput, ) -> Result { self.unwind_outputs diff --git a/crates/stages/src/test_utils/test_db.rs b/crates/stages/src/test_utils/test_db.rs index 56361f21295ae..586a58c99d7ad 100644 --- a/crates/stages/src/test_utils/test_db.rs +++ b/crates/stages/src/test_utils/test_db.rs @@ -1,12 +1,12 @@ use reth_db::{ common::KeyValue, cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}, - database::DatabaseGAT, + database::Database, models::{AccountBeforeTx, StoredBlockBodyIndices}, table::{Table, TableRow}, tables, test_utils::{create_test_rw_db, create_test_rw_db_with_path, TempDatabase}, - transaction::{DbTx, DbTxGAT, DbTxMut, DbTxMutGAT}, + transaction::{DbTx, DbTxMut}, DatabaseEnv, DatabaseError as DbError, }; use reth_interfaces::{test_utils::generators::ChangeSet, RethResult}; @@ -57,12 +57,12 @@ impl TestTransaction { } /// Return a database wrapped in [DatabaseProviderRW]. - pub fn inner_rw(&self) -> DatabaseProviderRW<'_, Arc>> { + pub fn inner_rw(&self) -> DatabaseProviderRW>> { self.factory.provider_rw().expect("failed to create db container") } /// Return a database wrapped in [DatabaseProviderRO]. - pub fn inner(&self) -> DatabaseProviderRO<'_, Arc>> { + pub fn inner(&self) -> DatabaseProviderRO>> { self.factory.provider().expect("failed to create db container") } @@ -74,7 +74,7 @@ impl TestTransaction { /// Invoke a callback with transaction committing it afterwards pub fn commit(&self, f: F) -> Result<(), DbError> where - F: FnOnce(&>::TXMut) -> Result<(), DbError>, + F: FnOnce(&::TXMut) -> Result<(), DbError>, { let mut tx = self.inner_rw(); f(tx.tx_ref())?; @@ -85,7 +85,7 @@ impl TestTransaction { /// Invoke a callback with a read transaction pub fn query(&self, f: F) -> Result where - F: FnOnce(&>::TX) -> Result, + F: FnOnce(&::TX) -> Result, { f(self.inner().tx_ref()) } diff --git a/crates/storage/db/src/abstraction/common.rs b/crates/storage/db/src/abstraction/common.rs index 29a1e34294d9c..9bce16e397d2d 100644 --- a/crates/storage/db/src/abstraction/common.rs +++ b/crates/storage/db/src/abstraction/common.rs @@ -1,3 +1,5 @@ +use crate::{abstraction::table::*, DatabaseError}; + /// A key-value pair for table `T`. pub type KeyValue = (::Key, ::Value); @@ -16,13 +18,20 @@ pub type IterPairResult = Option, DatabaseError>>; /// A value only result for table `T`. pub type ValueOnlyResult = Result::Value>, DatabaseError>; -use crate::{abstraction::table::*, DatabaseError}; - -// Sealed trait helper to prevent misuse of the API. +// Sealed trait helper to prevent misuse of the Database API. mod sealed { + use crate::{database::Database, mock::DatabaseMock, DatabaseEnv}; + use std::sync::Arc; + + /// Sealed trait to limit the implementors of the Database trait. 
pub trait Sealed: Sized {} - #[allow(missing_debug_implementations)] - pub struct Bounds(T); - impl Sealed for Bounds {} + + impl Sealed for &DB {} + impl Sealed for Arc {} + impl Sealed for DatabaseEnv {} + impl Sealed for DatabaseMock {} + + #[cfg(any(test, feature = "test-utils"))] + impl Sealed for crate::test_utils::TempDatabase {} } -pub(crate) use sealed::{Bounds, Sealed}; +pub(crate) use sealed::Sealed; diff --git a/crates/storage/db/src/abstraction/database.rs b/crates/storage/db/src/abstraction/database.rs index eacf845bb7bde..e185b44382637 100644 --- a/crates/storage/db/src/abstraction/database.rs +++ b/crates/storage/db/src/abstraction/database.rs @@ -1,35 +1,31 @@ use crate::{ - common::{Bounds, Sealed}, + abstraction::common::Sealed, table::TableImporter, transaction::{DbTx, DbTxMut}, DatabaseError, }; use std::{fmt::Debug, sync::Arc}; -/// Implements the GAT method from: -/// . +/// Main Database trait that can open read-only and read-write transactions. /// -/// Sealed trait which cannot be implemented by 3rd parties, exposed only for implementers -pub trait DatabaseGAT<'a, __ImplicitBounds: Sealed = Bounds<&'a Self>>: Send + Sync { - /// RO database transaction - type TX: DbTx + Send + Sync + Debug; - /// RW database transaction - type TXMut: DbTxMut + DbTx + TableImporter + Send + Sync + Debug; -} +/// Sealed trait which cannot be implemented by 3rd parties, exposed only for consumption. +pub trait Database: Send + Sync + Sealed { + /// Read-Only database transaction + type TX: DbTx + Send + Sync + Debug + 'static; + /// Read-Write database transaction + type TXMut: DbTxMut + DbTx + TableImporter + Send + Sync + Debug + 'static; -/// Main Database trait that spawns transactions to be executed. -pub trait Database: for<'a> DatabaseGAT<'a> { /// Create read only transaction. - fn tx(&self) -> Result<>::TX, DatabaseError>; + fn tx(&self) -> Result; /// Create read write transaction only possible if database is open with write access. - fn tx_mut(&self) -> Result<>::TXMut, DatabaseError>; + fn tx_mut(&self) -> Result; /// Takes a function and passes a read-only transaction into it, making sure it's closed in the /// end of the execution. fn view(&self, f: F) -> Result where - F: FnOnce(&>::TX) -> T, + F: FnOnce(&Self::TX) -> T, { let tx = self.tx()?; @@ -43,7 +39,7 @@ pub trait Database: for<'a> DatabaseGAT<'a> { /// the end of the execution. 
fn update(&self, f: F) -> Result where - F: FnOnce(&>::TXMut) -> T, + F: FnOnce(&Self::TXMut) -> T, { let tx = self.tx_mut()?; @@ -54,34 +50,27 @@ pub trait Database: for<'a> DatabaseGAT<'a> { } } -// Generic over Arc -impl<'a, DB: Database> DatabaseGAT<'a> for Arc { - type TX = >::TX; - type TXMut = >::TXMut; -} - impl Database for Arc { - fn tx(&self) -> Result<>::TX, DatabaseError> { + type TX = ::TX; + type TXMut = ::TXMut; + + fn tx(&self) -> Result { ::tx(self) } - fn tx_mut(&self) -> Result<>::TXMut, DatabaseError> { + fn tx_mut(&self) -> Result { ::tx_mut(self) } } -// Generic over reference -impl<'a, DB: Database> DatabaseGAT<'a> for &DB { - type TX = >::TX; - type TXMut = >::TXMut; -} - impl Database for &DB { - fn tx(&self) -> Result<>::TX, DatabaseError> { + type TX = ::TX; + type TXMut = ::TXMut; + fn tx(&self) -> Result { ::tx(self) } - fn tx_mut(&self) -> Result<>::TXMut, DatabaseError> { + fn tx_mut(&self) -> Result { ::tx_mut(self) } } diff --git a/crates/storage/db/src/abstraction/mock.rs b/crates/storage/db/src/abstraction/mock.rs index c094eb944165c..f1f0854fb5cd1 100644 --- a/crates/storage/db/src/abstraction/mock.rs +++ b/crates/storage/db/src/abstraction/mock.rs @@ -1,17 +1,16 @@ //! Mock database -use std::{collections::BTreeMap, ops::RangeBounds}; - use crate::{ common::{PairResult, ValueOnlyResult}, cursor::{ DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW, DupWalker, RangeWalker, ReverseWalker, Walker, }, - database::{Database, DatabaseGAT}, + database::Database, table::{DupSort, Table, TableImporter}, - transaction::{DbTx, DbTxGAT, DbTxMut, DbTxMutGAT}, + transaction::{DbTx, DbTxMut}, DatabaseError, }; +use std::{collections::BTreeMap, ops::RangeBounds}; /// Mock database used for testing with inner BTreeMap structure /// TODO @@ -22,21 +21,17 @@ pub struct DatabaseMock { } impl Database for DatabaseMock { - fn tx(&self) -> Result<>::TX, DatabaseError> { + type TX = TxMock; + type TXMut = TxMock; + fn tx(&self) -> Result { Ok(TxMock::default()) } - fn tx_mut(&self) -> Result<>::TXMut, DatabaseError> { + fn tx_mut(&self) -> Result { Ok(TxMock::default()) } } -impl<'a> DatabaseGAT<'a> for DatabaseMock { - type TX = TxMock; - - type TXMut = TxMock; -} - /// Mock read only tx #[derive(Debug, Clone, Default)] pub struct TxMock { @@ -44,17 +39,10 @@ pub struct TxMock { _table: BTreeMap, Vec>, } -impl<'a> DbTxGAT<'a> for TxMock { +impl DbTx for TxMock { type Cursor = CursorMock; type DupCursor = CursorMock; -} -impl<'a> DbTxMutGAT<'a> for TxMock { - type CursorMut = CursorMock; - type DupCursorMut = CursorMock; -} - -impl DbTx for TxMock { fn get(&self, _key: T::Key) -> Result, DatabaseError> { todo!() } @@ -65,13 +53,11 @@ impl DbTx for TxMock { fn abort(self) {} - fn cursor_read(&self) -> Result<>::Cursor, DatabaseError> { + fn cursor_read(&self) -> Result, DatabaseError> { Ok(CursorMock { _cursor: 0 }) } - fn cursor_dup_read( - &self, - ) -> Result<>::DupCursor, DatabaseError> { + fn cursor_dup_read(&self) -> Result, DatabaseError> { Ok(CursorMock { _cursor: 0 }) } @@ -81,6 +67,9 @@ impl DbTx for TxMock { } impl DbTxMut for TxMock { + type CursorMut = CursorMock; + type DupCursorMut = CursorMock; + fn put(&self, _key: T::Key, _value: T::Value) -> Result<(), DatabaseError> { todo!() } @@ -97,15 +86,11 @@ impl DbTxMut for TxMock { todo!() } - fn cursor_write( - &self, - ) -> Result<>::CursorMut, DatabaseError> { + fn cursor_write(&self) -> Result, DatabaseError> { todo!() } - fn cursor_dup_write( - &self, - ) -> Result<>::DupCursorMut, DatabaseError> { + fn 
cursor_dup_write(&self) -> Result, DatabaseError> { todo!() } } diff --git a/crates/storage/db/src/abstraction/transaction.rs b/crates/storage/db/src/abstraction/transaction.rs index bbbd775d7a163..472563f3520b6 100644 --- a/crates/storage/db/src/abstraction/transaction.rs +++ b/crates/storage/db/src/abstraction/transaction.rs @@ -1,39 +1,16 @@ use crate::{ - common::{Bounds, Sealed}, cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW}, table::{DupSort, Table}, DatabaseError, }; -/// Implements the GAT method from: -/// . -/// -/// Sealed trait which cannot be implemented by 3rd parties, exposed only for implementers -pub trait DbTxGAT<'a, __ImplicitBounds: Sealed = Bounds<&'a Self>>: Send + Sync { - /// Cursor GAT +/// Read only transaction +pub trait DbTx: Send + Sync { + /// Cursor type for this read-only transaction type Cursor: DbCursorRO + Send + Sync; - /// DupCursor GAT + /// DupCursor type for this read-only transaction type DupCursor: DbDupCursorRO + DbCursorRO + Send + Sync; -} - -/// Implements the GAT method from: -/// . -/// -/// Sealed trait which cannot be implemented by 3rd parties, exposed only for implementers -pub trait DbTxMutGAT<'a, __ImplicitBounds: Sealed = Bounds<&'a Self>>: Send + Sync { - /// Cursor GAT - type CursorMut: DbCursorRW + DbCursorRO + Send + Sync; - /// DupCursor GAT - type DupCursorMut: DbDupCursorRW - + DbCursorRW - + DbDupCursorRO - + DbCursorRO - + Send - + Sync; -} -/// Read only transaction -pub trait DbTx: for<'a> DbTxGAT<'a> { /// Get value fn get(&self, key: T::Key) -> Result, DatabaseError>; /// Commit for read only transaction will consume and free transaction and allows @@ -42,17 +19,25 @@ pub trait DbTx: for<'a> DbTxGAT<'a> { /// Aborts transaction fn abort(self); /// Iterate over read only values in table. - fn cursor_read(&self) -> Result<>::Cursor, DatabaseError>; + fn cursor_read(&self) -> Result, DatabaseError>; /// Iterate over read only values in dup sorted table. - fn cursor_dup_read( - &self, - ) -> Result<>::DupCursor, DatabaseError>; + fn cursor_dup_read(&self) -> Result, DatabaseError>; /// Returns number of entries in the table. fn entries(&self) -> Result; } /// Read write transaction that allows writing to database -pub trait DbTxMut: for<'a> DbTxMutGAT<'a> { +pub trait DbTxMut: Send + Sync { + /// Read-Write Cursor type + type CursorMut: DbCursorRW + DbCursorRO + Send + Sync; + /// Read-Write DupCursor type + type DupCursorMut: DbDupCursorRW + + DbCursorRW + + DbDupCursorRO + + DbCursorRO + + Send + + Sync; + /// Put value to database fn put(&self, key: T::Key, value: T::Value) -> Result<(), DatabaseError>; /// Delete value from database @@ -61,11 +46,7 @@ pub trait DbTxMut: for<'a> DbTxMutGAT<'a> { /// Clears database. fn clear(&self) -> Result<(), DatabaseError>; /// Cursor mut - fn cursor_write( - &self, - ) -> Result<>::CursorMut, DatabaseError>; + fn cursor_write(&self) -> Result, DatabaseError>; /// DupCursor mut. - fn cursor_dup_write( - &self, - ) -> Result<>::DupCursorMut, DatabaseError>; + fn cursor_dup_write(&self) -> Result, DatabaseError>; } diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index e895b4015ca01..3f6a5db881994 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -1,7 +1,7 @@ //! Module that interacts with MDBX. 
use crate::{ - database::{Database, DatabaseGAT}, + database::Database, tables::{TableType, Tables}, utils::default_page_size, DatabaseError, @@ -40,20 +40,18 @@ pub struct DatabaseEnv { with_metrics: bool, } -impl<'a> DatabaseGAT<'a> for DatabaseEnv { +impl Database for DatabaseEnv { type TX = tx::Tx; type TXMut = tx::Tx; -} -impl Database for DatabaseEnv { - fn tx(&self) -> Result<>::TX, DatabaseError> { + fn tx(&self) -> Result { Ok(Tx::new_with_metrics( self.inner.begin_ro_txn().map_err(|e| DatabaseError::InitTx(e.into()))?, self.with_metrics, )) } - fn tx_mut(&self) -> Result<>::TXMut, DatabaseError> { + fn tx_mut(&self) -> Result { Ok(Tx::new_with_metrics( self.inner.begin_rw_txn().map_err(|e| DatabaseError::InitTx(e.into()))?, self.with_metrics, diff --git a/crates/storage/db/src/implementation/mdbx/tx.rs b/crates/storage/db/src/implementation/mdbx/tx.rs index c62a54e6724ad..8da3df45ce889 100644 --- a/crates/storage/db/src/implementation/mdbx/tx.rs +++ b/crates/storage/db/src/implementation/mdbx/tx.rs @@ -7,7 +7,7 @@ use crate::{ }, table::{Compress, DupSort, Encode, Table, TableImporter}, tables::{utils::decode_one, Tables, NUM_TABLES}, - transaction::{DbTx, DbTxGAT, DbTxMut, DbTxMutGAT}, + transaction::{DbTx, DbTxMut}, DatabaseError, }; use parking_lot::RwLock; @@ -167,19 +167,12 @@ impl Drop for MetricsHandler { } } -impl<'a, K: TransactionKind> DbTxGAT<'a> for Tx { - type Cursor = Cursor; - type DupCursor = Cursor; -} - -impl<'a, K: TransactionKind> DbTxMutGAT<'a> for Tx { - type CursorMut = Cursor; - type DupCursorMut = Cursor; -} - impl TableImporter for Tx {} impl DbTx for Tx { + type Cursor = Cursor; + type DupCursor = Cursor; + fn get(&self, key: T::Key) -> Result::Value>, DatabaseError> { self.execute_with_operation_metric::(Operation::Get, None, |tx| { tx.get(self.get_dbi::()?, key.encode().as_ref()) @@ -202,14 +195,12 @@ impl DbTx for Tx { } // Iterate over read only values in database. - fn cursor_read(&self) -> Result<>::Cursor, DatabaseError> { + fn cursor_read(&self) -> Result, DatabaseError> { self.new_cursor() } /// Iterate over read only values in database. 
- fn cursor_dup_read( - &self, - ) -> Result<>::DupCursor, DatabaseError> { + fn cursor_dup_read(&self) -> Result, DatabaseError> { self.new_cursor() } @@ -224,6 +215,9 @@ impl DbTx for Tx { } impl DbTxMut for Tx { + type CursorMut = Cursor; + type DupCursorMut = Cursor; + fn put(&self, key: T::Key, value: T::Value) -> Result<(), DatabaseError> { let key = key.encode(); let value = value.compress(); @@ -268,15 +262,11 @@ impl DbTxMut for Tx { Ok(()) } - fn cursor_write( - &self, - ) -> Result<>::CursorMut, DatabaseError> { + fn cursor_write(&self) -> Result, DatabaseError> { self.new_cursor() } - fn cursor_dup_write( - &self, - ) -> Result<>::DupCursorMut, DatabaseError> { + fn cursor_dup_write(&self) -> Result, DatabaseError> { self.new_cursor() } } diff --git a/crates/storage/db/src/lib.rs b/crates/storage/db/src/lib.rs index 250177dfb1821..e813bf0d1169b 100644 --- a/crates/storage/db/src/lib.rs +++ b/crates/storage/db/src/lib.rs @@ -153,7 +153,7 @@ pub fn open_db(path: &Path, log_level: Option) -> eyre::Result DatabaseGAT<'a> for TempDatabase { - type TX = >::TX; - type TXMut = >::TXMut; - } - impl Database for TempDatabase { - fn tx(&self) -> Result<>::TX, DatabaseError> { + type TX = ::TX; + type TXMut = ::TXMut; + fn tx(&self) -> Result { self.db().tx() } - fn tx_mut(&self) -> Result<>::TXMut, DatabaseError> { + fn tx_mut(&self) -> Result { self.db().tx_mut() } } diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index c21cbdd686c77..48fa6eaa09c20 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -50,7 +50,7 @@ impl ProviderFactory { /// Returns a provider with a created `DbTx` inside, which allows fetching data from the /// database using different types of providers. Example: [`HeaderProvider`] /// [`BlockHashReader`]. This may fail if the inner read database transaction fails to open. - pub fn provider(&self) -> ProviderResult> { + pub fn provider(&self) -> ProviderResult> { let mut provider = DatabaseProvider::new(self.db.tx()?, self.chain_spec.clone()); if let Some(snapshot_provider) = &self.snapshot_provider { @@ -64,7 +64,7 @@ impl ProviderFactory { /// data from the database using different types of providers. Example: [`HeaderProvider`] /// [`BlockHashReader`]. This may fail if the inner read/write database transaction fails to /// open. 
- pub fn provider_rw(&self) -> ProviderResult> { + pub fn provider_rw(&self) -> ProviderResult> { let mut provider = DatabaseProvider::new_rw(self.db.tx_mut()?, self.chain_spec.clone()); if let Some(snapshot_provider) = &self.snapshot_provider { @@ -123,7 +123,7 @@ impl Clone for ProviderFactory { impl ProviderFactory { /// Storage provider for latest block - pub fn latest(&self) -> ProviderResult> { + pub fn latest(&self) -> ProviderResult { trace!(target: "providers::db", "Returning latest state provider"); Ok(Box::new(LatestStateProvider::new(self.db.tx()?))) } @@ -132,7 +132,7 @@ impl ProviderFactory { fn state_provider_by_block_number( &self, mut block_number: BlockNumber, - ) -> ProviderResult> { + ) -> ProviderResult { let provider = self.provider()?; if block_number == provider.best_block_number().unwrap_or_default() && @@ -175,17 +175,14 @@ impl ProviderFactory { pub fn history_by_block_number( &self, block_number: BlockNumber, - ) -> ProviderResult> { + ) -> ProviderResult { let state_provider = self.state_provider_by_block_number(block_number)?; trace!(target: "providers::db", ?block_number, "Returning historical state provider for block number"); Ok(state_provider) } /// Storage provider for state at that given block hash - pub fn history_by_block_hash( - &self, - block_hash: BlockHash, - ) -> ProviderResult> { + pub fn history_by_block_hash(&self, block_hash: BlockHash) -> ProviderResult { let block_number = self .provider()? .block_number(block_hash)? diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index ad289f19883af..81daf53de771d 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -14,7 +14,7 @@ use itertools::{izip, Itertools}; use reth_db::{ common::KeyValue, cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}, - database::{Database, DatabaseGAT}, + database::Database, models::{ sharded_key, storage_sharded_key::StorageShardedKey, AccountBeforeTx, BlockNumberAddress, ShardedKey, StoredBlockBodyIndices, StoredBlockOmmers, StoredBlockWithdrawals, @@ -55,39 +55,37 @@ use std::{ use tracing::{debug, warn}; /// A [`DatabaseProvider`] that holds a read-only database transaction. -pub type DatabaseProviderRO<'this, DB> = DatabaseProvider<>::TX>; +pub type DatabaseProviderRO = DatabaseProvider<::TX>; /// A [`DatabaseProvider`] that holds a read-write database transaction. /// /// Ideally this would be an alias type. However, there's some weird compiler error (), that forces us to wrap this in a struct instead. /// Once that issue is solved, we can probably revert back to being an alias type. #[derive(Debug)] -pub struct DatabaseProviderRW<'this, DB: Database>( - pub DatabaseProvider<>::TXMut>, -); +pub struct DatabaseProviderRW(pub DatabaseProvider<::TXMut>); -impl<'this, DB: Database> Deref for DatabaseProviderRW<'this, DB> { - type Target = DatabaseProvider<>::TXMut>; +impl Deref for DatabaseProviderRW { + type Target = DatabaseProvider<::TXMut>; fn deref(&self) -> &Self::Target { &self.0 } } -impl DerefMut for DatabaseProviderRW<'_, DB> { +impl DerefMut for DatabaseProviderRW { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } -impl<'this, DB: Database> DatabaseProviderRW<'this, DB> { +impl DatabaseProviderRW { /// Commit database transaction pub fn commit(self) -> ProviderResult { self.0.commit() } /// Consume `DbTx` or `DbTxMut`. 
- pub fn into_tx(self) -> >::TXMut { + pub fn into_tx(self) -> ::TXMut { self.0.into_tx() } } diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 528577963e8fa..df782365e6042 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -508,7 +508,7 @@ where Tree: BlockchainTreePendingStateProvider + BlockchainTreeViewer, { /// Storage provider for latest block - fn latest(&self) -> ProviderResult> { + fn latest(&self) -> ProviderResult { trace!(target: "providers::blockchain", "Getting latest block state provider"); self.database.latest() } @@ -516,18 +516,18 @@ where fn history_by_block_number( &self, block_number: BlockNumber, - ) -> ProviderResult> { + ) -> ProviderResult { trace!(target: "providers::blockchain", ?block_number, "Getting history by block number"); self.ensure_canonical_block(block_number)?; self.database.history_by_block_number(block_number) } - fn history_by_block_hash(&self, block_hash: BlockHash) -> ProviderResult> { + fn history_by_block_hash(&self, block_hash: BlockHash) -> ProviderResult { trace!(target: "providers::blockchain", ?block_hash, "Getting history by block hash"); self.database.history_by_block_hash(block_hash) } - fn state_by_block_hash(&self, block: BlockHash) -> ProviderResult> { + fn state_by_block_hash(&self, block: BlockHash) -> ProviderResult { trace!(target: "providers::blockchain", ?block, "Getting state by block hash"); let mut state = self.history_by_block_hash(block); @@ -546,7 +546,7 @@ where /// /// If there's no pending block available then the latest state provider is returned: /// [Self::latest] - fn pending(&self) -> ProviderResult> { + fn pending(&self) -> ProviderResult { trace!(target: "providers::blockchain", "Getting provider for pending state"); if let Some(block) = self.tree.pending_block_num_hash() { @@ -559,10 +559,7 @@ where self.latest() } - fn pending_state_by_hash( - &self, - block_hash: B256, - ) -> ProviderResult>> { + fn pending_state_by_hash(&self, block_hash: B256) -> ProviderResult> { if let Some(state) = self.tree.find_pending_state_provider(block_hash) { return Ok(Some(self.pending_with_provider(state)?)) } @@ -572,7 +569,7 @@ where fn pending_with_provider( &self, bundle_state_data: Box, - ) -> ProviderResult> { + ) -> ProviderResult { let canonical_fork = bundle_state_data.canonical_fork(); trace!(target: "providers::blockchain", ?canonical_fork, "Returning post state provider"); diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 30ee18f6c95ed..3162266bc7818 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -10,7 +10,7 @@ use reth_primitives::{ use std::collections::HashMap; /// Assert genesis block -pub fn assert_genesis_block(provider: &DatabaseProviderRW<'_, DB>, g: SealedBlock) { +pub fn assert_genesis_block(provider: &DatabaseProviderRW, g: SealedBlock) { let n = g.number; let h = B256::ZERO; let tx = provider; diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index f32b4fd8132c4..b9f3dd66746b0 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -581,73 +581,67 @@ impl EvmEnvProvider for MockEthProvider { } impl StateProviderFactory for MockEthProvider { - fn latest(&self) -> ProviderResult> { + fn latest(&self) -> ProviderResult { 
Ok(Box::new(self.clone())) } - fn history_by_block_number(&self, _block: BlockNumber) -> ProviderResult> { + fn history_by_block_number(&self, _block: BlockNumber) -> ProviderResult { Ok(Box::new(self.clone())) } - fn history_by_block_hash(&self, _block: BlockHash) -> ProviderResult> { + fn history_by_block_hash(&self, _block: BlockHash) -> ProviderResult { Ok(Box::new(self.clone())) } - fn state_by_block_hash(&self, _block: BlockHash) -> ProviderResult> { + fn state_by_block_hash(&self, _block: BlockHash) -> ProviderResult { Ok(Box::new(self.clone())) } - fn pending(&self) -> ProviderResult> { + fn pending(&self) -> ProviderResult { Ok(Box::new(self.clone())) } - fn pending_state_by_hash( - &self, - _block_hash: B256, - ) -> ProviderResult>> { + fn pending_state_by_hash(&self, _block_hash: B256) -> ProviderResult> { Ok(Some(Box::new(self.clone()))) } fn pending_with_provider<'a>( &'a self, _bundle_state_data: Box, - ) -> ProviderResult> { + ) -> ProviderResult { Ok(Box::new(self.clone())) } } impl StateProviderFactory for Arc { - fn latest(&self) -> ProviderResult> { + fn latest(&self) -> ProviderResult { Ok(Box::new(self.clone())) } - fn history_by_block_number(&self, _block: BlockNumber) -> ProviderResult> { + fn history_by_block_number(&self, _block: BlockNumber) -> ProviderResult { Ok(Box::new(self.clone())) } - fn history_by_block_hash(&self, _block: BlockHash) -> ProviderResult> { + fn history_by_block_hash(&self, _block: BlockHash) -> ProviderResult { Ok(Box::new(self.clone())) } - fn state_by_block_hash(&self, _block: BlockHash) -> ProviderResult> { + fn state_by_block_hash(&self, _block: BlockHash) -> ProviderResult { Ok(Box::new(self.clone())) } - fn pending(&self) -> ProviderResult> { + fn pending(&self) -> ProviderResult { Ok(Box::new(self.clone())) } - fn pending_state_by_hash( - &self, - _block_hash: B256, - ) -> ProviderResult>> { + fn pending_state_by_hash(&self, _block_hash: B256) -> ProviderResult> { Ok(Some(Box::new(self.clone()))) } fn pending_with_provider<'a>( &'a self, _bundle_state_data: Box, - ) -> ProviderResult> { + ) -> ProviderResult { Ok(Box::new(self.clone())) } } diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 7c2b761cef3dd..87632f1d980d3 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -347,37 +347,34 @@ impl EvmEnvProvider for NoopProvider { } impl StateProviderFactory for NoopProvider { - fn latest(&self) -> ProviderResult> { + fn latest(&self) -> ProviderResult { Ok(Box::new(*self)) } - fn history_by_block_number(&self, _block: BlockNumber) -> ProviderResult> { + fn history_by_block_number(&self, _block: BlockNumber) -> ProviderResult { Ok(Box::new(*self)) } - fn history_by_block_hash(&self, _block: BlockHash) -> ProviderResult> { + fn history_by_block_hash(&self, _block: BlockHash) -> ProviderResult { Ok(Box::new(*self)) } - fn state_by_block_hash(&self, _block: BlockHash) -> ProviderResult> { + fn state_by_block_hash(&self, _block: BlockHash) -> ProviderResult { Ok(Box::new(*self)) } - fn pending(&self) -> ProviderResult> { + fn pending(&self) -> ProviderResult { Ok(Box::new(*self)) } - fn pending_state_by_hash( - &self, - _block_hash: B256, - ) -> ProviderResult>> { + fn pending_state_by_hash(&self, _block_hash: B256) -> ProviderResult> { Ok(Some(Box::new(*self))) } fn pending_with_provider<'a>( &'a self, _bundle_state_data: Box, - ) -> ProviderResult> { + ) -> ProviderResult { Ok(Box::new(*self)) } } diff --git 
a/crates/storage/provider/src/traits/state.rs b/crates/storage/provider/src/traits/state.rs index 158ec2bb4f594..8cb985f359d84 100644 --- a/crates/storage/provider/src/traits/state.rs +++ b/crates/storage/provider/src/traits/state.rs @@ -9,7 +9,7 @@ use reth_primitives::{ use reth_trie::updates::TrieUpdates; /// Type alias of boxed [StateProvider]. -pub type StateProviderBox<'a> = Box; +pub type StateProviderBox = Box; /// An abstraction for a type that provides state data. #[auto_impl(&, Arc, Box)] @@ -100,13 +100,13 @@ pub trait StateProvider: BlockHashReader + AccountReader + StateRootProvider + S /// to be used, since block `n` was executed on its parent block's state. pub trait StateProviderFactory: BlockIdReader + Send + Sync { /// Storage provider for latest block. - fn latest(&self) -> ProviderResult>; + fn latest(&self) -> ProviderResult; /// Returns a [StateProvider] indexed by the given [BlockId]. /// /// Note: if a number or hash is provided this will __only__ look at historical(canonical) /// state. - fn state_by_block_id(&self, block_id: BlockId) -> ProviderResult> { + fn state_by_block_id(&self, block_id: BlockId) -> ProviderResult { match block_id { BlockId::Number(block_number) => self.state_by_block_number_or_tag(block_number), BlockId::Hash(block_hash) => self.history_by_block_hash(block_hash.into()), @@ -119,7 +119,7 @@ pub trait StateProviderFactory: BlockIdReader + Send + Sync { fn state_by_block_number_or_tag( &self, number_or_tag: BlockNumberOrTag, - ) -> ProviderResult> { + ) -> ProviderResult { match number_or_tag { BlockNumberOrTag::Latest => self.latest(), BlockNumberOrTag::Finalized => { @@ -153,40 +153,37 @@ pub trait StateProviderFactory: BlockIdReader + Send + Sync { /// /// /// Note: this only looks at historical blocks, not pending blocks. - fn history_by_block_number(&self, block: BlockNumber) -> ProviderResult>; + fn history_by_block_number(&self, block: BlockNumber) -> ProviderResult; /// Returns a historical [StateProvider] indexed by the given block hash. /// /// Note: this only looks at historical blocks, not pending blocks. - fn history_by_block_hash(&self, block: BlockHash) -> ProviderResult>; + fn history_by_block_hash(&self, block: BlockHash) -> ProviderResult; /// Returns _any_[StateProvider] with matching block hash. /// /// This will return a [StateProvider] for either a historical or pending block. - fn state_by_block_hash(&self, block: BlockHash) -> ProviderResult>; + fn state_by_block_hash(&self, block: BlockHash) -> ProviderResult; /// Storage provider for pending state. /// /// Represents the state at the block that extends the canonical chain by one. /// If there's no `pending` block, then this is equal to [StateProviderFactory::latest] - fn pending(&self) -> ProviderResult>; + fn pending(&self) -> ProviderResult; /// Storage provider for pending state for the given block hash. /// /// Represents the state at the block that extends the canonical chain. /// /// If the block couldn't be found, returns `None`. - fn pending_state_by_hash( - &self, - block_hash: B256, - ) -> ProviderResult>>; + fn pending_state_by_hash(&self, block_hash: B256) -> ProviderResult>; /// Return a [StateProvider] that contains bundle state data provider. /// Used to inspect or execute transaction on the pending state. 
fn pending_with_provider( &self, bundle_state_data: Box, - ) -> ProviderResult>; + ) -> ProviderResult; } /// Blockchain trait provider that gives access to the blockchain state that is not yet committed diff --git a/crates/trie/src/hashed_cursor/default.rs b/crates/trie/src/hashed_cursor/default.rs index 5641c289280a1..d49feedd1849d 100644 --- a/crates/trie/src/hashed_cursor/default.rs +++ b/crates/trie/src/hashed_cursor/default.rs @@ -2,13 +2,13 @@ use super::{HashedAccountCursor, HashedCursorFactory, HashedStorageCursor}; use reth_db::{ cursor::{DbCursorRO, DbDupCursorRO}, tables, - transaction::{DbTx, DbTxGAT}, + transaction::DbTx, }; use reth_primitives::{Account, StorageEntry, B256}; impl<'a, TX: DbTx> HashedCursorFactory for &'a TX { - type AccountCursor = >::Cursor; - type StorageCursor = >::DupCursor; + type AccountCursor = ::Cursor; + type StorageCursor = ::DupCursor; fn hashed_account_cursor(&self) -> Result { self.cursor_read::() diff --git a/crates/trie/src/hashed_cursor/post_state.rs b/crates/trie/src/hashed_cursor/post_state.rs index b6dff3027b11d..62e028657fc09 100644 --- a/crates/trie/src/hashed_cursor/post_state.rs +++ b/crates/trie/src/hashed_cursor/post_state.rs @@ -3,7 +3,7 @@ use crate::prefix_set::{PrefixSet, PrefixSetMut}; use reth_db::{ cursor::{DbCursorRO, DbDupCursorRO}, tables, - transaction::{DbTx, DbTxGAT}, + transaction::DbTx, }; use reth_primitives::{trie::Nibbles, Account, StorageEntry, B256, U256}; use std::collections::{HashMap, HashSet}; @@ -171,9 +171,9 @@ impl<'a, 'b, TX> HashedPostStateCursorFactory<'a, 'b, TX> { impl<'a, 'b, TX: DbTx> HashedCursorFactory for HashedPostStateCursorFactory<'a, 'b, TX> { type AccountCursor = - HashedPostStateAccountCursor<'b, >::Cursor>; + HashedPostStateAccountCursor<'b, ::Cursor>; type StorageCursor = - HashedPostStateStorageCursor<'b, >::DupCursor>; + HashedPostStateStorageCursor<'b, ::DupCursor>; fn hashed_account_cursor(&self) -> Result { let cursor = self.tx.cursor_read::()?; diff --git a/crates/trie/src/trie.rs b/crates/trie/src/trie.rs index 7a7d3a5a0d872..b1ca79681ee37 100644 --- a/crates/trie/src/trie.rs +++ b/crates/trie/src/trie.rs @@ -1254,7 +1254,7 @@ mod tests { } fn extension_node_storage_trie( - tx: &DatabaseProviderRW<'_, &DatabaseEnv>, + tx: &DatabaseProviderRW<&DatabaseEnv>, hashed_address: B256, ) -> (B256, HashMap) { let value = U256::from(1); @@ -1282,7 +1282,7 @@ mod tests { (root, updates) } - fn extension_node_trie(tx: &DatabaseProviderRW<'_, &DatabaseEnv>) -> B256 { + fn extension_node_trie(tx: &DatabaseProviderRW<&DatabaseEnv>) -> B256 { let a = Account { nonce: 0, balance: U256::from(1u64), bytecode_hash: Some(B256::random()) }; let val = encode_account(a, None); diff --git a/docs/crates/db.md b/docs/crates/db.md index 679ddcb016a9b..cf0161d2b5c3d 100644 --- a/docs/crates/db.md +++ b/docs/crates/db.md @@ -65,24 +65,23 @@ There are many tables within the node, all used to store different types of data ## Database -Reth's database design revolves around it's main [Database trait](https://github.com/paradigmxyz/reth/blob/eaca2a4a7fbbdc2f5cd15eab9a8a18ede1891bda/crates/storage/db/src/abstraction/database.rs#L21), which takes advantage of [generic associated types](https://blog.rust-lang.org/2022/10/28/gats-stabilization.html) and [a few design tricks](https://sabrinajewson.org/blog/the-better-alternative-to-lifetime-gats#the-better-gats) to implement the database's functionality across many types. Let's take a quick look at the `Database` trait and how it works. 
+Reth's database design revolves around its main [Database trait](https://github.com/paradigmxyz/reth/blob/eaca2a4a7fbbdc2f5cd15eab9a8a18ede1891bda/crates/storage/db/src/abstraction/database.rs#L21), which implements the database's functionality across many types. Let's take a quick look at the `Database` trait and how it works. [File: crates/storage/db/src/abstraction/database.rs](https://github.com/paradigmxyz/reth/blob/eaca2a4a7fbbdc2f5cd15eab9a8a18ede1891bda/crates/storage/db/src/abstraction/database.rs#L21) ```rust ignore /// Main Database trait that spawns transactions to be executed. -pub trait Database: for<'a> DatabaseGAT<'a> { - /// Create read only transaction. - fn tx(&self) -> Result<<Self as DatabaseGAT<'_>>::TX, Error>; - - /// Create read write transaction only possible if database is open with write access. - fn tx_mut(&self) -> Result<<Self as DatabaseGAT<'_>>::TXMut, Error>; +pub trait Database { + /// RO database transaction + type TX: DbTx + Send + Sync + Debug; + /// RW database transaction + type TXMut: DbTxMut + DbTx + TableImporter + Send + Sync + Debug; /// Takes a function and passes a read-only transaction into it, making sure it's closed in the /// end of the execution. fn view<T, F>(&self, f: F) -> Result<T, Error> where - F: Fn(&<Self as DatabaseGAT<'_>>::TX) -> T, + F: Fn(&<Self as Database>::TX) -> T, { let tx = self.tx()?; @@ -96,7 +95,7 @@ pub trait Database: for<'a> DatabaseGAT<'a> { /// the end of the execution. fn update<T, F>(&self, f: F) -> Result<T, Error> where - F: Fn(&<Self as DatabaseGAT<'_>>::TXMut) -> T, + F: Fn(&<Self as Database>::TXMut) -> T, { let tx = self.tx_mut()?; @@ -116,7 +115,7 @@ Any type that implements the `Database` trait can create a database transaction, pub struct Transaction<'this, DB: Database> { /// A handle to the DB. pub(crate) db: &'this DB, - tx: Option<<DB as DatabaseGAT<'this>>::TXMut>, + tx: Option<<DB as Database>::TXMut>, } //--snip-- @@ -134,26 +133,10 @@ where } ``` -The `Database` trait also implements the `DatabaseGAT` trait which defines two associated types `TX` and `TXMut`. +The `Database` trait defines two associated types, `TX` and `TXMut`. [File: crates/storage/db/src/abstraction/database.rs](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db/src/abstraction/database.rs#L11) -```rust ignore -/// Implements the GAT method from: -/// https://sabrinajewson.org/blog/the-better-alternative-to-lifetime-gats#the-better-gats. -/// -/// Sealed trait which cannot be implemented by 3rd parties, exposed only for implementers -pub trait DatabaseGAT<'a, __ImplicitBounds: Sealed = Bounds<&'a Self>>: Send + Sync { - /// RO database transaction - type TX: DbTx + Send + Sync; - /// RW database transaction - type TXMut: DbTxMut + DbTx + Send + Sync; -} -``` - -In Rust, associated types are like generics in that they can be any type fitting the generic's definition, with the difference being that associated types are associated with a trait and can only be used in the context of that trait. - -In the code snippet above, the `DatabaseGAT` trait has two associated types, `TX` and `TXMut`. The `TX` type can be any type that implements the `DbTx` trait, which provides a set of functions to interact with read only transactions.
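To make the flattened design concrete, here is a minimal sketch of a call site (illustrative only, not part of this patch): it assumes `Database`, `DbTx`, `tables`, and `DatabaseError` from `reth_db` are in scope, and `count_headers` is a hypothetical helper.

```rust ignore
// With plain associated types, `DB::TX` names the read-only transaction
// type directly; no `DatabaseGAT` helper trait or lifetime is involved.
fn count_headers<DB: Database>(db: &DB) -> Result<usize, DatabaseError> {
    // `view` opens a read-only transaction, passes it to the closure, and
    // wraps the closure's return value in `Ok`, so `?` flattens the
    // nested `Result`.
    db.view(|tx| tx.entries::<tables::Headers>())?
}
```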
@@ -161,26 +144,40 @@ The `TX` type can be any type that implements the `DbTx` trait, which provides a ```rust ignore /// Read only transaction -pub trait DbTx: for<'a> DbTxGAT<'a> { +pub trait DbTx: Send + Sync { + /// Cursor type for this read-only transaction + type Cursor<T: Table>: DbCursorRO<T> + Send + Sync; + /// DupCursor type for this read-only transaction + type DupCursor<T: DupSort>: DbDupCursorRO<T> + DbCursorRO<T> + Send + Sync; + /// Get value fn get<T: Table>(&self, key: T::Key) -> Result<Option<T::Value>, Error>; /// Commit for read only transaction will consume and free transaction and allows /// freeing of memory pages fn commit(self) -> Result<bool, Error>; /// Iterate over read only values in table. - fn cursor<T: Table>(&self) -> Result<<Self as DbTxGAT<'_>>::Cursor<T>, Error>; + fn cursor<T: Table>(&self) -> Result<Self::Cursor<T>, Error>; /// Iterate over read only values in dup sorted table. - fn cursor_dup<T: DupSort>(&self) -> Result<<Self as DbTxGAT<'_>>::DupCursor<T>, Error>; + fn cursor_dup<T: DupSort>(&self) -> Result<Self::DupCursor<T>, Error>; } ``` -The `TXMut` type can be any type that implements the `DbTxMut` trait, which provides a set of functions to interact with read/write transactions. +The `TXMut` type can be any type that implements the `DbTxMut` trait, which provides a set of functions to interact with read/write transactions and the associated cursor types. [File: crates/storage/db/src/abstraction/transaction.rs](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db/src/abstraction/transaction.rs#L49) ```rust ignore /// Read write transaction that allows writing to database -pub trait DbTxMut: for<'a> DbTxMutGAT<'a> { +pub trait DbTxMut: Send + Sync { + /// Read-Write Cursor type + type CursorMut<T: Table>: DbCursorRW<T> + DbCursorRO<T> + Send + Sync; + /// Read-Write DupCursor type + type DupCursorMut<T: DupSort>: DbDupCursorRW<T> + + DbCursorRW<T> + + DbDupCursorRO<T> + + DbCursorRO<T> + + Send + + Sync; /// Put value to database fn put<T: Table>(&self, key: T::Key, value: T::Value) -> Result<(), Error>; /// Delete value from database @@ -188,11 +185,11 @@ pub trait DbTxMut: for<'a> DbTxMutGAT<'a> { /// Clears database. fn clear<T: Table>(&self) -> Result<(), Error>; /// Cursor for writing - fn cursor_write<T: Table>(&self) -> Result<<Self as DbTxMutGAT<'_>>::CursorMut<T>, Error>; + fn cursor_write<T: Table>(&self) -> Result<Self::CursorMut<T>, Error>; /// DupCursor for writing fn cursor_dup_write<T: DupSort>( &self, - ) -> Result<<Self as DbTxMutGAT<'_>>::DupCursorMut<T>, Error>; + ) -> Result<Self::DupCursorMut<T>, Error>; } ``` @@ -220,14 +217,14 @@ where //--snip-- impl<'a, DB: Database> Deref for Transaction<'a, DB> { - type Target = <DB as DatabaseGAT<'a>>::TXMut; + type Target = <DB as Database>::TXMut; fn deref(&self) -> &Self::Target { self.tx.as_ref().expect("Tried getting a reference to a non-existent transaction") } } ``` -The `Transaction` struct implements the `Deref` trait, which returns a reference to its `tx` field, which is a `TxMut`. Recall that `TxMut` is a generic type on the `DatabaseGAT` trait, which is defined as `type TXMut: DbTxMut + DbTx + Send + Sync;`, giving it access to all of the functions available to `DbTx`, including the `DbTx::get()` function. +The `Transaction` struct implements the `Deref` trait, which returns a reference to its `tx` field, which is a `TXMut`. Recall that `TXMut` is an associated type on the `Database` trait, which is defined as `type TXMut: DbTxMut + DbTx + Send + Sync;`, giving it access to all of the functions available to `DbTx`, including the `DbTx::get()` function. Notice that the function uses a [turbofish](https://techblog.tonsser.com/posts/what-is-rusts-turbofish) to define which table to use when passing in the `key` to the `DbTx::get()` function.
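As a concrete illustration of that turbofish (a hypothetical fragment, not taken from the codebase): assuming `tx` is any type implementing `DbTx` and `block_number` is a `u64`, selecting the `Headers` table fixes both the key and the value type.

```rust ignore
// The turbofish picks the table: for `tables::Headers` the key is a block
// number and the result is `Option<Header>`.
let header = tx.get::<tables::Headers>(block_number)?;
```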
Taking a quick look at the function definition, a generic `T` is defined that implements the `Table` trait mentioned at the beginning of this chapter. @@ -268,19 +265,6 @@ This next example uses the `DbTx::cursor()` method to get a `Cursor`. The `Curso ``` -We are almost at the last stop in the tour of the `db` crate. In addition to the methods provided by the `DbTx` and `DbTxMut` traits, `DbTx` also inherits the `DbTxGAT` trait, while `DbTxMut` inherits `DbTxMutGAT`. These next two traits provide various associated types related to cursors as well as methods to utilize the cursor types. - -[File: crates/storage/db/src/abstraction/transaction.rs](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db/src/abstraction/transaction.rs#L12-L17) - -```rust ignore -pub trait DbTxGAT<'a, __ImplicitBounds: Sealed = Bounds<&'a Self>>: Send + Sync { - /// Cursor GAT - type Cursor<T: Table>: DbCursorRO<'a, T> + Send + Sync; - /// DupCursor GAT - type DupCursor<T: DupSort>: DbDupCursorRO<'a, T> + DbCursorRO<'a, T> + Send + Sync; -} -``` - Let's look at an example of how cursors are used. The code snippet below contains the `unwind` method from the `BodyStage` defined in the `stages` crate. This function is responsible for unwinding any changes to the database if there is an error when executing the body stage within the Reth pipeline. [File: crates/stages/src/stages/bodies.rs](https://github.com/paradigmxyz/reth/blob/main/crates/stages/src/stages/bodies.rs#L205-L238) From bfa21101919dd6e2e8253a0cf9aed713e891f55f Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 19 Nov 2023 17:12:07 +0100 Subject: [PATCH 49/77] chore(deps): weekly `cargo update` (#5492) Co-authored-by: github-merge-queue Co-authored-by: Matthias Seitz --- Cargo.lock | 214 +++++++++++++++++++++-------------------- 1 file changed, 86 insertions(+), 128 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a6f9284f91aa8..7a70b68ad11c4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -499,9 +499,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f658e2baef915ba0f26f1f7c42bfb8e12f532a01f449a090ded75ae7a07e9ba2" +checksum = "bc2d0cfb2a7388d34f590e76686704c494ed7aaceed62ee1ba35cbf363abc2a5" dependencies = [ "brotli", "flate2", @@ -1104,25 +1104,11 @@ dependencies = [ [[package]] name = "cargo-platform" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12024c4645c97566567129c204f65d5815a8c9aecf30fcbe682b2fe034996d36" -dependencies = [ - "serde", -] - -[[package]] -name = "cargo_metadata" -version = "0.17.0" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7daec1a2a2129eeba1644b220b4647ec537b0b5d4bfd6876fcc5a540056b592" +checksum = "e34637b3140142bdf929fb439e8aa4ebad7651ebf7b1080b3930aa16ac1459ff" dependencies = [ - "camino", - "cargo-platform", - "semver 1.0.20", "serde", - "serde_json", - "thiserror", ] [[package]] @@ -1652,9 +1638,9 @@ checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" [[package]] name = "crypto-bigint" -version = "0.5.3" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "740fe28e594155f10cfc383984cbefd529d7396050557148f79cb0f621204124" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies =
[ "generic-array", "rand_core 0.6.4", @@ -2109,9 +2095,9 @@ checksum = "6c8adcce29eef18ae1369bbd268fd56bf98144e80281315e9d4a82e34df001c7" [[package]] name = "ecdsa" -version = "0.16.8" +version = "0.16.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4b1e0c257a9e9f25f90ff76d7a68360ed497ee519c8e428d1825ef0000799d4" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ "der", "digest 0.10.7", @@ -2133,15 +2119,16 @@ dependencies = [ [[package]] name = "ed25519-dalek" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7277392b266383ef8396db7fdeb1e77b6c52fed775f5df15bb24f35b72156980" +checksum = "1f628eaec48bfd21b865dc2950cfa014450c01d2fa2b69a86c2fd5844ec523c0" dependencies = [ "curve25519-dalek", "ed25519", "rand_core 0.6.4", "serde", "sha2", + "subtle", "zeroize", ] @@ -2183,9 +2170,9 @@ checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" [[package]] name = "elliptic-curve" -version = "0.13.6" +version = "0.13.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d97ca172ae9dc9f9b779a6e3a65d308f2af74e5b8c921299075bdb4a0370e914" +checksum = "e9775b22bc152ad86a0cf23f0f348b884b26add12bf741e7ffc4d4ab2ab4d205" dependencies = [ "base16ct", "crypto-bigint", @@ -2303,9 +2290,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c18ee0ed65a5f1f81cac6b1d213b69c35fa47d4252ad41f1486dbd8226fe36e" +checksum = "f258a7194e7f7c2a7837a8913aeab7fd8c383457034fa20ce4dd3dcb813e8eb8" dependencies = [ "libc", "windows-sys 0.48.0", @@ -2383,9 +2370,9 @@ dependencies = [ [[package]] name = "ethers-contract" -version = "2.0.10" +version = "2.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d79269278125006bb0552349c03593ffa9702112ca88bc7046cc669f148fb47c" +checksum = "0111ead599d17a7bff6985fd5756f39ca7033edc79a31b23026a8d5d64fa95cd" dependencies = [ "const-hex", "ethers-contract-abigen", @@ -2402,9 +2389,9 @@ dependencies = [ [[package]] name = "ethers-contract-abigen" -version = "2.0.10" +version = "2.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce95a43c939b2e4e2f3191c5ad4a1f279780b8a39139c9905b43a7433531e2ab" +checksum = "51258120c6b47ea9d9bec0d90f9e8af71c977fbefbef8213c91bfed385fe45eb" dependencies = [ "Inflector", "const-hex", @@ -2418,15 +2405,15 @@ dependencies = [ "serde", "serde_json", "syn 2.0.39", - "toml 0.7.8", + "toml 0.8.8", "walkdir", ] [[package]] name = "ethers-contract-derive" -version = "2.0.10" +version = "2.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9ce44906fc871b3ee8c69a695ca7ec7f70e50cb379c9b9cb5e532269e492f6" +checksum = "936e7a0f1197cee2b62dc89f63eff3201dbf87c283ff7e18d86d38f83b845483" dependencies = [ "Inflector", "const-hex", @@ -2440,13 +2427,13 @@ dependencies = [ [[package]] name = "ethers-core" -version = "2.0.10" +version = "2.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0a17f0708692024db9956b31d7a20163607d2745953f5ae8125ab368ba280ad" +checksum = "2f03e0bdc216eeb9e355b90cf610ef6c5bb8aca631f97b5ae9980ce34ea7878d" dependencies = [ "arrayvec", "bytes", - "cargo_metadata 0.17.0", + "cargo_metadata", "chrono", "const-hex", 
"elliptic-curve", @@ -2468,32 +2455,16 @@ dependencies = [ "unicode-xid", ] -[[package]] -name = "ethers-etherscan" -version = "2.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e53451ea4a8128fbce33966da71132cf9e1040dcfd2a2084fd7733ada7b2045" -dependencies = [ - "ethers-core", - "reqwest", - "semver 1.0.20", - "serde", - "serde_json", - "thiserror", - "tracing", -] - [[package]] name = "ethers-middleware" -version = "2.0.10" +version = "2.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "473f1ccd0c793871bbc248729fa8df7e6d2981d6226e4343e3bbaa9281074d5d" +checksum = "681ece6eb1d10f7cf4f873059a77c04ff1de4f35c63dd7bccde8f438374fcb93" dependencies = [ "async-trait", "auto_impl", "ethers-contract", "ethers-core", - "ethers-etherscan", "ethers-providers", "ethers-signers", "futures-channel", @@ -2512,9 +2483,9 @@ dependencies = [ [[package]] name = "ethers-providers" -version = "2.0.10" +version = "2.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6838fa110e57d572336178b7c79e94ff88ef976306852d8cb87d9e5b1fc7c0b5" +checksum = "25d6c0c9455d93d4990c06e049abf9b30daf148cf461ee939c11d88907c60816" dependencies = [ "async-trait", "auto_impl", @@ -2550,9 +2521,9 @@ dependencies = [ [[package]] name = "ethers-signers" -version = "2.0.10" +version = "2.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ea44bec930f12292866166f9ddbea6aa76304850e4d8dcd66dc492b43d00ff1" +checksum = "0cb1b714e227bbd2d8c53528adb580b203009728b17d0d0e4119353aa9bc5532" dependencies = [ "async-trait", "coins-bip32", @@ -2598,9 +2569,9 @@ dependencies = [ [[package]] name = "eyre" -version = "0.6.8" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c2b6b5a29c02cdc822728b7d7b8ae1bab3e3b05d44522770ddd49722eeac7eb" +checksum = "80f656be11ddf91bd709454d15d5bd896fbaf4cc3314e69349e4d1569f5b46cd" dependencies = [ "indenter", "once_cell", @@ -2659,9 +2630,9 @@ dependencies = [ [[package]] name = "fiat-crypto" -version = "0.2.3" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f69037fe1b785e84986b4f2cbcf647381876a00671d25ceef715d7812dd7e1dd" +checksum = "27573eac26f4dd11e2b1916c3fe1baa56407c83c71a773a8ba17ec0bca03b6b7" [[package]] name = "findshlibs" @@ -2995,9 +2966,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.21" +version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91fc23aa11be92976ef4729127f1a74adf36d8436f7816b185d18df956790833" +checksum = "4d6250322ef6e60f93f9a2162799302cd6f68f79f6e5d85c8c16f14d1d958178" dependencies = [ "bytes", "fnv", @@ -3005,7 +2976,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 1.9.3", + "indexmap 2.1.0", "slab", "tokio", "tokio-util", @@ -3079,9 +3050,9 @@ dependencies = [ [[package]] name = "hdrhistogram" -version = "7.5.3" +version = "7.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5b38e5c02b7c7be48c8dc5217c4f1634af2ea221caae2e024bffc7a7651c691" +checksum = "765c9198f173dd59ce26ff9f95ef0aafd0a0fe01fb9d72841bc5066a4c06511d" dependencies = [ "byteorder", "num-traits", @@ -3168,9 +3139,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f95b9abcae896730d42b78e09c155ed4ddf82c07b4de772c64aee5b2d8b7c150" +checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" dependencies = [ "bytes", "fnv", @@ -3927,9 +3898,9 @@ dependencies = [ [[package]] name = "k256" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cadb76004ed8e97623117f3df85b17aaa6626ab0b0831e6573f104df16cd1bcc" +checksum = "3f01b677d82ef7a676aa37e099defd83a28e15687112cafdd112d60236b6115b" dependencies = [ "cfg-if", "ecdsa", @@ -4030,9 +4001,9 @@ checksum = "969488b55f8ac402214f3f5fd243ebb7206cf82de60d3172994707a4bcc2b829" [[package]] name = "litemap" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a1a2647d5b7134127971a6de0d533c49de2159167e7f259c427195f87168a1" +checksum = "f9d642685b028806386b2b6e75685faadd3eb65a85fff7df711ce18446a422da" [[package]] name = "lock_api" @@ -6869,9 +6840,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.21" +version = "0.38.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3" +checksum = "9ad981d6c340a49cdc40a1028d9c6084ec7e9fa33fcb839cab656a267071e234" dependencies = [ "bitflags 2.4.1", "errno", @@ -6882,9 +6853,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.8" +version = "0.21.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "446e14c5cda4f3f30fe71863c34ec70f5ac79d6087097ad0bb433e1be5edf04c" +checksum = "629648aced5775d558af50b2b4c7b02983a04b312126d45eeead26e7caa498b9" dependencies = [ "log", "ring 0.17.5", @@ -7395,9 +7366,9 @@ dependencies = [ [[package]] name = "signature" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ "digest 0.10.7", "rand_core 0.6.4", @@ -7646,9 +7617,9 @@ dependencies = [ [[package]] name = "symbolic-common" -version = "12.5.0" +version = "12.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d3aa424281de488c1ddbaffb55a421ad87d04b0fdd5106e7e71d748c0c71ea6" +checksum = "39eac77836da383d35edbd9ff4585b4fc1109929ff641232f2e9a1aefdfc9e91" dependencies = [ "debugid", "memmap2 0.8.0", @@ -7658,9 +7629,9 @@ dependencies = [ [[package]] name = "symbolic-demangle" -version = "12.5.0" +version = "12.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bdcf77effe2908a21c1011b4d49a7122e0f44487a6ad89db67c55a1687e2572" +checksum = "4ee1608a1d13061fb0e307a316de29f6c6e737b05459fe6bbf5dd8d7837c4fb7" dependencies = [ "cpp_demangle", "rustc-demangle", @@ -7755,9 +7726,9 @@ dependencies = [ [[package]] name = "termcolor" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6093bad37da69aab9d123a8091e4be0aa4a03e4d601ec641c327398315f62b64" +checksum = "ff1bc3d3f05aff0403e8ac0d92ced918ec05b666a43f83297ccef5bea8a3d449" dependencies = [ "winapi-util", ] @@ -7770,9 +7741,9 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "test-fuzz" -version = "4.0.3" +version = "4.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"59bdd14ea6ac9fd993d966b0133da233f534bac0c1a44a2200cec1eb244c733c" +checksum = "de8cb3597f1463b9c98b21c08d11033166a57942e60e8044e7e3bb4a8ca5416b" dependencies = [ "serde", "test-fuzz-internal", @@ -7782,20 +7753,20 @@ dependencies = [ [[package]] name = "test-fuzz-internal" -version = "4.0.3" +version = "4.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7eb212edbf2406eed119bd5e1b89bf3201f3f9d9961b5ae39324873f2a0805ed" +checksum = "3dd8da182ee4e8b195da3aa38f72b84d267bda3874cd6ef8dd29c03a71f866f2" dependencies = [ "bincode", - "cargo_metadata 0.18.1", + "cargo_metadata", "serde", ] [[package]] name = "test-fuzz-macro" -version = "4.0.3" +version = "4.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b2f42720e86f42661bd88d7aaa9d041056530f79c1f0bc6ac90dfb681905e86" +checksum = "86cb030b9e51def5bd7bf98b3ee6e81aae7f021ebf2e05e70029b768508c376f" dependencies = [ "darling 0.20.3", "if_chain", @@ -7810,9 +7781,9 @@ dependencies = [ [[package]] name = "test-fuzz-runtime" -version = "4.0.3" +version = "4.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e0aae6ea22e9e0730b79eac5cb7426dc257503d07ecedf7bd799598070908d1" +checksum = "dd6e7a964e6c5b20df8b03572f7fa43aa28d80fa4871b3083e597ed32664f614" dependencies = [ "hex", "num-traits", @@ -8041,18 +8012,6 @@ dependencies = [ "serde", ] -[[package]] -name = "toml" -version = "0.7.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd79e69d3b627db300ff956027cc6c3798cef26d22526befdfcd12feeb6d2257" -dependencies = [ - "serde", - "serde_spanned", - "toml_datetime", - "toml_edit 0.19.15", -] - [[package]] name = "toml" version = "0.8.8" @@ -8081,8 +8040,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ "indexmap 2.1.0", - "serde", - "serde_spanned", "toml_datetime", "winnow", ] @@ -8213,11 +8170,12 @@ dependencies = [ [[package]] name = "tracing-appender" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d48f71a791638519505cefafe162606f706c25592e4bde4d97600c0195312e" +checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf" dependencies = [ "crossbeam-channel", + "thiserror", "time", "tracing-subscriber", ] @@ -8268,9 +8226,9 @@ dependencies = [ [[package]] name = "tracing-log" -version = "0.1.4" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f751112709b4e791d8ce53e32c4ed2d353565a795ce84da2285393f41557bdf2" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" dependencies = [ "log", "once_cell", @@ -8279,9 +8237,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.17" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" +checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" dependencies = [ "matchers", "nu-ansi-term", @@ -9007,9 +8965,9 @@ checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" [[package]] name = "writeable" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0af0c3d13faebf8dda0b5256fa7096a2d5ccb662f7b9f54a40fe201077ab1c2" +checksum = 
"dad7bb64b8ef9c0aa27b6da38b452b0ee9fd82beaf276a87dd796fb55cbae14e" [[package]] name = "ws_stream_wasm" @@ -9071,9 +9029,9 @@ checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" [[package]] name = "yoke" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e38c508604d6bbbd292dadb3c02559aa7fff6b654a078a36217cad871636e4" +checksum = "65e71b2e4f287f467794c671e2b8f8a5f3716b3c829079a1c44740148eff07e4" dependencies = [ "serde", "stable_deref_trait", @@ -9083,9 +9041,9 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5e19fb6ed40002bab5403ffa37e53e0e56f914a4450c8765f533018db1db35f" +checksum = "9e6936f0cce458098a201c245a11bef556c6a0181129c7034d10d76d1ec3a2b8" dependencies = [ "proc-macro2", "quote", @@ -9095,18 +9053,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.25" +version = "0.7.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cd369a67c0edfef15010f980c3cbe45d7f651deac2cd67ce097cd801de16557" +checksum = "e97e415490559a91254a2979b4829267a57d2fcd741a98eee8b722fb57289aa0" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.25" +version = "0.7.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2f140bda219a26ccc0cdb03dba58af72590c53b22642577d88a927bc5c87d6b" +checksum = "dd7e48ccf166952882ca8bd778a43502c64f33bf94c12ebe2a7f08e5a0f6689f" dependencies = [ "proc-macro2", "quote", @@ -9136,9 +9094,9 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" dependencies = [ "zeroize_derive", ] From e6040e8f234c2a2b78d08619039381c702bbae8c Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Mon, 20 Nov 2023 13:28:45 +0100 Subject: [PATCH 50/77] ci: add MSRV lint job (#5495) --- .github/workflows/lint.yml | 23 +++++++++++++++++++++++ Cargo.toml | 2 +- README.md | 7 +++++++ 3 files changed, 31 insertions(+), 1 deletion(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 61a28bcb777bd..60341fcb805b7 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -32,6 +32,29 @@ jobs: env: RUSTFLAGS: -D warnings + msrv: + name: MSRV / ${{ matrix.network }} + runs-on: ubuntu-latest + timeout-minutes: 30 + strategy: + matrix: + include: + - binary: reth + network: ethereum + - binary: op-reth + network: optimism + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@master + with: + toolchain: "1.70" # MSRV + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + - run: cargo build --bin "${{ matrix.binary }}" --workspace --features "${{ matrix.network }}" + env: + RUSTFLAGS: -D warnings + docs: name: docs runs-on: ubuntu-latest diff --git a/Cargo.toml b/Cargo.toml index d0bbf106e8794..494015c7fe339 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -64,7 +64,7 @@ resolver = "2" [workspace.package] version = "0.1.0-alpha.10" edition = "2021" -rust-version = "1.70" # Remember to update clippy.toml and README.md +rust-version = "1.70" license = "MIT OR Apache-2.0" homepage = 
"https://paradigmxyz.github.io/reth" repository = "https://github.com/paradigmxyz/reth" diff --git a/README.md b/README.md index 8dd1c987d7a4f..910a7cf401d23 100644 --- a/README.md +++ b/README.md @@ -73,6 +73,13 @@ If you want to contribute, or follow along with contributor discussion, you can ### Building and testing + + The Minimum Supported Rust Version (MSRV) of this project is [1.70.0](https://blog.rust-lang.org/2023/06/01/Rust-1.70.0.html). See the book for detailed instructions on how to [build from source](https://paradigmxyz.github.io/reth/installation/source.html). From 1cc68fcca6c6698bee15bef5bfa6a3a00a0595c3 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 20 Nov 2023 13:32:37 +0100 Subject: [PATCH 51/77] chore: preallocate buffer for rlp (#5494) --- crates/primitives/src/transaction/mod.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 47a262705d4b1..9702ae19eb4d3 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -819,7 +819,8 @@ impl TransactionSignedNoHash { /// Calculates the transaction hash. If used more than once, it's better to convert it to /// [`TransactionSigned`] first. pub fn hash(&self) -> B256 { - let mut buf = Vec::new(); + // pre-allocate buffer for the transaction + let mut buf = Vec::with_capacity(128 + self.transaction.input().len()); self.transaction.encode_with_signature(&self.signature, &mut buf, false); keccak256(&buf) } From 3c7989c54111ccb4246de3d6a117fe7892d5aabc Mon Sep 17 00:00:00 2001 From: "Supernovahs.eth" <91280922+supernovahs@users.noreply.github.com> Date: Mon, 20 Nov 2023 20:26:08 +0530 Subject: [PATCH 52/77] block_with_senders in ethstatecache (#5302) Co-authored-by: Matthias Seitz --- crates/blockchain-tree/src/blockchain_tree.rs | 12 ++ crates/blockchain-tree/src/noop.rs | 4 + crates/blockchain-tree/src/shareable.rs | 5 + crates/blockchain-tree/src/state.rs | 13 +- crates/interfaces/src/blockchain_tree/mod.rs | 11 ++ crates/primitives/src/block.rs | 17 +++ crates/rpc/rpc-types-compat/src/block.rs | 49 ++++---- crates/rpc/rpc/src/eth/api/block.rs | 20 +++- crates/rpc/rpc/src/eth/api/mod.rs | 6 +- crates/rpc/rpc/src/eth/api/pending_block.rs | 19 +-- crates/rpc/rpc/src/eth/cache/mod.rs | 112 +++++++++++++----- crates/storage/provider/src/chain.rs | 9 +- .../provider/src/providers/database/mod.rs | 10 +- .../src/providers/database/provider.rs | 4 + crates/storage/provider/src/providers/mod.rs | 8 ++ .../storage/provider/src/test_utils/mock.rs | 8 +- .../storage/provider/src/test_utils/noop.rs | 8 +- crates/storage/provider/src/traits/block.rs | 6 + 18 files changed, 239 insertions(+), 82 deletions(-) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index f869eb4859566..2aec2d2454fe1 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -209,10 +209,22 @@ impl BlockchainTree { /// Returns the block with matching hash from any side-chain. /// /// Caution: This will not return blocks from the canonical chain. + #[inline] pub fn block_by_hash(&self, block_hash: BlockHash) -> Option<&SealedBlock> { self.state.block_by_hash(block_hash) } + /// Returns the block with matching hash from any side-chain. + /// + /// Caution: This will not return blocks from the canonical chain. 
+ #[inline] + pub fn block_with_senders_by_hash( + &self, + block_hash: BlockHash, + ) -> Option<&SealedBlockWithSenders> { + self.state.block_with_senders_by_hash(block_hash) + } + /// Returns the block's receipts with matching hash from any side-chain. /// /// Caution: This will not return blocks from the canonical chain. diff --git a/crates/blockchain-tree/src/noop.rs b/crates/blockchain-tree/src/noop.rs index 95709dc7de812..732a7d1a09c96 100644 --- a/crates/blockchain-tree/src/noop.rs +++ b/crates/blockchain-tree/src/noop.rs @@ -74,6 +74,10 @@ impl BlockchainTreeViewer for NoopBlockchainTree { None } + fn block_with_senders_by_hash(&self, _hash: BlockHash) -> Option { + None + } + fn buffered_block_by_hash(&self, _block_hash: BlockHash) -> Option { None } diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index ebb57ca1c7830..d4776a67ce50e 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -117,6 +117,11 @@ impl BlockchainTreeViewer for ShareableBlockc self.tree.read().block_by_hash(block_hash).cloned() } + fn block_with_senders_by_hash(&self, block_hash: BlockHash) -> Option { + trace!(target: "blockchain_tree", ?block_hash, "Returning block by hash"); + self.tree.read().block_with_senders_by_hash(block_hash).cloned() + } + fn buffered_block_by_hash(&self, block_hash: BlockHash) -> Option { self.tree.read().get_buffered_block(&block_hash).map(|b| b.block.clone()) } diff --git a/crates/blockchain-tree/src/state.rs b/crates/blockchain-tree/src/state.rs index bca7ddf409571..8c4c582294149 100644 --- a/crates/blockchain-tree/src/state.rs +++ b/crates/blockchain-tree/src/state.rs @@ -56,10 +56,21 @@ impl TreeState { /// Returns the block with matching hash from any side-chain. /// /// Caution: This will not return blocks from the canonical chain. + #[inline] pub(crate) fn block_by_hash(&self, block_hash: BlockHash) -> Option<&SealedBlock> { + self.block_with_senders_by_hash(block_hash).map(|block| &block.block) + } + /// Returns the block with matching hash from any side-chain. + /// + /// Caution: This will not return blocks from the canonical chain. + #[inline] + pub(crate) fn block_with_senders_by_hash( + &self, + block_hash: BlockHash, + ) -> Option<&SealedBlockWithSenders> { let id = self.block_indices.get_blocks_chain_id(&block_hash)?; let chain = self.chains.get(&id)?; - chain.block(block_hash) + chain.block_with_senders(block_hash) } /// Returns the block's receipts with matching hash from any side-chain. diff --git a/crates/interfaces/src/blockchain_tree/mod.rs b/crates/interfaces/src/blockchain_tree/mod.rs index 8a365361bd885..3d77fef03cf94 100644 --- a/crates/interfaces/src/blockchain_tree/mod.rs +++ b/crates/interfaces/src/blockchain_tree/mod.rs @@ -237,6 +237,12 @@ pub trait BlockchainTreeViewer: Send + Sync { /// disconnected from the canonical chain. fn block_by_hash(&self, hash: BlockHash) -> Option; + /// Returns the block with matching hash from the tree, if it exists. + /// + /// Caution: This will not return blocks from the canonical chain or buffered blocks that are + /// disconnected from the canonical chain. + fn block_with_senders_by_hash(&self, hash: BlockHash) -> Option; + /// Returns the _buffered_ (disconnected) block with matching hash from the internal buffer if /// it exists. /// @@ -295,6 +301,11 @@ pub trait BlockchainTreeViewer: Send + Sync { self.block_by_hash(self.pending_block_num_hash()?.hash) } + /// Returns the pending block if there is one. 
+ fn pending_block_with_senders(&self) -> Option { + self.block_with_senders_by_hash(self.pending_block_num_hash()?.hash) + } + /// Returns the pending block and its receipts in one call. /// /// This exists to prevent a potential data race if the pending block changes in between diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index a432cdbc20896..54e0085f531be 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -98,7 +98,17 @@ impl BlockWithSenders { (!block.body.len() != senders.len()).then_some(Self { block, senders }) } + /// Seal the block with a known hash. + /// + /// WARNING: This method does not perform validation whether the hash is correct. + #[inline] + pub fn seal(self, hash: B256) -> SealedBlockWithSenders { + let Self { block, senders } = self; + SealedBlockWithSenders { block: block.seal(hash), senders } + } + /// Split Structure to its components + #[inline] pub fn into_components(self) -> (Block, Vec
) { (self.block, self.senders) } @@ -288,6 +298,13 @@ impl SealedBlockWithSenders { (self.block, self.senders) } + /// Returns the unsealed [BlockWithSenders] + #[inline] + pub fn unseal(self) -> BlockWithSenders { + let Self { block, senders } = self; + BlockWithSenders { block: block.unseal(), senders } + } + /// Returns an iterator over all transactions in the block. #[inline] pub fn transactions(&self) -> impl Iterator + '_ { diff --git a/crates/rpc/rpc-types-compat/src/block.rs b/crates/rpc/rpc-types-compat/src/block.rs index 570697dffb760..578a47b36929c 100644 --- a/crates/rpc/rpc-types-compat/src/block.rs +++ b/crates/rpc/rpc-types-compat/src/block.rs @@ -2,7 +2,9 @@ use crate::transaction::from_recovered_with_block_context; use alloy_rlp::Encodable; -use reth_primitives::{Block as PrimitiveBlock, Header as PrimitiveHeader, B256, U256, U64}; +use reth_primitives::{ + Block as PrimitiveBlock, BlockWithSenders, Header as PrimitiveHeader, B256, U256, U64, +}; use reth_rpc_types::{Block, BlockError, BlockTransactions, BlockTransactionsKind, Header}; /// Converts the given primitive block into a [Block] response with the given @@ -10,7 +12,7 @@ use reth_rpc_types::{Block, BlockError, BlockTransactions, BlockTransactionsKind /// /// If a `block_hash` is provided, then this is used, otherwise the block hash is computed. pub fn from_block( - block: PrimitiveBlock, + block: BlockWithSenders, total_difficulty: U256, kind: BlockTransactionsKind, block_hash: Option, @@ -29,7 +31,7 @@ pub fn from_block( /// This will populate the `transactions` field with only the hashes of the transactions in the /// block: [BlockTransactions::Hashes] pub fn from_block_with_tx_hashes( - block: PrimitiveBlock, + block: BlockWithSenders, total_difficulty: U256, block_hash: Option, ) -> Block { @@ -39,7 +41,7 @@ pub fn from_block_with_tx_hashes( from_block_with_transactions( block.length(), block_hash, - block, + block.block, total_difficulty, BlockTransactions::Hashes(transactions), ) @@ -51,35 +53,38 @@ pub fn from_block_with_tx_hashes( /// This will populate the `transactions` field with the _full_ /// [Transaction](reth_rpc_types::Transaction) objects: [BlockTransactions::Full] pub fn from_block_full( - mut block: PrimitiveBlock, + mut block: BlockWithSenders, total_difficulty: U256, block_hash: Option, ) -> Result { - let block_hash = block_hash.unwrap_or_else(|| block.header.hash_slow()); - let block_number = block.number; - let base_fee_per_gas = block.base_fee_per_gas; + let block_hash = block_hash.unwrap_or_else(|| block.block.header.hash_slow()); + let block_number = block.block.number; + let base_fee_per_gas = block.block.base_fee_per_gas; // NOTE: we can safely remove the body here because not needed to finalize the `Block` in // `from_block_with_transactions`, however we need to compute the length before - let block_length = block.length(); - let body = std::mem::take(&mut block.body); + let block_length = block.block.length(); + let body = std::mem::take(&mut block.block.body); + let transactions_with_senders = body.into_iter().zip(block.senders); + let transactions = transactions_with_senders + .enumerate() + .map(|(idx, (tx, sender))| { + let signed_tx_ec_recovered = tx.with_signer(sender); - let mut transactions = Vec::with_capacity(block.body.len()); - for (idx, tx) in body.into_iter().enumerate() { - let signed_tx = tx.into_ecrecovered().ok_or(BlockError::InvalidSignature)?; - transactions.push(from_recovered_with_block_context( - signed_tx, - block_hash, - block_number, - base_fee_per_gas, - 
U256::from(idx), - )) - } + from_recovered_with_block_context( + signed_tx_ec_recovered, + block_hash, + block_number, + base_fee_per_gas, + U256::from(idx), + ) + }) + .collect::>(); Ok(from_block_with_transactions( block_length, block_hash, - block, + block.block, total_difficulty, BlockTransactions::Full(transactions), )) diff --git a/crates/rpc/rpc/src/eth/api/block.rs b/crates/rpc/rpc/src/eth/api/block.rs index 3cd33be201f90..c1c835107cdb6 100644 --- a/crates/rpc/rpc/src/eth/api/block.rs +++ b/crates/rpc/rpc/src/eth/api/block.rs @@ -146,13 +146,23 @@ where &self, block_id: impl Into, ) -> EthResult> { + self.block_with_senders(block_id) + .await + .map(|maybe_block| maybe_block.map(|block| block.block)) + } + + /// Returns the block object for the given block id. + pub(crate) async fn block_with_senders( + &self, + block_id: impl Into, + ) -> EthResult> { let block_id = block_id.into(); if block_id.is_pending() { // Pending block can be fetched directly without need for caching - let maybe_pending = self.provider().pending_block()?; + let maybe_pending = self.provider().pending_block_with_senders()?; return if maybe_pending.is_some() { - return Ok(maybe_pending) + Ok(maybe_pending) } else { self.local_pending_block().await } @@ -163,7 +173,7 @@ where None => return Ok(None), }; - Ok(self.cache().get_sealed_block(block_hash).await?) + Ok(self.cache().get_sealed_block_with_senders(block_hash).await?) } /// Returns the populated rpc block object for the given block id. @@ -175,7 +185,7 @@ where block_id: impl Into, full: bool, ) -> EthResult> { - let block = match self.block(block_id).await? { + let block = match self.block_with_senders(block_id).await? { Some(block) => block, None => return Ok(None), }; @@ -184,7 +194,7 @@ where .provider() .header_td_by_number(block.number)? .ok_or(EthApiError::UnknownBlockNumber)?; - let block = from_block(block.into(), total_difficulty, full.into(), Some(block_hash))?; + let block = from_block(block.unseal(), total_difficulty, full.into(), Some(block_hash))?; Ok(Some(block.into())) } } diff --git a/crates/rpc/rpc/src/eth/api/mod.rs b/crates/rpc/rpc/src/eth/api/mod.rs index 5312c822921c5..4ff22571dec99 100644 --- a/crates/rpc/rpc/src/eth/api/mod.rs +++ b/crates/rpc/rpc/src/eth/api/mod.rs @@ -15,7 +15,7 @@ use reth_interfaces::RethResult; use reth_network_api::NetworkInfo; use reth_primitives::{ revm_primitives::{BlockEnv, CfgEnv}, - Address, BlockId, BlockNumberOrTag, ChainInfo, SealedBlock, B256, U256, U64, + Address, BlockId, BlockNumberOrTag, ChainInfo, SealedBlockWithSenders, B256, U256, U64, }; use reth_provider::{ BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderBox, StateProviderFactory, @@ -246,7 +246,7 @@ where /// /// If no pending block is available, this will derive it from the `latest` block pub(crate) fn pending_block_env_and_cfg(&self) -> EthResult { - let origin = if let Some(pending) = self.provider().pending_block()? { + let origin = if let Some(pending) = self.provider().pending_block_with_senders()? 
{ PendingBlockEnvOrigin::ActualPending(pending) } else { // no pending block from the CL yet, so we use the latest block and modify the env @@ -281,7 +281,7 @@ where } /// Returns the locally built pending block - pub(crate) async fn local_pending_block(&self) -> EthResult> { + pub(crate) async fn local_pending_block(&self) -> EthResult> { let pending = self.pending_block_env_and_cfg()?; if pending.origin.is_actual_pending() { return Ok(pending.origin.into_actual_pending()) diff --git a/crates/rpc/rpc/src/eth/api/pending_block.rs b/crates/rpc/rpc/src/eth/api/pending_block.rs index 827dfec1a17b6..3c6f6b58793cc 100644 --- a/crates/rpc/rpc/src/eth/api/pending_block.rs +++ b/crates/rpc/rpc/src/eth/api/pending_block.rs @@ -9,7 +9,7 @@ use reth_primitives::{ BlockEnv, CfgEnv, EVMError, Env, InvalidTransaction, ResultAndState, SpecId, }, Block, BlockId, BlockNumberOrTag, ChainSpec, Header, IntoRecoveredTransaction, Receipt, - Receipts, SealedBlock, SealedHeader, B256, EMPTY_OMMER_ROOT_HASH, U256, + Receipts, SealedBlockWithSenders, SealedHeader, B256, EMPTY_OMMER_ROOT_HASH, U256, }; use reth_provider::{BundleStateWithReceipts, ChainSpecProvider, StateProviderFactory}; use reth_revm::{ @@ -42,7 +42,7 @@ impl PendingBlockEnv { self, client: &Client, pool: &Pool, - ) -> EthResult + ) -> EthResult where Client: StateProviderFactory + ChainSpecProvider, Pool: TransactionPool, @@ -61,6 +61,7 @@ impl PendingBlockEnv { let block_number = block_env.number.to::(); let mut executed_txs = Vec::new(); + let mut senders = Vec::new(); let mut best_txs = pool.best_transactions_with_base_fee(base_fee); let (withdrawals, withdrawals_root) = match origin { @@ -176,7 +177,9 @@ impl PendingBlockEnv { })); // append transaction to the list of executed transactions - executed_txs.push(tx.into_signed()); + let (tx, sender) = tx.to_components(); + executed_txs.push(tx); + senders.push(sender); } // executes the withdrawals and commits them to the Database and BundleState. @@ -236,9 +239,7 @@ impl PendingBlockEnv { // seal the block let block = Block { header, body: executed_txs, ommers: vec![], withdrawals }; - let sealed_block = block.seal_slow(); - - Ok(sealed_block) + Ok(SealedBlockWithSenders { block: block.seal_slow(), senders }) } } @@ -286,7 +287,7 @@ where #[derive(Clone, Debug)] pub(crate) enum PendingBlockEnvOrigin { /// The pending block as received from the CL. - ActualPending(SealedBlock), + ActualPending(SealedBlockWithSenders), /// The header of the latest block DerivedFromLatest(SealedHeader), } @@ -298,7 +299,7 @@ impl PendingBlockEnvOrigin { } /// Consumes the type and returns the actual pending block. 
- pub(crate) fn into_actual_pending(self) -> Option { + pub(crate) fn into_actual_pending(self) -> Option { match self { PendingBlockEnvOrigin::ActualPending(block) => Some(block), _ => None, @@ -337,7 +338,7 @@ impl PendingBlockEnvOrigin { #[derive(Debug)] pub(crate) struct PendingBlock { /// The cached pending block - pub(crate) block: SealedBlock, + pub(crate) block: SealedBlockWithSenders, /// Timestamp when the pending block is considered outdated pub(crate) expires_at: Instant, } diff --git a/crates/rpc/rpc/src/eth/cache/mod.rs b/crates/rpc/rpc/src/eth/cache/mod.rs index fb1f65d1db594..735d15418d23a 100644 --- a/crates/rpc/rpc/src/eth/cache/mod.rs +++ b/crates/rpc/rpc/src/eth/cache/mod.rs @@ -2,9 +2,12 @@ use futures::{future::Either, Stream, StreamExt}; use reth_interfaces::provider::{ProviderError, ProviderResult}; -use reth_primitives::{Block, Receipt, SealedBlock, TransactionSigned, B256}; +use reth_primitives::{ + Block, BlockHashOrNumber, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, + TransactionSigned, B256, +}; use reth_provider::{ - BlockReader, BlockSource, CanonStateNotification, EvmEnvProvider, StateProviderFactory, + BlockReader, CanonStateNotification, EvmEnvProvider, StateProviderFactory, TransactionVariant, }; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; use revm::primitives::{BlockEnv, CfgEnv}; @@ -29,13 +32,13 @@ mod metrics; mod multi_consumer; pub use multi_consumer::MultiConsumerLruCache; -/// The type that can send the response to a requested [Block] -type BlockResponseSender = oneshot::Sender>>; - /// The type that can send the response to a requested [Block] type BlockTransactionsResponseSender = oneshot::Sender>>>; +/// The type that can send the response to a requested [BlockWithSenders] +type BlockWithSendersResponseSender = oneshot::Sender>>; + /// The type that can send the response to the requested receipts of a block. type ReceiptsResponseSender = oneshot::Sender>>>; @@ -44,9 +47,9 @@ type EnvResponseSender = oneshot::Sender>; type BlockLruCache = MultiConsumerLruCache< B256, - Block, + BlockWithSenders, L, - Either, + Either, >; type ReceiptsLruCache = MultiConsumerLruCache, L, ReceiptsResponseSender>; @@ -130,8 +133,15 @@ impl EthStateCache { /// Returns `None` if the block does not exist. pub(crate) async fn get_block(&self, block_hash: B256) -> ProviderResult> { let (response_tx, rx) = oneshot::channel(); - let _ = self.to_service.send(CacheAction::GetBlock { block_hash, response_tx }); - rx.await.map_err(|_| ProviderError::CacheServiceUnavailable)? + let _ = self.to_service.send(CacheAction::GetBlockWithSenders { block_hash, response_tx }); + let block_with_senders_res = + rx.await.map_err(|_| ProviderError::CacheServiceUnavailable)?; + + if let Ok(Some(block_with_senders)) = block_with_senders_res { + Ok(Some(block_with_senders.block)) + } else { + Ok(None) + } } /// Requests the [Block] for the block hash, sealed with the given block hash. @@ -169,6 +179,28 @@ impl EthStateCache { Ok(transactions.zip(receipts)) } + /// Requests the [BlockWithSenders] for the block hash + /// + /// Returns `None` if the block does not exist. + pub(crate) async fn get_block_with_senders( + &self, + block_hash: B256, + ) -> ProviderResult> { + let (response_tx, rx) = oneshot::channel(); + let _ = self.to_service.send(CacheAction::GetBlockWithSenders { block_hash, response_tx }); + rx.await.map_err(|_| ProviderError::CacheServiceUnavailable)? 
+ } + + /// Requests the [SealedBlockWithSenders] for the block hash + /// + /// Returns `None` if the block does not exist. + pub(crate) async fn get_sealed_block_with_senders( + &self, + block_hash: B256, + ) -> ProviderResult> { + Ok(self.get_block_with_senders(block_hash).await?.map(|block| block.seal(block_hash))) + } + /// Requests the [Receipt] for the block hash /// /// Returns `None` if the block was not found. @@ -228,7 +260,7 @@ pub(crate) struct EthStateCacheService< LimitReceipts = ByLength, LimitEnvs = ByLength, > where - LimitBlocks: Limiter, + LimitBlocks: Limiter, LimitReceipts: Limiter>, LimitEnvs: Limiter, { @@ -255,17 +287,18 @@ where Provider: StateProviderFactory + BlockReader + EvmEnvProvider + Clone + Unpin + 'static, Tasks: TaskSpawner + Clone + 'static, { - fn on_new_block(&mut self, block_hash: B256, res: ProviderResult>) { + fn on_new_block(&mut self, block_hash: B256, res: ProviderResult>) { if let Some(queued) = self.full_block_cache.remove(&block_hash) { // send the response to queued senders for tx in queued { match tx { - Either::Left(block_tx) => { - let _ = block_tx.send(res.clone()); + Either::Left(block_with_senders) => { + let _ = block_with_senders.send(res.clone()); } Either::Right(transaction_tx) => { let _ = transaction_tx.send( - res.clone().map(|maybe_block| maybe_block.map(|block| block.body)), + res.clone() + .map(|maybe_block| maybe_block.map(|block| block.block.body)), ); } } @@ -316,8 +349,7 @@ where } Some(action) => { match action { - CacheAction::GetBlock { block_hash, response_tx } => { - // check if block is cached + CacheAction::GetBlockWithSenders { block_hash, response_tx } => { if let Some(block) = this.full_block_cache.get(&block_hash).cloned() { let _ = response_tx.send(Ok(Some(block))); continue @@ -333,10 +365,14 @@ where let _permit = rate_limiter.acquire().await; // Only look in the database to prevent situations where we // looking up the tree is blocking - let res = provider - .find_block_by_hash(block_hash, BlockSource::Database); - let _ = action_tx - .send(CacheAction::BlockResult { block_hash, res }); + let block_sender = provider.block_with_senders( + BlockHashOrNumber::Hash(block_hash), + TransactionVariant::WithHash, + ); + let _ = action_tx.send(CacheAction::BlockWithSendersResult { + block_hash, + res: block_sender, + }); })); } } @@ -357,10 +393,14 @@ where let _permit = rate_limiter.acquire().await; // Only look in the database to prevent situations where we // looking up the tree is blocking - let res = provider - .find_block_by_hash(block_hash, BlockSource::Database); - let _ = action_tx - .send(CacheAction::BlockResult { block_hash, res }); + let res = provider.block_with_senders( + BlockHashOrNumber::Hash(block_hash), + TransactionVariant::WithHash, + ); + let _ = action_tx.send(CacheAction::BlockWithSendersResult { + block_hash, + res, + }); })); } } @@ -413,12 +453,20 @@ where })); } } - CacheAction::BlockResult { block_hash, res } => { - this.on_new_block(block_hash, res); - } CacheAction::ReceiptsResult { block_hash, res } => { this.on_new_receipts(block_hash, res); } + CacheAction::BlockWithSendersResult { block_hash, res } => match res { + Ok(Some(block_with_senders)) => { + this.on_new_block(block_hash, Ok(Some(block_with_senders))); + } + Ok(None) => { + this.on_new_block(block_hash, Ok(None)); + } + Err(e) => { + this.on_new_block(block_hash, Err(e)); + } + }, CacheAction::EnvResult { block_hash, res } => { let res = *res; if let Some(queued) = this.evm_env_cache.remove(&block_hash) { @@ -457,14 
+505,14 @@ where /// All message variants sent through the channel enum CacheAction { - GetBlock { block_hash: B256, response_tx: BlockResponseSender }, + GetBlockWithSenders { block_hash: B256, response_tx: BlockWithSendersResponseSender }, GetBlockTransactions { block_hash: B256, response_tx: BlockTransactionsResponseSender }, GetEnv { block_hash: B256, response_tx: EnvResponseSender }, GetReceipts { block_hash: B256, response_tx: ReceiptsResponseSender }, - BlockResult { block_hash: B256, res: ProviderResult> }, + BlockWithSendersResult { block_hash: B256, res: ProviderResult> }, ReceiptsResult { block_hash: B256, res: ProviderResult>> }, EnvResult { block_hash: B256, res: Box> }, - CacheNewCanonicalChain { blocks: Vec, receipts: Vec }, + CacheNewCanonicalChain { blocks: Vec, receipts: Vec }, } struct BlockReceipts { @@ -483,13 +531,13 @@ where // we're only interested in new committed blocks let (blocks, state) = committed.inner(); - let blocks = blocks.iter().map(|(_, block)| block.block.clone()).collect::>(); + let blocks = blocks.iter().map(|(_, block)| block.clone()).collect::>(); // also cache all receipts of the blocks let mut receipts = Vec::with_capacity(blocks.len()); for block in &blocks { let block_receipts = BlockReceipts { - block_hash: block.hash, + block_hash: block.block.hash, receipts: state.receipts_by_block(block.number).to_vec(), }; receipts.push(block_receipts); diff --git a/crates/storage/provider/src/chain.rs b/crates/storage/provider/src/chain.rs index ea240dc6d8f61..3a6ae3fffc9f9 100644 --- a/crates/storage/provider/src/chain.rs +++ b/crates/storage/provider/src/chain.rs @@ -71,9 +71,12 @@ impl Chain { /// Returns the block with matching hash. pub fn block(&self, block_hash: BlockHash) -> Option<&SealedBlock> { - self.blocks - .iter() - .find_map(|(_num, block)| (block.hash() == block_hash).then_some(&block.block)) + self.block_with_senders(block_hash).map(|block| &block.block) + } + + /// Returns the block with matching hash. 
+ pub fn block_with_senders(&self, block_hash: BlockHash) -> Option<&SealedBlockWithSenders> { + self.blocks.iter().find_map(|(_num, block)| (block.hash() == block_hash).then_some(block)) } /// Return post state of the block at the `block_number` or None if block is not known diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 48fa6eaa09c20..6b6c0842d0cd1 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -15,9 +15,9 @@ use reth_primitives::{ snapshot::HighestSnapshots, stage::{StageCheckpoint, StageId}, Address, Block, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithSenders, ChainInfo, - ChainSpec, Header, PruneCheckpoint, PruneSegment, Receipt, SealedBlock, SealedHeader, - TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, - B256, U256, + ChainSpec, Header, PruneCheckpoint, PruneSegment, Receipt, SealedBlock, SealedBlockWithSenders, + SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, + Withdrawal, B256, U256, }; use revm::primitives::{BlockEnv, CfgEnv}; use std::{ @@ -290,6 +290,10 @@ impl BlockReader for ProviderFactory { self.provider()?.pending_block() } + fn pending_block_with_senders(&self) -> ProviderResult> { + self.provider()?.pending_block_with_senders() + } + fn pending_block_and_receipts(&self) -> ProviderResult)>> { self.provider()?.pending_block_and_receipts() } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 81daf53de771d..244011c7b6196 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1073,6 +1073,10 @@ impl BlockReader for DatabaseProvider { Ok(None) } + fn pending_block_with_senders(&self) -> ProviderResult> { + Ok(None) + } + fn pending_block_and_receipts(&self) -> ProviderResult)>> { Ok(None) } diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index df782365e6042..11dba5e6a6c44 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -252,6 +252,10 @@ where Ok(self.tree.pending_block()) } + fn pending_block_with_senders(&self) -> ProviderResult> { + Ok(self.tree.pending_block_with_senders()) + } + fn pending_block_and_receipts(&self) -> ProviderResult)>> { Ok(self.tree.pending_block_and_receipts()) } @@ -637,6 +641,10 @@ where self.tree.block_by_hash(block_hash) } + fn block_with_senders_by_hash(&self, block_hash: BlockHash) -> Option { + self.tree.block_with_senders_by_hash(block_hash) + } + fn buffered_block_by_hash(&self, block_hash: BlockHash) -> Option { self.tree.buffered_block_by_hash(block_hash) } diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index b9f3dd66746b0..8a1cb6ca379ef 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -12,8 +12,8 @@ use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{ keccak256, trie::AccountProof, Account, Address, Block, BlockHash, BlockHashOrNumber, BlockId, BlockNumber, BlockWithSenders, Bytecode, Bytes, ChainInfo, ChainSpec, Header, Receipt, - SealedBlock, SealedHeader, StorageKey, StorageValue, TransactionMeta, TransactionSigned, - 
TransactionSignedNoHash, TxHash, TxNumber, B256, U256, + SealedBlock, SealedBlockWithSenders, SealedHeader, StorageKey, StorageValue, TransactionMeta, + TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, B256, U256, }; use reth_trie::updates::TrieUpdates; use revm::primitives::{BlockEnv, CfgEnv}; @@ -438,6 +438,10 @@ impl BlockReader for MockEthProvider { Ok(None) } + fn pending_block_with_senders(&self) -> ProviderResult> { + Ok(None) + } + fn pending_block_and_receipts(&self) -> ProviderResult)>> { Ok(None) } diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 87632f1d980d3..dc36ac948fc30 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -14,8 +14,8 @@ use reth_primitives::{ trie::AccountProof, Account, Address, Block, BlockHash, BlockHashOrNumber, BlockId, BlockNumber, Bytecode, ChainInfo, ChainSpec, Header, PruneCheckpoint, PruneSegment, Receipt, SealedBlock, - SealedHeader, StorageKey, StorageValue, TransactionMeta, TransactionSigned, - TransactionSignedNoHash, TxHash, TxNumber, B256, MAINNET, U256, + SealedBlockWithSenders, SealedHeader, StorageKey, StorageValue, TransactionMeta, + TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, B256, MAINNET, U256, }; use reth_trie::updates::TrieUpdates; use revm::primitives::{BlockEnv, CfgEnv}; @@ -85,6 +85,10 @@ impl BlockReader for NoopProvider { Ok(None) } + fn pending_block_with_senders(&self) -> ProviderResult> { + Ok(None) + } + fn pending_block_and_receipts(&self) -> ProviderResult)>> { Ok(None) } diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index 76690580620b1..44951a3fcac83 100644 --- a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -85,6 +85,12 @@ pub trait BlockReader: /// and the caller does not know the hash. fn pending_block(&self) -> ProviderResult>; + /// Returns the pending block if available + /// + /// Note: This returns a [SealedBlockWithSenders] because it's expected that this is sealed by + /// the provider and the caller does not know the hash. + fn pending_block_with_senders(&self) -> ProviderResult>; + /// Returns the pending block and receipts if available. 
fn pending_block_and_receipts(&self) -> ProviderResult)>>; From 39a2dc6bc8d511825b1a3d69e6ae65a99c4f0be4 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 20 Nov 2023 16:03:03 +0100 Subject: [PATCH 53/77] chore: add another module parser test (#5496) --- bin/reth/src/args/rpc_server_args.rs | 17 +++++++++++++++++ crates/rpc/rpc-builder/src/lib.rs | 14 ++++++++++++++ 2 files changed, 31 insertions(+) diff --git a/bin/reth/src/args/rpc_server_args.rs b/bin/reth/src/args/rpc_server_args.rs index 32005b1638f24..6541797db4b99 100644 --- a/bin/reth/src/args/rpc_server_args.rs +++ b/bin/reth/src/args/rpc_server_args.rs @@ -532,6 +532,23 @@ mod tests { assert_eq!(apis, expected); } + #[test] + fn test_rpc_server_eth_call_bundle_args() { + let args = CommandParser::::parse_from([ + "reth", + "--http.api", + "eth,admin,debug,eth-call-bundle", + ]) + .args; + + let apis = args.http_api.unwrap(); + let expected = + RpcModuleSelection::try_from_selection(["eth", "admin", "debug", "eth-call-bundle"]) + .unwrap(); + + assert_eq!(apis, expected); + } + #[test] fn test_rpc_server_args_parser_none() { let args = CommandParser::::parse_from(["reth", "--http.api", "none"]).args; diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index b23029b75fc2e..bd2a8db3b711b 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -2081,6 +2081,20 @@ mod tests { assert_eq!(selection, RethRpcModule::EthCallBundle); } + #[test] + fn parse_eth_call_bundle_selection() { + let selection = "eth,admin,debug,eth-call-bundle".parse::().unwrap(); + assert_eq!( + selection, + RpcModuleSelection::Selection(vec![ + RethRpcModule::Eth, + RethRpcModule::Admin, + RethRpcModule::Debug, + RethRpcModule::EthCallBundle, + ]) + ); + } + #[test] fn parse_rpc_module_selection() { let selection = "all".parse::().unwrap(); From 24ca2410bb8264f5a6822f9a0b7c60a46b54b297 Mon Sep 17 00:00:00 2001 From: DoTheBestToGetTheBest <146037313+DoTheBestToGetTheBest@users.noreply.github.com> Date: Mon, 20 Nov 2023 09:46:54 -0800 Subject: [PATCH 54/77] (storage): impl some function (#5499) --- crates/storage/db/src/abstraction/mock.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/crates/storage/db/src/abstraction/mock.rs b/crates/storage/db/src/abstraction/mock.rs index f1f0854fb5cd1..d5427b49fbe70 100644 --- a/crates/storage/db/src/abstraction/mock.rs +++ b/crates/storage/db/src/abstraction/mock.rs @@ -105,31 +105,31 @@ pub struct CursorMock { impl DbCursorRO for CursorMock { fn first(&mut self) -> PairResult { - todo!() + Ok(None) } fn seek_exact(&mut self, _key: T::Key) -> PairResult { - todo!() + Ok(None) } fn seek(&mut self, _key: T::Key) -> PairResult { - todo!() + Ok(None) } fn next(&mut self) -> PairResult { - todo!() + Ok(None) } fn prev(&mut self) -> PairResult { - todo!() + Ok(None) } fn last(&mut self) -> PairResult { - todo!() + Ok(None) } fn current(&mut self) -> PairResult { - todo!() + Ok(None) } fn walk(&mut self, _start_key: Option) -> Result, DatabaseError> { From efe49643bdfdd476d02f34ca20279192f6b85fd8 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 20 Nov 2023 23:12:06 +0100 Subject: [PATCH 55/77] chore: add Extension heading (#5506) --- bin/reth/src/node/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index da07c17f1e804..3019d0cd215ed 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -188,6 +188,7 @@ pub struct NodeCommand { /// Additional 
cli arguments #[clap(flatten)] + #[clap(next_help_heading = "Extension")] pub ext: Ext::Node, } From 59251f0cae46df7a1d84b16f12396982cbf6b419 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 20 Nov 2023 23:44:22 +0100 Subject: [PATCH 56/77] chore: simplify clap next heading and use clap attribute (#5507) --- bin/reth/src/args/database_args.rs | 2 +- bin/reth/src/args/debug_args.rs | 2 +- bin/reth/src/args/dev_args.rs | 2 +- bin/reth/src/args/gas_price_oracle_args.rs | 2 +- bin/reth/src/args/network_args.rs | 2 +- bin/reth/src/args/payload_builder_args.rs | 16 ++++++---------- bin/reth/src/args/pruning_args.rs | 2 +- bin/reth/src/args/rollup_args.rs | 2 +- bin/reth/src/args/rpc_server_args.rs | 2 +- bin/reth/src/args/txpool_args.rs | 19 ++++++++++--------- 10 files changed, 24 insertions(+), 27 deletions(-) diff --git a/bin/reth/src/args/database_args.rs b/bin/reth/src/args/database_args.rs index b94c36d7eeb7e..954390cdc2112 100644 --- a/bin/reth/src/args/database_args.rs +++ b/bin/reth/src/args/database_args.rs @@ -5,7 +5,7 @@ use reth_interfaces::db::LogLevel; /// Parameters for database configuration #[derive(Debug, Args, PartialEq, Default, Clone, Copy)] -#[command(next_help_heading = "Database")] +#[clap(next_help_heading = "Database")] pub struct DatabaseArgs { /// Database logging level. Levels higher than "notice" require a debug build. #[arg(long = "db.log-level", value_enum)] diff --git a/bin/reth/src/args/debug_args.rs b/bin/reth/src/args/debug_args.rs index ffc5e36a27a51..3772aa52cb2fb 100644 --- a/bin/reth/src/args/debug_args.rs +++ b/bin/reth/src/args/debug_args.rs @@ -5,7 +5,7 @@ use reth_primitives::{TxHash, B256}; /// Parameters for debugging purposes #[derive(Debug, Args, PartialEq, Default)] -#[command(next_help_heading = "Debug")] +#[clap(next_help_heading = "Debug")] pub struct DebugArgs { /// Prompt the downloader to download blocks one at a time. /// diff --git a/bin/reth/src/args/dev_args.rs b/bin/reth/src/args/dev_args.rs index ec951e7959120..da046225a89e8 100644 --- a/bin/reth/src/args/dev_args.rs +++ b/bin/reth/src/args/dev_args.rs @@ -6,7 +6,7 @@ use humantime::parse_duration; /// Parameters for Dev testnet configuration #[derive(Debug, Args, PartialEq, Default, Clone, Copy)] -#[command(next_help_heading = "Dev testnet")] +#[clap(next_help_heading = "Dev testnet")] pub struct DevArgs { /// Start the node in dev mode /// diff --git a/bin/reth/src/args/gas_price_oracle_args.rs b/bin/reth/src/args/gas_price_oracle_args.rs index 9d417f3481591..42237f91276c6 100644 --- a/bin/reth/src/args/gas_price_oracle_args.rs +++ b/bin/reth/src/args/gas_price_oracle_args.rs @@ -2,7 +2,7 @@ use clap::Args; /// Parameters to configure Gas Price Oracle #[derive(Debug, Clone, Args, PartialEq, Eq, Default)] -#[command(next_help_heading = "Gas Price Oracle")] +#[clap(next_help_heading = "Gas Price Oracle")] pub struct GasPriceOracleArgs { /// Number of recent blocks to check for gas price #[arg(long = "gpo.blocks", default_value = "20")] diff --git a/bin/reth/src/args/network_args.rs b/bin/reth/src/args/network_args.rs index 70053d4ad4054..44fd622825992 100644 --- a/bin/reth/src/args/network_args.rs +++ b/bin/reth/src/args/network_args.rs @@ -12,7 +12,7 @@ use std::{net::Ipv4Addr, path::PathBuf, sync::Arc}; /// Parameters for configuring the network more granularity via CLI #[derive(Debug, Args)] -#[command(next_help_heading = "Networking")] +#[clap(next_help_heading = "Networking")] pub struct NetworkArgs { /// Disable the discovery service. 
#[command(flatten)] diff --git a/bin/reth/src/args/payload_builder_args.rs b/bin/reth/src/args/payload_builder_args.rs index 93f9ad2571d4d..7de104987c796 100644 --- a/bin/reth/src/args/payload_builder_args.rs +++ b/bin/reth/src/args/payload_builder_args.rs @@ -11,30 +11,26 @@ use std::{borrow::Cow, ffi::OsStr, time::Duration}; /// Parameters for configuring the Payload Builder #[derive(Debug, Args, PartialEq, Default)] +#[clap(next_help_heading = "Builder")] pub struct PayloadBuilderArgs { /// Block extra data set by the payload builder. - #[arg(long = "builder.extradata", help_heading = "Builder", value_parser=ExtradataValueParser::default(), default_value_t = default_extradata())] + #[arg(long = "builder.extradata", value_parser=ExtradataValueParser::default(), default_value_t = default_extradata())] pub extradata: String, /// Target gas ceiling for built blocks. - #[arg( - long = "builder.gaslimit", - help_heading = "Builder", - default_value = "30000000", - value_name = "GAS_LIMIT" - )] + #[arg(long = "builder.gaslimit", default_value = "30000000", value_name = "GAS_LIMIT")] pub max_gas_limit: u64, /// The interval at which the job should build a new payload after the last (in seconds). - #[arg(long = "builder.interval", help_heading = "Builder", value_parser = parse_duration_from_secs, default_value = "1", value_name = "SECONDS")] + #[arg(long = "builder.interval", value_parser = parse_duration_from_secs, default_value = "1", value_name = "SECONDS")] pub interval: Duration, /// The deadline for when the payload builder job should resolve. - #[arg(long = "builder.deadline", help_heading = "Builder", value_parser = parse_duration_from_secs, default_value = "12", value_name = "SECONDS")] + #[arg(long = "builder.deadline", value_parser = parse_duration_from_secs, default_value = "12", value_name = "SECONDS")] pub deadline: Duration, /// Maximum number of tasks to spawn for building a payload. - #[arg(long = "builder.max-tasks", help_heading = "Builder", default_value = "3", value_parser = RangedU64ValueParser::::new().range(1..))] + #[arg(long = "builder.max-tasks", default_value = "3", value_parser = RangedU64ValueParser::::new().range(1..))] pub max_payload_tasks: usize, /// By default the pending block equals the latest block diff --git a/bin/reth/src/args/pruning_args.rs b/bin/reth/src/args/pruning_args.rs index f233c810efa4a..251e1a9186694 100644 --- a/bin/reth/src/args/pruning_args.rs +++ b/bin/reth/src/args/pruning_args.rs @@ -9,7 +9,7 @@ use std::sync::Arc; /// Parameters for pruning and full node #[derive(Debug, Args, PartialEq, Default)] -#[command(next_help_heading = "Pruning")] +#[clap(next_help_heading = "Pruning")] pub struct PruningArgs { /// Run full node. Only the most recent [`MINIMUM_PRUNING_DISTANCE`] block states are stored. /// This flag takes priority over pruning configuration in reth.toml. 
diff --git a/bin/reth/src/args/rollup_args.rs b/bin/reth/src/args/rollup_args.rs index abd38f6db7a35..c97fe19147e5f 100644 --- a/bin/reth/src/args/rollup_args.rs +++ b/bin/reth/src/args/rollup_args.rs @@ -2,7 +2,7 @@ /// Parameters for rollup configuration #[derive(Debug, clap::Args)] -#[command(next_help_heading = "Rollup")] +#[clap(next_help_heading = "Rollup")] pub struct RollupArgs { /// HTTP endpoint for the sequencer mempool #[arg(long = "rollup.sequencer-http", value_name = "HTTP_URL")] diff --git a/bin/reth/src/args/rpc_server_args.rs b/bin/reth/src/args/rpc_server_args.rs index 6541797db4b99..fb66122456c8e 100644 --- a/bin/reth/src/args/rpc_server_args.rs +++ b/bin/reth/src/args/rpc_server_args.rs @@ -62,7 +62,7 @@ pub(crate) const RPC_DEFAULT_MAX_CONNECTIONS: u32 = 500; /// Parameters for configuring the rpc more granularity via CLI #[derive(Debug, Clone, Args)] -#[command(next_help_heading = "RPC")] +#[clap(next_help_heading = "RPC")] pub struct RpcServerArgs { /// Enable the HTTP-RPC server #[arg(long, default_value_if("dev", "true", "true"))] diff --git a/bin/reth/src/args/txpool_args.rs b/bin/reth/src/args/txpool_args.rs index 0f01f5c422843..f6c3a37bbf754 100644 --- a/bin/reth/src/args/txpool_args.rs +++ b/bin/reth/src/args/txpool_args.rs @@ -9,38 +9,39 @@ use reth_transaction_pool::{ /// Parameters for debugging purposes #[derive(Debug, Args, PartialEq, Default)] +#[clap(next_help_heading = "TxPool")] pub struct TxPoolArgs { /// Max number of transaction in the pending sub-pool. - #[arg(long = "txpool.pending_max_count", help_heading = "TxPool", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)] + #[arg(long = "txpool.pending_max_count", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)] pub pending_max_count: usize, /// Max size of the pending sub-pool in megabytes. - #[arg(long = "txpool.pending_max_size", help_heading = "TxPool", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)] + #[arg(long = "txpool.pending_max_size", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)] pub pending_max_size: usize, /// Max number of transaction in the basefee sub-pool - #[arg(long = "txpool.basefee_max_count", help_heading = "TxPool", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)] + #[arg(long = "txpool.basefee_max_count", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)] pub basefee_max_count: usize, /// Max size of the basefee sub-pool in megabytes. - #[arg(long = "txpool.basefee_max_size", help_heading = "TxPool", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)] + #[arg(long = "txpool.basefee_max_size", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)] pub basefee_max_size: usize, /// Max number of transaction in the queued sub-pool - #[arg(long = "txpool.queued_max_count", help_heading = "TxPool", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)] + #[arg(long = "txpool.queued_max_count", default_value_t = TXPOOL_SUBPOOL_MAX_TXS_DEFAULT)] pub queued_max_count: usize, /// Max size of the queued sub-pool in megabytes. 
- #[arg(long = "txpool.queued_max_size", help_heading = "TxPool", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)] + #[arg(long = "txpool.queued_max_size", default_value_t = TXPOOL_SUBPOOL_MAX_SIZE_MB_DEFAULT)] pub queued_max_size: usize, /// Max number of executable transaction slots guaranteed per account - #[arg(long = "txpool.max_account_slots", help_heading = "TxPool", default_value_t = TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER)] + #[arg(long = "txpool.max_account_slots", default_value_t = TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER)] pub max_account_slots: usize, /// Price bump (in %) for the transaction pool underpriced check. - #[arg(long = "txpool.pricebump", help_heading = "TxPool", default_value_t = DEFAULT_PRICE_BUMP)] + #[arg(long = "txpool.pricebump", default_value_t = DEFAULT_PRICE_BUMP)] pub price_bump: u128, /// Price bump percentage to replace an already existing blob transaction - #[arg(long = "blobpool.pricebump", help_heading = "TxPool", default_value_t = REPLACE_BLOB_PRICE_BUMP)] + #[arg(long = "blobpool.pricebump", default_value_t = REPLACE_BLOB_PRICE_BUMP)] pub blob_transaction_price_bump: u128, } From 0dc90eba19a2074692a9d60c8d873e4453b2b0b8 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 21 Nov 2023 03:19:30 +0100 Subject: [PATCH 57/77] chore: remove duplicate serde helpers (#5508) --- crates/primitives/src/genesis.rs | 3 +- crates/primitives/src/serde_helper/mod.rs | 69 +----- crates/primitives/src/serde_helper/num.rs | 216 ------------------ crates/primitives/src/serde_helper/storage.rs | 102 --------- .../rpc/rpc-types/src/eth/engine/payload.rs | 2 +- crates/rpc/rpc-types/src/lib.rs | 2 +- crates/rpc/rpc-types/src/serde_helpers/mod.rs | 28 ++- crates/rpc/rpc-types/src/serde_helpers/num.rs | 84 +++++++ .../rpc-types/src/serde_helpers/u64_hex.rs | 33 --- 9 files changed, 105 insertions(+), 434 deletions(-) delete mode 100644 crates/primitives/src/serde_helper/num.rs delete mode 100644 crates/primitives/src/serde_helper/storage.rs delete mode 100644 crates/rpc/rpc-types/src/serde_helpers/u64_hex.rs diff --git a/crates/primitives/src/genesis.rs b/crates/primitives/src/genesis.rs index dd1f4a0ffd64e..02b4700fe8d90 100644 --- a/crates/primitives/src/genesis.rs +++ b/crates/primitives/src/genesis.rs @@ -2,8 +2,9 @@ use crate::{ constants::EMPTY_ROOT_HASH, keccak256, serde_helper::{ - deserialize_json_ttd_opt, deserialize_json_u256, deserialize_storage_map, + json_u256::{deserialize_json_ttd_opt, deserialize_json_u256}, num::{u64_hex_or_decimal, u64_hex_or_decimal_opt}, + storage::deserialize_storage_map, }, trie::{HashBuilder, Nibbles}, Account, Address, Bytes, B256, KECCAK_EMPTY, U256, diff --git a/crates/primitives/src/serde_helper/mod.rs b/crates/primitives/src/serde_helper/mod.rs index 6e78c5a7984f7..2e897ebcdab12 100644 --- a/crates/primitives/src/serde_helper/mod.rs +++ b/crates/primitives/src/serde_helper/mod.rs @@ -1,73 +1,6 @@ //! [serde] utilities. -use crate::{B256, U64}; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; - -mod storage; -pub use storage::*; - -pub use reth_rpc_types::json_u256::*; - -pub mod num; +pub use reth_rpc_types::serde_helpers::*; mod prune; pub use prune::deserialize_opt_prune_mode_with_min_blocks; - -/// serde functions for handling primitive `u64` as [`U64`]. 
-pub mod u64_hex { - use super::*; - - /// Deserializes an `u64` from [U64] accepting a hex quantity string with optional 0x prefix - pub fn deserialize<'de, D>(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - U64::deserialize(deserializer).map(|val| val.to()) - } - - /// Serializes u64 as hex string - pub fn serialize(value: &u64, s: S) -> Result { - U64::from(*value).serialize(s) - } -} - -/// Serialize a byte vec as a hex string _without_ the "0x" prefix. -/// -/// This behaves the same as [`hex::encode`](crate::hex::encode). -pub fn serialize_hex_string_no_prefix(x: T, s: S) -> Result -where - S: Serializer, - T: AsRef<[u8]>, -{ - s.serialize_str(&crate::hex::encode(x.as_ref())) -} - -/// Serialize a [B256] as a hex string _without_ the "0x" prefix. -pub fn serialize_b256_hex_string_no_prefix(x: &B256, s: S) -> Result -where - S: Serializer, -{ - s.serialize_str(&format!("{x:x}")) -} - -#[cfg(test)] -mod tests { - use super::*; - use serde::{Deserialize, Serialize}; - - #[test] - fn test_hex_u64() { - #[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] - struct Value { - #[serde(with = "u64_hex")] - inner: u64, - } - - let val = Value { inner: 1000 }; - let s = serde_json::to_string(&val).unwrap(); - assert_eq!(s, "{\"inner\":\"0x3e8\"}"); - - let deserialized: Value = serde_json::from_str(&s).unwrap(); - assert_eq!(val, deserialized); - } -} diff --git a/crates/primitives/src/serde_helper/num.rs b/crates/primitives/src/serde_helper/num.rs deleted file mode 100644 index e2262ccca78b9..0000000000000 --- a/crates/primitives/src/serde_helper/num.rs +++ /dev/null @@ -1,216 +0,0 @@ -//! Numeric helpers - -use crate::{U256, U64}; -use serde::{de, Deserialize, Deserializer, Serialize}; -use std::str::FromStr; - -/// A `u64` wrapper type that deserializes from hex or a u64 and serializes as hex. -/// -/// -/// ```rust -/// use reth_primitives::serde_helper::num::U64HexOrNumber; -/// let number_json = "100"; -/// let hex_json = "\"0x64\""; -/// -/// let number: U64HexOrNumber = serde_json::from_str(number_json).unwrap(); -/// let hex: U64HexOrNumber = serde_json::from_str(hex_json).unwrap(); -/// assert_eq!(number, hex); -/// assert_eq!(hex.to(), 100); -/// ``` -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize)] -pub struct U64HexOrNumber(U64); - -impl U64HexOrNumber { - /// Returns the wrapped u64 - pub fn to(self) -> u64 { - self.0.to() - } -} - -impl From for U64HexOrNumber { - fn from(value: u64) -> Self { - Self(U64::from(value)) - } -} - -impl From for U64HexOrNumber { - fn from(value: U64) -> Self { - Self(value) - } -} - -impl From for u64 { - fn from(value: U64HexOrNumber) -> Self { - value.to() - } -} - -impl From for U64 { - fn from(value: U64HexOrNumber) -> Self { - value.0 - } -} - -impl<'de> Deserialize<'de> for U64HexOrNumber { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - #[derive(Deserialize)] - #[serde(untagged)] - enum NumberOrHexU64 { - Hex(U64), - Int(u64), - } - match NumberOrHexU64::deserialize(deserializer)? 
-            NumberOrHexU64::Int(val) => Ok(val.into()),
-            NumberOrHexU64::Hex(val) => Ok(val.into()),
-        }
-    }
-}
-
-/// serde functions for handling primitive `u64` as [U64]
-pub mod u64_hex_or_decimal {
-    use crate::serde_helper::num::U64HexOrNumber;
-    use serde::{Deserialize, Deserializer, Serialize, Serializer};
-
-    /// Deserializes an `u64` accepting a hex quantity string with optional 0x prefix or
-    /// a number
-    pub fn deserialize<'de, D>(deserializer: D) -> Result<u64, D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        U64HexOrNumber::deserialize(deserializer).map(Into::into)
-    }
-
-    /// Serializes u64 as hex string
-    pub fn serialize<S: Serializer>(value: &u64, s: S) -> Result<S::Ok, S::Error> {
-        U64HexOrNumber::from(*value).serialize(s)
-    }
-}
-
-/// serde functions for handling primitive optional `u64` as [U64]
-pub mod u64_hex_or_decimal_opt {
-    use crate::serde_helper::num::U64HexOrNumber;
-    use serde::{Deserialize, Deserializer, Serialize, Serializer};
-
-    /// Deserializes an `u64` accepting a hex quantity string with optional 0x prefix or
-    /// a number
-    pub fn deserialize<'de, D>(deserializer: D) -> Result<Option<u64>, D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        match Option::<U64HexOrNumber>::deserialize(deserializer)? {
-            Some(val) => Ok(Some(val.into())),
-            None => Ok(None),
-        }
-    }
-
-    /// Serializes u64 as hex string
-    pub fn serialize<S: Serializer>(value: &Option<u64>, s: S) -> Result<S::Ok, S::Error> {
-        match value {
-            Some(val) => U64HexOrNumber::from(*val).serialize(s),
-            None => s.serialize_none(),
-        }
-    }
-}
-
-/// Deserializes the input into an `Option<U256>`, using [`from_int_or_hex`] to deserialize the
-/// inner value.
-pub fn from_int_or_hex_opt<'de, D>(deserializer: D) -> Result<Option<U256>, D::Error>
-where
-    D: Deserializer<'de>,
-{
-    match Option::<NumberOrHexU256>::deserialize(deserializer)? {
-        Some(val) => val.try_into_u256().map(Some),
-        None => Ok(None),
-    }
-}
-
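// A minimal usage sketch for the helpers above (not spelled out in the
// patch): `BlockRange` is a hypothetical struct, shown only to illustrate
// the `#[serde(with = ...)]` wiring, and assumes the modules are in scope.
// Both `{"from": 100}` and `{"from": "0x64"}` deserialize to the same value.
#[derive(serde::Serialize, serde::Deserialize)]
struct BlockRange {
    #[serde(with = "u64_hex_or_decimal")]
    from: u64,
    #[serde(default, with = "u64_hex_or_decimal_opt")]
    to: Option<u64>,
}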
-/// Deserializes the input into a U256, accepting both 0x-prefixed hex and decimal strings with
-/// arbitrary precision, defined by serde_json's [`Number`](serde_json::Number).
-pub fn from_int_or_hex<'de, D>(deserializer: D) -> Result<U256, D::Error>
-where
-    D: Deserializer<'de>,
-{
-    NumberOrHexU256::deserialize(deserializer)?.try_into_u256()
-}
-
-#[derive(Deserialize)]
-#[serde(untagged)]
-enum NumberOrHexU256 {
-    Int(serde_json::Number),
-    Hex(U256),
-}
-
-impl NumberOrHexU256 {
-    fn try_into_u256<E: de::Error>(self) -> Result<U256, E> {
-        match self {
-            NumberOrHexU256::Int(num) => {
-                U256::from_str(num.to_string().as_str()).map_err(E::custom)
-            }
-            NumberOrHexU256::Hex(val) => Ok(val),
-        }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn test_u256_int_or_hex() {
-        #[derive(Debug, Deserialize, PartialEq, Eq)]
-        struct V(#[serde(deserialize_with = "from_int_or_hex")] U256);
-
-        proptest::proptest!(|(value: u64)| {
-            let u256_val = U256::from(value);
-
-            let num_obj = serde_json::to_string(&value).unwrap();
-            let hex_obj = serde_json::to_string(&u256_val).unwrap();
-
-            let int_val: V = serde_json::from_str(&num_obj).unwrap();
-            let hex_val = serde_json::from_str(&hex_obj).unwrap();
-            assert_eq!(int_val, hex_val);
-        });
-    }
-
-    #[test]
-    fn test_u256_int_or_hex_opt() {
-        #[derive(Debug, Deserialize, PartialEq, Eq)]
-        struct V(#[serde(deserialize_with = "from_int_or_hex_opt")] Option<U256>);
-
-        let null = serde_json::to_string(&None::<U256>).unwrap();
-        let val: V = serde_json::from_str(&null).unwrap();
-        assert!(val.0.is_none());
-
-        proptest::proptest!(|(value: u64)| {
-            let u256_val = U256::from(value);
-
-            let num_obj = serde_json::to_string(&value).unwrap();
-            let hex_obj = serde_json::to_string(&u256_val).unwrap();
-
-            let int_val: V = serde_json::from_str(&num_obj).unwrap();
-            let hex_val = serde_json::from_str(&hex_obj).unwrap();
-            assert_eq!(int_val, hex_val);
-            assert_eq!(int_val.0, Some(u256_val));
-        });
-    }
-
-    #[test]
-    fn serde_hex_or_number_u64() {
-        #[derive(Debug, Deserialize, PartialEq, Eq)]
-        struct V(U64HexOrNumber);
-
-        proptest::proptest!(|(value: u64)| {
-            let val = U64::from(value);
-
-            let num_obj = serde_json::to_string(&value).unwrap();
-            let hex_obj = serde_json::to_string(&val).unwrap();
-
-            let int_val: V = serde_json::from_str(&num_obj).unwrap();
-            let hex_val = serde_json::from_str(&hex_obj).unwrap();
-            assert_eq!(int_val, hex_val);
-        });
-    }
-}
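// Minimal sketch of the behavior exercised by the tests above, without
// proptest: a JSON number and a 0x-prefixed hex string decode to the same
// U256. `Price` is a hypothetical wrapper used only for illustration.
#[derive(serde::Deserialize)]
struct Price(#[serde(deserialize_with = "from_int_or_hex")] U256);

fn price_demo() {
    let dec: Price = serde_json::from_str("100").unwrap();
    let hex: Price = serde_json::from_str("\"0x64\"").unwrap();
    assert_eq!(dec.0, hex.0);
}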
diff --git a/crates/primitives/src/serde_helper/storage.rs b/crates/primitives/src/serde_helper/storage.rs
deleted file mode 100644
index 7d0b5045f4530..0000000000000
--- a/crates/primitives/src/serde_helper/storage.rs
+++ /dev/null
@@ -1,102 +0,0 @@
-use crate::{Bytes, B256, U256};
-use serde::{Deserialize, Deserializer, Serialize};
-use std::{collections::HashMap, fmt::Write};
-
-/// A storage key type that can be serialized to and from a hex string up to 32 bytes. Used for
-/// `eth_getStorageAt` and `eth_getProof` RPCs.
-///
-/// This is a wrapper type meant to mirror geth's serialization and deserialization behavior for
-/// storage keys.
-///
-/// In `eth_getStorageAt`, this is used for deserialization of the `index` field. Internally, the
-/// index is a [B256], but in `eth_getStorageAt` requests, its serialization can be _up to_ 32
-/// bytes. To support this, the storage key is deserialized first as a U256, and converted to a
-/// B256 for use internally.
-///
-/// `eth_getProof` also takes storage keys up to 32 bytes as input, so the `keys` field is
-/// similarly deserialized. However, geth populates the storage proof `key` fields in the response
-/// by mirroring the `key` field used in the input.
-/// * See how `storageKey`s (the input) are populated in the `StorageResult` (the output):
-///
-///
-/// The contained [B256] and From implementation for String are used to preserve the input and
-/// implement this behavior from geth.
-#[derive(Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)]
-#[serde(from = "U256", into = "String")]
-pub struct JsonStorageKey(pub B256);
-
-impl From<U256> for JsonStorageKey {
-    fn from(value: U256) -> Self {
-        // SAFETY: Address (B256) and U256 have the same number of bytes
-        JsonStorageKey(B256::from(value.to_be_bytes()))
-    }
-}
-
-impl From<JsonStorageKey> for String {
-    fn from(value: JsonStorageKey) -> Self {
-        // SAFETY: Address (B256) and U256 have the same number of bytes
-        let uint = U256::from_be_bytes(value.0 .0);
-
-        // serialize byte by byte
-        //
-        // this is mainly so we can return an output that hive testing expects, because the
-        // `eth_getProof` implementation in geth simply mirrors the input
-        //
-        // see the use of `hexKey` in the `eth_getProof` response:
-        //
-        let bytes = uint.to_be_bytes_trimmed_vec();
-        let mut hex = String::with_capacity(2 + bytes.len() * 2);
-        hex.push_str("0x");
-        for byte in bytes {
-            write!(hex, "{:02x}", byte).unwrap();
-        }
-        hex
-    }
-}
-
-/// Converts a Bytes value into a B256, accepting inputs that are less than 32 bytes long. These
-/// inputs will be left padded with zeros.
-pub fn from_bytes_to_b256<'de, D>(bytes: Bytes) -> Result<B256, D::Error>
-where
-    D: Deserializer<'de>,
-{
-    if bytes.0.len() > 32 {
-        return Err(serde::de::Error::custom("input too long to be a B256"))
-    }
-
-    // left pad with zeros to 32 bytes
-    let mut padded = [0u8; 32];
-    padded[32 - bytes.0.len()..].copy_from_slice(&bytes.0);
-
-    // then convert to B256 without a panic
-    Ok(B256::from_slice(&padded))
-}
-
-/// Deserializes the input into an `Option<HashMap<B256, B256>>`, using [from_bytes_to_b256] which
-/// allows cropped values:
-///
-/// ```json
-/// {
-///     "0x0000000000000000000000000000000000000000000000000000000000000001": "0x22"
-/// }
-/// ```
-pub fn deserialize_storage_map<'de, D>(
-    deserializer: D,
-) -> Result<Option<HashMap<B256, B256>>, D::Error>
-where
-    D: Deserializer<'de>,
-{
-    let map = Option::<HashMap<Bytes, Bytes>>::deserialize(deserializer)?;
-    match map {
-        Some(mut map) => {
-            let mut res_map = HashMap::with_capacity(map.len());
-            for (k, v) in map.drain() {
-                let k_deserialized = from_bytes_to_b256::<'de, D>(k)?;
-                let v_deserialized = from_bytes_to_b256::<'de, D>(v)?;
-                res_map.insert(k_deserialized, v_deserialized);
-            }
-            Ok(Some(res_map))
-        }
-        None => Ok(None),
-    }
-}
diff --git a/crates/rpc/rpc-types/src/eth/engine/payload.rs b/crates/rpc/rpc-types/src/eth/engine/payload.rs
index 739a6a10ad41b..9ae18a351b109 100644
--- a/crates/rpc/rpc-types/src/eth/engine/payload.rs
+++ b/crates/rpc/rpc-types/src/eth/engine/payload.rs
@@ -447,7 +447,7 @@ pub struct OptimismPayloadAttributes {
     #[serde(
         default,
         skip_serializing_if = "Option::is_none",
-        deserialize_with = "crate::serde_helpers::u64_hex::u64_hex_opt::deserialize"
+        deserialize_with = "crate::serde_helpers::u64_hex_opt::deserialize"
     )]
     pub gas_limit: Option<u64>,
 }
diff --git a/crates/rpc/rpc-types/src/lib.rs b/crates/rpc/rpc-types/src/lib.rs
index 8c9e22485bbda..f71f7660810ec 100644
--- a/crates/rpc/rpc-types/src/lib.rs
+++ b/crates/rpc/rpc-types/src/lib.rs
@@ -20,7 +20,7 @@ mod otterscan;
 mod peer;
 pub mod relay;
 mod rpc;
-mod serde_helpers;
+pub mod serde_helpers;

 pub use admin::*;
 pub use eth::*;
diff --git a/crates/rpc/rpc-types/src/serde_helpers/mod.rs b/crates/rpc/rpc-types/src/serde_helpers/mod.rs
index 1c45b0d56d425..adeb4a24583f4 100644
--- a/crates/rpc/rpc-types/src/serde_helpers/mod.rs
+++ b/crates/rpc/rpc-types/src/serde_helpers/mod.rs
@@ -1,22 +1,18 @@
 //! Serde helpers for primitive types.

-use alloy_primitives::U256;
-use serde::{Deserialize, Deserializer, Serializer};
+use alloy_primitives::B256;
+use serde::Serializer;

 pub mod json_u256;
+pub use json_u256::JsonU256;
+
+/// Helpers for dealing with numbers.
 pub mod num;
+pub use num::*;
+
 /// Storage related helpers.
 pub mod storage;
-pub mod u64_hex;
-
-/// Deserializes the input into a U256, accepting both 0x-prefixed hex and decimal strings with
-/// arbitrary precision, defined by serde_json's [`Number`](serde_json::Number).
-pub fn from_int_or_hex<'de, D>(deserializer: D) -> Result<U256, D::Error>
-where
-    D: Deserializer<'de>,
-{
-    num::NumberOrHexU256::deserialize(deserializer)?.try_into_u256()
-}
+pub use storage::JsonStorageKey;

 /// Serialize a byte vec as a hex string _without_ the "0x" prefix.
 ///
@@ -28,3 +24,11 @@ where
 {
     s.serialize_str(&alloy_primitives::hex::encode(x.as_ref()))
 }
+
+/// Serialize a [B256] as a hex string _without_ the "0x" prefix.
+pub fn serialize_b256_hex_string_no_prefix<S>(x: &B256, s: S) -> Result<S::Ok, S::Error>
+where
+    S: Serializer,
+{
+    s.serialize_str(&format!("{x:x}"))
+}
diff --git a/crates/rpc/rpc-types/src/serde_helpers/num.rs b/crates/rpc/rpc-types/src/serde_helpers/num.rs
index d1e6959065fe6..4c34471cd7d07 100644
--- a/crates/rpc/rpc-types/src/serde_helpers/num.rs
+++ b/crates/rpc/rpc-types/src/serde_helpers/num.rs
@@ -69,6 +69,68 @@ impl<'de> Deserialize<'de> for U64HexOrNumber {
     }
 }

+/// serde functions for handling `u64` as [U64]
+pub mod u64_hex {
+    use alloy_primitives::U64;
+    use serde::{Deserialize, Deserializer, Serialize, Serializer};
+
+    /// Deserializes an `u64` from [U64] accepting a hex quantity string with optional 0x prefix
+    pub fn deserialize<'de, D>(deserializer: D) -> Result<u64, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        U64::deserialize(deserializer).map(|val| val.to())
+    }
+
+    /// Serializes u64 as hex string
+    pub fn serialize<S: Serializer>(value: &u64, s: S) -> Result<S::Ok, S::Error> {
+        U64::from(*value).serialize(s)
+    }
+}
+
+/// serde functions for handling `Option<u64>` as [U64]
+pub mod u64_hex_opt {
+    use alloy_primitives::U64;
+    use serde::{Deserialize, Deserializer, Serialize, Serializer};
+
+    /// Serializes u64 as hex string
+    pub fn serialize<S: Serializer>(value: &Option<u64>, s: S) -> Result<S::Ok, S::Error> {
+        match value {
+            Some(val) => U64::from(*val).serialize(s),
+            None => s.serialize_none(),
+        }
+    }
+
+    /// Deserializes an `Option<u64>` from [U64] accepting a hex quantity string with optional 0x prefix
+    pub fn deserialize<'de, D>(deserializer: D) -> Result<Option<u64>, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        Ok(U64::deserialize(deserializer)
+            .map_or(None, |v| Some(u64::from_be_bytes(v.to_be_bytes()))))
+    }
+}
+
+/// serde functions for handling primitive `u64` as [U64]
+pub mod u64_hex_or_decimal {
+    use crate::serde_helpers::num::U64HexOrNumber;
+    use serde::{Deserialize, Deserializer, Serialize, Serializer};
+
+    /// Deserializes an `u64` accepting a hex quantity string with optional 0x prefix or
+    /// a number
+    pub fn deserialize<'de, D>(deserializer: D) -> Result<u64, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        U64HexOrNumber::deserialize(deserializer).map(Into::into)
+    }
+
+    /// Serializes u64 as hex string
+    pub fn serialize<S: Serializer>(value: &u64, s: S) -> Result<S::Ok, S::Error> {
+        U64HexOrNumber::from(*value).serialize(s)
+    }
+}
+
 /// serde functions for handling primitive optional `u64` as [U64]
 pub mod u64_hex_or_decimal_opt {
     use crate::serde_helpers::num::U64HexOrNumber;
@@ -137,3 +199,25 @@ where
 {
NumberOrHexU256::deserialize(deserializer)?.try_into_u256() } + +#[cfg(test)] +mod tests { + use super::*; + use serde::{Deserialize, Serialize}; + + #[test] + fn test_hex_u64() { + #[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] + struct Value { + #[serde(with = "u64_hex")] + inner: u64, + } + + let val = Value { inner: 1000 }; + let s = serde_json::to_string(&val).unwrap(); + assert_eq!(s, "{\"inner\":\"0x3e8\"}"); + + let deserialized: Value = serde_json::from_str(&s).unwrap(); + assert_eq!(val, deserialized); + } +} diff --git a/crates/rpc/rpc-types/src/serde_helpers/u64_hex.rs b/crates/rpc/rpc-types/src/serde_helpers/u64_hex.rs deleted file mode 100644 index e73061cdc9365..0000000000000 --- a/crates/rpc/rpc-types/src/serde_helpers/u64_hex.rs +++ /dev/null @@ -1,33 +0,0 @@ -//! Helper to deserialize an `u64` from [U64] accepting a hex quantity string with optional 0x -//! prefix - -use alloy_primitives::U64; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; - -/// Deserializes an `u64` from [U64] accepting a hex quantity string with optional 0x prefix -pub fn deserialize<'de, D>(deserializer: D) -> Result -where - D: Deserializer<'de>, -{ - U64::deserialize(deserializer).map(|val| val.to()) -} - -/// Serializes u64 as hex string -pub fn serialize(value: &u64, s: S) -> Result { - U64::from(*value).serialize(s) -} - -/// serde functions for handling `Option` as [U64] -pub mod u64_hex_opt { - use alloy_primitives::U64; - use serde::{Deserialize, Deserializer}; - - /// Deserializes an `Option` from [U64] accepting a hex quantity string with optional 0x prefix - pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> - where - D: Deserializer<'de>, - { - Ok(U64::deserialize(deserializer) - .map_or(None, |v| Some(u64::from_be_bytes(v.to_be_bytes())))) - } -} From 2f220a79b3295b4c80c30336cb723614e9d31f79 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 21 Nov 2023 03:48:52 +0100 Subject: [PATCH 58/77] test: add serialize 0x1 test block number (#5511) --- crates/rpc/rpc-types/src/eth/block.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/crates/rpc/rpc-types/src/eth/block.rs b/crates/rpc/rpc-types/src/eth/block.rs index cdad6cf12749b..17ecf8ce755d0 100644 --- a/crates/rpc/rpc-types/src/eth/block.rs +++ b/crates/rpc/rpc-types/src/eth/block.rs @@ -938,4 +938,11 @@ mod tests { let block2 = serde_json::from_str::(&serialized).unwrap(); assert_eq!(block, block2); } + + #[test] + fn compact_block_number_serde() { + let num: BlockNumberOrTag = 1u64.into(); + let serialized = serde_json::to_string(&num).unwrap(); + assert_eq!(serialized, "\"0x1\""); + } } From 1064da5d41a2c4a298c3b41fd15d19282c68a133 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 21 Nov 2023 04:14:03 +0100 Subject: [PATCH 59/77] feat: add get_block_transactions_ecrecovered (#5500) --- crates/rpc/rpc/src/eth/cache/mod.rs | 39 +++++++++++++++++------------ 1 file changed, 23 insertions(+), 16 deletions(-) diff --git a/crates/rpc/rpc/src/eth/cache/mod.rs b/crates/rpc/rpc/src/eth/cache/mod.rs index 735d15418d23a..2ca9406cb10a7 100644 --- a/crates/rpc/rpc/src/eth/cache/mod.rs +++ b/crates/rpc/rpc/src/eth/cache/mod.rs @@ -4,7 +4,7 @@ use futures::{future::Either, Stream, StreamExt}; use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{ Block, BlockHashOrNumber, BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, - TransactionSigned, B256, + TransactionSigned, TransactionSignedEcRecovered, B256, }; use reth_provider::{ BlockReader, 
CanonStateNotification, EvmEnvProvider, StateProviderFactory, TransactionVariant, @@ -131,7 +131,7 @@ impl EthStateCache { /// Requests the [Block] for the block hash /// /// Returns `None` if the block does not exist. - pub(crate) async fn get_block(&self, block_hash: B256) -> ProviderResult> { + pub async fn get_block(&self, block_hash: B256) -> ProviderResult> { let (response_tx, rx) = oneshot::channel(); let _ = self.to_service.send(CacheAction::GetBlockWithSenders { block_hash, response_tx }); let block_with_senders_res = @@ -147,17 +147,14 @@ impl EthStateCache { /// Requests the [Block] for the block hash, sealed with the given block hash. /// /// Returns `None` if the block does not exist. - pub(crate) async fn get_sealed_block( - &self, - block_hash: B256, - ) -> ProviderResult> { + pub async fn get_sealed_block(&self, block_hash: B256) -> ProviderResult> { Ok(self.get_block(block_hash).await?.map(|block| block.seal(block_hash))) } /// Requests the transactions of the [Block] /// /// Returns `None` if the block does not exist. - pub(crate) async fn get_block_transactions( + pub async fn get_block_transactions( &self, block_hash: B256, ) -> ProviderResult>> { @@ -166,8 +163,21 @@ impl EthStateCache { rx.await.map_err(|_| ProviderError::CacheServiceUnavailable)? } + /// Requests the ecrecovered transactions of the [Block] + /// + /// Returns `None` if the block does not exist. + pub async fn get_block_transactions_ecrecovered( + &self, + block_hash: B256, + ) -> ProviderResult>> { + Ok(self + .get_block_with_senders(block_hash) + .await? + .map(|block| block.into_transactions_ecrecovered().collect())) + } + /// Fetches both transactions and receipts for the given block hash. - pub(crate) async fn get_transactions_and_receipts( + pub async fn get_transactions_and_receipts( &self, block_hash: B256, ) -> ProviderResult, Vec)>> { @@ -182,7 +192,7 @@ impl EthStateCache { /// Requests the [BlockWithSenders] for the block hash /// /// Returns `None` if the block does not exist. - pub(crate) async fn get_block_with_senders( + pub async fn get_block_with_senders( &self, block_hash: B256, ) -> ProviderResult> { @@ -194,7 +204,7 @@ impl EthStateCache { /// Requests the [SealedBlockWithSenders] for the block hash /// /// Returns `None` if the block does not exist. - pub(crate) async fn get_sealed_block_with_senders( + pub async fn get_sealed_block_with_senders( &self, block_hash: B256, ) -> ProviderResult> { @@ -204,17 +214,14 @@ impl EthStateCache { /// Requests the [Receipt] for the block hash /// /// Returns `None` if the block was not found. - pub(crate) async fn get_receipts( - &self, - block_hash: B256, - ) -> ProviderResult>> { + pub async fn get_receipts(&self, block_hash: B256) -> ProviderResult>> { let (response_tx, rx) = oneshot::channel(); let _ = self.to_service.send(CacheAction::GetReceipts { block_hash, response_tx }); rx.await.map_err(|_| ProviderError::CacheServiceUnavailable)? } /// Fetches both receipts and block for the given block hash. - pub(crate) async fn get_block_and_receipts( + pub async fn get_block_and_receipts( &self, block_hash: B256, ) -> ProviderResult)>> { @@ -230,7 +237,7 @@ impl EthStateCache { /// /// Returns an error if the corresponding header (required for populating the envs) was not /// found. 
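// Sketch of consuming the now-public cache API from outside this module,
// assuming an already-constructed `EthStateCache` and a known block hash;
// `signer()` is provided by `TransactionSignedEcRecovered`. This is an
// illustration, not code from the patch.
async fn block_senders(cache: &EthStateCache, hash: B256) -> ProviderResult<Vec<Address>> {
    Ok(cache
        .get_block_transactions_ecrecovered(hash)
        .await?
        .map(|txs| txs.iter().map(|tx| tx.signer()).collect())
        .unwrap_or_default())
}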
-    pub(crate) async fn get_evm_env(&self, block_hash: B256) -> ProviderResult<(CfgEnv, BlockEnv)> {
+    pub async fn get_evm_env(&self, block_hash: B256) -> ProviderResult<(CfgEnv, BlockEnv)> {
         let (response_tx, rx) = oneshot::channel();
         let _ = self.to_service.send(CacheAction::GetEnv { block_hash, response_tx });
         rx.await.map_err(|_| ProviderError::CacheServiceUnavailable)?
     }

From f8451fd984065aae8de9f96135ebb90c20221bc2 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Tue, 21 Nov 2023 05:16:41 +0100
Subject: [PATCH 60/77] fix: dont skip gas used ratio if empty (#5512)

Co-authored-by: evalir
---
 crates/rpc/rpc-types/src/eth/fee.rs | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/crates/rpc/rpc-types/src/eth/fee.rs b/crates/rpc/rpc-types/src/eth/fee.rs
index c7717994ea180..5fb145ab68a3f 100644
--- a/crates/rpc/rpc-types/src/eth/fee.rs
+++ b/crates/rpc/rpc-types/src/eth/fee.rs
@@ -36,9 +36,8 @@ pub struct FeeHistory {
     ///
     /// # Note
     ///
-    /// The `Option` is only for compatibility with Erigon and Geth.
-    #[serde(skip_serializing_if = "Vec::is_empty")]
-    #[serde(default)]
+    /// Empty list is skipped only for compatibility with Erigon and Geth.
+    #[serde(default, skip_serializing_if = "Vec::is_empty")]
     pub base_fee_per_gas: Vec<U256>,
     /// An array of block gas used ratios. These are calculated as the ratio
     /// of `gasUsed` and `gasLimit`.
@@ -46,13 +45,11 @@ pub struct FeeHistory {
     /// # Note
     ///
     /// The `Option` is only for compatibility with Erigon and Geth.
-    #[serde(skip_serializing_if = "Vec::is_empty")]
-    #[serde(default)]
     pub gas_used_ratio: Vec<f64>,
     /// Lowest number block of the returned range.
     pub oldest_block: U256,
     /// An (optional) array of effective priority fee per gas data points from a single
     /// block. All zeroes are returned if the block is empty.
-    #[serde(default)]
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub reward: Option<Vec<Vec<U256>>>,
 }

From 3e0caae5b6b74d2956e80fa264380c4a9a22d149 Mon Sep 17 00:00:00 2001
From: George Datskos
Date: Tue, 21 Nov 2023 02:06:43 -0500
Subject: [PATCH 61/77] fix(op-reth): clear accesslist for Deposit txs (#5513)

---
 crates/primitives/src/revm/env.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/crates/primitives/src/revm/env.rs b/crates/primitives/src/revm/env.rs
index 2f3d8005c572e..26b87a159b40d 100644
--- a/crates/primitives/src/revm/env.rs
+++ b/crates/primitives/src/revm/env.rs
@@ -309,6 +309,7 @@ pub fn fill_tx_env(
         }
         #[cfg(feature = "optimism")]
         Transaction::Deposit(tx) => {
+            tx_env.access_list.clear();
             tx_env.gas_limit = tx.gas_limit;
             tx_env.gas_price = U256::ZERO;
             tx_env.gas_priority_fee = None;

From 6904194204b38a780e38fc0dbd4c716ab191d984 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Tue, 21 Nov 2023 15:00:53 +0100
Subject: [PATCH 62/77] feat: add getters for tx by origin (#5516)

---
 crates/transaction-pool/src/lib.rs      |  7 +++++++
 crates/transaction-pool/src/noop.rs     |  7 +++++++
 crates/transaction-pool/src/pool/mod.rs |  8 ++++++++
 crates/transaction-pool/src/traits.rs   | 21 +++++++++++++++++++++
 4 files changed, 43 insertions(+)

diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs
index de60ab7c5fffb..84c9e15ecf9d0 100644
--- a/crates/transaction-pool/src/lib.rs
+++ b/crates/transaction-pool/src/lib.rs
@@ -473,6 +473,13 @@ where
         self.pool.get_transactions_by_sender(sender)
     }

+    fn get_transactions_by_origin(
+        &self,
+        origin: TransactionOrigin,
+    ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
+        self.pool.get_transactions_by_origin(origin)
+    }
+
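// Sketch of how a caller might use the new origin-based getters; the
// convenience wrappers (`get_local_transactions` etc.) are added to the
// `TransactionPool` trait in the traits.rs hunk below. Assumes some
// `TransactionPool` implementor is in scope.
fn count_local_txs<P: TransactionPool>(pool: &P) -> usize {
    // equivalent to pool.get_transactions_by_origin(TransactionOrigin::Local).len()
    pool.get_local_transactions().len()
}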
     fn unique_senders(&self) -> HashSet<Address> {
         self.pool.unique_senders()
     }
diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs
index 6e6be8f2b8f45..a1cadb5f65970 100644
--- a/crates/transaction-pool/src/noop.rs
+++ b/crates/transaction-pool/src/noop.rs
@@ -216,6 +216,13 @@ impl TransactionPool for NoopTransactionPool {
         }
         Err(BlobStoreError::MissingSidecar(tx_hashes[0]))
     }
+
+    fn get_transactions_by_origin(
+        &self,
+        _origin: TransactionOrigin,
+    ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
+        vec![]
+    }
 }

 /// A [`TransactionValidator`] that does nothing.
diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs
index 7242099c1588b..cc60955470b4c 100644
--- a/crates/transaction-pool/src/pool/mod.rs
+++ b/crates/transaction-pool/src/pool/mod.rs
@@ -695,6 +695,14 @@ where
         self.pool.read().get_transactions_by_sender(sender_id)
     }

+    /// Returns all transactions that were submitted with the given [TransactionOrigin]
+    pub(crate) fn get_transactions_by_origin(
+        &self,
+        origin: TransactionOrigin,
+    ) -> Vec<Arc<ValidPoolTransaction<T::Transaction>>> {
+        self.pool.read().all().transactions_iter().filter(|tx| tx.origin == origin).collect()
+    }
+
     /// Returns all the transactions belonging to the hashes.
     ///
     /// If no transaction exists, it is skipped.
diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs
index 91455df3d8668..3888b1078666f 100644
--- a/crates/transaction-pool/src/traits.rs
+++ b/crates/transaction-pool/src/traits.rs
@@ -306,6 +306,27 @@ pub trait TransactionPool: Send + Sync + Clone {
         sender: Address,
     ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>>;

+    /// Returns all transactions that were submitted with the given [TransactionOrigin]
+    fn get_transactions_by_origin(
+        &self,
+        origin: TransactionOrigin,
+    ) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>>;
+
+    /// Returns all transactions that were submitted as [TransactionOrigin::Local]
+    fn get_local_transactions(&self) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
+        self.get_transactions_by_origin(TransactionOrigin::Local)
+    }
+
+    /// Returns all transactions that were submitted as [TransactionOrigin::Private]
+    fn get_private_transactions(&self) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
+        self.get_transactions_by_origin(TransactionOrigin::Private)
+    }
+
+    /// Returns all transactions that were submitted as [TransactionOrigin::External]
+    fn get_external_transactions(&self) -> Vec<Arc<ValidPoolTransaction<Self::Transaction>>> {
+        self.get_transactions_by_origin(TransactionOrigin::External)
+    }
+
     /// Returns a set of all senders of transactions in the pool
     fn unique_senders(&self) -> HashSet<Address>
; From afdb0a314845a80c83d99d946007585229174d83 Mon Sep 17 00:00:00 2001 From: rakita Date: Tue, 21 Nov 2023 20:58:41 +0300 Subject: [PATCH 63/77] chore(revm): freeze revm, add Optimism Canyon fork (#5519) --- Cargo.lock | 8 ++++---- Cargo.toml | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7a70b68ad11c4..c53376c9863b1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6607,7 +6607,7 @@ dependencies = [ [[package]] name = "revm" version = "3.5.0" -source = "git+https://github.com/bluealloy/revm?rev=1609e07c68048909ad1682c98cf2b9baa76310b5#1609e07c68048909ad1682c98cf2b9baa76310b5" +source = "git+https://github.com/bluealloy/revm?branch=reth_freeze#74643d37fc6231d558868ccc8b97400506e10906" dependencies = [ "auto_impl", "revm-interpreter", @@ -6617,7 +6617,7 @@ dependencies = [ [[package]] name = "revm-interpreter" version = "1.3.0" -source = "git+https://github.com/bluealloy/revm?rev=1609e07c68048909ad1682c98cf2b9baa76310b5#1609e07c68048909ad1682c98cf2b9baa76310b5" +source = "git+https://github.com/bluealloy/revm?branch=reth_freeze#74643d37fc6231d558868ccc8b97400506e10906" dependencies = [ "revm-primitives", ] @@ -6625,7 +6625,7 @@ dependencies = [ [[package]] name = "revm-precompile" version = "2.2.0" -source = "git+https://github.com/bluealloy/revm?rev=1609e07c68048909ad1682c98cf2b9baa76310b5#1609e07c68048909ad1682c98cf2b9baa76310b5" +source = "git+https://github.com/bluealloy/revm?branch=reth_freeze#74643d37fc6231d558868ccc8b97400506e10906" dependencies = [ "aurora-engine-modexp", "c-kzg", @@ -6641,7 +6641,7 @@ dependencies = [ [[package]] name = "revm-primitives" version = "1.3.0" -source = "git+https://github.com/bluealloy/revm?rev=1609e07c68048909ad1682c98cf2b9baa76310b5#1609e07c68048909ad1682c98cf2b9baa76310b5" +source = "git+https://github.com/bluealloy/revm?branch=reth_freeze#74643d37fc6231d558868ccc8b97400506e10906" dependencies = [ "alloy-primitives", "alloy-rlp", diff --git a/Cargo.toml b/Cargo.toml index 494015c7fe339..a308873d3fb1f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -135,8 +135,8 @@ reth-transaction-pool = { path = "crates/transaction-pool" } reth-trie = { path = "crates/trie" } # revm -revm = { git = "https://github.com/bluealloy/revm", rev = "1609e07c68048909ad1682c98cf2b9baa76310b5" } -revm-primitives = { git = "https://github.com/bluealloy/revm", rev = "1609e07c68048909ad1682c98cf2b9baa76310b5" } +revm = { git = "https://github.com/bluealloy/revm", branch = "reth_freeze" } +revm-primitives = { git = "https://github.com/bluealloy/revm", branch = "reth_freeze" } # eth alloy-primitives = "0.4" From c825b26f2e75f09dcd1dd6f4843405c0b804488d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 21 Nov 2023 20:58:36 +0100 Subject: [PATCH 64/77] fix: dont use Stack::default (#5521) --- .../revm-inspectors/src/tracing/builder/parity.rs | 10 ++++------ crates/revm/revm-inspectors/src/tracing/mod.rs | 12 +++++++----- crates/revm/revm-inspectors/src/tracing/types.rs | 9 ++------- crates/rpc/rpc/src/trace.rs | 1 + 4 files changed, 14 insertions(+), 18 deletions(-) diff --git a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs index d17ecc53118ae..9d984bcfb69ce 100644 --- a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs +++ b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs @@ -454,8 +454,10 @@ impl ParityTraceBuilder { }; let mut push_stack = 
step.push_stack.clone().unwrap_or_default(); for idx in (0..show_stack).rev() { - if step.stack.len() > idx { - push_stack.push(step.stack.peek(idx).unwrap_or_default()) + if let Some(stack) = step.stack.as_ref() { + if stack.len() > idx { + push_stack.push(stack.peek(idx).unwrap_or_default()) + } } } push_stack @@ -487,10 +489,6 @@ impl ParityTraceBuilder { } /// An iterator for [TransactionTrace]s -/// -/// This iterator handles additional selfdestruct actions based on the last emitted -/// [TransactionTrace], since selfdestructs are not recorded as individual call traces but are -/// derived from recorded call struct TransactionTraceIter { iter: Iter, next_selfdestruct: Option, diff --git a/crates/revm/revm-inspectors/src/tracing/mod.rs b/crates/revm/revm-inspectors/src/tracing/mod.rs index a947c42538c41..dbd7101ed8f64 100644 --- a/crates/revm/revm-inspectors/src/tracing/mod.rs +++ b/crates/revm/revm-inspectors/src/tracing/mod.rs @@ -282,8 +282,7 @@ impl TracingInspector { .record_memory_snapshots .then(|| RecordedMemory::new(interp.shared_memory.context_memory().to_vec())) .unwrap_or_default(); - let stack = - self.config.record_stack_snapshots.then(|| interp.stack.clone()).unwrap_or_default(); + let stack = self.config.record_stack_snapshots.then(|| interp.stack.clone()); let op = OpCode::new(interp.current_opcode()) .or_else(|| { @@ -326,9 +325,12 @@ impl TracingInspector { self.step_stack.pop().expect("can't fill step without starting a step first"); let step = &mut self.traces.arena[trace_idx].trace.steps[step_idx]; - if interp.stack.len() > step.stack.len() { - // if the stack grew, we need to record the new values - step.push_stack = Some(interp.stack.data()[step.stack.len()..].to_vec()); + if let Some(stack) = step.stack.as_ref() { + // only check stack changes if record stack snapshots is enabled: if stack is Some + if interp.stack.len() > stack.len() { + // if the stack grew, we need to record the new values + step.push_stack = Some(interp.stack.data()[stack.len()..].to_vec()); + } } if self.config.record_memory_snapshots { diff --git a/crates/revm/revm-inspectors/src/tracing/types.rs b/crates/revm/revm-inspectors/src/tracing/types.rs index ec0e7269a0800..ecc2e261b2b81 100644 --- a/crates/revm/revm-inspectors/src/tracing/types.rs +++ b/crates/revm/revm-inspectors/src/tracing/types.rs @@ -429,8 +429,6 @@ impl CallTraceNode { } /// Converts this call trace into an _empty_ geth [CallFrame] - /// - /// Caution: this does not include any of the child calls pub(crate) fn geth_empty_call_frame(&self, include_logs: bool) -> CallFrame { let mut call_frame = CallFrame { typ: self.trace.kind.to_string(), @@ -485,9 +483,6 @@ pub(crate) struct CallTraceStepStackItem<'a> { } /// Ordering enum for calls and logs -/// -/// i.e. if Call 0 occurs before Log 0, it will be pushed into the `CallTraceNode`'s ordering before -/// the log. 
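// A minimal sketch of what this change means for consumers of recorded
// steps within the crate: `CallTraceStep::stack` is now `Option<Stack>`
// instead of an empty default `Stack`, so the disabled-snapshot case must
// be handled explicitly. `top_of_stack` is a hypothetical helper.
fn top_of_stack(step: &CallTraceStep) -> Option<U256> {
    // `None` when stack snapshots were not recorded for this step
    step.stack.as_ref().and_then(|stack| stack.peek(0).ok())
}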
#[derive(Debug, Clone, PartialEq, Eq)] pub(crate) enum LogCallOrder { Log(usize), @@ -516,7 +511,7 @@ pub(crate) struct CallTraceStep { /// Current contract address pub(crate) contract: Address, /// Stack before step execution - pub(crate) stack: Stack, + pub(crate) stack: Option, /// The new stack items placed by this step if any pub(crate) push_stack: Option>, /// All allocated memory in a step @@ -568,7 +563,7 @@ impl CallTraceStep { }; if opts.is_stack_enabled() { - log.stack = Some(self.stack.data().clone()); + log.stack = self.stack.as_ref().map(|stack| stack.data().clone()); } if opts.is_memory_enabled() { diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 5399ec49071a5..7c21930868541 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -558,6 +558,7 @@ fn tracing_config(trace_types: &HashSet) -> TracingInspectorConfig { let needs_vm_trace = trace_types.contains(&TraceType::VmTrace); TracingInspectorConfig::default_parity() .set_steps(needs_vm_trace) + .set_stack_snapshots(needs_vm_trace) .set_memory_snapshots(needs_vm_trace) } From 06eeb35366f5fb43b4f59fd09723f2fd1586dcdf Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 21 Nov 2023 23:57:42 +0000 Subject: [PATCH 65/77] feat(bin): improve status logs (#5518) --- bin/reth/src/chain/import.rs | 4 +- bin/reth/src/debug_cmd/execution.rs | 4 +- bin/reth/src/node/events.rs | 219 ++++++++++++--------- bin/reth/src/node/mod.rs | 2 +- crates/primitives/src/stage/checkpoints.rs | 18 +- crates/stages/src/pipeline/event.rs | 11 +- crates/stages/src/pipeline/mod.rs | 90 ++++----- 7 files changed, 196 insertions(+), 152 deletions(-) diff --git a/bin/reth/src/chain/import.rs b/bin/reth/src/chain/import.rs index 7478aad47f174..2d1800fc36c0a 100644 --- a/bin/reth/src/chain/import.rs +++ b/bin/reth/src/chain/import.rs @@ -109,12 +109,12 @@ impl ImportCommand { pipeline.set_tip(tip); debug!(target: "reth::cli", ?tip, "Tip manually set"); - let factory = ProviderFactory::new(&db, self.chain.clone()); + let factory = ProviderFactory::new(db.clone(), self.chain.clone()); let provider = factory.provider()?; let latest_block_number = provider.get_stage_checkpoint(StageId::Finish)?.map(|ch| ch.block_number); - tokio::spawn(handle_events(None, latest_block_number, events)); + tokio::spawn(handle_events(None, latest_block_number, events, db.clone())); // Run pipeline info!(target: "reth::cli", "Starting sync pipeline"); diff --git a/bin/reth/src/debug_cmd/execution.rs b/bin/reth/src/debug_cmd/execution.rs index c248819a0ea27..8a91ca73f09cb 100644 --- a/bin/reth/src/debug_cmd/execution.rs +++ b/bin/reth/src/debug_cmd/execution.rs @@ -235,7 +235,7 @@ impl Command { &ctx.task_executor, )?; - let factory = ProviderFactory::new(&db, self.chain.clone()); + let factory = ProviderFactory::new(db.clone(), self.chain.clone()); let provider = factory.provider()?; let latest_block_number = @@ -252,7 +252,7 @@ impl Command { ); ctx.task_executor.spawn_critical( "events task", - events::handle_events(Some(network.clone()), latest_block_number, events), + events::handle_events(Some(network.clone()), latest_block_number, events, db.clone()), ); let mut current_max_block = latest_block_number.unwrap_or_default(); diff --git a/bin/reth/src/node/events.rs b/bin/reth/src/node/events.rs index 8b5d7c76ad6a9..0fb17f43b33c3 100644 --- a/bin/reth/src/node/events.rs +++ b/bin/reth/src/node/events.rs @@ -3,6 +3,7 @@ use crate::node::cl_events::ConsensusLayerHealthEvent; use futures::Stream; use 
reth_beacon_consensus::BeaconConsensusEngineEvent; +use reth_db::DatabaseEnv; use reth_interfaces::consensus::ForkchoiceState; use reth_network::{NetworkEvent, NetworkHandle}; use reth_network_api::PeersInfo; @@ -13,8 +14,10 @@ use reth_primitives::{ use reth_prune::PrunerEvent; use reth_stages::{ExecOutput, PipelineEvent}; use std::{ + fmt::{Display, Formatter}, future::Future, pin::Pin, + sync::Arc, task::{Context, Poll}, time::{Duration, Instant}, }; @@ -26,27 +29,25 @@ const INFO_MESSAGE_INTERVAL: Duration = Duration::from_secs(25); /// The current high-level state of the node. struct NodeState { + /// Database environment. + /// Used for freelist calculation reported in the "Status" log message. + /// See [EventHandler::poll]. + db: Arc, /// Connection to the network. network: Option, /// The stage currently being executed. - current_stage: Option, - /// The ETA for the current stage. - eta: Eta, - /// The current checkpoint of the executing stage. - current_checkpoint: StageCheckpoint, + current_stage: Option, /// The latest block reached by either pipeline or consensus engine. latest_block: Option, } impl NodeState { - fn new(network: Option, latest_block: Option) -> Self { - Self { - network, - current_stage: None, - eta: Eta::default(), - current_checkpoint: StageCheckpoint::new(0), - latest_block, - } + fn new( + db: Arc, + network: Option, + latest_block: Option, + ) -> Self { + Self { db, network, current_stage: None, latest_block } } fn num_connected_peers(&self) -> usize { @@ -56,70 +57,80 @@ impl NodeState { /// Processes an event emitted by the pipeline fn handle_pipeline_event(&mut self, event: PipelineEvent) { match event { - PipelineEvent::Running { pipeline_stages_progress, stage_id, checkpoint } => { - let notable = self.current_stage.is_none(); - self.current_stage = Some(stage_id); - self.current_checkpoint = checkpoint.unwrap_or_default(); + PipelineEvent::Run { pipeline_stages_progress, stage_id, checkpoint, target } => { + let checkpoint = checkpoint.unwrap_or_default(); + let current_stage = CurrentStage { + stage_id, + eta: match &self.current_stage { + Some(current_stage) if current_stage.stage_id == stage_id => { + current_stage.eta + } + _ => Eta::default(), + }, + checkpoint, + target, + }; + + let progress = OptionalField( + checkpoint.entities().and_then(|entities| entities.fmt_percentage()), + ); + let eta = current_stage.eta.fmt_for_stage(stage_id); - if notable { - if let Some(progress) = self.current_checkpoint.entities() { - info!( - pipeline_stages = %pipeline_stages_progress, - stage = %stage_id, - from = self.current_checkpoint.block_number, - checkpoint = %self.current_checkpoint.block_number, - %progress, - eta = %self.eta.fmt_for_stage(stage_id), - "Executing stage", - ); - } else { - info!( - pipeline_stages = %pipeline_stages_progress, - stage = %stage_id, - from = self.current_checkpoint.block_number, - checkpoint = %self.current_checkpoint.block_number, - eta = %self.eta.fmt_for_stage(stage_id), - "Executing stage", - ); - } - } + info!( + pipeline_stages = %pipeline_stages_progress, + stage = %stage_id, + checkpoint = %checkpoint.block_number, + target = %OptionalField(target), + %progress, + %eta, + "Executing stage", + ); + + self.current_stage = Some(current_stage); } PipelineEvent::Ran { pipeline_stages_progress, stage_id, result: ExecOutput { checkpoint, done }, } => { - self.current_checkpoint = checkpoint; if stage_id.is_finish() { self.latest_block = Some(checkpoint.block_number); } - self.eta.update(self.current_checkpoint); - - 
let message = - if done { "Stage finished executing" } else { "Stage committed progress" }; - - if let Some(progress) = checkpoint.entities() { - info!( - pipeline_stages = %pipeline_stages_progress, - stage = %stage_id, - checkpoint = %checkpoint.block_number, - %progress, - eta = %self.eta.fmt_for_stage(stage_id), - "{message}", - ); - } else { - info!( - pipeline_stages = %pipeline_stages_progress, - stage = %stage_id, - checkpoint = %checkpoint.block_number, - eta = %self.eta.fmt_for_stage(stage_id), - "{message}", + + if let Some(current_stage) = self.current_stage.as_mut() { + current_stage.checkpoint = checkpoint; + current_stage.eta.update(checkpoint); + + let target = OptionalField(current_stage.target); + let progress = OptionalField( + checkpoint.entities().and_then(|entities| entities.fmt_percentage()), ); + + if done { + info!( + pipeline_stages = %pipeline_stages_progress, + stage = %stage_id, + checkpoint = %checkpoint.block_number, + %target, + %progress, + "Stage finished executing", + ) + } else { + let eta = current_stage.eta.fmt_for_stage(stage_id); + info!( + pipeline_stages = %pipeline_stages_progress, + stage = %stage_id, + checkpoint = %checkpoint.block_number, + %target, + %progress, + %eta, + "Stage committed progress", + ) + } } if done { self.current_stage = None; - self.eta = Eta::default(); } } _ => (), @@ -189,6 +200,29 @@ impl NodeState { } } +/// Helper type for formatting of optional fields: +/// - If [Some(x)], then `x` is written +/// - If [None], then `None` is written +struct OptionalField(Option); + +impl Display for OptionalField { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + if let Some(field) = &self.0 { + write!(f, "{field}") + } else { + write!(f, "None") + } + } +} + +/// The stage currently being executed. +struct CurrentStage { + stage_id: StageId, + eta: Eta, + checkpoint: StageCheckpoint, + target: Option, +} + /// A node event. 
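// Sketch of how `OptionalField` renders, assuming it is generic over some
// `T: Display` wrapped in an `Option<T>`: `Some(x)` prints `x` itself and
// `None` prints the literal string "None".
fn optional_field_demo() {
    assert_eq!(OptionalField(Some(42u64)).to_string(), "42");
    assert_eq!(OptionalField(None::<u64>).to_string(), "None");
}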
#[derive(Debug)] pub enum NodeEvent { @@ -240,10 +274,11 @@ pub async fn handle_events( network: Option, latest_block_number: Option, events: E, + db: Arc, ) where E: Stream + Unpin, { - let state = NodeState::new(network, latest_block_number); + let state = NodeState::new(db, network, latest_block_number); let start = tokio::time::Instant::now() + Duration::from_secs(3); let mut info_interval = tokio::time::interval_at(start, INFO_MESSAGE_INTERVAL); @@ -273,32 +308,40 @@ where let mut this = self.project(); while this.info_interval.poll_tick(cx).is_ready() { - if let Some(stage) = this.state.current_stage { - if let Some(progress) = this.state.current_checkpoint.entities() { - info!( - target: "reth::cli", - connected_peers = this.state.num_connected_peers(), - %stage, - checkpoint = %this.state.current_checkpoint.block_number, - %progress, - eta = %this.state.eta.fmt_for_stage(stage), - "Status" - ); - } else { - info!( - target: "reth::cli", - connected_peers = this.state.num_connected_peers(), - %stage, - checkpoint = %this.state.current_checkpoint.block_number, - eta = %this.state.eta.fmt_for_stage(stage), - "Status" - ); - } + let freelist = OptionalField(this.state.db.freelist().ok()); + + if let Some(CurrentStage { stage_id, eta, checkpoint, target }) = + &this.state.current_stage + { + let progress = OptionalField( + checkpoint.entities().and_then(|entities| entities.fmt_percentage()), + ); + let eta = eta.fmt_for_stage(*stage_id); + + info!( + target: "reth::cli", + connected_peers = this.state.num_connected_peers(), + %freelist, + stage = %stage_id, + checkpoint = checkpoint.block_number, + target = %OptionalField(*target), + %progress, + %eta, + "Status" + ); + } else if let Some(latest_block) = this.state.latest_block { + info!( + target: "reth::cli", + connected_peers = this.state.num_connected_peers(), + %freelist, + %latest_block, + "Status" + ); } else { info!( target: "reth::cli", connected_peers = this.state.num_connected_peers(), - latest_block = this.state.latest_block.unwrap_or(this.state.current_checkpoint.block_number), + %freelist, "Status" ); } @@ -332,7 +375,7 @@ where /// checkpoints reported by the pipeline. /// /// One `Eta` is only valid for a single stage. 
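// Sketch tying the Status line together: `fmt_percentage` (added to
// `EntitiesCheckpoint` in the checkpoints.rs hunk below) returns `None`
// when `total == 0`, and `OptionalField` then renders the literal "None"
// rather than a meaningless percentage.
fn progress_demo() {
    let some = EntitiesCheckpoint { processed: 50, total: 200 };
    assert_eq!(some.fmt_percentage().as_deref(), Some("25.00%"));

    let empty = EntitiesCheckpoint { processed: 0, total: 0 };
    assert_eq!(empty.fmt_percentage(), None);
}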
-#[derive(Default)] +#[derive(Default, Copy, Clone)] struct Eta { /// The last stage checkpoint last_checkpoint: EntitiesCheckpoint, @@ -375,8 +418,8 @@ impl Eta { } } -impl std::fmt::Display for Eta { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl Display for Eta { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { if let Some((eta, last_checkpoint_time)) = self.eta.zip(self.last_checkpoint_time) { let remaining = eta.checked_sub(last_checkpoint_time.elapsed()); diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index 3019d0cd215ed..316168f4eb284 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -516,7 +516,7 @@ impl NodeCommand { ); ctx.task_executor.spawn_critical( "events task", - events::handle_events(Some(network.clone()), Some(head.number), events), + events::handle_events(Some(network.clone()), Some(head.number), events, db.clone()), ); let engine_api = EngineApi::new( diff --git a/crates/primitives/src/stage/checkpoints.rs b/crates/primitives/src/stage/checkpoints.rs index 0304f0f727079..b238c77e19d34 100644 --- a/crates/primitives/src/stage/checkpoints.rs +++ b/crates/primitives/src/stage/checkpoints.rs @@ -5,10 +5,7 @@ use crate::{ use bytes::{Buf, BufMut}; use reth_codecs::{derive_arbitrary, main_codec, Compact}; use serde::{Deserialize, Serialize}; -use std::{ - fmt::{Display, Formatter}, - ops::RangeInclusive, -}; +use std::ops::RangeInclusive; /// Saves the progress of Merkle stage. #[derive(Default, Debug, Clone, PartialEq)] @@ -169,9 +166,16 @@ pub struct EntitiesCheckpoint { pub total: u64, } -impl Display for EntitiesCheckpoint { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!(f, "{:.2}%", 100.0 * self.processed as f64 / self.total as f64) +impl EntitiesCheckpoint { + /// Formats entities checkpoint as percentage, i.e. `processed / total`. + /// + /// Return [None] if `total == 0`. + pub fn fmt_percentage(&self) -> Option { + if self.total == 0 { + return None + } + + Some(format!("{:.2}%", 100.0 * self.processed as f64 / self.total as f64)) } } diff --git a/crates/stages/src/pipeline/event.rs b/crates/stages/src/pipeline/event.rs index 05d7945d33195..d5b02610a5416 100644 --- a/crates/stages/src/pipeline/event.rs +++ b/crates/stages/src/pipeline/event.rs @@ -1,5 +1,8 @@ use crate::stage::{ExecOutput, UnwindInput, UnwindOutput}; -use reth_primitives::stage::{StageCheckpoint, StageId}; +use reth_primitives::{ + stage::{StageCheckpoint, StageId}, + BlockNumber, +}; use std::fmt::{Display, Formatter}; /// An event emitted by a [Pipeline][crate::Pipeline]. @@ -12,13 +15,15 @@ use std::fmt::{Display, Formatter}; #[derive(Debug, PartialEq, Eq, Clone)] pub enum PipelineEvent { /// Emitted when a stage is about to be run. - Running { + Run { /// Pipeline stages progress. pipeline_stages_progress: PipelineStagesProgress, /// The stage that is about to be run. stage_id: StageId, /// The previous checkpoint of the stage. checkpoint: Option, + /// The block number up to which the stage is running, if known. + target: Option, }, /// Emitted when a stage has run a single time. Ran { @@ -30,7 +35,7 @@ pub enum PipelineEvent { result: ExecOutput, }, /// Emitted when a stage is about to be unwound. - Unwinding { + Unwind { /// The stage that is about to be unwound. stage_id: StageId, /// The unwind parameters. 
diff --git a/crates/stages/src/pipeline/mod.rs b/crates/stages/src/pipeline/mod.rs index 06f487858d96c..a48c5b462ad7e 100644 --- a/crates/stages/src/pipeline/mod.rs +++ b/crates/stages/src/pipeline/mod.rs @@ -290,7 +290,7 @@ where ); while checkpoint.block_number > to { let input = UnwindInput { checkpoint, unwind_to: to, bad_block }; - self.listeners.notify(PipelineEvent::Unwinding { stage_id, input }); + self.listeners.notify(PipelineEvent::Unwind { stage_id, input }); let output = stage.unwind(&provider_rw, input); match output { @@ -378,13 +378,14 @@ where }; } - self.listeners.notify(PipelineEvent::Running { + self.listeners.notify(PipelineEvent::Run { pipeline_stages_progress: event::PipelineStagesProgress { current: stage_index + 1, total: total_stages, }, stage_id, checkpoint: prev_checkpoint, + target, }); let provider_rw = factory.provider_rw()?; @@ -393,26 +394,6 @@ where made_progress |= checkpoint.block_number != prev_checkpoint.unwrap_or_default().block_number; - if let Some(progress) = checkpoint.entities() { - debug!( - target: "sync::pipeline", - stage = %stage_id, - checkpoint = checkpoint.block_number, - ?target, - %progress, - %done, - "Stage committed progress" - ); - } else { - debug!( - target: "sync::pipeline", - stage = %stage_id, - checkpoint = checkpoint.block_number, - ?target, - %done, - "Stage committed progress" - ); - } if let Some(metrics_tx) = &mut self.metrics_tx { let _ = metrics_tx.send(MetricEvent::StageCheckpoint { stage_id, @@ -608,20 +589,22 @@ mod tests { assert_eq!( events.collect::>().await, vec![ - PipelineEvent::Running { + PipelineEvent::Run { pipeline_stages_progress: PipelineStagesProgress { current: 1, total: 2 }, stage_id: StageId::Other("A"), - checkpoint: None + checkpoint: None, + target: Some(10), }, PipelineEvent::Ran { pipeline_stages_progress: PipelineStagesProgress { current: 1, total: 2 }, stage_id: StageId::Other("A"), result: ExecOutput { checkpoint: StageCheckpoint::new(20), done: true }, }, - PipelineEvent::Running { + PipelineEvent::Run { pipeline_stages_progress: PipelineStagesProgress { current: 2, total: 2 }, stage_id: StageId::Other("B"), - checkpoint: None + checkpoint: None, + target: Some(10), }, PipelineEvent::Ran { pipeline_stages_progress: PipelineStagesProgress { current: 2, total: 2 }, @@ -671,30 +654,33 @@ mod tests { events.collect::>().await, vec![ // Executing - PipelineEvent::Running { + PipelineEvent::Run { pipeline_stages_progress: PipelineStagesProgress { current: 1, total: 3 }, stage_id: StageId::Other("A"), - checkpoint: None + checkpoint: None, + target: Some(10), }, PipelineEvent::Ran { pipeline_stages_progress: PipelineStagesProgress { current: 1, total: 3 }, stage_id: StageId::Other("A"), result: ExecOutput { checkpoint: StageCheckpoint::new(100), done: true }, }, - PipelineEvent::Running { + PipelineEvent::Run { pipeline_stages_progress: PipelineStagesProgress { current: 2, total: 3 }, stage_id: StageId::Other("B"), - checkpoint: None + checkpoint: None, + target: Some(10), }, PipelineEvent::Ran { pipeline_stages_progress: PipelineStagesProgress { current: 2, total: 3 }, stage_id: StageId::Other("B"), result: ExecOutput { checkpoint: StageCheckpoint::new(10), done: true }, }, - PipelineEvent::Running { + PipelineEvent::Run { pipeline_stages_progress: PipelineStagesProgress { current: 3, total: 3 }, stage_id: StageId::Other("C"), - checkpoint: None + checkpoint: None, + target: Some(10), }, PipelineEvent::Ran { pipeline_stages_progress: PipelineStagesProgress { current: 3, total: 3 }, @@ -702,7 
+688,7 @@ mod tests { result: ExecOutput { checkpoint: StageCheckpoint::new(20), done: true }, }, // Unwinding - PipelineEvent::Unwinding { + PipelineEvent::Unwind { stage_id: StageId::Other("C"), input: UnwindInput { checkpoint: StageCheckpoint::new(20), @@ -714,7 +700,7 @@ mod tests { stage_id: StageId::Other("C"), result: UnwindOutput { checkpoint: StageCheckpoint::new(1) }, }, - PipelineEvent::Unwinding { + PipelineEvent::Unwind { stage_id: StageId::Other("B"), input: UnwindInput { checkpoint: StageCheckpoint::new(10), @@ -726,7 +712,7 @@ mod tests { stage_id: StageId::Other("B"), result: UnwindOutput { checkpoint: StageCheckpoint::new(1) }, }, - PipelineEvent::Unwinding { + PipelineEvent::Unwind { stage_id: StageId::Other("A"), input: UnwindInput { checkpoint: StageCheckpoint::new(100), @@ -775,20 +761,22 @@ mod tests { events.collect::>().await, vec![ // Executing - PipelineEvent::Running { + PipelineEvent::Run { pipeline_stages_progress: PipelineStagesProgress { current: 1, total: 2 }, stage_id: StageId::Other("A"), - checkpoint: None + checkpoint: None, + target: Some(10), }, PipelineEvent::Ran { pipeline_stages_progress: PipelineStagesProgress { current: 1, total: 2 }, stage_id: StageId::Other("A"), result: ExecOutput { checkpoint: StageCheckpoint::new(100), done: true }, }, - PipelineEvent::Running { + PipelineEvent::Run { pipeline_stages_progress: PipelineStagesProgress { current: 2, total: 2 }, stage_id: StageId::Other("B"), - checkpoint: None + checkpoint: None, + target: Some(10), }, PipelineEvent::Ran { pipeline_stages_progress: PipelineStagesProgress { current: 2, total: 2 }, @@ -798,7 +786,7 @@ mod tests { // Unwinding // Nothing to unwind in stage "B" PipelineEvent::Skipped { stage_id: StageId::Other("B") }, - PipelineEvent::Unwinding { + PipelineEvent::Unwind { stage_id: StageId::Other("A"), input: UnwindInput { checkpoint: StageCheckpoint::new(100), @@ -865,23 +853,25 @@ mod tests { assert_eq!( events.collect::>().await, vec![ - PipelineEvent::Running { + PipelineEvent::Run { pipeline_stages_progress: PipelineStagesProgress { current: 1, total: 2 }, stage_id: StageId::Other("A"), - checkpoint: None + checkpoint: None, + target: Some(10), }, PipelineEvent::Ran { pipeline_stages_progress: PipelineStagesProgress { current: 1, total: 2 }, stage_id: StageId::Other("A"), result: ExecOutput { checkpoint: StageCheckpoint::new(10), done: true }, }, - PipelineEvent::Running { + PipelineEvent::Run { pipeline_stages_progress: PipelineStagesProgress { current: 2, total: 2 }, stage_id: StageId::Other("B"), - checkpoint: None + checkpoint: None, + target: Some(10), }, PipelineEvent::Error { stage_id: StageId::Other("B") }, - PipelineEvent::Unwinding { + PipelineEvent::Unwind { stage_id: StageId::Other("A"), input: UnwindInput { checkpoint: StageCheckpoint::new(10), @@ -893,20 +883,22 @@ mod tests { stage_id: StageId::Other("A"), result: UnwindOutput { checkpoint: StageCheckpoint::new(0) }, }, - PipelineEvent::Running { + PipelineEvent::Run { pipeline_stages_progress: PipelineStagesProgress { current: 1, total: 2 }, stage_id: StageId::Other("A"), - checkpoint: Some(StageCheckpoint::new(0)) + checkpoint: Some(StageCheckpoint::new(0)), + target: Some(10), }, PipelineEvent::Ran { pipeline_stages_progress: PipelineStagesProgress { current: 1, total: 2 }, stage_id: StageId::Other("A"), result: ExecOutput { checkpoint: StageCheckpoint::new(10), done: true }, }, - PipelineEvent::Running { + PipelineEvent::Run { pipeline_stages_progress: PipelineStagesProgress { current: 2, total: 2 }, 
stage_id: StageId::Other("B"), - checkpoint: None + checkpoint: None, + target: Some(10), }, PipelineEvent::Ran { pipeline_stages_progress: PipelineStagesProgress { current: 2, total: 2 }, From e5b33dbe74a1eb5811dfb5d2828d8c15bae286e3 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Tue, 21 Nov 2023 23:58:13 +0000 Subject: [PATCH 66/77] release: v0.1.0-alpha.11 (#5523) --- Cargo.lock | 94 +++++++++++++++++++++++++++--------------------------- Cargo.toml | 2 +- 2 files changed, 48 insertions(+), 48 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c53376c9863b1..37baca9780cda 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1291,7 +1291,7 @@ checksum = "67ba02a97a2bd10f4b59b25c7973101c79642302776489e030cd13cdab09ed15" [[package]] name = "codecs-derive" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "convert_case 0.6.0", "parity-scale-codec", @@ -2146,7 +2146,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "alloy-rlp", "reth-db", @@ -5537,7 +5537,7 @@ dependencies = [ [[package]] name = "reth" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "alloy-rlp", "aquamarine", @@ -5614,7 +5614,7 @@ dependencies = [ [[package]] name = "reth-auto-seal-consensus" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "clap", "eyre", @@ -5637,7 +5637,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "alloy-rlp", "futures-core", @@ -5658,7 +5658,7 @@ dependencies = [ [[package]] name = "reth-beacon-consensus" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "assert_matches", "cfg-if", @@ -5691,7 +5691,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "aquamarine", "assert_matches", @@ -5711,7 +5711,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "arbitrary", "bytes", @@ -5726,7 +5726,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "confy", "reth-discv4", @@ -5743,7 +5743,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "assert_matches", "cfg-if", @@ -5755,7 +5755,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "arbitrary", "assert_matches", @@ -5800,7 +5800,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "alloy-rlp", "discv5", @@ -5823,7 +5823,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "alloy-rlp", "async-trait", @@ -5847,7 +5847,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "alloy-rlp", "assert_matches", @@ -5874,7 +5874,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "aes 0.8.3", "alloy-rlp", @@ -5904,7 +5904,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "alloy-rlp", "arbitrary", @@ -5936,7 +5936,7 @@ dependencies = [ 
[[package]] name = "reth-interfaces" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "arbitrary", "async-trait", @@ -5963,7 +5963,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "async-trait", "bytes", @@ -5982,7 +5982,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "bitflags 2.4.1", "byteorder", @@ -6001,7 +6001,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "bindgen 0.68.1", "cc", @@ -6010,7 +6010,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "futures", "metrics", @@ -6021,7 +6021,7 @@ dependencies = [ [[package]] name = "reth-metrics-derive" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "metrics", "once_cell", @@ -6035,7 +6035,7 @@ dependencies = [ [[package]] name = "reth-net-common" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "pin-project", "reth-primitives", @@ -6044,7 +6044,7 @@ dependencies = [ [[package]] name = "reth-net-nat" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "igd", "pin-project-lite", @@ -6058,7 +6058,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "alloy-rlp", "aquamarine", @@ -6108,7 +6108,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "async-trait", "reth-discv4", @@ -6122,7 +6122,7 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "anyhow", "bincode", @@ -6142,7 +6142,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "alloy-rlp", "futures-util", @@ -6164,7 +6164,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -6212,7 +6212,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "alloy-rlp", "assert_matches", @@ -6239,7 +6239,7 @@ dependencies = [ [[package]] name = "reth-prune" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "assert_matches", "itertools 0.11.0", @@ -6261,7 +6261,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "reth-consensus-common", "reth-interfaces", @@ -6275,7 +6275,7 @@ dependencies = [ [[package]] name = "reth-revm-inspectors" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "alloy-sol-types", "boa_engine", @@ -6291,7 +6291,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "alloy-dyn-abi", "alloy-primitives", @@ -6344,7 +6344,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "jsonrpsee", "reth-primitives", @@ -6354,7 +6354,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "async-trait", "futures", @@ -6368,7 +6368,7 @@ dependencies = [ 
[[package]] name = "reth-rpc-builder" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "hyper", "jsonrpsee", @@ -6401,7 +6401,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "alloy-rlp", "assert_matches", @@ -6427,7 +6427,7 @@ dependencies = [ [[package]] name = "reth-rpc-types" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -6449,7 +6449,7 @@ dependencies = [ [[package]] name = "reth-rpc-types-compat" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "alloy-rlp", "reth-primitives", @@ -6458,7 +6458,7 @@ dependencies = [ [[package]] name = "reth-snapshot" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "assert_matches", "clap", @@ -6476,7 +6476,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "alloy-rlp", "aquamarine", @@ -6516,7 +6516,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "dyn-clone", "futures-util", @@ -6530,7 +6530,7 @@ dependencies = [ [[package]] name = "reth-tokio-util" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "tokio", "tokio-stream", @@ -6538,7 +6538,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "rolling-file", "tracing", @@ -6549,7 +6549,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "alloy-rlp", "aquamarine", @@ -6583,7 +6583,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" dependencies = [ "alloy-rlp", "auto_impl", diff --git a/Cargo.toml b/Cargo.toml index a308873d3fb1f..88d3defd508f3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -62,7 +62,7 @@ default-members = ["bin/reth"] resolver = "2" [workspace.package] -version = "0.1.0-alpha.10" +version = "0.1.0-alpha.11" edition = "2021" rust-version = "1.70" license = "MIT OR Apache-2.0" From 857ceebbd8a3b377147ab4bc10cead700527f0b0 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 22 Nov 2023 02:24:24 +0100 Subject: [PATCH 67/77] chore(deps): make c-kzg actually a feature of reth-primitives (#5525) --- Cargo.toml | 4 ++-- crates/primitives/Cargo.toml | 9 ++++----- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 88d3defd508f3..009997b8478a0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -135,8 +135,8 @@ reth-transaction-pool = { path = "crates/transaction-pool" } reth-trie = { path = "crates/trie" } # revm -revm = { git = "https://github.com/bluealloy/revm", branch = "reth_freeze" } -revm-primitives = { git = "https://github.com/bluealloy/revm", branch = "reth_freeze" } +revm = { git = "https://github.com/bluealloy/revm", branch = "reth_freeze", features = ["std", "secp256k1"], default-features = false } +revm-primitives = { git = "https://github.com/bluealloy/revm", branch = "reth_freeze", features = ["std"], default-features = false } # eth alloy-primitives = "0.4" diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 4a9cfcedc4ab0..8204636cec5b7 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -12,7 +12,8 @@ description = "Commonly used types in reth." 
# reth
reth-codecs.workspace = true
reth-rpc-types.workspace = true
-revm-primitives = { workspace = true, features = ["serde"] }
+revm-primitives.workspace = true
+revm.workspace = true

# ethereum
alloy-primitives = { workspace = true, features = ["rand", "rlp"] }
@@ -61,8 +62,6 @@ proptest = { workspace = true, optional = true }
proptest-derive = { workspace = true, optional = true }
strum = { workspace = true, features = ["derive"] }

-revm.workspace = true
-
[dev-dependencies]
serde_json.workspace = true
test-fuzz = "4"
@@ -87,10 +86,10 @@ pprof = { workspace = true, features = ["flamegraph", "frame-pointer", "criterion"] }

[features]
default = ["c-kzg"]
arbitrary = ["revm-primitives/arbitrary", "reth-rpc-types/arbitrary", "dep:arbitrary", "dep:proptest", "dep:proptest-derive"]
-c-kzg = ["revm-primitives/c-kzg", "dep:c-kzg"]
-test-utils = ["dep:plain_hasher", "dep:hash-db", "dep:ethers-core"]
+c-kzg = ["dep:c-kzg", "revm/c-kzg", "revm-primitives/c-kzg"]
clap = ["dep:clap"]
optimism = ["reth-codecs/optimism", "revm-primitives/optimism", "revm/optimism"]
+test-utils = ["dep:plain_hasher", "dep:hash-db", "dep:ethers-core"]

[[bench]]
name = "recover_ecdsa_crit"

From 0f1f47ac80f1e69fad4608322cab1c7352007c2c Mon Sep 17 00:00:00 2001
From: Roman Krasiuk
Date: Wed, 22 Nov 2023 04:56:44 -0800
Subject: [PATCH 68/77] test(provider): utility functions for initializing
 provider factory (#5530)

---
 crates/storage/provider/src/test_utils/mod.rs | 21 +++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/crates/storage/provider/src/test_utils/mod.rs b/crates/storage/provider/src/test_utils/mod.rs
index bbbe973908af5..0da47c47940ba 100644
--- a/crates/storage/provider/src/test_utils/mod.rs
+++ b/crates/storage/provider/src/test_utils/mod.rs
@@ -1,3 +1,11 @@
+use crate::ProviderFactory;
+use reth_db::{
+    test_utils::{create_test_rw_db, TempDatabase},
+    DatabaseEnv,
+};
+use reth_primitives::{ChainSpec, MAINNET};
+use std::sync::Arc;
+
 pub mod blocks;
 mod events;
 mod executor;
@@ -8,3 +16,16 @@ pub use events::TestCanonStateSubscriptions;
 pub use executor::{TestExecutor, TestExecutorFactory};
 pub use mock::{ExtendedAccount, MockEthProvider};
 pub use noop::NoopProvider;
+
+/// Creates test provider factory with mainnet chain spec.
+pub fn create_test_provider_factory() -> ProviderFactory<Arc<TempDatabase<DatabaseEnv>>> {
+    create_test_provider_factory_with_chain_spec(MAINNET.clone())
+}
+
+/// Creates test provider factory with provided chain spec.
+pub fn create_test_provider_factory_with_chain_spec(
+    chain_spec: Arc<ChainSpec>,
+) -> ProviderFactory<Arc<TempDatabase<DatabaseEnv>>> {
+    let db = create_test_rw_db();
+    ProviderFactory::new(db, chain_spec)
+}

From 3818dea392e857c648d3f71846076a7ae7c163b0 Mon Sep 17 00:00:00 2001
From: Roman Krasiuk
Date: Wed, 22 Nov 2023 06:54:33 -0800
Subject: [PATCH 69/77] meta: fix release draft cmd (#5529)

---
 .github/workflows/release.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 7dd8e1fbdbdac..945feefd7d019 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -185,7 +185,7 @@ jobs:
          )
          assets=()
          for asset in ./reth-*.tar.gz*; do
-              assets+=("-a" "$asset/$asset")
+              assets+=("$asset/$asset")
          done
          tag_name="${{ env.VERSION }}"
-          echo "$body" | gh release create --draft "${assets[@]}" -F "-" "$tag_name"
+          echo "$body" | gh release create --draft -F "-" "$tag_name" "${assets[@]}"

From 3c7f32d839e84fb438eb4dd2e8d2111a21ed7c9e Mon Sep 17 00:00:00 2001
From: Roman Krasiuk
Date: Wed, 22 Nov 2023 07:09:53 -0800
Subject: [PATCH 70/77] chore(tree): migrate tree externals to
 `ProviderFactory` (#5531)

---
 bin/reth/src/debug_cmd/build_block.rs         | 10 +--
 bin/reth/src/node/mod.rs                      | 25 +++---
 crates/blockchain-tree/src/blockchain_tree.rs | 78 ++++++++++---------
 crates/blockchain-tree/src/chain.rs           |  4 +-
 crates/blockchain-tree/src/externals.rs       | 23 ++----
 .../consensus/beacon/src/engine/test_utils.rs |  7 +-
 6 files changed, 69 insertions(+), 78 deletions(-)

diff --git a/bin/reth/src/debug_cmd/build_block.rs b/bin/reth/src/debug_cmd/build_block.rs
index 082c9a7067271..62e1d5d66bf8c 100644
--- a/bin/reth/src/debug_cmd/build_block.rs
+++ b/bin/reth/src/debug_cmd/build_block.rs
@@ -142,15 +142,15 @@ impl Command {
         // initialize the database
         let db = Arc::new(init_db(db_path, self.db.log_level)?);
+        let provider_factory = ProviderFactory::new(Arc::clone(&db), Arc::clone(&self.chain));
         let consensus: Arc<dyn Consensus> = Arc::new(BeaconConsensus::new(Arc::clone(&self.chain)));

         // configure blockchain tree
         let tree_externals = TreeExternals::new(
-            Arc::clone(&db),
+            provider_factory.clone(),
             Arc::clone(&consensus),
             Factory::new(self.chain.clone()),
-            Arc::clone(&self.chain),
         );
         let tree = BlockchainTree::new(tree_externals, BlockchainTreeConfig::default(), None)?;
         let blockchain_tree = ShareableBlockchainTree::new(tree);
@@ -159,8 +159,8 @@ impl Command {
         let best_block =
             self.lookup_best_block(Arc::clone(&db)).wrap_err("the head block is missing")?;

-        let factory = ProviderFactory::new(Arc::clone(&db), Arc::clone(&self.chain));
-        let blockchain_db = BlockchainProvider::new(factory.clone(), blockchain_tree.clone())?;
+        let blockchain_db =
+            BlockchainProvider::new(provider_factory.clone(), blockchain_tree.clone())?;
         let blob_store = InMemoryBlobStore::default();
         let validator = TransactionValidationTaskExecutor::eth_builder(Arc::clone(&self.chain))
@@ -278,7 +278,7 @@ impl Command {
         debug!(target: "reth::cli", ?state, "Executed block");

         // Attempt to insert new block without committing
-        let provider_rw = factory.provider_rw()?;
+        let provider_rw = provider_factory.provider_rw()?;
         provider_rw.append_blocks_with_bundle_state(
             Vec::from([block_with_senders]),
             state,

diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs
index 316168f4eb284..a429228365912 100644
--- a/bin/reth/src/node/mod.rs
+++ b/bin/reth/src/node/mod.rs
@@ -259,6 +259,16 @@ impl NodeCommand {
         let db = Arc::new(init_db(&db_path, self.db.log_level)?.with_metrics());
         info!(target:
"reth::cli", "Database opened"); + // configure snapshotter + let snapshotter = reth_snapshot::Snapshotter::new( + db.clone(), + data_dir.snapshots_path(), + self.chain.clone(), + self.chain.snapshot_block_interval, + )?; + let provider_factory = ProviderFactory::new(Arc::clone(&db), Arc::clone(&self.chain)) + .with_snapshots(data_dir.snapshots_path(), snapshotter.highest_snapshot_receiver()); + self.start_metrics_endpoint(prometheus_handle, Arc::clone(&db)).await?; debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis"); @@ -281,10 +291,9 @@ impl NodeCommand { // configure blockchain tree let tree_externals = TreeExternals::new( - Arc::clone(&db), + provider_factory.clone(), Arc::clone(&consensus), Factory::new(self.chain.clone()), - Arc::clone(&self.chain), ); let tree = BlockchainTree::new( tree_externals, @@ -299,18 +308,8 @@ impl NodeCommand { // fetch the head block from the database let head = self.lookup_head(Arc::clone(&db)).wrap_err("the head block is missing")?; - // configure snapshotter - let snapshotter = reth_snapshot::Snapshotter::new( - db.clone(), - data_dir.snapshots_path(), - self.chain.clone(), - self.chain.snapshot_block_interval, - )?; - // setup the blockchain provider - let factory = ProviderFactory::new(Arc::clone(&db), Arc::clone(&self.chain)) - .with_snapshots(data_dir.snapshots_path(), snapshotter.highest_snapshot_receiver()); - let blockchain_db = BlockchainProvider::new(factory, blockchain_tree.clone())?; + let blockchain_db = BlockchainProvider::new(provider_factory, blockchain_tree.clone())?; let blob_store = InMemoryBlobStore::default(); let validator = TransactionValidationTaskExecutor::eth_builder(Arc::clone(&self.chain)) .with_head_timestamp(head.timestamp) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 2aec2d2454fe1..8d399dda2599d 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -24,7 +24,7 @@ use reth_provider::{ chain::{ChainSplit, ChainSplitTarget}, BlockExecutionWriter, BlockNumReader, BlockWriter, BundleStateWithReceipts, CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, Chain, - DatabaseProvider, DisplayBlocksChain, ExecutorFactory, HeaderProvider, + ChainSpecProvider, DisplayBlocksChain, ExecutorFactory, HeaderProvider, }; use reth_stages::{MetricEvent, MetricEventsSender}; use std::{collections::BTreeMap, sync::Arc}; @@ -161,7 +161,7 @@ impl BlockchainTree { } // check if block is inside database - if self.externals.database().provider()?.block_number(block.hash)?.is_some() { + if self.externals.provider_factory.provider()?.block_number(block.hash)?.is_some() { return Ok(Some(BlockStatus::Valid)) } @@ -380,8 +380,9 @@ impl BlockchainTree { // https://github.com/paradigmxyz/reth/issues/1713 let (block_status, chain) = { - let factory = self.externals.database(); - let provider = factory + let provider = self + .externals + .provider_factory .provider() .map_err(|err| InsertBlockError::new(block.block.clone(), err.into()))?; @@ -397,7 +398,12 @@ impl BlockchainTree { })?; // Pass the parent total difficulty to short-circuit unnecessary calculations. 
-        if !self.externals.chain_spec.fork(Hardfork::Paris).active_at_ttd(parent_td, U256::ZERO)
+        if !self
+            .externals
+            .provider_factory
+            .chain_spec()
+            .fork(Hardfork::Paris)
+            .active_at_ttd(parent_td, U256::ZERO)
         {
             return Err(InsertBlockError::execution_error(
                 BlockValidationError::BlockPreMerge { hash: block.hash }.into(),
@@ -881,8 +887,7 @@
         // canonical, but in the db. If it is in a sidechain, it is not canonical. If it is not in
         // the db, then it is not canonical.

-        let factory = self.externals.database();
-        let provider = factory.provider()?;
+        let provider = self.externals.provider_factory.provider()?;

         let mut header = None;
         if let Some(num) = self.block_indices().get_canonical_block_number(hash) {
@@ -930,12 +935,18 @@
         if let Some(header) = canonical_header {
             info!(target: "blockchain_tree", ?block_hash, "Block is already canonical, ignoring.");
             // TODO: this could be fetched from the chainspec first
-            let td = self.externals.database().provider()?.header_td(block_hash)?.ok_or(
+            let td = self.externals.provider_factory.provider()?.header_td(block_hash)?.ok_or(
                 CanonicalError::from(BlockValidationError::MissingTotalDifficulty {
                     hash: *block_hash,
                 }),
             )?;
-            if !self.externals.chain_spec.fork(Hardfork::Paris).active_at_ttd(td, U256::ZERO) {
+            if !self
+                .externals
+                .provider_factory
+                .chain_spec()
+                .fork(Hardfork::Paris)
+                .active_at_ttd(td, U256::ZERO)
+            {
                 return Err(CanonicalError::from(BlockValidationError::BlockPreMerge {
                     hash: *block_hash,
                 })
@@ -1093,14 +1104,11 @@
     /// Write the given chain to the database as canonical.
     fn commit_canonical_to_database(&self, chain: Chain) -> RethResult<()> {
-        let provider = DatabaseProvider::new_rw(
-            self.externals.db.tx_mut()?,
-            self.externals.chain_spec.clone(),
-        );
+        let provider_rw = self.externals.provider_factory.provider_rw()?;

         let (blocks, state) = chain.into_inner();

-        provider
+        provider_rw
             .append_blocks_with_bundle_state(
                 blocks.into_blocks().collect(),
                 state,
@@ -1108,7 +1116,7 @@
             )
             .map_err(|e| BlockExecutionError::CanonicalCommit { inner: e.to_string() })?;

-        provider.commit()?;
+        provider_rw.commit()?;

         Ok(())
     }
@@ -1141,21 +1149,20 @@
         revert_until: BlockNumber,
     ) -> RethResult<Option<Chain>> {
         // read data that is needed for new sidechain
+        let provider_rw = self.externals.provider_factory.provider_rw()?;

-        let provider = DatabaseProvider::new_rw(
-            self.externals.db.tx_mut()?,
-            self.externals.chain_spec.clone(),
-        );
-
-        let tip = provider.last_block_number()?;
+        let tip = provider_rw.last_block_number()?;
         let revert_range = (revert_until + 1)..=tip;
         info!(target: "blockchain_tree", "Unwinding canonical chain blocks: {:?}", revert_range);
         // read block and execution result from database. and remove traces of block from tables.
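// Example (editor's sketch, not part of the patch): the read-write pattern the
// hunks above migrate to. Providers come from the factory and are committed
// explicitly; `provider_factory` is assumed to be in scope.
let provider_rw = provider_factory.provider_rw()?;
// ... append or take canonical blocks through `provider_rw`, as shown above ...
provider_rw.commit()?;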
-        let blocks_and_execution = provider
-            .take_block_and_execution_range(self.externals.chain_spec.as_ref(), revert_range)
+        let blocks_and_execution = provider_rw
+            .take_block_and_execution_range(
+                self.externals.provider_factory.chain_spec().as_ref(),
+                revert_range,
+            )
             .map_err(|e| BlockExecutionError::CanonicalRevert { inner: e.to_string() })?;

-        provider.commit()?;
+        provider_rw.commit()?;

         if blocks_and_execution.is_empty() {
             Ok(None)
@@ -1197,18 +1204,16 @@
     use crate::block_buffer::BufferedBlocks;
     use assert_matches::assert_matches;
     use linked_hash_set::LinkedHashSet;
-    use reth_db::{
-        tables,
-        test_utils::{create_test_rw_db, TempDatabase},
-        transaction::DbTxMut,
-        DatabaseEnv,
-    };
+    use reth_db::{tables, test_utils::TempDatabase, transaction::DbTxMut, DatabaseEnv};
     use reth_interfaces::test_utils::TestConsensus;
     use reth_primitives::{
         constants::EMPTY_ROOT_HASH, stage::StageCheckpoint, ChainSpecBuilder, B256, MAINNET,
     };
     use reth_provider::{
-        test_utils::{blocks::BlockChainTestData, TestExecutorFactory},
+        test_utils::{
+            blocks::BlockChainTestData, create_test_provider_factory_with_chain_spec,
+            TestExecutorFactory,
+        },
         BlockWriter, BundleStateWithReceipts, ProviderFactory,
     };
     use std::{
@@ -1219,8 +1224,6 @@
     fn setup_externals(
         exec_res: Vec<BundleStateWithReceipts>,
     ) -> TreeExternals<Arc<TempDatabase<DatabaseEnv>>, TestExecutorFactory> {
-        let db = create_test_rw_db();
-        let consensus = Arc::new(TestConsensus::default());
         let chain_spec = Arc::new(
             ChainSpecBuilder::default()
                 .chain(MAINNET.chain)
                 .genesis(MAINNET.genesis.clone())
                 .shanghai_activated()
                 .build(),
         );
+        let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone());
+        let consensus = Arc::new(TestConsensus::default());
         let executor_factory = TestExecutorFactory::new(chain_spec.clone());
         executor_factory.extend(exec_res);

-        TreeExternals::new(db, consensus, executor_factory, chain_spec)
+        TreeExternals::new(provider_factory, consensus, executor_factory)
     }

-    fn setup_genesis<DB: Database>(db: DB, mut genesis: SealedBlock) {
+    fn setup_genesis<DB: Database>(factory: &ProviderFactory<DB>, mut genesis: SealedBlock) {
         // insert genesis to db.

         genesis.header.header.number = 10;
         genesis.header.header.state_root = EMPTY_ROOT_HASH;
-        let factory = ProviderFactory::new(&db, MAINNET.clone());
         let provider = factory.provider_rw().unwrap();

         provider.insert_block(genesis, None, None).unwrap();
@@ -1339,7 +1343,7 @@
         let externals = setup_externals(vec![exec2.clone(), exec1.clone(), exec2, exec1]);

         // last finalized block would be number 9.
-        setup_genesis(externals.db.clone(), genesis);
+        setup_genesis(&externals.provider_factory, genesis);

         // make tree
         let config = BlockchainTreeConfig::new(1, 2, 3, 2);

diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs
index c52ccb3043f72..1b01b2787931f 100644
--- a/crates/blockchain-tree/src/chain.rs
+++ b/crates/blockchain-tree/src/chain.rs
@@ -206,9 +206,9 @@ impl AppendableChain {
         let block = block.unseal();

         // get the state provider.
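// Example (editor's sketch, not part of the patch): historical state is now
// fetched straight from the ProviderFactory, mirroring the replacement in the
// hunk just below; `provider_factory` and `canonical_fork_number` are assumed.
let state_provider = provider_factory.history_by_block_number(canonical_fork_number)?;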
-        let db = externals.database();
         let canonical_fork = bundle_state_data_provider.canonical_fork();
-        let state_provider = db.history_by_block_number(canonical_fork.number)?;
+        let state_provider =
+            externals.provider_factory.history_by_block_number(canonical_fork.number)?;

         let provider = BundleStateProvider::new(state_provider, bundle_state_data_provider);

diff --git a/crates/blockchain-tree/src/externals.rs b/crates/blockchain-tree/src/externals.rs
index 06cd694d3b0dd..9bd12195aa447 100644
--- a/crates/blockchain-tree/src/externals.rs
+++ b/crates/blockchain-tree/src/externals.rs
@@ -2,7 +2,7 @@

 use reth_db::{cursor::DbCursorRO, database::Database, tables, transaction::DbTx};
 use reth_interfaces::{consensus::Consensus, RethResult};
-use reth_primitives::{BlockHash, BlockNumber, ChainSpec};
+use reth_primitives::{BlockHash, BlockNumber};
 use reth_provider::ProviderFactory;
 use std::{collections::BTreeMap, sync::Arc};

@@ -17,34 +17,26 @@ use std::{collections::BTreeMap, sync::Arc};
 /// - The chain spec
 #[derive(Debug)]
 pub struct TreeExternals<DB, EF> {
-    /// The database, used to commit the canonical chain, or unwind it.
-    pub(crate) db: DB,
+    /// The provider factory, used to commit the canonical chain, or unwind it.
+    pub(crate) provider_factory: ProviderFactory<DB>,
     /// The consensus engine.
     pub(crate) consensus: Arc<dyn Consensus>,
     /// The executor factory to execute blocks with.
     pub(crate) executor_factory: EF,
-    /// The chain spec.
-    pub(crate) chain_spec: Arc<ChainSpec>,
 }

 impl<DB, EF> TreeExternals<DB, EF> {
     /// Create new tree externals.
     pub fn new(
-        db: DB,
+        provider_factory: ProviderFactory<DB>,
         consensus: Arc<dyn Consensus>,
         executor_factory: EF,
-        chain_spec: Arc<ChainSpec>,
     ) -> Self {
-        Self { db, consensus, executor_factory, chain_spec }
+        Self { provider_factory, consensus, executor_factory }
     }
 }

 impl<DB: Database, EF> TreeExternals<DB, EF> {
-    /// Return shareable database helper structure.
-    pub fn database(&self) -> ProviderFactory<&DB> {
-        ProviderFactory::new(&self.db, self.chain_spec.clone())
-    }
-
     /// Fetches the latest canonical block hashes by walking backwards from the head.
     ///
     /// Returns the hashes sorted by increasing block numbers
@@ -53,8 +45,9 @@
         num_hashes: usize,
     ) -> RethResult<BTreeMap<BlockNumber, BlockHash>> {
         Ok(self
-            .db
-            .tx()?
+            .provider_factory
+            .provider()?
+            .tx_ref()
             .cursor_read::<tables::CanonicalHeaders>()?
             .walk_back(None)?
.take(num_hashes) diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index b916d3e89c857..c37e142a624ea 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -519,12 +519,7 @@ where let pipeline = pipeline.build(db.clone(), self.base_config.chain_spec.clone()); // Setup blockchain tree - let externals = TreeExternals::new( - db.clone(), - consensus, - executor_factory, - self.base_config.chain_spec.clone(), - ); + let externals = TreeExternals::new(provider_factory.clone(), consensus, executor_factory); let config = BlockchainTreeConfig::new(1, 2, 3, 2); let tree = ShareableBlockchainTree::new( BlockchainTree::new(externals, config, None).expect("failed to create tree"), From 5e378b13cab7bb2529e8cf44823639db82a5aed5 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Wed, 22 Nov 2023 07:29:19 -0800 Subject: [PATCH 71/77] test(provider): instantiate provider with util function (#5534) --- .../bundle_state_with_receipts.rs | 16 +++---- .../provider/src/providers/database/mod.rs | 39 +++++----------- .../provider/src/providers/snapshot/mod.rs | 44 +++++++++---------- 3 files changed, 37 insertions(+), 62 deletions(-) diff --git a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs index a2858142971c9..2a65909fdb65f 100644 --- a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs +++ b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs @@ -381,7 +381,7 @@ impl BundleStateWithReceipts { #[cfg(test)] mod tests { use super::*; - use crate::{AccountReader, BundleStateWithReceipts, ProviderFactory}; + use crate::{test_utils::create_test_provider_factory, AccountReader, BundleStateWithReceipts}; use reth_db::{ cursor::{DbCursorRO, DbDupCursorRO}, database::Database, @@ -391,7 +391,7 @@ mod tests { transaction::DbTx, }; use reth_primitives::{ - revm::compat::into_reth_acc, Address, Receipt, Receipts, StorageEntry, B256, MAINNET, U256, + revm::compat::into_reth_acc, Address, Receipt, Receipts, StorageEntry, B256, U256, }; use reth_trie::test_utils::state_root; use revm::{ @@ -413,8 +413,7 @@ mod tests { #[test] fn write_to_db_account_info() { - let db = create_test_rw_db(); - let factory = ProviderFactory::new(db, MAINNET.clone()); + let factory = create_test_provider_factory(); let provider = factory.provider_rw().unwrap(); let address_a = Address::ZERO; @@ -552,8 +551,7 @@ mod tests { #[test] fn write_to_db_storage() { - let db = create_test_rw_db(); - let factory = ProviderFactory::new(db, MAINNET.clone()); + let factory = create_test_provider_factory(); let provider = factory.provider_rw().unwrap(); let address_a = Address::ZERO; @@ -741,8 +739,7 @@ mod tests { #[test] fn write_to_db_multiple_selfdestructs() { - let db = create_test_rw_db(); - let factory = ProviderFactory::new(db, MAINNET.clone()); + let factory = create_test_provider_factory(); let provider = factory.provider_rw().unwrap(); let address1 = Address::random(); @@ -1050,8 +1047,7 @@ mod tests { #[test] fn storage_change_after_selfdestruct_within_block() { - let db = create_test_rw_db(); - let factory = ProviderFactory::new(db, MAINNET.clone()); + let factory = create_test_provider_factory(); let provider = factory.provider_rw().unwrap(); let address1 = Address::random(); diff --git a/crates/storage/provider/src/providers/database/mod.rs 
b/crates/storage/provider/src/providers/database/mod.rs index 6b6c0842d0cd1..0d1ca70ab465f 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -490,18 +490,13 @@ impl PruneCheckpointReader for ProviderFactory { mod tests { use super::ProviderFactory; use crate::{ - BlockHashReader, BlockNumReader, BlockWriter, HeaderSyncGapProvider, HeaderSyncMode, - TransactionsProvider, + test_utils::create_test_provider_factory, BlockHashReader, BlockNumReader, BlockWriter, + HeaderSyncGapProvider, HeaderSyncMode, TransactionsProvider, }; use alloy_rlp::Decodable; use assert_matches::assert_matches; use rand::Rng; - use reth_db::{ - tables, - test_utils::{create_test_rw_db, ERROR_TEMPDIR}, - transaction::DbTxMut, - DatabaseEnv, - }; + use reth_db::{tables, test_utils::ERROR_TEMPDIR, transaction::DbTxMut, DatabaseEnv}; use reth_interfaces::{ provider::ProviderError, test_utils::{ @@ -518,17 +513,13 @@ mod tests { #[test] fn common_history_provider() { - let chain_spec = ChainSpecBuilder::mainnet().build(); - let db = create_test_rw_db(); - let provider = ProviderFactory::new(db, Arc::new(chain_spec)); - let _ = provider.latest(); + let factory = create_test_provider_factory(); + let _ = factory.latest(); } #[test] fn default_chain_info() { - let chain_spec = ChainSpecBuilder::mainnet().build(); - let db = create_test_rw_db(); - let factory = ProviderFactory::new(db, Arc::new(chain_spec)); + let factory = create_test_provider_factory(); let provider = factory.provider().unwrap(); let chain_info = provider.chain_info().expect("should be ok"); @@ -538,9 +529,7 @@ mod tests { #[test] fn provider_flow() { - let chain_spec = ChainSpecBuilder::mainnet().build(); - let db = create_test_rw_db(); - let factory = ProviderFactory::new(db, Arc::new(chain_spec)); + let factory = create_test_provider_factory(); let provider = factory.provider().unwrap(); provider.block_hash(0).unwrap(); let provider_rw = factory.provider_rw().unwrap(); @@ -567,9 +556,7 @@ mod tests { #[test] fn insert_block_with_prune_modes() { - let chain_spec = ChainSpecBuilder::mainnet().build(); - let db = create_test_rw_db(); - let factory = ProviderFactory::new(db, Arc::new(chain_spec)); + let factory = create_test_provider_factory(); let mut block_rlp = 
hex!("f9025ff901f7a0c86e8cc0310ae7c531c758678ddbfd16fc51c8cef8cec650b032de9869e8b94fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa050554882fbbda2c2fd93fdc466db9946ea262a67f7a76cc169e714f105ab583da00967f09ef1dfed20c0eacfaa94d5cd4002eda3242ac47eae68972d07b106d192a0e3c8b47fbfc94667ef4cceb17e5cc21e3b1eebd442cebb27f07562b33836290db90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000001830f42408238108203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f862f860800a83061a8094095e7baea6a6c7c4c2dfeb977efac326af552d8780801ba072ed817487b84ba367d15d2f039b5fc5f087d0a8882fbdf73e8cb49357e1ce30a0403d800545b8fc544f92ce8124e2255f8c3c6af93f28243a120585d4c4c6a2a3c0").as_slice(); let block = SealedBlock::decode(&mut block_rlp).unwrap(); @@ -605,9 +592,7 @@ mod tests { #[test] fn get_take_block_transaction_range_recover_senders() { - let chain_spec = ChainSpecBuilder::mainnet().build(); - let db = create_test_rw_db(); - let factory = ProviderFactory::new(db, Arc::new(chain_spec)); + let factory = create_test_provider_factory(); let mut rng = generators::rng(); let block = random_block(&mut rng, 0, None, Some(3), None); @@ -646,12 +631,10 @@ mod tests { #[test] fn header_sync_gap_lookup() { - let mut rng = generators::rng(); - let chain_spec = ChainSpecBuilder::mainnet().build(); - let db = create_test_rw_db(); - let factory = ProviderFactory::new(db, Arc::new(chain_spec)); + let factory = create_test_provider_factory(); let provider = factory.provider_rw().unwrap(); + let mut rng = generators::rng(); let consensus_tip = rng.gen(); let (_tip_tx, tip_rx) = watch::channel(consensus_tip); let mode = HeaderSyncMode::Tip(tip_rx); diff --git a/crates/storage/provider/src/providers/snapshot/mod.rs b/crates/storage/provider/src/providers/snapshot/mod.rs index a5244c78e8910..26f180e853de4 100644 --- a/crates/storage/provider/src/providers/snapshot/mod.rs +++ b/crates/storage/provider/src/providers/snapshot/mod.rs @@ -41,19 +41,17 @@ impl Deref for LoadedJar { #[cfg(test)] mod test { use super::*; - use crate::{HeaderProvider, ProviderFactory}; + use crate::{test_utils::create_test_provider_factory, HeaderProvider}; use rand::{self, seq::SliceRandom}; use reth_db::{ cursor::DbCursorRO, - database::Database, snapshot::create_snapshot_T1_T2_T3, - test_utils::create_test_rw_db, transaction::{DbTx, DbTxMut}, - CanonicalHeaders, DatabaseError, HeaderNumbers, HeaderTD, Headers, RawTable, + CanonicalHeaders, HeaderNumbers, HeaderTD, Headers, RawTable, }; use reth_interfaces::test_utils::generators::{self, random_header_range}; use reth_nippy_jar::NippyJar; - use reth_primitives::{BlockNumber, B256, MAINNET, U256}; + use reth_primitives::{BlockNumber, B256, U256}; #[test] fn test_snap() { @@ -64,8 +62,7 @@ mod test { SegmentHeader::new(range.clone(), range.clone(), SnapshotSegment::Headers); // Data sources - let db = create_test_rw_db(); - let factory = ProviderFactory::new(&db, MAINNET.clone()); + let factory = create_test_provider_factory(); let snap_path = 
tempfile::tempdir().unwrap();
        let snap_file = snap_path.path().join(SnapshotSegment::Headers.filename(&range, &range));
            B256::random(),
        );

-        db.update(|tx| -> Result<(), DatabaseError> {
-            let mut td = U256::ZERO;
-            for header in headers.clone() {
-                td += header.header.difficulty;
-                let hash = header.hash();
-
-                tx.put::<CanonicalHeaders>(header.number, hash)?;
-                tx.put::<Headers>(header.number, header.clone().unseal())?;
-                tx.put::<HeaderTD>(header.number, td.into())?;
-                tx.put::<HeaderNumbers>(hash, header.number)?;
-            }
-            Ok(())
-        })
-        .unwrap()
-        .unwrap();
+        let mut provider_rw = factory.provider_rw().unwrap();
+        let tx = provider_rw.tx_mut();
+        let mut td = U256::ZERO;
+        for header in headers.clone() {
+            td += header.header.difficulty;
+            let hash = header.hash();
+
+            tx.put::<CanonicalHeaders>(header.number, hash).unwrap();
+            tx.put::<Headers>(header.number, header.clone().unseal()).unwrap();
+            tx.put::<HeaderTD>(header.number, td.into()).unwrap();
+            tx.put::<HeaderNumbers>(hash, header.number).unwrap();
+        }
+        provider_rw.commit().unwrap();

        // Create Snapshot
        {
@@ -107,7 +102,8 @@
            nippy_jar = nippy_jar.with_cuckoo_filter(row_count as usize + 10).with_fmph();
        }

-        let tx = db.tx().unwrap();
+        let provider = factory.provider().unwrap();
+        let tx = provider.tx_ref();

        // Hacky type inference. TODO fix
        let mut none_vec = Some(vec![vec![vec![0u8]].into_iter()]);
@@ -127,7 +123,7 @@
            BlockNumber,
            SegmentHeader,
        >(
-            &tx, range, None, none_vec, Some(hashes), row_count as usize, &mut nippy_jar
+            tx, range, None, none_vec, Some(hashes), row_count as usize, &mut nippy_jar
        )
        .unwrap();
    }

From 5ae4fd1c658b3be4d651d44c0af179a9c92ae1bb Mon Sep 17 00:00:00 2001
From: Roman Krasiuk
Date: Wed, 22 Nov 2023 08:40:56 -0800
Subject: [PATCH 72/77] chore(sync): migrate pipeline to `ProviderFactory`
 (#5532)

---
 bin/reth/src/chain/import.rs                  |  21 +--
 bin/reth/src/debug_cmd/execution.rs           |  20 +--
 bin/reth/src/node/mod.rs                      |  31 ++--
 bin/reth/src/stage/run.rs                     |   2 +-
 crates/consensus/beacon/src/engine/sync.rs    |   8 +-
 .../consensus/beacon/src/engine/test_utils.rs |   2 +-
 crates/interfaces/src/p2p/error.rs            |   8 +-
 crates/prune/src/segments/account_history.rs  |  34 ++--
 crates/prune/src/segments/headers.rs          |  38 ++--
 crates/prune/src/segments/receipts.rs         |  36 ++--
 crates/prune/src/segments/receipts_by_logs.rs |  32 ++--
 crates/prune/src/segments/sender_recovery.rs  |  36 ++--
 crates/prune/src/segments/storage_history.rs  |  34 ++--
 .../prune/src/segments/transaction_lookup.rs  |  36 ++--
 crates/prune/src/segments/transactions.rs     |  30 ++--
 crates/snapshot/src/snapshotter.rs            |  26 ++-
 crates/stages/benches/criterion.rs            |  10 +-
 .../stages/benches/setup/account_hashing.rs   |  10 +-
 crates/stages/benches/setup/mod.rs            |  19 +-
 crates/stages/src/lib.rs                      |  10 +-
 crates/stages/src/pipeline/builder.rs         |  12 +-
 crates/stages/src/pipeline/mod.rs             |  62 +++----
 crates/stages/src/sets.rs                     |   7 +-
 crates/stages/src/stage.rs                    |   4 +-
 crates/stages/src/stages/bodies.rs            |  84 ++++-----
 crates/stages/src/stages/execution.rs         |  33 ++--
 crates/stages/src/stages/finish.rs            |  16 +-
 crates/stages/src/stages/hashing_account.rs   |  38 ++--
 crates/stages/src/stages/hashing_storage.rs   |  49 ++---
 crates/stages/src/stages/headers.rs           |  33 ++--
 .../src/stages/index_account_history.rs       | 134 +++++++-------
 .../src/stages/index_storage_history.rs       | 134 +++++++-------
 crates/stages/src/stages/merkle.rs            |  43 ++---
 crates/stages/src/stages/mod.rs               |  47 +++--
 crates/stages/src/stages/sender_recovery.rs   |  54 +++---
 crates/stages/src/stages/total_difficulty.rs  |  31 ++--
 crates/stages/src/stages/tx_lookup.rs         |  61 +++----
crates/stages/src/test_utils/mod.rs | 2 +- crates/stages/src/test_utils/runner.rs | 20 +-- crates/stages/src/test_utils/stage.rs | 4 +- crates/stages/src/test_utils/test_db.rs | 167 +++++------------- 41 files changed, 702 insertions(+), 776 deletions(-) diff --git a/bin/reth/src/chain/import.rs b/bin/reth/src/chain/import.rs index 2d1800fc36c0a..0486358f20277 100644 --- a/bin/reth/src/chain/import.rs +++ b/bin/reth/src/chain/import.rs @@ -86,6 +86,7 @@ impl ImportCommand { info!(target: "reth::cli", path = ?db_path, "Opening database"); let db = Arc::new(init_db(db_path, self.db.log_level)?); info!(target: "reth::cli", "Database opened"); + let provider_factory = ProviderFactory::new(db.clone(), self.chain.clone()); debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis"); @@ -102,15 +103,15 @@ impl ImportCommand { let tip = file_client.tip().expect("file client has no tip"); info!(target: "reth::cli", "Chain file imported"); - let (mut pipeline, events) = - self.build_import_pipeline(config, Arc::clone(&db), &consensus, file_client).await?; + let (mut pipeline, events) = self + .build_import_pipeline(config, provider_factory.clone(), &consensus, file_client) + .await?; // override the tip pipeline.set_tip(tip); debug!(target: "reth::cli", ?tip, "Tip manually set"); - let factory = ProviderFactory::new(db.clone(), self.chain.clone()); - let provider = factory.provider()?; + let provider = provider_factory.provider()?; let latest_block_number = provider.get_stage_checkpoint(StageId::Finish)?.map(|ch| ch.block_number); @@ -130,7 +131,7 @@ impl ImportCommand { async fn build_import_pipeline( &self, config: Config, - db: DB, + provider_factory: ProviderFactory, consensus: &Arc, file_client: Arc, ) -> eyre::Result<(Pipeline, impl Stream)> @@ -147,11 +148,7 @@ impl ImportCommand { .into_task(); let body_downloader = BodiesDownloaderBuilder::from(config.stages.bodies) - .build( - file_client.clone(), - consensus.clone(), - ProviderFactory::new(db.clone(), self.chain.clone()), - ) + .build(file_client.clone(), consensus.clone(), provider_factory.clone()) .into_task(); let (tip_tx, tip_rx) = watch::channel(B256::ZERO); @@ -164,7 +161,7 @@ impl ImportCommand { .with_max_block(max_block) .add_stages( DefaultStages::new( - ProviderFactory::new(db.clone(), self.chain.clone()), + provider_factory.clone(), HeaderSyncMode::Tip(tip_rx), consensus.clone(), header_downloader, @@ -194,7 +191,7 @@ impl ImportCommand { config.prune.map(|prune| prune.segments).unwrap_or_default(), )), ) - .build(db, self.chain.clone()); + .build(provider_factory); let events = pipeline.events().map(Into::into); diff --git a/bin/reth/src/debug_cmd/execution.rs b/bin/reth/src/debug_cmd/execution.rs index 8a91ca73f09cb..58a7b31197953 100644 --- a/bin/reth/src/debug_cmd/execution.rs +++ b/bin/reth/src/debug_cmd/execution.rs @@ -89,7 +89,7 @@ impl Command { config: &Config, client: Client, consensus: Arc, - db: DB, + provider_factory: ProviderFactory, task_executor: &TaskExecutor, ) -> eyre::Result> where @@ -102,11 +102,7 @@ impl Command { .into_task_with(task_executor); let body_downloader = BodiesDownloaderBuilder::from(config.stages.bodies) - .build( - client, - Arc::clone(&consensus), - ProviderFactory::new(db.clone(), self.chain.clone()), - ) + .build(client, Arc::clone(&consensus), provider_factory.clone()) .into_task_with(task_executor); let stage_conf = &config.stages; @@ -119,7 +115,7 @@ impl Command { .with_tip_sender(tip_tx) .add_stages( DefaultStages::new( - 
ProviderFactory::new(db.clone(), self.chain.clone()), + provider_factory.clone(), header_mode, Arc::clone(&consensus), header_downloader, @@ -148,7 +144,7 @@ impl Command { config.prune.as_ref().map(|prune| prune.segments.clone()).unwrap_or_default(), )), ) - .build(db, self.chain.clone()); + .build(provider_factory); Ok(pipeline) } @@ -206,6 +202,7 @@ impl Command { let db_path = data_dir.db_path(); fs::create_dir_all(&db_path)?; let db = Arc::new(init_db(db_path, self.db.log_level)?); + let provider_factory = ProviderFactory::new(db.clone(), self.chain.clone()); debug!(target: "reth::cli", chain=%self.chain.chain, genesis=?self.chain.genesis_hash(), "Initializing genesis"); init_genesis(db.clone(), self.chain.clone())?; @@ -231,12 +228,11 @@ impl Command { &config, fetch_client.clone(), Arc::clone(&consensus), - db.clone(), + provider_factory.clone(), &ctx.task_executor, )?; - let factory = ProviderFactory::new(db.clone(), self.chain.clone()); - let provider = factory.provider()?; + let provider = provider_factory.provider()?; let latest_block_number = provider.get_stage_checkpoint(StageId::Finish)?.map(|ch| ch.block_number); @@ -270,7 +266,7 @@ impl Command { // Unwind the pipeline without committing. { - factory + provider_factory .provider_rw()? .take_block_and_execution_range(&self.chain, next_block..=target_block)?; } diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index a429228365912..e7f559b6fe66f 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -259,14 +259,16 @@ impl NodeCommand { let db = Arc::new(init_db(&db_path, self.db.log_level)?.with_metrics()); info!(target: "reth::cli", "Database opened"); + let mut provider_factory = ProviderFactory::new(Arc::clone(&db), Arc::clone(&self.chain)); + // configure snapshotter let snapshotter = reth_snapshot::Snapshotter::new( - db.clone(), + provider_factory.clone(), data_dir.snapshots_path(), - self.chain.clone(), self.chain.snapshot_block_interval, )?; - let provider_factory = ProviderFactory::new(Arc::clone(&db), Arc::clone(&self.chain)) + + provider_factory = provider_factory .with_snapshots(data_dir.snapshots_path(), snapshotter.highest_snapshot_receiver()); self.start_metrics_endpoint(prometheus_handle, Arc::clone(&db)).await?; @@ -309,7 +311,8 @@ impl NodeCommand { let head = self.lookup_head(Arc::clone(&db)).wrap_err("the head block is missing")?; // setup the blockchain provider - let blockchain_db = BlockchainProvider::new(provider_factory, blockchain_tree.clone())?; + let blockchain_db = + BlockchainProvider::new(provider_factory.clone(), blockchain_tree.clone())?; let blob_store = InMemoryBlobStore::default(); let validator = TransactionValidationTaskExecutor::eth_builder(Arc::clone(&self.chain)) .with_head_timestamp(head.timestamp) @@ -417,7 +420,7 @@ impl NodeCommand { &config, client.clone(), Arc::clone(&consensus), - db.clone(), + provider_factory, &ctx.task_executor, sync_metrics_tx, prune_config.clone(), @@ -437,7 +440,7 @@ impl NodeCommand { &config, network_client.clone(), Arc::clone(&consensus), - db.clone(), + provider_factory, &ctx.task_executor, sync_metrics_tx, prune_config.clone(), @@ -601,7 +604,7 @@ impl NodeCommand { config: &Config, client: Client, consensus: Arc, - db: DB, + provider_factory: ProviderFactory, task_executor: &TaskExecutor, metrics_tx: reth_stages::MetricEventsSender, prune_config: Option, @@ -617,16 +620,12 @@ impl NodeCommand { .into_task_with(task_executor); let body_downloader = BodiesDownloaderBuilder::from(config.stages.bodies) - .build( - client, 
- Arc::clone(&consensus), - ProviderFactory::new(db.clone(), self.chain.clone()), - ) + .build(client, Arc::clone(&consensus), provider_factory.clone()) .into_task_with(task_executor); let pipeline = self .build_pipeline( - db, + provider_factory, config, header_downloader, body_downloader, @@ -848,7 +847,7 @@ impl NodeCommand { #[allow(clippy::too_many_arguments)] async fn build_pipeline( &self, - db: DB, + provider_factory: ProviderFactory, config: &Config, header_downloader: H, body_downloader: B, @@ -900,7 +899,7 @@ impl NodeCommand { .with_metrics_tx(metrics_tx.clone()) .add_stages( DefaultStages::new( - ProviderFactory::new(db.clone(), self.chain.clone()), + provider_factory.clone(), header_mode, Arc::clone(&consensus), header_downloader, @@ -953,7 +952,7 @@ impl NodeCommand { prune_modes.storage_history, )), ) - .build(db, self.chain.clone()); + .build(provider_factory); Ok(pipeline) } diff --git a/bin/reth/src/stage/run.rs b/bin/reth/src/stage/run.rs index 6c821451907e2..872f1da2b7e04 100644 --- a/bin/reth/src/stage/run.rs +++ b/bin/reth/src/stage/run.rs @@ -124,7 +124,7 @@ impl Command { let db = Arc::new(init_db(db_path, self.db.log_level)?); info!(target: "reth::cli", "Database opened"); - let factory = ProviderFactory::new(&db, self.chain.clone()); + let factory = ProviderFactory::new(Arc::clone(&db), self.chain.clone()); let mut provider_rw = factory.provider_rw()?; if let Some(listen_addr) = self.metrics { diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index 07780d4330e1f..a49391b5ec550 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -403,7 +403,10 @@ mod tests { constants::ETHEREUM_BLOCK_GAS_LIMIT, stage::StageCheckpoint, BlockBody, ChainSpec, ChainSpecBuilder, Header, SealedHeader, MAINNET, }; - use reth_provider::{test_utils::TestExecutorFactory, BundleStateWithReceipts}; + use reth_provider::{ + test_utils::{create_test_provider_factory_with_chain_spec, TestExecutorFactory}, + BundleStateWithReceipts, ProviderFactory, + }; use reth_stages::{test_utils::TestStages, ExecOutput, StageError}; use reth_tasks::TokioTaskExecutor; use std::{collections::VecDeque, future::poll_fn, sync::Arc}; @@ -451,7 +454,6 @@ mod tests { /// Builds the pipeline. 
fn build(self, chain_spec: Arc) -> Pipeline>> { reth_tracing::init_test_tracing(); - let db = create_test_rw_db(); let executor_factory = TestExecutorFactory::new(chain_spec.clone()); executor_factory.extend(self.executor_results); @@ -466,7 +468,7 @@ mod tests { pipeline = pipeline.with_max_block(max_block); } - pipeline.build(db, chain_spec) + pipeline.build(create_test_provider_factory_with_chain_spec(chain_spec)) } } diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index c37e142a624ea..a37e2e148463b 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -516,7 +516,7 @@ where pipeline = pipeline.with_max_block(max_block); } - let pipeline = pipeline.build(db.clone(), self.base_config.chain_spec.clone()); + let pipeline = pipeline.build(provider_factory.clone()); // Setup blockchain tree let externals = TreeExternals::new(provider_factory.clone(), consensus, executor_factory); diff --git a/crates/interfaces/src/p2p/error.rs b/crates/interfaces/src/p2p/error.rs index 9758c8ab2e434..29c238e5f5293 100644 --- a/crates/interfaces/src/p2p/error.rs +++ b/crates/interfaces/src/p2p/error.rs @@ -1,5 +1,5 @@ use super::headers::client::HeadersRequest; -use crate::{consensus::ConsensusError, provider::ProviderError}; +use crate::{consensus::ConsensusError, db::DatabaseError, provider::ProviderError}; use reth_network_api::ReputationChangeKind; use reth_primitives::{ BlockHashOrNumber, BlockNumber, GotExpected, GotExpectedBoxed, Header, WithPeerId, B256, @@ -182,6 +182,12 @@ pub enum DownloadError { Provider(#[from] ProviderError), } +impl From for DownloadError { + fn from(error: DatabaseError) -> Self { + Self::Provider(ProviderError::Database(error)) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/prune/src/segments/account_history.rs b/crates/prune/src/segments/account_history.rs index d8d94764bd93a..bfebad1a95c0b 100644 --- a/crates/prune/src/segments/account_history.rs +++ b/crates/prune/src/segments/account_history.rs @@ -90,16 +90,16 @@ mod tests { }; use reth_primitives::{BlockNumber, PruneCheckpoint, PruneMode, PruneSegment, B256}; use reth_provider::PruneCheckpointReader; - use reth_stages::test_utils::TestTransaction; + use reth_stages::test_utils::TestStageDB; use std::{collections::BTreeMap, ops::AddAssign}; #[test] fn prune() { - let tx = TestTransaction::default(); + let db = TestStageDB::default(); let mut rng = generators::rng(); let blocks = random_block_range(&mut rng, 1..=5000, B256::ZERO, 0..1); - tx.insert_blocks(blocks.iter(), None).expect("insert blocks"); + db.insert_blocks(blocks.iter(), None).expect("insert blocks"); let accounts = random_eoa_account_range(&mut rng, 0..2).into_iter().collect::>(); @@ -111,10 +111,10 @@ mod tests { 0..0, 0..0, ); - tx.insert_changesets(changesets.clone(), None).expect("insert changesets"); - tx.insert_history(changesets.clone(), None).expect("insert history"); + db.insert_changesets(changesets.clone(), None).expect("insert changesets"); + db.insert_history(changesets.clone(), None).expect("insert history"); - let account_occurrences = tx.table::().unwrap().into_iter().fold( + let account_occurrences = db.table::().unwrap().into_iter().fold( BTreeMap::<_, usize>::new(), |mut map, (key, _)| { map.entry(key.key).or_default().add_assign(1); @@ -124,17 +124,19 @@ mod tests { assert!(account_occurrences.into_iter().any(|(_, occurrences)| occurrences > 1)); assert_eq!( - tx.table::().unwrap().len(), 
+ db.table::().unwrap().len(), changesets.iter().flatten().count() ); - let original_shards = tx.table::().unwrap(); + let original_shards = db.table::().unwrap(); let test_prune = |to_block: BlockNumber, run: usize, expected_result: (bool, usize)| { let prune_mode = PruneMode::Before(to_block); let input = PruneInput { - previous_checkpoint: tx - .inner() + previous_checkpoint: db + .factory + .provider() + .unwrap() .get_prune_checkpoint(PruneSegment::AccountHistory) .unwrap(), to_block, @@ -142,7 +144,7 @@ mod tests { }; let segment = AccountHistory::new(prune_mode); - let provider = tx.inner_rw(); + let provider = db.factory.provider_rw().unwrap(); let result = segment.prune(&provider, input).unwrap(); assert_matches!( result, @@ -200,11 +202,11 @@ mod tests { ); assert_eq!( - tx.table::().unwrap().len(), + db.table::().unwrap().len(), pruned_changesets.values().flatten().count() ); - let actual_shards = tx.table::().unwrap(); + let actual_shards = db.table::().unwrap(); let expected_shards = original_shards .iter() @@ -221,7 +223,11 @@ mod tests { assert_eq!(actual_shards, expected_shards); assert_eq!( - tx.inner().get_prune_checkpoint(PruneSegment::AccountHistory).unwrap(), + db.factory + .provider() + .unwrap() + .get_prune_checkpoint(PruneSegment::AccountHistory) + .unwrap(), Some(PruneCheckpoint { block_number: Some(last_pruned_block_number), tx_number: None, diff --git a/crates/prune/src/segments/headers.rs b/crates/prune/src/segments/headers.rs index a1e951665e1b0..f6913fe15d2a1 100644 --- a/crates/prune/src/segments/headers.rs +++ b/crates/prune/src/segments/headers.rs @@ -116,25 +116,27 @@ mod tests { use reth_interfaces::test_utils::{generators, generators::random_header_range}; use reth_primitives::{BlockNumber, PruneCheckpoint, PruneMode, PruneSegment, B256}; use reth_provider::PruneCheckpointReader; - use reth_stages::test_utils::TestTransaction; + use reth_stages::test_utils::TestStageDB; #[test] fn prune() { - let tx = TestTransaction::default(); + let db = TestStageDB::default(); let mut rng = generators::rng(); let headers = random_header_range(&mut rng, 0..100, B256::ZERO); - tx.insert_headers_with_td(headers.iter()).expect("insert headers"); + db.insert_headers_with_td(headers.iter()).expect("insert headers"); - assert_eq!(tx.table::().unwrap().len(), headers.len()); - assert_eq!(tx.table::().unwrap().len(), headers.len()); - assert_eq!(tx.table::().unwrap().len(), headers.len()); + assert_eq!(db.table::().unwrap().len(), headers.len()); + assert_eq!(db.table::().unwrap().len(), headers.len()); + assert_eq!(db.table::().unwrap().len(), headers.len()); let test_prune = |to_block: BlockNumber, expected_result: (bool, usize)| { let prune_mode = PruneMode::Before(to_block); let input = PruneInput { - previous_checkpoint: tx - .inner() + previous_checkpoint: db + .factory + .provider() + .unwrap() .get_prune_checkpoint(PruneSegment::Headers) .unwrap(), to_block, @@ -142,15 +144,17 @@ mod tests { }; let segment = Headers::new(prune_mode); - let next_block_number_to_prune = tx - .inner() + let next_block_number_to_prune = db + .factory + .provider() + .unwrap() .get_prune_checkpoint(PruneSegment::Headers) .unwrap() .and_then(|checkpoint| checkpoint.block_number) .map(|block_number| block_number + 1) .unwrap_or_default(); - let provider = tx.inner_rw(); + let provider = db.factory.provider_rw().unwrap(); let result = segment.prune(&provider, input).unwrap(); assert_matches!( result, @@ -169,19 +173,19 @@ mod tests { .min(next_block_number_to_prune + input.delete_limit as 
BlockNumber / 3 - 1); assert_eq!( - tx.table::().unwrap().len(), + db.table::().unwrap().len(), headers.len() - (last_pruned_block_number + 1) as usize ); assert_eq!( - tx.table::().unwrap().len(), + db.table::().unwrap().len(), headers.len() - (last_pruned_block_number + 1) as usize ); assert_eq!( - tx.table::().unwrap().len(), + db.table::().unwrap().len(), headers.len() - (last_pruned_block_number + 1) as usize ); assert_eq!( - tx.inner().get_prune_checkpoint(PruneSegment::Headers).unwrap(), + db.factory.provider().unwrap().get_prune_checkpoint(PruneSegment::Headers).unwrap(), Some(PruneCheckpoint { block_number: Some(last_pruned_block_number), tx_number: None, @@ -196,7 +200,7 @@ mod tests { #[test] fn prune_cannot_be_done() { - let tx = TestTransaction::default(); + let db = TestStageDB::default(); let input = PruneInput { previous_checkpoint: None, @@ -206,7 +210,7 @@ mod tests { }; let segment = Headers::new(PruneMode::Full); - let provider = tx.inner_rw(); + let provider = db.factory.provider_rw().unwrap(); let result = segment.prune(&provider, input).unwrap(); assert_eq!(result, PruneOutput::not_done()); } diff --git a/crates/prune/src/segments/receipts.rs b/crates/prune/src/segments/receipts.rs index acbdd6829cf15..fdd4d0402e401 100644 --- a/crates/prune/src/segments/receipts.rs +++ b/crates/prune/src/segments/receipts.rs @@ -99,16 +99,16 @@ mod tests { }; use reth_primitives::{BlockNumber, PruneCheckpoint, PruneMode, PruneSegment, TxNumber, B256}; use reth_provider::PruneCheckpointReader; - use reth_stages::test_utils::TestTransaction; + use reth_stages::test_utils::TestStageDB; use std::ops::Sub; #[test] fn prune() { - let tx = TestTransaction::default(); + let db = TestStageDB::default(); let mut rng = generators::rng(); let blocks = random_block_range(&mut rng, 1..=10, B256::ZERO, 2..3); - tx.insert_blocks(blocks.iter(), None).expect("insert blocks"); + db.insert_blocks(blocks.iter(), None).expect("insert blocks"); let mut receipts = Vec::new(); for block in &blocks { @@ -117,22 +117,24 @@ mod tests { .push((receipts.len() as u64, random_receipt(&mut rng, transaction, Some(0)))); } } - tx.insert_receipts(receipts.clone()).expect("insert receipts"); + db.insert_receipts(receipts.clone()).expect("insert receipts"); assert_eq!( - tx.table::().unwrap().len(), + db.table::().unwrap().len(), blocks.iter().map(|block| block.body.len()).sum::() ); assert_eq!( - tx.table::().unwrap().len(), - tx.table::().unwrap().len() + db.table::().unwrap().len(), + db.table::().unwrap().len() ); let test_prune = |to_block: BlockNumber, expected_result: (bool, usize)| { let prune_mode = PruneMode::Before(to_block); let input = PruneInput { - previous_checkpoint: tx - .inner() + previous_checkpoint: db + .factory + .provider() + .unwrap() .get_prune_checkpoint(PruneSegment::Receipts) .unwrap(), to_block, @@ -140,8 +142,10 @@ mod tests { }; let segment = Receipts::new(prune_mode); - let next_tx_number_to_prune = tx - .inner() + let next_tx_number_to_prune = db + .factory + .provider() + .unwrap() .get_prune_checkpoint(PruneSegment::Receipts) .unwrap() .and_then(|checkpoint| checkpoint.tx_number) @@ -156,7 +160,7 @@ mod tests { .min(next_tx_number_to_prune as usize + input.delete_limit) .sub(1); - let provider = tx.inner_rw(); + let provider = db.factory.provider_rw().unwrap(); let result = segment.prune(&provider, input).unwrap(); assert_matches!( result, @@ -187,11 +191,15 @@ mod tests { .checked_sub(if result.done { 0 } else { 1 }); assert_eq!( - tx.table::().unwrap().len(), + 
db.table::().unwrap().len(), receipts.len() - (last_pruned_tx_number + 1) ); assert_eq!( - tx.inner().get_prune_checkpoint(PruneSegment::Receipts).unwrap(), + db.factory + .provider() + .unwrap() + .get_prune_checkpoint(PruneSegment::Receipts) + .unwrap(), Some(PruneCheckpoint { block_number: last_pruned_block_number, tx_number: Some(last_pruned_tx_number as TxNumber), diff --git a/crates/prune/src/segments/receipts_by_logs.rs b/crates/prune/src/segments/receipts_by_logs.rs index aec6d7a2cf661..e05c87533812b 100644 --- a/crates/prune/src/segments/receipts_by_logs.rs +++ b/crates/prune/src/segments/receipts_by_logs.rs @@ -216,12 +216,12 @@ mod tests { }; use reth_primitives::{PruneMode, PruneSegment, ReceiptsLogPruneConfig, B256}; use reth_provider::{PruneCheckpointReader, TransactionsProvider}; - use reth_stages::test_utils::TestTransaction; + use reth_stages::test_utils::TestStageDB; use std::collections::BTreeMap; #[test] fn prune_receipts_by_logs() { - let tx = TestTransaction::default(); + let db = TestStageDB::default(); let mut rng = generators::rng(); let tip = 20000; @@ -231,7 +231,7 @@ mod tests { random_block_range(&mut rng, (tip - 100 + 1)..=tip, B256::ZERO, 1..5), ] .concat(); - tx.insert_blocks(blocks.iter(), None).expect("insert blocks"); + db.insert_blocks(blocks.iter(), None).expect("insert blocks"); let mut receipts = Vec::new(); @@ -247,19 +247,19 @@ mod tests { receipts.push((receipts.len() as u64, receipt)); } } - tx.insert_receipts(receipts).expect("insert receipts"); + db.insert_receipts(receipts).expect("insert receipts"); assert_eq!( - tx.table::().unwrap().len(), + db.table::().unwrap().len(), blocks.iter().map(|block| block.body.len()).sum::() ); assert_eq!( - tx.table::().unwrap().len(), - tx.table::().unwrap().len() + db.table::().unwrap().len(), + db.table::().unwrap().len() ); let run_prune = || { - let provider = tx.inner_rw(); + let provider = db.factory.provider_rw().unwrap(); let prune_before_block: usize = 20; let prune_mode = PruneMode::Before(prune_before_block as u64); @@ -269,8 +269,10 @@ mod tests { let result = ReceiptsByLogs::new(receipts_log_filter).prune( &provider, PruneInput { - previous_checkpoint: tx - .inner() + previous_checkpoint: db + .factory + .provider() + .unwrap() .get_prune_checkpoint(PruneSegment::ContractLogs) .unwrap(), to_block: tip, @@ -282,8 +284,10 @@ mod tests { assert_matches!(result, Ok(_)); let output = result.unwrap(); - let (pruned_block, pruned_tx) = tx - .inner() + let (pruned_block, pruned_tx) = db + .factory + .provider() + .unwrap() .get_prune_checkpoint(PruneSegment::ContractLogs) .unwrap() .map(|checkpoint| (checkpoint.block_number.unwrap(), checkpoint.tx_number.unwrap())) @@ -293,7 +297,7 @@ mod tests { let unprunable = pruned_block.saturating_sub(prune_before_block as u64 - 1); assert_eq!( - tx.table::().unwrap().len(), + db.table::().unwrap().len(), blocks.iter().map(|block| block.body.len()).sum::() - ((pruned_tx + 1) - unprunable) as usize ); @@ -303,7 +307,7 @@ mod tests { while !run_prune() {} - let provider = tx.inner(); + let provider = db.factory.provider().unwrap(); let mut cursor = provider.tx_ref().cursor_read::().unwrap(); let walker = cursor.walk(None).unwrap(); for receipt in walker { diff --git a/crates/prune/src/segments/sender_recovery.rs b/crates/prune/src/segments/sender_recovery.rs index 14fcdfae3b7e0..ec2d189f55cc3 100644 --- a/crates/prune/src/segments/sender_recovery.rs +++ b/crates/prune/src/segments/sender_recovery.rs @@ -81,16 +81,16 @@ mod tests { use 
reth_interfaces::test_utils::{generators, generators::random_block_range}; use reth_primitives::{BlockNumber, PruneCheckpoint, PruneMode, PruneSegment, TxNumber, B256}; use reth_provider::PruneCheckpointReader; - use reth_stages::test_utils::TestTransaction; + use reth_stages::test_utils::TestStageDB; use std::ops::Sub; #[test] fn prune() { - let tx = TestTransaction::default(); + let db = TestStageDB::default(); let mut rng = generators::rng(); let blocks = random_block_range(&mut rng, 1..=10, B256::ZERO, 2..3); - tx.insert_blocks(blocks.iter(), None).expect("insert blocks"); + db.insert_blocks(blocks.iter(), None).expect("insert blocks"); let mut transaction_senders = Vec::new(); for block in &blocks { @@ -101,23 +101,25 @@ mod tests { )); } } - tx.insert_transaction_senders(transaction_senders.clone()) + db.insert_transaction_senders(transaction_senders.clone()) .expect("insert transaction senders"); assert_eq!( - tx.table::().unwrap().len(), + db.table::().unwrap().len(), blocks.iter().map(|block| block.body.len()).sum::() ); assert_eq!( - tx.table::().unwrap().len(), - tx.table::().unwrap().len() + db.table::().unwrap().len(), + db.table::().unwrap().len() ); let test_prune = |to_block: BlockNumber, expected_result: (bool, usize)| { let prune_mode = PruneMode::Before(to_block); let input = PruneInput { - previous_checkpoint: tx - .inner() + previous_checkpoint: db + .factory + .provider() + .unwrap() .get_prune_checkpoint(PruneSegment::SenderRecovery) .unwrap(), to_block, @@ -125,8 +127,10 @@ mod tests { }; let segment = SenderRecovery::new(prune_mode); - let next_tx_number_to_prune = tx - .inner() + let next_tx_number_to_prune = db + .factory + .provider() + .unwrap() .get_prune_checkpoint(PruneSegment::SenderRecovery) .unwrap() .and_then(|checkpoint| checkpoint.tx_number) @@ -155,7 +159,7 @@ mod tests { .into_inner() .0; - let provider = tx.inner_rw(); + let provider = db.factory.provider_rw().unwrap(); let result = segment.prune(&provider, input).unwrap(); assert_matches!( result, @@ -174,11 +178,15 @@ mod tests { last_pruned_block_number.checked_sub(if result.done { 0 } else { 1 }); assert_eq!( - tx.table::().unwrap().len(), + db.table::().unwrap().len(), transaction_senders.len() - (last_pruned_tx_number + 1) ); assert_eq!( - tx.inner().get_prune_checkpoint(PruneSegment::SenderRecovery).unwrap(), + db.factory + .provider() + .unwrap() + .get_prune_checkpoint(PruneSegment::SenderRecovery) + .unwrap(), Some(PruneCheckpoint { block_number: last_pruned_block_number, tx_number: Some(last_pruned_tx_number as TxNumber), diff --git a/crates/prune/src/segments/storage_history.rs b/crates/prune/src/segments/storage_history.rs index 1bf294a9736f4..45713760c7da6 100644 --- a/crates/prune/src/segments/storage_history.rs +++ b/crates/prune/src/segments/storage_history.rs @@ -94,16 +94,16 @@ mod tests { }; use reth_primitives::{BlockNumber, PruneCheckpoint, PruneMode, PruneSegment, B256}; use reth_provider::PruneCheckpointReader; - use reth_stages::test_utils::TestTransaction; + use reth_stages::test_utils::TestStageDB; use std::{collections::BTreeMap, ops::AddAssign}; #[test] fn prune() { - let tx = TestTransaction::default(); + let db = TestStageDB::default(); let mut rng = generators::rng(); let blocks = random_block_range(&mut rng, 0..=5000, B256::ZERO, 0..1); - tx.insert_blocks(blocks.iter(), None).expect("insert blocks"); + db.insert_blocks(blocks.iter(), None).expect("insert blocks"); let accounts = random_eoa_account_range(&mut rng, 0..2).into_iter().collect::>(); @@ -115,10 +115,10 @@ 
mod tests { 2..3, 1..2, ); - tx.insert_changesets(changesets.clone(), None).expect("insert changesets"); - tx.insert_history(changesets.clone(), None).expect("insert history"); + db.insert_changesets(changesets.clone(), None).expect("insert changesets"); + db.insert_history(changesets.clone(), None).expect("insert history"); - let storage_occurrences = tx.table::().unwrap().into_iter().fold( + let storage_occurrences = db.table::().unwrap().into_iter().fold( BTreeMap::<_, usize>::new(), |mut map, (key, _)| { map.entry((key.address, key.sharded_key.key)).or_default().add_assign(1); @@ -128,17 +128,19 @@ mod tests { assert!(storage_occurrences.into_iter().any(|(_, occurrences)| occurrences > 1)); assert_eq!( - tx.table::().unwrap().len(), + db.table::().unwrap().len(), changesets.iter().flatten().flat_map(|(_, _, entries)| entries).count() ); - let original_shards = tx.table::().unwrap(); + let original_shards = db.table::().unwrap(); let test_prune = |to_block: BlockNumber, run: usize, expected_result: (bool, usize)| { let prune_mode = PruneMode::Before(to_block); let input = PruneInput { - previous_checkpoint: tx - .inner() + previous_checkpoint: db + .factory + .provider() + .unwrap() .get_prune_checkpoint(PruneSegment::StorageHistory) .unwrap(), to_block, @@ -146,7 +148,7 @@ mod tests { }; let segment = StorageHistory::new(prune_mode); - let provider = tx.inner_rw(); + let provider = db.factory.provider_rw().unwrap(); let result = segment.prune(&provider, input).unwrap(); assert_matches!( result, @@ -206,11 +208,11 @@ mod tests { ); assert_eq!( - tx.table::().unwrap().len(), + db.table::().unwrap().len(), pruned_changesets.values().flatten().count() ); - let actual_shards = tx.table::().unwrap(); + let actual_shards = db.table::().unwrap(); let expected_shards = original_shards .iter() @@ -227,7 +229,11 @@ mod tests { assert_eq!(actual_shards, expected_shards); assert_eq!( - tx.inner().get_prune_checkpoint(PruneSegment::StorageHistory).unwrap(), + db.factory + .provider() + .unwrap() + .get_prune_checkpoint(PruneSegment::StorageHistory) + .unwrap(), Some(PruneCheckpoint { block_number: Some(last_pruned_block_number), tx_number: None, diff --git a/crates/prune/src/segments/transaction_lookup.rs b/crates/prune/src/segments/transaction_lookup.rs index 4a094f46018ca..342a764a68a6a 100644 --- a/crates/prune/src/segments/transaction_lookup.rs +++ b/crates/prune/src/segments/transaction_lookup.rs @@ -104,16 +104,16 @@ mod tests { use reth_interfaces::test_utils::{generators, generators::random_block_range}; use reth_primitives::{BlockNumber, PruneCheckpoint, PruneMode, PruneSegment, TxNumber, B256}; use reth_provider::PruneCheckpointReader; - use reth_stages::test_utils::TestTransaction; + use reth_stages::test_utils::TestStageDB; use std::ops::Sub; #[test] fn prune() { - let tx = TestTransaction::default(); + let db = TestStageDB::default(); let mut rng = generators::rng(); let blocks = random_block_range(&mut rng, 1..=10, B256::ZERO, 2..3); - tx.insert_blocks(blocks.iter(), None).expect("insert blocks"); + db.insert_blocks(blocks.iter(), None).expect("insert blocks"); let mut tx_hash_numbers = Vec::new(); for block in &blocks { @@ -121,22 +121,24 @@ mod tests { tx_hash_numbers.push((transaction.hash, tx_hash_numbers.len() as u64)); } } - tx.insert_tx_hash_numbers(tx_hash_numbers.clone()).expect("insert tx hash numbers"); + db.insert_tx_hash_numbers(tx_hash_numbers.clone()).expect("insert tx hash numbers"); assert_eq!( - tx.table::().unwrap().len(), + db.table::().unwrap().len(), 
blocks.iter().map(|block| block.body.len()).sum::() ); assert_eq!( - tx.table::().unwrap().len(), - tx.table::().unwrap().len() + db.table::().unwrap().len(), + db.table::().unwrap().len() ); let test_prune = |to_block: BlockNumber, expected_result: (bool, usize)| { let prune_mode = PruneMode::Before(to_block); let input = PruneInput { - previous_checkpoint: tx - .inner() + previous_checkpoint: db + .factory + .provider() + .unwrap() .get_prune_checkpoint(PruneSegment::TransactionLookup) .unwrap(), to_block, @@ -144,8 +146,10 @@ mod tests { }; let segment = TransactionLookup::new(prune_mode); - let next_tx_number_to_prune = tx - .inner() + let next_tx_number_to_prune = db + .factory + .provider() + .unwrap() .get_prune_checkpoint(PruneSegment::TransactionLookup) .unwrap() .and_then(|checkpoint| checkpoint.tx_number) @@ -174,7 +178,7 @@ mod tests { .into_inner() .0; - let provider = tx.inner_rw(); + let provider = db.factory.provider_rw().unwrap(); let result = segment.prune(&provider, input).unwrap(); assert_matches!( result, @@ -193,11 +197,15 @@ mod tests { last_pruned_block_number.checked_sub(if result.done { 0 } else { 1 }); assert_eq!( - tx.table::().unwrap().len(), + db.table::().unwrap().len(), tx_hash_numbers.len() - (last_pruned_tx_number + 1) ); assert_eq!( - tx.inner().get_prune_checkpoint(PruneSegment::TransactionLookup).unwrap(), + db.factory + .provider() + .unwrap() + .get_prune_checkpoint(PruneSegment::TransactionLookup) + .unwrap(), Some(PruneCheckpoint { block_number: last_pruned_block_number, tx_number: Some(last_pruned_tx_number as TxNumber), diff --git a/crates/prune/src/segments/transactions.rs b/crates/prune/src/segments/transactions.rs index c70fd1197a704..7155cd8888ad8 100644 --- a/crates/prune/src/segments/transactions.rs +++ b/crates/prune/src/segments/transactions.rs @@ -80,26 +80,28 @@ mod tests { use reth_interfaces::test_utils::{generators, generators::random_block_range}; use reth_primitives::{BlockNumber, PruneCheckpoint, PruneMode, PruneSegment, TxNumber, B256}; use reth_provider::PruneCheckpointReader; - use reth_stages::test_utils::TestTransaction; + use reth_stages::test_utils::TestStageDB; use std::ops::Sub; #[test] fn prune() { - let tx = TestTransaction::default(); + let db = TestStageDB::default(); let mut rng = generators::rng(); let blocks = random_block_range(&mut rng, 1..=100, B256::ZERO, 2..3); - tx.insert_blocks(blocks.iter(), None).expect("insert blocks"); + db.insert_blocks(blocks.iter(), None).expect("insert blocks"); let transactions = blocks.iter().flat_map(|block| &block.body).collect::>(); - assert_eq!(tx.table::().unwrap().len(), transactions.len()); + assert_eq!(db.table::().unwrap().len(), transactions.len()); let test_prune = |to_block: BlockNumber, expected_result: (bool, usize)| { let prune_mode = PruneMode::Before(to_block); let input = PruneInput { - previous_checkpoint: tx - .inner() + previous_checkpoint: db + .factory + .provider() + .unwrap() .get_prune_checkpoint(PruneSegment::Transactions) .unwrap(), to_block, @@ -107,15 +109,17 @@ mod tests { }; let segment = Transactions::new(prune_mode); - let next_tx_number_to_prune = tx - .inner() + let next_tx_number_to_prune = db + .factory + .provider() + .unwrap() .get_prune_checkpoint(PruneSegment::Transactions) .unwrap() .and_then(|checkpoint| checkpoint.tx_number) .map(|tx_number| tx_number + 1) .unwrap_or_default(); - let provider = tx.inner_rw(); + let provider = db.factory.provider_rw().unwrap(); let result = segment.prune(&provider, input).unwrap(); assert_matches!( result, 
@@ -154,11 +158,15 @@ mod tests { .checked_sub(if result.done { 0 } else { 1 }); assert_eq!( - tx.table::().unwrap().len(), + db.table::().unwrap().len(), transactions.len() - (last_pruned_tx_number + 1) ); assert_eq!( - tx.inner().get_prune_checkpoint(PruneSegment::Transactions).unwrap(), + db.factory + .provider() + .unwrap() + .get_prune_checkpoint(PruneSegment::Transactions) + .unwrap(), Some(PruneCheckpoint { block_number: last_pruned_block_number, tx_number: Some(last_pruned_tx_number as TxNumber), diff --git a/crates/snapshot/src/snapshotter.rs b/crates/snapshot/src/snapshotter.rs index 030db93cea569..729b0c1b974f1 100644 --- a/crates/snapshot/src/snapshotter.rs +++ b/crates/snapshot/src/snapshotter.rs @@ -5,14 +5,13 @@ use reth_db::database::Database; use reth_interfaces::{RethError, RethResult}; use reth_primitives::{ snapshot::{iter_snapshots, HighestSnapshots}, - BlockNumber, ChainSpec, TxNumber, + BlockNumber, TxNumber, }; use reth_provider::{BlockReader, DatabaseProviderRO, ProviderFactory, TransactionsProviderExt}; use std::{ collections::HashMap, ops::RangeInclusive, path::{Path, PathBuf}, - sync::Arc, }; use tokio::sync::watch; use tracing::warn; @@ -94,15 +93,14 @@ impl SnapshotTargets { impl Snapshotter { /// Creates a new [Snapshotter]. pub fn new( - db: DB, + provider_factory: ProviderFactory, snapshots_path: impl AsRef, - chain_spec: Arc, block_interval: u64, ) -> RethResult { let (highest_snapshots_notifier, highest_snapshots_tracker) = watch::channel(None); let mut snapshotter = Self { - provider_factory: ProviderFactory::new(db, chain_spec), + provider_factory, snapshots_path: snapshots_path.as_ref().into(), highest_snapshots: HighestSnapshots::default(), highest_snapshots_notifier, @@ -329,16 +327,14 @@ mod tests { test_utils::{generators, generators::random_block_range}, RethError, }; - use reth_primitives::{snapshot::HighestSnapshots, B256, MAINNET}; - use reth_stages::test_utils::TestTransaction; + use reth_primitives::{snapshot::HighestSnapshots, B256}; + use reth_stages::test_utils::TestStageDB; #[test] fn new() { - let tx = TestTransaction::default(); + let db = TestStageDB::default(); let snapshots_dir = tempfile::TempDir::new().unwrap(); - let snapshotter = - Snapshotter::new(tx.inner_raw(), snapshots_dir.into_path(), MAINNET.clone(), 2) - .unwrap(); + let snapshotter = Snapshotter::new(db.factory, snapshots_dir.into_path(), 2).unwrap(); assert_eq!( *snapshotter.highest_snapshot_receiver().borrow(), @@ -348,16 +344,14 @@ mod tests { #[test] fn get_snapshot_targets() { - let tx = TestTransaction::default(); + let db = TestStageDB::default(); let snapshots_dir = tempfile::TempDir::new().unwrap(); let mut rng = generators::rng(); let blocks = random_block_range(&mut rng, 0..=3, B256::ZERO, 2..3); - tx.insert_blocks(blocks.iter(), None).expect("insert blocks"); + db.insert_blocks(blocks.iter(), None).expect("insert blocks"); - let mut snapshotter = - Snapshotter::new(tx.inner_raw(), snapshots_dir.into_path(), MAINNET.clone(), 2) - .unwrap(); + let mut snapshotter = Snapshotter::new(db.factory, snapshots_dir.into_path(), 2).unwrap(); // Snapshot targets has data per part up to the passed finalized block number, // respecting the block interval diff --git a/crates/stages/benches/criterion.rs b/crates/stages/benches/criterion.rs index 98979ca5a6f82..2f73ec71f9f1a 100644 --- a/crates/stages/benches/criterion.rs +++ b/crates/stages/benches/criterion.rs @@ -9,7 +9,7 @@ use reth_primitives::{stage::StageCheckpoint, MAINNET}; use reth_provider::ProviderFactory; use 
reth_stages::{ stages::{MerkleStage, SenderRecoveryStage, TotalDifficultyStage, TransactionLookupStage}, - test_utils::TestTransaction, + test_utils::TestStageDB, ExecInput, Stage, StageExt, UnwindInput, }; use std::{path::PathBuf, sync::Arc}; @@ -123,9 +123,9 @@ fn measure_stage_with_path( label: String, ) where S: Clone + Stage, - F: Fn(S, &TestTransaction, StageRange), + F: Fn(S, &TestStageDB, StageRange), { - let tx = TestTransaction::new(&path); + let tx = TestStageDB::new(&path); let (input, _) = stage_range; group.bench_function(label, move |b| { @@ -136,7 +136,7 @@ fn measure_stage_with_path( }, |_| async { let mut stage = stage.clone(); - let factory = ProviderFactory::new(tx.tx.db(), MAINNET.clone()); + let factory = ProviderFactory::new(tx.factory.db(), MAINNET.clone()); let provider = factory.provider_rw().unwrap(); stage .execute_ready(input) @@ -157,7 +157,7 @@ fn measure_stage( label: String, ) where S: Clone + Stage, - F: Fn(S, &TestTransaction, StageRange), + F: Fn(S, &TestStageDB, StageRange), { let path = setup::txs_testdata(block_interval.end); diff --git a/crates/stages/benches/setup/account_hashing.rs b/crates/stages/benches/setup/account_hashing.rs index 341dbd42b61d3..a94a8250aedef 100644 --- a/crates/stages/benches/setup/account_hashing.rs +++ b/crates/stages/benches/setup/account_hashing.rs @@ -5,7 +5,7 @@ use reth_db::{ use reth_primitives::stage::StageCheckpoint; use reth_stages::{ stages::{AccountHashingStage, SeedOpts}, - test_utils::TestTransaction, + test_utils::TestStageDB, ExecInput, UnwindInput, }; use std::path::{Path, PathBuf}; @@ -31,8 +31,8 @@ pub fn prepare_account_hashing(num_blocks: u64) -> (PathBuf, AccountHashingStage fn find_stage_range(db: &Path) -> StageRange { let mut stage_range = None; - TestTransaction::new(db) - .tx + TestStageDB::new(db) + .factory .view(|tx| { let mut cursor = tx.cursor_read::()?; let from = cursor.first()?.unwrap().0; @@ -62,8 +62,8 @@ fn generate_testdata_db(num_blocks: u64) -> (PathBuf, StageRange) { // create the dirs std::fs::create_dir_all(&path).unwrap(); println!("Account Hashing testdata not found, generating to {:?}", path.display()); - let tx = TestTransaction::new(&path); - let provider = tx.inner_rw(); + let tx = TestStageDB::new(&path); + let provider = tx.provider_rw(); let _accounts = AccountHashingStage::seed(&provider, opts); provider.commit().expect("failed to commit"); } diff --git a/crates/stages/benches/setup/mod.rs b/crates/stages/benches/setup/mod.rs index 806f2d78fe49e..3850ca44fa551 100644 --- a/crates/stages/benches/setup/mod.rs +++ b/crates/stages/benches/setup/mod.rs @@ -16,7 +16,7 @@ use reth_primitives::{Account, Address, SealedBlock, B256, MAINNET}; use reth_provider::ProviderFactory; use reth_stages::{ stages::{AccountHashingStage, StorageHashingStage}, - test_utils::TestTransaction, + test_utils::TestStageDB, ExecInput, Stage, UnwindInput, }; use reth_trie::StateRoot; @@ -34,14 +34,14 @@ pub(crate) type StageRange = (ExecInput, UnwindInput); pub(crate) fn stage_unwind>( stage: S, - tx: &TestTransaction, + db: &TestStageDB, range: StageRange, ) { let (_, unwind) = range; tokio::runtime::Runtime::new().unwrap().block_on(async { let mut stage = stage.clone(); - let factory = ProviderFactory::new(tx.tx.db(), MAINNET.clone()); + let factory = ProviderFactory::new(db.factory.db(), MAINNET.clone()); let provider = factory.provider_rw().unwrap(); // Clear previous run @@ -50,7 +50,7 @@ pub(crate) fn stage_unwind>( .map_err(|e| { format!( "{e}\nMake sure your test database at `{}` isn't too old 
and incompatible with newer stage changes.", - tx.path.as_ref().unwrap().display() + db.path.as_ref().unwrap().display() ) }) .unwrap(); @@ -61,13 +61,13 @@ pub(crate) fn stage_unwind>( pub(crate) fn unwind_hashes>( stage: S, - tx: &TestTransaction, + db: &TestStageDB, range: StageRange, ) { let (input, unwind) = range; let mut stage = stage.clone(); - let factory = ProviderFactory::new(tx.tx.db(), MAINNET.clone()); + let factory = ProviderFactory::new(db.factory.db(), MAINNET.clone()); let provider = factory.provider_rw().unwrap(); StorageHashingStage::default().unwind(&provider, unwind).unwrap(); @@ -105,7 +105,7 @@ pub(crate) fn txs_testdata(num_blocks: u64) -> PathBuf { // create the dirs std::fs::create_dir_all(&path).unwrap(); println!("Transactions testdata not found, generating to {:?}", path.display()); - let tx = TestTransaction::new(&path); + let tx = TestStageDB::new(&path); let accounts: BTreeMap = concat([ random_eoa_account_range(&mut rng, 0..n_eoa), @@ -127,7 +127,8 @@ pub(crate) fn txs_testdata(num_blocks: u64) -> PathBuf { tx.insert_accounts_and_storages(start_state.clone()).unwrap(); // make first block after genesis have valid state root - let (root, updates) = StateRoot::new(tx.inner_rw().tx_ref()).root_with_updates().unwrap(); + let (root, updates) = + StateRoot::new(tx.provider_rw().tx_ref()).root_with_updates().unwrap(); let second_block = blocks.get_mut(1).unwrap(); let cloned_second = second_block.clone(); let mut updated_header = cloned_second.header.unseal(); @@ -153,7 +154,7 @@ pub(crate) fn txs_testdata(num_blocks: u64) -> PathBuf { // make last block have valid state root let root = { - let tx_mut = tx.inner_rw(); + let tx_mut = tx.provider_rw(); let root = StateRoot::new(tx_mut.tx_ref()).root().unwrap(); tx_mut.commit().unwrap(); root diff --git a/crates/stages/src/lib.rs b/crates/stages/src/lib.rs index 8651dce35194e..437bbe3f523c4 100644 --- a/crates/stages/src/lib.rs +++ b/crates/stages/src/lib.rs @@ -13,7 +13,6 @@ //! //! ``` //! # use std::sync::Arc; -//! # use reth_db::test_utils::create_test_rw_db; //! # use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder; //! # use reth_downloaders::headers::reverse_headers::ReverseHeadersDownloaderBuilder; //! # use reth_interfaces::consensus::Consensus; @@ -25,6 +24,7 @@ //! # use tokio::sync::watch; //! # use reth_provider::ProviderFactory; //! # use reth_provider::HeaderSyncMode; +//! # use reth_provider::test_utils::create_test_provider_factory; //! # //! # let chain_spec = MAINNET.clone(); //! # let consensus: Arc = Arc::new(TestConsensus::default()); @@ -32,11 +32,11 @@ //! # Arc::new(TestHeadersClient::default()), //! # consensus.clone() //! # ); -//! # let db = create_test_rw_db(); +//! # let provider_factory = create_test_provider_factory(); //! # let bodies_downloader = BodiesDownloaderBuilder::default().build( //! # Arc::new(TestBodiesClient { responder: |_| Ok((PeerId::ZERO, vec![]).into()) }), //! # consensus.clone(), -//! # ProviderFactory::new(db.clone(), MAINNET.clone()) +//! # provider_factory.clone() //! # ); //! # let (tip_tx, tip_rx) = watch::channel(B256::default()); //! # let factory = Factory::new(chain_spec.clone()); @@ -45,14 +45,14 @@ //! Pipeline::builder() //! .with_tip_sender(tip_tx) //! .add_stages(DefaultStages::new( -//! ProviderFactory::new(db.clone(), chain_spec.clone()), +//! provider_factory.clone(), //! HeaderSyncMode::Tip(tip_rx), //! consensus, //! headers_downloader, //! bodies_downloader, //! factory, //! )) -//! .build(db, chain_spec.clone()); +//! 
.build(provider_factory); //! ``` //! //! ## Feature Flags diff --git a/crates/stages/src/pipeline/builder.rs b/crates/stages/src/pipeline/builder.rs index b5a0a2d409a1e..3e160577fddcb 100644 --- a/crates/stages/src/pipeline/builder.rs +++ b/crates/stages/src/pipeline/builder.rs @@ -1,8 +1,7 @@ -use std::sync::Arc; - use crate::{pipeline::BoxedStage, MetricEventsSender, Pipeline, Stage, StageSet}; use reth_db::database::Database; -use reth_primitives::{stage::StageId, BlockNumber, ChainSpec, B256}; +use reth_primitives::{stage::StageId, BlockNumber, B256}; +use reth_provider::ProviderFactory; use tokio::sync::watch; /// Builds a [`Pipeline`]. @@ -68,13 +67,10 @@ where } /// Builds the final [`Pipeline`] using the given database. - /// - /// Note: it's expected that this is either an [Arc] or an Arc wrapper type. - pub fn build(self, db: DB, chain_spec: Arc) -> Pipeline { + pub fn build(self, provider_factory: ProviderFactory) -> Pipeline { let Self { stages, max_block, tip_tx, metrics_tx } = self; Pipeline { - db, - chain_spec, + provider_factory, stages, max_block, tip_tx, diff --git a/crates/stages/src/pipeline/mod.rs b/crates/stages/src/pipeline/mod.rs index a48c5b462ad7e..344510b233322 100644 --- a/crates/stages/src/pipeline/mod.rs +++ b/crates/stages/src/pipeline/mod.rs @@ -7,11 +7,11 @@ use reth_db::database::Database; use reth_primitives::{ constants::BEACON_CONSENSUS_REORG_UNWIND_DEPTH, stage::{StageCheckpoint, StageId}, - BlockNumber, ChainSpec, B256, + BlockNumber, B256, }; use reth_provider::{ProviderFactory, StageCheckpointReader, StageCheckpointWriter}; use reth_tokio_util::EventListeners; -use std::{pin::Pin, sync::Arc}; +use std::pin::Pin; use tokio::sync::watch; use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::*; @@ -93,10 +93,8 @@ pub type PipelineWithResult = (Pipeline, Result { - /// The Database - db: DB, - /// Chain spec - chain_spec: Arc, + /// Provider factory. + provider_factory: ProviderFactory, /// All configured stages in the order they will be executed. stages: Vec>, /// The maximum block number to sync to. @@ -141,8 +139,7 @@ where /// Registers progress metrics for each registered stage pub fn register_metrics(&mut self) -> Result<(), PipelineError> { let Some(metrics_tx) = &mut self.metrics_tx else { return Ok(()) }; - let factory = ProviderFactory::new(&self.db, self.chain_spec.clone()); - let provider = factory.provider()?; + let provider = self.provider_factory.provider()?; for stage in &self.stages { let stage_id = stage.id(); @@ -236,10 +233,8 @@ where } } - let factory = ProviderFactory::new(&self.db, self.chain_spec.clone()); - previous_stage = Some( - factory + self.provider_factory .provider()? .get_stage_checkpoint(stage_id)? 
.unwrap_or_default() @@ -261,8 +256,7 @@ where // Unwind stages in reverse order of execution let unwind_pipeline = self.stages.iter_mut().rev(); - let factory = ProviderFactory::new(&self.db, self.chain_spec.clone()); - let mut provider_rw = factory.provider_rw()?; + let mut provider_rw = self.provider_factory.provider_rw()?; for stage in unwind_pipeline { let stage_id = stage.id(); @@ -319,7 +313,7 @@ where .notify(PipelineEvent::Unwound { stage_id, result: unwind_output }); provider_rw.commit()?; - provider_rw = factory.provider_rw()?; + provider_rw = self.provider_factory.provider_rw()?; } Err(err) => { self.listeners.notify(PipelineEvent::Error { stage_id }); @@ -344,10 +338,8 @@ where let mut made_progress = false; let target = self.max_block.or(previous_stage); - let factory = ProviderFactory::new(&self.db, self.chain_spec.clone()); - loop { - let prev_checkpoint = factory.get_stage_checkpoint(stage_id)?; + let prev_checkpoint = self.provider_factory.get_stage_checkpoint(stage_id)?; let stage_reached_max_block = prev_checkpoint .zip(self.max_block) @@ -372,7 +364,7 @@ where if let Err(err) = stage.execute_ready(exec_input).await { self.listeners.notify(PipelineEvent::Error { stage_id }); - match on_stage_error(&factory, stage_id, prev_checkpoint, err)? { + match on_stage_error(&self.provider_factory, stage_id, prev_checkpoint, err)? { Some(ctrl) => return Ok(ctrl), None => continue, }; @@ -388,7 +380,7 @@ where target, }); - let provider_rw = factory.provider_rw()?; + let provider_rw = self.provider_factory.provider_rw()?; match stage.execute(&provider_rw, exec_input) { Ok(out @ ExecOutput { checkpoint, done }) => { made_progress |= @@ -426,7 +418,9 @@ where Err(err) => { drop(provider_rw); self.listeners.notify(PipelineEvent::Error { stage_id }); - if let Some(ctrl) = on_stage_error(&factory, stage_id, prev_checkpoint, err)? { + if let Some(ctrl) = + on_stage_error(&self.provider_factory, stage_id, prev_checkpoint, err)? + { return Ok(ctrl) } } @@ -526,13 +520,13 @@ mod tests { use super::*; use crate::{test_utils::TestStage, UnwindOutput}; use assert_matches::assert_matches; - use reth_db::test_utils::create_test_rw_db; use reth_interfaces::{ consensus, provider::ProviderError, test_utils::{generators, generators::random_header}, }; - use reth_primitives::{stage::StageCheckpoint, MAINNET}; + use reth_primitives::stage::StageCheckpoint; + use reth_provider::test_utils::create_test_provider_factory; use tokio_stream::StreamExt; #[test] @@ -565,7 +559,7 @@ mod tests { /// Runs a simple pipeline. #[tokio::test] async fn run_pipeline() { - let db = create_test_rw_db(); + let provider_factory = create_test_provider_factory(); let mut pipeline = Pipeline::builder() .add_stage( @@ -577,7 +571,7 @@ mod tests { .add_exec(Ok(ExecOutput { checkpoint: StageCheckpoint::new(10), done: true })), ) .with_max_block(10) - .build(db, MAINNET.clone()); + .build(provider_factory); let events = pipeline.events(); // Run pipeline @@ -618,7 +612,7 @@ mod tests { /// Unwinds a simple pipeline. #[tokio::test] async fn unwind_pipeline() { - let db = create_test_rw_db(); + let provider_factory = create_test_provider_factory(); let mut pipeline = Pipeline::builder() .add_stage( @@ -637,7 +631,7 @@ mod tests { .add_unwind(Ok(UnwindOutput { checkpoint: StageCheckpoint::new(1) })), ) .with_max_block(10) - .build(db, MAINNET.clone()); + .build(provider_factory); let events = pipeline.events(); // Run pipeline @@ -731,7 +725,7 @@ mod tests { /// Unwinds a pipeline with intermediate progress. 
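// NOTE: the builder hunks above collapse the old `(db, chain_spec)` pair into a
// single `ProviderFactory`, so every test below constructs its pipeline from
// `create_test_provider_factory()`. A condensed sketch of that pattern, written
// against the same crate-local fixtures (`TestStage` usage and the checkpoint
// values are illustrative, not part of this patch):

#[tokio::test]
async fn run_minimal_pipeline() {
    use crate::{test_utils::TestStage, ExecOutput, Pipeline};
    use reth_primitives::stage::{StageCheckpoint, StageId};
    use reth_provider::test_utils::create_test_provider_factory;

    let provider_factory = create_test_provider_factory();
    let mut pipeline = Pipeline::builder()
        .add_stage(
            TestStage::new(StageId::Other("A"))
                .add_exec(Ok(ExecOutput { checkpoint: StageCheckpoint::new(10), done: true })),
        )
        .with_max_block(10)
        // Previously: `.build(db, MAINNET.clone())`.
        .build(provider_factory);
    pipeline.run().await.unwrap();
}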
#[tokio::test] async fn unwind_pipeline_with_intermediate_progress() { - let db = create_test_rw_db(); + let provider_factory = create_test_provider_factory(); let mut pipeline = Pipeline::builder() .add_stage( @@ -744,7 +738,7 @@ mod tests { .add_exec(Ok(ExecOutput { checkpoint: StageCheckpoint::new(10), done: true })), ) .with_max_block(10) - .build(db, MAINNET.clone()); + .build(provider_factory); let events = pipeline.events(); // Run pipeline @@ -816,7 +810,7 @@ mod tests { /// - The pipeline finishes #[tokio::test] async fn run_pipeline_with_unwind() { - let db = create_test_rw_db(); + let provider_factory = create_test_provider_factory(); let mut pipeline = Pipeline::builder() .add_stage( @@ -841,7 +835,7 @@ mod tests { .add_exec(Ok(ExecOutput { checkpoint: StageCheckpoint::new(10), done: true })), ) .with_max_block(10) - .build(db, MAINNET.clone()); + .build(provider_factory); let events = pipeline.events(); // Run pipeline @@ -913,7 +907,7 @@ mod tests { #[tokio::test] async fn pipeline_error_handling() { // Non-fatal - let db = create_test_rw_db(); + let provider_factory = create_test_provider_factory(); let mut pipeline = Pipeline::builder() .add_stage( TestStage::new(StageId::Other("NonFatal")) @@ -921,17 +915,17 @@ mod tests { .add_exec(Ok(ExecOutput { checkpoint: StageCheckpoint::new(10), done: true })), ) .with_max_block(10) - .build(db, MAINNET.clone()); + .build(provider_factory); let result = pipeline.run().await; assert_matches!(result, Ok(())); // Fatal - let db = create_test_rw_db(); + let provider_factory = create_test_provider_factory(); let mut pipeline = Pipeline::builder() .add_stage(TestStage::new(StageId::Other("Fatal")).add_exec(Err( StageError::DatabaseIntegrity(ProviderError::BlockBodyIndicesNotFound(5)), ))) - .build(db, MAINNET.clone()); + .build(provider_factory); let result = pipeline.run().await; assert_matches!( result, diff --git a/crates/stages/src/sets.rs b/crates/stages/src/sets.rs index 5a9ac7942449c..3ef73849672bb 100644 --- a/crates/stages/src/sets.rs +++ b/crates/stages/src/sets.rs @@ -14,13 +14,12 @@ //! # use reth_stages::sets::{OfflineStages}; //! # use reth_revm::Factory; //! # use reth_primitives::MAINNET; -//! use reth_db::test_utils::create_test_rw_db; +//! # use reth_provider::test_utils::create_test_provider_factory; //! //! # let factory = Factory::new(MAINNET.clone()); -//! # let db = create_test_rw_db(); +//! # let provider_factory = create_test_provider_factory(); //! // Build a pipeline with all offline stages. -//! # let pipeline = -//! Pipeline::builder().add_stages(OfflineStages::new(factory)).build(db, MAINNET.clone()); +//! # let pipeline = Pipeline::builder().add_stages(OfflineStages::new(factory)).build(provider_factory); //! ``` //! //! ```ignore diff --git a/crates/stages/src/stage.rs b/crates/stages/src/stage.rs index b165fd0709f7b..aa8360b7d9d8a 100644 --- a/crates/stages/src/stage.rs +++ b/crates/stages/src/stage.rs @@ -234,14 +234,14 @@ pub trait Stage: Send + Sync { /// upon invoking this method. fn execute( &mut self, - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, input: ExecInput, ) -> Result; /// Unwind the stage. 
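// NOTE: the stage.rs hunk above is the mechanical part of this change: the
// provider argument loses one borrow, `&DatabaseProviderRW<&DB>` becoming
// `&DatabaseProviderRW<DB>`. A minimal implementor under the new shape, modeled
// on the `FinishStage` hunks later in this patch (`NoopStage` and the exact
// import paths are illustrative):

use crate::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput};
use reth_db::database::Database;
use reth_primitives::stage::{StageCheckpoint, StageId};
use reth_provider::DatabaseProviderRW;

struct NoopStage;

impl<DB: Database> Stage<DB> for NoopStage {
    fn id(&self) -> StageId {
        StageId::Other("Noop")
    }

    /// Report the requested target as reached without touching the database.
    fn execute(
        &mut self,
        _provider: &DatabaseProviderRW<DB>,
        input: ExecInput,
    ) -> Result<ExecOutput, StageError> {
        Ok(ExecOutput { checkpoint: StageCheckpoint::new(input.target()), done: true })
    }

    /// Roll the checkpoint back to the unwind target.
    fn unwind(
        &mut self,
        _provider: &DatabaseProviderRW<DB>,
        input: UnwindInput,
    ) -> Result<UnwindOutput, StageError> {
        Ok(UnwindOutput { checkpoint: StageCheckpoint::new(input.unwind_to) })
    }
}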
fn unwind( &mut self, - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, input: UnwindInput, ) -> Result; } diff --git a/crates/stages/src/stages/bodies.rs b/crates/stages/src/stages/bodies.rs index cb57c44f81457..56001595cf762 100644 --- a/crates/stages/src/stages/bodies.rs +++ b/crates/stages/src/stages/bodies.rs @@ -98,7 +98,7 @@ impl Stage for BodyStage { /// header, limited by the stage's batch size. fn execute( &mut self, - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, input: ExecInput, ) -> Result { if input.target_reached() { @@ -185,7 +185,7 @@ impl Stage for BodyStage { /// Unwind the stage. fn unwind( &mut self, - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, input: UnwindInput, ) -> Result { self.buffer.take(); @@ -440,7 +440,7 @@ mod tests { // Delete a transaction runner - .tx() + .db() .commit(|tx| { let mut tx_cursor = tx.cursor_write::()?; tx_cursor.last()?.expect("Could not read last transaction"); @@ -471,7 +471,7 @@ mod tests { use crate::{ stages::bodies::BodyStage, test_utils::{ - ExecuteStageTestRunner, StageTestRunner, TestRunnerError, TestTransaction, + ExecuteStageTestRunner, StageTestRunner, TestRunnerError, TestStageDB, UnwindStageTestRunner, }, ExecInput, ExecOutput, UnwindInput, @@ -479,12 +479,11 @@ mod tests { use futures_util::Stream; use reth_db::{ cursor::DbCursorRO, - database::Database, models::{StoredBlockBodyIndices, StoredBlockOmmers}, tables, test_utils::TempDatabase, transaction::{DbTx, DbTxMut}, - DatabaseEnv, DatabaseError, + DatabaseEnv, }; use reth_interfaces::{ p2p::{ @@ -494,7 +493,7 @@ mod tests { response::BlockResponse, }, download::DownloadClient, - error::{DownloadError, DownloadResult}, + error::DownloadResult, priority::Priority, }, test_utils::{ @@ -503,6 +502,7 @@ mod tests { }, }; use reth_primitives::{BlockBody, BlockNumber, SealedBlock, SealedHeader, TxNumber, B256}; + use reth_provider::ProviderFactory; use std::{ collections::{HashMap, VecDeque}, ops::RangeInclusive, @@ -529,17 +529,13 @@ mod tests { /// A helper struct for running the [BodyStage]. 
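// NOTE: across these test hunks the turbofish type parameters were stripped in
// this copy of the patch: anything rendered as `db.table::()` reads as
// `db.table::<tables::SomeTable>()` in the source (e.g. `tables::Transactions`
// where the assertion sits next to `PruneSegment::Transactions`). The migrated
// fixture pattern itself, in sketch form (table choice illustrative):

#[test]
fn test_stage_db_pattern() {
    use reth_db::tables;
    use reth_interfaces::test_utils::{generators, generators::random_block_range};
    use reth_primitives::B256;
    use reth_stages::test_utils::TestStageDB;

    let db = TestStageDB::default();
    let mut rng = generators::rng();
    let blocks = random_block_range(&mut rng, 1..=10, B256::ZERO, 2..3);
    db.insert_blocks(blocks.iter(), None).expect("insert blocks");

    // Read-write access now goes through the embedded factory instead of the
    // old `tx.inner_rw()` helper.
    let provider = db.factory.provider_rw().unwrap();
    // ... run a stage or prune segment against `provider` here ...
    provider.commit().unwrap();

    // Table inspection keeps the same helper with a renamed receiver:
    // `tx.table::<T>()` becomes `db.table::<T>()`.
    let transactions = db.table::<tables::Transactions>().unwrap();
    assert_eq!(transactions.len(), blocks.iter().map(|b| b.body.len()).sum::<usize>());
}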
pub(crate) struct BodyTestRunner { responses: HashMap, - tx: TestTransaction, + db: TestStageDB, batch_size: u64, } impl Default for BodyTestRunner { fn default() -> Self { - Self { - responses: HashMap::default(), - tx: TestTransaction::default(), - batch_size: 1000, - } + Self { responses: HashMap::default(), db: TestStageDB::default(), batch_size: 1000 } } } @@ -556,13 +552,13 @@ mod tests { impl StageTestRunner for BodyTestRunner { type S = BodyStage; - fn tx(&self) -> &TestTransaction { - &self.tx + fn db(&self) -> &TestStageDB { + &self.db } fn stage(&self) -> Self::S { BodyStage::new(TestBodyDownloader::new( - self.tx.inner_raw(), + self.db.factory.clone(), self.responses.clone(), self.batch_size, )) @@ -578,10 +574,10 @@ mod tests { let end = input.target(); let mut rng = generators::rng(); let blocks = random_block_range(&mut rng, start..=end, GENESIS_HASH, 0..2); - self.tx.insert_headers_with_td(blocks.iter().map(|block| &block.header))?; + self.db.insert_headers_with_td(blocks.iter().map(|block| &block.header))?; if let Some(progress) = blocks.first() { // Insert last progress data - self.tx.commit(|tx| { + self.db.commit(|tx| { let body = StoredBlockBodyIndices { first_tx_num: 0, tx_count: progress.body.len() as u64, @@ -629,16 +625,16 @@ mod tests { impl UnwindStageTestRunner for BodyTestRunner { fn validate_unwind(&self, input: UnwindInput) -> Result<(), TestRunnerError> { - self.tx.ensure_no_entry_above::( + self.db.ensure_no_entry_above::( input.unwind_to, |key| key, )?; - self.tx + self.db .ensure_no_entry_above::(input.unwind_to, |key| key)?; if let Some(last_tx_id) = self.get_last_tx_id()? { - self.tx + self.db .ensure_no_entry_above::(last_tx_id, |key| key)?; - self.tx.ensure_no_entry_above::( + self.db.ensure_no_entry_above::( last_tx_id, |key| key, )?; @@ -650,7 +646,7 @@ mod tests { impl BodyTestRunner { /// Get the last available tx id if any pub(crate) fn get_last_tx_id(&self) -> Result, TestRunnerError> { - let last_body = self.tx.query(|tx| { + let last_body = self.db.query(|tx| { let v = tx.cursor_read::()?.last()?; Ok(v) })?; @@ -668,7 +664,7 @@ mod tests { prev_progress: BlockNumber, highest_block: BlockNumber, ) -> Result<(), TestRunnerError> { - self.tx.query(|tx| { + self.db.query(|tx| { // Acquire cursors on body related tables let mut headers_cursor = tx.cursor_read::()?; let mut bodies_cursor = tx.cursor_read::()?; @@ -759,7 +755,7 @@ mod tests { /// A [BodyDownloader] that is backed by an internal [HashMap] for testing. 
#[derive(Debug)] pub(crate) struct TestBodyDownloader { - db: Arc>, + provider_factory: ProviderFactory>>, responses: HashMap, headers: VecDeque, batch_size: u64, @@ -767,11 +763,11 @@ mod tests { impl TestBodyDownloader { pub(crate) fn new( - db: Arc>, + provider_factory: ProviderFactory>>, responses: HashMap, batch_size: u64, ) -> Self { - Self { db, responses, headers: VecDeque::default(), batch_size } + Self { provider_factory, responses, headers: VecDeque::default(), batch_size } } } @@ -780,27 +776,19 @@ mod tests { &mut self, range: RangeInclusive, ) -> DownloadResult<()> { - self.headers = VecDeque::from( - self.db - .view(|tx| -> Result, DatabaseError> { - let mut header_cursor = tx.cursor_read::()?; - - let mut canonical_cursor = - tx.cursor_read::()?; - let walker = canonical_cursor.walk_range(range)?; - - let mut headers = Vec::default(); - for entry in walker { - let (num, hash) = entry?; - let (_, header) = - header_cursor.seek_exact(num)?.expect("missing header"); - headers.push(header.seal(hash)); - } - Ok(headers) - }) - .map_err(|err| DownloadError::Provider(err.into()))? - .map_err(|err| DownloadError::Provider(err.into()))?, - ); + let provider = self.provider_factory.provider()?; + let mut header_cursor = provider.tx_ref().cursor_read::()?; + + let mut canonical_cursor = + provider.tx_ref().cursor_read::()?; + let walker = canonical_cursor.walk_range(range)?; + + for entry in walker { + let (num, hash) = entry?; + let (_, header) = header_cursor.seek_exact(num)?.expect("missing header"); + self.headers.push_back(header.seal(hash)); + } + Ok(()) } } diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index 15a97cb19b623..38d3a5979d39a 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -110,7 +110,7 @@ impl ExecutionStage { /// Execute the stage. pub fn execute_inner( &mut self, - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, input: ExecInput, ) -> Result { if input.target_reached() { @@ -228,7 +228,7 @@ impl ExecutionStage { /// been previously executed. fn adjust_prune_modes( &self, - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, start_block: u64, max_block: u64, ) -> Result { @@ -247,7 +247,7 @@ impl ExecutionStage { } fn execution_checkpoint( - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, start_block: BlockNumber, max_block: BlockNumber, checkpoint: StageCheckpoint, @@ -314,7 +314,7 @@ fn execution_checkpoint( } fn calculate_gas_used_from_headers( - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, range: RangeInclusive, ) -> Result { let mut gas_total = 0; @@ -340,7 +340,7 @@ impl Stage for ExecutionStage { /// Execute the stage fn execute( &mut self, - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, input: ExecInput, ) -> Result { self.execute_inner(provider, input) @@ -349,7 +349,7 @@ impl Stage for ExecutionStage { /// Unwind the stage. 
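// NOTE: the `TestBodyDownloader` hunk above replaces the nested `db.view(..)`
// closure with straight-line reads through a provider. The core loop in
// isolation, with the stripped turbofish targets restored (`range` and the
// `headers` queue come from the surrounding method; error handling simplified):

use reth_db::{cursor::DbCursorRO, tables, transaction::DbTx};

let provider = provider_factory.provider()?;
let mut header_cursor = provider.tx_ref().cursor_read::<tables::Headers>()?;
let mut canonical_cursor = provider.tx_ref().cursor_read::<tables::CanonicalHeaders>()?;

// Walk the canonical chain over `range`, sealing each header with its hash.
for entry in canonical_cursor.walk_range(range)? {
    let (num, hash) = entry?;
    let (_, header) = header_cursor.seek_exact(num)?.expect("missing header");
    headers.push_back(header.seal(hash));
}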
fn unwind( &mut self, - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, input: UnwindInput, ) -> Result { let tx = provider.tx_ref(); @@ -491,7 +491,7 @@ impl ExecutionStageThresholds { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::TestTransaction; + use crate::test_utils::TestStageDB; use alloy_rlp::Decodable; use assert_matches::assert_matches; use reth_db::{models::AccountBeforeTx, test_utils::create_test_rw_db}; @@ -826,9 +826,8 @@ mod tests { #[tokio::test] async fn test_selfdestruct() { - let test_tx = TestTransaction::default(); - let factory = ProviderFactory::new(test_tx.tx.as_ref(), MAINNET.clone()); - let provider = factory.provider_rw().unwrap(); + let test_db = TestStageDB::default(); + let provider = test_db.factory.provider_rw().unwrap(); let input = ExecInput { target: Some(1), checkpoint: None }; let mut genesis_rlp = hex!("f901f8f901f3a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa0c9ceb8372c88cb461724d8d3d87e8b933f6fc5f679d4841800e662f4428ffd0da056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000080830f4240808000a00000000000000000000000000000000000000000000000000000000000000000880000000000000000c0c0").as_slice(); let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); @@ -853,7 +852,7 @@ mod tests { Account { nonce: 0, balance: U256::ZERO, bytecode_hash: Some(code_hash) }; // set account - let provider = factory.provider_rw().unwrap(); + let provider = test_db.factory.provider_rw().unwrap(); provider.tx_ref().put::(caller_address, caller_info).unwrap(); provider .tx_ref() @@ -882,13 +881,13 @@ mod tests { provider.commit().unwrap(); // execute - let provider = factory.provider_rw().unwrap(); + let provider = test_db.factory.provider_rw().unwrap(); let mut execution_stage = stage(); let _ = execution_stage.execute(&provider, input).unwrap(); provider.commit().unwrap(); // assert unwind stage - let provider = factory.provider_rw().unwrap(); + let provider = test_db.factory.provider_rw().unwrap(); assert_eq!(provider.basic_account(destroyed_address), Ok(None), "Account was destroyed"); assert_eq!( @@ -898,8 +897,8 @@ mod tests { ); // drops tx so that it returns write privilege to test_tx drop(provider); - let plain_accounts = test_tx.table::().unwrap(); - let plain_storage = test_tx.table::().unwrap(); + let plain_accounts = test_db.table::().unwrap(); + let plain_storage = test_db.table::().unwrap(); assert_eq!( plain_accounts, @@ -924,8 +923,8 @@ mod tests { ); assert!(plain_storage.is_empty()); - let account_changesets = test_tx.table::().unwrap(); - let storage_changesets = test_tx.table::().unwrap(); + let account_changesets = test_db.table::().unwrap(); + let storage_changesets = test_db.table::().unwrap(); assert_eq!( account_changesets, diff --git a/crates/stages/src/stages/finish.rs 
b/crates/stages/src/stages/finish.rs index 26357aedc4058..341be77dd1e1b 100644 --- a/crates/stages/src/stages/finish.rs +++ b/crates/stages/src/stages/finish.rs @@ -18,7 +18,7 @@ impl Stage for FinishStage { fn execute( &mut self, - _provider: &DatabaseProviderRW<&DB>, + _provider: &DatabaseProviderRW, input: ExecInput, ) -> Result { Ok(ExecOutput { checkpoint: StageCheckpoint::new(input.target()), done: true }) @@ -26,7 +26,7 @@ impl Stage for FinishStage { fn unwind( &mut self, - _provider: &DatabaseProviderRW<&DB>, + _provider: &DatabaseProviderRW, input: UnwindInput, ) -> Result { Ok(UnwindOutput { checkpoint: StageCheckpoint::new(input.unwind_to) }) @@ -38,7 +38,7 @@ mod tests { use super::*; use crate::test_utils::{ stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, TestRunnerError, - TestTransaction, UnwindStageTestRunner, + TestStageDB, UnwindStageTestRunner, }; use reth_interfaces::test_utils::{ generators, @@ -50,14 +50,14 @@ mod tests { #[derive(Default)] struct FinishTestRunner { - tx: TestTransaction, + db: TestStageDB, } impl StageTestRunner for FinishTestRunner { type S = FinishStage; - fn tx(&self) -> &TestTransaction { - &self.tx + fn db(&self) -> &TestStageDB { + &self.db } fn stage(&self) -> Self::S { @@ -72,7 +72,7 @@ mod tests { let start = input.checkpoint().block_number; let mut rng = generators::rng(); let head = random_header(&mut rng, start, None); - self.tx.insert_headers_with_td(std::iter::once(&head))?; + self.db.insert_headers_with_td(std::iter::once(&head))?; // use previous progress as seed size let end = input.target.unwrap_or_default() + 1; @@ -82,7 +82,7 @@ mod tests { } let mut headers = random_header_range(&mut rng, start + 1..end, head.hash()); - self.tx.insert_headers_with_td(headers.iter())?; + self.db.insert_headers_with_td(headers.iter())?; headers.insert(0, head); Ok(headers) } diff --git a/crates/stages/src/stages/hashing_account.rs b/crates/stages/src/stages/hashing_account.rs index fc3424f2ea573..308cfa71ea27f 100644 --- a/crates/stages/src/stages/hashing_account.rs +++ b/crates/stages/src/stages/hashing_account.rs @@ -134,7 +134,7 @@ impl Stage for AccountHashingStage { /// Execute the stage. fn execute( &mut self, - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, input: ExecInput, ) -> Result { if input.target_reached() { @@ -266,7 +266,7 @@ impl Stage for AccountHashingStage { /// Unwind the stage. fn unwind( &mut self, - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, input: UnwindInput, ) -> Result { let (range, unwind_progress, _) = @@ -288,7 +288,7 @@ impl Stage for AccountHashingStage { } fn stage_checkpoint_progress( - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, ) -> Result { Ok(EntitiesCheckpoint { processed: provider.tx_ref().entries::()? as u64, @@ -341,7 +341,7 @@ mod tests { done: true, }) if block_number == previous_stage && processed == total && - total == runner.tx.table::().unwrap().len() as u64 + total == runner.db.table::().unwrap().len() as u64 ); // Validate the stage execution @@ -368,7 +368,7 @@ mod tests { let result = rx.await.unwrap(); let fifth_address = runner - .tx + .db .query(|tx| { let (address, _) = tx .cursor_read::()? 
@@ -398,9 +398,9 @@ mod tests { }, done: false }) if address == fifth_address && - total == runner.tx.table::().unwrap().len() as u64 + total == runner.db.table::().unwrap().len() as u64 ); - assert_eq!(runner.tx.table::().unwrap().len(), 5); + assert_eq!(runner.db.table::().unwrap().len(), 5); // second run, hash next five accounts. input.checkpoint = Some(result.unwrap().checkpoint); @@ -425,9 +425,9 @@ mod tests { }, done: true }) if processed == total && - total == runner.tx.table::().unwrap().len() as u64 + total == runner.db.table::().unwrap().len() as u64 ); - assert_eq!(runner.tx.table::().unwrap().len(), 10); + assert_eq!(runner.db.table::().unwrap().len(), 10); // Validate the stage execution assert!(runner.validate_execution(input, result.ok()).is_ok(), "execution validation"); @@ -437,14 +437,14 @@ mod tests { use super::*; use crate::{ stages::hashing_account::AccountHashingStage, - test_utils::{StageTestRunner, TestTransaction}, + test_utils::{StageTestRunner, TestStageDB}, ExecInput, ExecOutput, UnwindInput, }; use reth_db::{cursor::DbCursorRO, tables, transaction::DbTx}; use reth_primitives::Address; pub(crate) struct AccountHashingTestRunner { - pub(crate) tx: TestTransaction, + pub(crate) db: TestStageDB, commit_threshold: u64, clean_threshold: u64, } @@ -462,7 +462,7 @@ mod tests { /// Iterates over PlainAccount table and checks that the accounts match the ones /// in the HashedAccount table pub(crate) fn check_hashed_accounts(&self) -> Result<(), TestRunnerError> { - self.tx.query(|tx| { + self.db.query(|tx| { let mut acc_cursor = tx.cursor_read::()?; let mut hashed_acc_cursor = tx.cursor_read::()?; @@ -481,7 +481,7 @@ mod tests { /// Same as check_hashed_accounts, only that checks with the old account state, /// namely, the same account with nonce - 1 and balance - 1. pub(crate) fn check_old_hashed_accounts(&self) -> Result<(), TestRunnerError> { - self.tx.query(|tx| { + self.db.query(|tx| { let mut acc_cursor = tx.cursor_read::()?; let mut hashed_acc_cursor = tx.cursor_read::()?; @@ -506,19 +506,15 @@ mod tests { impl Default for AccountHashingTestRunner { fn default() -> Self { - Self { - tx: TestTransaction::default(), - commit_threshold: 1000, - clean_threshold: 1000, - } + Self { db: TestStageDB::default(), commit_threshold: 1000, clean_threshold: 1000 } } } impl StageTestRunner for AccountHashingTestRunner { type S = AccountHashingStage; - fn tx(&self) -> &TestTransaction { - &self.tx + fn db(&self) -> &TestStageDB { + &self.db } fn stage(&self) -> Self::S { @@ -534,7 +530,7 @@ mod tests { type Seed = Vec<(Address, Account)>; fn seed_execution(&mut self, input: ExecInput) -> Result { - let provider = self.tx.inner_rw(); + let provider = self.db.factory.provider_rw()?; let res = Ok(AccountHashingStage::seed( &provider, SeedOpts { blocks: 1..=input.target(), accounts: 0..10, txs: 0..3 }, diff --git a/crates/stages/src/stages/hashing_storage.rs b/crates/stages/src/stages/hashing_storage.rs index 73d6277a830d8..d508846a43c4d 100644 --- a/crates/stages/src/stages/hashing_storage.rs +++ b/crates/stages/src/stages/hashing_storage.rs @@ -53,7 +53,7 @@ impl Stage for StorageHashingStage { /// Execute the stage. fn execute( &mut self, - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, input: ExecInput, ) -> Result { let tx = provider.tx_ref(); @@ -192,7 +192,7 @@ impl Stage for StorageHashingStage { /// Unwind the stage. 
fn unwind( &mut self, - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, input: UnwindInput, ) -> Result { let (range, unwind_progress, _) = @@ -213,7 +213,7 @@ impl Stage for StorageHashingStage { } fn stage_checkpoint_progress( - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, ) -> Result { Ok(EntitiesCheckpoint { processed: provider.tx_ref().entries::()? as u64, @@ -226,7 +226,7 @@ mod tests { use super::*; use crate::test_utils::{ stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, TestRunnerError, - TestTransaction, UnwindStageTestRunner, + TestStageDB, UnwindStageTestRunner, }; use assert_matches::assert_matches; use rand::Rng; @@ -282,7 +282,7 @@ mod tests { }, .. }) if processed == previous_checkpoint.progress.processed + 1 && - total == runner.tx.table::().unwrap().len() as u64); + total == runner.db.table::().unwrap().len() as u64); // Continue from checkpoint input.checkpoint = Some(checkpoint); @@ -296,7 +296,7 @@ mod tests { }, .. }) if processed == total && - total == runner.tx.table::().unwrap().len() as u64); + total == runner.db.table::().unwrap().len() as u64); // Validate the stage execution assert!( @@ -331,7 +331,7 @@ mod tests { let result = rx.await.unwrap(); let (progress_address, progress_key) = runner - .tx + .db .query(|tx| { let (address, entry) = tx .cursor_read::()? @@ -363,9 +363,9 @@ mod tests { }, done: false }) if address == progress_address && storage == progress_key && - total == runner.tx.table::().unwrap().len() as u64 + total == runner.db.table::().unwrap().len() as u64 ); - assert_eq!(runner.tx.table::().unwrap().len(), 500); + assert_eq!(runner.db.table::().unwrap().len(), 500); // second run with commit threshold of 2 to check if subkey is set. runner.set_commit_threshold(2); @@ -375,7 +375,7 @@ mod tests { let result = rx.await.unwrap(); let (progress_address, progress_key) = runner - .tx + .db .query(|tx| { let (address, entry) = tx .cursor_read::()? @@ -409,9 +409,9 @@ mod tests { }, done: false }) if address == progress_address && storage == progress_key && - total == runner.tx.table::().unwrap().len() as u64 + total == runner.db.table::().unwrap().len() as u64 ); - assert_eq!(runner.tx.table::().unwrap().len(), 502); + assert_eq!(runner.db.table::().unwrap().len(), 502); // third last run, hash rest of storages. 
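// NOTE: `stage_checkpoint_progress` above also lost its type parameters in
// transit. Reconstructed for the storage-hashing case under the assumption that
// `processed`/`total` count hashed vs. plain storage entries (the
// account-hashing variant is analogous with `HashedAccount` /
// `PlainAccountState`); the `DatabaseError` return type is a best guess:

use reth_db::{database::Database, tables, transaction::DbTx, DatabaseError};
use reth_primitives::stage::EntitiesCheckpoint;
use reth_provider::DatabaseProviderRW;

fn stage_checkpoint_progress<DB: Database>(
    provider: &DatabaseProviderRW<DB>,
) -> Result<EntitiesCheckpoint, DatabaseError> {
    Ok(EntitiesCheckpoint {
        // Entries hashed so far...
        processed: provider.tx_ref().entries::<tables::HashedStorage>()? as u64,
        // ...out of all plain-state entries that need hashing.
        total: provider.tx_ref().entries::<tables::PlainStorageState>()? as u64,
    })
}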
runner.set_commit_threshold(1000); @@ -441,11 +441,11 @@ mod tests { }, done: true }) if processed == total && - total == runner.tx.table::().unwrap().len() as u64 + total == runner.db.table::().unwrap().len() as u64 ); assert_eq!( - runner.tx.table::().unwrap().len(), - runner.tx.table::().unwrap().len() + runner.db.table::().unwrap().len(), + runner.db.table::().unwrap().len() ); // Validate the stage execution @@ -453,22 +453,22 @@ mod tests { } struct StorageHashingTestRunner { - tx: TestTransaction, + db: TestStageDB, commit_threshold: u64, clean_threshold: u64, } impl Default for StorageHashingTestRunner { fn default() -> Self { - Self { tx: TestTransaction::default(), commit_threshold: 1000, clean_threshold: 1000 } + Self { db: TestStageDB::default(), commit_threshold: 1000, clean_threshold: 1000 } } } impl StageTestRunner for StorageHashingTestRunner { type S = StorageHashingStage; - fn tx(&self) -> &TestTransaction { - &self.tx + fn db(&self) -> &TestStageDB { + &self.db } fn stage(&self) -> Self::S { @@ -493,7 +493,7 @@ mod tests { let blocks = random_block_range(&mut rng, stage_progress..=end, B256::ZERO, 0..3); - self.tx.insert_headers(blocks.iter().map(|block| &block.header))?; + self.db.insert_headers(blocks.iter().map(|block| &block.header))?; let iter = blocks.iter(); let mut next_tx_num = 0; @@ -501,7 +501,7 @@ mod tests { for progress in iter { // Insert last progress data let block_number = progress.number; - self.tx.commit(|tx| { + self.db.commit(|tx| { progress.body.iter().try_for_each( |transaction| -> Result<(), reth_db::DatabaseError> { tx.put::(transaction.hash(), next_tx_num)?; @@ -552,7 +552,8 @@ mod tests { first_tx_num = next_tx_num; - tx.put::(progress.number, body) + tx.put::(progress.number, body)?; + Ok(()) })?; } @@ -592,7 +593,7 @@ mod tests { } fn check_hashed_storage(&self) -> Result<(), TestRunnerError> { - self.tx + self.db .query(|tx| { let mut storage_cursor = tx.cursor_dup_read::()?; let mut hashed_storage_cursor = @@ -661,7 +662,7 @@ mod tests { fn unwind_storage(&self, input: UnwindInput) -> Result<(), TestRunnerError> { tracing::debug!("unwinding storage..."); let target_block = input.unwind_to; - self.tx.commit(|tx| { + self.db.commit(|tx| { let mut storage_cursor = tx.cursor_dup_write::()?; let mut changeset_cursor = tx.cursor_dup_read::()?; diff --git a/crates/stages/src/stages/headers.rs b/crates/stages/src/stages/headers.rs index a150415bc36ba..b57fcd279df96 100644 --- a/crates/stages/src/stages/headers.rs +++ b/crates/stages/src/stages/headers.rs @@ -176,7 +176,7 @@ where /// starting from the tip of the chain fn execute( &mut self, - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, input: ExecInput, ) -> Result { let current_checkpoint = input.checkpoint(); @@ -279,7 +279,7 @@ where /// Unwind the stage. 
fn unwind( &mut self, - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, input: UnwindInput, ) -> Result { self.buffer.take(); @@ -326,7 +326,7 @@ mod tests { mod test_runner { use super::*; - use crate::test_utils::{TestRunnerError, TestTransaction}; + use crate::test_utils::{TestRunnerError, TestStageDB}; use reth_db::{test_utils::TempDatabase, DatabaseEnv}; use reth_downloaders::headers::reverse_headers::{ ReverseHeadersDownloader, ReverseHeadersDownloaderBuilder, @@ -344,7 +344,7 @@ mod tests { pub(crate) client: TestHeadersClient, channel: (watch::Sender, watch::Receiver), downloader_factory: Box D + Send + Sync + 'static>, - tx: TestTransaction, + db: TestStageDB, } impl Default for HeadersTestRunner { @@ -361,7 +361,7 @@ mod tests { 1000, ) }), - tx: TestTransaction::default(), + db: TestStageDB::default(), } } } @@ -369,13 +369,13 @@ mod tests { impl StageTestRunner for HeadersTestRunner { type S = HeaderStage>>, D>; - fn tx(&self) -> &TestTransaction { - &self.tx + fn db(&self) -> &TestStageDB { + &self.db } fn stage(&self) -> Self::S { HeaderStage::new( - self.tx.factory.clone(), + self.db.factory.clone(), (*self.downloader_factory)(), HeaderSyncMode::Tip(self.channel.1.clone()), ) @@ -390,9 +390,10 @@ mod tests { let mut rng = generators::rng(); let start = input.checkpoint().block_number; let head = random_header(&mut rng, start, None); - self.tx.insert_headers(std::iter::once(&head))?; + self.db.insert_headers(std::iter::once(&head))?; // patch td table for `update_head` call - self.tx.commit(|tx| tx.put::(head.number, U256::ZERO.into()))?; + self.db + .commit(|tx| Ok(tx.put::(head.number, U256::ZERO.into())?))?; // use previous checkpoint as seed size let end = input.target.unwrap_or_default() + 1; @@ -415,7 +416,7 @@ mod tests { let initial_checkpoint = input.checkpoint().block_number; match output { Some(output) if output.checkpoint.block_number > initial_checkpoint => { - let provider = self.tx.factory.provider()?; + let provider = self.db.factory.provider()?; for block_num in (initial_checkpoint..output.checkpoint.block_number).rev() { // look up the header hash @@ -442,7 +443,7 @@ mod tests { headers.last().unwrap().hash() } else { let tip = random_header(&mut generators::rng(), 0, None); - self.tx.insert_headers(std::iter::once(&tip))?; + self.db.insert_headers(std::iter::once(&tip))?; tip.hash() }; self.send_tip(tip); @@ -467,7 +468,7 @@ mod tests { .stream_batch_size(500) .build(client.clone(), Arc::new(TestConsensus::default())) }), - tx: TestTransaction::default(), + db: TestStageDB::default(), } } } @@ -477,10 +478,10 @@ mod tests { &self, block: BlockNumber, ) -> Result<(), TestRunnerError> { - self.tx + self.db .ensure_no_entry_above_by_value::(block, |val| val)?; - self.tx.ensure_no_entry_above::(block, |key| key)?; - self.tx.ensure_no_entry_above::(block, |key| key)?; + self.db.ensure_no_entry_above::(block, |key| key)?; + self.db.ensure_no_entry_above::(block, |key| key)?; Ok(()) } diff --git a/crates/stages/src/stages/index_account_history.rs b/crates/stages/src/stages/index_account_history.rs index 4a0df0c29c8ed..355a63a7d5c24 100644 --- a/crates/stages/src/stages/index_account_history.rs +++ b/crates/stages/src/stages/index_account_history.rs @@ -44,7 +44,7 @@ impl Stage for IndexAccountHistoryStage { /// Execute the stage. 
fn execute( &mut self, - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, mut input: ExecInput, ) -> Result { if let Some((target_prunable_block, prune_mode)) = self @@ -87,7 +87,7 @@ impl Stage for IndexAccountHistoryStage { /// Unwind the stage. fn unwind( &mut self, - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, input: UnwindInput, ) -> Result { let (range, unwind_progress, _) = @@ -105,7 +105,7 @@ mod tests { use super::*; use crate::test_utils::{ stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, TestRunnerError, - TestTransaction, UnwindStageTestRunner, + TestStageDB, UnwindStageTestRunner, }; use itertools::Itertools; use reth_db::{ @@ -122,8 +122,7 @@ mod tests { generators, generators::{random_block_range, random_changeset_range, random_contract_account_range}, }; - use reth_primitives::{address, Address, BlockNumber, PruneMode, B256, MAINNET}; - use reth_provider::ProviderFactory; + use reth_primitives::{address, Address, BlockNumber, PruneMode, B256}; use std::collections::BTreeMap; const ADDRESS: Address = address!("0000000000000000000000000000000000000001"); @@ -153,9 +152,9 @@ mod tests { .collect() } - fn partial_setup(tx: &TestTransaction) { + fn partial_setup(db: &TestStageDB) { // setup - tx.commit(|tx| { + db.commit(|tx| { // we just need first and last tx.put::( 0, @@ -177,25 +176,23 @@ mod tests { .unwrap() } - fn run(tx: &TestTransaction, run_to: u64) { + fn run(db: &TestStageDB, run_to: u64) { let input = ExecInput { target: Some(run_to), ..Default::default() }; let mut stage = IndexAccountHistoryStage::default(); - let factory = ProviderFactory::new(tx.tx.as_ref(), MAINNET.clone()); - let provider = factory.provider_rw().unwrap(); + let provider = db.factory.provider_rw().unwrap(); let out = stage.execute(&provider, input).unwrap(); assert_eq!(out, ExecOutput { checkpoint: StageCheckpoint::new(5), done: true }); provider.commit().unwrap(); } - fn unwind(tx: &TestTransaction, unwind_from: u64, unwind_to: u64) { + fn unwind(db: &TestStageDB, unwind_from: u64, unwind_to: u64) { let input = UnwindInput { checkpoint: StageCheckpoint::new(unwind_from), unwind_to, ..Default::default() }; let mut stage = IndexAccountHistoryStage::default(); - let factory = ProviderFactory::new(tx.tx.as_ref(), MAINNET.clone()); - let provider = factory.provider_rw().unwrap(); + let provider = db.factory.provider_rw().unwrap(); let out = stage.unwind(&provider, input).unwrap(); assert_eq!(out, UnwindOutput { checkpoint: StageCheckpoint::new(unwind_to) }); provider.commit().unwrap(); @@ -204,116 +201,116 @@ mod tests { #[tokio::test] async fn insert_index_to_empty() { // init - let tx = TestTransaction::default(); + let db = TestStageDB::default(); // setup - partial_setup(&tx); + partial_setup(&db); // run - run(&tx, 5); + run(&db, 5); // verify - let table = cast(tx.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![4, 5])])); // unwind - unwind(&tx, 5, 0); + unwind(&db, 5, 0); // verify initial state - let table = tx.table::().unwrap(); + let table = db.table::().unwrap(); assert!(table.is_empty()); } #[tokio::test] async fn insert_index_to_not_empty_shard() { // init - let tx = TestTransaction::default(); + let db = TestStageDB::default(); // setup - partial_setup(&tx); - tx.commit(|tx| { + partial_setup(&db); + db.commit(|tx| { tx.put::(shard(u64::MAX), list(&[1, 2, 3])).unwrap(); Ok(()) }) .unwrap(); // run - run(&tx, 5); + run(&db, 5); // verify - let table = 
cast(tx.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![1, 2, 3, 4, 5]),])); // unwind - unwind(&tx, 5, 0); + unwind(&db, 5, 0); // verify initial state - let table = cast(tx.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![1, 2, 3]),])); } #[tokio::test] async fn insert_index_to_full_shard() { // init - let tx = TestTransaction::default(); + let db = TestStageDB::default(); let full_list = vec![3; NUM_OF_INDICES_IN_SHARD]; // setup - partial_setup(&tx); - tx.commit(|tx| { + partial_setup(&db); + db.commit(|tx| { tx.put::(shard(u64::MAX), list(&full_list)).unwrap(); Ok(()) }) .unwrap(); // run - run(&tx, 5); + run(&db, 5); // verify - let table = cast(tx.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!( table, BTreeMap::from([(shard(3), full_list.clone()), (shard(u64::MAX), vec![4, 5])]) ); // unwind - unwind(&tx, 5, 0); + unwind(&db, 5, 0); // verify initial state - let table = cast(tx.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), full_list)])); } #[tokio::test] async fn insert_index_to_fill_shard() { // init - let tx = TestTransaction::default(); + let db = TestStageDB::default(); let mut close_full_list = vec![1; NUM_OF_INDICES_IN_SHARD - 2]; // setup - partial_setup(&tx); - tx.commit(|tx| { + partial_setup(&db); + db.commit(|tx| { tx.put::(shard(u64::MAX), list(&close_full_list)).unwrap(); Ok(()) }) .unwrap(); // run - run(&tx, 5); + run(&db, 5); // verify close_full_list.push(4); close_full_list.push(5); - let table = cast(tx.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), close_full_list.clone()),])); // unwind - unwind(&tx, 5, 0); + unwind(&db, 5, 0); // verify initial state close_full_list.pop(); close_full_list.pop(); - let table = cast(tx.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), close_full_list),])); // verify initial state @@ -322,46 +319,46 @@ mod tests { #[tokio::test] async fn insert_index_second_half_shard() { // init - let tx = TestTransaction::default(); + let db = TestStageDB::default(); let mut close_full_list = vec![1; NUM_OF_INDICES_IN_SHARD - 1]; // setup - partial_setup(&tx); - tx.commit(|tx| { + partial_setup(&db); + db.commit(|tx| { tx.put::(shard(u64::MAX), list(&close_full_list)).unwrap(); Ok(()) }) .unwrap(); // run - run(&tx, 5); + run(&db, 5); // verify close_full_list.push(4); - let table = cast(tx.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!( table, BTreeMap::from([(shard(4), close_full_list.clone()), (shard(u64::MAX), vec![5])]) ); // unwind - unwind(&tx, 5, 0); + unwind(&db, 5, 0); // verify initial state close_full_list.pop(); - let table = cast(tx.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), close_full_list),])); } #[tokio::test] async fn insert_index_to_third_shard() { // init - let tx = TestTransaction::default(); + let db = TestStageDB::default(); let full_list = vec![1; NUM_OF_INDICES_IN_SHARD]; // setup - partial_setup(&tx); - tx.commit(|tx| { + partial_setup(&db); + db.commit(|tx| { tx.put::(shard(1), list(&full_list)).unwrap(); tx.put::(shard(2), list(&full_list)).unwrap(); tx.put::(shard(u64::MAX), list(&[2, 3])).unwrap(); @@ -369,10 +366,10 @@ mod tests { }) .unwrap(); 
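// The setup just above committed three shards: two closed ones keyed
// `shard(1)` and `shard(2)` plus the open shard keyed `shard(u64::MAX)`.
// That is the history-index invariant these tests exercise: a closed shard is
// keyed by the highest block it covers, and the newest shard uses `u64::MAX`
// so a cursor seek for any block lands on a covering shard. A simplified
// model of what the `cast(..)` + `assert_eq!` pairs verify, with plain u64
// keys standing in for `ShardedKey<Address>`:
use std::collections::BTreeMap;

fn assert_shard_keys(table: &BTreeMap<u64, Vec<u64>>) {
    for (shard_key, blocks) in table {
        // No shard may index a block above its own key.
        assert!(blocks.iter().all(|block| block <= shard_key));
    }
}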
- run(&tx, 5); + run(&db, 5); // verify - let table = cast(tx.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!( table, BTreeMap::from([ @@ -383,10 +380,10 @@ mod tests { ); // unwind - unwind(&tx, 5, 0); + unwind(&db, 5, 0); // verify initial state - let table = cast(tx.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!( table, BTreeMap::from([ @@ -400,10 +397,10 @@ mod tests { #[tokio::test] async fn insert_index_with_prune_mode() { // init - let tx = TestTransaction::default(); + let db = TestStageDB::default(); // setup - tx.commit(|tx| { + db.commit(|tx| { // we just need first and last tx.put::( 0, @@ -431,43 +428,42 @@ mod tests { prune_mode: Some(PruneMode::Before(36)), ..Default::default() }; - let factory = ProviderFactory::new(tx.tx.as_ref(), MAINNET.clone()); - let provider = factory.provider_rw().unwrap(); + let provider = db.factory.provider_rw().unwrap(); let out = stage.execute(&provider, input).unwrap(); assert_eq!(out, ExecOutput { checkpoint: StageCheckpoint::new(20000), done: true }); provider.commit().unwrap(); // verify - let table = cast(tx.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![36, 100])])); // unwind - unwind(&tx, 20000, 0); + unwind(&db, 20000, 0); // verify initial state - let table = tx.table::().unwrap(); + let table = db.table::().unwrap(); assert!(table.is_empty()); } stage_test_suite_ext!(IndexAccountHistoryTestRunner, index_account_history); struct IndexAccountHistoryTestRunner { - pub(crate) tx: TestTransaction, + pub(crate) db: TestStageDB, commit_threshold: u64, prune_mode: Option, } impl Default for IndexAccountHistoryTestRunner { fn default() -> Self { - Self { tx: TestTransaction::default(), commit_threshold: 1000, prune_mode: None } + Self { db: TestStageDB::default(), commit_threshold: 1000, prune_mode: None } } } impl StageTestRunner for IndexAccountHistoryTestRunner { type S = IndexAccountHistoryStage; - fn tx(&self) -> &TestTransaction { - &self.tx + fn db(&self) -> &TestStageDB { + &self.db } fn stage(&self) -> Self::S { @@ -500,7 +496,7 @@ mod tests { ); // add block changeset from block 1. - self.tx.insert_changesets(transitions, Some(start))?; + self.db.insert_changesets(transitions, Some(start))?; Ok(()) } @@ -522,7 +518,7 @@ mod tests { ExecOutput { checkpoint: StageCheckpoint::new(input.target()), done: true } ); - let provider = self.tx.inner(); + let provider = self.db.factory.provider()?; let mut changeset_cursor = provider.tx_ref().cursor_read::()?; @@ -568,7 +564,7 @@ mod tests { }; } - let table = cast(self.tx.table::().unwrap()); + let table = cast(self.db.table::().unwrap()); assert_eq!(table, result); } Ok(()) @@ -577,7 +573,7 @@ mod tests { impl UnwindStageTestRunner for IndexAccountHistoryTestRunner { fn validate_unwind(&self, _input: UnwindInput) -> Result<(), TestRunnerError> { - let table = self.tx.table::().unwrap(); + let table = self.db.table::().unwrap(); assert!(table.is_empty()); Ok(()) } diff --git a/crates/stages/src/stages/index_storage_history.rs b/crates/stages/src/stages/index_storage_history.rs index b5ef6fda99bf1..c189a90c320b9 100644 --- a/crates/stages/src/stages/index_storage_history.rs +++ b/crates/stages/src/stages/index_storage_history.rs @@ -43,7 +43,7 @@ impl Stage for IndexStorageHistoryStage { /// Execute the stage. 
fn execute( &mut self, - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, mut input: ExecInput, ) -> Result { if let Some((target_prunable_block, prune_mode)) = self @@ -85,7 +85,7 @@ impl Stage for IndexStorageHistoryStage { /// Unwind the stage. fn unwind( &mut self, - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, input: UnwindInput, ) -> Result { let (range, unwind_progress, _) = @@ -102,7 +102,7 @@ mod tests { use super::*; use crate::test_utils::{ stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, TestRunnerError, - TestTransaction, UnwindStageTestRunner, + TestStageDB, UnwindStageTestRunner, }; use itertools::Itertools; use reth_db::{ @@ -121,9 +121,8 @@ mod tests { generators::{random_block_range, random_changeset_range, random_contract_account_range}, }; use reth_primitives::{ - address, b256, Address, BlockNumber, PruneMode, StorageEntry, B256, MAINNET, U256, + address, b256, Address, BlockNumber, PruneMode, StorageEntry, B256, U256, }; - use reth_provider::ProviderFactory; use std::collections::BTreeMap; const ADDRESS: Address = address!("0000000000000000000000000000000000000001"); @@ -163,9 +162,9 @@ mod tests { .collect() } - fn partial_setup(tx: &TestTransaction) { + fn partial_setup(db: &TestStageDB) { // setup - tx.commit(|tx| { + db.commit(|tx| { // we just need first and last tx.put::( 0, @@ -187,25 +186,23 @@ mod tests { .unwrap() } - fn run(tx: &TestTransaction, run_to: u64) { + fn run(db: &TestStageDB, run_to: u64) { let input = ExecInput { target: Some(run_to), ..Default::default() }; let mut stage = IndexStorageHistoryStage::default(); - let factory = ProviderFactory::new(tx.tx.as_ref(), MAINNET.clone()); - let provider = factory.provider_rw().unwrap(); + let provider = db.factory.provider_rw().unwrap(); let out = stage.execute(&provider, input).unwrap(); assert_eq!(out, ExecOutput { checkpoint: StageCheckpoint::new(5), done: true }); provider.commit().unwrap(); } - fn unwind(tx: &TestTransaction, unwind_from: u64, unwind_to: u64) { + fn unwind(db: &TestStageDB, unwind_from: u64, unwind_to: u64) { let input = UnwindInput { checkpoint: StageCheckpoint::new(unwind_from), unwind_to, ..Default::default() }; let mut stage = IndexStorageHistoryStage::default(); - let factory = ProviderFactory::new(tx.tx.as_ref(), MAINNET.clone()); - let provider = factory.provider_rw().unwrap(); + let provider = db.factory.provider_rw().unwrap(); let out = stage.unwind(&provider, input).unwrap(); assert_eq!(out, UnwindOutput { checkpoint: StageCheckpoint::new(unwind_to) }); provider.commit().unwrap(); @@ -214,119 +211,119 @@ mod tests { #[tokio::test] async fn insert_index_to_empty() { // init - let tx = TestTransaction::default(); + let db = TestStageDB::default(); // setup - partial_setup(&tx); + partial_setup(&db); // run - run(&tx, 5); + run(&db, 5); // verify - let table = cast(tx.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![4, 5]),])); // unwind - unwind(&tx, 5, 0); + unwind(&db, 5, 0); // verify initial state - let table = tx.table::().unwrap(); + let table = db.table::().unwrap(); assert!(table.is_empty()); } #[tokio::test] async fn insert_index_to_not_empty_shard() { // init - let tx = TestTransaction::default(); + let db = TestStageDB::default(); // setup - partial_setup(&tx); - tx.commit(|tx| { + partial_setup(&db); + db.commit(|tx| { tx.put::(shard(u64::MAX), list(&[1, 2, 3])).unwrap(); Ok(()) }) .unwrap(); // run - run(&tx, 5); + run(&db, 5); // 
verify - let table = cast(tx.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![1, 2, 3, 4, 5]),])); // unwind - unwind(&tx, 5, 0); + unwind(&db, 5, 0); // verify initial state - let table = cast(tx.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![1, 2, 3]),])); } #[tokio::test] async fn insert_index_to_full_shard() { // init - let tx = TestTransaction::default(); + let db = TestStageDB::default(); let _input = ExecInput { target: Some(5), ..Default::default() }; // change does not matter only that account is present in changeset. let full_list = vec![3; NUM_OF_INDICES_IN_SHARD]; // setup - partial_setup(&tx); - tx.commit(|tx| { + partial_setup(&db); + db.commit(|tx| { tx.put::(shard(u64::MAX), list(&full_list)).unwrap(); Ok(()) }) .unwrap(); // run - run(&tx, 5); + run(&db, 5); // verify - let table = cast(tx.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!( table, BTreeMap::from([(shard(3), full_list.clone()), (shard(u64::MAX), vec![4, 5])]) ); // unwind - unwind(&tx, 5, 0); + unwind(&db, 5, 0); // verify initial state - let table = cast(tx.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), full_list)])); } #[tokio::test] async fn insert_index_to_fill_shard() { // init - let tx = TestTransaction::default(); + let db = TestStageDB::default(); let mut close_full_list = vec![1; NUM_OF_INDICES_IN_SHARD - 2]; // setup - partial_setup(&tx); - tx.commit(|tx| { + partial_setup(&db); + db.commit(|tx| { tx.put::(shard(u64::MAX), list(&close_full_list)).unwrap(); Ok(()) }) .unwrap(); // run - run(&tx, 5); + run(&db, 5); // verify close_full_list.push(4); close_full_list.push(5); - let table = cast(tx.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), close_full_list.clone()),])); // unwind - unwind(&tx, 5, 0); + unwind(&db, 5, 0); // verify initial state close_full_list.pop(); close_full_list.pop(); - let table = cast(tx.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), close_full_list),])); // verify initial state @@ -335,46 +332,46 @@ mod tests { #[tokio::test] async fn insert_index_second_half_shard() { // init - let tx = TestTransaction::default(); + let db = TestStageDB::default(); let mut close_full_list = vec![1; NUM_OF_INDICES_IN_SHARD - 1]; // setup - partial_setup(&tx); - tx.commit(|tx| { + partial_setup(&db); + db.commit(|tx| { tx.put::(shard(u64::MAX), list(&close_full_list)).unwrap(); Ok(()) }) .unwrap(); // run - run(&tx, 5); + run(&db, 5); // verify close_full_list.push(4); - let table = cast(tx.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!( table, BTreeMap::from([(shard(4), close_full_list.clone()), (shard(u64::MAX), vec![5])]) ); // unwind - unwind(&tx, 5, 0); + unwind(&db, 5, 0); // verify initial state close_full_list.pop(); - let table = cast(tx.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), close_full_list),])); } #[tokio::test] async fn insert_index_to_third_shard() { // init - let tx = TestTransaction::default(); + let db = TestStageDB::default(); let full_list = vec![1; NUM_OF_INDICES_IN_SHARD]; // setup - partial_setup(&tx); - tx.commit(|tx| { + partial_setup(&db); + db.commit(|tx| { tx.put::(shard(1), 
list(&full_list)).unwrap(); tx.put::(shard(2), list(&full_list)).unwrap(); tx.put::(shard(u64::MAX), list(&[2, 3])).unwrap(); @@ -382,10 +379,10 @@ mod tests { }) .unwrap(); - run(&tx, 5); + run(&db, 5); // verify - let table = cast(tx.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!( table, BTreeMap::from([ @@ -396,10 +393,10 @@ mod tests { ); // unwind - unwind(&tx, 5, 0); + unwind(&db, 5, 0); // verify initial state - let table = cast(tx.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!( table, BTreeMap::from([ @@ -413,10 +410,10 @@ mod tests { #[tokio::test] async fn insert_index_with_prune_mode() { // init - let tx = TestTransaction::default(); + let db = TestStageDB::default(); // setup - tx.commit(|tx| { + db.commit(|tx| { // we just need first and last tx.put::( 0, @@ -444,43 +441,42 @@ mod tests { prune_mode: Some(PruneMode::Before(36)), ..Default::default() }; - let factory = ProviderFactory::new(tx.tx.as_ref(), MAINNET.clone()); - let provider = factory.provider_rw().unwrap(); + let provider = db.factory.provider_rw().unwrap(); let out = stage.execute(&provider, input).unwrap(); assert_eq!(out, ExecOutput { checkpoint: StageCheckpoint::new(20000), done: true }); provider.commit().unwrap(); // verify - let table = cast(tx.table::().unwrap()); + let table = cast(db.table::().unwrap()); assert_eq!(table, BTreeMap::from([(shard(u64::MAX), vec![36, 100]),])); // unwind - unwind(&tx, 20000, 0); + unwind(&db, 20000, 0); // verify initial state - let table = tx.table::().unwrap(); + let table = db.table::().unwrap(); assert!(table.is_empty()); } stage_test_suite_ext!(IndexStorageHistoryTestRunner, index_storage_history); struct IndexStorageHistoryTestRunner { - pub(crate) tx: TestTransaction, + pub(crate) db: TestStageDB, commit_threshold: u64, prune_mode: Option, } impl Default for IndexStorageHistoryTestRunner { fn default() -> Self { - Self { tx: TestTransaction::default(), commit_threshold: 1000, prune_mode: None } + Self { db: TestStageDB::default(), commit_threshold: 1000, prune_mode: None } } } impl StageTestRunner for IndexStorageHistoryTestRunner { type S = IndexStorageHistoryStage; - fn tx(&self) -> &TestTransaction { - &self.tx + fn db(&self) -> &TestStageDB { + &self.db } fn stage(&self) -> Self::S { @@ -513,7 +509,7 @@ mod tests { ); // add block changeset from block 1. 
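// The storage-history tests drive the stage through the same condensed
// helpers shown earlier in this hunk; sketched here in one place, since every
// test repeats the pattern:
fn run_to_five(db: &TestStageDB) {
    let mut stage = IndexStorageHistoryStage::default();
    // The provider factory now lives on `TestStageDB`, so no ad-hoc
    // `ProviderFactory::new(.., MAINNET.clone())` is needed anymore.
    let provider = db.factory.provider_rw().unwrap();
    let input = ExecInput { target: Some(5), ..Default::default() };
    let out = stage.execute(&provider, input).unwrap();
    assert_eq!(out, ExecOutput { checkpoint: StageCheckpoint::new(5), done: true });
    provider.commit().unwrap();
}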
- self.tx.insert_changesets(transitions, Some(start))?; + self.db.insert_changesets(transitions, Some(start))?; Ok(()) } @@ -535,7 +531,7 @@ mod tests { ExecOutput { checkpoint: StageCheckpoint::new(input.target()), done: true } ); - let provider = self.tx.inner(); + let provider = self.db.factory.provider()?; let mut changeset_cursor = provider.tx_ref().cursor_read::()?; @@ -586,7 +582,7 @@ mod tests { }; } - let table = cast(self.tx.table::().unwrap()); + let table = cast(self.db.table::().unwrap()); assert_eq!(table, result); } Ok(()) @@ -595,7 +591,7 @@ mod tests { impl UnwindStageTestRunner for IndexStorageHistoryTestRunner { fn validate_unwind(&self, _input: UnwindInput) -> Result<(), TestRunnerError> { - let table = self.tx.table::().unwrap(); + let table = self.db.table::().unwrap(); assert!(table.is_empty()); Ok(()) } diff --git a/crates/stages/src/stages/merkle.rs b/crates/stages/src/stages/merkle.rs index 602db57232efc..518ceac92e7cb 100644 --- a/crates/stages/src/stages/merkle.rs +++ b/crates/stages/src/stages/merkle.rs @@ -80,7 +80,7 @@ impl MerkleStage { /// Gets the hashing progress pub fn get_execution_checkpoint( &self, - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, ) -> Result, StageError> { let buf = provider.get_stage_checkpoint_progress(StageId::MerkleExecute)?.unwrap_or_default(); @@ -96,7 +96,7 @@ impl MerkleStage { /// Saves the hashing progress pub fn save_execution_checkpoint( &mut self, - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, checkpoint: Option, ) -> Result<(), StageError> { let mut buf = vec![]; @@ -127,7 +127,7 @@ impl Stage for MerkleStage { /// Execute the stage. fn execute( &mut self, - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, input: ExecInput, ) -> Result { let threshold = match self { @@ -261,7 +261,7 @@ impl Stage for MerkleStage { /// Unwind the stage. 
fn unwind( &mut self, - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, input: UnwindInput, ) -> Result { let tx = provider.tx_ref(); @@ -338,7 +338,7 @@ mod tests { use super::*; use crate::test_utils::{ stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, TestRunnerError, - TestTransaction, UnwindStageTestRunner, + TestStageDB, UnwindStageTestRunner, }; use assert_matches::assert_matches; use reth_db::{ @@ -392,8 +392,8 @@ mod tests { done: true }) if block_number == previous_stage && processed == total && total == ( - runner.tx.table::().unwrap().len() + - runner.tx.table::().unwrap().len() + runner.db.table::().unwrap().len() + + runner.db.table::().unwrap().len() ) as u64 ); @@ -432,8 +432,8 @@ mod tests { done: true }) if block_number == previous_stage && processed == total && total == ( - runner.tx.table::().unwrap().len() + - runner.tx.table::().unwrap().len() + runner.db.table::().unwrap().len() + + runner.db.table::().unwrap().len() ) as u64 ); @@ -442,21 +442,21 @@ mod tests { } struct MerkleTestRunner { - tx: TestTransaction, + db: TestStageDB, clean_threshold: u64, } impl Default for MerkleTestRunner { fn default() -> Self { - Self { tx: TestTransaction::default(), clean_threshold: 10000 } + Self { db: TestStageDB::default(), clean_threshold: 10000 } } } impl StageTestRunner for MerkleTestRunner { type S = MerkleStage; - fn tx(&self) -> &TestTransaction { - &self.tx + fn db(&self) -> &TestStageDB { + &self.db } fn stage(&self) -> Self::S { @@ -479,7 +479,7 @@ mod tests { .into_iter() .collect::>(); - self.tx.insert_accounts_and_storages( + self.db.insert_accounts_and_storages( accounts.iter().map(|(addr, acc)| (*addr, (*acc, std::iter::empty()))), )?; @@ -498,7 +498,7 @@ mod tests { let head_hash = sealed_head.hash(); let mut blocks = vec![sealed_head]; blocks.extend(random_block_range(&mut rng, start..=end, head_hash, 0..3)); - self.tx.insert_blocks(blocks.iter(), None)?; + self.db.insert_blocks(blocks.iter(), None)?; let (transitions, final_state) = random_changeset_range( &mut rng, @@ -508,11 +508,11 @@ mod tests { 0..256, ); // add block changeset from block 1. 
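// The seeding below recomputes the expected state root and patches it into
// the tip header so the stage output can be validated against it. A condensed
// sketch, also showing the pattern this patch introduces everywhere: `commit`
// closures now return `ProviderResult<()>`, so writes end with an explicit
// `Ok(())` instead of returning `tx.put(..)` directly. The table generic is
// elided in this extract; `tables::Headers` is assumed.
use reth_db::{tables, transaction::{DbTx, DbTxMut}};
use reth_interfaces::provider::ProviderResult;
use reth_primitives::B256;

fn patch_state_root(db: &TestStageDB, number: u64, root: B256) -> ProviderResult<()> {
    db.commit(|tx| {
        let mut header = tx.get::<tables::Headers>(number)?.expect("header was seeded");
        header.state_root = root;
        tx.put::<tables::Headers>(number, header)?;
        Ok(())
    })
}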
- self.tx.insert_changesets(transitions, Some(start))?; - self.tx.insert_accounts_and_storages(final_state)?; + self.db.insert_changesets(transitions, Some(start))?; + self.db.insert_accounts_and_storages(final_state)?; // Calculate state root - let root = self.tx.query(|tx| { + let root = self.db.query(|tx| { let mut accounts = BTreeMap::default(); let mut accounts_cursor = tx.cursor_read::()?; let mut storage_cursor = tx.cursor_dup_read::()?; @@ -536,10 +536,11 @@ mod tests { })?; let last_block_number = end; - self.tx.commit(|tx| { + self.db.commit(|tx| { let mut last_header = tx.get::(last_block_number)?.unwrap(); last_header.state_root = root; - tx.put::(last_block_number, last_header) + tx.put::(last_block_number, last_header)?; + Ok(()) })?; Ok(blocks) @@ -564,7 +565,7 @@ mod tests { fn before_unwind(&self, input: UnwindInput) -> Result<(), TestRunnerError> { let target_block = input.unwind_to + 1; - self.tx + self.db .commit(|tx| { let mut storage_changesets_cursor = tx.cursor_dup_read::().unwrap(); diff --git a/crates/stages/src/stages/mod.rs b/crates/stages/src/stages/mod.rs index 8197ec8340081..2c775edddbf14 100644 --- a/crates/stages/src/stages/mod.rs +++ b/crates/stages/src/stages/mod.rs @@ -42,7 +42,7 @@ mod tests { use crate::{ stage::Stage, stages::{ExecutionStage, IndexAccountHistoryStage, IndexStorageHistoryStage}, - test_utils::TestTransaction, + test_utils::TestStageDB, ExecInput, }; use alloy_rlp::Decodable; @@ -50,17 +50,17 @@ mod tests { cursor::DbCursorRO, mdbx::{cursor::Cursor, RW}, tables, + test_utils::TempDatabase, transaction::{DbTx, DbTxMut}, AccountHistory, DatabaseEnv, }; use reth_interfaces::test_utils::generators::{self, random_block}; use reth_primitives::{ address, hex_literal::hex, keccak256, Account, Bytecode, ChainSpecBuilder, PruneMode, - PruneModes, SealedBlock, MAINNET, U256, + PruneModes, SealedBlock, U256, }; use reth_provider::{ - AccountExtReader, BlockWriter, DatabaseProviderRW, ProviderFactory, ReceiptProvider, - StorageReader, + AccountExtReader, BlockWriter, ProviderFactory, ReceiptProvider, StorageReader, }; use reth_revm::Factory; use std::sync::Arc; @@ -68,18 +68,17 @@ mod tests { #[tokio::test] #[ignore] async fn test_prune() { - let test_tx = TestTransaction::default(); - let factory = Arc::new(ProviderFactory::new(test_tx.tx.db(), MAINNET.clone())); + let test_db = TestStageDB::default(); - let provider = factory.provider_rw().unwrap(); + let provider_rw = test_db.factory.provider_rw().unwrap(); let tip = 66; let input = ExecInput { target: Some(tip), checkpoint: None }; let mut genesis_rlp = 
hex!("f901faf901f5a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa045571b40ae66ca7480791bbb2887286e4e4c4b1b298b191c889d6959023a32eda056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000808502540be400808000a00000000000000000000000000000000000000000000000000000000000000000880000000000000000c0c0").as_slice(); let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); let block = SealedBlock::decode(&mut block_rlp).unwrap(); - provider.insert_block(genesis, None, None).unwrap(); - provider.insert_block(block.clone(), None, None).unwrap(); + provider_rw.insert_block(genesis, None, None).unwrap(); + provider_rw.insert_block(block.clone(), None, None).unwrap(); // Fill with bogus blocks to respect PruneMode distance. 
let mut head = block.hash; @@ -87,22 +86,22 @@ mod tests { for block_number in 2..=tip { let nblock = random_block(&mut rng, block_number, Some(head), Some(0), Some(0)); head = nblock.hash; - provider.insert_block(nblock, None, None).unwrap(); + provider_rw.insert_block(nblock, None, None).unwrap(); } - provider.commit().unwrap(); + provider_rw.commit().unwrap(); // insert pre state - let provider = factory.provider_rw().unwrap(); + let provider_rw = test_db.factory.provider_rw().unwrap(); let code = hex!("5a465a905090036002900360015500"); let code_hash = keccak256(hex!("5a465a905090036002900360015500")); - provider + provider_rw .tx_ref() .put::( address!("1000000000000000000000000000000000000000"), Account { nonce: 0, balance: U256::ZERO, bytecode_hash: Some(code_hash) }, ) .unwrap(); - provider + provider_rw .tx_ref() .put::( address!("a94f5374fce5edbc8e2a8697c15331677e6ebf0b"), @@ -113,18 +112,18 @@ mod tests { }, ) .unwrap(); - provider + provider_rw .tx_ref() .put::(code_hash, Bytecode::new_raw(code.to_vec().into())) .unwrap(); - provider.commit().unwrap(); + provider_rw.commit().unwrap(); - let check_pruning = |factory: Arc>, + let check_pruning = |factory: ProviderFactory>>, prune_modes: PruneModes, expect_num_receipts: usize, expect_num_acc_changesets: usize, expect_num_storage_changesets: usize| async move { - let provider: DatabaseProviderRW<&DatabaseEnv> = factory.provider_rw().unwrap(); + let provider = factory.provider_rw().unwrap(); // Check execution and create receipts and changesets according to the pruning // configuration @@ -195,34 +194,34 @@ mod tests { // In an unpruned configuration there is 1 receipt, 3 changed accounts and 1 changed // storage. let mut prune = PruneModes::none(); - check_pruning(factory.clone(), prune.clone(), 1, 3, 1).await; + check_pruning(test_db.factory.clone(), prune.clone(), 1, 3, 1).await; prune.receipts = Some(PruneMode::Full); prune.account_history = Some(PruneMode::Full); prune.storage_history = Some(PruneMode::Full); // This will result in error for account_history and storage_history, which is caught. 
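// `check_pruning` now takes the provider factory by value, and every call in
// this test passes `test_db.factory.clone()`: the factory is cheaply
// cloneable, so the shared `Arc<ProviderFactory<..>>` wrapper is gone. The
// closure parameter's generics were lost in this extract; since
// `create_test_rw_db()` yields `Arc<TempDatabase<DatabaseEnv>>`, the full
// type is assumed to be:
use std::sync::Arc;
use reth_db::{test_utils::TempDatabase, DatabaseEnv};
use reth_provider::ProviderFactory;

type TestFactory = ProviderFactory<Arc<TempDatabase<DatabaseEnv>>>;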
- check_pruning(factory.clone(), prune.clone(), 0, 0, 0).await; + check_pruning(test_db.factory.clone(), prune.clone(), 0, 0, 0).await; prune.receipts = Some(PruneMode::Before(1)); prune.account_history = Some(PruneMode::Before(1)); prune.storage_history = Some(PruneMode::Before(1)); - check_pruning(factory.clone(), prune.clone(), 1, 3, 1).await; + check_pruning(test_db.factory.clone(), prune.clone(), 1, 3, 1).await; prune.receipts = Some(PruneMode::Before(2)); prune.account_history = Some(PruneMode::Before(2)); prune.storage_history = Some(PruneMode::Before(2)); // The one account is the miner - check_pruning(factory.clone(), prune.clone(), 0, 1, 0).await; + check_pruning(test_db.factory.clone(), prune.clone(), 0, 1, 0).await; prune.receipts = Some(PruneMode::Distance(66)); prune.account_history = Some(PruneMode::Distance(66)); prune.storage_history = Some(PruneMode::Distance(66)); - check_pruning(factory.clone(), prune.clone(), 1, 3, 1).await; + check_pruning(test_db.factory.clone(), prune.clone(), 1, 3, 1).await; prune.receipts = Some(PruneMode::Distance(64)); prune.account_history = Some(PruneMode::Distance(64)); prune.storage_history = Some(PruneMode::Distance(64)); // The one account is the miner - check_pruning(factory.clone(), prune.clone(), 0, 1, 0).await; + check_pruning(test_db.factory.clone(), prune.clone(), 0, 1, 0).await; } } diff --git a/crates/stages/src/stages/sender_recovery.rs b/crates/stages/src/stages/sender_recovery.rs index 551a70ccddfbf..a7b19ca57a2d2 100644 --- a/crates/stages/src/stages/sender_recovery.rs +++ b/crates/stages/src/stages/sender_recovery.rs @@ -56,7 +56,7 @@ impl Stage for SenderRecoveryStage { /// the [`TxSenders`][reth_db::tables::TxSenders] table. fn execute( &mut self, - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, input: ExecInput, ) -> Result { if input.target_reached() { @@ -168,7 +168,7 @@ impl Stage for SenderRecoveryStage { /// Unwind the stage. fn unwind( &mut self, - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, input: UnwindInput, ) -> Result { let (_, unwind_to, _) = input.unwind_block_range_with_threshold(self.commit_threshold); @@ -207,7 +207,7 @@ fn recover_sender( } fn stage_checkpoint( - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, ) -> Result { let pruned_entries = provider .get_prune_checkpoint(PruneSegment::SenderRecovery)? 
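// `stage_checkpoint` folds previously pruned transactions into both sides of
// the entities checkpoint so the processed/total ratio stays meaningful after
// pruning. Assuming the elided pieces are the sender and transaction table
// entry counts plus `PruneCheckpoint::tx_number`, the arithmetic reduces to:
fn entities(senders: u64, transactions: u64, pruned: u64) -> (u64, u64) {
    // (processed, total); once the stage has caught up the two sides match,
    // exactly what `stage_checkpoint_pruned` asserts below.
    (senders + pruned, transactions + pruned)
}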
@@ -250,14 +250,14 @@ mod tests { }; use reth_primitives::{ stage::StageUnitCheckpoint, BlockNumber, PruneCheckpoint, PruneMode, SealedBlock, - TransactionSigned, B256, MAINNET, + TransactionSigned, B256, }; - use reth_provider::{ProviderFactory, PruneCheckpointWriter, TransactionsProvider}; + use reth_provider::{PruneCheckpointWriter, TransactionsProvider}; use super::*; use crate::test_utils::{ stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, TestRunnerError, - TestTransaction, UnwindStageTestRunner, + TestStageDB, UnwindStageTestRunner, }; stage_test_suite_ext!(SenderRecoveryTestRunner, sender_recovery); @@ -288,7 +288,7 @@ mod tests { ) }) .collect::>(); - runner.tx.insert_blocks(blocks.iter(), None).expect("failed to insert blocks"); + runner.db.insert_blocks(blocks.iter(), None).expect("failed to insert blocks"); let rx = runner.execute(input); @@ -322,9 +322,9 @@ mod tests { // Manually seed once with full input range let seed = random_block_range(&mut rng, stage_progress + 1..=previous_stage, B256::ZERO, 0..4); // set tx count range high enough to hit the threshold - runner.tx.insert_blocks(seed.iter(), None).expect("failed to seed execution"); + runner.db.insert_blocks(seed.iter(), None).expect("failed to seed execution"); - let total_transactions = runner.tx.table::().unwrap().len() as u64; + let total_transactions = runner.db.table::().unwrap().len() as u64; let first_input = ExecInput { target: Some(previous_stage), @@ -348,7 +348,7 @@ mod tests { ExecOutput { checkpoint: StageCheckpoint::new(expected_progress).with_entities_stage_checkpoint( EntitiesCheckpoint { - processed: runner.tx.table::().unwrap().len() as u64, + processed: runner.db.table::().unwrap().len() as u64, total: total_transactions } ), @@ -379,11 +379,11 @@ mod tests { #[test] fn stage_checkpoint_pruned() { - let tx = TestTransaction::default(); + let db = TestStageDB::default(); let mut rng = generators::rng(); let blocks = random_block_range(&mut rng, 0..=100, B256::ZERO, 0..10); - tx.insert_blocks(blocks.iter(), None).expect("insert blocks"); + db.insert_blocks(blocks.iter(), None).expect("insert blocks"); let max_pruned_block = 30; let max_processed_block = 70; @@ -399,9 +399,9 @@ mod tests { tx_number += 1; } } - tx.insert_transaction_senders(tx_senders).expect("insert tx hash numbers"); + db.insert_transaction_senders(tx_senders).expect("insert tx hash numbers"); - let provider = tx.inner_rw(); + let provider = db.factory.provider_rw().unwrap(); provider .save_prune_checkpoint( PruneSegment::SenderRecovery, @@ -419,10 +419,7 @@ mod tests { .expect("save stage checkpoint"); provider.commit().expect("commit"); - let db = tx.inner_raw(); - let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone()); - let provider = factory.provider_rw().expect("provider rw"); - + let provider = db.factory.provider_rw().unwrap(); assert_eq!( stage_checkpoint(&provider).expect("stage checkpoint"), EntitiesCheckpoint { @@ -436,13 +433,13 @@ mod tests { } struct SenderRecoveryTestRunner { - tx: TestTransaction, + db: TestStageDB, threshold: u64, } impl Default for SenderRecoveryTestRunner { fn default() -> Self { - Self { threshold: 1000, tx: TestTransaction::default() } + Self { threshold: 1000, db: TestStageDB::default() } } } @@ -459,16 +456,17 @@ mod tests { /// not empty. fn ensure_no_senders_by_block(&self, block: BlockNumber) -> Result<(), TestRunnerError> { let body_result = self - .tx - .inner_rw() + .db + .factory + .provider_rw()? .block_body_indices(block)? 
.ok_or(ProviderError::BlockBodyIndicesNotFound(block)); match body_result { Ok(body) => self - .tx + .db .ensure_no_entry_above::(body.last_tx_num(), |key| key)?, Err(_) => { - assert!(self.tx.table_is_empty::()?); + assert!(self.db.table_is_empty::()?); } }; @@ -479,8 +477,8 @@ mod tests { impl StageTestRunner for SenderRecoveryTestRunner { type S = SenderRecoveryStage; - fn tx(&self) -> &TestTransaction { - &self.tx + fn db(&self) -> &TestStageDB { + &self.db } fn stage(&self) -> Self::S { @@ -497,7 +495,7 @@ mod tests { let end = input.target(); let blocks = random_block_range(&mut rng, stage_progress..=end, B256::ZERO, 0..2); - self.tx.insert_blocks(blocks.iter(), None)?; + self.db.insert_blocks(blocks.iter(), None)?; Ok(blocks) } @@ -508,7 +506,7 @@ mod tests { ) -> Result<(), TestRunnerError> { match output { Some(output) => { - let provider = self.tx.inner(); + let provider = self.db.factory.provider()?; let start_block = input.next_block(); let end_block = output.checkpoint.block_number; diff --git a/crates/stages/src/stages/total_difficulty.rs b/crates/stages/src/stages/total_difficulty.rs index 042f1b6a6c069..d523cf4ce8505 100644 --- a/crates/stages/src/stages/total_difficulty.rs +++ b/crates/stages/src/stages/total_difficulty.rs @@ -50,7 +50,7 @@ impl Stage for TotalDifficultyStage { /// Write total difficulty entries fn execute( &mut self, - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, input: ExecInput, ) -> Result { let tx = provider.tx_ref(); @@ -100,7 +100,7 @@ impl Stage for TotalDifficultyStage { /// Unwind the stage. fn unwind( &mut self, - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, input: UnwindInput, ) -> Result { let (_, unwind_to, _) = input.unwind_block_range_with_threshold(self.commit_threshold); @@ -138,7 +138,7 @@ mod tests { use super::*; use crate::test_utils::{ stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, TestRunnerError, - TestTransaction, UnwindStageTestRunner, + TestStageDB, UnwindStageTestRunner, }; stage_test_suite_ext!(TotalDifficultyTestRunner, total_difficulty); @@ -171,7 +171,7 @@ mod tests { total })) }, done: false }) if block_number == expected_progress && processed == 1 + threshold && - total == runner.tx.table::().unwrap().len() as u64 + total == runner.db.table::().unwrap().len() as u64 ); // Execute second time @@ -189,14 +189,14 @@ mod tests { total })) }, done: true }) if block_number == previous_stage && processed == total && - total == runner.tx.table::().unwrap().len() as u64 + total == runner.db.table::().unwrap().len() as u64 ); assert!(runner.validate_execution(first_input, result.ok()).is_ok(), "validation failed"); } struct TotalDifficultyTestRunner { - tx: TestTransaction, + db: TestStageDB, consensus: Arc, commit_threshold: u64, } @@ -204,7 +204,7 @@ mod tests { impl Default for TotalDifficultyTestRunner { fn default() -> Self { Self { - tx: Default::default(), + db: Default::default(), consensus: Arc::new(TestConsensus::default()), commit_threshold: 500, } @@ -214,8 +214,8 @@ mod tests { impl StageTestRunner for TotalDifficultyTestRunner { type S = TotalDifficultyStage; - fn tx(&self) -> &TestTransaction { - &self.tx + fn db(&self) -> &TestStageDB { + &self.db } fn stage(&self) -> Self::S { @@ -234,15 +234,16 @@ mod tests { let mut rng = generators::rng(); let start = input.checkpoint().block_number; let head = random_header(&mut rng, start, None); - self.tx.insert_headers(std::iter::once(&head))?; - self.tx.commit(|tx| { + 
self.db.insert_headers(std::iter::once(&head))?; + self.db.commit(|tx| { let td: U256 = tx .cursor_read::()? .last()? .map(|(_, v)| v) .unwrap_or_default() .into(); - tx.put::(head.number, (td + head.difficulty).into()) + tx.put::(head.number, (td + head.difficulty).into())?; + Ok(()) })?; // use previous progress as seed size @@ -253,7 +254,7 @@ mod tests { } let mut headers = random_header_range(&mut rng, start + 1..end, head.hash()); - self.tx.insert_headers(headers.iter())?; + self.db.insert_headers(headers.iter())?; headers.insert(0, head); Ok(headers) } @@ -267,7 +268,7 @@ mod tests { let initial_stage_progress = input.checkpoint().block_number; match output { Some(output) if output.checkpoint.block_number > initial_stage_progress => { - let provider = self.tx.inner(); + let provider = self.db.factory.provider()?; let mut header_cursor = provider.tx_ref().cursor_read::()?; let (_, mut current_header) = header_cursor @@ -301,7 +302,7 @@ mod tests { impl TotalDifficultyTestRunner { fn check_no_td_above(&self, block: BlockNumber) -> Result<(), TestRunnerError> { - self.tx.ensure_no_entry_above::(block, |num| num)?; + self.db.ensure_no_entry_above::(block, |num| num)?; Ok(()) } diff --git a/crates/stages/src/stages/tx_lookup.rs b/crates/stages/src/stages/tx_lookup.rs index f6ef73a9af5e5..a741bed285827 100644 --- a/crates/stages/src/stages/tx_lookup.rs +++ b/crates/stages/src/stages/tx_lookup.rs @@ -51,7 +51,7 @@ impl Stage for TransactionLookupStage { /// Write transaction hash -> id entries fn execute( &mut self, - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, mut input: ExecInput, ) -> Result { if let Some((target_prunable_block, prune_mode)) = self @@ -129,7 +129,7 @@ impl Stage for TransactionLookupStage { /// Unwind the stage. fn unwind( &mut self, - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, input: UnwindInput, ) -> Result { let tx = provider.tx_ref(); @@ -164,7 +164,7 @@ impl Stage for TransactionLookupStage { } fn stage_checkpoint( - provider: &DatabaseProviderRW<&DB>, + provider: &DatabaseProviderRW, ) -> Result { let pruned_entries = provider .get_prune_checkpoint(PruneSegment::TransactionLookup)? @@ -186,7 +186,7 @@ mod tests { use super::*; use crate::test_utils::{ stage_test_suite_ext, ExecuteStageTestRunner, StageTestRunner, TestRunnerError, - TestTransaction, UnwindStageTestRunner, + TestStageDB, UnwindStageTestRunner, }; use assert_matches::assert_matches; use reth_interfaces::test_utils::{ @@ -195,11 +195,8 @@ mod tests { }; use reth_primitives::{ stage::StageUnitCheckpoint, BlockNumber, PruneCheckpoint, PruneMode, SealedBlock, B256, - MAINNET, - }; - use reth_provider::{ - BlockReader, ProviderError, ProviderFactory, PruneCheckpointWriter, TransactionsProvider, }; + use reth_provider::{BlockReader, ProviderError, PruneCheckpointWriter, TransactionsProvider}; use std::ops::Sub; // Implement stage test suite. 
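// The transaction-lookup tests that follow seed with the recipe used
// throughout this patch: generate a deterministic block range, insert it via
// `TestStageDB`, then compare stage progress against full-table counts. A
// condensed sketch (the `0..4` tx-count range mirrors the "set tx count range
// high enough to hit the threshold" comments in the hunks below):
use reth_interfaces::test_utils::generators::{self, random_block_range};
use reth_primitives::B256;

fn seed(db: &TestStageDB, from: u64, to: u64) {
    let mut rng = generators::rng();
    let blocks = random_block_range(&mut rng, from..=to, B256::ZERO, 0..4);
    db.insert_blocks(blocks.iter(), None).expect("failed to seed execution");
}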
@@ -230,7 +227,7 @@ mod tests { ) }) .collect::>(); - runner.tx.insert_blocks(blocks.iter(), None).expect("failed to insert blocks"); + runner.db.insert_blocks(blocks.iter(), None).expect("failed to insert blocks"); let rx = runner.execute(input); @@ -246,7 +243,7 @@ mod tests { total })) }, done: true }) if block_number == previous_stage && processed == total && - total == runner.tx.table::().unwrap().len() as u64 + total == runner.db.table::().unwrap().len() as u64 ); // Validate the stage execution @@ -269,9 +266,9 @@ mod tests { // Seed only once with full input range let seed = random_block_range(&mut rng, stage_progress + 1..=previous_stage, B256::ZERO, 0..4); // set tx count range high enough to hit the threshold - runner.tx.insert_blocks(seed.iter(), None).expect("failed to seed execution"); + runner.db.insert_blocks(seed.iter(), None).expect("failed to seed execution"); - let total_txs = runner.tx.table::().unwrap().len() as u64; + let total_txs = runner.db.table::().unwrap().len() as u64; // Execute first time let result = runner.execute(first_input).await.unwrap(); @@ -290,7 +287,7 @@ mod tests { ExecOutput { checkpoint: StageCheckpoint::new(expected_progress).with_entities_stage_checkpoint( EntitiesCheckpoint { - processed: runner.tx.table::().unwrap().len() as u64, + processed: runner.db.table::().unwrap().len() as u64, total: total_txs } ), @@ -334,7 +331,7 @@ mod tests { // Seed only once with full input range let seed = random_block_range(&mut rng, stage_progress + 1..=previous_stage, B256::ZERO, 0..2); - runner.tx.insert_blocks(seed.iter(), None).expect("failed to seed execution"); + runner.db.insert_blocks(seed.iter(), None).expect("failed to seed execution"); runner.set_prune_mode(PruneMode::Before(prune_target)); @@ -352,7 +349,7 @@ mod tests { total })) }, done: true }) if block_number == previous_stage && processed == total && - total == runner.tx.table::().unwrap().len() as u64 + total == runner.db.table::().unwrap().len() as u64 ); // Validate the stage execution @@ -361,11 +358,11 @@ mod tests { #[test] fn stage_checkpoint_pruned() { - let tx = TestTransaction::default(); + let db = TestStageDB::default(); let mut rng = generators::rng(); let blocks = random_block_range(&mut rng, 0..=100, B256::ZERO, 0..10); - tx.insert_blocks(blocks.iter(), None).expect("insert blocks"); + db.insert_blocks(blocks.iter(), None).expect("insert blocks"); let max_pruned_block = 30; let max_processed_block = 70; @@ -380,9 +377,9 @@ mod tests { tx_hash_number += 1; } } - tx.insert_tx_hash_numbers(tx_hash_numbers).expect("insert tx hash numbers"); + db.insert_tx_hash_numbers(tx_hash_numbers).expect("insert tx hash numbers"); - let provider = tx.inner_rw(); + let provider = db.factory.provider_rw().unwrap(); provider .save_prune_checkpoint( PruneSegment::TransactionLookup, @@ -401,10 +398,7 @@ mod tests { .expect("save stage checkpoint"); provider.commit().expect("commit"); - let db = tx.inner_raw(); - let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone()); - let provider = factory.provider_rw().expect("provider rw"); - + let provider = db.factory.provider_rw().unwrap(); assert_eq!( stage_checkpoint(&provider).expect("stage checkpoint"), EntitiesCheckpoint { @@ -418,14 +412,14 @@ mod tests { } struct TransactionLookupTestRunner { - tx: TestTransaction, + db: TestStageDB, commit_threshold: u64, prune_mode: Option, } impl Default for TransactionLookupTestRunner { fn default() -> Self { - Self { tx: TestTransaction::default(), commit_threshold: 1000, prune_mode: None } + Self { db: 
TestStageDB::default(), commit_threshold: 1000, prune_mode: None } } } @@ -447,17 +441,18 @@ mod tests { /// not empty. fn ensure_no_hash_by_block(&self, number: BlockNumber) -> Result<(), TestRunnerError> { let body_result = self - .tx - .inner_rw() + .db + .factory + .provider_rw()? .block_body_indices(number)? .ok_or(ProviderError::BlockBodyIndicesNotFound(number)); match body_result { - Ok(body) => self.tx.ensure_no_entry_above_by_value::( + Ok(body) => self.db.ensure_no_entry_above_by_value::( body.last_tx_num(), |key| key, )?, Err(_) => { - assert!(self.tx.table_is_empty::()?); + assert!(self.db.table_is_empty::()?); } }; @@ -468,8 +463,8 @@ mod tests { impl StageTestRunner for TransactionLookupTestRunner { type S = TransactionLookupStage; - fn tx(&self) -> &TestTransaction { - &self.tx + fn db(&self) -> &TestStageDB { + &self.db } fn stage(&self) -> Self::S { @@ -489,7 +484,7 @@ mod tests { let mut rng = generators::rng(); let blocks = random_block_range(&mut rng, stage_progress + 1..=end, B256::ZERO, 0..2); - self.tx.insert_blocks(blocks.iter(), None)?; + self.db.insert_blocks(blocks.iter(), None)?; Ok(blocks) } @@ -500,7 +495,7 @@ mod tests { ) -> Result<(), TestRunnerError> { match output { Some(output) => { - let provider = self.tx.inner(); + let provider = self.db.factory.provider()?; if let Some((target_prunable_block, _)) = self .prune_mode diff --git a/crates/stages/src/test_utils/mod.rs b/crates/stages/src/test_utils/mod.rs index b9fe397a88739..b74b3e9455c17 100644 --- a/crates/stages/src/test_utils/mod.rs +++ b/crates/stages/src/test_utils/mod.rs @@ -10,7 +10,7 @@ pub(crate) use runner::{ }; mod test_db; -pub use test_db::TestTransaction; +pub use test_db::TestStageDB; mod stage; pub use stage::TestStage; diff --git a/crates/stages/src/test_utils/runner.rs b/crates/stages/src/test_utils/runner.rs index 0be375edcd9f7..17289b9cf20b7 100644 --- a/crates/stages/src/test_utils/runner.rs +++ b/crates/stages/src/test_utils/runner.rs @@ -1,6 +1,6 @@ -use super::TestTransaction; +use super::TestStageDB; use crate::{ExecInput, ExecOutput, Stage, StageError, StageExt, UnwindInput, UnwindOutput}; -use reth_db::DatabaseEnv; +use reth_db::{test_utils::TempDatabase, DatabaseEnv}; use reth_interfaces::db::DatabaseError; use reth_primitives::MAINNET; use reth_provider::{ProviderError, ProviderFactory}; @@ -19,10 +19,10 @@ pub(crate) enum TestRunnerError { /// A generic test runner for stages. pub(crate) trait StageTestRunner { - type S: Stage + 'static; + type S: Stage>> + 'static; /// Return a reference to the database. - fn tx(&self) -> &TestTransaction; + fn db(&self) -> &TestStageDB; /// Return an instance of a Stage. fn stage(&self) -> Self::S; @@ -45,12 +45,10 @@ pub(crate) trait ExecuteStageTestRunner: StageTestRunner { /// Run [Stage::execute] and return a receiver for the result. 
fn execute(&self, input: ExecInput) -> oneshot::Receiver> { let (tx, rx) = oneshot::channel(); - let (db, mut stage) = (self.tx().inner_raw(), self.stage()); + let (db, mut stage) = (self.db().factory.clone(), self.stage()); tokio::spawn(async move { - let factory = ProviderFactory::new(db.db(), MAINNET.clone()); - let result = stage.execute_ready(input).await.and_then(|_| { - let provider_rw = factory.provider_rw().unwrap(); + let provider_rw = db.provider_rw().unwrap(); let result = stage.execute(&provider_rw, input); provider_rw.commit().expect("failed to commit"); result @@ -74,11 +72,9 @@ pub(crate) trait UnwindStageTestRunner: StageTestRunner { /// Run [Stage::unwind] and return a receiver for the result. async fn unwind(&self, input: UnwindInput) -> Result { let (tx, rx) = oneshot::channel(); - let (db, mut stage) = (self.tx().inner_raw(), self.stage()); + let (db, mut stage) = (self.db().factory.clone(), self.stage()); tokio::spawn(async move { - let factory = ProviderFactory::new(db.db(), MAINNET.clone()); - let provider = factory.provider_rw().unwrap(); - + let provider = db.provider_rw().unwrap(); let result = stage.unwind(&provider, input); provider.commit().expect("failed to commit"); tx.send(result).expect("failed to send result"); diff --git a/crates/stages/src/test_utils/stage.rs b/crates/stages/src/test_utils/stage.rs index a73773e6b8b98..a76e46e67cd6b 100644 --- a/crates/stages/src/test_utils/stage.rs +++ b/crates/stages/src/test_utils/stage.rs @@ -47,7 +47,7 @@ impl Stage for TestStage { fn execute( &mut self, - _: &DatabaseProviderRW<&DB>, + _: &DatabaseProviderRW, _input: ExecInput, ) -> Result { self.exec_outputs @@ -57,7 +57,7 @@ impl Stage for TestStage { fn unwind( &mut self, - _: &DatabaseProviderRW<&DB>, + _: &DatabaseProviderRW, _input: UnwindInput, ) -> Result { self.unwind_outputs diff --git a/crates/stages/src/test_utils/test_db.rs b/crates/stages/src/test_utils/test_db.rs index 586a58c99d7ad..4582bb86acf4b 100644 --- a/crates/stages/src/test_utils/test_db.rs +++ b/crates/stages/src/test_utils/test_db.rs @@ -9,7 +9,7 @@ use reth_db::{ transaction::{DbTx, DbTxMut}, DatabaseEnv, DatabaseError as DbError, }; -use reth_interfaces::{test_utils::generators::ChangeSet, RethResult}; +use reth_interfaces::{provider::ProviderResult, test_utils::generators::ChangeSet, RethResult}; use reth_primitives::{ keccak256, Account, Address, BlockNumber, Receipt, SealedBlock, SealedHeader, StorageEntry, TxHash, TxNumber, B256, MAINNET, U256, @@ -18,80 +18,50 @@ use reth_provider::{DatabaseProviderRO, DatabaseProviderRW, HistoryWriter, Provi use std::{ borrow::Borrow, collections::BTreeMap, - ops::RangeInclusive, + ops::{Deref, RangeInclusive}, path::{Path, PathBuf}, sync::Arc, }; -/// The [TestTransaction] is used as an internal -/// database for testing stage implementation. -/// -/// ```rust,ignore -/// let tx = TestTransaction::default(); -/// stage.execute(&mut tx.container(), input); -/// ``` +/// Test database that is used for testing stage implementations. 
#[derive(Debug)] -pub struct TestTransaction { - /// DB - pub tx: Arc>, - pub path: Option, +pub struct TestStageDB { pub factory: ProviderFactory>>, } -impl Default for TestTransaction { - /// Create a new instance of [TestTransaction] +impl Default for TestStageDB { + /// Create a new instance of [TestStageDB] fn default() -> Self { - let tx = create_test_rw_db(); - Self { tx: tx.clone(), path: None, factory: ProviderFactory::new(tx, MAINNET.clone()) } + Self { factory: ProviderFactory::new(create_test_rw_db(), MAINNET.clone()) } } } -impl TestTransaction { +impl TestStageDB { pub fn new(path: &Path) -> Self { - let tx = create_test_rw_db_with_path(path); - Self { - tx: tx.clone(), - path: Some(path.to_path_buf()), - factory: ProviderFactory::new(tx, MAINNET.clone()), - } - } - - /// Return a database wrapped in [DatabaseProviderRW]. - pub fn inner_rw(&self) -> DatabaseProviderRW>> { - self.factory.provider_rw().expect("failed to create db container") - } - - /// Return a database wrapped in [DatabaseProviderRO]. - pub fn inner(&self) -> DatabaseProviderRO>> { - self.factory.provider().expect("failed to create db container") - } - - /// Get a pointer to an internal database. - pub fn inner_raw(&self) -> Arc> { - self.tx.clone() + Self { factory: ProviderFactory::new(create_test_rw_db_with_path(path), MAINNET.clone()) } } /// Invoke a callback with transaction committing it afterwards - pub fn commit(&self, f: F) -> Result<(), DbError> + pub fn commit(&self, f: F) -> ProviderResult<()> where - F: FnOnce(&::TXMut) -> Result<(), DbError>, + F: FnOnce(&::TXMut) -> ProviderResult<()>, { - let mut tx = self.inner_rw(); + let mut tx = self.factory.provider_rw()?; f(tx.tx_ref())?; tx.commit().expect("failed to commit"); Ok(()) } /// Invoke a callback with a read transaction - pub fn query(&self, f: F) -> Result + pub fn query(&self, f: F) -> ProviderResult where - F: FnOnce(&::TX) -> Result, + F: FnOnce(&::TX) -> ProviderResult, { - f(self.inner().tx_ref()) + f(self.factory.provider()?.tx_ref()) } /// Check if the table is empty - pub fn table_is_empty(&self) -> Result { + pub fn table_is_empty(&self) -> ProviderResult { self.query(|tx| { let last = tx.cursor_read::()?.last()?; Ok(last.is_none()) @@ -99,70 +69,21 @@ impl TestTransaction { } /// Return full table as Vec - pub fn table(&self) -> Result>, DbError> + pub fn table(&self) -> ProviderResult>> where T::Key: Default + Ord, { self.query(|tx| { - tx.cursor_read::()? + Ok(tx + .cursor_read::()? .walk(Some(T::Key::default()))? - .collect::, DbError>>() - }) - } - - /// Map a collection of values and store them in the database. - /// This function commits the transaction before exiting. - /// - /// ```rust,ignore - /// let tx = TestTransaction::default(); - /// tx.map_put::(&items, |item| item)?; - /// ``` - #[allow(dead_code)] - pub fn map_put(&self, values: &[S], mut map: F) -> Result<(), DbError> - where - T: Table, - S: Clone, - F: FnMut(&S) -> TableRow, - { - self.commit(|tx| { - values.iter().try_for_each(|src| { - let (k, v) = map(src); - tx.put::(k, v) - }) - }) - } - - /// Transform a collection of values using a callback and store - /// them in the database. The callback additionally accepts the - /// optional last element that was stored. - /// This function commits the transaction before exiting. 
-    ///
-    /// ```rust,ignore
-    /// let tx = TestTransaction::default();
-    /// tx.transform_append::<Table, _, _>(&items, |prev, item| prev.unwrap_or_default() + item)?;
-    /// ```
-    #[allow(dead_code)]
-    pub fn transform_append<T, S, F>(&self, values: &[S], mut transform: F) -> Result<(), DbError>
-    where
-        T: Table,
-        <T as Table>::Value: Clone,
-        S: Clone,
-        F: FnMut(&Option<<T as Table>::Value>, &S) -> TableRow<T>,
-    {
-        self.commit(|tx| {
-            let mut cursor = tx.cursor_write::<T>()?;
-            let mut last = cursor.last()?.map(|(_, v)| v);
-            values.iter().try_for_each(|src| {
-                let (k, v) = transform(&last, src);
-                last = Some(v.clone());
-                cursor.append(k, v)
-            })
+                .collect::<Result<Vec<_>, DbError>>()?)
         })
     }
 
     /// Check that there is no table entry above a given
     /// number by [Table::Key]
-    pub fn ensure_no_entry_above<T, F>(&self, num: u64, mut selector: F) -> Result<(), DbError>
+    pub fn ensure_no_entry_above<T, F>(&self, num: u64, mut selector: F) -> ProviderResult<()>
     where
         T: Table,
         F: FnMut(T::Key) -> BlockNumber,
@@ -182,7 +103,7 @@ impl TestTransaction {
         &self,
         num: u64,
         mut selector: F,
-    ) -> Result<(), DbError>
+    ) -> ProviderResult<()>
     where
         T: Table,
         F: FnMut(T::Value) -> BlockNumber,
@@ -206,17 +127,19 @@ impl TestTransaction {
 
     /// Insert ordered collection of [SealedHeader] into the corresponding tables
     /// that are supposed to be populated by the headers stage.
-    pub fn insert_headers<'a, I>(&self, headers: I) -> Result<(), DbError>
+    pub fn insert_headers<'a, I>(&self, headers: I) -> ProviderResult<()>
     where
         I: Iterator<Item = &'a SealedHeader>,
     {
-        self.commit(|tx| headers.into_iter().try_for_each(|header| Self::insert_header(tx, header)))
+        self.commit(|tx| {
+            Ok(headers.into_iter().try_for_each(|header| Self::insert_header(tx, header))?)
+        })
     }
 
     /// Inserts total difficulty of headers into the corresponding tables.
     ///
-    /// Superset functionality of [TestTransaction::insert_headers].
-    pub fn insert_headers_with_td<'a, I>(&self, headers: I) -> Result<(), DbError>
+    /// Superset functionality of [TestStageDB::insert_headers].
+    pub fn insert_headers_with_td<'a, I>(&self, headers: I) -> ProviderResult<()>
     where
         I: Iterator<Item = &'a SealedHeader>,
     {
@@ -225,16 +148,16 @@ impl TestTransaction {
             headers.into_iter().try_for_each(|header| {
                 Self::insert_header(tx, header)?;
                 td += header.difficulty;
-                tx.put::<tables::HeaderTD>(header.number, td.into())
+                Ok(tx.put::<tables::HeaderTD>(header.number, td.into())?)
             })
         })
     }
 
     /// Insert ordered collection of [SealedBlock] into corresponding tables.
-    /// Superset functionality of [TestTransaction::insert_headers].
+    /// Superset functionality of [TestStageDB::insert_headers].
     ///
     /// Assumes that there's a single transition for each transaction (i.e. no block rewards).
-    pub fn insert_blocks<'a, I>(&self, blocks: I, tx_offset: Option<u64>) -> Result<(), DbError>
+    pub fn insert_blocks<'a, I>(&self, blocks: I, tx_offset: Option<u64>) -> ProviderResult<()>
     where
         I: Iterator<Item = &'a SealedBlock>,
     {
@@ -266,45 +189,45 @@ impl TestTransaction {
         })
     }
 
-    pub fn insert_tx_hash_numbers<I>(&self, tx_hash_numbers: I) -> Result<(), DbError>
+    pub fn insert_tx_hash_numbers<I>(&self, tx_hash_numbers: I) -> ProviderResult<()>
     where
         I: IntoIterator<Item = (TxHash, TxNumber)>,
     {
         self.commit(|tx| {
             tx_hash_numbers.into_iter().try_for_each(|(tx_hash, tx_num)| {
                 // Insert into tx hash numbers table.
-                tx.put::<tables::TxHashNumber>(tx_hash, tx_num)
+                Ok(tx.put::<tables::TxHashNumber>(tx_hash, tx_num)?)
             })
         })
     }
 
     /// Insert collection of ([TxNumber], [Receipt]) into the corresponding table.
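+    ///
+    /// Note: the whole collection is written in a single transaction that is
+    /// committed before this method returns (see [TestStageDB::commit]).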
-    pub fn insert_receipts<I>(&self, receipts: I) -> Result<(), DbError>
+    pub fn insert_receipts<I>(&self, receipts: I) -> ProviderResult<()>
     where
         I: IntoIterator<Item = (TxNumber, Receipt)>,
     {
         self.commit(|tx| {
             receipts.into_iter().try_for_each(|(tx_num, receipt)| {
                 // Insert into receipts table.
-                tx.put::<tables::Receipts>(tx_num, receipt)
+                Ok(tx.put::<tables::Receipts>(tx_num, receipt)?)
             })
         })
     }
 
-    pub fn insert_transaction_senders<I>(&self, transaction_senders: I) -> Result<(), DbError>
+    pub fn insert_transaction_senders<I>(&self, transaction_senders: I) -> ProviderResult<()>
     where
         I: IntoIterator<Item = (TxNumber, Address)>,
     {
         self.commit(|tx| {
             transaction_senders.into_iter().try_for_each(|(tx_num, sender)| {
                 // Insert into transaction senders table.
-                tx.put::<tables::TxSenders>(tx_num, sender)
+                Ok(tx.put::<tables::TxSenders>(tx_num, sender)?)
             })
         })
     }
 
     /// Insert collection of ([Address], [Account]) into corresponding tables.
-    pub fn insert_accounts_and_storages<I, S>(&self, accounts: I) -> Result<(), DbError>
+    pub fn insert_accounts_and_storages<I, S>(&self, accounts: I) -> ProviderResult<()>
     where
         I: IntoIterator<Item = (Address, (Account, S))>,
         S: IntoIterator<Item = StorageEntry>,
@@ -350,7 +273,7 @@ impl TestTransaction {
         &self,
         changesets: I,
         block_offset: Option<u64>,
-    ) -> Result<(), DbError>
+    ) -> ProviderResult<()>
     where
         I: IntoIterator<Item = ChangeSet>,
     {
@@ -369,14 +292,14 @@
 
                     // Insert into storage changeset.
                     old_storage.into_iter().try_for_each(|entry| {
-                        tx.put::<tables::StorageChangeSet>(block_address, entry)
+                        Ok(tx.put::<tables::StorageChangeSet>(block_address, entry)?)
                     })
                 })
             })
         })
     }
 
-    pub fn insert_history<I>(&self, changesets: I, block_offset: Option<u64>) -> RethResult<()>
+    pub fn insert_history<I>(&self, changesets: I, block_offset: Option<u64>) -> ProviderResult<()>
     where
         I: IntoIterator<Item = ChangeSet>,
     {
@@ -392,10 +315,10 @@
             }
         }
 
-        let provider = self.factory.provider_rw()?;
-        provider.insert_account_history_index(accounts)?;
-        provider.insert_storage_history_index(storages)?;
-        provider.commit()?;
+        let provider_rw = self.factory.provider_rw()?;
+        provider_rw.insert_account_history_index(accounts)?;
+        provider_rw.insert_storage_history_index(storages)?;
+        provider_rw.commit()?;
 
         Ok(())
     }

From 9ecdea7eefff68069d1b7c45948d50eb8c430f58 Mon Sep 17 00:00:00 2001
From: Roman Krasiuk
Date: Wed, 22 Nov 2023 09:03:12 -0800
Subject: [PATCH 73/77] chore(execution): rename `EVMProcessor` factory (#5533)

---
 bin/reth/src/chain/import.rs                     |  2 +-
 bin/reth/src/debug_cmd/build_block.rs            |  6 +++---
 bin/reth/src/debug_cmd/execution.rs              |  2 +-
 bin/reth/src/debug_cmd/in_memory_merkle.rs       |  2 +-
 bin/reth/src/debug_cmd/merkle.rs                 |  2 +-
 bin/reth/src/node/mod.rs                         |  6 +++---
 bin/reth/src/stage/dump/execution.rs             |  7 ++++---
 bin/reth/src/stage/dump/merkle.rs                |  2 +-
 bin/reth/src/stage/run.rs                        |  2 +-
 crates/consensus/beacon/src/engine/test_utils.rs | 10 +++++-----
 crates/revm/src/factory.rs                       |  8 ++++----
 crates/revm/src/lib.rs                           |  2 +-
 crates/stages/src/lib.rs                         |  6 +++---
 crates/stages/src/sets.rs                        | 12 ++++++------
 crates/stages/src/stages/execution.rs            | 13 +++++++------
 crates/stages/src/stages/mod.rs                  |  6 ++++--
 testing/ef-tests/src/cases/blockchain_test.rs    |  6 +++---
 17 files changed, 49 insertions(+), 45 deletions(-)

diff --git a/bin/reth/src/chain/import.rs b/bin/reth/src/chain/import.rs
index 0486358f20277..fffd5abed7e93 100644
--- a/bin/reth/src/chain/import.rs
+++ b/bin/reth/src/chain/import.rs
@@ -152,7 +152,7 @@ impl ImportCommand {
             .into_task();
 
         let (tip_tx, tip_rx) = watch::channel(B256::ZERO);
-        let factory = reth_revm::Factory::new(self.chain.clone());
+        let factory = reth_revm::EvmProcessorFactory::new(self.chain.clone());
 
         let max_block = file_client.max_block().unwrap_or(0);
 
         let mut pipeline = Pipeline::builder()
diff --git
a/bin/reth/src/debug_cmd/build_block.rs b/bin/reth/src/debug_cmd/build_block.rs index 62e1d5d66bf8c..e1d57e4b7d930 100644 --- a/bin/reth/src/debug_cmd/build_block.rs +++ b/bin/reth/src/debug_cmd/build_block.rs @@ -32,7 +32,7 @@ use reth_provider::{ providers::BlockchainProvider, BlockHashReader, BlockReader, BlockWriter, ExecutorFactory, ProviderFactory, StageCheckpointReader, StateProviderFactory, }; -use reth_revm::Factory; +use reth_revm::EvmProcessorFactory; use reth_rpc_types::engine::{BlobsBundleV1, PayloadAttributes}; use reth_transaction_pool::{ blobstore::InMemoryBlobStore, BlobStore, EthPooledTransaction, PoolConfig, TransactionOrigin, @@ -150,7 +150,7 @@ impl Command { let tree_externals = TreeExternals::new( provider_factory.clone(), Arc::clone(&consensus), - Factory::new(self.chain.clone()), + EvmProcessorFactory::new(self.chain.clone()), ); let tree = BlockchainTree::new(tree_externals, BlockchainTreeConfig::default(), None)?; let blockchain_tree = ShareableBlockchainTree::new(tree); @@ -267,7 +267,7 @@ impl Command { let block_with_senders = SealedBlockWithSenders::new(block.clone(), senders).unwrap(); - let executor_factory = Factory::new(self.chain.clone()); + let executor_factory = EvmProcessorFactory::new(self.chain.clone()); let mut executor = executor_factory.with_state(blockchain_db.latest()?); executor.execute_and_verify_receipt( &block_with_senders.block.clone().unseal(), diff --git a/bin/reth/src/debug_cmd/execution.rs b/bin/reth/src/debug_cmd/execution.rs index 58a7b31197953..aa17451911729 100644 --- a/bin/reth/src/debug_cmd/execution.rs +++ b/bin/reth/src/debug_cmd/execution.rs @@ -108,7 +108,7 @@ impl Command { let stage_conf = &config.stages; let (tip_tx, tip_rx) = watch::channel(B256::ZERO); - let factory = reth_revm::Factory::new(self.chain.clone()); + let factory = reth_revm::EvmProcessorFactory::new(self.chain.clone()); let header_mode = HeaderSyncMode::Tip(tip_rx); let pipeline = Pipeline::builder() diff --git a/bin/reth/src/debug_cmd/in_memory_merkle.rs b/bin/reth/src/debug_cmd/in_memory_merkle.rs index 81db51c5ce635..223a104efabfe 100644 --- a/bin/reth/src/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/debug_cmd/in_memory_merkle.rs @@ -159,7 +159,7 @@ impl Command { ) .await?; - let executor_factory = reth_revm::Factory::new(self.chain.clone()); + let executor_factory = reth_revm::EvmProcessorFactory::new(self.chain.clone()); let mut executor = executor_factory.with_state(LatestStateProviderRef::new(provider.tx_ref())); diff --git a/bin/reth/src/debug_cmd/merkle.rs b/bin/reth/src/debug_cmd/merkle.rs index 765d1f866132e..fd6a9c0c51096 100644 --- a/bin/reth/src/debug_cmd/merkle.rs +++ b/bin/reth/src/debug_cmd/merkle.rs @@ -197,7 +197,7 @@ impl Command { checkpoint.stage_checkpoint.is_some() }); - let factory = reth_revm::Factory::new(self.chain.clone()); + let factory = reth_revm::EvmProcessorFactory::new(self.chain.clone()); let mut execution_stage = ExecutionStage::new( factory, ExecutionStageThresholds { diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index e7f559b6fe66f..63b04af3174bc 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -64,7 +64,7 @@ use reth_provider::{ HeaderProvider, HeaderSyncMode, ProviderFactory, StageCheckpointReader, }; use reth_prune::{segments::SegmentSet, Pruner}; -use reth_revm::Factory; +use reth_revm::EvmProcessorFactory; use reth_revm_inspectors::stack::Hook; use reth_rpc_engine_api::EngineApi; use reth_snapshot::HighestSnapshotsTracker; @@ -295,7 +295,7 @@ impl NodeCommand { let 
tree_externals = TreeExternals::new( provider_factory.clone(), Arc::clone(&consensus), - Factory::new(self.chain.clone()), + EvmProcessorFactory::new(self.chain.clone()), ); let tree = BlockchainTree::new( tree_externals, @@ -873,7 +873,7 @@ impl NodeCommand { let (tip_tx, tip_rx) = watch::channel(B256::ZERO); use reth_revm_inspectors::stack::InspectorStackConfig; - let factory = reth_revm::Factory::new(self.chain.clone()); + let factory = reth_revm::EvmProcessorFactory::new(self.chain.clone()); let stack_config = InspectorStackConfig { use_printer_tracer: self.debug.print_inspector, diff --git a/bin/reth/src/stage/dump/execution.rs b/bin/reth/src/stage/dump/execution.rs index 5bc301bf8101e..d0ce96ce5dfcb 100644 --- a/bin/reth/src/stage/dump/execution.rs +++ b/bin/reth/src/stage/dump/execution.rs @@ -7,7 +7,7 @@ use reth_db::{ }; use reth_primitives::{stage::StageCheckpoint, ChainSpec}; use reth_provider::ProviderFactory; -use reth_revm::Factory; +use reth_revm::EvmProcessorFactory; use reth_stages::{stages::ExecutionStage, Stage, UnwindInput}; use std::{path::PathBuf, sync::Arc}; use tracing::info; @@ -98,7 +98,8 @@ async fn unwind_and_copy( let factory = ProviderFactory::new(db_tool.db, db_tool.chain.clone()); let provider = factory.provider_rw()?; - let mut exec_stage = ExecutionStage::new_with_factory(Factory::new(db_tool.chain.clone())); + let mut exec_stage = + ExecutionStage::new_with_factory(EvmProcessorFactory::new(db_tool.chain.clone())); exec_stage.unwind( &provider, @@ -129,7 +130,7 @@ async fn dry_run( info!(target: "reth::cli", "Executing stage. [dry-run]"); let factory = ProviderFactory::new(&output_db, chain.clone()); - let mut exec_stage = ExecutionStage::new_with_factory(Factory::new(chain.clone())); + let mut exec_stage = ExecutionStage::new_with_factory(EvmProcessorFactory::new(chain.clone())); let input = reth_stages::ExecInput { target: Some(to), checkpoint: Some(StageCheckpoint::new(from)) }; diff --git a/bin/reth/src/stage/dump/merkle.rs b/bin/reth/src/stage/dump/merkle.rs index 4615b884c35ea..c57a85c597b9c 100644 --- a/bin/reth/src/stage/dump/merkle.rs +++ b/bin/reth/src/stage/dump/merkle.rs @@ -68,7 +68,7 @@ async fn unwind_and_copy( // Bring Plainstate to TO (hashing stage execution requires it) let mut exec_stage = ExecutionStage::new( - reth_revm::Factory::new(db_tool.chain.clone()), + reth_revm::EvmProcessorFactory::new(db_tool.chain.clone()), ExecutionStageThresholds { max_blocks: Some(u64::MAX), max_changes: None, diff --git a/bin/reth/src/stage/run.rs b/bin/reth/src/stage/run.rs index 872f1da2b7e04..d1b0cbf670cd9 100644 --- a/bin/reth/src/stage/run.rs +++ b/bin/reth/src/stage/run.rs @@ -195,7 +195,7 @@ impl Command { } StageEnum::Senders => (Box::new(SenderRecoveryStage::new(batch_size)), None), StageEnum::Execution => { - let factory = reth_revm::Factory::new(self.chain.clone()); + let factory = reth_revm::EvmProcessorFactory::new(self.chain.clone()); ( Box::new(ExecutionStage::new( factory, diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index a37e2e148463b..781161bb3a02d 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -30,7 +30,7 @@ use reth_provider::{ PrunableBlockExecutor, }; use reth_prune::Pruner; -use reth_revm::Factory; +use reth_revm::EvmProcessorFactory; use reth_rpc_types::engine::{ CancunPayloadFields, ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, }; @@ -45,7 +45,7 @@ type 
TestBeaconConsensusEngine = BeaconConsensusEngine< Arc, ShareableBlockchainTree< Arc, - EitherExecutorFactory, + EitherExecutorFactory, >, >, Arc>, @@ -481,9 +481,9 @@ where executor_factory.extend(results); EitherExecutorFactory::Left(executor_factory) } - TestExecutorConfig::Real => { - EitherExecutorFactory::Right(Factory::new(self.base_config.chain_spec.clone())) - } + TestExecutorConfig::Real => EitherExecutorFactory::Right(EvmProcessorFactory::new( + self.base_config.chain_spec.clone(), + )), }; // Setup pipeline diff --git a/crates/revm/src/factory.rs b/crates/revm/src/factory.rs index 6e326b5cd3629..c35b1ad11af6d 100644 --- a/crates/revm/src/factory.rs +++ b/crates/revm/src/factory.rs @@ -7,14 +7,14 @@ use reth_primitives::ChainSpec; use reth_provider::{ExecutorFactory, PrunableBlockExecutor, StateProvider}; use std::sync::Arc; -/// Factory that spawn Executor. +/// Factory for creating [EVMProcessor]. #[derive(Clone, Debug)] -pub struct Factory { +pub struct EvmProcessorFactory { chain_spec: Arc, stack: Option, } -impl Factory { +impl EvmProcessorFactory { /// Create new factory pub fn new(chain_spec: Arc) -> Self { Self { chain_spec, stack: None } @@ -33,7 +33,7 @@ impl Factory { } } -impl ExecutorFactory for Factory { +impl ExecutorFactory for EvmProcessorFactory { fn with_state<'a, SP: StateProvider + 'a>( &'a self, sp: SP, diff --git a/crates/revm/src/lib.rs b/crates/revm/src/lib.rs index b1714e00578e2..116269da668b5 100644 --- a/crates/revm/src/lib.rs +++ b/crates/revm/src/lib.rs @@ -22,7 +22,7 @@ pub mod processor; pub mod state_change; /// revm executor factory. -pub use factory::Factory; +pub use factory::EvmProcessorFactory; /// reexport for convenience pub use reth_revm_inspectors::*; diff --git a/crates/stages/src/lib.rs b/crates/stages/src/lib.rs index 437bbe3f523c4..e59597ebabc14 100644 --- a/crates/stages/src/lib.rs +++ b/crates/stages/src/lib.rs @@ -17,7 +17,7 @@ //! # use reth_downloaders::headers::reverse_headers::ReverseHeadersDownloaderBuilder; //! # use reth_interfaces::consensus::Consensus; //! # use reth_interfaces::test_utils::{TestBodiesClient, TestConsensus, TestHeadersClient}; -//! # use reth_revm::Factory; +//! # use reth_revm::EvmProcessorFactory; //! # use reth_primitives::{PeerId, MAINNET, B256}; //! # use reth_stages::Pipeline; //! # use reth_stages::sets::DefaultStages; @@ -39,7 +39,7 @@ //! # provider_factory.clone() //! # ); //! # let (tip_tx, tip_rx) = watch::channel(B256::default()); -//! # let factory = Factory::new(chain_spec.clone()); +//! # let executor_factory = EvmProcessorFactory::new(chain_spec.clone()); //! // Create a pipeline that can fully sync //! # let pipeline = //! Pipeline::builder() @@ -50,7 +50,7 @@ //! consensus, //! headers_downloader, //! bodies_downloader, -//! factory, +//! executor_factory, //! )) //! .build(provider_factory); //! ``` diff --git a/crates/stages/src/sets.rs b/crates/stages/src/sets.rs index 3ef73849672bb..ac97fd5c742c5 100644 --- a/crates/stages/src/sets.rs +++ b/crates/stages/src/sets.rs @@ -12,26 +12,26 @@ //! ```no_run //! # use reth_stages::Pipeline; //! # use reth_stages::sets::{OfflineStages}; -//! # use reth_revm::Factory; +//! # use reth_revm::EvmProcessorFactory; //! # use reth_primitives::MAINNET; //! # use reth_provider::test_utils::create_test_provider_factory; //! -//! # let factory = Factory::new(MAINNET.clone()); +//! # let executor_factory = EvmProcessorFactory::new(MAINNET.clone()); //! # let provider_factory = create_test_provider_factory(); //! 
// Build a pipeline with all offline stages. -//! # let pipeline = Pipeline::builder().add_stages(OfflineStages::new(factory)).build(provider_factory); +//! # let pipeline = Pipeline::builder().add_stages(OfflineStages::new(executor_factory)).build(provider_factory); //! ``` //! //! ```ignore //! # use reth_stages::Pipeline; //! # use reth_stages::{StageSet, sets::OfflineStages}; -//! # use reth_revm::Factory; +//! # use reth_revm::EvmProcessorFactory; //! # use reth_primitives::MAINNET; //! // Build a pipeline with all offline stages and a custom stage at the end. -//! # let factory = Factory::new(MAINNET.clone()); +//! # let executor_factory = EvmProcessorFactory::new(MAINNET.clone()); //! Pipeline::builder() //! .add_stages( -//! OfflineStages::new(factory).builder().add_stage(MyCustomStage) +//! OfflineStages::new(executor_factory).builder().add_stage(MyCustomStage) //! ) //! .build(); //! ``` diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index 38d3a5979d39a..41a26165c9bbf 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -500,14 +500,15 @@ mod tests { ChainSpecBuilder, PruneModes, SealedBlock, StorageEntry, B256, MAINNET, U256, }; use reth_provider::{AccountReader, BlockWriter, ProviderFactory, ReceiptProvider}; - use reth_revm::Factory; + use reth_revm::EvmProcessorFactory; use std::sync::Arc; - fn stage() -> ExecutionStage { - let factory = - Factory::new(Arc::new(ChainSpecBuilder::mainnet().berlin_activated().build())); + fn stage() -> ExecutionStage { + let executor_factory = EvmProcessorFactory::new(Arc::new( + ChainSpecBuilder::mainnet().berlin_activated().build(), + )); ExecutionStage::new( - factory, + executor_factory, ExecutionStageThresholds { max_blocks: Some(100), max_changes: None, @@ -684,7 +685,7 @@ mod tests { provider.commit().unwrap(); let provider = factory.provider_rw().unwrap(); - let mut execution_stage: ExecutionStage = stage(); + let mut execution_stage: ExecutionStage = stage(); let output = execution_stage.execute(&provider, input).unwrap(); provider.commit().unwrap(); assert_matches!(output, ExecOutput { diff --git a/crates/stages/src/stages/mod.rs b/crates/stages/src/stages/mod.rs index 2c775edddbf14..ffe8ae1da1f64 100644 --- a/crates/stages/src/stages/mod.rs +++ b/crates/stages/src/stages/mod.rs @@ -62,7 +62,7 @@ mod tests { use reth_provider::{ AccountExtReader, BlockWriter, ProviderFactory, ReceiptProvider, StorageReader, }; - use reth_revm::Factory; + use reth_revm::EvmProcessorFactory; use std::sync::Arc; #[tokio::test] @@ -128,7 +128,9 @@ mod tests { // Check execution and create receipts and changesets according to the pruning // configuration let mut execution_stage = ExecutionStage::new( - Factory::new(Arc::new(ChainSpecBuilder::mainnet().berlin_activated().build())), + EvmProcessorFactory::new(Arc::new( + ChainSpecBuilder::mainnet().berlin_activated().build(), + )), ExecutionStageThresholds { max_blocks: Some(100), max_changes: None, diff --git a/testing/ef-tests/src/cases/blockchain_test.rs b/testing/ef-tests/src/cases/blockchain_test.rs index d775550295f44..3706f13ffd35c 100644 --- a/testing/ef-tests/src/cases/blockchain_test.rs +++ b/testing/ef-tests/src/cases/blockchain_test.rs @@ -101,9 +101,9 @@ impl Case for BlockchainTestCase { // Call execution stage { - let mut stage = ExecutionStage::new_with_factory(reth_revm::Factory::new( - Arc::new(case.network.clone().into()), - )); + let mut stage = ExecutionStage::new_with_factory( + 
reth_revm::EvmProcessorFactory::new(Arc::new(case.network.clone().into())), + ); let target = last_block.as_ref().map(|b| b.number); tokio::runtime::Builder::new_current_thread() From de048c45613e9ea26e6812a6511c5de9b33dea35 Mon Sep 17 00:00:00 2001 From: gbrew <127057440+gbrew@users.noreply.github.com> Date: Wed, 22 Nov 2023 10:32:28 -0700 Subject: [PATCH 74/77] fix(tracing): reduce stack memory when tracing (#5528) --- crates/revm/revm-inspectors/src/tracing/builder/parity.rs | 6 +++--- crates/revm/revm-inspectors/src/tracing/mod.rs | 2 +- crates/revm/revm-inspectors/src/tracing/types.rs | 8 +++----- 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs index 9d984bcfb69ce..ae9560a2632b4 100644 --- a/crates/revm/revm-inspectors/src/tracing/builder/parity.rs +++ b/crates/revm/revm-inspectors/src/tracing/builder/parity.rs @@ -453,10 +453,10 @@ impl ParityTraceBuilder { } }; let mut push_stack = step.push_stack.clone().unwrap_or_default(); - for idx in (0..show_stack).rev() { - if let Some(stack) = step.stack.as_ref() { + if let Some(stack) = step.stack.as_ref() { + for idx in (0..show_stack).rev() { if stack.len() > idx { - push_stack.push(stack.peek(idx).unwrap_or_default()) + push_stack.push(stack[stack.len() - idx - 1]) } } } diff --git a/crates/revm/revm-inspectors/src/tracing/mod.rs b/crates/revm/revm-inspectors/src/tracing/mod.rs index dbd7101ed8f64..70314c8b19c83 100644 --- a/crates/revm/revm-inspectors/src/tracing/mod.rs +++ b/crates/revm/revm-inspectors/src/tracing/mod.rs @@ -282,7 +282,7 @@ impl TracingInspector { .record_memory_snapshots .then(|| RecordedMemory::new(interp.shared_memory.context_memory().to_vec())) .unwrap_or_default(); - let stack = self.config.record_stack_snapshots.then(|| interp.stack.clone()); + let stack = self.config.record_stack_snapshots.then(|| interp.stack.data().clone()); let op = OpCode::new(interp.current_opcode()) .or_else(|| { diff --git a/crates/revm/revm-inspectors/src/tracing/types.rs b/crates/revm/revm-inspectors/src/tracing/types.rs index ecc2e261b2b81..8659ed7f82218 100644 --- a/crates/revm/revm-inspectors/src/tracing/types.rs +++ b/crates/revm/revm-inspectors/src/tracing/types.rs @@ -10,9 +10,7 @@ use reth_rpc_types::trace::{ SelfdestructAction, TraceOutput, TransactionTrace, }, }; -use revm::interpreter::{ - opcode, CallContext, CallScheme, CreateScheme, InstructionResult, OpCode, Stack, -}; +use revm::interpreter::{opcode, CallContext, CallScheme, CreateScheme, InstructionResult, OpCode}; use serde::{Deserialize, Serialize}; use std::collections::{BTreeMap, VecDeque}; @@ -511,7 +509,7 @@ pub(crate) struct CallTraceStep { /// Current contract address pub(crate) contract: Address, /// Stack before step execution - pub(crate) stack: Option, + pub(crate) stack: Option>, /// The new stack items placed by this step if any pub(crate) push_stack: Option>, /// All allocated memory in a step @@ -563,7 +561,7 @@ impl CallTraceStep { }; if opts.is_stack_enabled() { - log.stack = self.stack.as_ref().map(|stack| stack.data().clone()); + log.stack = self.stack.clone(); } if opts.is_memory_enabled() { From de8b4526ff738b682c214c1ab7309bf2e63811f3 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 22 Nov 2023 20:28:14 +0100 Subject: [PATCH 75/77] feat: add graceful shutdown tasks (#5517) --- bin/reth/src/node/mod.rs | 2 +- crates/tasks/src/lib.rs | 174 +++++++++++++++++++++++++++++++++-- crates/tasks/src/shutdown.rs | 55 
++++++++++-
 3 files changed, 221 insertions(+), 10 deletions(-)

diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs
index 63b04af3174bc..3a694c74137d2 100644
--- a/bin/reth/src/node/mod.rs
+++ b/bin/reth/src/node/mod.rs
@@ -716,7 +716,7 @@ impl<Ext: RethCliExt> NodeCommand<Ext> {
         task_executor.spawn_critical("p2p eth request handler", eth);
 
         let known_peers_file = self.network.persistent_peers_file(default_peers_path);
-        task_executor.spawn_critical_with_signal("p2p network task", |shutdown| {
+        task_executor.spawn_critical_with_shutdown_signal("p2p network task", |shutdown| {
             run_network_until_shutdown(shutdown, network, known_peers_file)
         });
 
diff --git a/crates/tasks/src/lib.rs b/crates/tasks/src/lib.rs
index c3f1195f1f29e..02fe964de1d2f 100644
--- a/crates/tasks/src/lib.rs
+++ b/crates/tasks/src/lib.rs
@@ -11,7 +11,7 @@
 use crate::{
     metrics::{IncCounterOnDrop, TaskExecutorMetrics},
-    shutdown::{signal, Shutdown, Signal},
+    shutdown::{signal, GracefulShutdown, GracefulShutdownGuard, Shutdown, Signal},
 };
 use dyn_clone::DynClone;
 use futures_util::{
@@ -22,6 +22,10 @@ use std::{
     any::Any,
     fmt::{Display, Formatter},
     pin::Pin,
+    sync::{
+        atomic::{AtomicUsize, Ordering},
+        Arc,
+    },
     task::{ready, Context, Poll},
 };
 use tokio::{
@@ -29,7 +33,7 @@ use tokio::{
     sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender},
     task::JoinHandle,
 };
-use tracing::error;
+use tracing::{debug, error};
 use tracing_futures::Instrument;
 
 pub mod metrics;
@@ -147,10 +151,12 @@ pub struct TaskManager {
     panicked_tasks_rx: UnboundedReceiver<PanickedTaskError>,
     /// The [Signal] to fire when all tasks should be shut down.
     ///
-    /// This is fired on drop.
-    _signal: Signal,
+    /// This is fired when dropped.
+    signal: Option<Signal>,
     /// Receiver of the shutdown signal.
     on_shutdown: Shutdown,
+    /// How many [GracefulShutdown] tasks are currently active
+    graceful_tasks: Arc<AtomicUsize>,
 }
 
 // === impl TaskManager ===
@@ -159,8 +165,15 @@ impl TaskManager {
     /// Create a new instance connected to the given handle's tokio runtime.
     pub fn new(handle: Handle) -> Self {
         let (panicked_tasks_tx, panicked_tasks_rx) = unbounded_channel();
-        let (_signal, on_shutdown) = signal();
-        Self { handle, panicked_tasks_tx, panicked_tasks_rx, _signal, on_shutdown }
+        let (signal, on_shutdown) = signal();
+        Self {
+            handle,
+            panicked_tasks_tx,
+            panicked_tasks_rx,
+            signal: Some(signal),
+            on_shutdown,
+            graceful_tasks: Arc::new(AtomicUsize::new(0)),
+        }
     }
 
     /// Returns a new [`TaskExecutor`] that can spawn new tasks onto the tokio runtime this type is
@@ -171,8 +184,36 @@ impl TaskManager {
             on_shutdown: self.on_shutdown.clone(),
             panicked_tasks_tx: self.panicked_tasks_tx.clone(),
             metrics: Default::default(),
+            graceful_tasks: Arc::clone(&self.graceful_tasks),
         }
     }
+
+    /// Fires the shutdown signal and awaits until all tasks are shut down.
+    pub fn graceful_shutdown(self) {
+        let _ = self.do_graceful_shutdown(None);
+    }
+
+    /// Fires the shutdown signal and awaits until all tasks are shut down.
+    ///
+    /// Returns true if all tasks were shut down before the timeout elapsed.
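+    ///
+    /// A usage sketch (assumes a `manager` value of type [TaskManager]):
+    ///
+    /// ```ignore
+    /// if !manager.graceful_shutdown_with_timeout(std::time::Duration::from_secs(10)) {
+    ///     eprintln!("graceful shutdown timed out; some tasks may still be running");
+    /// }
+    /// ```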
+    pub fn graceful_shutdown_with_timeout(self, timeout: std::time::Duration) -> bool {
+        self.do_graceful_shutdown(Some(timeout))
+    }
+
+    fn do_graceful_shutdown(self, timeout: Option<std::time::Duration>) -> bool {
+        drop(self.signal);
+        let when = timeout.map(|t| std::time::Instant::now() + t);
+        while self.graceful_tasks.load(Ordering::Relaxed) > 0 {
+            if when.map(|when| std::time::Instant::now() > when).unwrap_or(false) {
+                debug!("graceful shutdown timed out");
+                return false
+            }
+            std::hint::spin_loop();
+        }
+
+        debug!("gracefully shut down");
+        true
+    }
 }
 
 /// An endless future that resolves if a critical task panicked.
@@ -232,6 +273,8 @@ pub struct TaskExecutor {
     panicked_tasks_tx: UnboundedSender<PanickedTaskError>,
     // Task Executor Metrics
     metrics: TaskExecutorMetrics,
+    /// How many [GracefulShutdown] tasks are currently active
+    graceful_tasks: Arc<AtomicUsize>,
 }
 
 // === impl TaskExecutor ===
@@ -382,7 +425,7 @@ impl TaskExecutor {
     /// This spawns a critical task onto the runtime.
     ///
     /// If this task panics, the [`TaskManager`] is notified.
-    pub fn spawn_critical_with_signal<F>(
+    pub fn spawn_critical_with_shutdown_signal<F>(
        &self,
        name: &'static str,
        f: impl FnOnce(Shutdown) -> F,
@@ -407,6 +450,55 @@ impl TaskExecutor {
 
         self.handle.spawn(task)
     }
+
+    /// This spawns a critical task onto the runtime.
+    ///
+    /// If this task panics, the [TaskManager] is notified.
+    /// The [TaskManager] will wait until the given future has completed before shutting down.
+    ///
+    /// # Example
+    ///
+    /// ```no_run
+    /// # async fn t(executor: reth_tasks::TaskExecutor) {
+    ///
+    /// executor.spawn_critical_with_graceful_shutdown_signal("grace", |shutdown| async move {
+    ///     // await the shutdown signal
+    ///     let guard = shutdown.await;
+    ///     // do work before exiting the program
+    ///     tokio::time::sleep(std::time::Duration::from_secs(1)).await;
+    ///     // allow graceful shutdown
+    ///     drop(guard);
+    /// });
+    /// # }
+    /// ```
+    pub fn spawn_critical_with_graceful_shutdown_signal<F>(
+        &self,
+        name: &'static str,
+        f: impl FnOnce(GracefulShutdown) -> F,
+    ) -> JoinHandle<()>
+    where
+        F: Future<Output = ()> + Send + 'static,
+    {
+        let panicked_tasks_tx = self.panicked_tasks_tx.clone();
+        let on_shutdown = GracefulShutdown::new(
+            self.on_shutdown.clone(),
+            GracefulShutdownGuard::new(Arc::clone(&self.graceful_tasks)),
+        );
+        let fut = f(on_shutdown);
+
+        // wrap the task in catch unwind
+        let task = std::panic::AssertUnwindSafe(fut)
+            .catch_unwind()
+            .map_err(move |error| {
+                let task_error = PanickedTaskError::new(name, error);
+                error!("{task_error}");
+                let _ = panicked_tasks_tx.send(task_error);
+            })
+            .map(|_| ())
+            .in_current_span();
+
+        self.handle.spawn(task)
+    }
 }
 
 impl TaskSpawner for TaskExecutor {
@@ -444,7 +536,7 @@ enum TaskKind {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use std::time::Duration;
+    use std::{sync::atomic::AtomicBool, time::Duration};
 
     #[test]
     fn test_cloneable() {
@@ -521,4 +613,70 @@ mod tests {
 
         handle.block_on(shutdown);
     }
+
+    #[test]
+    fn test_manager_graceful_shutdown() {
+        let runtime = tokio::runtime::Runtime::new().unwrap();
+        let handle = runtime.handle().clone();
+        let manager = TaskManager::new(handle.clone());
+        let executor = manager.executor();
+
+        let val = Arc::new(AtomicBool::new(false));
+        let c = val.clone();
+        executor.spawn_critical_with_graceful_shutdown_signal("grace", |shutdown| async move {
+            let _guard = shutdown.await;
+            tokio::time::sleep(Duration::from_millis(200)).await;
+            c.store(true, Ordering::Relaxed);
+        });
+
+        manager.graceful_shutdown();
+        assert!(val.load(Ordering::Relaxed));
+    }
+
+    #[test]
+    fn test_manager_graceful_shutdown_many() {
+        let runtime = tokio::runtime::Runtime::new().unwrap();
+        let handle = runtime.handle().clone();
+        let manager = TaskManager::new(handle.clone());
+        let executor = manager.executor();
+        let _e = executor.clone();
+
+        let counter = Arc::new(AtomicUsize::new(0));
+        let num = 10;
+        for _ in 0..num {
+            let c = counter.clone();
+            executor.spawn_critical_with_graceful_shutdown_signal(
+                "grace",
+                move |shutdown| async move {
+                    let _guard = shutdown.await;
+                    tokio::time::sleep(Duration::from_millis(200)).await;
+                    c.fetch_add(1, Ordering::SeqCst);
+                },
+            );
+        }
+
+        manager.graceful_shutdown();
+        assert_eq!(counter.load(Ordering::Relaxed), num);
+    }
+
+    #[test]
+    fn test_manager_graceful_shutdown_timeout() {
+        let runtime = tokio::runtime::Runtime::new().unwrap();
+        let handle = runtime.handle().clone();
+        let manager = TaskManager::new(handle.clone());
+        let executor = manager.executor();
+
+        let timeout = Duration::from_millis(500);
+        let val = Arc::new(AtomicBool::new(false));
+        let val2 = val.clone();
+        executor.spawn_critical_with_graceful_shutdown_signal("grace", |shutdown| async move {
+            let _guard = shutdown.await;
+            tokio::time::sleep(timeout * 3).await;
+            val2.store(true, Ordering::Relaxed);
+            unreachable!("should not be reached");
+        });
+
+        manager.graceful_shutdown_with_timeout(timeout);
+        assert!(!val.load(Ordering::Relaxed));
+    }
 }
diff --git a/crates/tasks/src/shutdown.rs b/crates/tasks/src/shutdown.rs
index 6264841ae2427..5cc012d8ec749 100644
--- a/crates/tasks/src/shutdown.rs
+++ b/crates/tasks/src/shutdown.rs
@@ -7,10 +7,63 @@ use futures_util::{
 use std::{
     future::Future,
     pin::Pin,
-    task::{Context, Poll},
+    sync::{atomic::AtomicUsize, Arc},
+    task::{ready, Context, Poll},
 };
 use tokio::sync::oneshot;
 
+/// A Future that resolves when the shutdown event has been fired.
+///
+/// The [TaskManager](crate::TaskManager) waits for all [GracefulShutdownGuard]s
+/// returned by this future to be dropped before it completes a graceful shutdown.
+#[derive(Debug)]
+pub struct GracefulShutdown {
+    shutdown: Shutdown,
+    guard: Option<GracefulShutdownGuard>,
+}
+
+impl GracefulShutdown {
+    pub(crate) fn new(shutdown: Shutdown, guard: GracefulShutdownGuard) -> Self {
+        Self { shutdown, guard: Some(guard) }
+    }
+}
+
+impl Future for GracefulShutdown {
+    type Output = GracefulShutdownGuard;
+
+    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        ready!(self.shutdown.poll_unpin(cx));
+        Poll::Ready(self.get_mut().guard.take().expect("Future polled after completion"))
+    }
+}
+
+impl Clone for GracefulShutdown {
+    fn clone(&self) -> Self {
+        Self {
+            shutdown: self.shutdown.clone(),
+            guard: self.guard.as_ref().map(|g| GracefulShutdownGuard::new(Arc::clone(&g.0))),
+        }
+    }
+}
+
+/// A guard that fires once dropped to signal the [TaskManager](crate::TaskManager) that the
+/// [GracefulShutdown] has completed.
+#[derive(Debug)]
+#[must_use = "if unused the task will not be gracefully shut down"]
+pub struct GracefulShutdownGuard(Arc<AtomicUsize>);
+
+impl GracefulShutdownGuard {
+    pub(crate) fn new(counter: Arc<AtomicUsize>) -> Self {
+        counter.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
+        Self(counter)
+    }
+}
+
+impl Drop for GracefulShutdownGuard {
+    fn drop(&mut self) {
+        self.0.fetch_sub(1, std::sync::atomic::Ordering::SeqCst);
+    }
+}
+
 /// A Future that resolves when the shutdown event has been fired.
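+///
+/// Unlike [GracefulShutdown], this future yields no guard, so tasks that only
+/// await it do not delay [TaskManager::graceful_shutdown](crate::TaskManager::graceful_shutdown).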
#[derive(Debug, Clone)] pub struct Shutdown(Shared>); From 1aa4ae8c6d071604a17a0a2656b591b448202454 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Wed, 22 Nov 2023 11:39:50 -0800 Subject: [PATCH 76/77] test(trie): instantiate provider with util function (#5535) --- crates/trie/Cargo.toml | 2 +- crates/trie/src/proof.rs | 54 +++++++++---------- crates/trie/src/trie.rs | 48 +++++++---------- crates/trie/src/trie_cursor/account_cursor.rs | 8 ++- crates/trie/src/trie_cursor/storage_cursor.rs | 14 ++--- crates/trie/src/walker.rs | 15 ++---- 6 files changed, 58 insertions(+), 83 deletions(-) diff --git a/crates/trie/Cargo.toml b/crates/trie/Cargo.toml index 1aaf2be53be9c..43b87026c83f1 100644 --- a/crates/trie/Cargo.toml +++ b/crates/trie/Cargo.toml @@ -34,7 +34,7 @@ triehash = { version = "0.8", optional = true } # reth reth-primitives = { workspace = true, features = ["test-utils", "arbitrary"] } reth-db = { workspace = true, features = ["test-utils"] } -reth-provider.workspace = true +reth-provider = { workspace = true, features = ["test-utils"] } # trie triehash = "0.8" diff --git a/crates/trie/src/proof.rs b/crates/trie/src/proof.rs index f37e55e1f43b6..eb7c438c4b555 100644 --- a/crates/trie/src/proof.rs +++ b/crates/trie/src/proof.rs @@ -166,10 +166,10 @@ mod tests { use super::*; use crate::StateRoot; use once_cell::sync::Lazy; - use reth_db::{database::Database, test_utils::create_test_rw_db}; + use reth_db::database::Database; use reth_interfaces::RethResult; use reth_primitives::{Account, Bytes, Chain, ChainSpec, StorageEntry, HOLESKY, MAINNET, U256}; - use reth_provider::{HashingWriter, ProviderFactory}; + use reth_provider::{test_utils::create_test_provider_factory, HashingWriter, ProviderFactory}; use std::{str::FromStr, sync::Arc}; /* @@ -197,8 +197,10 @@ mod tests { path.into_iter().map(Bytes::from_str).collect::, _>>().unwrap() } - fn insert_genesis(db: DB, chain_spec: Arc) -> RethResult<()> { - let provider_factory = ProviderFactory::new(db, chain_spec.clone()); + fn insert_genesis( + provider_factory: &ProviderFactory, + chain_spec: Arc, + ) -> RethResult<()> { let mut provider = provider_factory.provider_rw()?; // Hash accounts and insert them into hashing table. @@ -233,10 +235,8 @@ mod tests { #[test] fn testspec_proofs() { // Create test database and insert genesis accounts. - let db = create_test_rw_db(); - insert_genesis(db.clone(), TEST_SPEC.clone()).unwrap(); - - let tx = db.tx().unwrap(); + let factory = create_test_provider_factory(); + insert_genesis(&factory, TEST_SPEC.clone()).unwrap(); let data = Vec::from([ ( @@ -277,9 +277,10 @@ mod tests { ), ]); + let provider = factory.provider().unwrap(); for (target, expected_proof) in data { let target = Address::from_str(target).unwrap(); - let account_proof = Proof::new(&tx).account_proof(target, &[]).unwrap(); + let account_proof = Proof::new(provider.tx_ref()).account_proof(target, &[]).unwrap(); pretty_assertions::assert_eq!( account_proof.proof, expected_proof, @@ -291,14 +292,14 @@ mod tests { #[test] fn testspec_empty_storage_proof() { // Create test database and insert genesis accounts. 
- let db = create_test_rw_db(); - insert_genesis(db.clone(), TEST_SPEC.clone()).unwrap(); - - let tx = db.tx().unwrap(); + let factory = create_test_provider_factory(); + insert_genesis(&factory, TEST_SPEC.clone()).unwrap(); let target = Address::from_str("0x1ed9b1dd266b607ee278726d324b855a093394a6").unwrap(); let slots = Vec::from([B256::with_last_byte(1), B256::with_last_byte(3)]); - let account_proof = Proof::new(&tx).account_proof(target, &slots).unwrap(); + + let provider = factory.provider().unwrap(); + let account_proof = Proof::new(provider.tx_ref()).account_proof(target, &slots).unwrap(); assert_eq!(account_proof.storage_root, EMPTY_ROOT_HASH, "expected empty storage root"); assert_eq!(slots.len(), account_proof.storage_proofs.len()); @@ -310,8 +311,8 @@ mod tests { #[test] fn mainnet_genesis_account_proof() { // Create test database and insert genesis accounts. - let db = create_test_rw_db(); - insert_genesis(db.clone(), MAINNET.clone()).unwrap(); + let factory = create_test_provider_factory(); + insert_genesis(&factory, MAINNET.clone()).unwrap(); // Address from mainnet genesis allocation. // keccak256 - `0xcf67b71c90b0d523dd5004cf206f325748da347685071b34812e21801f5270c4` @@ -326,16 +327,16 @@ mod tests { "0xf8719f20b71c90b0d523dd5004cf206f325748da347685071b34812e21801f5270c4b84ff84d80890ad78ebc5ac6200000a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" ]); - let tx = db.tx().unwrap(); - let account_proof = Proof::new(&tx).account_proof(target, &[]).unwrap(); + let provider = factory.provider().unwrap(); + let account_proof = Proof::new(provider.tx_ref()).account_proof(target, &[]).unwrap(); pretty_assertions::assert_eq!(account_proof.proof, expected_account_proof); } #[test] fn mainnet_genesis_account_proof_nonexistent() { // Create test database and insert genesis accounts. - let db = create_test_rw_db(); - insert_genesis(db.clone(), MAINNET.clone()).unwrap(); + let factory = create_test_provider_factory(); + insert_genesis(&factory, MAINNET.clone()).unwrap(); // Address that does not exist in mainnet genesis allocation. 
// keccak256 - `0x18f415ffd7f66bb1924d90f0e82fb79ca8c6d8a3473cd9a95446a443b9db1761` @@ -348,18 +349,16 @@ mod tests { "0xf901d1a0b7c55b381eb205712a2f5d1b7d6309ac725da79ab159cb77dc2783af36e6596da0b3b48aa390e0f3718b486ccc32b01682f92819e652315c1629058cd4d9bb1545a0e3c0cc68af371009f14416c27e17f05f4f696566d2ba45362ce5711d4a01d0e4a0bad1e085e431b510508e2a9e3712633a414b3fe6fd358635ab206021254c1e10a0f8407fe8d5f557b9e012d52e688139bd932fec40d48630d7ff4204d27f8cc68da08c6ca46eff14ad4950e65469c394ca9d6b8690513b1c1a6f91523af00082474c80a0630c034178cb1290d4d906edf28688804d79d5e37a3122c909adab19ac7dc8c5a059f6d047c5d1cc75228c4517a537763cb410c38554f273e5448a53bc3c7166e7a0d842f53ce70c3aad1e616fa6485d3880d15c936fcc306ec14ae35236e5a60549a0218ee2ee673c69b4e1b953194b2568157a69085b86e4f01644fa06ab472c6cf9a016a35a660ea496df7c0da646378bfaa9562f401e42a5c2fe770b7bbe22433585a0dd0fbbe227a4d50868cdbb3107573910fd97131ea8d835bef81d91a2fc30b175a06aafa3d78cf179bf055bd5ec629be0ff8352ce0aec9125a4d75be3ee7eb71f10a01d6817ef9f64fcbb776ff6df0c83138dcd2001bd752727af3e60f4afc123d8d58080" ]); - let tx = db.tx().unwrap(); - let account_proof = Proof::new(&tx).account_proof(target, &[]).unwrap(); + let provider = factory.provider().unwrap(); + let account_proof = Proof::new(provider.tx_ref()).account_proof(target, &[]).unwrap(); pretty_assertions::assert_eq!(account_proof.proof, expected_account_proof); } #[test] fn holesky_deposit_contract_proof() { // Create test database and insert genesis accounts. - let db = create_test_rw_db(); - insert_genesis(db.clone(), HOLESKY.clone()).unwrap(); - - let tx = db.tx().unwrap(); + let factory = create_test_provider_factory(); + insert_genesis(&factory, HOLESKY.clone()).unwrap(); let target = Address::from_str("0x4242424242424242424242424242424242424242").unwrap(); // existent @@ -435,7 +434,8 @@ mod tests { ]) }; - let account_proof = Proof::new(&tx).account_proof(target, &slots).unwrap(); + let provider = factory.provider().unwrap(); + let account_proof = Proof::new(provider.tx_ref()).account_proof(target, &slots).unwrap(); pretty_assertions::assert_eq!(account_proof, expected); } } diff --git a/crates/trie/src/trie.rs b/crates/trie/src/trie.rs index b1ca79681ee37..4fdc41a7dd665 100644 --- a/crates/trie/src/trie.rs +++ b/crates/trie/src/trie.rs @@ -496,7 +496,7 @@ mod tests { use reth_db::{ cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}, tables, - test_utils::create_test_rw_db, + test_utils::TempDatabase, transaction::DbTxMut, DatabaseEnv, }; @@ -505,10 +505,10 @@ mod tests { keccak256, proofs::triehash::KeccakHasher, trie::{BranchNodeCompact, TrieMask}, - Account, Address, StorageEntry, B256, MAINNET, U256, + Account, Address, StorageEntry, B256, U256, }; - use reth_provider::{DatabaseProviderRW, ProviderFactory}; - use std::{collections::BTreeMap, ops::Mul, str::FromStr}; + use reth_provider::{test_utils::create_test_provider_factory, DatabaseProviderRW}; + use std::{collections::BTreeMap, ops::Mul, str::FromStr, sync::Arc}; fn insert_account( tx: &impl DbTxMut, @@ -532,8 +532,7 @@ mod tests { } fn incremental_vs_full_root(inputs: &[&str], modified: &str) { - let db = create_test_rw_db(); - let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone()); + let factory = create_test_provider_factory(); let tx = factory.provider_rw().unwrap(); let hashed_address = B256::with_last_byte(1); @@ -598,8 +597,7 @@ mod tests { let (address, storage) = item; let hashed_address = keccak256(address); - let db = create_test_rw_db(); - let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone()); + let 
factory = create_test_provider_factory(); let tx = factory.provider_rw().unwrap(); for (key, value) in &storage { tx.tx_ref().put::( @@ -656,8 +654,7 @@ mod tests { #[test] // This ensures we return an empty root when there are no storage entries fn test_empty_storage_root() { - let db = create_test_rw_db(); - let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone()); + let factory = create_test_provider_factory(); let tx = factory.provider_rw().unwrap(); let address = Address::random(); @@ -678,8 +675,7 @@ mod tests { #[test] // This ensures that the walker goes over all the storage slots fn test_storage_root() { - let db = create_test_rw_db(); - let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone()); + let factory = create_test_provider_factory(); let tx = factory.provider_rw().unwrap(); let address = Address::random(); @@ -720,8 +716,7 @@ mod tests { let hashed_entries_total = state.len() + state.values().map(|(_, slots)| slots.len()).sum::(); - let db = create_test_rw_db(); - let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone()); + let factory = create_test_provider_factory(); let tx = factory.provider_rw().unwrap(); for (address, (account, storage)) in &state { @@ -759,8 +754,7 @@ mod tests { } fn test_state_root_with_state(state: State) { - let db = create_test_rw_db(); - let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone()); + let factory = create_test_provider_factory(); let tx = factory.provider_rw().unwrap(); for (address, (account, storage)) in &state { @@ -786,8 +780,7 @@ mod tests { #[test] fn storage_root_regression() { - let db = create_test_rw_db(); - let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone()); + let factory = create_test_provider_factory(); let tx = factory.provider_rw().unwrap(); // Some address whose hash starts with 0xB041 let address3 = Address::from_str("16b07afd1c635f77172e842a000ead9a2a222459").unwrap(); @@ -831,8 +824,7 @@ mod tests { .map(|(slot, val)| (B256::from_str(slot).unwrap(), U256::from(val))), ); - let db = create_test_rw_db(); - let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone()); + let factory = create_test_provider_factory(); let tx = factory.provider_rw().unwrap(); let mut hashed_account_cursor = @@ -1138,8 +1130,7 @@ mod tests { #[test] fn account_trie_around_extension_node() { - let db = create_test_rw_db(); - let factory = ProviderFactory::new(db.db(), MAINNET.clone()); + let factory = create_test_provider_factory(); let tx = factory.provider_rw().unwrap(); let expected = extension_node_trie(&tx); @@ -1164,8 +1155,7 @@ mod tests { #[test] fn account_trie_around_extension_node_with_dbtrie() { - let db = create_test_rw_db(); - let factory = ProviderFactory::new(db.db(), MAINNET.clone()); + let factory = create_test_provider_factory(); let tx = factory.provider_rw().unwrap(); let expected = extension_node_trie(&tx); @@ -1193,8 +1183,7 @@ mod tests { #[test] fn fuzz_state_root_incremental(account_changes: [BTreeMap; 5]) { tokio::runtime::Runtime::new().unwrap().block_on(async { - let db = create_test_rw_db(); - let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone()); + let factory = create_test_provider_factory(); let tx = factory.provider_rw().unwrap(); let mut hashed_account_cursor = tx.tx_ref().cursor_write::().unwrap(); @@ -1227,8 +1216,7 @@ mod tests { #[test] fn storage_trie_around_extension_node() { - let db = create_test_rw_db(); - let factory = ProviderFactory::new(db.db(), MAINNET.clone()); + let factory = create_test_provider_factory(); let tx = 
factory.provider_rw().unwrap(); let hashed_address = B256::random(); @@ -1254,7 +1242,7 @@ mod tests { } fn extension_node_storage_trie( - tx: &DatabaseProviderRW<&DatabaseEnv>, + tx: &DatabaseProviderRW>>, hashed_address: B256, ) -> (B256, HashMap) { let value = U256::from(1); @@ -1282,7 +1270,7 @@ mod tests { (root, updates) } - fn extension_node_trie(tx: &DatabaseProviderRW<&DatabaseEnv>) -> B256 { + fn extension_node_trie(tx: &DatabaseProviderRW>>) -> B256 { let a = Account { nonce: 0, balance: U256::from(1u64), bytecode_hash: Some(B256::random()) }; let val = encode_account(a, None); diff --git a/crates/trie/src/trie_cursor/account_cursor.rs b/crates/trie/src/trie_cursor/account_cursor.rs index 815396ab0ac10..0fe241760a06d 100644 --- a/crates/trie/src/trie_cursor/account_cursor.rs +++ b/crates/trie/src/trie_cursor/account_cursor.rs @@ -46,16 +46,14 @@ mod tests { use reth_db::{ cursor::{DbCursorRO, DbCursorRW}, tables, - test_utils::create_test_rw_db, transaction::DbTxMut, }; - use reth_primitives::{hex_literal::hex, MAINNET}; - use reth_provider::ProviderFactory; + use reth_primitives::hex_literal::hex; + use reth_provider::test_utils::create_test_provider_factory; #[test] fn test_account_trie_order() { - let db = create_test_rw_db(); - let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone()); + let factory = create_test_provider_factory(); let provider = factory.provider_rw().unwrap(); let mut cursor = provider.tx_ref().cursor_write::().unwrap(); diff --git a/crates/trie/src/trie_cursor/storage_cursor.rs b/crates/trie/src/trie_cursor/storage_cursor.rs index 032ff97f64fed..19fe1b2819148 100644 --- a/crates/trie/src/trie_cursor/storage_cursor.rs +++ b/crates/trie/src/trie_cursor/storage_cursor.rs @@ -60,20 +60,14 @@ where mod tests { use super::*; - use reth_db::{ - cursor::DbCursorRW, tables, test_utils::create_test_rw_db, transaction::DbTxMut, - }; - use reth_primitives::{ - trie::{BranchNodeCompact, StorageTrieEntry}, - MAINNET, - }; - use reth_provider::ProviderFactory; + use reth_db::{cursor::DbCursorRW, tables, transaction::DbTxMut}; + use reth_primitives::trie::{BranchNodeCompact, StorageTrieEntry}; + use reth_provider::test_utils::create_test_provider_factory; // tests that upsert and seek match on the storagetrie cursor #[test] fn test_storage_cursor_abstraction() { - let db = create_test_rw_db(); - let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone()); + let factory = create_test_provider_factory(); let provider = factory.provider_rw().unwrap(); let mut cursor = provider.tx_ref().cursor_dup_write::().unwrap(); diff --git a/crates/trie/src/walker.rs b/crates/trie/src/walker.rs index 402977bfb1f0c..4ad38fe190ccf 100644 --- a/crates/trie/src/walker.rs +++ b/crates/trie/src/walker.rs @@ -252,11 +252,9 @@ mod tests { prefix_set::PrefixSetMut, trie_cursor::{AccountTrieCursor, StorageTrieCursor}, }; - use reth_db::{ - cursor::DbCursorRW, tables, test_utils::create_test_rw_db, transaction::DbTxMut, - }; - use reth_primitives::{trie::StorageTrieEntry, MAINNET}; - use reth_provider::ProviderFactory; + use reth_db::{cursor::DbCursorRW, tables, transaction::DbTxMut}; + use reth_primitives::trie::StorageTrieEntry; + use reth_provider::test_utils::create_test_provider_factory; #[test] fn walk_nodes_with_common_prefix() { @@ -281,9 +279,7 @@ mod tests { vec![0x5, 0x8, 0x2], ]; - let db = create_test_rw_db(); - - let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone()); + let factory = create_test_provider_factory(); let tx = factory.provider_rw().unwrap(); let 
mut account_cursor = tx.tx_ref().cursor_write::().unwrap(); @@ -327,8 +323,7 @@ mod tests { #[test] fn cursor_rootnode_with_changesets() { - let db = create_test_rw_db(); - let factory = ProviderFactory::new(db.as_ref(), MAINNET.clone()); + let factory = create_test_provider_factory(); let tx = factory.provider_rw().unwrap(); let mut cursor = tx.tx_ref().cursor_dup_write::().unwrap(); From 5e2affb15a7c43dc7ab6f1f709e4565acba78cc3 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 22 Nov 2023 20:58:02 +0100 Subject: [PATCH 77/77] feat: add configure network function to cli ext (#5536) --- bin/reth/src/cli/config.rs | 19 ++++++++++ bin/reth/src/cli/ext.rs | 46 ++++++++++++++++++++--- bin/reth/src/node/mod.rs | 54 ++++++++++++++------------- crates/net/network/src/builder.rs | 15 ++++++++ crates/net/network/src/manager.rs | 6 +++ crates/net/network/src/protocol.rs | 6 +++ crates/net/network/src/session/mod.rs | 8 +++- crates/net/network/src/swarm.rs | 16 ++++++-- 8 files changed, 133 insertions(+), 37 deletions(-) diff --git a/bin/reth/src/cli/config.rs b/bin/reth/src/cli/config.rs index 8700edf04b706..48c1e2bd5fedf 100644 --- a/bin/reth/src/cli/config.rs +++ b/bin/reth/src/cli/config.rs @@ -1,6 +1,7 @@ //! Config traits for various node components. use alloy_rlp::Encodable; +use reth_network::protocol::IntoRlpxSubProtocol; use reth_primitives::{Bytes, BytesMut}; use reth_rpc::{eth::gas_oracle::GasPriceOracleConfig, JwtError, JwtSecret}; use reth_rpc_builder::{ @@ -102,3 +103,21 @@ pub trait PayloadBuilderConfig { #[cfg(feature = "optimism")] fn compute_pending_block(&self) -> bool; } + +/// A trait that can be used to apply additional configuration to the network. +pub trait RethNetworkConfig { + /// Adds a new additional protocol to the RLPx sub-protocol list. + /// + /// These additional protocols are negotiated during the RLPx handshake. + /// If both peers share the same protocol, the corresponding handler will be included alongside + /// the `eth` protocol. + /// + /// See also [ProtocolHandler](reth_network::protocol::ProtocolHandler) + fn add_rlpx_sub_protocol(&mut self, protocol: impl IntoRlpxSubProtocol); +} + +impl RethNetworkConfig for reth_network::NetworkManager { + fn add_rlpx_sub_protocol(&mut self, protocol: impl IntoRlpxSubProtocol) { + reth_network::NetworkManager::add_rlpx_sub_protocol(self, protocol); + } +} diff --git a/bin/reth/src/cli/ext.rs b/bin/reth/src/cli/ext.rs index c48778f2c3afe..352997527396e 100644 --- a/bin/reth/src/cli/ext.rs +++ b/bin/reth/src/cli/ext.rs @@ -10,7 +10,7 @@ use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; use reth_tasks::TaskSpawner; use std::{fmt, marker::PhantomData}; -use crate::cli::components::RethRpcServerHandles; +use crate::cli::{components::RethRpcServerHandles, config::RethNetworkConfig}; /// A trait that allows for extending parts of the CLI with additional functionality. /// @@ -35,12 +35,30 @@ impl RethCliExt for () { /// /// The functions are invoked during the initialization of the node command in the following order: /// -/// 1. [on_components_initialized](RethNodeCommandConfig::on_components_initialized) -/// 2. [spawn_payload_builder_service](RethNodeCommandConfig::spawn_payload_builder_service) -/// 3. [extend_rpc_modules](RethNodeCommandConfig::extend_rpc_modules) -/// 4. [on_rpc_server_started](RethNodeCommandConfig::on_rpc_server_started) -/// 5. [on_node_started](RethNodeCommandConfig::on_node_started) +/// 1. [configure_network](RethNodeCommandConfig::configure_network) +/// 2. 
[on_components_initialized](RethNodeCommandConfig::on_components_initialized) +/// 3. [spawn_payload_builder_service](RethNodeCommandConfig::spawn_payload_builder_service) +/// 4. [extend_rpc_modules](RethNodeCommandConfig::extend_rpc_modules) +/// 5. [on_rpc_server_started](RethNodeCommandConfig::on_rpc_server_started) +/// 6. [on_node_started](RethNodeCommandConfig::on_node_started) pub trait RethNodeCommandConfig: fmt::Debug { + /// Invoked with the network configuration before the network is configured. + /// + /// This allows additional configuration of the network before it is launched. + fn configure_network( + &mut self, + config: &mut Conf, + components: &Reth, + ) -> eyre::Result<()> + where + Conf: RethNetworkConfig, + Reth: RethNodeComponents, + { + let _ = config; + let _ = components; + Ok(()) + } + /// Event hook called once all components have been initialized. /// /// This is called as soon as the node components have been initialized. @@ -224,6 +242,22 @@ impl NoArgs { } impl RethNodeCommandConfig for NoArgs { + fn configure_network( + &mut self, + config: &mut Conf, + components: &Reth, + ) -> eyre::Result<()> + where + Conf: RethNetworkConfig, + Reth: RethNodeComponents, + { + if let Some(conf) = self.inner_mut() { + conf.configure_network(config, components) + } else { + Ok(()) + } + } + fn on_components_initialized( &mut self, components: &Reth, diff --git a/bin/reth/src/node/mod.rs b/bin/reth/src/node/mod.rs index 3a694c74137d2..42d3e0136f59f 100644 --- a/bin/reth/src/node/mod.rs +++ b/bin/reth/src/node/mod.rs @@ -49,9 +49,7 @@ use reth_interfaces::{ }, RethResult, }; -use reth_network::{ - error::NetworkError, NetworkConfig, NetworkEvents, NetworkHandle, NetworkManager, -}; +use reth_network::{NetworkBuilder, NetworkConfig, NetworkEvents, NetworkHandle, NetworkManager}; use reth_network_api::{NetworkInfo, PeersInfo}; use reth_primitives::{ constants::eip4844::{LoadKzgSettingsError, MAINNET_KZG_TRUSTED_SETUP}, @@ -356,25 +354,34 @@ impl NodeCommand { secret_key, default_peers_path.clone(), ); - let network = self - .start_network( - network_config, - &ctx.task_executor, - transaction_pool.clone(), - default_peers_path, - ) - .await?; - info!(target: "reth::cli", peer_id = %network.peer_id(), local_addr = %network.local_addr(), enode = %network.local_node_record(), "Connected to P2P network"); - debug!(target: "reth::cli", peer_id = ?network.peer_id(), "Full peer ID"); - let network_client = network.fetch_client().await?; + + let network_client = network_config.client.clone(); + let mut network_builder = NetworkManager::builder(network_config).await?; let components = RethNodeComponentsImpl { provider: blockchain_db.clone(), pool: transaction_pool.clone(), - network: network.clone(), + network: network_builder.handle(), task_executor: ctx.task_executor.clone(), events: blockchain_db.clone(), }; + + // allow network modifications + self.ext.configure_network(network_builder.network_mut(), &components)?; + + // launch network + let network = self.start_network( + network_builder, + &ctx.task_executor, + transaction_pool.clone(), + network_client, + default_peers_path, + ); + + info!(target: "reth::cli", peer_id = %network.peer_id(), local_addr = %network.local_addr(), enode = %network.local_node_record(), "Connected to P2P network"); + debug!(target: "reth::cli", peer_id = ?network.peer_id(), "Full peer ID"); + let network_client = network.fetch_client().await?; + self.ext.on_components_initialized(&components)?; debug!(target: "reth::cli", "Spawning payload builder 
service"); @@ -694,23 +701,20 @@ impl NodeCommand { /// Spawns the configured network and associated tasks and returns the [NetworkHandle] connected /// to that network. - async fn start_network( + fn start_network( &self, - config: NetworkConfig, + builder: NetworkBuilder, task_executor: &TaskExecutor, pool: Pool, + client: C, default_peers_path: PathBuf, - ) -> Result + ) -> NetworkHandle where C: BlockReader + HeaderProvider + Clone + Unpin + 'static, Pool: TransactionPool + Unpin + 'static, { - let client = config.client.clone(); - let (handle, network, txpool, eth) = NetworkManager::builder(config) - .await? - .transactions(pool) - .request_handler(client) - .split_with_handle(); + let (handle, network, txpool, eth) = + builder.transactions(pool).request_handler(client).split_with_handle(); task_executor.spawn_critical("p2p txpool", txpool); task_executor.spawn_critical("p2p eth request handler", eth); @@ -720,7 +724,7 @@ impl NodeCommand { run_network_until_shutdown(shutdown, network, known_peers_file) }); - Ok(handle) + handle } /// Fetches the head block from the database. diff --git a/crates/net/network/src/builder.rs b/crates/net/network/src/builder.rs index efe17ec5a4c80..05c84b7da9b11 100644 --- a/crates/net/network/src/builder.rs +++ b/crates/net/network/src/builder.rs @@ -28,6 +28,21 @@ impl NetworkBuilder { (network, transactions, request_handler) } + /// Returns the network manager. + pub fn network(&self) -> &NetworkManager { + &self.network + } + + /// Returns the mutable network manager. + pub fn network_mut(&mut self) -> &mut NetworkManager { + &mut self.network + } + + /// Returns the handle to the network. + pub fn handle(&self) -> NetworkHandle { + self.network.handle().clone() + } + /// Consumes the type and returns all fields and also return a [`NetworkHandle`]. pub fn split_with_handle(self) -> (NetworkHandle, NetworkManager, Tx, Eth) { let NetworkBuilder { network, transactions, request_handler } = self; diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index 7df8addb21015..13d57025314d6 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -26,6 +26,7 @@ use crate::{ metrics::{DisconnectMetrics, NetworkMetrics, NETWORK_POOL_TRANSACTIONS_SCOPE}, network::{NetworkHandle, NetworkHandleMessage}, peers::{PeersHandle, PeersManager}, + protocol::IntoRlpxSubProtocol, session::SessionManager, state::NetworkState, swarm::{NetworkConnectionState, Swarm, SwarmEvent}, @@ -142,6 +143,11 @@ impl NetworkManager { self.to_eth_request_handler = Some(tx); } + /// Adds an additional protocol handler to the RLPx sub-protocol list. + pub fn add_rlpx_sub_protocol(&mut self, protocol: impl IntoRlpxSubProtocol) { + self.swarm.add_rlpx_sub_protocol(protocol) + } + /// Returns the [`NetworkHandle`] that can be cloned and shared. /// /// The [`NetworkHandle`] can be used to interact with this [`NetworkManager`] diff --git a/crates/net/network/src/protocol.rs b/crates/net/network/src/protocol.rs index 24dd68690422b..1ba6464defa7c 100644 --- a/crates/net/network/src/protocol.rs +++ b/crates/net/network/src/protocol.rs @@ -116,6 +116,12 @@ where } } +impl IntoRlpxSubProtocol for RlpxSubProtocol { + fn into_rlpx_sub_protocol(self) -> RlpxSubProtocol { + self + } +} + /// Additional RLPx-based sub-protocols. 
diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs
index 0e6fdcd7fbe41..863964ac97ed9 100644
--- a/crates/net/network/src/session/mod.rs
+++ b/crates/net/network/src/session/mod.rs
@@ -48,7 +48,7 @@ pub use handle::{
     SessionCommand,
 };
 
-use crate::protocol::RlpxSubProtocols;
+use crate::protocol::{IntoRlpxSubProtocol, RlpxSubProtocols};
 pub use reth_network_api::{Direction, PeerInfo};
 
 /// Internal identifier for active sessions.
@@ -103,7 +103,6 @@ pub struct SessionManager {
     /// Receiver half that listens for [`ActiveSessionMessage`] produced by pending sessions.
     active_session_rx: ReceiverStream<ActiveSessionMessage>,
     /// Additional RLPx sub-protocols to be used by the session manager.
-    #[allow(unused)]
     extra_protocols: RlpxSubProtocols,
     /// Used to measure inbound & outbound bandwidth across all managed streams
     bandwidth_meter: BandwidthMeter,
@@ -176,6 +175,11 @@ impl SessionManager {
         self.hello_message.clone()
     }
 
+    /// Adds an additional protocol handler to the RLPx sub-protocol list.
+    pub(crate) fn add_rlpx_sub_protocol(&mut self, protocol: impl IntoRlpxSubProtocol) {
+        self.extra_protocols.push(protocol)
+    }
+
     /// Spawns the given future onto a new task that is tracked in the `spawned_tasks`
     /// [`JoinSet`](tokio::task::JoinSet).
     fn spawn<F>(&self, f: F)
diff --git a/crates/net/network/src/swarm.rs b/crates/net/network/src/swarm.rs
index 9f32efd168520..ce647fe181e87 100644
--- a/crates/net/network/src/swarm.rs
+++ b/crates/net/network/src/swarm.rs
@@ -2,6 +2,7 @@ use crate::{
     listener::{ConnectionListener, ListenerEvent},
     message::{PeerMessage, PeerRequestSender},
     peers::InboundConnectionError,
+    protocol::IntoRlpxSubProtocol,
     session::{Direction, PendingSessionHandshakeError, SessionEvent, SessionId, SessionManager},
     state::{NetworkState, StateAction},
 };
@@ -76,10 +77,7 @@ pub(crate) struct Swarm<C> {
 
 // === impl Swarm ===
 
-impl<C> Swarm<C>
-where
-    C: BlockNumReader,
-{
+impl<C> Swarm<C> {
     /// Configures a new swarm instance.
     pub(crate) fn new(
         incoming: ConnectionListener,
@@ -90,6 +88,11 @@ where
         Self { incoming, sessions, state, net_connection_state }
     }
 
+    /// Adds an additional protocol handler to the RLPx sub-protocol list.
+    pub(crate) fn add_rlpx_sub_protocol(&mut self, protocol: impl IntoRlpxSubProtocol) {
+        self.sessions_mut().add_rlpx_sub_protocol(protocol);
+    }
+
     /// Access to the state.
     pub(crate) fn state(&self) -> &NetworkState {
         &self.state
@@ -114,7 +117,12 @@
     pub(crate) fn sessions_mut(&mut self) -> &mut SessionManager {
         &mut self.sessions
    }
+}
 
+impl<C> Swarm<C>
+where
+    C: BlockNumReader,
+{
     /// Triggers a new outgoing connection to the given node
     pub(crate) fn dial_outbound(&mut self, remote_addr: SocketAddr, remote_id: PeerId) {
         self.sessions.dial_outbound(remote_addr, remote_id)
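Note: only the manager-level method is public; the `Swarm` and `SessionManager` hops above are `pub(crate)`, so external callers always enter through `NetworkManager` (or a `RethNetworkConfig` wrapping it). An illustrative free function — not part of the patch — showing that single public entry point:

use reth_network::{protocol::IntoRlpxSubProtocol, NetworkManager};

/// Illustrative only: registration always funnels through the manager.
fn register_extra_protocol<C>(
    manager: &mut NetworkManager<C>,
    protocol: impl IntoRlpxSubProtocol,
) {
    // Internally: NetworkManager -> Swarm -> SessionManager -> RlpxSubProtocols::push
    manager.add_rlpx_sub_protocol(protocol);
}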