From c8e0e746483e332efb5784f513553c0ab2cb68d8 Mon Sep 17 00:00:00 2001 From: tgmichel Date: Tue, 20 Sep 2022 09:52:54 +0200 Subject: [PATCH 01/17] WIP Frontier DB block mapping one-to-many --- Cargo.lock | 3 + client/db/Cargo.toml | 8 +- client/db/src/lib.rs | 23 ++- client/db/src/upgrade.rs | 262 ++++++++++++++++++++++++++++++ client/db/src/utils.rs | 24 ++- client/rpc/src/eth/block.rs | 4 +- client/rpc/src/eth/filter.rs | 2 +- client/rpc/src/eth/transaction.rs | 6 +- client/rpc/src/lib.rs | 132 ++++++++++++++- ts-tests/tests/test-block.ts | 2 +- ts-tests/tests/util.ts | 2 +- 11 files changed, 440 insertions(+), 28 deletions(-) create mode 100644 client/db/src/upgrade.rs diff --git a/Cargo.lock b/Cargo.lock index fb19b9461d..d01e95592f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1648,6 +1648,7 @@ version = "2.0.0-dev" dependencies = [ "fp-storage", "kvdb-rocksdb", + "log", "parity-db", "parity-scale-codec", "parking_lot 0.12.1", @@ -1655,6 +1656,8 @@ dependencies = [ "sp-core", "sp-database", "sp-runtime", + "substrate-test-runtime-client", + "tempfile", ] [[package]] diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index 79993e30fa..f4f17a6fb4 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -12,6 +12,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] parking_lot = "0.12.1" +log = "0.4.17" # Parity codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } @@ -28,4 +29,9 @@ sp-runtime = { version = "6.0.0", git = "https://github.com/paritytech/substrate fp-storage = { version = "2.0.0-dev", path = "../../primitives/storage" } [features] -default = ["kvdb-rocksdb", "parity-db"] +default = ["kvdb-rocksdb", "parity-db", "sc-client-db/rocksdb"] + +[dev-dependencies] +tempfile = "3.3.0" +substrate-test-runtime-client = { version = "2.0.0", git = "https://github.com/paritytech/substrate", branch = "master" } + diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 3caa87d382..fb08030e9a 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -19,6 +19,7 @@ #[cfg(feature = "parity-db")] mod parity_db_adapter; mod utils; +mod upgrade; use std::{ marker::PhantomData, @@ -91,7 +92,7 @@ impl Backend { } pub fn new(config: &DatabaseSettings) -> Result { - let db = utils::open_database(config)?; + let db = utils::open_database::(config)?; Ok(Self { mapping: Arc::new(MappingDb { @@ -212,14 +213,16 @@ impl MappingDb { } } - pub fn block_hash(&self, ethereum_block_hash: &H256) -> Result, String> { + pub fn block_hash(&self, ethereum_block_hash: &H256) -> Result>, String> { match self .db .get(crate::columns::BLOCK_MAPPING, ðereum_block_hash.encode()) { - Some(raw) => Ok(Some( - Block::Hash::decode(&mut &raw[..]).map_err(|e| format!("{:?}", e))?, - )), + Some(raw) => { + Ok(Some( + Vec::::decode(&mut &raw[..]).map_err(|e| format!("{:?}", e))?, + )) + }, None => Ok(None), } } @@ -261,10 +264,18 @@ impl MappingDb { let mut transaction = sp_database::Transaction::new(); + let mut substrate_hashes = match self.block_hash(&commitment.ethereum_block_hash) { + Ok(Some(mut data)) => { + data.push(commitment.block_hash); + data + }, + _ => vec![commitment.block_hash] + }; + transaction.set( crate::columns::BLOCK_MAPPING, &commitment.ethereum_block_hash.encode(), - &commitment.block_hash.encode(), + &substrate_hashes.encode(), ); for (i, ethereum_transaction_hash) in commitment diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs new file mode 100644 index 0000000000..4d7e8f4dec --- /dev/null +++ b/client/db/src/upgrade.rs @@ 
-0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+// This file is part of Frontier.
+//
+// Copyright (c) 2020-2022 Parity Technologies (UK) Ltd.
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+use sp_runtime::traits::Block as BlockT;
+use codec::{Decode, Encode};
+
+use crate::{Database, DatabaseSettings, DatabaseSource, DbHash};
+
+use std::{
+	fmt, fs,
+	io::{self, ErrorKind, Read, Write},
+	path::{Path, PathBuf},
+};
+
+/// Version file name.
+const VERSION_FILE_NAME: &str = "db_version";
+
+/// Current db version.
+const CURRENT_VERSION: u32 = 2;
+
+/// Number of columns in each version.
+const _V1_NUM_COLUMNS: u32 = 4;
+const V2_NUM_COLUMNS: u32 = 4;
+
+/// Database upgrade errors.
+#[derive(Debug)]
+pub(crate) enum UpgradeError {
+	/// Database version cannot be read from existing db_version file.
+	UnknownDatabaseVersion,
+	/// Missing database version file.
+	MissingDatabaseVersionFile,
+	/// Database version no longer supported.
+	UnsupportedVersion(u32),
+	/// Database version comes from future version of the client.
+	FutureDatabaseVersion(u32),
+	/// Common io error.
+	Io(io::Error),
+}
+
+pub(crate) type UpgradeResult<T> = Result<T, UpgradeError>;
+
+struct UpgradeVersion1To2Summary {
+	pub success: u32,
+	pub error: Vec<sp_core::H256>,
+}
+
+impl From<io::Error> for UpgradeError {
+	fn from(err: io::Error) -> Self {
+		UpgradeError::Io(err)
+	}
+}
+
+impl fmt::Display for UpgradeError {
+	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+		match self {
+			UpgradeError::UnknownDatabaseVersion => {
+				write!(f, "Database version cannot be read from existing db_version file")
+			},
+			UpgradeError::MissingDatabaseVersionFile => write!(f, "Missing database version file"),
+			UpgradeError::UnsupportedVersion(version) => {
+				write!(f, "Database version no longer supported: {}", version)
+			},
+			UpgradeError::FutureDatabaseVersion(version) => {
+				write!(f, "Database version comes from future version of the client: {}", version)
+			},
+			UpgradeError::Io(err) => write!(f, "Io error: {}", err),
+		}
+	}
+}
+
+/// Upgrade database to current version.
+pub(crate) fn upgrade_db<Block: BlockT>(db_path: &Path) -> UpgradeResult<()> {
+	let db_version = current_version(db_path)?;
+	match db_version {
+		0 => return Err(UpgradeError::UnsupportedVersion(db_version)),
+		1 => {
+			let summary = migrate_1_to_2::<Block>(db_path)?;
+			if summary.error.len() > 0 {
+				panic!("Inconsistent migration from version 1 to 2. Failed on {:?}", summary.error);
+			} else {
+				log::info!("✔️ Successful Frontier DB migration from version 1 to version 2 ({:?} entries).", summary.success);
+			}
+		},
+		CURRENT_VERSION => (),
+		_ => return Err(UpgradeError::FutureDatabaseVersion(db_version)),
+	}
+	update_version(db_path)?;
+	Ok(())
+}
+
+/// Reads current database version from the file at given path.
+/// If the file does not exist it gets created with version 1.
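[Editor's note — annotation, not part of the patch. The doc comment above belongs to `current_version`, defined next: the version handshake is a plain-text `db_version` file that `upgrade_db` reads before migrating and the caller rewrites afterwards. A minimal, self-contained sketch of the same round-trip, std only; the `db_version` file name mirrors VERSION_FILE_NAME, the directory name and the defensive `.trim()` are illustrative extras, not the patch's exact code:

	use std::{fs, io::Write, path::Path};

	fn write_version(dir: &Path, v: u32) -> std::io::Result<()> {
	    fs::create_dir_all(dir)?;
	    // the file holds nothing but the decimal version digits
	    fs::File::create(dir.join("db_version"))?.write_all(v.to_string().as_bytes())
	}

	fn read_version(dir: &Path) -> Option<u32> {
	    fs::read_to_string(dir.join("db_version")).ok()?.trim().parse().ok()
	}

	fn main() -> std::io::Result<()> {
	    let dir = std::env::temp_dir().join("frontier_db_version_demo");
	    write_version(&dir, 2)?;
	    assert_eq!(read_version(&dir), Some(2));
	    Ok(())
	}

End of editor's note.]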
+pub(crate) fn current_version(path: &Path) -> UpgradeResult { + match fs::File::open(version_file_path(path)) { + Err(ref err) if err.kind() == ErrorKind::NotFound => { + fs::create_dir_all(path)?; + let mut file = fs::File::create(version_file_path(path))?; + file.write_all(format!("{}", 1).as_bytes())?; + Ok(1u32) + }, + Err(_) => Err(UpgradeError::UnknownDatabaseVersion), + Ok(mut file) => { + let mut s = String::new(); + file.read_to_string(&mut s).map_err(|_| UpgradeError::UnknownDatabaseVersion)?; + u32::from_str_radix(&s, 10).map_err(|_| UpgradeError::UnknownDatabaseVersion) + }, + } +} + +/// Writes current database version to the file. +/// Creates a new file if the version file does not exist yet. +pub(crate) fn update_version(path: &Path) -> io::Result<()> { + fs::create_dir_all(path)?; + let mut file = fs::File::create(version_file_path(path))?; + file.write_all(format!("{}", CURRENT_VERSION).as_bytes())?; + Ok(()) +} + +/// Returns the version file path. +fn version_file_path(path: &Path) -> PathBuf { + let mut file_path = path.to_owned(); + file_path.push(VERSION_FILE_NAME); + file_path +} + +/// Migration from version1 to version2: +/// - The format of the Ethereum<>Substrate block mapping changed to support equivocation. +/// - Migrating schema from One-to-one to One-to-many (EthHash: Vec) relationship. +pub(crate) fn migrate_1_to_2(db_path: &Path) -> UpgradeResult { + log::info!("🔨 Running Frontier DB migration from version 1 to version 2. Please wait."); + let mut res = UpgradeVersion1To2Summary { + success: 0, + error: vec![], + }; + // Process a batch of hashes in a single db transaction + let mut process_chunk = |db: &kvdb_rocksdb::Database, ethereum_hashes: &[std::boxed::Box<[u8]>]| -> UpgradeResult<()> { + let mut transaction = db.transaction(); + for ethereum_hash in ethereum_hashes { + if let Some(substrate_hash) = db.get(crate::columns::BLOCK_MAPPING, ethereum_hash)? 
{ + // Only update version1 data + let decoded = Vec::::decode(&mut &substrate_hash[..]); + if decoded.is_err() || decoded.unwrap().is_empty() { + let mut hashes = Vec::new(); + hashes.push(sp_core::H256::from_slice(&substrate_hash[..])); + transaction.put_vec(crate::columns::BLOCK_MAPPING, ethereum_hash, hashes.encode()); + res.success = res.success + 1; + } else { + res.error.push(sp_core::H256::from_slice(ethereum_hash)); + } + } else { + res.error.push(sp_core::H256::from_slice(ethereum_hash)); + } + } + db.write(transaction).map_err(|_| + io::Error::new(ErrorKind::Other, "Failed to commit on migrate_1_to_2") + )?; + Ok(()) + }; + + let db_cfg = kvdb_rocksdb::DatabaseConfig::with_columns(V2_NUM_COLUMNS); + let db = kvdb_rocksdb::Database::open(&db_cfg, db_path)?; + + // Get all the block hashes we need to update + let ethereum_hashes: Vec<_> = db.iter(crate::columns::BLOCK_MAPPING).map(|entry| entry.0).collect(); + + // Read and update each entry in db transaction batches + const CHUNK_SIZE: usize = 10_000; + let chunks = ethereum_hashes.chunks(CHUNK_SIZE); + for chunk in chunks { + process_chunk(&db, chunk)?; + } + Ok(res) +} + +#[cfg(test)] +mod tests { + + use std::{collections::HashMap, path::PathBuf, sync::Arc}; + + use codec::Encode; + use sp_core::H256; + use sp_runtime::{ + generic::{Block, BlockId, Header}, + traits::{BlakeTwo256, Block as BlockT}, + }; + use std::str::FromStr; + use tempfile::tempdir; + + type OpaqueBlock = + Block, substrate_test_runtime_client::runtime::Extrinsic>; + + pub fn open_frontier_backend( + path: PathBuf, + ) -> Result>, String> { + Ok(Arc::new(crate::Backend::::new( + &crate::DatabaseSettings { + source: sc_client_db::DatabaseSource::RocksDb { + path, + cache_size: 0, + }, + }, + )?)) + } + + #[test] + fn upgrade_1_to_2_works() { + let tmp = tempdir().expect("create a temporary directory"); + let path = tmp.path().to_owned(); + let mut ethereum_hashes = vec![]; + let mut substrate_hashes = vec![]; + { + // Create a temporary frontier secondary DB. 
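[Editor's note — annotation, not part of the patch. The migration above walks the BLOCK_MAPPING column in fixed batches of 10,000 keys, one kvdb transaction per batch, so memory stays bounded on large databases. That is also why the test below inserts 20,010 rows: it forces two full chunks plus a remainder. A tiny runnable sketch of just the batching arithmetic, plain Rust, no database involved:

	fn main() {
	    const CHUNK_SIZE: usize = 10_000;
	    let keys: Vec<u32> = (0..20_010).collect();
	    // chunks() yields CHUNK_SIZE-sized slices plus one final short slice
	    let batch_sizes: Vec<usize> = keys.chunks(CHUNK_SIZE).map(|c| c.len()).collect();
	    assert_eq!(batch_sizes, vec![10_000, 10_000, 10]);
	}

End of editor's note.]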
+ let backend = open_frontier_backend(path.clone()).expect("a temporary db was created"); + + // Fill the tmp db with some data + let mut transaction = sp_database::Transaction::new(); + for n in 0..20_010 { + let ethhash = H256::random(); + let subhash = H256::random(); + ethereum_hashes.push(ethhash); + substrate_hashes.push(subhash); + transaction.set( + crate::columns::BLOCK_MAPPING, + ðhash.encode(), + &subhash.encode(), + ); + } + let _ = backend.mapping().db.commit(transaction); + + } + // Upgrade database from version 1 to 2 + let _ = super::upgrade_db::(&path); + + // Check data + let backend = open_frontier_backend(path.clone()).expect("a temporary db was created"); + for (i, original_ethereum_hash) in ethereum_hashes.iter().enumerate() { + let entry = backend.mapping().block_hash(original_ethereum_hash).unwrap().unwrap(); + // All entries now hold a single element Vec + assert_eq!(entry.len(), 1); + // The Vec holds the old value + assert_eq!(entry.first(), substrate_hashes.get(i)); + } + + // Upgrade db version file + assert_eq!(super::update_version(&path), Ok(2u32)); + } +} diff --git a/client/db/src/utils.rs b/client/db/src/utils.rs index 37542e9ca4..ccb651a37b 100644 --- a/client/db/src/utils.rs +++ b/client/db/src/utils.rs @@ -18,17 +18,19 @@ use std::{path::Path, sync::Arc}; +use sp_runtime::traits::Block as BlockT; + use crate::{Database, DatabaseSettings, DatabaseSource, DbHash}; -pub fn open_database(config: &DatabaseSettings) -> Result>, String> { +pub fn open_database(config: &DatabaseSettings) -> Result>, String> { let db: Arc> = match &config.source { DatabaseSource::ParityDb { path } => open_parity_db(path)?, - DatabaseSource::RocksDb { path, .. } => open_kvdb_rocksdb(path, true)?, + DatabaseSource::RocksDb { path, .. } => open_kvdb_rocksdb::(path, true)?, DatabaseSource::Auto { paritydb_path, rocksdb_path, .. 
-	} => match open_kvdb_rocksdb(rocksdb_path, false) {
+	} => match open_kvdb_rocksdb::<Block>(rocksdb_path, false) {
 			Ok(db) => db,
 			Err(_) => open_parity_db(paritydb_path)?,
 		},
@@ -38,15 +40,21 @@ pub fn open_database(config: &DatabaseSettings) -> Result<Arc<dyn Database<DbHash>>, String>
-fn open_kvdb_rocksdb(path: &Path, create: bool) -> Result<Arc<dyn Database<DbHash>>, String> {
+fn open_kvdb_rocksdb<Block: BlockT>(path: &Path, create: bool) -> Result<Arc<dyn Database<DbHash>>, String> {
+	// first upgrade database to required version
+	match crate::upgrade::upgrade_db::<Block>(path) {
+		// in case of missing version file, assume that database simply does not exist at given
+		// location
+		Ok(_) | Err(crate::upgrade::UpgradeError::MissingDatabaseVersionFile) => (),
+		Err(err) => return Err("Todo".to_string()),
+	}
+
 	let mut db_config = kvdb_rocksdb::DatabaseConfig::with_columns(crate::columns::NUM_COLUMNS);
 	db_config.create_if_missing = create;
-	let path = path
-		.to_str()
-		.ok_or_else(|| "Invalid database path".to_string())?;
-	let db = kvdb_rocksdb::Database::open(&db_config, &path).map_err(|err| format!("{}", err))?;
+	// write database version only after the database is successfully opened
+	crate::upgrade::update_version(path).map_err(|_| "Cannot update db version".to_string());
 	return Ok(sp_database::as_database(db));
 }
diff --git a/client/rpc/src/eth/block.rs b/client/rpc/src/eth/block.rs
index 821ec9f4ad..2d64a7b395 100644
--- a/client/rpc/src/eth/block.rs
+++ b/client/rpc/src/eth/block.rs
@@ -48,7 +48,7 @@ where
 		let block_data_cache = Arc::clone(&self.block_data_cache);
 		let backend = Arc::clone(&self.backend);
-		let id = match frontier_backend_client::load_hash::<B>(backend.as_ref(), hash)
+		let id = match frontier_backend_client::load_hash::<B, C>(client.as_ref(), backend.as_ref(), hash)
 			.map_err(|err| internal_err(format!("{:?}", err)))?
 		{
 			Some(hash) => hash,
@@ -137,7 +137,7 @@ where
 	}
 
 	pub fn block_transaction_count_by_hash(&self, hash: H256) -> Result<Option<U256>> {
-		let id = match frontier_backend_client::load_hash::<B>(self.backend.as_ref(), hash)
+		let id = match frontier_backend_client::load_hash::<B, C>(self.client.as_ref(), self.backend.as_ref(), hash)
 			.map_err(|err| internal_err(format!("{:?}", err)))?
 		{
 			Some(hash) => hash,
diff --git a/client/rpc/src/eth/filter.rs b/client/rpc/src/eth/filter.rs
index 6e0dd142f7..eb1ae39273 100644
--- a/client/rpc/src/eth/filter.rs
+++ b/client/rpc/src/eth/filter.rs
@@ -363,7 +363,7 @@ where
 		let mut ret: Vec<Log> = Vec::new();
 		if let Some(hash) = filter.block_hash {
-			let id = match frontier_backend_client::load_hash::<B>(backend.as_ref(), hash)
+			let id = match frontier_backend_client::load_hash::<B, C>(client.as_ref(), backend.as_ref(), hash)
 				.map_err(|err| internal_err(format!("{:?}", err)))?
 			{
 				Some(hash) => hash,
diff --git a/client/rpc/src/eth/transaction.rs b/client/rpc/src/eth/transaction.rs
index 980416ff4d..b1c466d4ab 100644
--- a/client/rpc/src/eth/transaction.rs
+++ b/client/rpc/src/eth/transaction.rs
@@ -127,7 +127,7 @@ where
 			}
 		};
-		let id = match frontier_backend_client::load_hash::<B>(backend.as_ref(), hash)
+		let id = match frontier_backend_client::load_hash::<B, C>(client.as_ref(), backend.as_ref(), hash)
 			.map_err(|err| internal_err(format!("{:?}", err)))?
 		{
 			Some(hash) => hash,
@@ -172,7 +172,7 @@ where
 		let block_data_cache = Arc::clone(&self.block_data_cache);
 		let backend = Arc::clone(&self.backend);
-		let id = match frontier_backend_client::load_hash::<B>(backend.as_ref(), hash)
+		let id = match frontier_backend_client::load_hash::<B, C>(client.as_ref(), backend.as_ref(), hash)
 			.map_err(|err| internal_err(format!("{:?}", err)))?
{ Some(hash) => hash, @@ -291,7 +291,7 @@ where None => return Ok(None), }; - let id = match frontier_backend_client::load_hash::(backend.as_ref(), hash) + let id = match frontier_backend_client::load_hash::(client.as_ref(), backend.as_ref(), hash) .map_err(|err| internal_err(format!("{:?}", err)))? { Some(hash) => hash, diff --git a/client/rpc/src/lib.rs b/client/rpc/src/lib.rs index 5f9db08e6c..960782a6e9 100644 --- a/client/rpc/src/lib.rs +++ b/client/rpc/src/lib.rs @@ -76,7 +76,7 @@ pub mod frontier_backend_client { C: HeaderBackend + Send + Sync + 'static, { Ok(match number.unwrap_or(BlockNumber::Latest) { - BlockNumber::Hash { hash, .. } => load_hash::(backend, hash).unwrap_or(None), + BlockNumber::Hash { hash, .. } => load_hash::(client, backend, hash).unwrap_or(None), BlockNumber::Num(number) => Some(BlockId::Number(number.unique_saturated_into())), BlockNumber::Latest => Some(BlockId::Hash(client.info().best_hash)), BlockNumber::Earliest => Some(BlockId::Number(Zero::zero())), @@ -84,20 +84,26 @@ pub mod frontier_backend_client { }) } - pub fn load_hash( + pub fn load_hash( + client: &C, backend: &fc_db::Backend, hash: H256, ) -> RpcResult>> where B: BlockT + Send + Sync + 'static, + C: HeaderBackend + Send + Sync + 'static, { - let substrate_hash = backend + let substrate_hashes = backend .mapping() .block_hash(&hash) .map_err(|err| internal_err(format!("fetch aux store failed: {:?}", err)))?; - if let Some(substrate_hash) = substrate_hash { - return Ok(Some(BlockId::Hash(substrate_hash))); + if let Some(substrate_hashes) = substrate_hashes { + for substrate_hash in substrate_hashes { + if is_canon::(client, substrate_hash) { + return Ok(Some(BlockId::Hash(substrate_hash))); + } + } } Ok(None) } @@ -244,3 +250,119 @@ pub fn public_key(transaction: &EthereumTransaction) -> Result<[u8; 64], sp_io:: } sp_io::crypto::secp256k1_ecdsa_recover(&sig, &msg) } + +#[cfg(test)] +mod tests { + use std::{path::PathBuf, sync::Arc}; + + use frontier_template_runtime::RuntimeApi; + use futures::executor; + use sc_block_builder::BlockBuilderProvider; + use sp_consensus::BlockOrigin; + use sp_runtime::{ + generic::{Block, BlockId, Header}, + traits::BlakeTwo256, + }; + use substrate_test_runtime_client::{ + prelude::*, DefaultTestClientBuilderExt, TestClientBuilder, + }; + use tempfile::tempdir; + + type OpaqueBlock = + Block, substrate_test_runtime_client::runtime::Extrinsic>; + + fn open_frontier_backend( + path: PathBuf, + ) -> Result>, String> { + Ok(Arc::new(fc_db::Backend::::new( + &fc_db::DatabaseSettings { + source: sc_client_db::DatabaseSource::RocksDb { + path, + cache_size: 0, + }, + }, + )?)) + } + + #[test] + fn substrate_block_hash_one_to_many_works() { + let tmp = tempdir().expect("create a temporary directory"); + let (client, _) = + TestClientBuilder::new().build_with_native_executor::(None); + + let mut client = Arc::new(client); + + // Create a temporary frontier secondary DB. + let frontier_backend = open_frontier_backend(tmp.into_path()).unwrap(); + + // A random ethereum block hash to use + let ethereum_block_hash = sp_core::H256::random(); + + // G -> A1. 
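[Editor's note — annotation, not part of the patch. The test below exercises the new selection rule in `load_hash`: when several substrate blocks map to one ethereum block hash, the first candidate for which `is_canon` answers true wins (the `is_canon` helper itself is not shown in this hunk; it asks the client whether a hash sits on the best chain). The rule in isolation, with integers standing in for block hashes:

	fn pick_canon(candidates: &[u64], is_canon: impl Fn(u64) -> bool) -> Option<u64> {
	    // first canonical candidate wins, order of insertion preserved
	    candidates.iter().copied().find(|&h| is_canon(h))
	}

	fn main() {
	    // two substrate blocks claim the same ethereum hash; only 42 is canonical
	    assert_eq!(pick_canon(&[7, 42], |h| h == 42), Some(42));
	    // no canonical candidate: load_hash would return Ok(None)
	    assert_eq!(pick_canon(&[7, 42], |_| false), None);
	}

End of editor's note.]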
+ let mut builder = client.new_block(Default::default()).unwrap(); + builder.push_storage_change(vec![1], None).unwrap(); + let a1 = builder.build().unwrap().block; + let a1_hash = a1.header.hash(); + executor::block_on(client.import(BlockOrigin::Own, a1)).unwrap(); + + // A1 -> B1 + let mut builder = client + .new_block_at(&BlockId::Hash(a1_hash), Default::default(), false) + .unwrap(); + builder.push_storage_change(vec![1], None).unwrap(); + let b1 = builder.build().unwrap().block; + let b1_hash = b1.header.hash(); + executor::block_on(client.import(BlockOrigin::Own, b1)).unwrap(); + + // Map B1 + let commitment = fc_db::MappingCommitment:: { + block_hash: b1_hash, + ethereum_block_hash: ethereum_block_hash, + ethereum_transaction_hashes: vec![], + }; + let _ = frontier_backend.mapping().write_hashes(commitment); + + // Expect B1 to be canon + assert_eq!( + super::frontier_backend_client::load_hash(client.as_ref(), frontier_backend.as_ref(), ethereum_block_hash).unwrap().unwrap(), + BlockId::Hash(b1_hash), + ); + + // A1 -> B2 + let mut builder = client + .new_block_at(&BlockId::Hash(a1_hash), Default::default(), false) + .unwrap(); + builder.push_storage_change(vec![2], None).unwrap(); + let b2 = builder.build().unwrap().block; + let b2_hash = b2.header.hash(); + executor::block_on(client.import(BlockOrigin::Own, b2)).unwrap(); + + // Map B2 to same ethereum hash + let commitment = fc_db::MappingCommitment:: { + block_hash: b2_hash, + ethereum_block_hash: ethereum_block_hash, + ethereum_transaction_hashes: vec![], + }; + let _ = frontier_backend.mapping().write_hashes(commitment); + + // Still expect B1 to be canon + assert_eq!( + super::frontier_backend_client::load_hash(client.as_ref(), frontier_backend.as_ref(), ethereum_block_hash).unwrap().unwrap(), + BlockId::Hash(b1_hash), + ); + + // B2 -> C1. B2 branch is now canon. + let mut builder = client + .new_block_at(&BlockId::Hash(b2_hash), Default::default(), false) + .unwrap(); + builder.push_storage_change(vec![1], None).unwrap(); + let c1 = builder.build().unwrap().block; + executor::block_on(client.import(BlockOrigin::Own, c1)).unwrap(); + + // Expect B2 to be new canon + assert_eq!( + super::frontier_backend_client::load_hash(client.as_ref(), frontier_backend.as_ref(), ethereum_block_hash).unwrap().unwrap(), + BlockId::Hash(b2_hash), + ); + } +} diff --git a/ts-tests/tests/test-block.ts b/ts-tests/tests/test-block.ts index 455bb0be21..e676d84675 100644 --- a/ts-tests/tests/test-block.ts +++ b/ts-tests/tests/test-block.ts @@ -2,7 +2,7 @@ import { expect } from "chai"; import { step } from "mocha-steps"; import { BLOCK_TIMESTAMP, BLOCK_GAS_LIMIT } from "./config"; -import { createAndFinalizeBlock, describeWithFrontier } from "./util"; +import { createAndFinalizeBlock, describeWithFrontier, customRequest } from "./util"; describeWithFrontier("Frontier RPC (Block)", (context) => { let previousBlock; diff --git a/ts-tests/tests/util.ts b/ts-tests/tests/util.ts index 9099dca964..058a3597ea 100644 --- a/ts-tests/tests/util.ts +++ b/ts-tests/tests/util.ts @@ -51,7 +51,7 @@ export async function createAndFinalizeBlock(web3: Web3) { // Create a block and finalize it. // It will include all previously executed transactions since the last finalized block. 
-export async function createAndFinalizeBlockNowait(web3: Web3) { +export async function createAndFinalizeBlockNowait(web3: Web3, parent: any) { const response = await customRequest(web3, "engine_createBlock", [true, true, null]); if (!response.result) { throw new Error(`Unexpected result: ${JSON.stringify(response)}`); From ca528e9980becfaa630df0334ad8cf936ecc0fa9 Mon Sep 17 00:00:00 2001 From: tgmichel Date: Tue, 20 Sep 2022 09:58:39 +0200 Subject: [PATCH 02/17] Fix --- ts-tests/tests/test-block.ts | 2 +- ts-tests/tests/util.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ts-tests/tests/test-block.ts b/ts-tests/tests/test-block.ts index e676d84675..455bb0be21 100644 --- a/ts-tests/tests/test-block.ts +++ b/ts-tests/tests/test-block.ts @@ -2,7 +2,7 @@ import { expect } from "chai"; import { step } from "mocha-steps"; import { BLOCK_TIMESTAMP, BLOCK_GAS_LIMIT } from "./config"; -import { createAndFinalizeBlock, describeWithFrontier, customRequest } from "./util"; +import { createAndFinalizeBlock, describeWithFrontier } from "./util"; describeWithFrontier("Frontier RPC (Block)", (context) => { let previousBlock; diff --git a/ts-tests/tests/util.ts b/ts-tests/tests/util.ts index 058a3597ea..9099dca964 100644 --- a/ts-tests/tests/util.ts +++ b/ts-tests/tests/util.ts @@ -51,7 +51,7 @@ export async function createAndFinalizeBlock(web3: Web3) { // Create a block and finalize it. // It will include all previously executed transactions since the last finalized block. -export async function createAndFinalizeBlockNowait(web3: Web3, parent: any) { +export async function createAndFinalizeBlockNowait(web3: Web3) { const response = await customRequest(web3, "engine_createBlock", [true, true, null]); if (!response.result) { throw new Error(`Unexpected result: ${JSON.stringify(response)}`); From eb9a4a00f4c60ac4a55417f46859acacfba3f954 Mon Sep 17 00:00:00 2001 From: tgmichel Date: Tue, 20 Sep 2022 11:40:08 +0200 Subject: [PATCH 03/17] fmt --- client/db/src/lib.rs | 19 ++--- client/db/src/upgrade.rs | 124 ++++++++++++++++++------------ client/db/src/utils.rs | 9 ++- client/rpc/src/eth/block.rs | 16 +++- client/rpc/src/eth/filter.rs | 8 +- client/rpc/src/eth/transaction.rs | 24 ++++-- client/rpc/src/lib.rs | 36 ++++++--- 7 files changed, 154 insertions(+), 82 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index fb08030e9a..52e23af516 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -18,8 +18,8 @@ #[cfg(feature = "parity-db")] mod parity_db_adapter; -mod utils; mod upgrade; +mod utils; use std::{ marker::PhantomData, @@ -213,16 +213,17 @@ impl MappingDb { } } - pub fn block_hash(&self, ethereum_block_hash: &H256) -> Result>, String> { + pub fn block_hash( + &self, + ethereum_block_hash: &H256, + ) -> Result>, String> { match self .db .get(crate::columns::BLOCK_MAPPING, ðereum_block_hash.encode()) { - Some(raw) => { - Ok(Some( - Vec::::decode(&mut &raw[..]).map_err(|e| format!("{:?}", e))?, - )) - }, + Some(raw) => Ok(Some( + Vec::::decode(&mut &raw[..]).map_err(|e| format!("{:?}", e))?, + )), None => Ok(None), } } @@ -268,8 +269,8 @@ impl MappingDb { Ok(Some(mut data)) => { data.push(commitment.block_hash); data - }, - _ => vec![commitment.block_hash] + } + _ => vec![commitment.block_hash], }; transaction.set( diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs index 4d7e8f4dec..744f9f2ab9 100644 --- a/client/db/src/upgrade.rs +++ b/client/db/src/upgrade.rs @@ -16,8 +16,8 @@ // You should have received a copy of the GNU General Public 
License // along with this program. If not, see . -use sp_runtime::traits::Block as BlockT; use codec::{Decode, Encode}; +use sp_runtime::traits::Block as BlockT; use crate::{Database, DatabaseSettings, DatabaseSource, DbHash}; @@ -69,15 +69,22 @@ impl fmt::Display for UpgradeError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { UpgradeError::UnknownDatabaseVersion => { - write!(f, "Database version cannot be read from existing db_version file") - }, + write!( + f, + "Database version cannot be read from existing db_version file" + ) + } UpgradeError::MissingDatabaseVersionFile => write!(f, "Missing database version file"), UpgradeError::UnsupportedVersion(version) => { write!(f, "Database version no longer supported: {}", version) - }, + } UpgradeError::FutureDatabaseVersion(version) => { - write!(f, "Database version comes from future version of the client: {}", version) - }, + write!( + f, + "Database version comes from future version of the client: {}", + version + ) + } UpgradeError::Io(err) => write!(f, "Io error: {}", err), } } @@ -91,11 +98,14 @@ pub(crate) fn upgrade_db(db_path: &Path) -> UpgradeResult<()> { 1 => { let summary = migrate_1_to_2::(db_path)?; if summary.error.len() > 0 { - panic!("Inconsistent migration from version 1 to 2. Failed on {:?}", summary.error); + panic!( + "Inconsistent migration from version 1 to 2. Failed on {:?}", + summary.error + ); } else { log::info!("✔️ Successful Frontier DB migration from version 1 to version 2 ({:?} entries).", summary.success); } - }, + } CURRENT_VERSION => (), _ => return Err(UpgradeError::FutureDatabaseVersion(db_version)), } @@ -112,13 +122,14 @@ pub(crate) fn current_version(path: &Path) -> UpgradeResult { let mut file = fs::File::create(version_file_path(path))?; file.write_all(format!("{}", 1).as_bytes())?; Ok(1u32) - }, + } Err(_) => Err(UpgradeError::UnknownDatabaseVersion), Ok(mut file) => { let mut s = String::new(); - file.read_to_string(&mut s).map_err(|_| UpgradeError::UnknownDatabaseVersion)?; + file.read_to_string(&mut s) + .map_err(|_| UpgradeError::UnknownDatabaseVersion)?; u32::from_str_radix(&s, 10).map_err(|_| UpgradeError::UnknownDatabaseVersion) - }, + } } } @@ -141,14 +152,18 @@ fn version_file_path(path: &Path) -> PathBuf { /// Migration from version1 to version2: /// - The format of the Ethereum<>Substrate block mapping changed to support equivocation. /// - Migrating schema from One-to-one to One-to-many (EthHash: Vec) relationship. -pub(crate) fn migrate_1_to_2(db_path: &Path) -> UpgradeResult { +pub(crate) fn migrate_1_to_2( + db_path: &Path, +) -> UpgradeResult { log::info!("🔨 Running Frontier DB migration from version 1 to version 2. Please wait."); let mut res = UpgradeVersion1To2Summary { success: 0, error: vec![], }; // Process a batch of hashes in a single db transaction - let mut process_chunk = |db: &kvdb_rocksdb::Database, ethereum_hashes: &[std::boxed::Box<[u8]>]| -> UpgradeResult<()> { + let mut process_chunk = |db: &kvdb_rocksdb::Database, + ethereum_hashes: &[std::boxed::Box<[u8]>]| + -> UpgradeResult<()> { let mut transaction = db.transaction(); for ethereum_hash in ethereum_hashes { if let Some(substrate_hash) = db.get(crate::columns::BLOCK_MAPPING, ethereum_hash)? 
{ @@ -157,7 +172,11 @@ pub(crate) fn migrate_1_to_2(db_path: &Path) -> UpgradeResult(db_path: &Path) -> UpgradeResult = db.iter(crate::columns::BLOCK_MAPPING).map(|entry| entry.0).collect(); + // Get all the block hashes we need to update + let ethereum_hashes: Vec<_> = db + .iter(crate::columns::BLOCK_MAPPING) + .map(|entry| entry.0) + .collect(); - // Read and update each entry in db transaction batches + // Read and update each entry in db transaction batches const CHUNK_SIZE: usize = 10_000; let chunks = ethereum_hashes.chunks(CHUNK_SIZE); for chunk in chunks { @@ -217,44 +238,47 @@ mod tests { )?)) } - #[test] + #[test] fn upgrade_1_to_2_works() { let tmp = tempdir().expect("create a temporary directory"); - let path = tmp.path().to_owned(); - let mut ethereum_hashes = vec![]; - let mut substrate_hashes = vec![]; - { - // Create a temporary frontier secondary DB. - let backend = open_frontier_backend(path.clone()).expect("a temporary db was created"); - - // Fill the tmp db with some data - let mut transaction = sp_database::Transaction::new(); - for n in 0..20_010 { - let ethhash = H256::random(); - let subhash = H256::random(); - ethereum_hashes.push(ethhash); - substrate_hashes.push(subhash); - transaction.set( - crate::columns::BLOCK_MAPPING, - ðhash.encode(), - &subhash.encode(), - ); - } - let _ = backend.mapping().db.commit(transaction); + let path = tmp.path().to_owned(); + let mut ethereum_hashes = vec![]; + let mut substrate_hashes = vec![]; + { + // Create a temporary frontier secondary DB. + let backend = open_frontier_backend(path.clone()).expect("a temporary db was created"); - } - // Upgrade database from version 1 to 2 - let _ = super::upgrade_db::(&path); + // Fill the tmp db with some data + let mut transaction = sp_database::Transaction::new(); + for n in 0..20_010 { + let ethhash = H256::random(); + let subhash = H256::random(); + ethereum_hashes.push(ethhash); + substrate_hashes.push(subhash); + transaction.set( + crate::columns::BLOCK_MAPPING, + ðhash.encode(), + &subhash.encode(), + ); + } + let _ = backend.mapping().db.commit(transaction); + } + // Upgrade database from version 1 to 2 + let _ = super::upgrade_db::(&path); // Check data - let backend = open_frontier_backend(path.clone()).expect("a temporary db was created"); - for (i, original_ethereum_hash) in ethereum_hashes.iter().enumerate() { - let entry = backend.mapping().block_hash(original_ethereum_hash).unwrap().unwrap(); + let backend = open_frontier_backend(path.clone()).expect("a temporary db was created"); + for (i, original_ethereum_hash) in ethereum_hashes.iter().enumerate() { + let entry = backend + .mapping() + .block_hash(original_ethereum_hash) + .unwrap() + .unwrap(); // All entries now hold a single element Vec assert_eq!(entry.len(), 1); // The Vec holds the old value assert_eq!(entry.first(), substrate_hashes.get(i)); - } + } // Upgrade db version file assert_eq!(super::update_version(&path), Ok(2u32)); diff --git a/client/db/src/utils.rs b/client/db/src/utils.rs index ccb651a37b..ea1be4ec3c 100644 --- a/client/db/src/utils.rs +++ b/client/db/src/utils.rs @@ -22,7 +22,9 @@ use sp_runtime::traits::Block as BlockT; use crate::{Database, DatabaseSettings, DatabaseSource, DbHash}; -pub fn open_database(config: &DatabaseSettings) -> Result>, String> { +pub fn open_database( + config: &DatabaseSettings, +) -> Result>, String> { let db: Arc> = match &config.source { DatabaseSource::ParityDb { path } => open_parity_db(path)?, DatabaseSource::RocksDb { path, .. 
} => open_kvdb_rocksdb::(path, true)?, @@ -40,7 +42,10 @@ pub fn open_database(config: &DatabaseSettings) -> Result(path: &Path, create: bool) -> Result>, String> { +fn open_kvdb_rocksdb( + path: &Path, + create: bool, +) -> Result>, String> { // first upgrade database to required version match crate::upgrade::upgrade_db::(path) { // in case of missing version file, assume that database simply does not exist at given diff --git a/client/rpc/src/eth/block.rs b/client/rpc/src/eth/block.rs index 2d64a7b395..c182a8a774 100644 --- a/client/rpc/src/eth/block.rs +++ b/client/rpc/src/eth/block.rs @@ -48,8 +48,12 @@ where let block_data_cache = Arc::clone(&self.block_data_cache); let backend = Arc::clone(&self.backend); - let id = match frontier_backend_client::load_hash::(client.as_ref(), backend.as_ref(), hash) - .map_err(|err| internal_err(format!("{:?}", err)))? + let id = match frontier_backend_client::load_hash::( + client.as_ref(), + backend.as_ref(), + hash, + ) + .map_err(|err| internal_err(format!("{:?}", err)))? { Some(hash) => hash, _ => return Ok(None), @@ -137,8 +141,12 @@ where } pub fn block_transaction_count_by_hash(&self, hash: H256) -> Result> { - let id = match frontier_backend_client::load_hash::(self.client.as_ref(), self.backend.as_ref(), hash) - .map_err(|err| internal_err(format!("{:?}", err)))? + let id = match frontier_backend_client::load_hash::( + self.client.as_ref(), + self.backend.as_ref(), + hash, + ) + .map_err(|err| internal_err(format!("{:?}", err)))? { Some(hash) => hash, _ => return Ok(None), diff --git a/client/rpc/src/eth/filter.rs b/client/rpc/src/eth/filter.rs index eb1ae39273..2681611270 100644 --- a/client/rpc/src/eth/filter.rs +++ b/client/rpc/src/eth/filter.rs @@ -363,8 +363,12 @@ where let mut ret: Vec = Vec::new(); if let Some(hash) = filter.block_hash { - let id = match frontier_backend_client::load_hash::(client.as_ref(), backend.as_ref(), hash) - .map_err(|err| internal_err(format!("{:?}", err)))? + let id = match frontier_backend_client::load_hash::( + client.as_ref(), + backend.as_ref(), + hash, + ) + .map_err(|err| internal_err(format!("{:?}", err)))? { Some(hash) => hash, _ => return Ok(Vec::new()), diff --git a/client/rpc/src/eth/transaction.rs b/client/rpc/src/eth/transaction.rs index b1c466d4ab..d1c304c1f0 100644 --- a/client/rpc/src/eth/transaction.rs +++ b/client/rpc/src/eth/transaction.rs @@ -127,8 +127,12 @@ where } }; - let id = match frontier_backend_client::load_hash::(client.as_ref(), backend.as_ref(), hash) - .map_err(|err| internal_err(format!("{:?}", err)))? + let id = match frontier_backend_client::load_hash::( + client.as_ref(), + backend.as_ref(), + hash, + ) + .map_err(|err| internal_err(format!("{:?}", err)))? { Some(hash) => hash, _ => return Ok(None), @@ -172,8 +176,12 @@ where let block_data_cache = Arc::clone(&self.block_data_cache); let backend = Arc::clone(&self.backend); - let id = match frontier_backend_client::load_hash::(client.as_ref(), backend.as_ref(), hash) - .map_err(|err| internal_err(format!("{:?}", err)))? + let id = match frontier_backend_client::load_hash::( + client.as_ref(), + backend.as_ref(), + hash, + ) + .map_err(|err| internal_err(format!("{:?}", err)))? { Some(hash) => hash, _ => return Ok(None), @@ -291,8 +299,12 @@ where None => return Ok(None), }; - let id = match frontier_backend_client::load_hash::(client.as_ref(), backend.as_ref(), hash) - .map_err(|err| internal_err(format!("{:?}", err)))? 
+ let id = match frontier_backend_client::load_hash::( + client.as_ref(), + backend.as_ref(), + hash, + ) + .map_err(|err| internal_err(format!("{:?}", err)))? { Some(hash) => hash, _ => return Ok(None), diff --git a/client/rpc/src/lib.rs b/client/rpc/src/lib.rs index 960782a6e9..1cbd0f4f77 100644 --- a/client/rpc/src/lib.rs +++ b/client/rpc/src/lib.rs @@ -76,7 +76,9 @@ pub mod frontier_backend_client { C: HeaderBackend + Send + Sync + 'static, { Ok(match number.unwrap_or(BlockNumber::Latest) { - BlockNumber::Hash { hash, .. } => load_hash::(client, backend, hash).unwrap_or(None), + BlockNumber::Hash { hash, .. } => { + load_hash::(client, backend, hash).unwrap_or(None) + } BlockNumber::Num(number) => Some(BlockId::Number(number.unique_saturated_into())), BlockNumber::Latest => Some(BlockId::Hash(client.info().best_hash)), BlockNumber::Earliest => Some(BlockId::Number(Zero::zero())), @@ -271,9 +273,7 @@ mod tests { type OpaqueBlock = Block, substrate_test_runtime_client::runtime::Extrinsic>; - fn open_frontier_backend( - path: PathBuf, - ) -> Result>, String> { + fn open_frontier_backend(path: PathBuf) -> Result>, String> { Ok(Arc::new(fc_db::Backend::::new( &fc_db::DatabaseSettings { source: sc_client_db::DatabaseSource::RocksDb { @@ -317,14 +317,20 @@ mod tests { // Map B1 let commitment = fc_db::MappingCommitment:: { block_hash: b1_hash, - ethereum_block_hash: ethereum_block_hash, + ethereum_block_hash, ethereum_transaction_hashes: vec![], }; let _ = frontier_backend.mapping().write_hashes(commitment); // Expect B1 to be canon assert_eq!( - super::frontier_backend_client::load_hash(client.as_ref(), frontier_backend.as_ref(), ethereum_block_hash).unwrap().unwrap(), + super::frontier_backend_client::load_hash( + client.as_ref(), + frontier_backend.as_ref(), + ethereum_block_hash + ) + .unwrap() + .unwrap(), BlockId::Hash(b1_hash), ); @@ -340,14 +346,20 @@ mod tests { // Map B2 to same ethereum hash let commitment = fc_db::MappingCommitment:: { block_hash: b2_hash, - ethereum_block_hash: ethereum_block_hash, + ethereum_block_hash, ethereum_transaction_hashes: vec![], }; let _ = frontier_backend.mapping().write_hashes(commitment); // Still expect B1 to be canon assert_eq!( - super::frontier_backend_client::load_hash(client.as_ref(), frontier_backend.as_ref(), ethereum_block_hash).unwrap().unwrap(), + super::frontier_backend_client::load_hash( + client.as_ref(), + frontier_backend.as_ref(), + ethereum_block_hash + ) + .unwrap() + .unwrap(), BlockId::Hash(b1_hash), ); @@ -361,7 +373,13 @@ mod tests { // Expect B2 to be new canon assert_eq!( - super::frontier_backend_client::load_hash(client.as_ref(), frontier_backend.as_ref(), ethereum_block_hash).unwrap().unwrap(), + super::frontier_backend_client::load_hash( + client.as_ref(), + frontier_backend.as_ref(), + ethereum_block_hash + ) + .unwrap() + .unwrap(), BlockId::Hash(b2_hash), ); } From 67ac004e8de93c59f2a72d21712377fc0f342c6c Mon Sep 17 00:00:00 2001 From: tgmichel Date: Tue, 20 Sep 2022 14:37:43 +0200 Subject: [PATCH 04/17] Cleanup --- client/cli/src/frontier_db_cmd/tests.rs | 6 ++--- client/db/src/lib.rs | 14 +++++++++++- client/db/src/upgrade.rs | 30 ++++++++++--------------- client/db/src/utils.rs | 8 ++++--- 4 files changed, 33 insertions(+), 25 deletions(-) diff --git a/client/cli/src/frontier_db_cmd/tests.rs b/client/cli/src/frontier_db_cmd/tests.rs index a64b4ed8cb..7925dd57e8 100644 --- a/client/cli/src/frontier_db_cmd/tests.rs +++ b/client/cli/src/frontier_db_cmd/tests.rs @@ -573,7 +573,7 @@ mod tests { // Expect the ethereum 
and substrate block hashes to be mapped. assert_eq!( backend.mapping().block_hash(ðereum_block_hash), - Ok(Some(block_hash)) + Ok(Some(vec![block_hash])) ); // Expect the offchain-stored transaction metadata to match the one we stored in the runtime. @@ -656,7 +656,7 @@ mod tests { // Expect the ethereum and substrate block hashes to be mapped. assert_eq!( backend.mapping().block_hash(ðereum_block_hash), - Ok(Some(block_a1_hash)) + Ok(Some(vec![block_a1_hash])) ); // Expect the offchain-stored transaction metadata to match the one we stored in the runtime. @@ -706,7 +706,7 @@ mod tests { // Expect the ethereum and substrate block hashes to be mapped. assert_eq!( backend.mapping().block_hash(ðereum_block_hash), - Ok(Some(block_a2_hash)) + Ok(Some(vec![block_a1_hash, block_a2_hash])) ); // Expect the offchain-stored transaction metadata to have data for both blocks. diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 52e23af516..cf97c63026 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -228,6 +228,18 @@ impl MappingDb { } } + pub fn block_hashb(&self, ethereum_block_hash: &H256) -> Result, String> { + match self + .db + .get(crate::columns::BLOCK_MAPPING, ðereum_block_hash.encode()) + { + Some(raw) => Ok(Some( + Block::Hash::decode(&mut &raw[..]).map_err(|e| format!("{:?}", e))?, + )), + None => Ok(None), + } + } + pub fn transaction_metadata( &self, ethereum_transaction_hash: &H256, @@ -265,7 +277,7 @@ impl MappingDb { let mut transaction = sp_database::Transaction::new(); - let mut substrate_hashes = match self.block_hash(&commitment.ethereum_block_hash) { + let substrate_hashes = match self.block_hash(&commitment.ethereum_block_hash) { Ok(Some(mut data)) => { data.push(commitment.block_hash); data diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs index 744f9f2ab9..75f59f5d13 100644 --- a/client/db/src/upgrade.rs +++ b/client/db/src/upgrade.rs @@ -19,8 +19,6 @@ use codec::{Decode, Encode}; use sp_runtime::traits::Block as BlockT; -use crate::{Database, DatabaseSettings, DatabaseSource, DbHash}; - use std::{ fmt, fs, io::{self, ErrorKind, Read, Write}, @@ -42,8 +40,6 @@ const V2_NUM_COLUMNS: u32 = 4; pub(crate) enum UpgradeError { /// Database version cannot be read from existing db_version file. UnknownDatabaseVersion, - /// Missing database version file. - MissingDatabaseVersionFile, /// Database version no longer supported. UnsupportedVersion(u32), /// Database version comes from future version of the client. @@ -54,7 +50,7 @@ pub(crate) enum UpgradeError { pub(crate) type UpgradeResult = Result; -struct UpgradeVersion1To2Summary { +pub(crate) struct UpgradeVersion1To2Summary { pub success: u32, pub error: Vec, } @@ -74,7 +70,6 @@ impl fmt::Display for UpgradeError { "Database version cannot be read from existing db_version file" ) } - UpgradeError::MissingDatabaseVersionFile => write!(f, "Missing database version file"), UpgradeError::UnsupportedVersion(version) => { write!(f, "Database version no longer supported: {}", version) } @@ -97,7 +92,7 @@ pub(crate) fn upgrade_db(db_path: &Path) -> UpgradeResult<()> { 0 => return Err(UpgradeError::UnsupportedVersion(db_version)), 1 => { let summary = migrate_1_to_2::(db_path)?; - if summary.error.len() > 0 { + if !summary.error.is_empty() { panic!( "Inconsistent migration from version 1 to 2. 
Failed on {:?}", summary.error @@ -128,7 +123,8 @@ pub(crate) fn current_version(path: &Path) -> UpgradeResult { let mut s = String::new(); file.read_to_string(&mut s) .map_err(|_| UpgradeError::UnknownDatabaseVersion)?; - u32::from_str_radix(&s, 10).map_err(|_| UpgradeError::UnknownDatabaseVersion) + s.parse::() + .map_err(|_| UpgradeError::UnknownDatabaseVersion) } } } @@ -170,14 +166,12 @@ pub(crate) fn migrate_1_to_2( // Only update version1 data let decoded = Vec::::decode(&mut &substrate_hash[..]); if decoded.is_err() || decoded.unwrap().is_empty() { - let mut hashes = Vec::new(); - hashes.push(sp_core::H256::from_slice(&substrate_hash[..])); transaction.put_vec( crate::columns::BLOCK_MAPPING, ethereum_hash, - hashes.encode(), + vec![sp_core::H256::from_slice(&substrate_hash[..])].encode(), ); - res.success = res.success + 1; + res.success += 1; } else { res.error.push(sp_core::H256::from_slice(ethereum_hash)); } @@ -211,15 +205,14 @@ pub(crate) fn migrate_1_to_2( #[cfg(test)] mod tests { - use std::{collections::HashMap, path::PathBuf, sync::Arc}; + use std::{path::PathBuf, sync::Arc}; use codec::Encode; use sp_core::H256; use sp_runtime::{ - generic::{Block, BlockId, Header}, - traits::{BlakeTwo256, Block as BlockT}, + generic::{Block, Header}, + traits::BlakeTwo256, }; - use std::str::FromStr; use tempfile::tempdir; type OpaqueBlock = @@ -250,7 +243,7 @@ mod tests { // Fill the tmp db with some data let mut transaction = sp_database::Transaction::new(); - for n in 0..20_010 { + for _ in 0..20_010 { let ethhash = H256::random(); let subhash = H256::random(); ethereum_hashes.push(ethhash); @@ -281,6 +274,7 @@ mod tests { } // Upgrade db version file - assert_eq!(super::update_version(&path), Ok(2u32)); + // let _ = super::update_version(&path); + assert_eq!(super::current_version(&path).expect("version"), 2u32); } } diff --git a/client/db/src/utils.rs b/client/db/src/utils.rs index ea1be4ec3c..f14c41eeb7 100644 --- a/client/db/src/utils.rs +++ b/client/db/src/utils.rs @@ -47,11 +47,12 @@ fn open_kvdb_rocksdb( create: bool, ) -> Result>, String> { // first upgrade database to required version + #[cfg(not(test))] match crate::upgrade::upgrade_db::(path) { // in case of missing version file, assume that database simply does not exist at given // location - Ok(_) | Err(crate::upgrade::UpgradeError::MissingDatabaseVersionFile) => (), - Err(err) => return Err("Todo".to_string()), + Ok(_) => (), + Err(_) => return Err("Frontier DB upgrade error".to_string()), } let mut db_config = kvdb_rocksdb::DatabaseConfig::with_columns(crate::columns::NUM_COLUMNS); @@ -59,7 +60,8 @@ fn open_kvdb_rocksdb( let db = kvdb_rocksdb::Database::open(&db_config, &path).map_err(|err| format!("{}", err))?; // write database version only after the database is succesfully opened - crate::upgrade::update_version(path).map_err(|_| "Cannot update db version".to_string()); + #[cfg(not(test))] + let _ = crate::upgrade::update_version(path).map_err(|_| "Cannot update db version".to_string())?; return Ok(sp_database::as_database(db)); } From c30f56714fa213d2d6ce936ca3db66df9be5c892 Mon Sep 17 00:00:00 2001 From: tgmichel Date: Tue, 20 Sep 2022 14:52:17 +0200 Subject: [PATCH 05/17] Cleanup --- client/db/src/utils.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/client/db/src/utils.rs b/client/db/src/utils.rs index f14c41eeb7..a9be864e0f 100644 --- a/client/db/src/utils.rs +++ b/client/db/src/utils.rs @@ -49,8 +49,6 @@ fn open_kvdb_rocksdb( // first upgrade database to required version #[cfg(not(test))] match 
crate::upgrade::upgrade_db::(path) { - // in case of missing version file, assume that database simply does not exist at given - // location Ok(_) => (), Err(_) => return Err("Frontier DB upgrade error".to_string()), } From bc32db6283afa531f8223ae74645065bed712f40 Mon Sep 17 00:00:00 2001 From: tgmichel Date: Tue, 20 Sep 2022 14:58:01 +0200 Subject: [PATCH 06/17] Cleanup --- client/db/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index f4f17a6fb4..d4da0c76ea 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -29,7 +29,7 @@ sp-runtime = { version = "6.0.0", git = "https://github.com/paritytech/substrate fp-storage = { version = "2.0.0-dev", path = "../../primitives/storage" } [features] -default = ["kvdb-rocksdb", "parity-db", "sc-client-db/rocksdb"] +default = ["kvdb-rocksdb", "parity-db"] [dev-dependencies] tempfile = "3.3.0" From ac5489218e4b2ed2dbb81fcb013eb0913e5c881e Mon Sep 17 00:00:00 2001 From: tgmichel Date: Tue, 20 Sep 2022 15:15:31 +0200 Subject: [PATCH 07/17] fmt skip --- client/db/src/upgrade.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs index 75f59f5d13..b76238abe1 100644 --- a/client/db/src/upgrade.rs +++ b/client/db/src/upgrade.rs @@ -157,9 +157,11 @@ pub(crate) fn migrate_1_to_2( error: vec![], }; // Process a batch of hashes in a single db transaction - let mut process_chunk = |db: &kvdb_rocksdb::Database, - ethereum_hashes: &[std::boxed::Box<[u8]>]| - -> UpgradeResult<()> { + #[rustfmt::skip] + let mut process_chunk = | + db: &kvdb_rocksdb::Database, + ethereum_hashes: &[std::boxed::Box<[u8]>] + | -> UpgradeResult<()> { let mut transaction = db.transaction(); for ethereum_hash in ethereum_hashes { if let Some(substrate_hash) = db.get(crate::columns::BLOCK_MAPPING, ethereum_hash)? { From dc97d30f681c806d4a359ef217666904a279eb02 Mon Sep 17 00:00:00 2001 From: tgmichel Date: Wed, 21 Sep 2022 12:15:03 +0200 Subject: [PATCH 08/17] parity db migration --- client/db/src/upgrade.rs | 83 ++++++++++++++++++++++++++++++++++++++-- client/db/src/utils.rs | 38 ++++++++++++++---- 2 files changed, 110 insertions(+), 11 deletions(-) diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs index b76238abe1..db5e856a95 100644 --- a/client/db/src/upgrade.rs +++ b/client/db/src/upgrade.rs @@ -19,6 +19,8 @@ use codec::{Decode, Encode}; use sp_runtime::traits::Block as BlockT; +use crate::DatabaseSource; + use std::{ fmt, fs, io::{self, ErrorKind, Read, Write}, @@ -86,12 +88,19 @@ impl fmt::Display for UpgradeError { } /// Upgrade database to current version. -pub(crate) fn upgrade_db(db_path: &Path) -> UpgradeResult<()> { +pub(crate) fn upgrade_db( + db_path: &Path, + source: &DatabaseSource, +) -> UpgradeResult<()> { let db_version = current_version(db_path)?; match db_version { 0 => return Err(UpgradeError::UnsupportedVersion(db_version)), 1 => { - let summary = migrate_1_to_2::(db_path)?; + let summary = match source { + DatabaseSource::ParityDb { .. } => migrate_1_to_2_parity_db::(db_path)?, + DatabaseSource::RocksDb { .. } => migrate_1_to_2_rocks_db::(db_path)?, + _ => panic!("DatabaseSource required for upgrade ParityDb | RocksDb"), + }; if !summary.error.is_empty() { panic!( "Inconsistent migration from version 1 to 2. 
Failed on {:?}", @@ -148,7 +157,7 @@ fn version_file_path(path: &Path) -> PathBuf { /// Migration from version1 to version2: /// - The format of the Ethereum<>Substrate block mapping changed to support equivocation. /// - Migrating schema from One-to-one to One-to-many (EthHash: Vec) relationship. -pub(crate) fn migrate_1_to_2( +pub(crate) fn migrate_1_to_2_rocks_db( db_path: &Path, ) -> UpgradeResult { log::info!("🔨 Running Frontier DB migration from version 1 to version 2. Please wait."); @@ -204,6 +213,74 @@ pub(crate) fn migrate_1_to_2( Ok(res) } +pub(crate) fn migrate_1_to_2_parity_db( + db_path: &Path, +) -> UpgradeResult { + log::info!("🔨 Running Frontier DB migration from version 1 to version 2. Please wait."); + let mut res = UpgradeVersion1To2Summary { + success: 0, + error: vec![], + }; + // Process a batch of hashes in a single db transaction + #[rustfmt::skip] + let mut process_chunk = | + db: &parity_db::Db, + ethereum_hashes: &[Vec] + | -> UpgradeResult<()> { + let mut transaction = vec![]; + for ethereum_hash in ethereum_hashes { + if let Some(substrate_hash) = db.get(crate::columns::BLOCK_MAPPING as u8, ethereum_hash).map_err(|_| + io::Error::new(ErrorKind::Other, "Key does not exist") + )? { + // Only update version1 data + let decoded = Vec::::decode(&mut &substrate_hash[..]); + if decoded.is_err() || decoded.unwrap().is_empty() { + transaction.push(( + crate::columns::BLOCK_MAPPING as u8, + ethereum_hash, + Some(vec![sp_core::H256::from_slice(&substrate_hash[..])].encode()), + )); + res.success += 1; + } else { + res.error.push(sp_core::H256::from_slice(ethereum_hash)); + } + } else { + res.error.push(sp_core::H256::from_slice(ethereum_hash)); + } + } + db.commit(transaction) + .map_err(|_| io::Error::new(ErrorKind::Other, "Failed to commit on migrate_1_to_2"))?; + Ok(()) + }; + + let db_cfg = parity_db::Options::with_columns(db_path, V2_NUM_COLUMNS as u8); + let db = parity_db::Db::open_or_create(&db_cfg) + .map_err(|_| io::Error::new(ErrorKind::Other, "Failed to open db"))?; + + // Get all the block hashes we need to update + let ethereum_hashes: Vec<_> = match db.iter(crate::columns::BLOCK_MAPPING as u8) { + Ok(mut iter) => { + let mut hashes = vec![]; + loop { + match iter.next() { + Ok(Some((k, _))) => hashes.push(k), + _ => break, + } + } + hashes + } + _ => vec![], + }; + + // Read and update each entry in db transaction batches + const CHUNK_SIZE: usize = 10_000; + let chunks = ethereum_hashes.chunks(CHUNK_SIZE); + for chunk in chunks { + process_chunk(&db, chunk)?; + } + Ok(res) +} + #[cfg(test)] mod tests { diff --git a/client/db/src/utils.rs b/client/db/src/utils.rs index a9be864e0f..6b8372c677 100644 --- a/client/db/src/utils.rs +++ b/client/db/src/utils.rs @@ -26,15 +26,17 @@ pub fn open_database( config: &DatabaseSettings, ) -> Result>, String> { let db: Arc> = match &config.source { - DatabaseSource::ParityDb { path } => open_parity_db(path)?, - DatabaseSource::RocksDb { path, .. } => open_kvdb_rocksdb::(path, true)?, + DatabaseSource::ParityDb { path } => open_parity_db::(path, &config.source)?, + DatabaseSource::RocksDb { path, .. } => { + open_kvdb_rocksdb::(path, true, &config.source)? + } DatabaseSource::Auto { paritydb_path, rocksdb_path, .. 
-		} => match open_kvdb_rocksdb::<Block>(rocksdb_path, false) {
+		} => match open_kvdb_rocksdb::<Block>(rocksdb_path, false, &config.source) {
 			Ok(db) => db,
-			Err(_) => open_parity_db(paritydb_path)?,
+			Err(_) => open_parity_db::<Block>(paritydb_path, &config.source)?,
 		},
 		_ => return Err("Missing feature flags `parity-db`".to_string()),
 	};
@@ -45,10 +47,11 @@ pub fn open_database(
 fn open_kvdb_rocksdb<Block: BlockT>(
 	path: &Path,
 	create: bool,
+	source: &DatabaseSource,
 ) -> Result<Arc<dyn Database<DbHash>>, String> {
 	// first upgrade database to required version
 	#[cfg(not(test))]
-	match crate::upgrade::upgrade_db::<Block>(path) {
+	match crate::upgrade::upgrade_db::<Block>(path, source) {
 		Ok(_) => (),
 		Err(_) => return Err("Frontier DB upgrade error".to_string()),
 	}
@@ -64,18 +67,37 @@ fn open_kvdb_rocksdb(
 }
 
 #[cfg(not(feature = "kvdb-rocksdb"))]
-fn open_kvdb_rocksdb<Block: BlockT>(_path: &Path, _create: bool) -> Result<Arc<dyn Database<DbHash>>, String> {
+fn open_kvdb_rocksdb<Block: BlockT>(
+	_path: &Path,
+	_create: bool,
+	_source: &DatabaseSource,
+) -> Result<Arc<dyn Database<DbHash>>, String> {
 	Err("Missing feature flags `kvdb-rocksdb`".to_string())
 }
 
 #[cfg(feature = "parity-db")]
-fn open_parity_db<Block: BlockT>(path: &Path) -> Result<Arc<dyn Database<DbHash>>, String> {
+fn open_parity_db<Block: BlockT>(
+	path: &Path,
+	source: &DatabaseSource,
+) -> Result<Arc<dyn Database<DbHash>>, String> {
+	// first upgrade database to required version
+	#[cfg(not(test))]
+	match crate::upgrade::upgrade_db::<Block>(path, source) {
+		Ok(_) => (),
+		Err(_) => return Err("Frontier DB upgrade error".to_string()),
+	}
 	let config = parity_db::Options::with_columns(path, crate::columns::NUM_COLUMNS as u8);
 	let db = parity_db::Db::open_or_create(&config).map_err(|err| format!("{}", err))?;
+	// write database version only after the database is successfully opened
+	#[cfg(not(test))]
+	let _ = crate::upgrade::update_version(path).map_err(|_| "Cannot update db version".to_string())?;
 	Ok(Arc::new(crate::parity_db_adapter::DbAdapter(db)))
 }
 
 #[cfg(not(feature = "parity-db"))]
-fn open_parity_db<Block: BlockT>(_path: &Path) -> Result<Arc<dyn Database<DbHash>>, String> {
+fn open_parity_db<Block: BlockT>(
+	_path: &Path,
+	_source: &DatabaseSource,
+) -> Result<Arc<dyn Database<DbHash>>, String> {
 	Err("Missing feature flags `parity-db`".to_string())
 }

From a9ef762febbeb84a0c06c925b350894a52782bba Mon Sep 17 00:00:00 2001
From: tgmichel
Date: Wed, 21 Sep 2022 16:28:38 +0200
Subject: [PATCH 09/17] Cleanup + tests

---
 client/db/Cargo.toml     |   2 +-
 client/db/src/lib.rs     |  12 ----
 client/db/src/upgrade.rs | 142 ++++++++++++++++++++++-----------------
 client/db/src/utils.rs   |  12 ++--
 4 files changed, 88 insertions(+), 80 deletions(-)

diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml
index d4da0c76ea..9ae0ac47f4 100644
--- a/client/db/Cargo.toml
+++ b/client/db/Cargo.toml
@@ -34,4 +34,4 @@ default = ["kvdb-rocksdb", "parity-db"]
 [dev-dependencies]
 tempfile = "3.3.0"
 substrate-test-runtime-client = { version = "2.0.0", git = "https://github.com/paritytech/substrate", branch = "master" }
-
+sc-client-db = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate", branch = "master", features = ["rocksdb"] }
diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs
index cf97c63026..bfa59ecf73 100644
--- a/client/db/src/lib.rs
+++ b/client/db/src/lib.rs
@@ -228,18 +228,6 @@ impl<Block: BlockT> MappingDb<Block> {
 		}
 	}
 
-	pub fn block_hashb(&self, ethereum_block_hash: &H256) -> Result<Option<Block::Hash>, String> {
-		match self
-			.db
-			.get(crate::columns::BLOCK_MAPPING, &ethereum_block_hash.encode())
-		{
-			Some(raw) => Ok(Some(
-				Block::Hash::decode(&mut &raw[..]).map_err(|e| format!("{:?}", e))?,
-			)),
-			None => Ok(None),
-		}
-	}
-
 	pub fn transaction_metadata(
 		&self,
 		ethereum_transaction_hash: &H256,
diff --git a/client/db/src/upgrade.rs
b/client/db/src/upgrade.rs index db5e856a95..ef4e0f8e3e 100644 --- a/client/db/src/upgrade.rs +++ b/client/db/src/upgrade.rs @@ -19,6 +19,8 @@ use codec::{Decode, Encode}; use sp_runtime::traits::Block as BlockT; +use sp_core::H256; + use crate::DatabaseSource; use std::{ @@ -54,7 +56,7 @@ pub(crate) type UpgradeResult = Result; pub(crate) struct UpgradeVersion1To2Summary { pub success: u32, - pub error: Vec, + pub error: Vec, } impl From for UpgradeError { @@ -180,14 +182,14 @@ pub(crate) fn migrate_1_to_2_rocks_db( transaction.put_vec( crate::columns::BLOCK_MAPPING, ethereum_hash, - vec![sp_core::H256::from_slice(&substrate_hash[..])].encode(), + vec![H256::from_slice(&substrate_hash[..])].encode(), ); res.success += 1; } else { - res.error.push(sp_core::H256::from_slice(ethereum_hash)); + res.error.push(H256::from_slice(ethereum_hash)); } } else { - res.error.push(sp_core::H256::from_slice(ethereum_hash)); + res.error.push(H256::from_slice(ethereum_hash)); } } db.write(transaction) @@ -238,14 +240,14 @@ pub(crate) fn migrate_1_to_2_parity_db( transaction.push(( crate::columns::BLOCK_MAPPING as u8, ethereum_hash, - Some(vec![sp_core::H256::from_slice(&substrate_hash[..])].encode()), + Some(vec![H256::from_slice(&substrate_hash[..])].encode()), )); res.success += 1; } else { - res.error.push(sp_core::H256::from_slice(ethereum_hash)); + res.error.push(H256::from_slice(ethereum_hash)); } } else { - res.error.push(sp_core::H256::from_slice(ethereum_hash)); + res.error.push(H256::from_slice(ethereum_hash)); } } db.commit(transaction) @@ -253,7 +255,9 @@ pub(crate) fn migrate_1_to_2_parity_db( Ok(()) }; - let db_cfg = parity_db::Options::with_columns(db_path, V2_NUM_COLUMNS as u8); + let mut db_cfg = parity_db::Options::with_columns(db_path, V2_NUM_COLUMNS as u8); + db_cfg.columns[crate::columns::BLOCK_MAPPING as usize].btree_index = true; + let db = parity_db::Db::open_or_create(&db_cfg) .map_err(|_| io::Error::new(ErrorKind::Other, "Failed to open db"))?; @@ -263,15 +267,16 @@ pub(crate) fn migrate_1_to_2_parity_db( let mut hashes = vec![]; loop { match iter.next() { - Ok(Some((k, _))) => hashes.push(k), + Ok(Some((k, _))) => { + hashes.push(k); + } _ => break, } } hashes } - _ => vec![], + Err(_) => vec![], }; - // Read and update each entry in db transaction batches const CHUNK_SIZE: usize = 10_000; let chunks = ethereum_hashes.chunks(CHUNK_SIZE); @@ -284,7 +289,7 @@ pub(crate) fn migrate_1_to_2_parity_db( #[cfg(test)] mod tests { - use std::{path::PathBuf, sync::Arc}; + use std::sync::Arc; use codec::Encode; use sp_core::H256; @@ -298,62 +303,75 @@ mod tests { Block, substrate_test_runtime_client::runtime::Extrinsic>; pub fn open_frontier_backend( - path: PathBuf, + setting: &crate::DatabaseSettings, ) -> Result>, String> { - Ok(Arc::new(crate::Backend::::new( - &crate::DatabaseSettings { - source: sc_client_db::DatabaseSource::RocksDb { - path, - cache_size: 0, - }, - }, - )?)) + Ok(Arc::new(crate::Backend::::new(setting)?)) } #[test] fn upgrade_1_to_2_works() { - let tmp = tempdir().expect("create a temporary directory"); - let path = tmp.path().to_owned(); - let mut ethereum_hashes = vec![]; - let mut substrate_hashes = vec![]; - { - // Create a temporary frontier secondary DB. 
- let backend = open_frontier_backend(path.clone()).expect("a temporary db was created"); - - // Fill the tmp db with some data - let mut transaction = sp_database::Transaction::new(); - for _ in 0..20_010 { - let ethhash = H256::random(); - let subhash = H256::random(); - ethereum_hashes.push(ethhash); - substrate_hashes.push(subhash); - transaction.set( - crate::columns::BLOCK_MAPPING, - ðhash.encode(), - &subhash.encode(), - ); + let tmp_1 = tempdir().expect("create a temporary directory"); + let tmp_2 = tempdir().expect("create a temporary directory"); + + let settings = vec![ + // Rocks db + crate::DatabaseSettings { + source: sc_client_db::DatabaseSource::RocksDb { + path: tmp_1.path().to_owned(), + cache_size: 0, + }, + }, + // Parity db + crate::DatabaseSettings { + source: sc_client_db::DatabaseSource::ParityDb { + path: tmp_2.path().to_owned(), + }, + }, + ]; + + for setting in settings { + let path = setting.source.path().unwrap(); + + let mut ethereum_hashes = vec![]; + let mut substrate_hashes = vec![]; + { + // Create a temporary frontier secondary DB. + let backend = open_frontier_backend(&setting).expect("a temporary db was created"); + + // Fill the tmp db with some data + let mut transaction = sp_database::Transaction::new(); + for _ in 0..20_010 { + let ethhash = H256::random(); + let subhash = H256::random(); + ethereum_hashes.push(ethhash); + substrate_hashes.push(subhash); + transaction.set( + crate::columns::BLOCK_MAPPING, + ðhash.encode(), + &subhash.encode(), + ); + } + let _ = backend.mapping().db.commit(transaction); + } + // Upgrade database from version 1 to 2 + let _ = super::upgrade_db::(&path, &setting.source); + + // Check data + let backend = open_frontier_backend(&setting).expect("a temporary db was created"); + for (i, original_ethereum_hash) in ethereum_hashes.iter().enumerate() { + let entry = backend + .mapping() + .block_hash(original_ethereum_hash) + .unwrap() + .unwrap(); + // All entries now hold a single element Vec + assert_eq!(entry.len(), 1); + // The Vec holds the old value + assert_eq!(entry.first(), substrate_hashes.get(i)); } - let _ = backend.mapping().db.commit(transaction); - } - // Upgrade database from version 1 to 2 - let _ = super::upgrade_db::(&path); - - // Check data - let backend = open_frontier_backend(path.clone()).expect("a temporary db was created"); - for (i, original_ethereum_hash) in ethereum_hashes.iter().enumerate() { - let entry = backend - .mapping() - .block_hash(original_ethereum_hash) - .unwrap() - .unwrap(); - // All entries now hold a single element Vec - assert_eq!(entry.len(), 1); - // The Vec holds the old value - assert_eq!(entry.first(), substrate_hashes.get(i)); - } - // Upgrade db version file - // let _ = super::update_version(&path); - assert_eq!(super::current_version(&path).expect("version"), 2u32); + // Upgrade db version file + assert_eq!(super::current_version(&path).expect("version"), 2u32); + } } } diff --git a/client/db/src/utils.rs b/client/db/src/utils.rs index 6b8372c677..3e9779020b 100644 --- a/client/db/src/utils.rs +++ b/client/db/src/utils.rs @@ -47,11 +47,11 @@ pub fn open_database( fn open_kvdb_rocksdb( path: &Path, create: bool, - source: &DatabaseSource, + _source: &DatabaseSource, ) -> Result>, String> { // first upgrade database to required version #[cfg(not(test))] - match crate::upgrade::upgrade_db::(path, source) { + match crate::upgrade::upgrade_db::(path, _source) { Ok(_) => (), Err(_) => return Err("Frontier DB upgrade error".to_string()), } @@ -78,15 +78,17 @@ fn 
open_kvdb_rocksdb(
 #[cfg(feature = "parity-db")]
 fn open_parity_db<Block: BlockT>(
 	path: &Path,
-	source: &DatabaseSource,
+	_source: &DatabaseSource,
 ) -> Result<Arc<dyn Database<DbHash>>, String> {
 	// first upgrade database to required version
 	#[cfg(not(test))]
-	match crate::upgrade::upgrade_db::<Block>(path, source) {
+	match crate::upgrade::upgrade_db::<Block>(path, _source) {
 		Ok(_) => (),
 		Err(_) => return Err("Frontier DB upgrade error".to_string()),
 	}
-	let config = parity_db::Options::with_columns(path, crate::columns::NUM_COLUMNS as u8);
+	let mut config = parity_db::Options::with_columns(path, crate::columns::NUM_COLUMNS as u8);
+	config.columns[crate::columns::BLOCK_MAPPING as usize].btree_index = true;
+
 	let db = parity_db::Db::open_or_create(&config).map_err(|err| format!("{}", err))?;
 	// write database version only after the database is successfully opened
 	#[cfg(not(test))]

From fccbcacf5a14695cd73bc4efb2cc6ede95688c36 Mon Sep 17 00:00:00 2001
From: tgmichel
Date: Wed, 21 Sep 2022 18:11:38 +0200
Subject: [PATCH 10/17] WIP retroactively fix non-canon mapped blocks

---
 Cargo.lock                                   |  1 +
 client/cli/src/frontier_db_cmd/mapping_db.rs |  4 +-
 client/cli/src/frontier_db_cmd/meta_db.rs    | 13 +--
 client/cli/src/frontier_db_cmd/mod.rs        |  2 +-
 client/consensus/src/lib.rs                  |  4 +-
 client/db/Cargo.toml                         |  1 +
 client/db/src/lib.rs                         | 58 ++++++++------
 client/db/src/upgrade.rs                     | 83 +++++++++++++-------
 client/db/src/utils.rs                       | 61 +++++++++-----
 client/mapping-sync/src/lib.rs               | 27 ++++---
 client/mapping-sync/src/worker.rs            |  4 +-
 client/rpc/src/eth/filter.rs                 |  4 +-
 client/rpc/src/eth/mod.rs                    |  4 +-
 client/rpc/src/lib.rs                        | 16 ++--
 template/node/src/rpc.rs                     |  2 +-
 template/node/src/service.rs                 |  5 +-
 16 files changed, 184 insertions(+), 105 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index d01e95592f..91746c8d6e 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1653,6 +1653,7 @@ dependencies = [
  "parity-scale-codec",
  "parking_lot 0.12.1",
  "sc-client-db",
+ "sp-blockchain",
  "sp-core",
  "sp-database",
  "sp-runtime",
diff --git a/client/cli/src/frontier_db_cmd/mapping_db.rs b/client/cli/src/frontier_db_cmd/mapping_db.rs
index 0a048c98d3..16879463a9 100644
--- a/client/cli/src/frontier_db_cmd/mapping_db.rs
+++ b/client/cli/src/frontier_db_cmd/mapping_db.rs
@@ -39,7 +39,7 @@ pub enum MappingKey {
 pub struct MappingDb<'a, C, B: BlockT> {
 	cmd: &'a FrontierDbCmd,
 	client: Arc<C>,
-	backend: Arc<fc_db::Backend<B>>,
+	backend: Arc<fc_db::Backend<B, C>>,
 }
 impl<'a, C, B: BlockT> MappingDb<'a, C, B>
@@ -48,7 +48,7 @@ where
 	C::Api: EthereumRuntimeRPCApi,
 	C: sp_blockchain::HeaderBackend<B>,
 {
-	pub fn new(cmd: &'a FrontierDbCmd, client: Arc<C>, backend: Arc<fc_db::Backend<B>>) -> Self {
+	pub fn new(cmd: &'a FrontierDbCmd, client: Arc<C>, backend: Arc<fc_db::Backend<B, C>>) -> Self {
 		Self {
 			cmd,
 			client,
diff --git a/client/cli/src/frontier_db_cmd/meta_db.rs b/client/cli/src/frontier_db_cmd/meta_db.rs
index 2f6c42d5ab..c471a8fb79 100644
--- a/client/cli/src/frontier_db_cmd/meta_db.rs
+++ b/client/cli/src/frontier_db_cmd/meta_db.rs
@@ -56,13 +56,16 @@ impl FromStr for MetaKey {
 	}
 }
-pub struct MetaDb<'a, B: BlockT> {
+pub struct MetaDb<'a, B: BlockT, C> {
 	cmd: &'a FrontierDbCmd,
-	backend: Arc<fc_db::Backend<B>>,
+	backend: Arc<fc_db::Backend<B, C>>,
 }
-impl<'a, B: BlockT> MetaDb<'a, B> {
-	pub fn new(cmd: &'a FrontierDbCmd, backend: Arc<fc_db::Backend<B>>) -> Self {
+impl<'a, B: BlockT, C> MetaDb<'a, B, C>
+where
+	C: sp_blockchain::HeaderBackend<B>,
+{
+	pub fn new(cmd: &'a FrontierDbCmd, backend: Arc<fc_db::Backend<B, C>>) -> Self {
 		Self { cmd, backend }
 	}
@@ -152,4 +155,4 @@ impl<'a, B: BlockT> MetaDb<'a, B> {
 	}
 }
-impl<'a, B: BlockT> FrontierDbMessage for MetaDb<'a, B> {}
+impl<'a, B: BlockT, C> FrontierDbMessage for MetaDb<'a, B, C> {}
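Why the `btree_index` flag enabled in the hunks above matters: parity-db only supports iterating a column when it is btree-indexed, so both `migrate_1_to_2_parity_db` and `open_parity_db` turn it on for the block-mapping column before opening the database. A minimal, self-contained sketch of the pattern, using the same parity-db calls the patch itself uses (the column constants are illustrative stand-ins for `crate::columns`, not the real values):

    use std::path::Path;

    // Assumed stand-ins for crate::columns::{NUM_COLUMNS, BLOCK_MAPPING}.
    const NUM_COLUMNS: u8 = 4;
    const BLOCK_MAPPING: u8 = 1;

    fn open_with_iterable_mapping(path: &Path) -> Result<parity_db::Db, String> {
        let mut options = parity_db::Options::with_columns(path, NUM_COLUMNS);
        // Without the btree index, Db::iter cannot enumerate the column, and
        // the migration would have no way to list the Ethereum block hashes.
        options.columns[BLOCK_MAPPING as usize].btree_index = true;
        parity_db::Db::open_or_create(&options).map_err(|err| format!("{}", err))
    }

Iteration is also why the migration collects every key up front and then rewrites them in CHUNK_SIZE = 10_000 batches: each chunk becomes one committed transaction, keeping memory bounded on large mapping columns.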
diff --git a/client/cli/src/frontier_db_cmd/mod.rs b/client/cli/src/frontier_db_cmd/mod.rs index 4f491d9ce0..9c118a1c2e 100644 --- a/client/cli/src/frontier_db_cmd/mod.rs +++ b/client/cli/src/frontier_db_cmd/mod.rs @@ -96,7 +96,7 @@ impl FrontierDbCmd { pub fn run( &self, client: Arc, - backend: Arc>, + backend: Arc>, ) -> sc_cli::Result<()> where C: sp_api::ProvideRuntimeApi, diff --git a/client/consensus/src/lib.rs b/client/consensus/src/lib.rs index 47ee25b062..0849fb90cb 100644 --- a/client/consensus/src/lib.rs +++ b/client/consensus/src/lib.rs @@ -63,7 +63,7 @@ impl From for ConsensusError { pub struct FrontierBlockImport { inner: I, client: Arc, - backend: Arc>, + backend: Arc>, _marker: PhantomData, } @@ -87,7 +87,7 @@ where C::Api: EthereumRuntimeRPCApi, C::Api: BlockBuilderApi, { - pub fn new(inner: I, client: Arc, backend: Arc>) -> Self { + pub fn new(inner: I, client: Arc, backend: Arc>) -> Self { Self { inner, client, diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index 9ae0ac47f4..9cd04bf6ef 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -24,6 +24,7 @@ sc-client-db = { version = "0.10.0-dev", git = "https://github.com/paritytech/su sp-core = { version = "6.0.0", git = "https://github.com/paritytech/substrate", branch = "master" } sp-database = { version = "4.0.0-dev", git = "https://github.com/paritytech/substrate", branch = "master" } sp-runtime = { version = "6.0.0", git = "https://github.com/paritytech/substrate", branch = "master" } +sp-blockchain = { version = "4.0.0-dev", git = "https://github.com/paritytech/substrate", branch = "master" } # Frontier fp-storage = { version = "2.0.0-dev", path = "../../primitives/storage" } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index bfa59ecf73..0cfc98e840 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -58,9 +58,10 @@ pub mod static_keys { pub const CURRENT_SYNCING_TIPS: &[u8] = b"CURRENT_SYNCING_TIPS"; } -pub struct Backend { +pub struct Backend { meta: Arc>, mapping: Arc>, + _marker: PhantomData, } /// Returns the frontier database directory. @@ -68,31 +69,43 @@ pub fn frontier_database_dir(db_config_dir: &Path, db_path: &str) -> PathBuf { db_config_dir.join("frontier").join(db_path) } -impl Backend { - pub fn open(database: &DatabaseSource, db_config_dir: &Path) -> Result { - Self::new(&DatabaseSettings { - source: match database { - DatabaseSource::RocksDb { .. } => DatabaseSource::RocksDb { - path: frontier_database_dir(db_config_dir, "db"), - cache_size: 0, +impl Backend +where + C: sp_blockchain::HeaderBackend + Send + Sync, +{ + pub fn open( + client: Arc, + database: &DatabaseSource, + db_config_dir: &Path, + ) -> Result { + Self::new( + client, + &DatabaseSettings { + source: match database { + DatabaseSource::RocksDb { .. } => DatabaseSource::RocksDb { + path: frontier_database_dir(db_config_dir, "db"), + cache_size: 0, + }, + DatabaseSource::ParityDb { .. } => DatabaseSource::ParityDb { + path: frontier_database_dir(db_config_dir, "paritydb"), + }, + DatabaseSource::Auto { .. } => DatabaseSource::Auto { + rocksdb_path: frontier_database_dir(db_config_dir, "db"), + paritydb_path: frontier_database_dir(db_config_dir, "paritydb"), + cache_size: 0, + }, + _ => { + return Err( + "Supported db sources: `rocksdb` | `paritydb` | `auto`".to_string() + ) + } }, - DatabaseSource::ParityDb { .. } => DatabaseSource::ParityDb { - path: frontier_database_dir(db_config_dir, "paritydb"), - }, - DatabaseSource::Auto { .. 
} => DatabaseSource::Auto { - rocksdb_path: frontier_database_dir(db_config_dir, "db"), - paritydb_path: frontier_database_dir(db_config_dir, "paritydb"), - cache_size: 0, - }, - _ => { - return Err("Supported db sources: `rocksdb` | `paritydb` | `auto`".to_string()) - } }, - }) + ) } - pub fn new(config: &DatabaseSettings) -> Result { - let db = utils::open_database::(config)?; + pub fn new(client: Arc, config: &DatabaseSettings) -> Result { + let db = utils::open_database::(client, config)?; Ok(Self { mapping: Arc::new(MappingDb { @@ -104,6 +117,7 @@ impl Backend { db: db.clone(), _marker: PhantomData, }), + _marker: PhantomData, }) } diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs index ef4e0f8e3e..0273713ef2 100644 --- a/client/db/src/upgrade.rs +++ b/client/db/src/upgrade.rs @@ -17,7 +17,7 @@ // along with this program. If not, see . use codec::{Decode, Encode}; -use sp_runtime::traits::Block as BlockT; +use sp_runtime::traits::{Block as BlockT, Header}; use sp_core::H256; @@ -27,6 +27,7 @@ use std::{ fmt, fs, io::{self, ErrorKind, Read, Write}, path::{Path, PathBuf}, + sync::Arc, }; /// Version file name. @@ -90,17 +91,25 @@ impl fmt::Display for UpgradeError { } /// Upgrade database to current version. -pub(crate) fn upgrade_db( +pub(crate) fn upgrade_db( + client: Arc, db_path: &Path, source: &DatabaseSource, -) -> UpgradeResult<()> { +) -> UpgradeResult<()> +where + C: sp_blockchain::HeaderBackend + Send + Sync, +{ let db_version = current_version(db_path)?; match db_version { 0 => return Err(UpgradeError::UnsupportedVersion(db_version)), 1 => { let summary = match source { - DatabaseSource::ParityDb { .. } => migrate_1_to_2_parity_db::(db_path)?, - DatabaseSource::RocksDb { .. } => migrate_1_to_2_rocks_db::(db_path)?, + DatabaseSource::ParityDb { .. } => { + migrate_1_to_2_parity_db::(client, db_path)? + } + DatabaseSource::RocksDb { .. } => { + migrate_1_to_2_rocks_db::(client, db_path)? + } _ => panic!("DatabaseSource required for upgrade ParityDb | RocksDb"), }; if !summary.error.is_empty() { @@ -159,9 +168,13 @@ fn version_file_path(path: &Path) -> PathBuf { /// Migration from version1 to version2: /// - The format of the Ethereum<>Substrate block mapping changed to support equivocation. /// - Migrating schema from One-to-one to One-to-many (EthHash: Vec) relationship. -pub(crate) fn migrate_1_to_2_rocks_db( +pub(crate) fn migrate_1_to_2_rocks_db( + client: Arc, db_path: &Path, -) -> UpgradeResult { +) -> UpgradeResult +where + C: sp_blockchain::HeaderBackend + Send + Sync, +{ log::info!("🔨 Running Frontier DB migration from version 1 to version 2. Please wait."); let mut res = UpgradeVersion1To2Summary { success: 0, @@ -175,20 +188,26 @@ pub(crate) fn migrate_1_to_2_rocks_db( | -> UpgradeResult<()> { let mut transaction = db.transaction(); for ethereum_hash in ethereum_hashes { + let mut maybe_error = true; if let Some(substrate_hash) = db.get(crate::columns::BLOCK_MAPPING, ethereum_hash)? { // Only update version1 data let decoded = Vec::::decode(&mut &substrate_hash[..]); if decoded.is_err() || decoded.unwrap().is_empty() { - transaction.put_vec( - crate::columns::BLOCK_MAPPING, - ethereum_hash, - vec![H256::from_slice(&substrate_hash[..])].encode(), - ); - res.success += 1; - } else { - res.error.push(H256::from_slice(ethereum_hash)); + // Verify the substrate hash is part of the canonical chain. 
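+ // (The check below: `client.number(hash)` resolves the height at which
+ // the mapped, possibly orphaned, hash was imported; `client.header` at
+ // that number then yields the canonical header, whose hash becomes the
+ // new one-element mapping value.)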
+ if let Ok(Some(number)) = client.number(Block::Hash::decode(&mut &substrate_hash[..]).unwrap()) { + if let Ok(Some(header)) = client.header(sp_runtime::generic::BlockId::Number(number)) { + transaction.put_vec( + crate::columns::BLOCK_MAPPING, + ethereum_hash, + vec![header.hash()].encode(), + ); + res.success += 1; + maybe_error = false; + } + } } - } else { + } + if maybe_error { res.error.push(H256::from_slice(ethereum_hash)); } } @@ -215,9 +234,13 @@ pub(crate) fn migrate_1_to_2_rocks_db( Ok(res) } -pub(crate) fn migrate_1_to_2_parity_db( +pub(crate) fn migrate_1_to_2_parity_db( + client: Arc, db_path: &Path, -) -> UpgradeResult { +) -> UpgradeResult +where + C: sp_blockchain::HeaderBackend + Send + Sync, +{ log::info!("🔨 Running Frontier DB migration from version 1 to version 2. Please wait."); let mut res = UpgradeVersion1To2Summary { success: 0, @@ -231,22 +254,28 @@ pub(crate) fn migrate_1_to_2_parity_db( | -> UpgradeResult<()> { let mut transaction = vec![]; for ethereum_hash in ethereum_hashes { + let mut maybe_error = true; if let Some(substrate_hash) = db.get(crate::columns::BLOCK_MAPPING as u8, ethereum_hash).map_err(|_| io::Error::new(ErrorKind::Other, "Key does not exist") )? { // Only update version1 data let decoded = Vec::::decode(&mut &substrate_hash[..]); if decoded.is_err() || decoded.unwrap().is_empty() { - transaction.push(( - crate::columns::BLOCK_MAPPING as u8, - ethereum_hash, - Some(vec![H256::from_slice(&substrate_hash[..])].encode()), - )); - res.success += 1; - } else { - res.error.push(H256::from_slice(ethereum_hash)); + // Verify the substrate hash is part of the canonical chain. + if let Ok(Some(number)) = client.number(Block::Hash::decode(&mut &substrate_hash[..]).unwrap()) { + if let Ok(Some(header)) = client.header(sp_runtime::generic::BlockId::Number(number)) { + transaction.push(( + crate::columns::BLOCK_MAPPING as u8, + ethereum_hash, + Some(vec![header.hash()].encode()), + )); + res.success += 1; + maybe_error = false; + } + } } - } else { + } + if maybe_error { res.error.push(H256::from_slice(ethereum_hash)); } } diff --git a/client/db/src/utils.rs b/client/db/src/utils.rs index 3e9779020b..a63e314273 100644 --- a/client/db/src/utils.rs +++ b/client/db/src/utils.rs @@ -22,36 +22,49 @@ use sp_runtime::traits::Block as BlockT; use crate::{Database, DatabaseSettings, DatabaseSource, DbHash}; -pub fn open_database( +pub fn open_database( + client: Arc, config: &DatabaseSettings, -) -> Result>, String> { +) -> Result>, String> +where + C: sp_blockchain::HeaderBackend + Send + Sync, +{ let db: Arc> = match &config.source { - DatabaseSource::ParityDb { path } => open_parity_db::(path, &config.source)?, + DatabaseSource::ParityDb { path } => { + open_parity_db::(client.clone(), path, &config.source)? + } DatabaseSource::RocksDb { path, .. } => { - open_kvdb_rocksdb::(path, true, &config.source)? + open_kvdb_rocksdb::(client.clone(), path, true, &config.source)? } DatabaseSource::Auto { paritydb_path, rocksdb_path, .. 
- } => match open_kvdb_rocksdb::(rocksdb_path, false, &config.source) { - Ok(db) => db, - Err(_) => open_parity_db::(paritydb_path, &config.source)?, - }, + } => { + match open_kvdb_rocksdb::(client.clone(), rocksdb_path, false, &config.source) + { + Ok(db) => db, + Err(_) => open_parity_db::(client, paritydb_path, &config.source)?, + } + } _ => return Err("Missing feature flags `parity-db`".to_string()), }; Ok(db) } #[cfg(feature = "kvdb-rocksdb")] -fn open_kvdb_rocksdb( +fn open_kvdb_rocksdb( + client: Arc, path: &Path, create: bool, _source: &DatabaseSource, -) -> Result>, String> { +) -> Result>, String> +where + C: sp_blockchain::HeaderBackend + Send + Sync, +{ // first upgrade database to required version #[cfg(not(test))] - match crate::upgrade::upgrade_db::(path, _source) { + match crate::upgrade::upgrade_db::(client, path, _source) { Ok(_) => (), Err(_) => return Err("Frontier DB upgrade error".to_string()), } @@ -67,22 +80,30 @@ fn open_kvdb_rocksdb( } #[cfg(not(feature = "kvdb-rocksdb"))] -fn open_kvdb_rocksdb( +fn open_kvdb_rocksdb( + _client: Arc, _path: &Path, _create: bool, _source: &DatabaseSource, -) -> Result>, String> { +) -> Result>, String> +where + C: sp_blockchain::HeaderBackend + Send + Sync, +{ Err("Missing feature flags `kvdb-rocksdb`".to_string()) } #[cfg(feature = "parity-db")] -fn open_parity_db( +fn open_parity_db( + client: Arc, path: &Path, _source: &DatabaseSource, -) -> Result>, String> { +) -> Result>, String> +where + C: sp_blockchain::HeaderBackend + Send + Sync, +{ // first upgrade database to required version #[cfg(not(test))] - match crate::upgrade::upgrade_db::(path, _source) { + match crate::upgrade::upgrade_db::(client, path, _source) { Ok(_) => (), Err(_) => return Err("Frontier DB upgrade error".to_string()), } @@ -97,9 +118,13 @@ fn open_parity_db( } #[cfg(not(feature = "parity-db"))] -fn open_parity_db( +fn open_parity_db( + _client: Arc, _path: &Path, _source: &DatabaseSource, -) -> Result>, String> { +) -> Result>, String> +where + C: sp_blockchain::HeaderBackend + Send + Sync, +{ Err("Missing feature flags `parity-db`".to_string()) } diff --git a/client/mapping-sync/src/lib.rs b/client/mapping-sync/src/lib.rs index b4c96e3e38..0568f11c3f 100644 --- a/client/mapping-sync/src/lib.rs +++ b/client/mapping-sync/src/lib.rs @@ -32,10 +32,13 @@ use sp_runtime::{ traits::{Block as BlockT, Header as HeaderT, Zero}, }; -pub fn sync_block( - backend: &fc_db::Backend, +pub fn sync_block( + backend: &fc_db::Backend, header: &Block::Header, -) -> Result<(), String> { +) -> Result<(), String> +where + C: HeaderBackend + Send + Sync, +{ match fp_consensus::find_log(header.digest()) { Ok(log) => { let post_hashes = log.into_hashes(); @@ -60,7 +63,7 @@ pub fn sync_block( pub fn sync_genesis_block( client: &C, - backend: &fc_db::Backend, + backend: &fc_db::Backend, header: &Block::Header, ) -> Result<(), String> where @@ -99,7 +102,7 @@ where pub fn sync_one_block( client: &C, substrate_backend: &B, - frontier_backend: &fc_db::Backend, + frontier_backend: &fc_db::Backend, sync_from: ::Number, strategy: SyncStrategy, ) -> Result @@ -121,7 +124,7 @@ where let mut operating_header = None; while let Some(checking_tip) = current_syncing_tips.pop() { if let Some(checking_header) = - fetch_header(substrate_backend, frontier_backend, checking_tip, sync_from)? + fetch_header(client, frontier_backend, checking_tip, sync_from)? 
{ operating_header = Some(checking_header); break; @@ -163,7 +166,7 @@ where pub fn sync_blocks( client: &C, substrate_backend: &B, - frontier_backend: &fc_db::Backend, + frontier_backend: &fc_db::Backend, limit: usize, sync_from: ::Number, strategy: SyncStrategy, @@ -189,20 +192,20 @@ where Ok(synced_any) } -pub fn fetch_header( - substrate_backend: &B, - frontier_backend: &fc_db::Backend, +pub fn fetch_header( + client: &C, + frontier_backend: &fc_db::Backend, checking_tip: Block::Hash, sync_from: ::Number, ) -> Result, String> where - B: sp_blockchain::HeaderBackend + sp_blockchain::Backend, + C: sp_blockchain::HeaderBackend, { if frontier_backend.mapping().is_synced(&checking_tip)? { return Ok(None); } - match substrate_backend.header(BlockId::Hash(checking_tip)) { + match client.header(BlockId::Hash(checking_tip)) { Ok(Some(checking_header)) if checking_header.number() >= &sync_from => { Ok(Some(checking_header)) } diff --git a/client/mapping-sync/src/worker.rs b/client/mapping-sync/src/worker.rs index dd940dafc9..d6ed0af803 100644 --- a/client/mapping-sync/src/worker.rs +++ b/client/mapping-sync/src/worker.rs @@ -42,7 +42,7 @@ pub struct MappingSyncWorker { client: Arc, substrate_backend: Arc, - frontier_backend: Arc>, + frontier_backend: Arc>, have_next: bool, retry_times: usize, @@ -58,7 +58,7 @@ impl MappingSyncWorker { timeout: Duration, client: Arc, substrate_backend: Arc, - frontier_backend: Arc>, + frontier_backend: Arc>, retry_times: usize, sync_from: ::Number, strategy: SyncStrategy, diff --git a/client/rpc/src/eth/filter.rs b/client/rpc/src/eth/filter.rs index 2681611270..a9f1b4cf69 100644 --- a/client/rpc/src/eth/filter.rs +++ b/client/rpc/src/eth/filter.rs @@ -38,7 +38,7 @@ use crate::{eth::cache::EthBlockDataCacheTask, frontier_backend_client, internal pub struct EthFilter { client: Arc, - backend: Arc>, + backend: Arc>, filter_pool: FilterPool, max_stored_filters: usize, max_past_logs: u32, @@ -49,7 +49,7 @@ pub struct EthFilter { impl EthFilter { pub fn new( client: Arc, - backend: Arc>, + backend: Arc>, filter_pool: FilterPool, max_stored_filters: usize, max_past_logs: u32, diff --git a/client/rpc/src/eth/mod.rs b/client/rpc/src/eth/mod.rs index 110785f675..9df0ffbea1 100644 --- a/client/rpc/src/eth/mod.rs +++ b/client/rpc/src/eth/mod.rs @@ -68,7 +68,7 @@ pub struct Eth { is_authority: bool, signers: Vec>, overrides: Arc>, - backend: Arc>, + backend: Arc>, block_data_cache: Arc>, fee_history_cache: FeeHistoryCache, fee_history_cache_limit: FeeHistoryCacheLimit, @@ -87,7 +87,7 @@ impl Eth>, signers: Vec>, overrides: Arc>, - backend: Arc>, + backend: Arc>, is_authority: bool, block_data_cache: Arc>, fee_history_cache: FeeHistoryCache, diff --git a/client/rpc/src/lib.rs b/client/rpc/src/lib.rs index 1cbd0f4f77..0fd2d35dce 100644 --- a/client/rpc/src/lib.rs +++ b/client/rpc/src/lib.rs @@ -68,7 +68,7 @@ pub mod frontier_backend_client { pub fn native_block_id( client: &C, - backend: &fc_db::Backend, + backend: &fc_db::Backend, number: Option, ) -> RpcResult>> where @@ -88,7 +88,7 @@ pub mod frontier_backend_client { pub fn load_hash( client: &C, - backend: &fc_db::Backend, + backend: &fc_db::Backend, hash: H256, ) -> RpcResult>> where @@ -110,11 +110,12 @@ pub mod frontier_backend_client { Ok(None) } - pub fn load_cached_schema( - backend: &fc_db::Backend, + pub fn load_cached_schema( + backend: &fc_db::Backend, ) -> RpcResult>> where B: BlockT + Send + Sync + 'static, + C: HeaderBackend + Send + Sync + 'static, { let cache = backend .meta() @@ -123,12 +124,13 @@ pub mod 
frontier_backend_client { Ok(cache) } - pub fn write_cached_schema( - backend: &fc_db::Backend, + pub fn write_cached_schema( + backend: &fc_db::Backend, new_cache: Vec<(EthereumStorageSchema, H256)>, ) -> RpcResult<()> where B: BlockT + Send + Sync + 'static, + C: HeaderBackend + Send + Sync + 'static, { backend .meta() @@ -170,7 +172,7 @@ pub mod frontier_backend_client { pub fn load_transactions( client: &C, - backend: &fc_db::Backend, + backend: &fc_db::Backend, transaction_hash: H256, only_canonical: bool, ) -> RpcResult> diff --git a/template/node/src/rpc.rs b/template/node/src/rpc.rs index b73cce25dc..c959a8f4b6 100644 --- a/template/node/src/rpc.rs +++ b/template/node/src/rpc.rs @@ -48,7 +48,7 @@ pub struct FullDeps { /// EthFilterApi pool. pub filter_pool: Option, /// Backend. - pub backend: Arc>, + pub backend: Arc>, /// Maximum number of logs in a query. pub max_past_logs: u32, /// Fee history cache. diff --git a/template/node/src/service.rs b/template/node/src/service.rs index 0ac8cf1651..1b1fa9f7e2 100644 --- a/template/node/src/service.rs +++ b/template/node/src/service.rs @@ -94,7 +94,7 @@ pub fn new_partial( ( Option, ConsensusResult, - Arc>, + Arc>, Option, (FeeHistoryCache, FeeHistoryCacheLimit), ), @@ -151,6 +151,7 @@ pub fn new_partial( ); let frontier_backend = Arc::new(FrontierBackend::open( + Arc::clone(&client), &config.database, &db_config_dir(config), )?); @@ -729,7 +730,7 @@ fn spawn_frontier_tasks( task_manager: &TaskManager, client: Arc, backend: Arc, - frontier_backend: Arc>, + frontier_backend: Arc>, filter_pool: Option, overrides: Arc>, fee_history_cache: FeeHistoryCache, From 1764784f95a5c27b5fc15933ed32233d9b3b2338 Mon Sep 17 00:00:00 2001 From: tgmichel Date: Thu, 22 Sep 2022 10:48:32 +0200 Subject: [PATCH 11/17] Update tests --- Cargo.lock | 4 ++ client/db/Cargo.toml | 4 ++ client/db/src/upgrade.rs | 80 ++++++++++++++++++++++++++++++++++------ 3 files changed, 76 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 91746c8d6e..07c8360b38 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1647,13 +1647,17 @@ name = "fc-db" version = "2.0.0-dev" dependencies = [ "fp-storage", + "frontier-template-runtime", + "futures", "kvdb-rocksdb", "log", "parity-db", "parity-scale-codec", "parking_lot 0.12.1", + "sc-block-builder", "sc-client-db", "sp-blockchain", + "sp-consensus", "sp-core", "sp-database", "sp-runtime", diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index 9cd04bf6ef..d1ae85eb96 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -33,6 +33,10 @@ fp-storage = { version = "2.0.0-dev", path = "../../primitives/storage" } default = ["kvdb-rocksdb", "parity-db"] [dev-dependencies] +futures = "0.3.24" tempfile = "3.3.0" substrate-test-runtime-client = { version = "2.0.0", git = "https://github.com/paritytech/substrate", branch = "master" } sc-client-db= { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate", branch = "master", features = ["rocksdb"] } +sc-block-builder = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate", branch = "master" } +sp-consensus = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate", branch = "master" } +frontier-template-runtime = { path = "../../template/runtime" } \ No newline at end of file diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs index 0273713ef2..43b9cc1eb3 100644 --- a/client/db/src/upgrade.rs +++ b/client/db/src/upgrade.rs @@ -317,13 +317,20 @@ where #[cfg(test)] mod tests { + use 
frontier_template_runtime::RuntimeApi; + use futures::executor; + use sc_block_builder::BlockBuilderProvider; + use sp_consensus::BlockOrigin; + use substrate_test_runtime_client::{ + prelude::*, DefaultTestClientBuilderExt, TestClientBuilder, + }; use std::sync::Arc; use codec::Encode; use sp_core::H256; use sp_runtime::{ - generic::{Block, Header}, + generic::{Block, BlockId, Header}, traits::BlakeTwo256, }; use tempfile::tempdir; @@ -331,10 +338,16 @@ mod tests { type OpaqueBlock = Block, substrate_test_runtime_client::runtime::Extrinsic>; - pub fn open_frontier_backend( + pub fn open_frontier_backend( + client: Arc, setting: &crate::DatabaseSettings, - ) -> Result>, String> { - Ok(Arc::new(crate::Backend::::new(setting)?)) + ) -> Result>, String> + where + C: sp_blockchain::HeaderBackend, + { + Ok(Arc::new(crate::Backend::::new( + client, setting, + )?)) } #[test] @@ -359,34 +372,77 @@ mod tests { ]; for setting in settings { + let (client, _) = + TestClientBuilder::new().build_with_native_executor::(None); + let mut client = Arc::new(client); + + // Genesis block + let mut builder = client.new_block(Default::default()).unwrap(); + builder.push_storage_change(vec![1], None).unwrap(); + let block = builder.build().unwrap().block; + let mut previous_canon_block_hash = block.header.hash(); + executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); + let path = setting.source.path().unwrap(); let mut ethereum_hashes = vec![]; let mut substrate_hashes = vec![]; { // Create a temporary frontier secondary DB. - let backend = open_frontier_backend(&setting).expect("a temporary db was created"); + let backend = open_frontier_backend(client.clone(), &setting) + .expect("a temporary db was created"); // Fill the tmp db with some data let mut transaction = sp_database::Transaction::new(); - for _ in 0..20_010 { + for _ in 0..1000 { + // Ethereum hash let ethhash = H256::random(); - let subhash = H256::random(); + // Create two branches, and map the orphan one. + // Keep track of the canon hash to later verify the migration replaced it. 
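+ // (A1 below extends the canonical chain, while A2 is built on the same
+ // parent with a different storage change, making it the orphan sibling
+ // that the v1-style mapping is deliberately pointed at.)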
+ // A1 + let mut builder = client + .new_block_at( + &BlockId::Hash(previous_canon_block_hash), + Default::default(), + false, + ) + .unwrap(); + builder.push_storage_change(vec![1], None).unwrap(); + let block = builder.build().unwrap().block; + let next_canon_block_hash = block.header.hash(); + executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); + // A2 + let mut builder = client + .new_block_at( + &BlockId::Hash(previous_canon_block_hash), + Default::default(), + false, + ) + .unwrap(); + builder.push_storage_change(vec![2], None).unwrap(); + let block = builder.build().unwrap().block; + let orphan_block_hash = block.header.hash(); + executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); + + // Track canon hash ethereum_hashes.push(ethhash); - substrate_hashes.push(subhash); + substrate_hashes.push(next_canon_block_hash); + // Set orphan hash transaction.set( crate::columns::BLOCK_MAPPING, ðhash.encode(), - &subhash.encode(), + &orphan_block_hash.encode(), ); + previous_canon_block_hash = next_canon_block_hash; } let _ = backend.mapping().db.commit(transaction); } // Upgrade database from version 1 to 2 - let _ = super::upgrade_db::(&path, &setting.source); + let _ = super::upgrade_db::(client.clone(), &path, &setting.source); // Check data - let backend = open_frontier_backend(&setting).expect("a temporary db was created"); + let backend = + open_frontier_backend(client, &setting).expect("a temporary db was created"); for (i, original_ethereum_hash) in ethereum_hashes.iter().enumerate() { let entry = backend .mapping() @@ -395,7 +451,7 @@ mod tests { .unwrap(); // All entries now hold a single element Vec assert_eq!(entry.len(), 1); - // The Vec holds the old value + // The Vec holds the canon block hash assert_eq!(entry.first(), substrate_hashes.get(i)); } From 95c20a683a28bf3fcc2492fcda87d9bae448e5d9 Mon Sep 17 00:00:00 2001 From: tgmichel Date: Thu, 22 Sep 2022 10:56:55 +0200 Subject: [PATCH 12/17] Fix more tests --- client/cli/src/frontier_db_cmd/tests.rs | 122 ++++++++++++++---------- client/rpc/src/lib.rs | 13 ++- 2 files changed, 82 insertions(+), 53 deletions(-) diff --git a/client/cli/src/frontier_db_cmd/tests.rs b/client/cli/src/frontier_db_cmd/tests.rs index 7925dd57e8..b4f624de9c 100644 --- a/client/cli/src/frontier_db_cmd/tests.rs +++ b/client/cli/src/frontier_db_cmd/tests.rs @@ -45,10 +45,15 @@ mod tests { type OpaqueBlock = Block, substrate_test_runtime_client::runtime::Extrinsic>; - pub fn open_frontier_backend( + pub fn open_frontier_backend( + client: Arc, path: PathBuf, - ) -> Result>, String> { - Ok(Arc::new(fc_db::Backend::::new( + ) -> Result>, String> + where + C: sp_blockchain::HeaderBackend, + { + Ok(Arc::new(fc_db::Backend::::new( + client, &fc_db::DatabaseSettings { source: sc_client_db::DatabaseSource::RocksDb { path, @@ -125,11 +130,13 @@ mod tests { // Write some data in a temp file. let test_value_path = test_json_file(&tmp, &schema_test_value()); - // Create a temporary frontier secondary DB. - let backend = open_frontier_backend(tmp.into_path()).expect("a temporary db was created"); // Test client. let (client, _) = TestClientBuilder::new().build_with_native_executor::(None); + let client = Arc::new(client); + // Create a temporary frontier secondary DB. 
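+ // (The constructor takes the client handle because opening the backend
+ // can run the DB upgrade, which queries canonical headers through
+ // sp_blockchain::HeaderBackend.)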
+ let backend = open_frontier_backend(client.clone(), tmp.into_path()) + .expect("a temporary db was created"); assert_eq!(backend.meta().ethereum_schema(), Ok(None)); @@ -140,7 +147,7 @@ mod tests { Operation::Create, Column::Meta ) - .run(Arc::new(client), backend.clone()) + .run(client, backend.clone()) .is_ok()); assert_eq!( @@ -155,11 +162,13 @@ mod tests { // Write some data in a temp file. let test_value_path = test_json_file(&tmp, &schema_test_value()); - // Create a temporary frontier secondary DB. - let backend = open_frontier_backend(tmp.into_path()).expect("a temporary db was created"); // Test client. let (client, _) = TestClientBuilder::new().build_with_native_executor::(None); + let client = Arc::new(client); + // Create a temporary frontier secondary DB. + let backend = open_frontier_backend(client.clone(), tmp.into_path()) + .expect("a temporary db was created"); let data_before = vec![(EthereumStorageSchema::V2, H256::default())]; @@ -175,7 +184,7 @@ mod tests { Operation::Create, Column::Meta ) - .run(Arc::new(client), backend.clone()) + .run(client, backend.clone()) .is_err()); let data_after = backend.meta().ethereum_schema().unwrap().unwrap(); @@ -185,12 +194,13 @@ mod tests { #[test] fn schema_read_works() { let tmp = tempdir().expect("create a temporary directory"); - - // Create a temporary frontier secondary DB. - let backend = open_frontier_backend(tmp.into_path()).expect("a temporary db was created"); // Test client. let (client, _) = TestClientBuilder::new().build_with_native_executor::(None); + let client = Arc::new(client); + // Create a temporary frontier secondary DB. + let backend = open_frontier_backend(client.clone(), tmp.into_path()) + .expect("a temporary db was created"); assert_eq!(backend.meta().ethereum_schema(), Ok(None)); @@ -208,7 +218,7 @@ mod tests { Operation::Read, Column::Meta ) - .run(Arc::new(client), backend.clone()) + .run(client, backend.clone()) .is_ok()); } @@ -217,12 +227,13 @@ mod tests { let tmp = tempdir().expect("create a temporary directory"); // Write some data in a temp file. let test_value_path = test_json_file(&tmp, &schema_test_value()); - - // Create a temporary frontier secondary DB. - let backend = open_frontier_backend(tmp.into_path()).expect("a temporary db was created"); // Test client. let (client, _) = TestClientBuilder::new().build_with_native_executor::(None); + let client = Arc::new(client); + // Create a temporary frontier secondary DB. + let backend = open_frontier_backend(client.clone(), tmp.into_path()) + .expect("a temporary db was created"); assert_eq!(backend.meta().ethereum_schema(), Ok(None)); // Run the command @@ -232,7 +243,7 @@ mod tests { Operation::Update, Column::Meta ) - .run(Arc::new(client), backend.clone()) + .run(client, backend.clone()) .is_ok()); assert_eq!( @@ -244,12 +255,13 @@ mod tests { #[test] fn schema_delete_works() { let tmp = tempdir().expect("create a temporary directory"); - - // Create a temporary frontier secondary DB. - let backend = open_frontier_backend(tmp.into_path()).expect("a temporary db was created"); // Test client. let (client, _) = TestClientBuilder::new().build_with_native_executor::(None); + let client = Arc::new(client); + // Create a temporary frontier secondary DB. 
+ let backend = open_frontier_backend(client.clone(), tmp.into_path()) + .expect("a temporary db was created"); let data = vec![(EthereumStorageSchema::V2, H256::default())]; @@ -264,7 +276,7 @@ mod tests { Operation::Delete, Column::Meta ) - .run(Arc::new(client), backend.clone()) + .run(client, backend.clone()) .is_ok()); assert_eq!(backend.meta().ethereum_schema(), Ok(Some(vec![]))); @@ -275,12 +287,13 @@ mod tests { let tmp = tempdir().expect("create a temporary directory"); // Write some data in a temp file. let test_value_path = test_json_file(&tmp, &tips_test_value()); - - // Create a temporary frontier secondary DB. - let backend = open_frontier_backend(tmp.into_path()).expect("a temporary db was created"); // Test client. let (client, _) = TestClientBuilder::new().build_with_native_executor::(None); + let client = Arc::new(client); + // Create a temporary frontier secondary DB. + let backend = open_frontier_backend(client.clone(), tmp.into_path()) + .expect("a temporary db was created"); assert_eq!(backend.meta().current_syncing_tips(), Ok(vec![])); // Run the command @@ -290,7 +303,7 @@ mod tests { Operation::Create, Column::Meta ) - .run(Arc::new(client), backend.clone()) + .run(client, backend.clone()) .is_ok()); assert_eq!( @@ -304,12 +317,13 @@ mod tests { let tmp = tempdir().expect("create a temporary directory"); // Write some data in a temp file. let test_value_path = test_json_file(&tmp, &tips_test_value()); - - // Create a temporary frontier secondary DB. - let backend = open_frontier_backend(tmp.into_path()).expect("a temporary db was created"); // Test client. let (client, _) = TestClientBuilder::new().build_with_native_executor::(None); + let client = Arc::new(client); + // Create a temporary frontier secondary DB. + let backend = open_frontier_backend(client.clone(), tmp.into_path()) + .expect("a temporary db was created"); let data_before = vec![H256::default()]; @@ -324,7 +338,7 @@ mod tests { Operation::Create, Column::Meta ) - .run(Arc::new(client), backend.clone()) + .run(client, backend.clone()) .is_err()); let data_after = backend.meta().current_syncing_tips().unwrap(); @@ -334,12 +348,13 @@ mod tests { #[test] fn tips_read_works() { let tmp = tempdir().expect("create a temporary directory"); - - // Create a temporary frontier secondary DB. - let backend = open_frontier_backend(tmp.into_path()).expect("a temporary db was created"); // Test client. let (client, _) = TestClientBuilder::new().build_with_native_executor::(None); + let client = Arc::new(client); + // Create a temporary frontier secondary DB. + let backend = open_frontier_backend(client.clone(), tmp.into_path()) + .expect("a temporary db was created"); assert_eq!(backend.meta().current_syncing_tips(), Ok(vec![])); @@ -356,7 +371,7 @@ mod tests { Operation::Read, Column::Meta ) - .run(Arc::new(client), backend.clone()) + .run(client, backend.clone()) .is_ok()); } @@ -365,12 +380,13 @@ mod tests { let tmp = tempdir().expect("create a temporary directory"); // Write some data in a temp file. let test_value_path = test_json_file(&tmp, &tips_test_value()); - - // Create a temporary frontier secondary DB. - let backend = open_frontier_backend(tmp.into_path()).expect("a temporary db was created"); // Test client. let (client, _) = TestClientBuilder::new().build_with_native_executor::(None); + let client = Arc::new(client); + // Create a temporary frontier secondary DB. 
+ let backend = open_frontier_backend(client.clone(), tmp.into_path()) + .expect("a temporary db was created"); assert_eq!(backend.meta().current_syncing_tips(), Ok(vec![])); // Run the command @@ -380,7 +396,7 @@ mod tests { Operation::Update, Column::Meta ) - .run(Arc::new(client), backend.clone()) + .run(client, backend.clone()) .is_ok()); assert_eq!( @@ -392,12 +408,13 @@ mod tests { #[test] fn tips_delete_works() { let tmp = tempdir().expect("create a temporary directory"); - - // Create a temporary frontier secondary DB. - let backend = open_frontier_backend(tmp.into_path()).expect("a temporary db was created"); // Test client. let (client, _) = TestClientBuilder::new().build_with_native_executor::(None); + let client = Arc::new(client); + // Create a temporary frontier secondary DB. + let backend = open_frontier_backend(client.clone(), tmp.into_path()) + .expect("a temporary db was created"); let data = vec![H256::default()]; @@ -412,7 +429,7 @@ mod tests { Operation::Delete, Column::Meta ) - .run(Arc::new(client), backend.clone()) + .run(client, backend.clone()) .is_ok()); assert_eq!(backend.meta().current_syncing_tips(), Ok(vec![])); @@ -423,13 +440,14 @@ mod tests { let tmp = tempdir().expect("create a temporary directory"); // Write some data in a temp file. let test_value_path = test_json_file(&tmp, &schema_test_value()); - - // Create a temporary frontier secondary DB. - let backend = open_frontier_backend(tmp.into_path()).expect("a temporary db was created"); // Test client. let (client, _) = TestClientBuilder::new().build_with_native_executor::(None); let client = Arc::new(client); + // Create a temporary frontier secondary DB. + let backend = open_frontier_backend(client.clone(), tmp.into_path()) + .expect("a temporary db was created"); + let client = client; let data = vec![(EthereumStorageSchema::V1, H256::default())]; @@ -497,13 +515,14 @@ mod tests { serde_json::to_string("im_not_allowed_here").unwrap(), ) .expect("write test value json file"); - - // Create a temporary frontier secondary DB. - let backend = open_frontier_backend(tmp.into_path()).expect("a temporary db was created"); // Test client. let (client, _) = TestClientBuilder::new().build_with_native_executor::(None); let client = Arc::new(client); + // Create a temporary frontier secondary DB. + let backend = open_frontier_backend(client.clone(), tmp.into_path()) + .expect("a temporary db was created"); + let client = client; // Run the Create command assert!(cmd( @@ -557,7 +576,8 @@ mod tests { let test_value_path = test_json_file(&tmp, &TestValue::Commitment(block_hash)); // Create a temporary frontier secondary DB. - let backend = open_frontier_backend(tmp.into_path()).expect("a temporary db was created"); + let backend = open_frontier_backend(client.clone(), tmp.into_path()) + .expect("a temporary db was created"); // Run the command using some ethereum block hash as key. let ethereum_block_hash = H256::default(); @@ -640,7 +660,8 @@ mod tests { let test_value_path = test_json_file(&tmp, &TestValue::Commitment(block_a1_hash)); // Create a temporary frontier secondary DB. - let backend = open_frontier_backend(tmp.into_path()).expect("a temporary db was created"); + let backend = open_frontier_backend(client.clone(), tmp.into_path()) + .expect("a temporary db was created"); // Run the command using some ethereum block hash as key. 
let ethereum_block_hash = H256::default(); @@ -760,7 +781,8 @@ mod tests { let test_value_path = test_json_file(&tmp, &TestValue::Commitment(block_hash)); // Create a temporary frontier secondary DB. - let backend = open_frontier_backend(tmp.into_path()).expect("a temporary db was created"); + let backend = open_frontier_backend(client.clone(), tmp.into_path()) + .expect("a temporary db was created"); // Create command using some ethereum block hash as key. let ethereum_block_hash = H256::default(); diff --git a/client/rpc/src/lib.rs b/client/rpc/src/lib.rs index 0fd2d35dce..271f20d874 100644 --- a/client/rpc/src/lib.rs +++ b/client/rpc/src/lib.rs @@ -275,8 +275,15 @@ mod tests { type OpaqueBlock = Block, substrate_test_runtime_client::runtime::Extrinsic>; - fn open_frontier_backend(path: PathBuf) -> Result>, String> { - Ok(Arc::new(fc_db::Backend::::new( + fn open_frontier_backend( + client: Arc, + path: PathBuf, + ) -> Result>, String> + where + C: sp_blockchain::HeaderBackend, + { + Ok(Arc::new(fc_db::Backend::::new( + client, &fc_db::DatabaseSettings { source: sc_client_db::DatabaseSource::RocksDb { path, @@ -295,7 +302,7 @@ mod tests { let mut client = Arc::new(client); // Create a temporary frontier secondary DB. - let frontier_backend = open_frontier_backend(tmp.into_path()).unwrap(); + let frontier_backend = open_frontier_backend(client.clone(), tmp.into_path()).unwrap(); // A random ethereum block hash to use let ethereum_block_hash = sp_core::H256::random(); From 51d77efc88a6914bf7254026a88eb0c8ce4e8071 Mon Sep 17 00:00:00 2001 From: tgmichel Date: Thu, 22 Sep 2022 10:59:58 +0200 Subject: [PATCH 13/17] clippy --- client/db/src/upgrade.rs | 11 +++-------- client/db/src/utils.rs | 4 ++-- 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs index 43b9cc1eb3..a3d087084c 100644 --- a/client/db/src/upgrade.rs +++ b/client/db/src/upgrade.rs @@ -255,7 +255,7 @@ where let mut transaction = vec![]; for ethereum_hash in ethereum_hashes { let mut maybe_error = true; - if let Some(substrate_hash) = db.get(crate::columns::BLOCK_MAPPING as u8, ethereum_hash).map_err(|_| + if let Some(substrate_hash) = db.get(crate::columns::BLOCK_MAPPING as u8, ethereum_hash).map_err(|_| io::Error::new(ErrorKind::Other, "Key does not exist") )? { // Only update version1 data @@ -294,13 +294,8 @@ where let ethereum_hashes: Vec<_> = match db.iter(crate::columns::BLOCK_MAPPING as u8) { Ok(mut iter) => { let mut hashes = vec![]; - loop { - match iter.next() { - Ok(Some((k, _))) => { - hashes.push(k); - } - _ => break, - } + while let Ok(Some((k, _))) = iter.next() { + hashes.push(k); } hashes } diff --git a/client/db/src/utils.rs b/client/db/src/utils.rs index a63e314273..9fd7dca14e 100644 --- a/client/db/src/utils.rs +++ b/client/db/src/utils.rs @@ -31,10 +31,10 @@ where { let db: Arc> = match &config.source { DatabaseSource::ParityDb { path } => { - open_parity_db::(client.clone(), path, &config.source)? + open_parity_db::(client, path, &config.source)? } DatabaseSource::RocksDb { path, .. } => { - open_kvdb_rocksdb::(client.clone(), path, true, &config.source)? + open_kvdb_rocksdb::(client, path, true, &config.source)? 
} DatabaseSource::Auto { paritydb_path, From 9f8dc62a00d2bce3986ec5632875a2872197cded Mon Sep 17 00:00:00 2001 From: tgmichel Date: Thu, 22 Sep 2022 13:41:34 +0200 Subject: [PATCH 14/17] taplo --- client/db/Cargo.toml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index d1ae85eb96..1b3310bdbd 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -11,8 +11,8 @@ repository = "https://github.com/paritytech/frontier/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -parking_lot = "0.12.1" log = "0.4.17" +parking_lot = "0.12.1" # Parity codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } @@ -21,10 +21,10 @@ parity-db = { version = "0.3.16", optional = true } # Substrate sc-client-db = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate", branch = "master" } +sp-blockchain = { version = "4.0.0-dev", git = "https://github.com/paritytech/substrate", branch = "master" } sp-core = { version = "6.0.0", git = "https://github.com/paritytech/substrate", branch = "master" } sp-database = { version = "4.0.0-dev", git = "https://github.com/paritytech/substrate", branch = "master" } sp-runtime = { version = "6.0.0", git = "https://github.com/paritytech/substrate", branch = "master" } -sp-blockchain = { version = "4.0.0-dev", git = "https://github.com/paritytech/substrate", branch = "master" } # Frontier fp-storage = { version = "2.0.0-dev", path = "../../primitives/storage" } @@ -33,10 +33,10 @@ fp-storage = { version = "2.0.0-dev", path = "../../primitives/storage" } default = ["kvdb-rocksdb", "parity-db"] [dev-dependencies] +frontier-template-runtime = { path = "../../template/runtime" } futures = "0.3.24" -tempfile = "3.3.0" -substrate-test-runtime-client = { version = "2.0.0", git = "https://github.com/paritytech/substrate", branch = "master" } -sc-client-db= { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate", branch = "master", features = ["rocksdb"] } sc-block-builder = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate", branch = "master" } +sc-client-db = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate", branch = "master", features = ["rocksdb"] } sp-consensus = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate", branch = "master" } -frontier-template-runtime = { path = "../../template/runtime" } \ No newline at end of file +substrate-test-runtime-client = { version = "2.0.0", git = "https://github.com/paritytech/substrate", branch = "master" } +tempfile = "3.3.0" From 0bb855196105e089d72ebd743b31a844c7f3d932 Mon Sep 17 00:00:00 2001 From: tgmichel Date: Thu, 22 Sep 2022 16:21:47 +0200 Subject: [PATCH 15/17] Test transaction metadata --- client/db/src/upgrade.rs | 38 +++++++++++++++++++++++++++++++++----- 1 file changed, 33 insertions(+), 5 deletions(-) diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs index a3d087084c..5be8d0e169 100644 --- a/client/db/src/upgrade.rs +++ b/client/db/src/upgrade.rs @@ -382,6 +382,7 @@ mod tests { let mut ethereum_hashes = vec![]; let mut substrate_hashes = vec![]; + let mut transaction_hashes = vec![]; { // Create a temporary frontier secondary DB. 
let backend = open_frontier_backend(client.clone(), &setting) @@ -422,12 +423,30 @@ mod tests { // Track canon hash ethereum_hashes.push(ethhash); substrate_hashes.push(next_canon_block_hash); - // Set orphan hash + // Set orphan hash block mapping transaction.set( crate::columns::BLOCK_MAPPING, ðhash.encode(), &orphan_block_hash.encode(), ); + // Test also that one-to-many transaction data is not affected by the migration logic. + // Map a transaction to both canon and orphan block hashes. This is what would have + // happened in case of fork or equivocation. + let eth_tx_hash = H256::random(); + let mut metadata = vec![]; + for hash in vec![next_canon_block_hash, orphan_block_hash].iter() { + metadata.push(crate::TransactionMetadata:: { + block_hash: *hash, + ethereum_block_hash: ethhash, + ethereum_index: 0u32, + }); + } + transaction.set( + crate::columns::TRANSACTION_MAPPING, + ð_tx_hash.encode(), + &metadata.encode(), + ); + transaction_hashes.push(eth_tx_hash); previous_canon_block_hash = next_canon_block_hash; } let _ = backend.mapping().db.commit(transaction); @@ -435,19 +454,28 @@ mod tests { // Upgrade database from version 1 to 2 let _ = super::upgrade_db::(client.clone(), &path, &setting.source); - // Check data + // Check data after migration let backend = open_frontier_backend(client, &setting).expect("a temporary db was created"); for (i, original_ethereum_hash) in ethereum_hashes.iter().enumerate() { - let entry = backend + let canon_substrate_block_hash = substrate_hashes.get(i).expect("Block hash"); + let mapped_block = backend .mapping() .block_hash(original_ethereum_hash) .unwrap() .unwrap(); // All entries now hold a single element Vec - assert_eq!(entry.len(), 1); + assert_eq!(mapped_block.len(), 1); // The Vec holds the canon block hash - assert_eq!(entry.first(), substrate_hashes.get(i)); + assert_eq!(mapped_block.first(), Some(canon_substrate_block_hash)); + // Transaction hash still holds canon block data + let mapped_transaction = backend + .mapping() + .transaction_metadata(transaction_hashes.get(i).expect("Transaction hash")) + .unwrap(); + assert!(mapped_transaction + .into_iter() + .any(|tx| tx.block_hash == *canon_substrate_block_hash)); } // Upgrade db version file From 49d196ee7befda37327fb443b45fbee6d4292b4e Mon Sep 17 00:00:00 2001 From: tgmichel Date: Fri, 23 Sep 2022 09:06:49 +0200 Subject: [PATCH 16/17] Use test runtime api --- Cargo.lock | 1 - client/db/Cargo.toml | 1 - client/db/src/upgrade.rs | 7 ++++--- client/rpc/src/lib.rs | 7 ++++--- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 07c8360b38..15828b1b99 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1647,7 +1647,6 @@ name = "fc-db" version = "2.0.0-dev" dependencies = [ "fp-storage", - "frontier-template-runtime", "futures", "kvdb-rocksdb", "log", diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index 1b3310bdbd..6cfd4baacc 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -33,7 +33,6 @@ fp-storage = { version = "2.0.0-dev", path = "../../primitives/storage" } default = ["kvdb-rocksdb", "parity-db"] [dev-dependencies] -frontier-template-runtime = { path = "../../template/runtime" } futures = "0.3.24" sc-block-builder = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate", branch = "master" } sc-client-db = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate", branch = "master", features = ["rocksdb"] } diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs index 
5be8d0e169..5d43959a00 100644 --- a/client/db/src/upgrade.rs +++ b/client/db/src/upgrade.rs @@ -312,7 +312,6 @@ where #[cfg(test)] mod tests { - use frontier_template_runtime::RuntimeApi; use futures::executor; use sc_block_builder::BlockBuilderProvider; use sp_consensus::BlockOrigin; @@ -367,8 +366,10 @@ mod tests { ]; for setting in settings { - let (client, _) = - TestClientBuilder::new().build_with_native_executor::(None); + let (client, _) = TestClientBuilder::new() + .build_with_native_executor::( + None, + ); let mut client = Arc::new(client); // Genesis block diff --git a/client/rpc/src/lib.rs b/client/rpc/src/lib.rs index 271f20d874..758b84e56b 100644 --- a/client/rpc/src/lib.rs +++ b/client/rpc/src/lib.rs @@ -259,7 +259,6 @@ pub fn public_key(transaction: &EthereumTransaction) -> Result<[u8; 64], sp_io:: mod tests { use std::{path::PathBuf, sync::Arc}; - use frontier_template_runtime::RuntimeApi; use futures::executor; use sc_block_builder::BlockBuilderProvider; use sp_consensus::BlockOrigin; @@ -296,8 +295,10 @@ mod tests { #[test] fn substrate_block_hash_one_to_many_works() { let tmp = tempdir().expect("create a temporary directory"); - let (client, _) = - TestClientBuilder::new().build_with_native_executor::(None); + let (client, _) = TestClientBuilder::new() + .build_with_native_executor::( + None, + ); let mut client = Arc::new(client); From 7b6d8b1114a2882b2acc456aaf8c7582f7da5146 Mon Sep 17 00:00:00 2001 From: tgmichel Date: Wed, 5 Oct 2022 10:51:03 +0200 Subject: [PATCH 17/17] Remove unnecessary generic --- client/cli/src/frontier_db_cmd/mapping_db.rs | 4 +-- client/cli/src/frontier_db_cmd/meta_db.rs | 13 ++++------ client/cli/src/frontier_db_cmd/mod.rs | 2 +- client/cli/src/frontier_db_cmd/tests.rs | 4 +-- client/consensus/src/lib.rs | 4 +-- client/db/src/lib.rs | 16 +++++------- client/db/src/upgrade.rs | 4 +-- client/mapping-sync/src/lib.rs | 27 +++++++++----------- client/mapping-sync/src/worker.rs | 4 +-- client/rpc/src/eth/filter.rs | 4 +-- client/rpc/src/eth/mod.rs | 4 +-- client/rpc/src/lib.rs | 14 +++++----- template/node/src/rpc.rs | 2 +- template/node/src/service.rs | 4 +-- 14 files changed, 49 insertions(+), 57 deletions(-) diff --git a/client/cli/src/frontier_db_cmd/mapping_db.rs b/client/cli/src/frontier_db_cmd/mapping_db.rs index 16879463a9..0a048c98d3 100644 --- a/client/cli/src/frontier_db_cmd/mapping_db.rs +++ b/client/cli/src/frontier_db_cmd/mapping_db.rs @@ -39,7 +39,7 @@ pub enum MappingKey { pub struct MappingDb<'a, C, B: BlockT> { cmd: &'a FrontierDbCmd, client: Arc, - backend: Arc>, + backend: Arc>, } impl<'a, C, B: BlockT> MappingDb<'a, C, B> @@ -48,7 +48,7 @@ where C::Api: EthereumRuntimeRPCApi, C: sp_blockchain::HeaderBackend, { - pub fn new(cmd: &'a FrontierDbCmd, client: Arc, backend: Arc>) -> Self { + pub fn new(cmd: &'a FrontierDbCmd, client: Arc, backend: Arc>) -> Self { Self { cmd, client, diff --git a/client/cli/src/frontier_db_cmd/meta_db.rs b/client/cli/src/frontier_db_cmd/meta_db.rs index c471a8fb79..2f6c42d5ab 100644 --- a/client/cli/src/frontier_db_cmd/meta_db.rs +++ b/client/cli/src/frontier_db_cmd/meta_db.rs @@ -56,16 +56,13 @@ impl FromStr for MetaKey { } } -pub struct MetaDb<'a, B: BlockT, C> { +pub struct MetaDb<'a, B: BlockT> { cmd: &'a FrontierDbCmd, - backend: Arc>, + backend: Arc>, } -impl<'a, B: BlockT, C> MetaDb<'a, B, C> -where - C: sp_blockchain::HeaderBackend, -{ - pub fn new(cmd: &'a FrontierDbCmd, backend: Arc>) -> Self { +impl<'a, B: BlockT> MetaDb<'a, B> { + pub fn new(cmd: &'a FrontierDbCmd, backend: Arc>) -> Self 
diff --git a/client/cli/src/frontier_db_cmd/mapping_db.rs b/client/cli/src/frontier_db_cmd/mapping_db.rs
index 16879463a9..0a048c98d3 100644
--- a/client/cli/src/frontier_db_cmd/mapping_db.rs
+++ b/client/cli/src/frontier_db_cmd/mapping_db.rs
@@ -39,7 +39,7 @@ pub enum MappingKey {
 pub struct MappingDb<'a, C, B: BlockT> {
 	cmd: &'a FrontierDbCmd,
 	client: Arc<C>,
-	backend: Arc<fc_db::Backend<B, C>>,
+	backend: Arc<fc_db::Backend<B>>,
 }
 
 impl<'a, C, B: BlockT> MappingDb<'a, C, B>
@@ -48,7 +48,7 @@ where
 	C::Api: EthereumRuntimeRPCApi<B>,
 	C: sp_blockchain::HeaderBackend<B>,
 {
-	pub fn new(cmd: &'a FrontierDbCmd, client: Arc<C>, backend: Arc<fc_db::Backend<B, C>>) -> Self {
+	pub fn new(cmd: &'a FrontierDbCmd, client: Arc<C>, backend: Arc<fc_db::Backend<B>>) -> Self {
 		Self {
 			cmd,
 			client,
diff --git a/client/cli/src/frontier_db_cmd/meta_db.rs b/client/cli/src/frontier_db_cmd/meta_db.rs
index c471a8fb79..2f6c42d5ab 100644
--- a/client/cli/src/frontier_db_cmd/meta_db.rs
+++ b/client/cli/src/frontier_db_cmd/meta_db.rs
@@ -56,16 +56,13 @@ impl FromStr for MetaKey {
 	}
 }
 
-pub struct MetaDb<'a, B: BlockT, C> {
+pub struct MetaDb<'a, B: BlockT> {
 	cmd: &'a FrontierDbCmd,
-	backend: Arc<fc_db::Backend<B, C>>,
+	backend: Arc<fc_db::Backend<B>>,
 }
 
-impl<'a, B: BlockT, C> MetaDb<'a, B, C>
-where
-	C: sp_blockchain::HeaderBackend<B>,
-{
-	pub fn new(cmd: &'a FrontierDbCmd, backend: Arc<fc_db::Backend<B, C>>) -> Self {
+impl<'a, B: BlockT> MetaDb<'a, B> {
+	pub fn new(cmd: &'a FrontierDbCmd, backend: Arc<fc_db::Backend<B>>) -> Self {
 		Self { cmd, backend }
 	}
 
@@ -155,4 +152,4 @@ where
 	}
 }
 
-impl<'a, B: BlockT, C> FrontierDbMessage for MetaDb<'a, B, C> {}
+impl<'a, B: BlockT> FrontierDbMessage for MetaDb<'a, B> {}
diff --git a/client/cli/src/frontier_db_cmd/mod.rs b/client/cli/src/frontier_db_cmd/mod.rs
index 9c118a1c2e..4f491d9ce0 100644
--- a/client/cli/src/frontier_db_cmd/mod.rs
+++ b/client/cli/src/frontier_db_cmd/mod.rs
@@ -96,7 +96,7 @@ impl FrontierDbCmd {
 	pub fn run<C, B: BlockT>(
 		&self,
 		client: Arc<C>,
-		backend: Arc<fc_db::Backend<B, C>>,
+		backend: Arc<fc_db::Backend<B>>,
 	) -> sc_cli::Result<()>
 	where
 		C: sp_api::ProvideRuntimeApi<B>,
diff --git a/client/cli/src/frontier_db_cmd/tests.rs b/client/cli/src/frontier_db_cmd/tests.rs
index b4f624de9c..d1fa3b8536 100644
--- a/client/cli/src/frontier_db_cmd/tests.rs
+++ b/client/cli/src/frontier_db_cmd/tests.rs
@@ -48,11 +48,11 @@ mod tests {
 	pub fn open_frontier_backend<C>(
 		client: Arc<C>,
 		path: PathBuf,
-	) -> Result<Arc<fc_db::Backend<OpaqueBlock, C>>, String>
+	) -> Result<Arc<fc_db::Backend<OpaqueBlock>>, String>
 	where
 		C: sp_blockchain::HeaderBackend<OpaqueBlock>,
 	{
-		Ok(Arc::new(fc_db::Backend::<OpaqueBlock, C>::new(
+		Ok(Arc::new(fc_db::Backend::<OpaqueBlock>::new(
 			client,
 			&fc_db::DatabaseSettings {
 				source: sc_client_db::DatabaseSource::RocksDb {
diff --git a/client/consensus/src/lib.rs b/client/consensus/src/lib.rs
index 0849fb90cb..47ee25b062 100644
--- a/client/consensus/src/lib.rs
+++ b/client/consensus/src/lib.rs
@@ -63,7 +63,7 @@ impl From<Error> for ConsensusError {
 pub struct FrontierBlockImport<B: BlockT, I, C> {
 	inner: I,
 	client: Arc<C>,
-	backend: Arc<fc_db::Backend<B, C>>,
+	backend: Arc<fc_db::Backend<B>>,
 	_marker: PhantomData<B>,
 }
 
@@ -87,7 +87,7 @@ where
 	C::Api: EthereumRuntimeRPCApi<B>,
 	C::Api: BlockBuilderApi<B>,
 {
-	pub fn new(inner: I, client: Arc<C>, backend: Arc<fc_db::Backend<B, C>>) -> Self {
+	pub fn new(inner: I, client: Arc<C>, backend: Arc<fc_db::Backend<B>>) -> Self {
 		Self {
 			inner,
 			client,
diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs
index 0cfc98e840..826a1801ea 100644
--- a/client/db/src/lib.rs
+++ b/client/db/src/lib.rs
@@ -58,10 +58,9 @@ pub mod static_keys {
 	pub const CURRENT_SYNCING_TIPS: &[u8] = b"CURRENT_SYNCING_TIPS";
 }
 
-pub struct Backend<Block: BlockT, C> {
+pub struct Backend<Block: BlockT> {
 	meta: Arc<MetaDb<Block>>,
 	mapping: Arc<MappingDb<Block>>,
-	_marker: PhantomData<C>,
 }
 
 /// Returns the frontier database directory.
@@ -69,11 +68,8 @@ pub fn frontier_database_dir(db_config_dir: &Path, db_path: &str) -> PathBuf {
 	db_config_dir.join("frontier").join(db_path)
 }
 
-impl<Block: BlockT, C> Backend<Block, C>
-where
-	C: sp_blockchain::HeaderBackend<Block> + Send + Sync,
-{
-	pub fn open(
+impl<Block: BlockT> Backend<Block> {
+	pub fn open<C: sp_blockchain::HeaderBackend<Block>>(
 		client: Arc<C>,
 		database: &DatabaseSource,
 		db_config_dir: &Path,
@@ -104,7 +100,10 @@ where
 		)
 	}
 
-	pub fn new(client: Arc<C>, config: &DatabaseSettings) -> Result<Self, String> {
+	pub fn new<C: sp_blockchain::HeaderBackend<Block>>(
+		client: Arc<C>,
+		config: &DatabaseSettings,
+	) -> Result<Self, String> {
 		let db = utils::open_database::<Block, C>(client, config)?;
 
 		Ok(Self {
@@ -117,7 +116,6 @@ where
 				db: db.clone(),
 				_marker: PhantomData,
 			}),
-			_marker: PhantomData,
 		})
 	}
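With C demoted to a method-level parameter, call sites name only the block
type and let inference pick the client up from the argument. A sketch of a
caller under these assumptions (database_source and db_config_dir are
placeholders for however the node derives its database settings, not actual
template items):

    let frontier_backend = Arc::new(fc_db::Backend::<Block>::open(
        client.clone(),
        &database_source,
        &db_config_dir,
    )?);
    // frontier_backend: Arc<fc_db::Backend<Block>> -- the client type has
    // disappeared from the value's type entirely.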
diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs
index 5d43959a00..5660ebe198 100644
--- a/client/db/src/upgrade.rs
+++ b/client/db/src/upgrade.rs
@@ -335,11 +335,11 @@ mod tests {
 	pub fn open_frontier_backend<C>(
 		client: Arc<C>,
 		setting: &crate::DatabaseSettings,
-	) -> Result<Arc<crate::Backend<OpaqueBlock, C>>, String>
+	) -> Result<Arc<crate::Backend<OpaqueBlock>>, String>
 	where
 		C: sp_blockchain::HeaderBackend<OpaqueBlock>,
 	{
-		Ok(Arc::new(crate::Backend::<OpaqueBlock, C>::new(
+		Ok(Arc::new(crate::Backend::<OpaqueBlock>::new(
 			client, setting,
 		)?))
 	}
diff --git a/client/mapping-sync/src/lib.rs b/client/mapping-sync/src/lib.rs
index 0568f11c3f..b4c96e3e38 100644
--- a/client/mapping-sync/src/lib.rs
+++ b/client/mapping-sync/src/lib.rs
@@ -32,13 +32,10 @@ use sp_runtime::{
 	traits::{Block as BlockT, Header as HeaderT, Zero},
 };
 
-pub fn sync_block<Block: BlockT, C>(
-	backend: &fc_db::Backend<Block, C>,
+pub fn sync_block<Block: BlockT>(
+	backend: &fc_db::Backend<Block>,
 	header: &Block::Header,
-) -> Result<(), String>
-where
-	C: HeaderBackend<Block> + Send + Sync,
-{
+) -> Result<(), String> {
 	match fp_consensus::find_log(header.digest()) {
 		Ok(log) => {
 			let post_hashes = log.into_hashes();
@@ -63,7 +60,7 @@ where
 
 pub fn sync_genesis_block<Block: BlockT, C>(
 	client: &C,
-	backend: &fc_db::Backend<Block, C>,
+	backend: &fc_db::Backend<Block>,
 	header: &Block::Header,
 ) -> Result<(), String>
 where
@@ -102,7 +99,7 @@ where
 pub fn sync_one_block<Block: BlockT, C, B>(
 	client: &C,
 	substrate_backend: &B,
-	frontier_backend: &fc_db::Backend<Block, C>,
+	frontier_backend: &fc_db::Backend<Block>,
 	sync_from: <Block::Header as HeaderT>::Number,
 	strategy: SyncStrategy,
 ) -> Result<bool, String>
@@ -124,7 +121,7 @@ where
 	let mut operating_header = None;
 	while let Some(checking_tip) = current_syncing_tips.pop() {
 		if let Some(checking_header) =
-			fetch_header(client, frontier_backend, checking_tip, sync_from)?
+			fetch_header(substrate_backend, frontier_backend, checking_tip, sync_from)?
 		{
 			operating_header = Some(checking_header);
 			break;
@@ -166,7 +163,7 @@ where
 pub fn sync_blocks<Block: BlockT, C, B>(
 	client: &C,
 	substrate_backend: &B,
-	frontier_backend: &fc_db::Backend<Block, C>,
+	frontier_backend: &fc_db::Backend<Block>,
 	limit: usize,
 	sync_from: <Block::Header as HeaderT>::Number,
 	strategy: SyncStrategy,
@@ -192,20 +189,20 @@ where
 	Ok(synced_any)
 }
 
-pub fn fetch_header<Block: BlockT, C>(
-	client: &C,
-	frontier_backend: &fc_db::Backend<Block, C>,
+pub fn fetch_header<Block: BlockT, B>(
+	substrate_backend: &B,
+	frontier_backend: &fc_db::Backend<Block>,
 	checking_tip: Block::Hash,
 	sync_from: <Block::Header as HeaderT>::Number,
 ) -> Result<Option<Block::Header>, String>
 where
-	C: sp_blockchain::HeaderBackend<Block>,
+	B: sp_blockchain::HeaderBackend<Block> + sp_blockchain::Backend<Block>,
 {
 	if frontier_backend.mapping().is_synced(&checking_tip)? {
 		return Ok(None);
 	}
 
-	match client.header(BlockId::Hash(checking_tip)) {
+	match substrate_backend.header(BlockId::Hash(checking_tip)) {
 		Ok(Some(checking_header)) if checking_header.number() >= &sync_from => {
 			Ok(Some(checking_header))
 		}
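fetch_header is the one place where this is more than a rename: the header
lookup now goes through the substrate backend, bounded by
sp_blockchain::HeaderBackend plus sp_blockchain::Backend, instead of through
the client, and that is what lets the whole sync path drop C. Reduced to its
logic, the function is a guard plus a lookup; an illustrative stand-alone
version (HeaderLookup is a stand-in trait, and the sync_from number cutoff is
folded into a plain boolean for brevity):

    pub trait HeaderLookup {
        type Header;
        fn header(&self, hash: [u8; 32]) -> Result<Option<Self::Header>, String>;
    }

    pub fn fetch_header_sketch<B: HeaderLookup>(
        substrate_backend: &B,
        already_synced: bool,
        checking_tip: [u8; 32],
    ) -> Result<Option<B::Header>, String> {
        // Anything the mapping db has already synced is skipped outright...
        if already_synced {
            return Ok(None);
        }
        // ...otherwise the backend, not the client, answers the query.
        substrate_backend.header(checking_tip)
    }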
diff --git a/client/mapping-sync/src/worker.rs b/client/mapping-sync/src/worker.rs
index d6ed0af803..dd940dafc9 100644
--- a/client/mapping-sync/src/worker.rs
+++ b/client/mapping-sync/src/worker.rs
@@ -42,7 +42,7 @@ pub struct MappingSyncWorker<Block: BlockT, C, B> {
 
 	client: Arc<C>,
 	substrate_backend: Arc<B>,
-	frontier_backend: Arc<fc_db::Backend<Block, C>>,
+	frontier_backend: Arc<fc_db::Backend<Block>>,
 
 	have_next: bool,
 	retry_times: usize,
@@ -58,7 +58,7 @@ impl<Block: BlockT, C, B> MappingSyncWorker<Block, C, B> {
 		timeout: Duration,
 		client: Arc<C>,
 		substrate_backend: Arc<B>,
-		frontier_backend: Arc<fc_db::Backend<Block, C>>,
+		frontier_backend: Arc<fc_db::Backend<Block>>,
 		retry_times: usize,
 		sync_from: <Block::Header as HeaderT>::Number,
 		strategy: SyncStrategy,
diff --git a/client/rpc/src/eth/filter.rs b/client/rpc/src/eth/filter.rs
index a9f1b4cf69..2681611270 100644
--- a/client/rpc/src/eth/filter.rs
+++ b/client/rpc/src/eth/filter.rs
@@ -38,7 +38,7 @@ use crate::{eth::cache::EthBlockDataCacheTask, frontier_backend_client, internal_err};
 
 pub struct EthFilter<B: BlockT, C, BE> {
 	client: Arc<C>,
-	backend: Arc<fc_db::Backend<B, C>>,
+	backend: Arc<fc_db::Backend<B>>,
 	filter_pool: FilterPool,
 	max_stored_filters: usize,
 	max_past_logs: u32,
@@ -49,7 +49,7 @@ pub struct EthFilter<B: BlockT, C, BE> {
 impl<B: BlockT, C, BE> EthFilter<B, C, BE> {
 	pub fn new(
 		client: Arc<C>,
-		backend: Arc<fc_db::Backend<B, C>>,
+		backend: Arc<fc_db::Backend<B>>,
 		filter_pool: FilterPool,
 		max_stored_filters: usize,
 		max_past_logs: u32,
diff --git a/client/rpc/src/eth/mod.rs b/client/rpc/src/eth/mod.rs
index 9df0ffbea1..110785f675 100644
--- a/client/rpc/src/eth/mod.rs
+++ b/client/rpc/src/eth/mod.rs
@@ -68,7 +68,7 @@ pub struct Eth<B: BlockT, C, P, CT, BE, H: ExHashT, A: ChainApi> {
 	is_authority: bool,
 	signers: Vec<Box<dyn EthSigner>>,
 	overrides: Arc<OverrideHandle<B>>,
-	backend: Arc<fc_db::Backend<B, C>>,
+	backend: Arc<fc_db::Backend<B>>,
 	block_data_cache: Arc<EthBlockDataCacheTask<B>>,
 	fee_history_cache: FeeHistoryCache,
 	fee_history_cache_limit: FeeHistoryCacheLimit,
@@ -87,7 +87,7 @@ impl<B: BlockT, C, P, CT, BE, H: ExHashT, A: ChainApi> Eth<B, C, P, CT, BE, H, A> {
 		convert_transaction: Option<CT>,
 		signers: Vec<Box<dyn EthSigner>>,
 		overrides: Arc<OverrideHandle<B>>,
-		backend: Arc<fc_db::Backend<B, C>>,
+		backend: Arc<fc_db::Backend<B>>,
 		is_authority: bool,
 		block_data_cache: Arc<EthBlockDataCacheTask<B>>,
 		fee_history_cache: FeeHistoryCache,
diff --git a/client/rpc/src/lib.rs b/client/rpc/src/lib.rs
index 758b84e56b..f85a546ddc 100644
--- a/client/rpc/src/lib.rs
+++ b/client/rpc/src/lib.rs
@@ -68,7 +68,7 @@ pub mod frontier_backend_client {
 
 	pub fn native_block_id<B: BlockT, C>(
 		client: &C,
-		backend: &fc_db::Backend<B, C>,
+		backend: &fc_db::Backend<B>,
 		number: Option<BlockNumber>,
 	) -> RpcResult<Option<BlockId<B>>>
 	where
@@ -88,7 +88,7 @@ pub mod frontier_backend_client {
 
 	pub fn load_hash<B: BlockT, C>(
 		client: &C,
-		backend: &fc_db::Backend<B, C>,
+		backend: &fc_db::Backend<B>,
 		hash: H256,
 	) -> RpcResult<Option<BlockId<B>>>
 	where
@@ -111,7 +111,7 @@ pub mod frontier_backend_client {
 	}
 
 	pub fn load_cached_schema<B: BlockT, C>(
-		backend: &fc_db::Backend<B, C>,
+		backend: &fc_db::Backend<B>,
 	) -> RpcResult<Option<Vec<(EthereumStorageSchema, H256)>>>
 	where
 		B: BlockT + Send + Sync + 'static,
@@ -125,7 +125,7 @@ pub mod frontier_backend_client {
 	}
 
 	pub fn write_cached_schema<B: BlockT, C>(
-		backend: &fc_db::Backend<B, C>,
+		backend: &fc_db::Backend<B>,
 		new_cache: Vec<(EthereumStorageSchema, H256)>,
 	) -> RpcResult<()>
 	where
@@ -172,7 +172,7 @@ pub mod frontier_backend_client {
 
 	pub fn load_transactions<B: BlockT, C>(
 		client: &C,
-		backend: &fc_db::Backend<B, C>,
+		backend: &fc_db::Backend<B>,
 		transaction_hash: H256,
 		only_canonical: bool,
 	) -> RpcResult<Option<(H256, u32)>>
@@ -277,11 +277,11 @@ mod tests {
 	fn open_frontier_backend<C>(
 		client: Arc<C>,
 		path: PathBuf,
-	) -> Result<Arc<fc_db::Backend<OpaqueBlock, C>>, String>
+	) -> Result<Arc<fc_db::Backend<OpaqueBlock>>, String>
 	where
 		C: sp_blockchain::HeaderBackend<OpaqueBlock>,
 	{
-		Ok(Arc::new(fc_db::Backend::<OpaqueBlock, C>::new(
+		Ok(Arc::new(fc_db::Backend::<OpaqueBlock>::new(
 			client,
 			&fc_db::DatabaseSettings {
 				source: sc_client_db::DatabaseSource::RocksDb {
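Seen from the tests, the payoff is that a backend handle is now typed by the
block alone. A condensed usage sketch following the test code above
(OpaqueBlock and open_frontier_backend are the helpers defined in the
surrounding test modules):

    let tmp = tempdir().expect("create a temporary directory");
    let (client, _) = TestClientBuilder::new()
        .build_with_native_executor::<substrate_test_runtime_client::runtime::RuntimeApi, _>(None);
    let client = Arc::new(client);

    // The helper's return type names no client: Arc<fc_db::Backend<OpaqueBlock>>.
    let backend = open_frontier_backend(client, tmp.path().to_path_buf())
        .expect("frontier backend");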
diff --git a/template/node/src/rpc.rs b/template/node/src/rpc.rs
index c959a8f4b6..b73cce25dc 100644
--- a/template/node/src/rpc.rs
+++ b/template/node/src/rpc.rs
@@ -48,7 +48,7 @@ pub struct FullDeps<C, P, A: ChainApi> {
 	/// EthFilterApi pool.
 	pub filter_pool: Option<FilterPool>,
 	/// Backend.
-	pub backend: Arc<fc_db::Backend<Block, C>>,
+	pub backend: Arc<fc_db::Backend<Block>>,
 	/// Maximum number of logs in a query.
 	pub max_past_logs: u32,
 	/// Fee history cache.
diff --git a/template/node/src/service.rs b/template/node/src/service.rs
index 1b1fa9f7e2..8df42ce63e 100644
--- a/template/node/src/service.rs
+++ b/template/node/src/service.rs
@@ -94,7 +94,7 @@ pub fn new_partial(
 	(
 		Option<Telemetry>,
 		ConsensusResult,
-		Arc<fc_db::Backend<Block, FullClient>>,
+		Arc<fc_db::Backend<Block>>,
 		Option<FilterPool>,
 		(FeeHistoryCache, FeeHistoryCacheLimit),
 	),
@@ -730,7 +730,7 @@ fn spawn_frontier_tasks(
 	task_manager: &TaskManager,
 	client: Arc<FullClient>,
 	backend: Arc<FullBackend>,
-	frontier_backend: Arc<fc_db::Backend<Block, FullClient>>,
+	frontier_backend: Arc<fc_db::Backend<Block>>,
 	filter_pool: Option<FilterPool>,
 	overrides: Arc<OverrideHandle<Block>>,
 	fee_history_cache: FeeHistoryCache,