From 5bd0dc7aef2054cb881037a40866df3ce47b3b9e Mon Sep 17 00:00:00 2001 From: tgmichel Date: Wed, 5 Oct 2022 21:52:25 +0200 Subject: [PATCH] Frontier DB block mapping one-to-many (#862) * WIP Frontier DB block mapping one-to-many * Fix * fmt * Cleanup * Cleanup * Cleanup * fmt skip * parity db migration * Cleanup + tests * WIP retroactively fix non-canon mapped blocks * Update tests * Fix more tests * clippy * taplo * Test transaction metadata * Use test runtime api * Remove unnecessary generic --- Cargo.lock | 7 + client/cli/src/frontier_db_cmd/tests.rs | 126 +++--- client/db/Cargo.toml | 10 + client/db/src/lib.rs | 70 ++-- client/db/src/upgrade.rs | 486 ++++++++++++++++++++++++ client/db/src/utils.rs | 94 ++++- client/rpc/src/eth/block.rs | 16 +- client/rpc/src/eth/filter.rs | 8 +- client/rpc/src/eth/transaction.rs | 24 +- client/rpc/src/lib.rs | 164 +++++++- template/node/src/service.rs | 1 + 11 files changed, 896 insertions(+), 110 deletions(-) create mode 100644 client/db/src/upgrade.rs diff --git a/Cargo.lock b/Cargo.lock index 8675a98e0e..c249e9a25e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1647,14 +1647,21 @@ name = "fc-db" version = "2.0.0-dev" dependencies = [ "fp-storage", + "futures", "kvdb-rocksdb", + "log", "parity-db", "parity-scale-codec", "parking_lot 0.12.1", + "sc-block-builder", "sc-client-db", + "sp-blockchain", + "sp-consensus", "sp-core", "sp-database", "sp-runtime", + "substrate-test-runtime-client", + "tempfile", ] [[package]] diff --git a/client/cli/src/frontier_db_cmd/tests.rs b/client/cli/src/frontier_db_cmd/tests.rs index 5b05a90199..003e3176d2 100644 --- a/client/cli/src/frontier_db_cmd/tests.rs +++ b/client/cli/src/frontier_db_cmd/tests.rs @@ -46,10 +46,15 @@ mod tests { type OpaqueBlock = Block, substrate_test_runtime_client::runtime::Extrinsic>; - pub fn open_frontier_backend( + pub fn open_frontier_backend( + client: Arc, path: PathBuf, - ) -> Result>, String> { + ) -> Result>, String> + where + C: sp_blockchain::HeaderBackend, + { Ok(Arc::new(fc_db::Backend::::new( + client, &fc_db::DatabaseSettings { source: sc_client_db::DatabaseSource::RocksDb { path, @@ -126,11 +131,13 @@ mod tests { // Write some data in a temp file. let test_value_path = test_json_file(&tmp, &schema_test_value()); - // Create a temporary frontier secondary DB. - let backend = open_frontier_backend(tmp.into_path()).expect("a temporary db was created"); // Test client. let (client, _) = TestClientBuilder::new().build_with_native_executor::(None); + let client = Arc::new(client); + // Create a temporary frontier secondary DB. + let backend = open_frontier_backend(client.clone(), tmp.into_path()) + .expect("a temporary db was created"); assert_eq!(backend.meta().ethereum_schema(), Ok(None)); @@ -141,7 +148,7 @@ mod tests { Operation::Create, Column::Meta ) - .run(Arc::new(client), backend.clone()) + .run(client, backend.clone()) .is_ok()); assert_eq!( @@ -156,11 +163,13 @@ mod tests { // Write some data in a temp file. let test_value_path = test_json_file(&tmp, &schema_test_value()); - // Create a temporary frontier secondary DB. - let backend = open_frontier_backend(tmp.into_path()).expect("a temporary db was created"); // Test client. let (client, _) = TestClientBuilder::new().build_with_native_executor::(None); + let client = Arc::new(client); + // Create a temporary frontier secondary DB. 
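Note: the tests above all switch to building the client before opening the Frontier backend. `fc_db::Backend::new` now takes a client handle because opening the database can trigger the v1-to-v2 migration, which needs `sp_blockchain::HeaderBackend` to resolve canonical block hashes (see `client/db/src/upgrade.rs` further down). For reference, here is the test helper with its type parameters written out in full; treat it as a reconstruction, since `Backend::new` may additionally require `Send + Sync` bounds on `C`:

```rust
pub fn open_frontier_backend<C>(
    client: Arc<C>,
    path: PathBuf,
) -> Result<Arc<fc_db::Backend<OpaqueBlock>>, String>
where
    C: sp_blockchain::HeaderBackend<OpaqueBlock>,
{
    Ok(Arc::new(fc_db::Backend::<OpaqueBlock>::new(
        client,
        &fc_db::DatabaseSettings {
            source: sc_client_db::DatabaseSource::RocksDb { path, cache_size: 0 },
        },
    )?))
}
```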
+ let backend = open_frontier_backend(client.clone(), tmp.into_path()) + .expect("a temporary db was created"); let data_before = vec![(EthereumStorageSchema::V2, H256::default())]; @@ -176,7 +185,7 @@ mod tests { Operation::Create, Column::Meta ) - .run(Arc::new(client), backend.clone()) + .run(client, backend.clone()) .is_err()); let data_after = backend.meta().ethereum_schema().unwrap().unwrap(); @@ -186,12 +195,13 @@ mod tests { #[test] fn schema_read_works() { let tmp = tempdir().expect("create a temporary directory"); - - // Create a temporary frontier secondary DB. - let backend = open_frontier_backend(tmp.into_path()).expect("a temporary db was created"); // Test client. let (client, _) = TestClientBuilder::new().build_with_native_executor::(None); + let client = Arc::new(client); + // Create a temporary frontier secondary DB. + let backend = open_frontier_backend(client.clone(), tmp.into_path()) + .expect("a temporary db was created"); assert_eq!(backend.meta().ethereum_schema(), Ok(None)); @@ -209,7 +219,7 @@ mod tests { Operation::Read, Column::Meta ) - .run(Arc::new(client), backend.clone()) + .run(client, backend.clone()) .is_ok()); } @@ -218,12 +228,13 @@ mod tests { let tmp = tempdir().expect("create a temporary directory"); // Write some data in a temp file. let test_value_path = test_json_file(&tmp, &schema_test_value()); - - // Create a temporary frontier secondary DB. - let backend = open_frontier_backend(tmp.into_path()).expect("a temporary db was created"); // Test client. let (client, _) = TestClientBuilder::new().build_with_native_executor::(None); + let client = Arc::new(client); + // Create a temporary frontier secondary DB. + let backend = open_frontier_backend(client.clone(), tmp.into_path()) + .expect("a temporary db was created"); assert_eq!(backend.meta().ethereum_schema(), Ok(None)); // Run the command @@ -233,7 +244,7 @@ mod tests { Operation::Update, Column::Meta ) - .run(Arc::new(client), backend.clone()) + .run(client, backend.clone()) .is_ok()); assert_eq!( @@ -245,12 +256,13 @@ mod tests { #[test] fn schema_delete_works() { let tmp = tempdir().expect("create a temporary directory"); - - // Create a temporary frontier secondary DB. - let backend = open_frontier_backend(tmp.into_path()).expect("a temporary db was created"); // Test client. let (client, _) = TestClientBuilder::new().build_with_native_executor::(None); + let client = Arc::new(client); + // Create a temporary frontier secondary DB. + let backend = open_frontier_backend(client.clone(), tmp.into_path()) + .expect("a temporary db was created"); let data = vec![(EthereumStorageSchema::V2, H256::default())]; @@ -265,7 +277,7 @@ mod tests { Operation::Delete, Column::Meta ) - .run(Arc::new(client), backend.clone()) + .run(client, backend.clone()) .is_ok()); assert_eq!(backend.meta().ethereum_schema(), Ok(Some(vec![]))); @@ -276,12 +288,13 @@ mod tests { let tmp = tempdir().expect("create a temporary directory"); // Write some data in a temp file. let test_value_path = test_json_file(&tmp, &tips_test_value()); - - // Create a temporary frontier secondary DB. - let backend = open_frontier_backend(tmp.into_path()).expect("a temporary db was created"); // Test client. let (client, _) = TestClientBuilder::new().build_with_native_executor::(None); + let client = Arc::new(client); + // Create a temporary frontier secondary DB. 
+ let backend = open_frontier_backend(client.clone(), tmp.into_path()) + .expect("a temporary db was created"); assert_eq!(backend.meta().current_syncing_tips(), Ok(vec![])); // Run the command @@ -291,7 +304,7 @@ mod tests { Operation::Create, Column::Meta ) - .run(Arc::new(client), backend.clone()) + .run(client, backend.clone()) .is_ok()); assert_eq!( @@ -305,12 +318,13 @@ mod tests { let tmp = tempdir().expect("create a temporary directory"); // Write some data in a temp file. let test_value_path = test_json_file(&tmp, &tips_test_value()); - - // Create a temporary frontier secondary DB. - let backend = open_frontier_backend(tmp.into_path()).expect("a temporary db was created"); // Test client. let (client, _) = TestClientBuilder::new().build_with_native_executor::(None); + let client = Arc::new(client); + // Create a temporary frontier secondary DB. + let backend = open_frontier_backend(client.clone(), tmp.into_path()) + .expect("a temporary db was created"); let data_before = vec![H256::default()]; @@ -325,7 +339,7 @@ mod tests { Operation::Create, Column::Meta ) - .run(Arc::new(client), backend.clone()) + .run(client, backend.clone()) .is_err()); let data_after = backend.meta().current_syncing_tips().unwrap(); @@ -335,12 +349,13 @@ mod tests { #[test] fn tips_read_works() { let tmp = tempdir().expect("create a temporary directory"); - - // Create a temporary frontier secondary DB. - let backend = open_frontier_backend(tmp.into_path()).expect("a temporary db was created"); // Test client. let (client, _) = TestClientBuilder::new().build_with_native_executor::(None); + let client = Arc::new(client); + // Create a temporary frontier secondary DB. + let backend = open_frontier_backend(client.clone(), tmp.into_path()) + .expect("a temporary db was created"); assert_eq!(backend.meta().current_syncing_tips(), Ok(vec![])); @@ -357,7 +372,7 @@ mod tests { Operation::Read, Column::Meta ) - .run(Arc::new(client), backend.clone()) + .run(client, backend.clone()) .is_ok()); } @@ -366,12 +381,13 @@ mod tests { let tmp = tempdir().expect("create a temporary directory"); // Write some data in a temp file. let test_value_path = test_json_file(&tmp, &tips_test_value()); - - // Create a temporary frontier secondary DB. - let backend = open_frontier_backend(tmp.into_path()).expect("a temporary db was created"); // Test client. let (client, _) = TestClientBuilder::new().build_with_native_executor::(None); + let client = Arc::new(client); + // Create a temporary frontier secondary DB. + let backend = open_frontier_backend(client.clone(), tmp.into_path()) + .expect("a temporary db was created"); assert_eq!(backend.meta().current_syncing_tips(), Ok(vec![])); // Run the command @@ -381,7 +397,7 @@ mod tests { Operation::Update, Column::Meta ) - .run(Arc::new(client), backend.clone()) + .run(client, backend.clone()) .is_ok()); assert_eq!( @@ -393,12 +409,13 @@ mod tests { #[test] fn tips_delete_works() { let tmp = tempdir().expect("create a temporary directory"); - - // Create a temporary frontier secondary DB. - let backend = open_frontier_backend(tmp.into_path()).expect("a temporary db was created"); // Test client. let (client, _) = TestClientBuilder::new().build_with_native_executor::(None); + let client = Arc::new(client); + // Create a temporary frontier secondary DB. 
+ let backend = open_frontier_backend(client.clone(), tmp.into_path()) + .expect("a temporary db was created"); let data = vec![H256::default()]; @@ -413,7 +430,7 @@ mod tests { Operation::Delete, Column::Meta ) - .run(Arc::new(client), backend.clone()) + .run(client, backend.clone()) .is_ok()); assert_eq!(backend.meta().current_syncing_tips(), Ok(vec![])); @@ -424,13 +441,14 @@ mod tests { let tmp = tempdir().expect("create a temporary directory"); // Write some data in a temp file. let test_value_path = test_json_file(&tmp, &schema_test_value()); - - // Create a temporary frontier secondary DB. - let backend = open_frontier_backend(tmp.into_path()).expect("a temporary db was created"); // Test client. let (client, _) = TestClientBuilder::new().build_with_native_executor::(None); let client = Arc::new(client); + // Create a temporary frontier secondary DB. + let backend = open_frontier_backend(client.clone(), tmp.into_path()) + .expect("a temporary db was created"); + let client = client; let data = vec![(EthereumStorageSchema::V1, H256::default())]; @@ -498,13 +516,14 @@ mod tests { serde_json::to_string("im_not_allowed_here").unwrap(), ) .expect("write test value json file"); - - // Create a temporary frontier secondary DB. - let backend = open_frontier_backend(tmp.into_path()).expect("a temporary db was created"); // Test client. let (client, _) = TestClientBuilder::new().build_with_native_executor::(None); let client = Arc::new(client); + // Create a temporary frontier secondary DB. + let backend = open_frontier_backend(client.clone(), tmp.into_path()) + .expect("a temporary db was created"); + let client = client; // Run the Create command assert!(cmd( @@ -559,7 +578,8 @@ mod tests { let test_value_path = test_json_file(&tmp, &TestValue::Commitment(block_hash)); // Create a temporary frontier secondary DB. - let backend = open_frontier_backend(tmp.into_path()).expect("a temporary db was created"); + let backend = open_frontier_backend(client.clone(), tmp.into_path()) + .expect("a temporary db was created"); // Run the command using some ethereum block hash as key. let ethereum_block_hash = H256::default(); @@ -575,7 +595,7 @@ mod tests { // Expect the ethereum and substrate block hashes to be mapped. assert_eq!( backend.mapping().block_hash(ðereum_block_hash), - Ok(Some(block_hash)) + Ok(Some(vec![block_hash])) ); // Expect the offchain-stored transaction metadata to match the one we stored in the runtime. @@ -643,7 +663,8 @@ mod tests { let test_value_path = test_json_file(&tmp, &TestValue::Commitment(block_a1_hash)); // Create a temporary frontier secondary DB. - let backend = open_frontier_backend(tmp.into_path()).expect("a temporary db was created"); + let backend = open_frontier_backend(client.clone(), tmp.into_path()) + .expect("a temporary db was created"); // Run the command using some ethereum block hash as key. let ethereum_block_hash = H256::default(); @@ -659,7 +680,7 @@ mod tests { // Expect the ethereum and substrate block hashes to be mapped. assert_eq!( backend.mapping().block_hash(ðereum_block_hash), - Ok(Some(block_a1_hash)) + Ok(Some(vec![block_a1_hash])) ); // Expect the offchain-stored transaction metadata to match the one we stored in the runtime. @@ -709,7 +730,7 @@ mod tests { // Expect the ethereum and substrate block hashes to be mapped. assert_eq!( backend.mapping().block_hash(ðereum_block_hash), - Ok(Some(block_a2_hash)) + Ok(Some(vec![block_a1_hash, block_a2_hash])) ); // Expect the offchain-stored transaction metadata to have data for both blocks. 
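Note: the assertions above capture the core behavioral change of this patch. Committing a second substrate block for the same ethereum block hash now appends to the mapping instead of overwriting it, so reads return every substrate hash ever committed, in insertion order; neither the first nor the last entry is guaranteed canonical. Consumers are expected to filter against the chain, along the lines of this sketch (`backend`, `client`, and `ethereum_block_hash` as in the surrounding tests; the same check backs `load_hash` in `client/rpc`):

```rust
let candidates = backend
    .mapping()
    .block_hash(&ethereum_block_hash)
    .expect("mapping read")
    .unwrap_or_default();
// Keep the hash whose height maps back to itself on the canonical chain.
let canon = candidates.into_iter().find(|hash| {
    matches!(client.number(*hash), Ok(Some(number))
        if client.hash(number).ok().flatten() == Some(*hash))
});
```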
@@ -764,7 +785,8 @@ mod tests { let test_value_path = test_json_file(&tmp, &TestValue::Commitment(block_hash)); // Create a temporary frontier secondary DB. - let backend = open_frontier_backend(tmp.into_path()).expect("a temporary db was created"); + let backend = open_frontier_backend(client.clone(), tmp.into_path()) + .expect("a temporary db was created"); // Create command using some ethereum block hash as key. let ethereum_block_hash = H256::default(); diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index 79993e30fa..6cfd4baacc 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -11,6 +11,7 @@ repository = "https://github.com/paritytech/frontier/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +log = "0.4.17" parking_lot = "0.12.1" # Parity @@ -20,6 +21,7 @@ parity-db = { version = "0.3.16", optional = true } # Substrate sc-client-db = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate", branch = "master" } +sp-blockchain = { version = "4.0.0-dev", git = "https://github.com/paritytech/substrate", branch = "master" } sp-core = { version = "6.0.0", git = "https://github.com/paritytech/substrate", branch = "master" } sp-database = { version = "4.0.0-dev", git = "https://github.com/paritytech/substrate", branch = "master" } sp-runtime = { version = "6.0.0", git = "https://github.com/paritytech/substrate", branch = "master" } @@ -29,3 +31,11 @@ fp-storage = { version = "2.0.0-dev", path = "../../primitives/storage" } [features] default = ["kvdb-rocksdb", "parity-db"] + +[dev-dependencies] +futures = "0.3.24" +sc-block-builder = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate", branch = "master" } +sc-client-db = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate", branch = "master", features = ["rocksdb"] } +sp-consensus = { version = "0.10.0-dev", git = "https://github.com/paritytech/substrate", branch = "master" } +substrate-test-runtime-client = { version = "2.0.0", git = "https://github.com/paritytech/substrate", branch = "master" } +tempfile = "3.3.0" diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index c5b83da3a8..f8889384d4 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -18,6 +18,7 @@ #[cfg(feature = "parity-db")] mod parity_db_adapter; +mod upgrade; mod utils; use std::{ @@ -70,30 +71,42 @@ pub fn frontier_database_dir(db_config_dir: &Path, db_path: &str) -> PathBuf { } impl Backend { - pub fn open(database: &DatabaseSource, db_config_dir: &Path) -> Result { - Self::new(&DatabaseSettings { - source: match database { - DatabaseSource::RocksDb { .. } => DatabaseSource::RocksDb { - path: frontier_database_dir(db_config_dir, "db"), - cache_size: 0, + pub fn open>( + client: Arc, + database: &DatabaseSource, + db_config_dir: &Path, + ) -> Result { + Self::new( + client, + &DatabaseSettings { + source: match database { + DatabaseSource::RocksDb { .. } => DatabaseSource::RocksDb { + path: frontier_database_dir(db_config_dir, "db"), + cache_size: 0, + }, + DatabaseSource::ParityDb { .. } => DatabaseSource::ParityDb { + path: frontier_database_dir(db_config_dir, "paritydb"), + }, + DatabaseSource::Auto { .. 
} => DatabaseSource::Auto { + rocksdb_path: frontier_database_dir(db_config_dir, "db"), + paritydb_path: frontier_database_dir(db_config_dir, "paritydb"), + cache_size: 0, + }, + _ => { + return Err( + "Supported db sources: `rocksdb` | `paritydb` | `auto`".to_string() + ) + } }, - DatabaseSource::ParityDb { .. } => DatabaseSource::ParityDb { - path: frontier_database_dir(db_config_dir, "paritydb"), - }, - DatabaseSource::Auto { .. } => DatabaseSource::Auto { - rocksdb_path: frontier_database_dir(db_config_dir, "db"), - paritydb_path: frontier_database_dir(db_config_dir, "paritydb"), - cache_size: 0, - }, - _ => { - return Err("Supported db sources: `rocksdb` | `paritydb` | `auto`".to_string()) - } }, - }) + ) } - pub fn new(config: &DatabaseSettings) -> Result { - let db = utils::open_database(config)?; + pub fn new>( + client: Arc, + config: &DatabaseSettings, + ) -> Result { + let db = utils::open_database::(client, config)?; Ok(Self { mapping: Arc::new(MappingDb { @@ -214,13 +227,16 @@ impl MappingDb { } } - pub fn block_hash(&self, ethereum_block_hash: &H256) -> Result, String> { + pub fn block_hash( + &self, + ethereum_block_hash: &H256, + ) -> Result>, String> { match self .db .get(crate::columns::BLOCK_MAPPING, ðereum_block_hash.encode()) { Some(raw) => Ok(Some( - Block::Hash::decode(&mut &raw[..]).map_err(|e| format!("{:?}", e))?, + Vec::::decode(&mut &raw[..]).map_err(|e| format!("{:?}", e))?, )), None => Ok(None), } @@ -263,10 +279,18 @@ impl MappingDb { let mut transaction = sp_database::Transaction::new(); + let substrate_hashes = match self.block_hash(&commitment.ethereum_block_hash) { + Ok(Some(mut data)) => { + data.push(commitment.block_hash); + data + } + _ => vec![commitment.block_hash], + }; + transaction.set( crate::columns::BLOCK_MAPPING, &commitment.ethereum_block_hash.encode(), - &commitment.block_hash.encode(), + &substrate_hashes.encode(), ); for (i, ethereum_transaction_hash) in commitment diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs new file mode 100644 index 0000000000..5660ebe198 --- /dev/null +++ b/client/db/src/upgrade.rs @@ -0,0 +1,486 @@ +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 +// This file is part of Frontier. +// +// Copyright (c) 2020-2022 Parity Technologies (UK) Ltd. +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use codec::{Decode, Encode}; +use sp_runtime::traits::{Block as BlockT, Header}; + +use sp_core::H256; + +use crate::DatabaseSource; + +use std::{ + fmt, fs, + io::{self, ErrorKind, Read, Write}, + path::{Path, PathBuf}, + sync::Arc, +}; + +/// Version file name. +const VERSION_FILE_NAME: &str = "db_version"; + +/// Current db version. +const CURRENT_VERSION: u32 = 2; + +/// Number of columns in each version. +const _V1_NUM_COLUMNS: u32 = 4; +const V2_NUM_COLUMNS: u32 = 4; + +/// Database upgrade errors. 
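A note on the constants above, before the `UpgradeError` plumbing: the column count does not change between versions (`_V1_NUM_COLUMNS == V2_NUM_COLUMNS == 4`). The migration only rewrites the value format of `BLOCK_MAPPING` entries, from a bare `H256` to a SCALE-encoded `Vec<H256>`. At the byte level (a self-contained check, assuming nothing beyond `codec` and `sp-core`):

```rust
use codec::Encode;
use sp_core::H256;

fn main() {
    let substrate_hash = H256::zero();
    let v1_value = substrate_hash.encode();       // 32 raw bytes, no prefix
    let v2_value = vec![substrate_hash].encode(); // compact length prefix + 32 bytes
    assert_eq!(v1_value.len(), 32);
    assert_eq!(v2_value.len(), 33);               // 0x04 (compact 1) + hash bytes
}
```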
+#[derive(Debug)] +pub(crate) enum UpgradeError { + /// Database version cannot be read from existing db_version file. + UnknownDatabaseVersion, + /// Database version no longer supported. + UnsupportedVersion(u32), + /// Database version comes from future version of the client. + FutureDatabaseVersion(u32), + /// Common io error. + Io(io::Error), +} + +pub(crate) type UpgradeResult = Result; + +pub(crate) struct UpgradeVersion1To2Summary { + pub success: u32, + pub error: Vec, +} + +impl From for UpgradeError { + fn from(err: io::Error) -> Self { + UpgradeError::Io(err) + } +} + +impl fmt::Display for UpgradeError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + UpgradeError::UnknownDatabaseVersion => { + write!( + f, + "Database version cannot be read from existing db_version file" + ) + } + UpgradeError::UnsupportedVersion(version) => { + write!(f, "Database version no longer supported: {}", version) + } + UpgradeError::FutureDatabaseVersion(version) => { + write!( + f, + "Database version comes from future version of the client: {}", + version + ) + } + UpgradeError::Io(err) => write!(f, "Io error: {}", err), + } + } +} + +/// Upgrade database to current version. +pub(crate) fn upgrade_db( + client: Arc, + db_path: &Path, + source: &DatabaseSource, +) -> UpgradeResult<()> +where + C: sp_blockchain::HeaderBackend + Send + Sync, +{ + let db_version = current_version(db_path)?; + match db_version { + 0 => return Err(UpgradeError::UnsupportedVersion(db_version)), + 1 => { + let summary = match source { + DatabaseSource::ParityDb { .. } => { + migrate_1_to_2_parity_db::(client, db_path)? + } + DatabaseSource::RocksDb { .. } => { + migrate_1_to_2_rocks_db::(client, db_path)? + } + _ => panic!("DatabaseSource required for upgrade ParityDb | RocksDb"), + }; + if !summary.error.is_empty() { + panic!( + "Inconsistent migration from version 1 to 2. Failed on {:?}", + summary.error + ); + } else { + log::info!("✔️ Successful Frontier DB migration from version 1 to version 2 ({:?} entries).", summary.success); + } + } + CURRENT_VERSION => (), + _ => return Err(UpgradeError::FutureDatabaseVersion(db_version)), + } + update_version(db_path)?; + Ok(()) +} + +/// Reads current database version from the file at given path. +/// If the file does not exist it gets created with version 1. +pub(crate) fn current_version(path: &Path) -> UpgradeResult { + match fs::File::open(version_file_path(path)) { + Err(ref err) if err.kind() == ErrorKind::NotFound => { + fs::create_dir_all(path)?; + let mut file = fs::File::create(version_file_path(path))?; + file.write_all(format!("{}", 1).as_bytes())?; + Ok(1u32) + } + Err(_) => Err(UpgradeError::UnknownDatabaseVersion), + Ok(mut file) => { + let mut s = String::new(); + file.read_to_string(&mut s) + .map_err(|_| UpgradeError::UnknownDatabaseVersion)?; + s.parse::() + .map_err(|_| UpgradeError::UnknownDatabaseVersion) + } + } +} + +/// Writes current database version to the file. +/// Creates a new file if the version file does not exist yet. +pub(crate) fn update_version(path: &Path) -> io::Result<()> { + fs::create_dir_all(path)?; + let mut file = fs::File::create(version_file_path(path))?; + file.write_all(format!("{}", CURRENT_VERSION).as_bytes())?; + Ok(()) +} + +/// Returns the version file path. 
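Taken together, `current_version` and `update_version` above give the version file a simple lifecycle: a database directory with no `db_version` file is treated as, and stamped with, version 1; `upgrade_db` then migrates 1 to 2 and stamps `CURRENT_VERSION`. A minimal check of that contract (a sketch of a unit test; `tempfile` is already a dev-dependency of `fc-db`):

```rust
let tmp = tempfile::tempdir().expect("create a temporary directory");
// First read of a fresh directory creates db_version containing "1".
assert_eq!(current_version(tmp.path()).expect("version"), 1);
update_version(tmp.path()).expect("update");
assert_eq!(current_version(tmp.path()).expect("version"), CURRENT_VERSION);
```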
+fn version_file_path(path: &Path) -> PathBuf { + let mut file_path = path.to_owned(); + file_path.push(VERSION_FILE_NAME); + file_path +} + +/// Migration from version1 to version2: +/// - The format of the Ethereum<>Substrate block mapping changed to support equivocation. +/// - Migrating schema from One-to-one to One-to-many (EthHash: Vec) relationship. +pub(crate) fn migrate_1_to_2_rocks_db( + client: Arc, + db_path: &Path, +) -> UpgradeResult +where + C: sp_blockchain::HeaderBackend + Send + Sync, +{ + log::info!("🔨 Running Frontier DB migration from version 1 to version 2. Please wait."); + let mut res = UpgradeVersion1To2Summary { + success: 0, + error: vec![], + }; + // Process a batch of hashes in a single db transaction + #[rustfmt::skip] + let mut process_chunk = | + db: &kvdb_rocksdb::Database, + ethereum_hashes: &[std::boxed::Box<[u8]>] + | -> UpgradeResult<()> { + let mut transaction = db.transaction(); + for ethereum_hash in ethereum_hashes { + let mut maybe_error = true; + if let Some(substrate_hash) = db.get(crate::columns::BLOCK_MAPPING, ethereum_hash)? { + // Only update version1 data + let decoded = Vec::::decode(&mut &substrate_hash[..]); + if decoded.is_err() || decoded.unwrap().is_empty() { + // Verify the substrate hash is part of the canonical chain. + if let Ok(Some(number)) = client.number(Block::Hash::decode(&mut &substrate_hash[..]).unwrap()) { + if let Ok(Some(header)) = client.header(sp_runtime::generic::BlockId::Number(number)) { + transaction.put_vec( + crate::columns::BLOCK_MAPPING, + ethereum_hash, + vec![header.hash()].encode(), + ); + res.success += 1; + maybe_error = false; + } + } + } + } + if maybe_error { + res.error.push(H256::from_slice(ethereum_hash)); + } + } + db.write(transaction) + .map_err(|_| io::Error::new(ErrorKind::Other, "Failed to commit on migrate_1_to_2"))?; + Ok(()) + }; + + let db_cfg = kvdb_rocksdb::DatabaseConfig::with_columns(V2_NUM_COLUMNS); + let db = kvdb_rocksdb::Database::open(&db_cfg, db_path)?; + + // Get all the block hashes we need to update + let ethereum_hashes: Vec<_> = db + .iter(crate::columns::BLOCK_MAPPING) + .map(|entry| entry.0) + .collect(); + + // Read and update each entry in db transaction batches + const CHUNK_SIZE: usize = 10_000; + let chunks = ethereum_hashes.chunks(CHUNK_SIZE); + for chunk in chunks { + process_chunk(&db, chunk)?; + } + Ok(res) +} + +pub(crate) fn migrate_1_to_2_parity_db( + client: Arc, + db_path: &Path, +) -> UpgradeResult +where + C: sp_blockchain::HeaderBackend + Send + Sync, +{ + log::info!("🔨 Running Frontier DB migration from version 1 to version 2. Please wait."); + let mut res = UpgradeVersion1To2Summary { + success: 0, + error: vec![], + }; + // Process a batch of hashes in a single db transaction + #[rustfmt::skip] + let mut process_chunk = | + db: &parity_db::Db, + ethereum_hashes: &[Vec] + | -> UpgradeResult<()> { + let mut transaction = vec![]; + for ethereum_hash in ethereum_hashes { + let mut maybe_error = true; + if let Some(substrate_hash) = db.get(crate::columns::BLOCK_MAPPING as u8, ethereum_hash).map_err(|_| + io::Error::new(ErrorKind::Other, "Key does not exist") + )? { + // Only update version1 data + let decoded = Vec::::decode(&mut &substrate_hash[..]); + if decoded.is_err() || decoded.unwrap().is_empty() { + // Verify the substrate hash is part of the canonical chain. 
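This is the same two-step check as the RocksDB path above. First, the "is this v1 data?" test leans on SCALE: a bare 32-byte `H256` either fails to decode as `Vec<H256>` (the leading byte is read as a compact length the remaining 31 bytes cannot satisfy) or decodes to the empty vector (leading `0x00` byte), which is why both `is_err()` and `is_empty()` are treated as v1 entries. Second, the stored hash is deliberately discarded in favor of `header.hash()` at the same height, retroactively repairing mappings that pointed at non-canonical blocks. The detection predicate in isolation (sketch):

```rust
use codec::Decode;
use sp_core::H256;

/// True when `raw` looks like a v1 entry (one bare `H256`) rather than a
/// v2 `Vec<H256>` value.
fn is_v1_entry(raw: &[u8]) -> bool {
    match Vec::<H256>::decode(&mut &raw[..]) {
        Err(_) => true,
        Ok(hashes) => hashes.is_empty(),
    }
}
```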
+ if let Ok(Some(number)) = client.number(Block::Hash::decode(&mut &substrate_hash[..]).unwrap()) { + if let Ok(Some(header)) = client.header(sp_runtime::generic::BlockId::Number(number)) { + transaction.push(( + crate::columns::BLOCK_MAPPING as u8, + ethereum_hash, + Some(vec![header.hash()].encode()), + )); + res.success += 1; + maybe_error = false; + } + } + } + } + if maybe_error { + res.error.push(H256::from_slice(ethereum_hash)); + } + } + db.commit(transaction) + .map_err(|_| io::Error::new(ErrorKind::Other, "Failed to commit on migrate_1_to_2"))?; + Ok(()) + }; + + let mut db_cfg = parity_db::Options::with_columns(db_path, V2_NUM_COLUMNS as u8); + db_cfg.columns[crate::columns::BLOCK_MAPPING as usize].btree_index = true; + + let db = parity_db::Db::open_or_create(&db_cfg) + .map_err(|_| io::Error::new(ErrorKind::Other, "Failed to open db"))?; + + // Get all the block hashes we need to update + let ethereum_hashes: Vec<_> = match db.iter(crate::columns::BLOCK_MAPPING as u8) { + Ok(mut iter) => { + let mut hashes = vec![]; + while let Ok(Some((k, _))) = iter.next() { + hashes.push(k); + } + hashes + } + Err(_) => vec![], + }; + // Read and update each entry in db transaction batches + const CHUNK_SIZE: usize = 10_000; + let chunks = ethereum_hashes.chunks(CHUNK_SIZE); + for chunk in chunks { + process_chunk(&db, chunk)?; + } + Ok(res) +} + +#[cfg(test)] +mod tests { + use futures::executor; + use sc_block_builder::BlockBuilderProvider; + use sp_consensus::BlockOrigin; + use substrate_test_runtime_client::{ + prelude::*, DefaultTestClientBuilderExt, TestClientBuilder, + }; + + use std::sync::Arc; + + use codec::Encode; + use sp_core::H256; + use sp_runtime::{ + generic::{Block, BlockId, Header}, + traits::BlakeTwo256, + }; + use tempfile::tempdir; + + type OpaqueBlock = + Block, substrate_test_runtime_client::runtime::Extrinsic>; + + pub fn open_frontier_backend( + client: Arc, + setting: &crate::DatabaseSettings, + ) -> Result>, String> + where + C: sp_blockchain::HeaderBackend, + { + Ok(Arc::new(crate::Backend::::new( + client, setting, + )?)) + } + + #[test] + fn upgrade_1_to_2_works() { + let tmp_1 = tempdir().expect("create a temporary directory"); + let tmp_2 = tempdir().expect("create a temporary directory"); + + let settings = vec![ + // Rocks db + crate::DatabaseSettings { + source: sc_client_db::DatabaseSource::RocksDb { + path: tmp_1.path().to_owned(), + cache_size: 0, + }, + }, + // Parity db + crate::DatabaseSettings { + source: sc_client_db::DatabaseSource::ParityDb { + path: tmp_2.path().to_owned(), + }, + }, + ]; + + for setting in settings { + let (client, _) = TestClientBuilder::new() + .build_with_native_executor::( + None, + ); + let mut client = Arc::new(client); + + // Genesis block + let mut builder = client.new_block(Default::default()).unwrap(); + builder.push_storage_change(vec![1], None).unwrap(); + let block = builder.build().unwrap().block; + let mut previous_canon_block_hash = block.header.hash(); + executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); + + let path = setting.source.path().unwrap(); + + let mut ethereum_hashes = vec![]; + let mut substrate_hashes = vec![]; + let mut transaction_hashes = vec![]; + { + // Create a temporary frontier secondary DB. 
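One easy-to-miss detail in the ParityDb migration above: `BLOCK_MAPPING` is reconfigured as a btree-indexed column (`btree_index = true`, mirrored in `utils.rs` below), because the migration must enumerate every key in the column and parity-db's default hash-indexed columns cannot be iterated; the RocksDB path needs no equivalent switch. The enumeration pattern, extracted as a sketch (same `parity-db` 0.3 API as used above; `db_path` as in `migrate_1_to_2_parity_db`):

```rust
let mut db_cfg = parity_db::Options::with_columns(db_path, V2_NUM_COLUMNS as u8);
db_cfg.columns[crate::columns::BLOCK_MAPPING as usize].btree_index = true;
let db = parity_db::Db::open_or_create(&db_cfg)
    .map_err(|_| "Failed to open db".to_string())?;

let mut iter = db
    .iter(crate::columns::BLOCK_MAPPING as u8)
    .map_err(|_| "Column not iterable".to_string())?;
while let Ok(Some((ethereum_hash, _value))) = iter.next() {
    // Each key is an ethereum block hash whose mapping needs rewriting.
}
```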
+ let backend = open_frontier_backend(client.clone(), &setting) + .expect("a temporary db was created"); + + // Fill the tmp db with some data + let mut transaction = sp_database::Transaction::new(); + for _ in 0..1000 { + // Ethereum hash + let ethhash = H256::random(); + // Create two branches, and map the orphan one. + // Keep track of the canon hash to later verify the migration replaced it. + // A1 + let mut builder = client + .new_block_at( + &BlockId::Hash(previous_canon_block_hash), + Default::default(), + false, + ) + .unwrap(); + builder.push_storage_change(vec![1], None).unwrap(); + let block = builder.build().unwrap().block; + let next_canon_block_hash = block.header.hash(); + executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); + // A2 + let mut builder = client + .new_block_at( + &BlockId::Hash(previous_canon_block_hash), + Default::default(), + false, + ) + .unwrap(); + builder.push_storage_change(vec![2], None).unwrap(); + let block = builder.build().unwrap().block; + let orphan_block_hash = block.header.hash(); + executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); + + // Track canon hash + ethereum_hashes.push(ethhash); + substrate_hashes.push(next_canon_block_hash); + // Set orphan hash block mapping + transaction.set( + crate::columns::BLOCK_MAPPING, + ðhash.encode(), + &orphan_block_hash.encode(), + ); + // Test also that one-to-many transaction data is not affected by the migration logic. + // Map a transaction to both canon and orphan block hashes. This is what would have + // happened in case of fork or equivocation. + let eth_tx_hash = H256::random(); + let mut metadata = vec![]; + for hash in vec![next_canon_block_hash, orphan_block_hash].iter() { + metadata.push(crate::TransactionMetadata:: { + block_hash: *hash, + ethereum_block_hash: ethhash, + ethereum_index: 0u32, + }); + } + transaction.set( + crate::columns::TRANSACTION_MAPPING, + ð_tx_hash.encode(), + &metadata.encode(), + ); + transaction_hashes.push(eth_tx_hash); + previous_canon_block_hash = next_canon_block_hash; + } + let _ = backend.mapping().db.commit(transaction); + } + // Upgrade database from version 1 to 2 + let _ = super::upgrade_db::(client.clone(), &path, &setting.source); + + // Check data after migration + let backend = + open_frontier_backend(client, &setting).expect("a temporary db was created"); + for (i, original_ethereum_hash) in ethereum_hashes.iter().enumerate() { + let canon_substrate_block_hash = substrate_hashes.get(i).expect("Block hash"); + let mapped_block = backend + .mapping() + .block_hash(original_ethereum_hash) + .unwrap() + .unwrap(); + // All entries now hold a single element Vec + assert_eq!(mapped_block.len(), 1); + // The Vec holds the canon block hash + assert_eq!(mapped_block.first(), Some(canon_substrate_block_hash)); + // Transaction hash still holds canon block data + let mapped_transaction = backend + .mapping() + .transaction_metadata(transaction_hashes.get(i).expect("Transaction hash")) + .unwrap(); + assert!(mapped_transaction + .into_iter() + .any(|tx| tx.block_hash == *canon_substrate_block_hash)); + } + + // Upgrade db version file + assert_eq!(super::current_version(&path).expect("version"), 2u32); + } + } +} diff --git a/client/db/src/utils.rs b/client/db/src/utils.rs index 37542e9ca4..9fd7dca14e 100644 --- a/client/db/src/utils.rs +++ b/client/db/src/utils.rs @@ -18,51 +18,113 @@ use std::{path::Path, sync::Arc}; +use sp_runtime::traits::Block as BlockT; + use crate::{Database, DatabaseSettings, DatabaseSource, DbHash}; 
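The reworked openers below share one ordering: run the migration first (`upgrade_db` opens the raw database itself), then open the database for normal use, and only then re-stamp the version file; the `#[cfg(not(test))]` guards keep unit tests from running the migration against freshly created temp directories. Condensed into one generic shape (a sketch, with `open_backend` standing in for the feature-gated opener):

```rust
fn open_with_upgrade<Block, C, Db>(
    client: Arc<C>,
    path: &Path,
    source: &DatabaseSource,
    open_backend: impl Fn(&Path) -> Result<Db, String>,
) -> Result<Db, String>
where
    Block: sp_runtime::traits::Block,
    C: sp_blockchain::HeaderBackend<Block> + Send + Sync,
{
    // 1. Migrate first; a failure aborts the open.
    crate::upgrade::upgrade_db::<Block, C>(client, path, source)
        .map_err(|_| "Frontier DB upgrade error".to_string())?;
    // 2. Open for normal use.
    let db = open_backend(path)?;
    // 3. Stamp the version only after a successful open.
    crate::upgrade::update_version(path)
        .map_err(|_| "Cannot update db version".to_string())?;
    Ok(db)
}
```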
-pub fn open_database(config: &DatabaseSettings) -> Result>, String> { +pub fn open_database( + client: Arc, + config: &DatabaseSettings, +) -> Result>, String> +where + C: sp_blockchain::HeaderBackend + Send + Sync, +{ let db: Arc> = match &config.source { - DatabaseSource::ParityDb { path } => open_parity_db(path)?, - DatabaseSource::RocksDb { path, .. } => open_kvdb_rocksdb(path, true)?, + DatabaseSource::ParityDb { path } => { + open_parity_db::(client, path, &config.source)? + } + DatabaseSource::RocksDb { path, .. } => { + open_kvdb_rocksdb::(client, path, true, &config.source)? + } DatabaseSource::Auto { paritydb_path, rocksdb_path, .. - } => match open_kvdb_rocksdb(rocksdb_path, false) { - Ok(db) => db, - Err(_) => open_parity_db(paritydb_path)?, - }, + } => { + match open_kvdb_rocksdb::(client.clone(), rocksdb_path, false, &config.source) + { + Ok(db) => db, + Err(_) => open_parity_db::(client, paritydb_path, &config.source)?, + } + } _ => return Err("Missing feature flags `parity-db`".to_string()), }; Ok(db) } #[cfg(feature = "kvdb-rocksdb")] -fn open_kvdb_rocksdb(path: &Path, create: bool) -> Result>, String> { +fn open_kvdb_rocksdb( + client: Arc, + path: &Path, + create: bool, + _source: &DatabaseSource, +) -> Result>, String> +where + C: sp_blockchain::HeaderBackend + Send + Sync, +{ + // first upgrade database to required version + #[cfg(not(test))] + match crate::upgrade::upgrade_db::(client, path, _source) { + Ok(_) => (), + Err(_) => return Err("Frontier DB upgrade error".to_string()), + } + let mut db_config = kvdb_rocksdb::DatabaseConfig::with_columns(crate::columns::NUM_COLUMNS); db_config.create_if_missing = create; - let path = path - .to_str() - .ok_or_else(|| "Invalid database path".to_string())?; - let db = kvdb_rocksdb::Database::open(&db_config, &path).map_err(|err| format!("{}", err))?; + // write database version only after the database is succesfully opened + #[cfg(not(test))] + let _ = crate::upgrade::update_version(path).map_err(|_| "Cannot update db version".to_string())?; return Ok(sp_database::as_database(db)); } #[cfg(not(feature = "kvdb-rocksdb"))] -fn open_kvdb_rocksdb(_path: &Path, _create: bool) -> Result>, String> { +fn open_kvdb_rocksdb( + _client: Arc, + _path: &Path, + _create: bool, + _source: &DatabaseSource, +) -> Result>, String> +where + C: sp_blockchain::HeaderBackend + Send + Sync, +{ Err("Missing feature flags `kvdb-rocksdb`".to_string()) } #[cfg(feature = "parity-db")] -fn open_parity_db(path: &Path) -> Result>, String> { - let config = parity_db::Options::with_columns(path, crate::columns::NUM_COLUMNS as u8); +fn open_parity_db( + client: Arc, + path: &Path, + _source: &DatabaseSource, +) -> Result>, String> +where + C: sp_blockchain::HeaderBackend + Send + Sync, +{ + // first upgrade database to required version + #[cfg(not(test))] + match crate::upgrade::upgrade_db::(client, path, _source) { + Ok(_) => (), + Err(_) => return Err("Frontier DB upgrade error".to_string()), + } + let mut config = parity_db::Options::with_columns(path, crate::columns::NUM_COLUMNS as u8); + config.columns[crate::columns::BLOCK_MAPPING as usize].btree_index = true; + let db = parity_db::Db::open_or_create(&config).map_err(|err| format!("{}", err))?; + // write database version only after the database is succesfully opened + #[cfg(not(test))] + let _ = crate::upgrade::update_version(path).map_err(|_| "Cannot update db version".to_string())?; Ok(Arc::new(crate::parity_db_adapter::DbAdapter(db))) } #[cfg(not(feature = "parity-db"))] -fn open_parity_db(_path: 
&Path) -> Result>, String> { +fn open_parity_db( + _client: Arc, + _path: &Path, + _source: &DatabaseSource, +) -> Result>, String> +where + C: sp_blockchain::HeaderBackend + Send + Sync, +{ Err("Missing feature flags `parity-db`".to_string()) } diff --git a/client/rpc/src/eth/block.rs b/client/rpc/src/eth/block.rs index 972063f201..1997b450c0 100644 --- a/client/rpc/src/eth/block.rs +++ b/client/rpc/src/eth/block.rs @@ -48,8 +48,12 @@ where let block_data_cache = Arc::clone(&self.block_data_cache); let backend = Arc::clone(&self.backend); - let id = match frontier_backend_client::load_hash::(backend.as_ref(), hash) - .map_err(|err| internal_err(format!("{:?}", err)))? + let id = match frontier_backend_client::load_hash::( + client.as_ref(), + backend.as_ref(), + hash, + ) + .map_err(|err| internal_err(format!("{:?}", err)))? { Some(hash) => hash, _ => return Ok(None), @@ -137,8 +141,12 @@ where } pub fn block_transaction_count_by_hash(&self, hash: H256) -> Result> { - let id = match frontier_backend_client::load_hash::(self.backend.as_ref(), hash) - .map_err(|err| internal_err(format!("{:?}", err)))? + let id = match frontier_backend_client::load_hash::( + self.client.as_ref(), + self.backend.as_ref(), + hash, + ) + .map_err(|err| internal_err(format!("{:?}", err)))? { Some(hash) => hash, _ => return Ok(None), diff --git a/client/rpc/src/eth/filter.rs b/client/rpc/src/eth/filter.rs index f275c2ead4..00f09f5b24 100644 --- a/client/rpc/src/eth/filter.rs +++ b/client/rpc/src/eth/filter.rs @@ -363,8 +363,12 @@ where let mut ret: Vec = Vec::new(); if let Some(hash) = filter.block_hash { - let id = match frontier_backend_client::load_hash::(backend.as_ref(), hash) - .map_err(|err| internal_err(format!("{:?}", err)))? + let id = match frontier_backend_client::load_hash::( + client.as_ref(), + backend.as_ref(), + hash, + ) + .map_err(|err| internal_err(format!("{:?}", err)))? { Some(hash) => hash, _ => return Ok(Vec::new()), diff --git a/client/rpc/src/eth/transaction.rs b/client/rpc/src/eth/transaction.rs index 6b1489662d..e0600429a4 100644 --- a/client/rpc/src/eth/transaction.rs +++ b/client/rpc/src/eth/transaction.rs @@ -127,8 +127,12 @@ where } }; - let id = match frontier_backend_client::load_hash::(backend.as_ref(), hash) - .map_err(|err| internal_err(format!("{:?}", err)))? + let id = match frontier_backend_client::load_hash::( + client.as_ref(), + backend.as_ref(), + hash, + ) + .map_err(|err| internal_err(format!("{:?}", err)))? { Some(hash) => hash, _ => return Ok(None), @@ -172,8 +176,12 @@ where let block_data_cache = Arc::clone(&self.block_data_cache); let backend = Arc::clone(&self.backend); - let id = match frontier_backend_client::load_hash::(backend.as_ref(), hash) - .map_err(|err| internal_err(format!("{:?}", err)))? + let id = match frontier_backend_client::load_hash::( + client.as_ref(), + backend.as_ref(), + hash, + ) + .map_err(|err| internal_err(format!("{:?}", err)))? { Some(hash) => hash, _ => return Ok(None), @@ -291,8 +299,12 @@ where None => return Ok(None), }; - let id = match frontier_backend_client::load_hash::(backend.as_ref(), hash) - .map_err(|err| internal_err(format!("{:?}", err)))? + let id = match frontier_backend_client::load_hash::( + client.as_ref(), + backend.as_ref(), + hash, + ) + .map_err(|err| internal_err(format!("{:?}", err)))? 
{ Some(hash) => hash, _ => return Ok(None), diff --git a/client/rpc/src/lib.rs b/client/rpc/src/lib.rs index 2b2f2242e6..553174fb85 100644 --- a/client/rpc/src/lib.rs +++ b/client/rpc/src/lib.rs @@ -76,7 +76,9 @@ pub mod frontier_backend_client { C: HeaderBackend + Send + Sync + 'static, { Ok(match number.unwrap_or(BlockNumber::Latest) { - BlockNumber::Hash { hash, .. } => load_hash::(backend, hash).unwrap_or(None), + BlockNumber::Hash { hash, .. } => { + load_hash::(client, backend, hash).unwrap_or(None) + } BlockNumber::Num(number) => Some(BlockId::Number(number.unique_saturated_into())), BlockNumber::Latest => Some(BlockId::Hash(client.info().best_hash)), BlockNumber::Earliest => Some(BlockId::Number(Zero::zero())), @@ -86,29 +88,36 @@ pub mod frontier_backend_client { }) } - pub fn load_hash( + pub fn load_hash( + client: &C, backend: &fc_db::Backend, hash: H256, ) -> RpcResult>> where B: BlockT + Send + Sync + 'static, + C: HeaderBackend + Send + Sync + 'static, { - let substrate_hash = backend + let substrate_hashes = backend .mapping() .block_hash(&hash) .map_err(|err| internal_err(format!("fetch aux store failed: {:?}", err)))?; - if let Some(substrate_hash) = substrate_hash { - return Ok(Some(BlockId::Hash(substrate_hash))); + if let Some(substrate_hashes) = substrate_hashes { + for substrate_hash in substrate_hashes { + if is_canon::(client, substrate_hash) { + return Ok(Some(BlockId::Hash(substrate_hash))); + } + } } Ok(None) } - pub fn load_cached_schema( + pub fn load_cached_schema( backend: &fc_db::Backend, ) -> RpcResult>> where B: BlockT + Send + Sync + 'static, + C: HeaderBackend + Send + Sync + 'static, { let cache = backend .meta() @@ -117,12 +126,13 @@ pub mod frontier_backend_client { Ok(cache) } - pub fn write_cached_schema( + pub fn write_cached_schema( backend: &fc_db::Backend, new_cache: Vec<(EthereumStorageSchema, H256)>, ) -> RpcResult<()> where B: BlockT + Send + Sync + 'static, + C: HeaderBackend + Send + Sync + 'static, { backend .meta() @@ -246,3 +256,143 @@ pub fn public_key(transaction: &EthereumTransaction) -> Result<[u8; 64], sp_io:: } sp_io::crypto::secp256k1_ecdsa_recover(&sig, &msg) } + +#[cfg(test)] +mod tests { + use std::{path::PathBuf, sync::Arc}; + + use futures::executor; + use sc_block_builder::BlockBuilderProvider; + use sp_consensus::BlockOrigin; + use sp_runtime::{ + generic::{Block, BlockId, Header}, + traits::BlakeTwo256, + }; + use substrate_test_runtime_client::{ + prelude::*, DefaultTestClientBuilderExt, TestClientBuilder, + }; + use tempfile::tempdir; + + type OpaqueBlock = + Block, substrate_test_runtime_client::runtime::Extrinsic>; + + fn open_frontier_backend( + client: Arc, + path: PathBuf, + ) -> Result>, String> + where + C: sp_blockchain::HeaderBackend, + { + Ok(Arc::new(fc_db::Backend::::new( + client, + &fc_db::DatabaseSettings { + source: sc_client_db::DatabaseSource::RocksDb { + path, + cache_size: 0, + }, + }, + )?)) + } + + #[test] + fn substrate_block_hash_one_to_many_works() { + let tmp = tempdir().expect("create a temporary directory"); + let (client, _) = TestClientBuilder::new() + .build_with_native_executor::( + None, + ); + + let mut client = Arc::new(client); + + // Create a temporary frontier secondary DB. + let frontier_backend = open_frontier_backend(client.clone(), tmp.into_path()).unwrap(); + + // A random ethereum block hash to use + let ethereum_block_hash = sp_core::H256::random(); + + // G -> A1. 
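`load_hash` above returns the first mapped substrate hash that passes `is_canon`, and this test builds exactly the fork shape needed to watch the winner flip on re-org: B1 wins while it is best, then B2 wins once C1 extends its branch. `is_canon` itself is not in the hunks shown; a plausible reconstruction (hedged) checks the hash at the block's own height:

```rust
fn is_canon<B, C>(client: &C, target_hash: B::Hash) -> bool
where
    B: sp_runtime::traits::Block,
    C: sp_blockchain::HeaderBackend<B>,
{
    if let Ok(Some(number)) = client.number(target_hash) {
        if let Ok(Some(canon_hash)) = client.hash(number) {
            return canon_hash == target_hash;
        }
    }
    false
}
```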
+ let mut builder = client.new_block(Default::default()).unwrap(); + builder.push_storage_change(vec![1], None).unwrap(); + let a1 = builder.build().unwrap().block; + let a1_hash = a1.header.hash(); + executor::block_on(client.import(BlockOrigin::Own, a1)).unwrap(); + + // A1 -> B1 + let mut builder = client + .new_block_at(&BlockId::Hash(a1_hash), Default::default(), false) + .unwrap(); + builder.push_storage_change(vec![1], None).unwrap(); + let b1 = builder.build().unwrap().block; + let b1_hash = b1.header.hash(); + executor::block_on(client.import(BlockOrigin::Own, b1)).unwrap(); + + // Map B1 + let commitment = fc_db::MappingCommitment:: { + block_hash: b1_hash, + ethereum_block_hash, + ethereum_transaction_hashes: vec![], + }; + let _ = frontier_backend.mapping().write_hashes(commitment); + + // Expect B1 to be canon + assert_eq!( + super::frontier_backend_client::load_hash( + client.as_ref(), + frontier_backend.as_ref(), + ethereum_block_hash + ) + .unwrap() + .unwrap(), + BlockId::Hash(b1_hash), + ); + + // A1 -> B2 + let mut builder = client + .new_block_at(&BlockId::Hash(a1_hash), Default::default(), false) + .unwrap(); + builder.push_storage_change(vec![2], None).unwrap(); + let b2 = builder.build().unwrap().block; + let b2_hash = b2.header.hash(); + executor::block_on(client.import(BlockOrigin::Own, b2)).unwrap(); + + // Map B2 to same ethereum hash + let commitment = fc_db::MappingCommitment:: { + block_hash: b2_hash, + ethereum_block_hash, + ethereum_transaction_hashes: vec![], + }; + let _ = frontier_backend.mapping().write_hashes(commitment); + + // Still expect B1 to be canon + assert_eq!( + super::frontier_backend_client::load_hash( + client.as_ref(), + frontier_backend.as_ref(), + ethereum_block_hash + ) + .unwrap() + .unwrap(), + BlockId::Hash(b1_hash), + ); + + // B2 -> C1. B2 branch is now canon. + let mut builder = client + .new_block_at(&BlockId::Hash(b2_hash), Default::default(), false) + .unwrap(); + builder.push_storage_change(vec![1], None).unwrap(); + let c1 = builder.build().unwrap().block; + executor::block_on(client.import(BlockOrigin::Own, c1)).unwrap(); + + // Expect B2 to be new canon + assert_eq!( + super::frontier_backend_client::load_hash( + client.as_ref(), + frontier_backend.as_ref(), + ethereum_block_hash + ) + .unwrap() + .unwrap(), + BlockId::Hash(b2_hash), + ); + } +} diff --git a/template/node/src/service.rs b/template/node/src/service.rs index 0ac8cf1651..8df42ce63e 100644 --- a/template/node/src/service.rs +++ b/template/node/src/service.rs @@ -151,6 +151,7 @@ pub fn new_partial( ); let frontier_backend = Arc::new(FrontierBackend::open( + Arc::clone(&client), &config.database, &db_config_dir(config), )?);
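The one-line template change above is the only integration work a node needs: hand a clone of the client `Arc` to `FrontierBackend::open` before the rest of `new_partial` runs. Because `open` can now run the 1-to-2 migration synchronously, first startup after upgrading may pause while mappings are rewritten in batches of 10,000 entries (the `CHUNK_SIZE` used in `upgrade.rs`). Downstream call sites end up looking like this (sketch mirroring the template):

```rust
let frontier_backend = Arc::new(FrontierBackend::open(
    Arc::clone(&client),
    &config.database,
    &db_config_dir(config),
)?);
```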