From 611f4813aee4e88d883b4433ec94c83b7974a0bc Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 25 Jun 2019 10:18:38 +0300 Subject: [PATCH 01/63] DigestItem::ChangesTrieSignal --- core/sr-primitives/src/generic/digest.rs | 52 +++++++++++++++++++++++- core/sr-primitives/src/generic/mod.rs | 2 +- srml/system/src/lib.rs | 21 ++++++++-- 3 files changed, 68 insertions(+), 7 deletions(-) diff --git a/core/sr-primitives/src/generic/digest.rs b/core/sr-primitives/src/generic/digest.rs index 5edb370e50c4e..cf946be8e047c 100644 --- a/core/sr-primitives/src/generic/digest.rs +++ b/core/sr-primitives/src/generic/digest.rs @@ -21,6 +21,7 @@ use serde::Serialize; use rstd::prelude::*; +use substrate_primitives::ChangesTrieConfiguration; use crate::ConsensusEngineId; use crate::codec::{Decode, Encode, Input}; @@ -97,10 +98,32 @@ pub enum DigestItem { /// by runtimes. Seal(ConsensusEngineId, Vec), + /// Digest item that contains signal from changes tries manager to the + /// native code. + ChangesTrieSignal(ChangesTrieSignal), + /// Some other thing. Unsupported and experimental. Other(Vec), } +/// Available changes trie signals. +#[derive(PartialEq, Eq, Clone, Encode, Decode)] +#[cfg_attr(feature = "std", derive(Debug))] +pub enum ChangesTrieSignal { + /// New changes trie configuration is enacted, starting from **next block**. + /// + /// The block that emits this signal will contain changes trie (CT) that covers + /// blocks range [BEGIN; current block], where BEGIN is (order matters): + /// - LAST_TOP_LEVEL_DIGEST_BLOCK+1 if top level digest CT has ever been created + /// using current configuration AND the last top level digest CT has been created + /// at block LAST_TOP_LEVEL_DIGEST_BLOCK; + /// - LAST_CONFIGURATION_CHANGE_BLOCK+1 if there has been CT configuration change + /// before and the last configuration change happened at block + /// LAST_CONFIGURATION_CHANGE_BLOCK; + /// - 1 otherwise. 
+ NewConfiguration(Option), +} + #[cfg(feature = "std")] impl ::serde::Serialize for DigestItem { fn serialize(&self, seq: S) -> Result where S: ::serde::Serializer { @@ -131,6 +154,9 @@ pub enum DigestItemRef<'a, Hash: 'a> { /// Put a Seal on it. This is only used by native code, and is never seen /// by runtimes. Seal(&'a ConsensusEngineId, &'a Vec), + /// Digest item that contains signal from changes tries manager to the + /// native code. + ChangesTrieSignal(&'a ChangesTrieSignal), /// Any 'non-system' digest item, opaque to the native code. Other(&'a Vec), } @@ -142,11 +168,12 @@ pub enum DigestItemRef<'a, Hash: 'a> { #[repr(u32)] #[derive(Encode, Decode)] pub enum DigestItemType { + Other = 0, ChangesTrieRoot = 2, - PreRuntime = 6, Consensus = 4, Seal = 5, - Other = 0, + PreRuntime = 6, + ChangesTrieSignal = 7, } /// Type of a digest item that contains raw data; this also names the consensus engine ID where @@ -171,6 +198,7 @@ impl DigestItem { DigestItem::PreRuntime(ref v, ref s) => DigestItemRef::PreRuntime(v, s), DigestItem::Consensus(ref v, ref s) => DigestItemRef::Consensus(v, s), DigestItem::Seal(ref v, ref s) => DigestItemRef::Seal(v, s), + DigestItem::ChangesTrieSignal(ref s) => DigestItemRef::ChangesTrieSignal(s), DigestItem::Other(ref v) => DigestItemRef::Other(v), } } @@ -195,6 +223,11 @@ impl DigestItem { self.dref().as_seal() } + /// Returns `Some` if the entry is the `ChangesTrieSignal` entry. + pub fn as_changes_trie_signal(&self) -> Option<&ChangesTrieSignal> { + self.dref().as_changes_trie_signal() + } + /// Returns Some if `self` is a `DigestItem::Other`. 
pub fn as_other(&self) -> Option<&[u8]> { match *self { @@ -241,6 +274,9 @@ impl Decode for DigestItem { let vals: (ConsensusEngineId, Vec) = Decode::decode(input)?; Some(DigestItem::Seal(vals.0, vals.1)) }, + DigestItemType::ChangesTrieSignal => Some(DigestItem::ChangesTrieSignal( + Decode::decode(input)?, + )), DigestItemType::Other => Some(DigestItem::Other( Decode::decode(input)?, )), @@ -281,6 +317,14 @@ impl<'a, Hash> DigestItemRef<'a, Hash> { } } + /// Cast this digest item into `ChangesTrieSignal`. + pub fn as_changes_trie_signal(&self) -> Option<&'a ChangesTrieSignal> { + match *self { + DigestItemRef::ChangesTrieSignal(ref changes_trie_signal) => Some(changes_trie_signal), + _ => None, + } + } + /// Cast this digest item into `PreRuntime` pub fn as_other(&self) -> Option<&'a [u8]> { match *self { @@ -330,6 +374,10 @@ impl<'a, Hash: Encode> Encode for DigestItemRef<'a, Hash> { DigestItemType::PreRuntime.encode_to(&mut v); (val, data).encode_to(&mut v); }, + DigestItemRef::ChangesTrieSignal(changes_trie_signal) => { + DigestItemType::ChangesTrieSignal.encode_to(&mut v); + changes_trie_signal.encode_to(&mut v); + }, DigestItemRef::Other(val) => { DigestItemType::Other.encode_to(&mut v); val.encode_to(&mut v); diff --git a/core/sr-primitives/src/generic/mod.rs b/core/sr-primitives/src/generic/mod.rs index a4e4106780efc..8888e69a18949 100644 --- a/core/sr-primitives/src/generic/mod.rs +++ b/core/sr-primitives/src/generic/mod.rs @@ -37,7 +37,7 @@ pub use self::checked_extrinsic::CheckedExtrinsic; pub use self::header::Header; pub use self::block::{Block, SignedBlock, BlockId}; pub use self::digest::{ - Digest, DigestItem, DigestItemRef, OpaqueDigestItemId + Digest, DigestItem, DigestItemRef, OpaqueDigestItemId, ChangesTrieSignal, }; use crate::codec::Encode; diff --git a/srml/system/src/lib.rs b/srml/system/src/lib.rs index 73f8c942091e3..da42ff08262f5 100644 --- a/srml/system/src/lib.rs +++ b/srml/system/src/lib.rs @@ -82,7 +82,7 @@ use primitives::{generic, 
traits::{self, CheckEqual, SimpleArithmetic, }}; #[cfg(any(feature = "std", test))] use primitives::traits::Zero; -use substrate_primitives::storage::well_known_keys; +use substrate_primitives::{storage::well_known_keys, ChangesTrieConfiguration}; use srml_support::{ storage, decl_module, decl_event, decl_storage, StorageDoubleMap, StorageValue, StorageMap, Parameter, for_each_tuple, traits::Contains @@ -94,9 +94,6 @@ use crate::{self as system}; #[cfg(any(feature = "std", test))] use runtime_io::{twox_128, TestExternalities, Blake2Hasher}; -#[cfg(any(feature = "std", test))] -use substrate_primitives::ChangesTrieConfiguration; - /// Handler for when a new account has been created. pub trait OnNewAccount { /// A new account `who` has been registered. @@ -214,6 +211,22 @@ decl_module! { storage::unhashed::put_raw(well_known_keys::CODE, &new); } + /// Set the new changes trie configuration. + pub fn set_changes_trie_onfig(changes_trie_config: Option) { + match changes_trie_config.clone() { + Some(changes_trie_config) => storage::unhashed::put_raw( + well_known_keys::CHANGES_TRIE_CONFIG, + &changes_trie_config.encode(), + ), + None => storage::unhashed::kill(well_known_keys::CHANGES_TRIE_CONFIG), + } + + let log = generic::DigestItem::ChangesTrieSignal( + generic::ChangesTrieSignal::NewConfiguration(changes_trie_config), + ); + Self::deposit_log(log.into()); + } + /// Set some items of storage. 
fn set_storage(items: Vec) { for i in &items { From 8b65b048eae9e6a5abd60e298882478b21093403 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 26 Jun 2019 13:18:53 +0300 Subject: [PATCH 02/63] introduce changes_trie::State --- core/client/src/backend.rs | 33 +++- core/client/src/call_executor.rs | 17 +- core/client/src/genesis.rs | 14 +- core/state-machine/src/basic.rs | 7 +- core/state-machine/src/changes_trie/build.rs | 39 ++--- core/state-machine/src/changes_trie/mod.rs | 38 ++++- core/state-machine/src/ext.rs | 68 ++++---- core/state-machine/src/lib.rs | 156 ++++--------------- core/state-machine/src/overlayed_changes.rs | 76 ++------- core/state-machine/src/testing.rs | 33 ++-- 10 files changed, 190 insertions(+), 291 deletions(-) diff --git a/core/client/src/backend.rs b/core/client/src/backend.rs index 8860f61c47e70..20da280be0322 100644 --- a/core/client/src/backend.rs +++ b/core/client/src/backend.rs @@ -18,11 +18,12 @@ use std::collections::HashMap; use crate::error; -use primitives::ChangesTrieConfiguration; +use parity_codec::Decode; +use primitives::{storage::well_known_keys::CHANGES_TRIE_CONFIG, ChangesTrieConfiguration}; use runtime_primitives::{generic::BlockId, Justification, StorageOverlay, ChildrenStorageOverlay}; use runtime_primitives::traits::{Block as BlockT, NumberFor}; use state_machine::backend::Backend as StateBackend; -use state_machine::ChangesTrieStorage as StateChangesTrieStorage; +use state_machine::{ChangesTrieStorage as StateChangesTrieStorage, ChangesTrieState}; use consensus::well_known_cache_keys; use hash_db::Hasher; use trie::MemoryDB; @@ -222,3 +223,31 @@ where /// Returns true if the state for given block is available locally. fn is_local_state_available(&self, block: &BlockId) -> bool; } + +/// Return changes tries state at given block. 
+pub fn changes_tries_state_at_block<'a, B: Backend, Block: BlockT, H: Hasher>( + backend: &'a B, + block: &BlockId, +) -> error::Result>>> + where + H: Hasher, +{ + let changes_trie_storage = match backend.changes_trie_storage() { + Some(changes_trie_storage) => changes_trie_storage, + None => return Ok(None), + }; + + let state = backend.state_at(*block)?; + changes_tries_state_at_state::<_, Block, _>(&state, changes_trie_storage) +} + +/// Return changes tries state at given state. +pub fn changes_tries_state_at_state<'a, S: StateBackend, Block: BlockT, H: Hasher>( + state: &S, + storage: &'a dyn StateChangesTrieStorage>, +) -> error::Result>>> { + Ok(state.storage(CHANGES_TRIE_CONFIG) + .map_err(|e| error::Error::from_state(Box::new(e)))? + .and_then(|v| Decode::decode(&mut &v[..])) + .map(|config| ChangesTrieState::new(config, storage))) +} diff --git a/core/client/src/call_executor.rs b/core/client/src/call_executor.rs index f956f27b5058b..fac5baf565cc1 100644 --- a/core/client/src/call_executor.rs +++ b/core/client/src/call_executor.rs @@ -191,7 +191,7 @@ where let state = self.backend.state_at(*id)?; let return_data = state_machine::new( &state, - self.backend.changes_trie_storage(), + backend::changes_tries_state_at_block(&*self.backend, id)?, side_effects_handler, &mut changes, &self.executor, @@ -240,6 +240,7 @@ where } let mut state = self.backend.state_at(*at)?; + let changes_trie_state = backend::changes_tries_state_at_block(&*self.backend, at)?; match recorder { Some(recorder) => { @@ -256,7 +257,7 @@ where state_machine::new( &backend, - self.backend.changes_trie_storage(), + changes_trie_state, side_effects_handler, &mut *changes.borrow_mut(), &self.executor, @@ -273,7 +274,7 @@ where } None => state_machine::new( &state, - self.backend.changes_trie_storage(), + changes_trie_state, side_effects_handler, &mut *changes.borrow_mut(), &self.executor, @@ -293,10 +294,12 @@ where fn runtime_version(&self, id: &BlockId) -> error::Result { let mut overlay 
= OverlayedChanges::default(); let state = self.backend.state_at(*id)?; - let mut ext = Ext::new(&mut overlay, &state, self.backend.changes_trie_storage(), NeverOffchainExt::new()); + let changes_trie_state = backend::changes_tries_state_at_block(&*self.backend, id)?; + let mut ext = Ext::new(&mut overlay, &state, changes_trie_state.as_ref(), NeverOffchainExt::new()); self.executor.runtime_version(&mut ext).ok_or(error::Error::VersionInvalid.into()) } + // TODO: probably remove this method??? fn call_at_state< O: offchain::Externalities, S: state_machine::Backend, @@ -315,9 +318,13 @@ where native_call: Option, side_effects_handler: Option<&mut O>, ) -> error::Result<(NativeOrEncoded, S::Transaction, Option>)> { + let changes_trie_state = match self.backend.changes_trie_storage() { + Some(changes_trie_storage) => backend::changes_tries_state_at_state::<_, Block, _>(state, changes_trie_storage)?, + None => None, + }; state_machine::new( state, - self.backend.changes_trie_storage(), + changes_trie_state, side_effects_handler, changes, &self.executor, diff --git a/core/client/src/genesis.rs b/core/client/src/genesis.rs index 73bd1e03680bc..2e56cd93df9bf 100644 --- a/core/client/src/genesis.rs +++ b/core/client/src/genesis.rs @@ -42,7 +42,7 @@ mod tests { use super::*; use parity_codec::{Encode, Decode, Joiner}; use executor::{NativeExecutionDispatch, native_executor_instance}; - use state_machine::{self, OverlayedChanges, ExecutionStrategy, InMemoryChangesTrieStorage}; + use state_machine::{self, OverlayedChanges, ExecutionStrategy}; use state_machine::backend::InMemory; use test_client::{ runtime::genesismap::{GenesisConfig, additional_storage_with_genesis}, @@ -84,7 +84,7 @@ mod tests { state_machine::new( backend, - Some(&InMemoryChangesTrieStorage::<_, u64>::new()), + state_machine::disabled_changes_trie_state::<_, u64>(), state_machine::NeverOffchainExt::new(), &mut overlay, &executor(), @@ -97,7 +97,7 @@ mod tests { for tx in transactions.iter() { 
state_machine::new( backend, - Some(&InMemoryChangesTrieStorage::<_, u64>::new()), + state_machine::disabled_changes_trie_state::<_, u64>(), state_machine::NeverOffchainExt::new(), &mut overlay, &executor(), @@ -110,7 +110,7 @@ mod tests { let (ret_data, _, _) = state_machine::new( backend, - Some(&InMemoryChangesTrieStorage::<_, u64>::new()), + state_machine::disabled_changes_trie_state::<_, u64>(), state_machine::NeverOffchainExt::new(), &mut overlay, &executor(), @@ -157,7 +157,7 @@ mod tests { let mut overlay = OverlayedChanges::default(); let _ = state_machine::new( &backend, - Some(&InMemoryChangesTrieStorage::<_, u64>::new()), + state_machine::disabled_changes_trie_state::<_, u64>(), state_machine::NeverOffchainExt::new(), &mut overlay, &executor(), @@ -186,7 +186,7 @@ mod tests { let mut overlay = OverlayedChanges::default(); let _ = state_machine::new( &backend, - Some(&InMemoryChangesTrieStorage::<_, u64>::new()), + state_machine::disabled_changes_trie_state::<_, u64>(), state_machine::NeverOffchainExt::new(), &mut overlay, &executor(), @@ -215,7 +215,7 @@ mod tests { let mut overlay = OverlayedChanges::default(); let r = state_machine::new( &backend, - Some(&InMemoryChangesTrieStorage::<_, u64>::new()), + state_machine::disabled_changes_trie_state::<_, u64>(), state_machine::NeverOffchainExt::new(), &mut overlay, &Executor::new(None), diff --git a/core/state-machine/src/basic.rs b/core/state-machine/src/basic.rs index e9939711f1e47..a18cadda9ad8f 100644 --- a/core/state-machine/src/basic.rs +++ b/core/state-machine/src/basic.rs @@ -42,12 +42,7 @@ impl BasicExternalities { /// Create a new instance of `BasicExternalities` pub fn new_with_code(code: &[u8], mut inner: HashMap, Vec>) -> Self { let mut overlay = OverlayedChanges::default(); - super::set_changes_trie_config( - &mut overlay, - inner.get(&CHANGES_TRIE_CONFIG.to_vec()).cloned(), - false, - ).expect("changes trie configuration is correct in test env; qed"); - + 
overlay.collect_extrinsics(inner.contains_key(CHANGES_TRIE_CONFIG)); inner.insert(HEAP_PAGES.to_vec(), 8u64.encode()); BasicExternalities { diff --git a/core/state-machine/src/changes_trie/build.rs b/core/state-machine/src/changes_trie/build.rs index 487fde2e3528c..7365fa72d808d 100644 --- a/core/state-machine/src/changes_trie/build.rs +++ b/core/state-machine/src/changes_trie/build.rs @@ -33,16 +33,15 @@ use crate::changes_trie::{AnchorBlockId, Configuration, Storage, BlockNumber}; /// required data. /// Returns Ok(None) data required to prepare input pairs is not collected /// or storage is not provided. -pub fn prepare_input<'a, B, S, H, Number>( +pub fn prepare_input<'a, B, H, Number>( backend: &B, - storage: &'a S, + storage: &'a Storage, config: &'a Configuration, changes: &OverlayedChanges, parent: &'a AnchorBlockId, ) -> Result>>, String> where B: Backend, - S: Storage, H: Hasher, Number: BlockNumber, { @@ -51,7 +50,7 @@ pub fn prepare_input<'a, B, S, H, Number>( backend, parent.number.clone() + 1.into(), changes)?); - input.extend(prepare_digest_input::<_, H, Number>( + input.extend(prepare_digest_input::( parent, config, storage)?); @@ -97,13 +96,12 @@ fn prepare_extrinsics_input( } /// Prepare DigestIndex input pairs. 
-fn prepare_digest_input<'a, S, H, Number>( +fn prepare_digest_input<'a, H, Number>( parent: &'a AnchorBlockId, config: &Configuration, - storage: &'a S + storage: &'a Storage, ) -> Result> + 'a, String> where - S: Storage, H: Hasher, H::Out: 'a, Number: BlockNumber, @@ -149,7 +147,7 @@ mod test { use crate::overlayed_changes::OverlayedValue; use super::*; - fn prepare_for_build() -> (InMemory, InMemoryStorage, OverlayedChanges) { + fn prepare_for_build() -> (InMemory, InMemoryStorage, OverlayedChanges, Configuration) { let backend: InMemory<_> = vec![ (vec![100], vec![255]), (vec![101], vec![255]), @@ -217,20 +215,20 @@ mod test { extrinsics: Some(vec![1].into_iter().collect()) }), ].into_iter().collect(), - changes_trie_config: Some(Configuration { digest_interval: 4, digest_levels: 2 }), + collect_extrinsics: true, }; + let config = Configuration { digest_interval: 4, digest_levels: 2 }; - (backend, storage, changes) + (backend, storage, changes, config) } #[test] fn build_changes_trie_nodes_on_non_digest_block() { - let (backend, storage, changes) = prepare_for_build(); - let config = changes.changes_trie_config.as_ref().unwrap(); + let (backend, storage, changes, config) = prepare_for_build(); let changes_trie_nodes = prepare_input( &backend, &storage, - config, + &config, &changes, &AnchorBlockId { hash: Default::default(), number: 4 }, ).unwrap(); @@ -243,12 +241,11 @@ mod test { #[test] fn build_changes_trie_nodes_on_digest_block_l1() { - let (backend, storage, changes) = prepare_for_build(); - let config = changes.changes_trie_config.as_ref().unwrap(); + let (backend, storage, changes, config) = prepare_for_build(); let changes_trie_nodes = prepare_input( &backend, &storage, - config, + &config, &changes, &AnchorBlockId { hash: Default::default(), number: 3 }, ).unwrap(); @@ -266,12 +263,11 @@ mod test { #[test] fn build_changes_trie_nodes_on_digest_block_l2() { - let (backend, storage, changes) = prepare_for_build(); - let config = 
changes.changes_trie_config.as_ref().unwrap(); + let (backend, storage, changes, config) = prepare_for_build(); let changes_trie_nodes = prepare_input( &backend, &storage, - config, + &config, &changes, &AnchorBlockId { hash: Default::default(), number: 15 }, ).unwrap(); @@ -290,7 +286,7 @@ mod test { #[test] fn build_changes_trie_nodes_ignores_temporary_storage_values() { - let (backend, storage, mut changes) = prepare_for_build(); + let (backend, storage, mut changes, config) = prepare_for_build(); // 110: missing from backend, set to None in overlay changes.prospective.top.insert(vec![110], OverlayedValue { @@ -298,11 +294,10 @@ mod test { extrinsics: Some(vec![1].into_iter().collect()) }); - let config = changes.changes_trie_config.as_ref().unwrap(); let changes_trie_nodes = prepare_input( &backend, &storage, - config, + &config, &changes, &AnchorBlockId { hash: Default::default(), number: 3 }, ).unwrap(); diff --git a/core/state-machine/src/changes_trie/mod.rs b/core/state-machine/src/changes_trie/mod.rs index 7dc95fb5a7ba7..7d099b9e1ee62 100644 --- a/core/state-machine/src/changes_trie/mod.rs +++ b/core/state-machine/src/changes_trie/mod.rs @@ -96,6 +96,14 @@ pub struct AnchorBlockId { pub number: Number, } +/// Changes tries state at some block. +pub struct State<'a, H, Number> { + /// Configuration that is active at given block. + pub config: Configuration, + /// Underlying changes tries storage reference. + pub storage: &'a dyn Storage, +} + /// Changes trie storage. Provides access to trie roots and trie nodes. pub trait RootsStorage: Send + Sync { /// Resolve hash of the block into anchor. @@ -125,29 +133,45 @@ impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorage for TrieBackend /// Changes trie configuration. pub type Configuration = primitives::ChangesTrieConfiguration; +impl<'a, H, Number> State<'a, H, Number> { + /// Create state with given config and storage. 
+ pub fn new(config: Configuration, storage: &'a dyn Storage) -> Self { + Self { + config, + storage, + } + } +} + +/// Create state where changes tries are disabled. +pub fn disabled_state<'a, H, Number>() -> Option> { + None +} + /// Compute the changes trie root and transaction for given block. /// Returns Err(()) if unknown `parent_hash` has been passed. /// Returns Ok(None) if there's no data to perform computation. /// Panics if background storage returns an error. -pub fn compute_changes_trie_root<'a, B: Backend, S: Storage, H: Hasher, Number: BlockNumber>( +pub fn compute_changes_trie_root<'a, B: Backend, H: Hasher, Number: BlockNumber>( backend: &B, - storage: Option<&'a S>, + state: Option<&'a State<'a, H, Number>>, changes: &OverlayedChanges, parent_hash: H::Out, ) -> Result, Vec)>)>, ()> where H::Out: Ord + 'static, { - let (storage, config) = match (storage, changes.changes_trie_config.as_ref()) { - (Some(storage), Some(config)) => (storage, config), - _ => return Ok(None), + // when storage isn't provided, changes tries aren't created + let state = match state { + Some(state) => state, + None => return Ok(None), }; // build_anchor error should not be considered fatal - let parent = storage.build_anchor(parent_hash).map_err(|_| ())?; + let parent = state.storage.build_anchor(parent_hash).map_err(|_| ())?; // storage errors are considered fatal (similar to situations when runtime fetches values from storage) - let input_pairs = prepare_input::(backend, storage, config, changes, &parent) + let input_pairs = prepare_input::(backend, state.storage, &state.config, changes, &parent) .expect("storage is not allowed to fail within runtime"); match input_pairs { Some(input_pairs) => { diff --git a/core/state-machine/src/ext.rs b/core/state-machine/src/ext.rs index 5a0daeb3488b9..f22207088ba8a 100644 --- a/core/state-machine/src/ext.rs +++ b/core/state-machine/src/ext.rs @@ -19,7 +19,7 @@ use std::{error, fmt, cmp::Ord}; use log::warn; use 
crate::backend::Backend; -use crate::changes_trie::{Storage as ChangesTrieStorage, compute_changes_trie_root}; +use crate::changes_trie::{State as ChangesTrieState, compute_changes_trie_root}; use crate::{Externalities, OverlayedChanges, ChildStorageKey}; use hash_db::Hasher; use primitives::offchain; @@ -58,7 +58,7 @@ impl error::Error for Error { } /// Wraps a read-only backend, call executor, and current overlayed changes. -pub struct Ext<'a, H, N, B, T, O> +pub struct Ext<'a, H, N, B, O> where H: Hasher, B: 'a + Backend, @@ -70,14 +70,13 @@ where /// The storage transaction necessary to commit to the backend. Is cached when /// `storage_root` is called and the cache is cleared on every subsequent change. storage_transaction: Option<(B::Transaction, H::Out)>, - /// Changes trie storage to read from. - changes_trie_storage: Option<&'a T>, + /// Changes trie state to read from. + changes_trie_state: Option<&'a ChangesTrieState<'a, H, N>>, /// The changes trie transaction necessary to commit to the changes trie backend. /// Set to Some when `storage_changes_root` is called. Could be replaced later /// by calling `storage_changes_root` again => never used as cache. /// This differs from `storage_transaction` behavior, because the moment when - /// `storage_changes_root` is called matters + we need to remember additional - /// data at this moment (block number). + /// `storage_changes_root` is called matters. changes_trie_transaction: Option<(MemoryDB, H::Out)>, /// Additional externalities for offchain workers. 
/// @@ -87,11 +86,10 @@ where _phantom: ::std::marker::PhantomData, } -impl<'a, H, N, B, T, O> Ext<'a, H, N, B, T, O> +impl<'a, H, N, B, O> Ext<'a, H, N, B, O> where H: Hasher, B: 'a + Backend, - T: 'a + ChangesTrieStorage, O: 'a + offchain::Externalities, H::Out: Ord + 'static, N: crate::changes_trie::BlockNumber, @@ -100,14 +98,14 @@ where pub fn new( overlay: &'a mut OverlayedChanges, backend: &'a B, - changes_trie_storage: Option<&'a T>, + changes_trie_state: Option<&'a ChangesTrieState<'a, H, N>>, offchain_externalities: Option<&'a mut O>, ) -> Self { Ext { overlay, backend, storage_transaction: None, - changes_trie_storage, + changes_trie_state, changes_trie_transaction: None, offchain_externalities, _phantom: Default::default(), @@ -141,11 +139,10 @@ where } #[cfg(test)] -impl<'a, H, N, B, T, O> Ext<'a, H, N, B, T, O> +impl<'a, H, N, B, O> Ext<'a, H, N, B, O> where H: Hasher, B: 'a + Backend, - T: 'a + ChangesTrieStorage, O: 'a + offchain::Externalities, N: crate::changes_trie::BlockNumber, { @@ -163,11 +160,10 @@ where } } -impl<'a, B, T, H, N, O> Externalities for Ext<'a, H, N, B, T, O> +impl<'a, B, H, N, O> Externalities for Ext<'a, H, N, B, O> where H: Hasher, B: 'a + Backend, - T: 'a + ChangesTrieStorage, O: 'a + offchain::Externalities, H::Out: Ord + 'static, N: crate::changes_trie::BlockNumber, @@ -320,9 +316,10 @@ where fn storage_changes_root(&mut self, parent_hash: H::Out) -> Result, ()> { let _guard = panic_handler::AbortGuard::new(true); - let root_and_tx = compute_changes_trie_root::<_, T, H, N>( + + let root_and_tx = compute_changes_trie_root::<_, H, N>( self.backend, - self.changes_trie_storage.clone(), + self.changes_trie_state.clone(), self.overlay, parent_hash, )?; @@ -352,17 +349,18 @@ where mod tests { use hex_literal::hex; use parity_codec::Encode; - use primitives::{Blake2Hasher}; + use primitives::Blake2Hasher; use primitives::storage::well_known_keys::EXTRINSIC_INDEX; use crate::backend::InMemory; - use 
crate::changes_trie::{Configuration as ChangesTrieConfiguration, - InMemoryStorage as InMemoryChangesTrieStorage}; + use crate::changes_trie::{ + Configuration as ChangesTrieConfiguration, + InMemoryStorage as TestChangesTrieStorage, + }; use crate::overlayed_changes::OverlayedValue; use super::*; type TestBackend = InMemory; - type TestChangesTrieStorage = InMemoryChangesTrieStorage; - type TestExt<'a> = Ext<'a, Blake2Hasher, u64, TestBackend, TestChangesTrieStorage, crate::NeverOffchainExt>; + type TestExt<'a> = Ext<'a, Blake2Hasher, u64, TestBackend, crate::NeverOffchainExt>; fn prepare_overlay_with_changes() -> OverlayedChanges { OverlayedChanges { @@ -377,28 +375,22 @@ mod tests { }), ].into_iter().collect(), committed: Default::default(), - changes_trie_config: Some(ChangesTrieConfiguration { - digest_interval: 0, - digest_levels: 0, - }), + collect_extrinsics: true, } } - #[test] - fn storage_changes_root_is_none_when_storage_is_not_provided() { - let mut overlay = prepare_overlay_with_changes(); - let backend = TestBackend::default(); - let mut ext = TestExt::new(&mut overlay, &backend, None, None); - assert_eq!(ext.storage_changes_root(Default::default()).unwrap(), None); + fn changes_trie_config() -> ChangesTrieConfiguration { + ChangesTrieConfiguration { + digest_interval: 0, + digest_levels: 0, + } } #[test] - fn storage_changes_root_is_none_when_extrinsic_changes_are_none() { + fn storage_changes_root_is_none_when_state_is_not_provided() { let mut overlay = prepare_overlay_with_changes(); - overlay.changes_trie_config = None; - let storage = TestChangesTrieStorage::with_blocks(vec![(100, Default::default())]); let backend = TestBackend::default(); - let mut ext = TestExt::new(&mut overlay, &backend, Some(&storage), None); + let mut ext = TestExt::new(&mut overlay, &backend, None, None); assert_eq!(ext.storage_changes_root(Default::default()).unwrap(), None); } @@ -406,8 +398,9 @@ mod tests { fn 
storage_changes_root_is_some_when_extrinsic_changes_are_non_empty() { let mut overlay = prepare_overlay_with_changes(); let storage = TestChangesTrieStorage::with_blocks(vec![(99, Default::default())]); + let state = Some(ChangesTrieState::new(changes_trie_config(), &storage)); let backend = TestBackend::default(); - let mut ext = TestExt::new(&mut overlay, &backend, Some(&storage), None); + let mut ext = TestExt::new(&mut overlay, &backend, state.as_ref(), None); assert_eq!(ext.storage_changes_root(Default::default()).unwrap(), Some(hex!("5b829920b9c8d554a19ee2a1ba593c4f2ee6fc32822d083e04236d693e8358d5").into())); } @@ -417,8 +410,9 @@ mod tests { let mut overlay = prepare_overlay_with_changes(); overlay.prospective.top.get_mut(&vec![1]).unwrap().value = None; let storage = TestChangesTrieStorage::with_blocks(vec![(99, Default::default())]); + let state = Some(ChangesTrieState::new(changes_trie_config(), &storage)); let backend = TestBackend::default(); - let mut ext = TestExt::new(&mut overlay, &backend, Some(&storage), None); + let mut ext = TestExt::new(&mut overlay, &backend, state.as_ref(), None); assert_eq!(ext.storage_changes_root(Default::default()).unwrap(), Some(hex!("bcf494e41e29a15c9ae5caa053fe3cb8b446ee3e02a254efbdec7a19235b76e4").into())); } diff --git a/core/state-machine/src/lib.rs b/core/state-machine/src/lib.rs index 2c98cc8a30ff0..9275245448f47 100644 --- a/core/state-machine/src/lib.rs +++ b/core/state-machine/src/lib.rs @@ -23,9 +23,7 @@ use std::borrow::Cow; use log::warn; use hash_db::Hasher; use parity_codec::{Decode, Encode}; -use primitives::{ - storage::well_known_keys, NativeOrEncoded, NeverNativeValue, offchain -}; +use primitives::{NativeOrEncoded, NeverNativeValue, offchain}; pub mod backend; mod changes_trie; @@ -45,12 +43,14 @@ pub use ext::Ext; pub use backend::Backend; pub use changes_trie::{ AnchorBlockId as ChangesTrieAnchorBlockId, + State as ChangesTrieState, Storage as ChangesTrieStorage, RootsStorage as 
ChangesTrieRootsStorage, InMemoryStorage as InMemoryChangesTrieStorage, key_changes, key_changes_proof, key_changes_proof_check, prune as prune_changes_tries, - oldest_non_pruned_trie as oldest_non_pruned_changes_trie + oldest_non_pruned_trie as oldest_non_pruned_changes_trie, + disabled_state as disabled_changes_trie_state, }; pub use overlayed_changes::OverlayedChanges; pub use proving_backend::{ @@ -449,18 +449,18 @@ pub fn always_wasm() -> ExecutionManager> { } /// Creates new substrate state machine. -pub fn new<'a, H, N, B, T, O, Exec>( +pub fn new<'a, H, N, B, O, Exec>( backend: &'a B, - changes_trie_storage: Option<&'a T>, + changes_trie_state: Option>, offchain_ext: Option<&'a mut O>, overlay: &'a mut OverlayedChanges, exec: &'a Exec, method: &'a str, call_data: &'a [u8], -) -> StateMachine<'a, H, N, B, T, O, Exec> { +) -> StateMachine<'a, H, N, B, O, Exec> { StateMachine { backend, - changes_trie_storage, + changes_trie_state, offchain_ext, overlay, exec, @@ -471,9 +471,9 @@ pub fn new<'a, H, N, B, T, O, Exec>( } /// The substrate state machine. 
-pub struct StateMachine<'a, H, N, B, T, O, Exec> { +pub struct StateMachine<'a, H, N, B, O, Exec> { backend: &'a B, - changes_trie_storage: Option<&'a T>, + changes_trie_state: Option>, offchain_ext: Option<&'a mut O>, overlay: &'a mut OverlayedChanges, exec: &'a Exec, @@ -482,11 +482,10 @@ pub struct StateMachine<'a, H, N, B, T, O, Exec> { _hasher: PhantomData<(H, N)>, } -impl<'a, H, N, B, T, O, Exec> StateMachine<'a, H, N, B, T, O, Exec> where +impl<'a, H, N, B, O, Exec> StateMachine<'a, H, N, B, O, Exec> where H: Hasher, Exec: CodeExecutor, B: Backend, - T: ChangesTrieStorage, O: offchain::Externalities, H::Out: Ord + 'static, N: crate::changes_trie::BlockNumber, @@ -529,7 +528,7 @@ impl<'a, H, N, B, T, O, Exec> StateMachine<'a, H, N, B, T, O, Exec> where let mut externalities = ext::Ext::new( self.overlay, self.backend, - self.changes_trie_storage, + self.changes_trie_state.as_ref(), self.offchain_ext.as_mut().map(|x| &mut **x), ); let (result, was_native) = self.exec.call( @@ -621,21 +620,8 @@ impl<'a, H, N, B, T, O, Exec> StateMachine<'a, H, N, B, T, O, Exec> where CallResult ) -> CallResult { - // read changes trie configuration. The reason why we're doing it here instead of the - // `OverlayedChanges` constructor is that we need proofs for this read as a part of - // proof-of-execution on light clients. 
And the proof is recorded by the backend which - // is created after OverlayedChanges - - let backend = self.backend.clone(); - let init_overlay = |overlay: &mut OverlayedChanges, final_check: bool| { - let changes_trie_config = try_read_overlay_value( - overlay, - backend, - well_known_keys::CHANGES_TRIE_CONFIG - )?; - set_changes_trie_config(overlay, changes_trie_config, final_check) - }; - init_overlay(self.overlay, false)?; + let changes_tries_enabled = self.changes_trie_state.is_some(); + self.overlay.collect_extrinsics(changes_tries_enabled); let result = { let orig_prospective = self.overlay.prospective.clone(); @@ -659,10 +645,6 @@ impl<'a, H, N, B, T, O, Exec> StateMachine<'a, H, N, B, T, O, Exec> where result.map(move |out| (out, storage_delta, changes_delta)) }; - if result.is_ok() { - init_overlay(self.overlay, true)?; - } - result.map_err(|e| Box::new(e) as _) } } @@ -711,7 +693,7 @@ where let proving_backend = proving_backend::ProvingBackend::new(trie_backend); let mut sm = StateMachine { backend: &proving_backend, - changes_trie_storage: None as Option<&changes_trie::InMemoryStorage>, + changes_trie_state: changes_trie::disabled_state::(), offchain_ext: NeverOffchainExt::new(), overlay, exec, @@ -761,7 +743,7 @@ where { let mut sm = StateMachine { backend: trie_backend, - changes_trie_storage: None as Option<&changes_trie::InMemoryStorage>, + changes_trie_state: changes_trie::disabled_state::(), offchain_ext: NeverOffchainExt::new(), overlay, exec, @@ -896,46 +878,6 @@ where proving_backend.child_storage(storage_key, key).map_err(|e| Box::new(e) as Box) } -/// Sets overlayed changes' changes trie configuration. Returns error if configuration -/// differs from previous OR config decode has failed. 
-pub(crate) fn set_changes_trie_config( - overlay: &mut OverlayedChanges, - config: Option>, - final_check: bool, -) -> Result<(), Box> { - let config = match config { - Some(v) => Some(Decode::decode(&mut &v[..]) - .ok_or_else(|| Box::new("Failed to decode changes trie configuration".to_owned()) as Box)?), - None => None, - }; - - if final_check && overlay.changes_trie_config.is_some() != config.is_some() { - return Err(Box::new("Changes trie configuration change is not supported".to_owned())); - } - - if let Some(config) = config { - if !overlay.set_changes_trie_config(config) { - return Err(Box::new("Changes trie configuration change is not supported".to_owned())); - } - } - Ok(()) -} - -/// Reads storage value from overlay or from the backend. -fn try_read_overlay_value(overlay: &OverlayedChanges, backend: &B, key: &[u8]) - -> Result>, Box> -where - H: Hasher, - B: Backend, -{ - match overlay.storage(key).map(|x| x.map(|x| x.to_vec())) { - Some(value) => Ok(value), - None => backend - .storage(key) - .map_err(|err| Box::new(ExecutionError::Backend(format!("{}", err))) as Box), - } -} - #[cfg(test)] mod tests { use std::collections::HashMap; @@ -944,10 +886,7 @@ mod tests { use super::*; use super::backend::InMemory; use super::ext::Ext; - use super::changes_trie::{ - InMemoryStorage as InMemoryChangesTrieStorage, - Configuration as ChangesTrieConfig, - }; + use super::changes_trie::Configuration as ChangesTrieConfig; use primitives::{Blake2Hasher, map}; struct DummyCodeExecutor { @@ -970,7 +909,7 @@ mod tests { ) -> (CallResult, bool) { if self.change_changes_trie_config { ext.place_storage( - well_known_keys::CHANGES_TRIE_CONFIG.to_vec(), + primitives::storage::well_known_keys::CHANGES_TRIE_CONFIG.to_vec(), Some( ChangesTrieConfig { digest_interval: 777, @@ -1006,7 +945,7 @@ mod tests { fn execute_works() { assert_eq!(new( &trie_backend::tests::test_trie(), - Some(&InMemoryChangesTrieStorage::::new()), + changes_trie::disabled_state::<_, u64>(), 
NeverOffchainExt::new(), &mut Default::default(), &DummyCodeExecutor { @@ -1027,7 +966,7 @@ mod tests { fn execute_works_with_native_else_wasm() { assert_eq!(new( &trie_backend::tests::test_trie(), - Some(&InMemoryChangesTrieStorage::::new()), + changes_trie::disabled_state::<_, u64>(), NeverOffchainExt::new(), &mut Default::default(), &DummyCodeExecutor { @@ -1048,7 +987,7 @@ mod tests { let mut consensus_failed = false; assert!(new( &trie_backend::tests::test_trie(), - Some(&InMemoryChangesTrieStorage::::new()), + changes_trie::disabled_state::<_, u64>(), NeverOffchainExt::new(), &mut Default::default(), &DummyCodeExecutor { @@ -1117,8 +1056,13 @@ mod tests { }; { - let changes_trie_storage = InMemoryChangesTrieStorage::::new(); - let mut ext = Ext::new(&mut overlay, backend, Some(&changes_trie_storage), NeverOffchainExt::new()); + let state = changes_trie::disabled_state::<_, u64>(); + let mut ext = Ext::new( + &mut overlay, + backend, + state.as_ref(), + NeverOffchainExt::new(), + ); ext.clear_prefix(b"ab"); } overlay.commit_prospective(); @@ -1141,12 +1085,12 @@ mod tests { fn set_child_storage_works() { let mut state = InMemory::::default(); let backend = state.as_trie_backend().unwrap(); - let changes_trie_storage = InMemoryChangesTrieStorage::::new(); let mut overlay = OverlayedChanges::default(); + let changes_trie_state = changes_trie::disabled_state::<_, u64>(); let mut ext = Ext::new( &mut overlay, backend, - Some(&changes_trie_storage), + changes_trie_state.as_ref(), NeverOffchainExt::new() ); @@ -1216,44 +1160,4 @@ mod tests { assert_eq!(local_result1, Some(vec![142])); assert_eq!(local_result2, None); } - - #[test] - fn cannot_change_changes_trie_config() { - assert!(new( - &trie_backend::tests::test_trie(), - Some(&InMemoryChangesTrieStorage::::new()), - NeverOffchainExt::new(), - &mut Default::default(), - &DummyCodeExecutor { - change_changes_trie_config: true, - native_available: false, - native_succeeds: true, - fallback_succeeds: true, - }, - 
"test", - &[], - ).execute( - ExecutionStrategy::NativeWhenPossible - ).is_err()); - } - - #[test] - fn cannot_change_changes_trie_config_with_native_else_wasm() { - assert!(new( - &trie_backend::tests::test_trie(), - Some(&InMemoryChangesTrieStorage::::new()), - NeverOffchainExt::new(), - &mut Default::default(), - &DummyCodeExecutor { - change_changes_trie_config: true, - native_available: false, - native_succeeds: true, - fallback_succeeds: true, - }, - "test", - &[], - ).execute( - ExecutionStrategy::NativeElseWasm - ).is_err()); - } } diff --git a/core/state-machine/src/overlayed_changes.rs b/core/state-machine/src/overlayed_changes.rs index 7d6d6081bd26b..9a48d9cd771b5 100644 --- a/core/state-machine/src/overlayed_changes.rs +++ b/core/state-machine/src/overlayed_changes.rs @@ -19,7 +19,7 @@ #[cfg(test)] use std::iter::FromIterator; use std::collections::{HashMap, HashSet}; use parity_codec::Decode; -use crate::changes_trie::{NO_EXTRINSIC_INDEX, Configuration as ChangesTrieConfig}; +use crate::changes_trie::NO_EXTRINSIC_INDEX; use primitives::storage::well_known_keys::EXTRINSIC_INDEX; /// The overlayed changes to state to be queried on top of the backend. @@ -32,9 +32,8 @@ pub struct OverlayedChanges { pub(crate) prospective: OverlayedChangeSet, /// Committed changes. pub(crate) committed: OverlayedChangeSet, - /// Changes trie configuration. None by default, but could be installed by the - /// runtime if it supports change tries. - pub(crate) changes_trie_config: Option, + /// True if extrinsiscs stats must be collected. + pub(crate) collect_extrinsics: bool, } /// The storage value, used inside OverlayedChanges. @@ -87,20 +86,9 @@ impl OverlayedChanges { self.prospective.is_empty() && self.committed.is_empty() } - /// Sets the changes trie configuration. - /// - /// Returns false if configuration has been set already and we now trying - /// to install different configuration. This isn't supported now. 
- pub(crate) fn set_changes_trie_config(&mut self, config: ChangesTrieConfig) -> bool { - if let Some(ref old_config) = self.changes_trie_config { - // we do not support changes trie configuration' change now - if *old_config != config { - return false; - } - } - - self.changes_trie_config = Some(config); - true + /// Ask to collect/not to collect extrinsics indices where key(s) has been changed. + pub fn collect_extrinsics(&mut self, collect_extrinsics: bool) { + self.collect_extrinsics = collect_extrinsics; } /// Returns a double-Option: None if the key is unknown (i.e. and the query should be refered @@ -281,7 +269,7 @@ impl OverlayedChanges { /// Changes that are made outside of extrinsics, are marked with /// `NO_EXTRINSIC_INDEX` index. fn extrinsic_index(&self) -> Option { - match self.changes_trie_config.is_some() { + match self.collect_extrinsics { true => Some( self.storage(EXTRINSIC_INDEX) .and_then(|idx| idx.and_then(|idx| Decode::decode(&mut &*idx))) @@ -304,7 +292,6 @@ mod tests { use primitives::{Blake2Hasher, H256}; use primitives::storage::well_known_keys::EXTRINSIC_INDEX; use crate::backend::InMemory; - use crate::changes_trie::InMemoryStorage as InMemoryChangesTrieStorage; use crate::ext::Ext; use crate::Externalities; use super::*; @@ -365,64 +352,21 @@ mod tests { ..Default::default() }; - let changes_trie_storage = InMemoryChangesTrieStorage::::new(); + let changes_trie_state = crate::changes_trie::disabled_state::<_, u64>(); let mut ext = Ext::new( &mut overlay, &backend, - Some(&changes_trie_storage), + changes_trie_state.as_ref(), crate::NeverOffchainExt::new(), ); const ROOT: [u8; 32] = hex!("0b41e488cccbd67d1f1089592c2c235f5c5399b053f7fe9152dd4b5f279914cd"); assert_eq!(ext.storage_root(), H256::from(ROOT)); } - #[test] - fn changes_trie_configuration_is_saved() { - let mut overlay = OverlayedChanges::default(); - assert!(overlay.changes_trie_config.is_none()); - assert_eq!(overlay.set_changes_trie_config(ChangesTrieConfig { - 
digest_interval: 4, digest_levels: 1, - }), true); - assert!(overlay.changes_trie_config.is_some()); - } - - #[test] - fn changes_trie_configuration_is_saved_twice() { - let mut overlay = OverlayedChanges::default(); - assert!(overlay.changes_trie_config.is_none()); - assert_eq!(overlay.set_changes_trie_config(ChangesTrieConfig { - digest_interval: 4, digest_levels: 1, - }), true); - overlay.set_extrinsic_index(0); - overlay.set_storage(vec![1], Some(vec![2])); - assert_eq!(overlay.set_changes_trie_config(ChangesTrieConfig { - digest_interval: 4, digest_levels: 1, - }), true); - assert_eq!( - strip_extrinsic_index(&overlay.prospective.top), - vec![ - (vec![1], OverlayedValue { value: Some(vec![2]), extrinsics: Some(vec![0].into_iter().collect()) }), - ].into_iter().collect(), - ); - } - - #[test] - fn panics_when_trying_to_save_different_changes_trie_configuration() { - let mut overlay = OverlayedChanges::default(); - assert_eq!(overlay.set_changes_trie_config(ChangesTrieConfig { - digest_interval: 4, digest_levels: 1, - }), true); - assert_eq!(overlay.set_changes_trie_config(ChangesTrieConfig { - digest_interval: 2, digest_levels: 1, - }), false); - } - #[test] fn extrinsic_changes_are_collected() { let mut overlay = OverlayedChanges::default(); - let _ = overlay.set_changes_trie_config(ChangesTrieConfig { - digest_interval: 4, digest_levels: 1, - }); + overlay.collect_extrinsics(true); overlay.set_storage(vec![100], Some(vec![101])); diff --git a/core/state-machine/src/testing.rs b/core/state-machine/src/testing.rs index 68b9d28752c28..f169f478182a0 100644 --- a/core/state-machine/src/testing.rs +++ b/core/state-machine/src/testing.rs @@ -19,11 +19,13 @@ use std::collections::{HashMap, BTreeMap}; use std::iter::FromIterator; use hash_db::Hasher; +use parity_codec::Decode; use crate::backend::{InMemory, Backend}; use primitives::storage::well_known_keys::is_child_storage_key; use crate::changes_trie::{ compute_changes_trie_root, InMemoryStorage as 
ChangesTrieInMemoryStorage, - BlockNumber as ChangesTrieBlockNumber, + BlockNumber as ChangesTrieBlockNumber, Configuration as ChangesTrieConfiguration, + State as ChangesTrieState, }; use primitives::offchain; use primitives::storage::well_known_keys::{CHANGES_TRIE_CONFIG, CODE, HEAP_PAGES}; @@ -36,6 +38,7 @@ const EXT_NOT_ALLOWED_TO_FAIL: &str = "Externalities not allowed to fail within pub struct TestExternalities { overlay: OverlayedChanges, backend: InMemory, + changes_trie_config: Option, changes_trie_storage: ChangesTrieInMemoryStorage, offchain: Option>, } @@ -49,18 +52,16 @@ impl TestExternalities { /// Create a new instance of `TestExternalities` pub fn new_with_code(code: &[u8], mut inner: HashMap, Vec>) -> Self { let mut overlay = OverlayedChanges::default(); - - super::set_changes_trie_config( - &mut overlay, - inner.get(&CHANGES_TRIE_CONFIG.to_vec()).cloned(), - false, - ).expect("changes trie configuration is correct in test env; qed"); + let changes_trie_config = inner.get(CHANGES_TRIE_CONFIG) + .and_then(|v| Decode::decode(&mut &v[..])); + overlay.collect_extrinsics(changes_trie_config.is_some()); inner.insert(HEAP_PAGES.to_vec(), 8u64.encode()); inner.insert(CODE.to_vec(), code.to_vec()); TestExternalities { overlay, + changes_trie_config, changes_trie_storage: ChangesTrieInMemoryStorage::new(), backend: inner.into(), offchain: None, @@ -226,12 +227,18 @@ impl Externalities for TestExternalities } fn storage_changes_root(&mut self, parent: H::Out) -> Result, ()> { - Ok(compute_changes_trie_root::<_, _, H, N>( - &self.backend, - Some(&self.changes_trie_storage), - &self.overlay, - parent, - )?.map(|(root, _)| root.clone())) + match self.changes_trie_config.clone() { + Some(config) => { + let state = ChangesTrieState::new(config, &self.changes_trie_storage); + Ok(compute_changes_trie_root::<_, H, N>( + &self.backend, + Some(&state), + &self.overlay, + parent, + )?.map(|(root, _)| root.clone())) + }, + None => Ok(None), + } } fn offchain(&mut self) 
-> Option<&mut dyn offchain::Externalities> { From 0cd05df46d3be2fb159eca8ca0632bcadde3ad6c Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 26 Jun 2019 16:41:22 +0300 Subject: [PATCH 03/63] introduce config activation block --- core/client/db/src/lib.rs | 2 + core/client/src/backend.rs | 4 +- core/primitives/src/changes_trie.rs | 77 +++--- core/state-machine/src/changes_trie/build.rs | 9 +- .../src/changes_trie/build_iterator.rs | 187 +++++++++----- core/state-machine/src/changes_trie/mod.rs | 13 +- core/state-machine/src/changes_trie/prune.rs | 235 ++++++++++-------- core/state-machine/src/ext.rs | 5 +- core/state-machine/src/testing.rs | 3 +- 9 files changed, 331 insertions(+), 204 deletions(-) diff --git a/core/client/db/src/lib.rs b/core/client/db/src/lib.rs index b1c0596a0e261..8081a4b532e09 100644 --- a/core/client/db/src/lib.rs +++ b/core/client/db/src/lib.rs @@ -561,6 +561,7 @@ impl> DbChangesTrieStorage { }; state_machine::prune_changes_tries( + Zero::zero(), // TODO: not true config, &*self, min_blocks_to_keep.into(), @@ -584,6 +585,7 @@ where ) -> NumberFor { match self.min_blocks_to_keep { Some(min_blocks_to_keep) => state_machine::oldest_non_pruned_changes_trie( + Zero::zero(), // TODO: not true config, min_blocks_to_keep.into(), best_finalized_block, diff --git a/core/client/src/backend.rs b/core/client/src/backend.rs index 20da280be0322..1d650b34fbb51 100644 --- a/core/client/src/backend.rs +++ b/core/client/src/backend.rs @@ -21,7 +21,7 @@ use crate::error; use parity_codec::Decode; use primitives::{storage::well_known_keys::CHANGES_TRIE_CONFIG, ChangesTrieConfiguration}; use runtime_primitives::{generic::BlockId, Justification, StorageOverlay, ChildrenStorageOverlay}; -use runtime_primitives::traits::{Block as BlockT, NumberFor}; +use runtime_primitives::traits::{Block as BlockT, Zero, NumberFor}; use state_machine::backend::Backend as StateBackend; use state_machine::{ChangesTrieStorage as StateChangesTrieStorage, ChangesTrieState}; 
use consensus::well_known_cache_keys; @@ -249,5 +249,5 @@ pub fn changes_tries_state_at_state<'a, S: StateBackend, Block: BlockT, H: Ha Ok(state.storage(CHANGES_TRIE_CONFIG) .map_err(|e| error::Error::from_state(Box::new(e)))? .and_then(|v| Decode::decode(&mut &v[..])) - .map(|config| ChangesTrieState::new(config, storage))) + .map(|config| ChangesTrieState::new(config, Zero::zero(), storage))) } diff --git a/core/primitives/src/changes_trie.rs b/core/primitives/src/changes_trie.rs index eb6a75454fe41..057ea1dd63fa3 100644 --- a/core/primitives/src/changes_trie.rs +++ b/core/primitives/src/changes_trie.rs @@ -45,13 +45,17 @@ impl ChangesTrieConfiguration { } /// Do we need to build digest at given block? - pub fn is_digest_build_required_at_block(&self, block: Number) -> bool + pub fn is_digest_build_required_at_block( + &self, + zero: Number, + block: Number, + ) -> bool where - Number: From + PartialEq + ::rstd::ops::Rem + Zero, + Number: From + PartialEq + ::rstd::ops::Rem + ::rstd::ops::Sub + ::rstd::cmp::PartialOrd + Zero, { - block != 0.into() + block > zero && self.is_digest_build_enabled() - && (block % self.digest_interval.into()).is_zero() + && ((block - zero) % self.digest_interval.into()).is_zero() } /// Returns max digest interval. One if digests are not created at all. 
@@ -78,20 +82,21 @@ impl ChangesTrieConfiguration { /// digest interval (in blocks) /// step between blocks we're interested in when digest is built /// ) - pub fn digest_level_at_block(&self, block: Number) -> Option<(u32, u32, u32)> + pub fn digest_level_at_block(&self, zero: Number, block: Number) -> Option<(u32, u32, u32)> where - Number: Clone + From + PartialEq + ::rstd::ops::Rem + Zero, + Number: Clone + From + PartialEq + ::rstd::ops::Rem + ::rstd::ops::Sub + ::rstd::cmp::PartialOrd + Zero, { - if !self.is_digest_build_required_at_block(block.clone()) { + if !self.is_digest_build_required_at_block(zero.clone(), block.clone()) { return None; } + let relative_block = block - zero; let mut digest_interval = self.digest_interval; let mut current_level = 1u32; let mut digest_step = 1u32; while current_level < self.digest_levels { let new_digest_interval = match digest_interval.checked_mul(self.digest_interval) { - Some(new_digest_interval) if (block.clone() % new_digest_interval.into()).is_zero() + Some(new_digest_interval) if (relative_block.clone() % new_digest_interval.into()).is_zero() => new_digest_interval, _ => break, }; @@ -131,31 +136,43 @@ mod tests { #[test] fn is_digest_build_required_at_block_works() { - assert!(!config(8, 4).is_digest_build_required_at_block(0u64)); - assert!(!config(8, 4).is_digest_build_required_at_block(1u64)); - assert!(!config(8, 4).is_digest_build_required_at_block(2u64)); - assert!(!config(8, 4).is_digest_build_required_at_block(4u64)); - assert!(config(8, 4).is_digest_build_required_at_block(8u64)); - assert!(!config(8, 4).is_digest_build_required_at_block(9u64)); - assert!(config(8, 4).is_digest_build_required_at_block(64u64)); - assert!(config(8, 4).is_digest_build_required_at_block(64u64)); - assert!(config(8, 4).is_digest_build_required_at_block(512u64)); - assert!(config(8, 4).is_digest_build_required_at_block(4096u64)); - assert!(!config(8, 4).is_digest_build_required_at_block(4103u64)); - assert!(config(8, 
4).is_digest_build_required_at_block(4104u64)); - assert!(!config(8, 4).is_digest_build_required_at_block(4108u64)); + fn test_with_zero(zero: u64) { + assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 0u64)); + assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 1u64)); + assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 2u64)); + assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 4u64)); + assert!(config(8, 4).is_digest_build_required_at_block(zero, zero + 8u64)); + assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 9u64)); + assert!(config(8, 4).is_digest_build_required_at_block(zero, zero + 64u64)); + assert!(config(8, 4).is_digest_build_required_at_block(zero, zero + 64u64)); + assert!(config(8, 4).is_digest_build_required_at_block(zero, zero + 512u64)); + assert!(config(8, 4).is_digest_build_required_at_block(zero, zero + 4096u64)); + assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 4103u64)); + assert!(config(8, 4).is_digest_build_required_at_block(zero, zero + 4104u64)); + assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 4108u64)); + } + + test_with_zero(0); + test_with_zero(8); + test_with_zero(17); } #[test] fn digest_level_at_block_works() { - assert_eq!(config(8, 4).digest_level_at_block(0u64), None); - assert_eq!(config(8, 4).digest_level_at_block(7u64), None); - assert_eq!(config(8, 4).digest_level_at_block(63u64), None); - assert_eq!(config(8, 4).digest_level_at_block(8u64), Some((1, 8, 1))); - assert_eq!(config(8, 4).digest_level_at_block(64u64), Some((2, 64, 8))); - assert_eq!(config(8, 4).digest_level_at_block(512u64), Some((3, 512, 64))); - assert_eq!(config(8, 4).digest_level_at_block(4096u64), Some((4, 4096, 512))); - assert_eq!(config(8, 4).digest_level_at_block(4112u64), Some((1, 8, 1))); + fn test_with_zero(zero: u64) { + assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 0u64), None); + 
assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 7u64), None); + assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 63u64), None); + assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 8u64), Some((1, 8, 1))); + assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 64u64), Some((2, 64, 8))); + assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 512u64), Some((3, 512, 64))); + assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 4096u64), Some((4, 4096, 512))); + assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 4112u64), Some((1, 8, 1))); + } + + test_with_zero(0); + test_with_zero(8); + test_with_zero(17); } #[test] @@ -165,4 +182,6 @@ mod tests { assert_eq!(config(8, 4).max_digest_interval(), 4096); assert_eq!(config(::std::u32::MAX, 1024).max_digest_interval(), ::std::u32::MAX); } + + // TODO: test that it doesn't panic when zero > block } diff --git a/core/state-machine/src/changes_trie/build.rs b/core/state-machine/src/changes_trie/build.rs index 7365fa72d808d..6b7170b871c49 100644 --- a/core/state-machine/src/changes_trie/build.rs +++ b/core/state-machine/src/changes_trie/build.rs @@ -36,6 +36,7 @@ use crate::changes_trie::{AnchorBlockId, Configuration, Storage, BlockNumber}; pub fn prepare_input<'a, B, H, Number>( backend: &B, storage: &'a Storage, + config_activation_block: Number, config: &'a Configuration, changes: &OverlayedChanges, parent: &'a AnchorBlockId, @@ -52,6 +53,7 @@ pub fn prepare_input<'a, B, H, Number>( changes)?); input.extend(prepare_digest_input::( parent, + config_activation_block, config, storage)?); @@ -98,6 +100,7 @@ fn prepare_extrinsics_input( /// Prepare DigestIndex input pairs. 
fn prepare_digest_input<'a, H, Number>( parent: &'a AnchorBlockId, + config_activation_block: Number, config: &Configuration, storage: &'a Storage, ) -> Result> + 'a, String> @@ -107,7 +110,7 @@ fn prepare_digest_input<'a, H, Number>( Number: BlockNumber, { let mut digest_map = BTreeMap::, BTreeSet>::new(); - for digest_build_block in digest_build_iterator(config, parent.number.clone() + One::one()) { + for digest_build_block in digest_build_iterator(config, config_activation_block, parent.number.clone() + One::one()) { let trie_root = storage.root(parent, digest_build_block.clone())?; let trie_root = trie_root.ok_or_else(|| format!("No changes trie root for block {}", digest_build_block.clone()))?; let trie_storage = TrieBackendEssence::<_, H>::new( @@ -228,6 +231,7 @@ mod test { let changes_trie_nodes = prepare_input( &backend, &storage, + 0, // TODO: test other cases &config, &changes, &AnchorBlockId { hash: Default::default(), number: 4 }, @@ -245,6 +249,7 @@ mod test { let changes_trie_nodes = prepare_input( &backend, &storage, + 0, // TODO: test other cases &config, &changes, &AnchorBlockId { hash: Default::default(), number: 3 }, @@ -267,6 +272,7 @@ mod test { let changes_trie_nodes = prepare_input( &backend, &storage, + 0, // TODO: test other cases &config, &changes, &AnchorBlockId { hash: Default::default(), number: 15 }, @@ -297,6 +303,7 @@ mod test { let changes_trie_nodes = prepare_input( &backend, &storage, + 0, // TODO: test other cases &config, &changes, &AnchorBlockId { hash: Default::default(), number: 3 }, diff --git a/core/state-machine/src/changes_trie/build_iterator.rs b/core/state-machine/src/changes_trie/build_iterator.rs index 5d8a8318abf84..8cb7b8a90d5f7 100644 --- a/core/state-machine/src/changes_trie/build_iterator.rs +++ b/core/state-machine/src/changes_trie/build_iterator.rs @@ -23,10 +23,11 @@ use crate::changes_trie::{Configuration, BlockNumber}; /// changes trie of given block. 
pub fn digest_build_iterator( config: &Configuration, + zero: Number, block: Number, ) -> DigestBuildIterator { // prepare digest build parameters - let (_, _, digest_step) = match config.digest_level_at_block(block.clone()) { + let (_, _, digest_step) = match config.digest_level_at_block(zero, block.clone()) { Some((current_level, digest_interval, digest_step)) => (current_level, digest_interval, digest_step), None => return DigestBuildIterator::empty(), @@ -136,100 +137,160 @@ impl Iterator for BlocksRange { mod tests { use super::*; - fn digest_build_iterator(digest_interval: u32, digest_levels: u32, block: u64) -> DigestBuildIterator { - super::digest_build_iterator(&Configuration { digest_interval, digest_levels }, block) + fn digest_build_iterator( + digest_interval: u32, + digest_levels: u32, + zero: u64, + block: u64, + ) -> DigestBuildIterator { + super::digest_build_iterator(&Configuration { digest_interval, digest_levels }, zero, block) } - fn digest_build_iterator_basic(digest_interval: u32, digest_levels: u32, block: u64) -> (u64, u32, u32) { - let iter = digest_build_iterator(digest_interval, digest_levels, block); + fn digest_build_iterator_basic( + digest_interval: u32, + digest_levels: u32, + zero: u64, + block: u64, + ) -> (u64, u32, u32) { + let iter = digest_build_iterator(digest_interval, digest_levels, zero, block); (iter.block, iter.digest_interval, iter.max_step) } - fn digest_build_iterator_blocks(digest_interval: u32, digest_levels: u32, block: u64) -> Vec { - digest_build_iterator(digest_interval, digest_levels, block).collect() + fn digest_build_iterator_blocks( + digest_interval: u32, + digest_levels: u32, + zero: u64, + block: u64, + ) -> Vec { + digest_build_iterator(digest_interval, digest_levels, zero, block).collect() } #[test] fn suggest_digest_inclusion_returns_empty_iterator() { - let empty = (0, 0, 0); - assert_eq!(digest_build_iterator_basic(4, 16, 0), empty, "block is 0"); - assert_eq!(digest_build_iterator_basic(0, 16, 64), 
empty, "digest_interval is 0"); - assert_eq!(digest_build_iterator_basic(1, 16, 64), empty, "digest_interval is 1"); - assert_eq!(digest_build_iterator_basic(4, 0, 64), empty, "digest_levels is 0"); - assert_eq!(digest_build_iterator_basic(4, 16, 1), empty, "digest is not required for this block"); - assert_eq!(digest_build_iterator_basic(4, 16, 2), empty, "digest is not required for this block"); - assert_eq!(digest_build_iterator_basic(4, 16, 15), empty, "digest is not required for this block"); - assert_eq!(digest_build_iterator_basic(4, 16, 17), empty, "digest is not required for this block"); - assert_eq!(digest_build_iterator_basic( - ::std::u32::MAX / 2 + 1, - 16, - ::std::u64::MAX, - ), empty, "digest_interval * 2 is greater than u64::MAX"); + fn test_with_zero(zero: u64) { + let empty = (0, 0, 0); + assert_eq!(digest_build_iterator_basic(4, 16, zero, zero + 0), empty, "block is 0"); + assert_eq!(digest_build_iterator_basic(0, 16, zero, zero + 64), empty, "digest_interval is 0"); + assert_eq!(digest_build_iterator_basic(1, 16, zero, zero + 64), empty, "digest_interval is 1"); + assert_eq!(digest_build_iterator_basic(4, 0, zero, zero + 64), empty, "digest_levels is 0"); + assert_eq!(digest_build_iterator_basic(4, 16, zero, zero + 1), empty, "digest is not required for this block"); + assert_eq!(digest_build_iterator_basic(4, 16, zero, zero + 2), empty, "digest is not required for this block"); + assert_eq!(digest_build_iterator_basic(4, 16, zero, zero + 15), empty, "digest is not required for this block"); + assert_eq!(digest_build_iterator_basic(4, 16, zero, zero + 17), empty, "digest is not required for this block"); + assert_eq!(digest_build_iterator_basic( + ::std::u32::MAX / 2 + 1, + 16, + zero, + ::std::u64::MAX, + ), empty, "digest_interval * 2 is greater than u64::MAX"); + } + + test_with_zero(0); + test_with_zero(1); + test_with_zero(2); + test_with_zero(4); + test_with_zero(17); } #[test] fn suggest_digest_inclusion_returns_level1_iterator() { - 
assert_eq!(digest_build_iterator_basic(16, 1, 16), (16, 16, 1), "!(block % interval) && first digest level == block"); - assert_eq!(digest_build_iterator_basic(16, 1, 256), (256, 16, 1), "!(block % interval^2), but there's only 1 digest level"); - assert_eq!(digest_build_iterator_basic(16, 2, 32), (32, 16, 1), "second level digest is not required for this block"); - assert_eq!(digest_build_iterator_basic(16, 3, 4080), (4080, 16, 1), "second && third level digest are not required for this block"); + fn test_with_zero(zero: u64) { + assert_eq!(digest_build_iterator_basic(16, 1, zero, zero + 16), (zero + 16, 16, 1), "!(block % interval) && first digest level == block"); + assert_eq!(digest_build_iterator_basic(16, 1, zero, zero + 256), (zero + 256, 16, 1), "!(block % interval^2), but there's only 1 digest level"); + assert_eq!(digest_build_iterator_basic(16, 2, zero, zero + 32), (zero + 32, 16, 1), "second level digest is not required for this block"); + assert_eq!(digest_build_iterator_basic(16, 3, zero, zero + 4080), (zero + 4080, 16, 1), "second && third level digest are not required for this block"); + } + + test_with_zero(0); + test_with_zero(16); + test_with_zero(17); } #[test] fn suggest_digest_inclusion_returns_level2_iterator() { - assert_eq!(digest_build_iterator_basic(16, 2, 256), (256, 16, 16), "second level digest"); - assert_eq!(digest_build_iterator_basic(16, 2, 4096), (4096, 16, 16), "!(block % interval^3), but there's only 2 digest levels"); + fn test_with_zero(zero: u64) { + assert_eq!(digest_build_iterator_basic(16, 2, zero, zero + 256), (zero + 256, 16, 16), "second level digest"); + assert_eq!(digest_build_iterator_basic(16, 2, zero, zero + 4096), (zero + 4096, 16, 16), "!(block % interval^3), but there's only 2 digest levels"); + } + + test_with_zero(0); + test_with_zero(16); + test_with_zero(17); } #[test] fn suggest_digest_inclusion_returns_level3_iterator() { - assert_eq!(digest_build_iterator_basic(16, 3, 4096), (4096, 16, 256), "third level 
digest: beginning"); - assert_eq!(digest_build_iterator_basic(16, 3, 8192), (8192, 16, 256), "third level digest: next"); + fn test_with_zero(zero: u64) { + assert_eq!(digest_build_iterator_basic(16, 3, zero, zero + 4096), (zero + 4096, 16, 256), "third level digest: beginning"); + assert_eq!(digest_build_iterator_basic(16, 3, zero, zero + 8192), (zero + 8192, 16, 256), "third level digest: next"); + } + + test_with_zero(0); + test_with_zero(16); + test_with_zero(17); } #[test] fn digest_iterator_returns_level1_blocks() { - assert_eq!(digest_build_iterator_blocks(16, 1, 16), - vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - assert_eq!(digest_build_iterator_blocks(16, 1, 256), - vec![241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255]); - assert_eq!(digest_build_iterator_blocks(16, 2, 32), - vec![17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]); - assert_eq!(digest_build_iterator_blocks(16, 3, 4080), - vec![4065, 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073, 4074, 4075, 4076, 4077, 4078, 4079]); + fn test_with_zero(zero: u64) { + assert_eq!(digest_build_iterator_blocks(16, 1, zero, zero + 16), + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15].iter().map(|item| zero + item).collect::>()); + assert_eq!(digest_build_iterator_blocks(16, 1, zero, zero + 256), + [241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255].iter().map(|item| zero + item).collect::>()); + assert_eq!(digest_build_iterator_blocks(16, 2, zero, zero + 32), + [17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31].iter().map(|item| zero + item).collect::>()); + assert_eq!(digest_build_iterator_blocks(16, 3, zero, zero + 4080), + [4065, 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073, 4074, 4075, 4076, 4077, 4078, 4079].iter().map(|item| zero + item).collect::>()); + } + + test_with_zero(0); + test_with_zero(16); + test_with_zero(17); } #[test] fn digest_iterator_returns_level1_and_level2_blocks() { - 
assert_eq!(digest_build_iterator_blocks(16, 2, 256), - vec![ - // level2 is a level1 digest of 16-1 previous blocks: - 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, - // level2 points to previous 16-1 level1 digests: - 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, - ], - ); - assert_eq!(digest_build_iterator_blocks(16, 2, 4096), - vec![ - // level2 is a level1 digest of 16-1 previous blocks: - 4081, 4082, 4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, 4094, 4095, - // level2 points to previous 16-1 level1 digests: - 3856, 3872, 3888, 3904, 3920, 3936, 3952, 3968, 3984, 4000, 4016, 4032, 4048, 4064, 4080, - ], - ); + fn test_with_zero(zero: u64) { + assert_eq!(digest_build_iterator_blocks(16, 2, zero, zero + 256), + [ + // level2 is a level1 digest of 16-1 previous blocks: + 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, + // level2 points to previous 16-1 level1 digests: + 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, + ].iter().map(|item| zero + item).collect::>(), + ); + assert_eq!(digest_build_iterator_blocks(16, 2, zero, zero + 4096), + [ + // level2 is a level1 digest of 16-1 previous blocks: + 4081, 4082, 4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, 4094, 4095, + // level2 points to previous 16-1 level1 digests: + 3856, 3872, 3888, 3904, 3920, 3936, 3952, 3968, 3984, 4000, 4016, 4032, 4048, 4064, 4080, + ].iter().map(|item| zero + item).collect::>(), + ); + } + + test_with_zero(0); + test_with_zero(16); + test_with_zero(17); } #[test] fn digest_iterator_returns_level1_and_level2_and_level3_blocks() { - assert_eq!(digest_build_iterator_blocks(16, 3, 4096), - vec![ - // level3 is a level1 digest of 16-1 previous blocks: - 4081, 4082, 4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, 4094, 4095, - // level3 points to previous 16-1 level1 digests: - 3856, 3872, 3888, 3904, 3920, 3936, 3952, 3968, 3984, 
4000, 4016, 4032, 4048, 4064, 4080, - // level3 points to previous 16-1 level2 digests: - 256, 512, 768, 1024, 1280, 1536, 1792, 2048, 2304, 2560, 2816, 3072, 3328, 3584, 3840, - ], - ); + fn test_with_zero(zero: u64) { + assert_eq!(digest_build_iterator_blocks(16, 3, zero, zero + 4096), + [ + // level3 is a level1 digest of 16-1 previous blocks: + 4081, 4082, 4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, 4094, 4095, + // level3 points to previous 16-1 level1 digests: + 3856, 3872, 3888, 3904, 3920, 3936, 3952, 3968, 3984, 4000, 4016, 4032, 4048, 4064, 4080, + // level3 points to previous 16-1 level2 digests: + 256, 512, 768, 1024, 1280, 1536, 1792, 2048, 2304, 2560, 2816, 3072, 3328, 3584, 3840, + ].iter().map(|item| zero + item).collect::>(), + ); + } + + test_with_zero(0); + test_with_zero(16); + test_with_zero(17); } } diff --git a/core/state-machine/src/changes_trie/mod.rs b/core/state-machine/src/changes_trie/mod.rs index 7d099b9e1ee62..b4da513a9c77e 100644 --- a/core/state-machine/src/changes_trie/mod.rs +++ b/core/state-machine/src/changes_trie/mod.rs @@ -100,6 +100,10 @@ pub struct AnchorBlockId { pub struct State<'a, H, Number> { /// Configuration that is active at given block. pub config: Configuration, + /// Configuration activation block number. Zero if it is the first configuration on the chain, + /// or the number of the block that has emitted the NewConfiguration signal (thus activating the + /// configuration starting from the **next** block). + pub config_activation_block: Number, /// Underlying changes tries storage reference. pub storage: &'a dyn Storage, } @@ -135,9 +139,14 @@ pub type Configuration = primitives::ChangesTrieConfiguration; impl<'a, H, Number> State<'a, H, Number> { /// Create state with given config and storage. 
- pub fn new(config: Configuration, storage: &'a dyn Storage) -> Self { + pub fn new( + config: Configuration, + config_activation_block: Number, + storage: &'a dyn Storage, + ) -> Self { Self { config, + config_activation_block, storage, } } @@ -171,7 +180,7 @@ pub fn compute_changes_trie_root<'a, B: Backend, H: Hasher, Number: BlockNumb let parent = state.storage.build_anchor(parent_hash).map_err(|_| ())?; // storage errors are considered fatal (similar to situations when runtime fetches values from storage) - let input_pairs = prepare_input::(backend, state.storage, &state.config, changes, &parent) + let input_pairs = prepare_input::(backend, state.storage, state.config_activation_block.clone(), &state.config, changes, &parent) .expect("storage is not allowed to fail within runtime"); match input_pairs { Some(input_pairs) => { diff --git a/core/state-machine/src/changes_trie/prune.rs b/core/state-machine/src/changes_trie/prune.rs index 3aedf66f75712..f71dbeeefdba0 100644 --- a/core/state-machine/src/changes_trie/prune.rs +++ b/core/state-machine/src/changes_trie/prune.rs @@ -29,16 +29,17 @@ use crate::changes_trie::storage::TrieBackendAdapter; /// given changes trie configuration, pruning parameter and number of /// best finalized block. 
pub fn oldest_non_pruned_trie( + config_activation_block: Number, config: &Configuration, min_blocks_to_keep: Number, best_finalized_block: Number, ) -> Number { let max_digest_interval = config.max_digest_interval(); - let best_finalized_block_rem = best_finalized_block.clone() % max_digest_interval.into(); + let best_finalized_block_rem = (best_finalized_block.clone() - config_activation_block.clone()) % max_digest_interval.into(); let max_digest_block = best_finalized_block - best_finalized_block_rem; - match pruning_range(config, min_blocks_to_keep, max_digest_block) { + match pruning_range(config_activation_block.clone(), config, min_blocks_to_keep, max_digest_block) { Some((_, last_pruned_block)) => last_pruned_block + One::one(), - None => One::one(), + None => config_activation_block + One::one(), } } @@ -48,6 +49,7 @@ pub fn oldest_non_pruned_trie( /// ranges. /// Returns MemoryDB that contains all deleted changes tries nodes. pub fn prune, H: Hasher, Number: BlockNumber, F: FnMut(H::Out)>( + config_activation_block: Number, config: &Configuration, storage: &S, min_blocks_to_keep: Number, @@ -55,13 +57,12 @@ pub fn prune, H: Hasher, Number: BlockNumber, F: FnMut(H:: mut remove_trie_node: F, ) { // select range for pruning - let (first, last) = match pruning_range(config, min_blocks_to_keep, current_block.number.clone()) { + let (first, last) = match pruning_range(config_activation_block, config, min_blocks_to_keep, current_block.number.clone()) { Some((first, last)) => (first, last), None => return, }; // delete changes trie for every block in range - // FIXME: limit `max_digest_interval` so that this cycle won't involve huge ranges let mut block = first; loop { if block >= last.clone() + One::one() { @@ -103,6 +104,7 @@ pub fn prune, H: Hasher, Number: BlockNumber, F: FnMut(H:: /// Select blocks range (inclusive from both ends) for pruning changes tries in. 
fn pruning_range( + config_activation_block: Number, config: &Configuration, min_blocks_to_keep: Number, block: Number, @@ -110,7 +112,7 @@ fn pruning_range( // compute number of changes tries we actually want to keep let (prune_interval, blocks_to_keep) = if config.is_digest_build_enabled() { // we only CAN prune at block where max-level-digest is created - let max_digest_interval = match config.digest_level_at_block(block.clone()) { + let max_digest_interval = match config.digest_level_at_block(config_activation_block.clone(), block.clone()) { Some((digest_level, digest_interval, _)) if digest_level == config.digest_levels => digest_interval, _ => return None, @@ -132,11 +134,17 @@ fn pruning_range( }; // last block for which changes trie is pruned - let last_block_to_prune = blocks_to_keep.and_then(|b| block.checked_sub(&b)); - let first_block_to_prune = last_block_to_prune.clone().and_then(|b| b.checked_sub(&prune_interval.into())); + let last_block_to_prune = match blocks_to_keep.and_then(|b| block.checked_sub(&b)) { + Some(last_block_to_prune) => if last_block_to_prune > config_activation_block { + last_block_to_prune + } else { + return None; + }, + _ => return None, + }; + let first_block_to_prune = last_block_to_prune.checked_sub(&prune_interval.into()); - last_block_to_prune - .and_then(|last| first_block_to_prune.map(|first| (first + One::one(), last))) + first_block_to_prune.map(|first| (first + One::one(), last_block_to_prune)) } /// Select pruning delay for the changes tries. To make sure we could build a changes @@ -145,7 +153,7 @@ fn pruning_range( /// blocks. So we can only prune blocks that are earlier than B - max_digest_interval. /// The pruning_delay stands for number of max_digest_interval-s that we want to keep: /// 0 or 1: means that only last changes trie is guaranteed to exists; -/// 2: the last chnages trie + previous changes trie +/// 2: the last changes trie + previous changes trie /// ... 
fn max_digest_intervals_to_keep( min_blocks_to_keep: Number, @@ -182,17 +190,18 @@ mod tests { config: &Configuration, storage: &S, min_blocks_to_keep: u64, + zero: u64, current_block: u64, ) -> HashSet { let mut pruned_trie_nodes = HashSet::new(); - prune(config, storage, min_blocks_to_keep, &AnchorBlockId { hash: Default::default(), number: current_block }, + prune(zero, config, storage, min_blocks_to_keep, &AnchorBlockId { hash: Default::default(), number: current_block }, |node| { pruned_trie_nodes.insert(node); }); pruned_trie_nodes } #[test] fn prune_works() { - fn prepare_storage() -> InMemoryStorage { + fn prepare_storage(zero: u64) -> InMemoryStorage { let mut mdb1 = MemoryDB::::default(); let root1 = insert_into_memory_db::(&mut mdb1, vec![(vec![10], vec![20])]).unwrap(); let mut mdb2 = MemoryDB::::default(); @@ -202,90 +211,102 @@ mod tests { let mut mdb4 = MemoryDB::::default(); let root4 = insert_into_memory_db::(&mut mdb4, vec![(vec![15], vec![25])]).unwrap(); let storage = InMemoryStorage::new(); - storage.insert(65, root1, mdb1); - storage.insert(66, root2, mdb2); - storage.insert(67, root3, mdb3); - storage.insert(68, root4, mdb4); + storage.insert(zero + 65, root1, mdb1); + storage.insert(zero + 66, root2, mdb2); + storage.insert(zero + 67, root3, mdb3); + storage.insert(zero + 68, root4, mdb4); storage } - // l1-digest is created every 2 blocks - // l2-digest is created every 4 blocks - // we do not want to keep any additional changes tries - // => only one l2-digest is saved AND it is pruned once next is created - let config = Configuration { digest_interval: 2, digest_levels: 2 }; - let storage = prepare_storage(); - assert!(prune_by_collect(&config, &storage, 0, 69).is_empty()); - assert!(prune_by_collect(&config, &storage, 0, 70).is_empty()); - assert!(prune_by_collect(&config, &storage, 0, 71).is_empty()); - let non_empty = prune_by_collect(&config, &storage, 0, 72); - assert!(!non_empty.is_empty()); - 
storage.remove_from_storage(&non_empty); - assert!(storage.into_mdb().drain().is_empty()); - - // l1-digest is created every 2 blocks - // l2-digest is created every 4 blocks - // we want keep 1 additional changes tries - let config = Configuration { digest_interval: 2, digest_levels: 2 }; - let storage = prepare_storage(); - assert!(prune_by_collect(&config, &storage, 8, 69).is_empty()); - assert!(prune_by_collect(&config, &storage, 8, 70).is_empty()); - assert!(prune_by_collect(&config, &storage, 8, 71).is_empty()); - assert!(prune_by_collect(&config, &storage, 8, 72).is_empty()); - assert!(prune_by_collect(&config, &storage, 8, 73).is_empty()); - assert!(prune_by_collect(&config, &storage, 8, 74).is_empty()); - assert!(prune_by_collect(&config, &storage, 8, 75).is_empty()); - let non_empty = prune_by_collect(&config, &storage, 8, 76); - assert!(!non_empty.is_empty()); - storage.remove_from_storage(&non_empty); - assert!(storage.into_mdb().drain().is_empty()); - - // l1-digest is created every 2 blocks - // we want keep 2 additional changes tries - let config = Configuration { digest_interval: 2, digest_levels: 1 }; - let storage = prepare_storage(); - assert!(prune_by_collect(&config, &storage, 4, 69).is_empty()); - let non_empty = prune_by_collect(&config, &storage, 4, 70); - assert!(!non_empty.is_empty()); - storage.remove_from_storage(&non_empty); - assert!(prune_by_collect(&config, &storage, 4, 71).is_empty()); - let non_empty = prune_by_collect(&config, &storage, 4, 72); - assert!(!non_empty.is_empty()); - storage.remove_from_storage(&non_empty); - assert!(storage.into_mdb().drain().is_empty()); + fn test_with_zero(zero: u64) { + // l1-digest is created every 2 blocks + // l2-digest is created every 4 blocks + // we do not want to keep any additional changes tries + // => only one l2-digest is saved AND it is pruned once next is created + let config = Configuration { digest_interval: 2, digest_levels: 2 }; + let storage = prepare_storage(zero); + 
assert!(prune_by_collect(&config, &storage, 0, zero, zero + 69).is_empty()); + assert!(prune_by_collect(&config, &storage, 0, zero, zero + 70).is_empty()); + assert!(prune_by_collect(&config, &storage, 0, zero, zero + 71).is_empty()); + let non_empty = prune_by_collect(&config, &storage, 0, zero, zero + 72); + assert!(!non_empty.is_empty()); + storage.remove_from_storage(&non_empty); + assert!(storage.into_mdb().drain().is_empty()); + + // l1-digest is created every 2 blocks + // l2-digest is created every 4 blocks + // we want keep 1 additional changes tries + let config = Configuration { digest_interval: 2, digest_levels: 2 }; + let storage = prepare_storage(zero); + assert!(prune_by_collect(&config, &storage, 8, zero, zero + 69).is_empty()); + assert!(prune_by_collect(&config, &storage, 8, zero, zero + 70).is_empty()); + assert!(prune_by_collect(&config, &storage, 8, zero, zero + 71).is_empty()); + assert!(prune_by_collect(&config, &storage, 8, zero, zero + 72).is_empty()); + assert!(prune_by_collect(&config, &storage, 8, zero, zero + 73).is_empty()); + assert!(prune_by_collect(&config, &storage, 8, zero, zero + 74).is_empty()); + assert!(prune_by_collect(&config, &storage, 8, zero, zero + 75).is_empty()); + let non_empty = prune_by_collect(&config, &storage, 8, zero, zero + 76); + assert!(!non_empty.is_empty()); + storage.remove_from_storage(&non_empty); + assert!(storage.into_mdb().drain().is_empty()); + + // l1-digest is created every 2 blocks + // we want keep 2 additional changes tries + let config = Configuration { digest_interval: 2, digest_levels: 1 }; + let storage = prepare_storage(zero); + assert!(prune_by_collect(&config, &storage, 4, zero, zero + 69).is_empty()); + let non_empty = prune_by_collect(&config, &storage, 4, zero, zero + 70); + assert!(!non_empty.is_empty()); + storage.remove_from_storage(&non_empty); + assert!(prune_by_collect(&config, &storage, 4, zero, zero + 71).is_empty()); + let non_empty = prune_by_collect(&config, &storage, 4, 
zero, zero + 72); + assert!(!non_empty.is_empty()); + storage.remove_from_storage(&non_empty); + assert!(storage.into_mdb().drain().is_empty()); + } + + test_with_zero(0); + test_with_zero(1023); + test_with_zero(1024); } #[test] fn pruning_range_works() { - // DIGESTS ARE NOT CREATED + NO TRIES ARE PRUNED - assert_eq!(pruning_range(&config(10, 0), 2u64, 2u64), None); - - // DIGESTS ARE NOT CREATED + SOME TRIES ARE PRUNED - assert_eq!(pruning_range(&config(10, 0), 100u64, 110u64), Some((10, 10))); - assert_eq!(pruning_range(&config(10, 0), 100u64, 210u64), Some((110, 110))); - - // DIGESTS ARE CREATED + NO TRIES ARE PRUNED - - assert_eq!(pruning_range(&config(10, 2), 2u64, 0u64), None); - assert_eq!(pruning_range(&config(10, 2), 30u64, 100u64), None); - assert_eq!(pruning_range(&config(::std::u32::MAX, 2), 1u64, 1024u64), None); - assert_eq!(pruning_range(&config(::std::u32::MAX, 2), ::std::u64::MAX, 1024u64), None); - assert_eq!(pruning_range(&config(32, 2), 2048u64, 512u64), None); - assert_eq!(pruning_range(&config(32, 2), 2048u64, 1024u64), None); - - // DIGESTS ARE CREATED + SOME TRIES ARE PRUNED - - // when we do not want to keep any highest-level-digests - // (system forces to keep at least one) - assert_eq!(pruning_range(&config(4, 2), 0u64, 32u64), Some((1, 16))); - assert_eq!(pruning_range(&config(4, 2), 0u64, 64u64), Some((33, 48))); - // when we want to keep 1 (last) highest-level-digest - assert_eq!(pruning_range(&config(4, 2), 16u64, 32u64), Some((1, 16))); - assert_eq!(pruning_range(&config(4, 2), 16u64, 64u64), Some((33, 48))); - // when we want to keep 1 (last) + 1 additional level digests - assert_eq!(pruning_range(&config(32, 2), 4096u64, 5120u64), Some((1, 1024))); - assert_eq!(pruning_range(&config(32, 2), 4096u64, 6144u64), Some((1025, 2048))); + fn test_with_zero(zero: u64) { + // DIGESTS ARE NOT CREATED + NO TRIES ARE PRUNED + assert_eq!(pruning_range(zero, &config(10, 0), 2u64, zero + 2u64), None); + + // DIGESTS ARE NOT CREATED + SOME 
TRIES ARE PRUNED + assert_eq!(pruning_range(zero, &config(10, 0), 100u64, zero + 110u64), Some((zero + 10, zero + 10))); + assert_eq!(pruning_range(zero, &config(10, 0), 100u64, zero + 210u64), Some((zero + 110, zero + 110))); + + // DIGESTS ARE CREATED + NO TRIES ARE PRUNED + + assert_eq!(pruning_range(zero, &config(10, 2), 2u64, zero + 0u64), None); + assert_eq!(pruning_range(zero, &config(10, 2), 30u64, zero + 100u64), None); + assert_eq!(pruning_range(zero, &config(::std::u32::MAX, 2), 1u64, zero + 1024u64), None); + assert_eq!(pruning_range(zero, &config(::std::u32::MAX, 2), ::std::u64::MAX, zero + 1024u64), None); + assert_eq!(pruning_range(zero, &config(32, 2), 2048u64, zero + 512u64), None); + assert_eq!(pruning_range(zero, &config(32, 2), 2048u64, zero + 1024u64), None); + + // DIGESTS ARE CREATED + SOME TRIES ARE PRUNED + + // when we do not want to keep any highest-level-digests + // (system forces to keep at least one) + assert_eq!(pruning_range(zero, &config(4, 2), 0u64, zero + 32u64), Some((zero + 1, zero + 16))); + assert_eq!(pruning_range(zero, &config(4, 2), 0u64, zero + 64u64), Some((zero + 33, zero + 48))); + // when we want to keep 1 (last) highest-level-digest + assert_eq!(pruning_range(zero, &config(4, 2), 16u64, zero + 32u64), Some((zero + 1, zero + 16))); + assert_eq!(pruning_range(zero, &config(4, 2), 16u64, zero + 64u64), Some((zero + 33, zero + 48))); + // when we want to keep 1 (last) + 1 additional level digests + assert_eq!(pruning_range(zero, &config(32, 2), 4096u64, zero + 5120u64), Some((zero + 1, zero + 1024))); + assert_eq!(pruning_range(zero, &config(32, 2), 4096u64, zero + 6144u64), Some((zero + 1025, zero + 2048))); + } + + test_with_zero(0); + test_with_zero(1023); + test_with_zero(1024); } #[test] @@ -299,20 +320,26 @@ mod tests { #[test] fn oldest_non_pruned_trie_works() { - // when digests are not created at all - assert_eq!(oldest_non_pruned_trie(&config(0, 0), 100u64, 10u64), 1); - 
assert_eq!(oldest_non_pruned_trie(&config(0, 0), 100u64, 110u64), 11); - - // when only l1 digests are created - assert_eq!(oldest_non_pruned_trie(&config(100, 1), 100u64, 50u64), 1); - assert_eq!(oldest_non_pruned_trie(&config(100, 1), 100u64, 110u64), 1); - assert_eq!(oldest_non_pruned_trie(&config(100, 1), 100u64, 210u64), 101); - - // when l2 digests are created - assert_eq!(oldest_non_pruned_trie(&config(100, 2), 100u64, 50u64), 1); - assert_eq!(oldest_non_pruned_trie(&config(100, 2), 100u64, 110u64), 1); - assert_eq!(oldest_non_pruned_trie(&config(100, 2), 100u64, 210u64), 1); - assert_eq!(oldest_non_pruned_trie(&config(100, 2), 100u64, 10110u64), 1); - assert_eq!(oldest_non_pruned_trie(&config(100, 2), 100u64, 20110u64), 10001); + fn test_with_zero(zero: u64) { + // when digests are not created at all + assert_eq!(oldest_non_pruned_trie(zero, &config(0, 0), 100u64, zero + 10u64), zero + 1); + assert_eq!(oldest_non_pruned_trie(zero, &config(0, 0), 100u64, zero + 110u64), zero + 11); + + // when only l1 digests are created + assert_eq!(oldest_non_pruned_trie(zero, &config(100, 1), 100u64, zero +50u64), zero + 1); + assert_eq!(oldest_non_pruned_trie(zero, &config(100, 1), 100u64, zero +110u64), zero + 1); + assert_eq!(oldest_non_pruned_trie(zero, &config(100, 1), 100u64, zero +210u64), zero + 101); + + // when l2 digests are created + assert_eq!(oldest_non_pruned_trie(zero, &config(100, 2), 100u64, zero + 50u64), zero + 1); + assert_eq!(oldest_non_pruned_trie(zero, &config(100, 2), 100u64, zero + 110u64), zero + 1); + assert_eq!(oldest_non_pruned_trie(zero, &config(100, 2), 100u64, zero + 210u64), zero + 1); + assert_eq!(oldest_non_pruned_trie(zero, &config(100, 2), 100u64, zero + 10110u64), zero + 1); + assert_eq!(oldest_non_pruned_trie(zero, &config(100, 2), 100u64, zero + 20110u64), zero + 10001); + } + + test_with_zero(0); + test_with_zero(100); + test_with_zero(101); } } diff --git a/core/state-machine/src/ext.rs b/core/state-machine/src/ext.rs index 
f22207088ba8a..365696ad05e38 100644 --- a/core/state-machine/src/ext.rs +++ b/core/state-machine/src/ext.rs @@ -348,6 +348,7 @@ where #[cfg(test)] mod tests { use hex_literal::hex; + use num_traits::Zero; use parity_codec::Encode; use primitives::Blake2Hasher; use primitives::storage::well_known_keys::EXTRINSIC_INDEX; @@ -398,7 +399,7 @@ mod tests { fn storage_changes_root_is_some_when_extrinsic_changes_are_non_empty() { let mut overlay = prepare_overlay_with_changes(); let storage = TestChangesTrieStorage::with_blocks(vec![(99, Default::default())]); - let state = Some(ChangesTrieState::new(changes_trie_config(), &storage)); + let state = Some(ChangesTrieState::new(changes_trie_config(), Zero::zero(), &storage)); let backend = TestBackend::default(); let mut ext = TestExt::new(&mut overlay, &backend, state.as_ref(), None); assert_eq!(ext.storage_changes_root(Default::default()).unwrap(), @@ -410,7 +411,7 @@ mod tests { let mut overlay = prepare_overlay_with_changes(); overlay.prospective.top.get_mut(&vec![1]).unwrap().value = None; let storage = TestChangesTrieStorage::with_blocks(vec![(99, Default::default())]); - let state = Some(ChangesTrieState::new(changes_trie_config(), &storage)); + let state = Some(ChangesTrieState::new(changes_trie_config(), Zero::zero(), &storage)); let backend = TestBackend::default(); let mut ext = TestExt::new(&mut overlay, &backend, state.as_ref(), None); assert_eq!(ext.storage_changes_root(Default::default()).unwrap(), diff --git a/core/state-machine/src/testing.rs b/core/state-machine/src/testing.rs index f169f478182a0..e52c96326e7d8 100644 --- a/core/state-machine/src/testing.rs +++ b/core/state-machine/src/testing.rs @@ -19,6 +19,7 @@ use std::collections::{HashMap, BTreeMap}; use std::iter::FromIterator; use hash_db::Hasher; +use num_traits::Zero; use parity_codec::Decode; use crate::backend::{InMemory, Backend}; use primitives::storage::well_known_keys::is_child_storage_key; @@ -229,7 +230,7 @@ impl Externalities for 
TestExternalities fn storage_changes_root(&mut self, parent: H::Out) -> Result, ()> { match self.changes_trie_config.clone() { Some(config) => { - let state = ChangesTrieState::new(config, &self.changes_trie_storage); + let state = ChangesTrieState::new(config, Zero::zero(), &self.changes_trie_storage); Ok(compute_changes_trie_root::<_, H, N>( &self.backend, Some(&state), From 40ed2c2213363f4901253bd57a9de3d83eefca07 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 26 Jun 2019 17:47:49 +0300 Subject: [PATCH 04/63] ChangesTrieSignal::as_new_configuration --- core/sr-primitives/src/generic/digest.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/core/sr-primitives/src/generic/digest.rs b/core/sr-primitives/src/generic/digest.rs index cf946be8e047c..d6e6352587208 100644 --- a/core/sr-primitives/src/generic/digest.rs +++ b/core/sr-primitives/src/generic/digest.rs @@ -388,6 +388,15 @@ impl<'a, Hash: Encode> Encode for DigestItemRef<'a, Hash> { } } +impl ChangesTrieSignal { + /// Try to cast this signal to NewConfiguration. 
+ pub fn as_new_configuration(&self) -> Option<&Option> { + match self { + ChangesTrieSignal::NewConfiguration(config) => Some(config), + } + } +} + #[cfg(test)] mod tests { use super::*; From c01ecd6fa28999d5330510dc65db8f2fd4e71f35 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 27 Jun 2019 11:19:08 +0300 Subject: [PATCH 05/63] moved well_known_cache_keys to client --- Cargo.lock | 1 - core/client/db/Cargo.toml | 1 - core/client/db/src/cache/mod.rs | 2 +- core/client/db/src/lib.rs | 3 +-- core/client/db/src/light.rs | 3 +-- core/client/src/backend.rs | 2 +- core/client/src/blockchain.rs | 10 +++++++++- core/client/src/client.rs | 2 +- core/client/src/in_mem.rs | 3 +-- core/client/src/lib.rs | 3 +++ core/client/src/light/backend.rs | 3 +-- core/client/src/light/blockchain.rs | 8 +++++--- core/consensus/aura/src/lib.rs | 3 ++- core/consensus/babe/src/lib.rs | 4 ++-- core/consensus/common/src/block_import.rs | 5 ++--- core/consensus/common/src/import_queue.rs | 5 ++++- core/consensus/common/src/lib.rs | 9 --------- core/finality-grandpa/src/import.rs | 6 +++--- core/finality-grandpa/src/light_import.rs | 3 ++- core/network/src/test/mod.rs | 7 +++++-- core/service/src/lib.rs | 4 ++-- 21 files changed, 46 insertions(+), 41 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c835b0761ad13..c8590dec46226 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4015,7 +4015,6 @@ dependencies = [ "parking_lot 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "sr-primitives 2.0.0", "substrate-client 2.0.0", - "substrate-consensus-common 2.0.0", "substrate-executor 2.0.0", "substrate-keyring 2.0.0", "substrate-primitives 2.0.0", diff --git a/core/client/db/Cargo.toml b/core/client/db/Cargo.toml index bfc7108db750f..1ae42d6a8975c 100644 --- a/core/client/db/Cargo.toml +++ b/core/client/db/Cargo.toml @@ -21,7 +21,6 @@ parity-codec = { version = "3.3", features = ["derive"] } executor = { package = "substrate-executor", path = "../../executor" } state_db = 
{ package = "substrate-state-db", path = "../../state-db" } trie = { package = "substrate-trie", path = "../../trie" } -consensus_common = { package = "substrate-consensus-common", path = "../../consensus/common" } [dev-dependencies] substrate-keyring = { path = "../../keyring" } diff --git a/core/client/db/src/cache/mod.rs b/core/client/db/src/cache/mod.rs index 64d3c4a25e7bf..a54668d5d7bfd 100644 --- a/core/client/db/src/cache/mod.rs +++ b/core/client/db/src/cache/mod.rs @@ -22,11 +22,11 @@ use parking_lot::RwLock; use kvdb::{KeyValueDB, DBTransaction}; use client::blockchain::Cache as BlockchainCache; +use client::well_known_cache_keys::Id as CacheKeyId; use client::error::Result as ClientResult; use parity_codec::{Encode, Decode}; use runtime_primitives::generic::BlockId; use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}; -use consensus_common::well_known_cache_keys::Id as CacheKeyId; use crate::utils::{self, COLUMN_META, db_err}; use self::list_cache::ListCache; diff --git a/core/client/db/src/lib.rs b/core/client/db/src/lib.rs index 8081a4b532e09..206a0cbd31bda 100644 --- a/core/client/db/src/lib.rs +++ b/core/client/db/src/lib.rs @@ -58,9 +58,8 @@ use executor::RuntimeInfo; use state_machine::{CodeExecutor, DBValue}; use crate::utils::{Meta, db_err, meta_keys, read_db, block_id_to_lookup_key, read_meta}; use client::leaves::{LeafSet, FinalizationDisplaced}; -use client::children; +use client::{children, well_known_cache_keys}; use state_db::StateDb; -use consensus_common::well_known_cache_keys; use crate::storage_cache::{CachingState, SharedCache, new_shared_cache}; use log::{trace, debug, warn}; pub use state_db::PruningMode; diff --git a/core/client/db/src/light.rs b/core/client/db/src/light.rs index 0abce00528d37..b5e283fc33333 100644 --- a/core/client/db/src/light.rs +++ b/core/client/db/src/light.rs @@ -25,7 +25,7 @@ use kvdb::{KeyValueDB, DBTransaction}; use client::backend::{AuxStore, NewBlockState}; use 
client::blockchain::{BlockStatus, Cache as BlockchainCache, HeaderBackend as BlockchainHeaderBackend, Info as BlockchainInfo}; -use client::cht; +use client::{cht, well_known_cache_keys}; use client::leaves::{LeafSet, FinalizationDisplaced}; use client::error::{Error as ClientError, Result as ClientResult}; use client::light::blockchain::Storage as LightBlockchainStorage; @@ -33,7 +33,6 @@ use parity_codec::{Decode, Encode}; use primitives::Blake2Hasher; use runtime_primitives::generic::{DigestItem, BlockId}; use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, Zero, One, NumberFor}; -use consensus_common::well_known_cache_keys; use crate::cache::{DbCacheSync, DbCache, ComplexBlockId, EntryType as CacheEntryType}; use crate::utils::{self, meta_keys, Meta, db_err, read_db, block_id_to_lookup_key, read_meta}; use crate::DatabaseSettings; diff --git a/core/client/src/backend.rs b/core/client/src/backend.rs index 1d650b34fbb51..f839bc1332cae 100644 --- a/core/client/src/backend.rs +++ b/core/client/src/backend.rs @@ -24,7 +24,7 @@ use runtime_primitives::{generic::BlockId, Justification, StorageOverlay, Childr use runtime_primitives::traits::{Block as BlockT, Zero, NumberFor}; use state_machine::backend::Backend as StateBackend; use state_machine::{ChangesTrieStorage as StateChangesTrieStorage, ChangesTrieState}; -use consensus::well_known_cache_keys; +use crate::blockchain::well_known_cache_keys; use hash_db::Hasher; use trie::MemoryDB; use parking_lot::Mutex; diff --git a/core/client/src/blockchain.rs b/core/client/src/blockchain.rs index b07e26396efb5..ba31e2528065a 100644 --- a/core/client/src/blockchain.rs +++ b/core/client/src/blockchain.rs @@ -21,7 +21,6 @@ use std::sync::Arc; use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor}; use runtime_primitives::generic::BlockId; use runtime_primitives::Justification; -use consensus::well_known_cache_keys; use crate::error::{Error, Result}; @@ -258,3 +257,12 @@ pub fn 
tree_route>( pivot, }) } + +/// A list of all well known keys in the blockchain cache. +pub mod well_known_cache_keys { + /// The type representing cache keys. + pub type Id = consensus::import_queue::CacheKeyId; + + /// A list of authorities. + pub const AUTHORITIES: Id = *b"auth"; +} diff --git a/core/client/src/client.rs b/core/client/src/client.rs index 3f3f1563b8b0f..dcb95f0f89478 100644 --- a/core/client/src/client.rs +++ b/core/client/src/client.rs @@ -31,7 +31,6 @@ use runtime_primitives::{ use consensus::{ Error as ConsensusError, ImportBlock, ImportResult, BlockOrigin, ForkChoiceStrategy, - well_known_cache_keys::Id as CacheKeyId, SelectChain, self, }; use runtime_primitives::traits::{ @@ -67,6 +66,7 @@ use crate::backend::{ use crate::blockchain::{ self, Info as ChainInfo, Backend as ChainBackend, HeaderBackend as ChainHeaderBackend, ProvideCache, Cache, + well_known_cache_keys::Id as CacheKeyId, }; use crate::call_executor::{CallExecutor, LocalCallExecutor}; use executor::{RuntimeVersion, RuntimeInfo}; diff --git a/core/client/src/in_mem.rs b/core/client/src/in_mem.rs index d0283147fa4ce..973fd9e1a4f75 100644 --- a/core/client/src/in_mem.rs +++ b/core/client/src/in_mem.rs @@ -27,13 +27,12 @@ use state_machine::backend::{Backend as StateBackend, InMemory}; use state_machine::{self, InMemoryChangesTrieStorage, ChangesTrieAnchorBlockId}; use hash_db::Hasher; use trie::MemoryDB; -use consensus::well_known_cache_keys::Id as CacheKeyId; use crate::error; use crate::backend::{self, NewBlockState, StorageCollection, ChildStorageCollection}; use crate::light; use crate::leaves::LeafSet; -use crate::blockchain::{self, BlockStatus, HeaderBackend}; +use crate::blockchain::{self, BlockStatus, HeaderBackend, well_known_cache_keys::Id as CacheKeyId}; struct PendingBlock { block: StoredBlock, diff --git a/core/client/src/lib.rs b/core/client/src/lib.rs index 8062fae500199..276cc1b6215f0 100644 --- a/core/client/src/lib.rs +++ b/core/client/src/lib.rs @@ -68,5 +68,8 @@ 
pub use state_machine::{ExecutionStrategy, NeverOffchainExt}; #[cfg(feature = "std")] pub use crate::leaves::LeafSet; +#[cfg(feature = "std")] +pub use crate::blockchain::well_known_cache_keys; + #[doc(inline)] pub use sr_api_macros::{decl_runtime_apis, impl_runtime_apis}; diff --git a/core/client/src/light/backend.rs b/core/client/src/light/backend.rs index f71366808eccd..87e9a4b258a09 100644 --- a/core/client/src/light/backend.rs +++ b/core/client/src/light/backend.rs @@ -30,13 +30,12 @@ use crate::backend::{ AuxStore, Backend as ClientBackend, BlockImportOperation, RemoteBackend, NewBlockState, StorageCollection, ChildStorageCollection, }; -use crate::blockchain::HeaderBackend as BlockchainHeaderBackend; +use crate::blockchain::{HeaderBackend as BlockchainHeaderBackend, well_known_cache_keys}; use crate::error::{Error as ClientError, Result as ClientResult}; use crate::light::blockchain::{Blockchain, Storage as BlockchainStorage}; use crate::light::fetcher::{Fetcher, RemoteReadRequest}; use hash_db::Hasher; use trie::MemoryDB; -use consensus::well_known_cache_keys; const IN_MEMORY_EXPECT_PROOF: &str = "InMemory state backend has Void error type and always suceeds; qed"; diff --git a/core/client/src/light/blockchain.rs b/core/client/src/light/blockchain.rs index e3d9c55a6a463..13bc4a0a0863f 100644 --- a/core/client/src/light/blockchain.rs +++ b/core/client/src/light/blockchain.rs @@ -23,11 +23,13 @@ use parking_lot::Mutex; use runtime_primitives::{Justification, generic::BlockId}; use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}; -use consensus::well_known_cache_keys; use crate::backend::{AuxStore, NewBlockState}; -use crate::blockchain::{Backend as BlockchainBackend, BlockStatus, Cache as BlockchainCache, - HeaderBackend as BlockchainHeaderBackend, Info as BlockchainInfo, ProvideCache}; +use crate::blockchain::{ + Backend as BlockchainBackend, BlockStatus, Cache as BlockchainCache, + HeaderBackend as BlockchainHeaderBackend, 
Info as BlockchainInfo, ProvideCache, + well_known_cache_keys, +}; use crate::cht; use crate::error::{Error as ClientError, Result as ClientResult}; use crate::light::fetcher::{Fetcher, RemoteBodyRequest, RemoteHeaderRequest}; diff --git a/core/consensus/aura/src/lib.rs b/core/consensus/aura/src/lib.rs index c85e273a42b34..aa07bc9604a92 100644 --- a/core/consensus/aura/src/lib.rs +++ b/core/consensus/aura/src/lib.rs @@ -33,7 +33,7 @@ use std::{sync::Arc, time::Duration, thread, marker::PhantomData, hash::Hash, fm use parity_codec::{Encode, Decode, Codec}; use consensus_common::{self, BlockImport, Environment, Proposer, ForkChoiceStrategy, ImportBlock, BlockOrigin, Error as ConsensusError, - SelectChain, well_known_cache_keys::{self, Id as CacheKeyId} + SelectChain, }; use consensus_common::import_queue::{ Verifier, BasicQueue, SharedBlockImport, SharedJustificationImport, SharedFinalityProofImport, @@ -45,6 +45,7 @@ use client::{ runtime_api::ApiExt, error::Result as CResult, backend::AuxStore, + well_known_cache_keys::{self, Id as CacheKeyId}, }; use runtime_primitives::{generic::{self, BlockId, OpaqueDigestItemId}, Justification}; diff --git a/core/consensus/babe/src/lib.rs b/core/consensus/babe/src/lib.rs index 0bab790c991f8..9ec976379156e 100644 --- a/core/consensus/babe/src/lib.rs +++ b/core/consensus/babe/src/lib.rs @@ -34,7 +34,6 @@ use consensus_common::import_queue::{ SharedBlockImport, SharedJustificationImport, SharedFinalityProofImport, SharedFinalityProofRequestBuilder, }; -use consensus_common::well_known_cache_keys::Id as CacheKeyId; use runtime_primitives::{generic, generic::{BlockId, OpaqueDigestItemId}, Justification}; use runtime_primitives::traits::{ Block, Header, DigestItemFor, ProvideRuntimeApi, @@ -68,7 +67,7 @@ use srml_babe::{ BabeInherentData, timestamp::{TimestampInherentData, InherentType as TimestampInherent} }; -use consensus_common::{SelectChain, well_known_cache_keys}; +use consensus_common::SelectChain; use 
consensus_common::import_queue::{Verifier, BasicQueue}; use client::{ block_builder::api::BlockBuilder as BlockBuilderApi, @@ -76,6 +75,7 @@ use client::{ runtime_api::ApiExt, error::Result as CResult, backend::AuxStore, + well_known_cache_keys::{self, Id as CacheKeyId}, }; use slots::{CheckedHeader, check_equivocation}; use futures::{Future, IntoFuture, future}; diff --git a/core/consensus/common/src/block_import.rs b/core/consensus/common/src/block_import.rs index 6ce4acdf3941a..0cb1832b1690c 100644 --- a/core/consensus/common/src/block_import.rs +++ b/core/consensus/common/src/block_import.rs @@ -20,9 +20,8 @@ use runtime_primitives::traits::{Block as BlockT, DigestItemFor, Header as Heade use runtime_primitives::Justification; use std::borrow::Cow; use std::collections::HashMap; -use crate::well_known_cache_keys; -use crate::import_queue::Verifier; +use crate::import_queue::{Verifier, CacheKeyId}; /// Block import result. #[derive(Debug, PartialEq, Eq)] @@ -186,7 +185,7 @@ pub trait BlockImport { fn import_block( &self, block: ImportBlock, - cache: HashMap>, + cache: HashMap>, ) -> Result; } diff --git a/core/consensus/common/src/import_queue.rs b/core/consensus/common/src/import_queue.rs index 6cbb8ee413ed6..03db50097d8e8 100644 --- a/core/consensus/common/src/import_queue.rs +++ b/core/consensus/common/src/import_queue.rs @@ -30,7 +30,7 @@ use futures::{prelude::*, sync::mpsc}; use runtime_primitives::{Justification, traits::{ Block as BlockT, Header as HeaderT, NumberFor, }}; -use crate::{error::Error as ConsensusError, well_known_cache_keys::Id as CacheKeyId, block_import::{ +use crate::{error::Error as ConsensusError, block_import::{ BlockImport, BlockOrigin, ImportBlock, ImportedAux, ImportResult, JustificationImport, FinalityProofImport, FinalityProofRequestBuilder, }}; @@ -74,6 +74,9 @@ pub struct IncomingBlock { pub origin: Option, } +/// Type of keys in the blockchain cache that consensus module could use for its needs. 
+pub type CacheKeyId = [u8; 4]; + /// Verify a justification of a block pub trait Verifier: Send + Sync { /// Verify the given data and return the ImportBlock and an optional diff --git a/core/consensus/common/src/lib.rs b/core/consensus/common/src/lib.rs index aa210b9f867d6..f5dd8936a1630 100644 --- a/core/consensus/common/src/lib.rs +++ b/core/consensus/common/src/lib.rs @@ -115,12 +115,3 @@ impl SyncOracle for Arc { T::is_offline(&*self) } } - -/// A list of all well known keys in the cache. -pub mod well_known_cache_keys { - /// The type representing cache keys. - pub type Id = [u8; 4]; - - /// A list of authorities. - pub const AUTHORITIES: Id = *b"auth"; -} diff --git a/core/finality-grandpa/src/import.rs b/core/finality-grandpa/src/import.rs index 227daff5527d1..7feec92be893a 100644 --- a/core/finality-grandpa/src/import.rs +++ b/core/finality-grandpa/src/import.rs @@ -27,8 +27,8 @@ use client::backend::Backend; use client::runtime_api::ApiExt; use consensus_common::{ BlockImport, Error as ConsensusError, - ImportBlock, ImportResult, JustificationImport, well_known_cache_keys, - SelectChain, + ImportBlock, ImportResult, JustificationImport, + SelectChain, import_queue::CacheKeyId, }; use fg_primitives::GrandpaApi; use runtime_primitives::Justification; @@ -387,7 +387,7 @@ impl, RA, PRA, SC> BlockImport { type Error = ConsensusError; - fn import_block(&self, mut block: ImportBlock, new_cache: HashMap>) + fn import_block(&self, mut block: ImportBlock, new_cache: HashMap>) -> Result { let hash = block.post_header().hash(); diff --git a/core/finality-grandpa/src/light_import.rs b/core/finality-grandpa/src/light_import.rs index 25a3f84f6dc01..ec1f638b480e1 100644 --- a/core/finality-grandpa/src/light_import.rs +++ b/core/finality-grandpa/src/light_import.rs @@ -24,10 +24,11 @@ use client::{ backend::{AuxStore, Backend}, blockchain::HeaderBackend, error::Error as ClientError, + well_known_cache_keys, }; use parity_codec::{Encode, Decode}; use consensus_common::{ - 
import_queue::{Verifier, SharedFinalityProofRequestBuilder}, well_known_cache_keys, + import_queue::{Verifier, SharedFinalityProofRequestBuilder}, BlockOrigin, BlockImport, FinalityProofImport, ImportBlock, ImportResult, ImportedAux, Error as ConsensusError, FinalityProofRequestBuilder, }; diff --git a/core/network/src/test/mod.rs b/core/network/src/test/mod.rs index 58d8a91c2e5de..5bb48c77366b7 100644 --- a/core/network/src/test/mod.rs +++ b/core/network/src/test/mod.rs @@ -27,7 +27,10 @@ use std::sync::Arc; use crate::AlwaysBadChecker; use log::trace; use crate::chain::FinalityProofProvider; -use client::{self, ClientInfo, BlockchainEvents, FinalityNotifications}; +use client::{ + self, ClientInfo, BlockchainEvents, FinalityNotifications, + well_known_cache_keys::{self, Id as CacheKeyId}, +}; use client::{in_mem::Backend as InMemoryBackend, error::Result as ClientResult}; use client::block_builder::BlockBuilder; use client::backend::AuxStore; @@ -37,7 +40,7 @@ use consensus::import_queue::{ Link, SharedBlockImport, SharedJustificationImport, Verifier, SharedFinalityProofImport, SharedFinalityProofRequestBuilder, }; -use consensus::{Error as ConsensusError, well_known_cache_keys::{self, Id as CacheKeyId}}; +use consensus::Error as ConsensusError; use consensus::{BlockOrigin, ForkChoiceStrategy, ImportBlock, JustificationImport}; use crate::consensus_gossip::{ConsensusGossip, MessageRecipient as GossipMessageRecipient, TopicNotification}; use futures::{prelude::*, sync::{mpsc, oneshot}}; diff --git a/core/service/src/lib.rs b/core/service/src/lib.rs index f66b083fd0b91..0b13852627c18 100644 --- a/core/service/src/lib.rs +++ b/core/service/src/lib.rs @@ -730,10 +730,10 @@ fn build_system_rpc_handler( /// # }; /// # use transaction_pool::{self, txpool::{Pool as TransactionPool}}; /// # use network::construct_simple_protocol; -/// # use client::{self, LongestChain}; +/// # use client::{self, well_known_cache_keys::Id as CacheKeyId, LongestChain}; /// # use 
primitives::{Pair as PairT, ed25519}; /// # use consensus_common::import_queue::{BasicQueue, Verifier}; -/// # use consensus_common::{BlockOrigin, ImportBlock, well_known_cache_keys::Id as CacheKeyId}; +/// # use consensus_common::{BlockOrigin, ImportBlock}; /// # use node_runtime::{GenesisConfig, RuntimeApi}; /// # use std::sync::Arc; /// # use node_primitives::Block; From 73f193326c8225dab38e27ede7d25f5ea203759a Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 27 Jun 2019 12:18:30 +0300 Subject: [PATCH 06/63] extracted DbChangesTrieStorage to separate file --- core/client/db/src/changes_tries_storage.rs | 422 ++++++++++++++++++++ core/client/db/src/lib.rs | 387 +----------------- core/client/src/blockchain.rs | 3 + 3 files changed, 441 insertions(+), 371 deletions(-) create mode 100644 core/client/db/src/changes_tries_storage.rs diff --git a/core/client/db/src/changes_tries_storage.rs b/core/client/db/src/changes_tries_storage.rs new file mode 100644 index 0000000000000..016e796960b1a --- /dev/null +++ b/core/client/db/src/changes_tries_storage.rs @@ -0,0 +1,422 @@ +// Copyright 2017-2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! DB-backed changes tries storage. 
+ +use std::sync::Arc; +use kvdb::{KeyValueDB, DBTransaction}; +use parking_lot::RwLock; +use trie::MemoryDB; +use primitives::{H256, Blake2Hasher, ChangesTrieConfiguration, convert_hash}; +use runtime_primitives::traits::{ + Block as BlockT, Header as HeaderT, NumberFor, Zero, One, +}; +use runtime_primitives::generic::{BlockId, DigestItem}; +use state_machine::DBValue; +use crate::utils::{self, Meta}; + +pub struct DbChangesTrieStorage { + db: Arc, + changes_tries_column: Option, + key_lookup_column: Option, + header_column: Option, + meta: Arc, Block::Hash>>>, + min_blocks_to_keep: Option, + _phantom: ::std::marker::PhantomData, +} + +impl> DbChangesTrieStorage { + /// Create new changes trie storage. + pub fn new( + db: Arc, + changes_tries_column: Option, + key_lookup_column: Option, + header_column: Option, + meta: Arc, Block::Hash>>>, + min_blocks_to_keep: Option, + ) -> Self { + Self { + db, + changes_tries_column, + key_lookup_column, + header_column, + meta, + min_blocks_to_keep, + _phantom: Default::default(), + } + } + + /// Commit new changes trie. + pub fn commit(&self, tx: &mut DBTransaction, mut changes_trie: MemoryDB) { + for (key, (val, _)) in changes_trie.drain() { + tx.put(self.changes_tries_column, &key[..], &val); + } + } + + /// Prune obsolete changes tries. 
+ pub fn prune( + &self, + config: &ChangesTrieConfiguration, + tx: &mut DBTransaction, + block_hash: Block::Hash, + block_num: NumberFor, + ) { + // never prune on archive nodes + let min_blocks_to_keep = match self.min_blocks_to_keep { + Some(min_blocks_to_keep) => min_blocks_to_keep, + None => return, + }; + + state_machine::prune_changes_tries( + Zero::zero(), // TODO: not true + config, + &*self, + min_blocks_to_keep.into(), + &state_machine::ChangesTrieAnchorBlockId { + hash: convert_hash(&block_hash), + number: block_num, + }, + |node| tx.delete(self.changes_tries_column, node.as_ref())); + } +} + +impl client::backend::PrunableStateChangesTrieStorage + for DbChangesTrieStorage +where + Block: BlockT, +{ + fn oldest_changes_trie_block( + &self, + config: &ChangesTrieConfiguration, + best_finalized_block: NumberFor, + ) -> NumberFor { + match self.min_blocks_to_keep { + Some(min_blocks_to_keep) => state_machine::oldest_non_pruned_changes_trie( + Zero::zero(), // TODO: not true + config, + min_blocks_to_keep.into(), + best_finalized_block, + ), + None => One::one(), + } + } +} + +impl state_machine::ChangesTrieRootsStorage> + for DbChangesTrieStorage +where + Block: BlockT, +{ + fn build_anchor( + &self, + hash: H256, + ) -> Result>, String> { + utils::read_header::(&*self.db, self.key_lookup_column, self.header_column, BlockId::Hash(hash)) + .map_err(|e| e.to_string()) + .and_then(|maybe_header| maybe_header.map(|header| + state_machine::ChangesTrieAnchorBlockId { + hash, + number: *header.number(), + } + ).ok_or_else(|| format!("Unknown header: {}", hash))) + } + + fn root( + &self, + anchor: &state_machine::ChangesTrieAnchorBlockId>, + block: NumberFor, + ) -> Result, String> { + // check API requirement: we can't get NEXT block(s) based on anchor + if block > anchor.number { + return Err(format!("Can't get changes trie root at {} using anchor at {}", block, anchor.number)); + } + + // we need to get hash of the block to resolve changes trie root + let 
block_id = if block <= self.meta.read().finalized_number { + // if block is finalized, we could just read canonical hash + BlockId::Number(block) + } else { + // the block is not finalized + let mut current_num = anchor.number; + let mut current_hash: Block::Hash = convert_hash(&anchor.hash); + let maybe_anchor_header: Block::Header = utils::require_header::( + &*self.db, self.key_lookup_column, self.header_column, BlockId::Number(current_num) + ).map_err(|e| e.to_string())?; + if maybe_anchor_header.hash() == current_hash { + // if anchor is canonicalized, then the block is also canonicalized + BlockId::Number(block) + } else { + // else (block is not finalized + anchor is not canonicalized): + // => we should find the required block hash by traversing + // back from the anchor to the block with given number + while current_num != block { + let current_header: Block::Header = utils::require_header::( + &*self.db, self.key_lookup_column, self.header_column, BlockId::Hash(current_hash) + ).map_err(|e| e.to_string())?; + + current_hash = *current_header.parent_hash(); + current_num = current_num - One::one(); + } + + BlockId::Hash(current_hash) + } + }; + + Ok(utils::require_header::(&*self.db, self.key_lookup_column, self.header_column, block_id) + .map_err(|e| e.to_string())? 
+ .digest().log(DigestItem::as_changes_trie_root) + .map(|root| H256::from_slice(root.as_ref()))) + } +} + +impl state_machine::ChangesTrieStorage> + for DbChangesTrieStorage +where + Block: BlockT, +{ + fn get(&self, key: &H256, _prefix: &[u8]) -> Result, String> { + self.db.get(self.changes_tries_column, &key[..]) + .map_err(|err| format!("{}", err)) + } +} + +#[cfg(test)] +mod tests { + use client::backend::Backend as ClientBackend; + use client::blockchain::HeaderBackend as BlockchainHeaderBackend; + use state_machine::{ChangesTrieRootsStorage, ChangesTrieStorage}; + use crate::Backend; + use crate::tests::{Block, insert_header, prepare_changes}; + use super::*; + + #[test] + fn changes_trie_storage_works() { + let backend = Backend::::new_test(1000, 100); + backend.changes_tries_storage.meta.write().finalized_number = 1000; + + + let check_changes = |backend: &Backend, block: u64, changes: Vec<(Vec, Vec)>| { + let (changes_root, mut changes_trie_update) = prepare_changes(changes); + let anchor = state_machine::ChangesTrieAnchorBlockId { + hash: backend.blockchain().header(BlockId::Number(block)).unwrap().unwrap().hash(), + number: block + }; + assert_eq!(backend.changes_tries_storage.root(&anchor, block), Ok(Some(changes_root))); + + for (key, (val, _)) in changes_trie_update.drain() { + assert_eq!(backend.changes_trie_storage().unwrap().get(&key, &[]), Ok(Some(val))); + } + }; + + let changes0 = vec![(b"key_at_0".to_vec(), b"val_at_0".to_vec())]; + let changes1 = vec![ + (b"key_at_1".to_vec(), b"val_at_1".to_vec()), + (b"another_key_at_1".to_vec(), b"another_val_at_1".to_vec()), + ]; + let changes2 = vec![(b"key_at_2".to_vec(), b"val_at_2".to_vec())]; + + let block0 = insert_header(&backend, 0, Default::default(), changes0.clone(), Default::default()); + let block1 = insert_header(&backend, 1, block0, changes1.clone(), Default::default()); + let _ = insert_header(&backend, 2, block1, changes2.clone(), Default::default()); + + // check that the storage 
contains tries for all blocks + check_changes(&backend, 0, changes0); + check_changes(&backend, 1, changes1); + check_changes(&backend, 2, changes2); + } + + #[test] + fn changes_trie_storage_works_with_forks() { + let backend = Backend::::new_test(1000, 100); + + let changes0 = vec![(b"k0".to_vec(), b"v0".to_vec())]; + let changes1 = vec![(b"k1".to_vec(), b"v1".to_vec())]; + let changes2 = vec![(b"k2".to_vec(), b"v2".to_vec())]; + let block0 = insert_header(&backend, 0, Default::default(), changes0.clone(), Default::default()); + let block1 = insert_header(&backend, 1, block0, changes1.clone(), Default::default()); + let block2 = insert_header(&backend, 2, block1, changes2.clone(), Default::default()); + + let changes2_1_0 = vec![(b"k3".to_vec(), b"v3".to_vec())]; + let changes2_1_1 = vec![(b"k4".to_vec(), b"v4".to_vec())]; + let block2_1_0 = insert_header(&backend, 3, block2, changes2_1_0.clone(), Default::default()); + let block2_1_1 = insert_header(&backend, 4, block2_1_0, changes2_1_1.clone(), Default::default()); + + let changes2_2_0 = vec![(b"k5".to_vec(), b"v5".to_vec())]; + let changes2_2_1 = vec![(b"k6".to_vec(), b"v6".to_vec())]; + let block2_2_0 = insert_header(&backend, 3, block2, changes2_2_0.clone(), Default::default()); + let block2_2_1 = insert_header(&backend, 4, block2_2_0, changes2_2_1.clone(), Default::default()); + + // finalize block1 + backend.changes_tries_storage.meta.write().finalized_number = 1; + + // branch1: when asking for finalized block hash + let (changes1_root, _) = prepare_changes(changes1); + let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block2_1_1, number: 4 }; + assert_eq!(backend.changes_tries_storage.root(&anchor, 1), Ok(Some(changes1_root))); + + // branch2: when asking for finalized block hash + let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block2_2_1, number: 4 }; + assert_eq!(backend.changes_tries_storage.root(&anchor, 1), Ok(Some(changes1_root))); + + // branch1: when asking for 
non-finalized block hash (search by traversal) + let (changes2_1_0_root, _) = prepare_changes(changes2_1_0); + let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block2_1_1, number: 4 }; + assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_1_0_root))); + + // branch2: when asking for non-finalized block hash (search using canonicalized hint) + let (changes2_2_0_root, _) = prepare_changes(changes2_2_0); + let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block2_2_1, number: 4 }; + assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_2_0_root))); + + // finalize first block of branch2 (block2_2_0) + backend.changes_tries_storage.meta.write().finalized_number = 3; + + // branch2: when asking for finalized block of this branch + assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_2_0_root))); + + // branch1: when asking for finalized block of other branch + // => result is incorrect (returned for the block of branch1), but this is expected, + // because the other fork is abandoned (forked before finalized header) + let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block2_1_1, number: 4 }; + assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_2_0_root))); + } + + #[test] + fn changes_tries_with_digest_are_pruned_on_finalization() { + let mut backend = Backend::::new_test(1000, 100); + backend.changes_tries_storage.min_blocks_to_keep = Some(8); + let config = ChangesTrieConfiguration { + digest_interval: 2, + digest_levels: 2, + }; + + // insert some blocks + let block0 = insert_header(&backend, 0, Default::default(), vec![(b"key_at_0".to_vec(), b"val_at_0".to_vec())], Default::default()); + let block1 = insert_header(&backend, 1, block0, vec![(b"key_at_1".to_vec(), b"val_at_1".to_vec())], Default::default()); + let block2 = insert_header(&backend, 2, block1, vec![(b"key_at_2".to_vec(), b"val_at_2".to_vec())], Default::default()); + let block3 = 
insert_header(&backend, 3, block2, vec![(b"key_at_3".to_vec(), b"val_at_3".to_vec())], Default::default()); + let block4 = insert_header(&backend, 4, block3, vec![(b"key_at_4".to_vec(), b"val_at_4".to_vec())], Default::default()); + let block5 = insert_header(&backend, 5, block4, vec![(b"key_at_5".to_vec(), b"val_at_5".to_vec())], Default::default()); + let block6 = insert_header(&backend, 6, block5, vec![(b"key_at_6".to_vec(), b"val_at_6".to_vec())], Default::default()); + let block7 = insert_header(&backend, 7, block6, vec![(b"key_at_7".to_vec(), b"val_at_7".to_vec())], Default::default()); + let block8 = insert_header(&backend, 8, block7, vec![(b"key_at_8".to_vec(), b"val_at_8".to_vec())], Default::default()); + let block9 = insert_header(&backend, 9, block8, vec![(b"key_at_9".to_vec(), b"val_at_9".to_vec())], Default::default()); + let block10 = insert_header(&backend, 10, block9, vec![(b"key_at_10".to_vec(), b"val_at_10".to_vec())], Default::default()); + let block11 = insert_header(&backend, 11, block10, vec![(b"key_at_11".to_vec(), b"val_at_11".to_vec())], Default::default()); + let block12 = insert_header(&backend, 12, block11, vec![(b"key_at_12".to_vec(), b"val_at_12".to_vec())], Default::default()); + let block13 = insert_header(&backend, 13, block12, vec![(b"key_at_13".to_vec(), b"val_at_13".to_vec())], Default::default()); + backend.changes_tries_storage.meta.write().finalized_number = 13; + + // check that roots of all tries are in the columns::CHANGES_TRIE + let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block13, number: 13 }; + fn read_changes_trie_root(backend: &Backend, num: u64) -> H256 { + backend.blockchain().header(BlockId::Number(num)).unwrap().unwrap().digest().logs().iter() + .find(|i| i.as_changes_trie_root().is_some()).unwrap().as_changes_trie_root().unwrap().clone() + } + let root1 = read_changes_trie_root(&backend, 1); assert_eq!(backend.changes_tries_storage.root(&anchor, 1).unwrap(), Some(root1)); + let root2 = 
read_changes_trie_root(&backend, 2); assert_eq!(backend.changes_tries_storage.root(&anchor, 2).unwrap(), Some(root2)); + let root3 = read_changes_trie_root(&backend, 3); assert_eq!(backend.changes_tries_storage.root(&anchor, 3).unwrap(), Some(root3)); + let root4 = read_changes_trie_root(&backend, 4); assert_eq!(backend.changes_tries_storage.root(&anchor, 4).unwrap(), Some(root4)); + let root5 = read_changes_trie_root(&backend, 5); assert_eq!(backend.changes_tries_storage.root(&anchor, 5).unwrap(), Some(root5)); + let root6 = read_changes_trie_root(&backend, 6); assert_eq!(backend.changes_tries_storage.root(&anchor, 6).unwrap(), Some(root6)); + let root7 = read_changes_trie_root(&backend, 7); assert_eq!(backend.changes_tries_storage.root(&anchor, 7).unwrap(), Some(root7)); + let root8 = read_changes_trie_root(&backend, 8); assert_eq!(backend.changes_tries_storage.root(&anchor, 8).unwrap(), Some(root8)); + let root9 = read_changes_trie_root(&backend, 9); assert_eq!(backend.changes_tries_storage.root(&anchor, 9).unwrap(), Some(root9)); + let root10 = read_changes_trie_root(&backend, 10); assert_eq!(backend.changes_tries_storage.root(&anchor, 10).unwrap(), Some(root10)); + let root11 = read_changes_trie_root(&backend, 11); assert_eq!(backend.changes_tries_storage.root(&anchor, 11).unwrap(), Some(root11)); + let root12 = read_changes_trie_root(&backend, 12); assert_eq!(backend.changes_tries_storage.root(&anchor, 12).unwrap(), Some(root12)); + + // now simulate finalization of block#12, causing prune of tries at #1..#4 + let mut tx = DBTransaction::new(); + backend.changes_tries_storage.prune(&config, &mut tx, Default::default(), 12); + backend.storage.db.write(tx).unwrap(); + assert!(backend.changes_tries_storage.get(&root1, &[]).unwrap().is_none()); + assert!(backend.changes_tries_storage.get(&root2, &[]).unwrap().is_none()); + assert!(backend.changes_tries_storage.get(&root3, &[]).unwrap().is_none()); + assert!(backend.changes_tries_storage.get(&root4, 
&[]).unwrap().is_none()); + assert!(backend.changes_tries_storage.get(&root5, &[]).unwrap().is_some()); + assert!(backend.changes_tries_storage.get(&root6, &[]).unwrap().is_some()); + assert!(backend.changes_tries_storage.get(&root7, &[]).unwrap().is_some()); + assert!(backend.changes_tries_storage.get(&root8, &[]).unwrap().is_some()); + + // now simulate finalization of block#16, causing prune of tries at #5..#8 + let mut tx = DBTransaction::new(); + backend.changes_tries_storage.prune(&config, &mut tx, Default::default(), 16); + backend.storage.db.write(tx).unwrap(); + assert!(backend.changes_tries_storage.get(&root5, &[]).unwrap().is_none()); + assert!(backend.changes_tries_storage.get(&root6, &[]).unwrap().is_none()); + assert!(backend.changes_tries_storage.get(&root7, &[]).unwrap().is_none()); + assert!(backend.changes_tries_storage.get(&root8, &[]).unwrap().is_none()); + + // now "change" pruning mode to archive && simulate finalization of block#20 + // => no changes tries are pruned, because we never prune in archive mode + backend.changes_tries_storage.min_blocks_to_keep = None; + let mut tx = DBTransaction::new(); + backend.changes_tries_storage.prune(&config, &mut tx, Default::default(), 20); + backend.storage.db.write(tx).unwrap(); + assert!(backend.changes_tries_storage.get(&root9, &[]).unwrap().is_some()); + assert!(backend.changes_tries_storage.get(&root10, &[]).unwrap().is_some()); + assert!(backend.changes_tries_storage.get(&root11, &[]).unwrap().is_some()); + assert!(backend.changes_tries_storage.get(&root12, &[]).unwrap().is_some()); + } + + #[test] + fn changes_tries_without_digest_are_pruned_on_finalization() { + let mut backend = Backend::::new_test(1000, 100); + backend.changes_tries_storage.min_blocks_to_keep = Some(4); + let config = ChangesTrieConfiguration { + digest_interval: 0, + digest_levels: 0, + }; + + // insert some blocks + let block0 = insert_header(&backend, 0, Default::default(), vec![(b"key_at_0".to_vec(), 
b"val_at_0".to_vec())], Default::default()); + let block1 = insert_header(&backend, 1, block0, vec![(b"key_at_1".to_vec(), b"val_at_1".to_vec())], Default::default()); + let block2 = insert_header(&backend, 2, block1, vec![(b"key_at_2".to_vec(), b"val_at_2".to_vec())], Default::default()); + let block3 = insert_header(&backend, 3, block2, vec![(b"key_at_3".to_vec(), b"val_at_3".to_vec())], Default::default()); + let block4 = insert_header(&backend, 4, block3, vec![(b"key_at_4".to_vec(), b"val_at_4".to_vec())], Default::default()); + let block5 = insert_header(&backend, 5, block4, vec![(b"key_at_5".to_vec(), b"val_at_5".to_vec())], Default::default()); + let block6 = insert_header(&backend, 6, block5, vec![(b"key_at_6".to_vec(), b"val_at_6".to_vec())], Default::default()); + + // check that roots of all tries are in the columns::CHANGES_TRIE + let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block6, number: 6 }; + fn read_changes_trie_root(backend: &Backend, num: u64) -> H256 { + backend.blockchain().header(BlockId::Number(num)).unwrap().unwrap().digest().logs().iter() + .find(|i| i.as_changes_trie_root().is_some()).unwrap().as_changes_trie_root().unwrap().clone() + } + + let root1 = read_changes_trie_root(&backend, 1); assert_eq!(backend.changes_tries_storage.root(&anchor, 1).unwrap(), Some(root1)); + let root2 = read_changes_trie_root(&backend, 2); assert_eq!(backend.changes_tries_storage.root(&anchor, 2).unwrap(), Some(root2)); + let root3 = read_changes_trie_root(&backend, 3); assert_eq!(backend.changes_tries_storage.root(&anchor, 3).unwrap(), Some(root3)); + let root4 = read_changes_trie_root(&backend, 4); assert_eq!(backend.changes_tries_storage.root(&anchor, 4).unwrap(), Some(root4)); + let root5 = read_changes_trie_root(&backend, 5); assert_eq!(backend.changes_tries_storage.root(&anchor, 5).unwrap(), Some(root5)); + let root6 = read_changes_trie_root(&backend, 6); assert_eq!(backend.changes_tries_storage.root(&anchor, 6).unwrap(), Some(root6)); + 
+ // now simulate finalization of block#5, causing prune of trie at #1 + let mut tx = DBTransaction::new(); + backend.changes_tries_storage.prune(&config, &mut tx, block5, 5); + backend.storage.db.write(tx).unwrap(); + assert!(backend.changes_tries_storage.get(&root1, &[]).unwrap().is_none()); + assert!(backend.changes_tries_storage.get(&root2, &[]).unwrap().is_some()); + + // now simulate finalization of block#6, causing prune of tries at #2 + let mut tx = DBTransaction::new(); + backend.changes_tries_storage.prune(&config, &mut tx, block6, 6); + backend.storage.db.write(tx).unwrap(); + assert!(backend.changes_tries_storage.get(&root2, &[]).unwrap().is_none()); + assert!(backend.changes_tries_storage.get(&root3, &[]).unwrap().is_some()); + } +} diff --git a/core/client/db/src/lib.rs b/core/client/db/src/lib.rs index 206a0cbd31bda..3faf8bb6c7cee 100644 --- a/core/client/db/src/lib.rs +++ b/core/client/db/src/lib.rs @@ -27,6 +27,7 @@ pub mod light; mod cache; +mod changes_tries_storage; mod storage_cache; mod utils; @@ -44,10 +45,10 @@ use hash_db::Hasher; use kvdb::{KeyValueDB, DBTransaction}; use trie::{MemoryDB, PrefixedMemoryDB, prefixed_key}; use parking_lot::{Mutex, RwLock}; -use primitives::{H256, Blake2Hasher, ChangesTrieConfiguration, convert_hash}; +use primitives::{H256, Blake2Hasher, ChangesTrieConfiguration}; use primitives::storage::well_known_keys; use runtime_primitives::{ - generic::{BlockId, DigestItem}, Justification, StorageOverlay, ChildrenStorageOverlay, + generic::BlockId, Justification, StorageOverlay, ChildrenStorageOverlay, BuildStorage }; use runtime_primitives::traits::{ @@ -57,6 +58,7 @@ use state_machine::backend::Backend as StateBackend; use executor::RuntimeInfo; use state_machine::{CodeExecutor, DBValue}; use crate::utils::{Meta, db_err, meta_keys, read_db, block_id_to_lookup_key, read_meta}; +use crate::changes_tries_storage::DbChangesTrieStorage; use client::leaves::{LeafSet, FinalizationDisplaced}; use client::{children, 
well_known_cache_keys}; use state_db::StateDb; @@ -530,148 +532,6 @@ impl state_machine::Storage for DbGenesisStorage { } } -pub struct DbChangesTrieStorage { - db: Arc, - meta: Arc, Block::Hash>>>, - min_blocks_to_keep: Option, - _phantom: ::std::marker::PhantomData, -} - -impl> DbChangesTrieStorage { - /// Commit new changes trie. - pub fn commit(&self, tx: &mut DBTransaction, mut changes_trie: MemoryDB) { - for (key, (val, _)) in changes_trie.drain() { - tx.put(columns::CHANGES_TRIE, &key[..], &val); - } - } - - /// Prune obsolete changes tries. - pub fn prune( - &self, - config: &ChangesTrieConfiguration, - tx: &mut DBTransaction, - block_hash: Block::Hash, - block_num: NumberFor, - ) { - // never prune on archive nodes - let min_blocks_to_keep = match self.min_blocks_to_keep { - Some(min_blocks_to_keep) => min_blocks_to_keep, - None => return, - }; - - state_machine::prune_changes_tries( - Zero::zero(), // TODO: not true - config, - &*self, - min_blocks_to_keep.into(), - &state_machine::ChangesTrieAnchorBlockId { - hash: convert_hash(&block_hash), - number: block_num, - }, - |node| tx.delete(columns::CHANGES_TRIE, node.as_ref())); - } -} - -impl client::backend::PrunableStateChangesTrieStorage - for DbChangesTrieStorage -where - Block: BlockT, -{ - fn oldest_changes_trie_block( - &self, - config: &ChangesTrieConfiguration, - best_finalized_block: NumberFor, - ) -> NumberFor { - match self.min_blocks_to_keep { - Some(min_blocks_to_keep) => state_machine::oldest_non_pruned_changes_trie( - Zero::zero(), // TODO: not true - config, - min_blocks_to_keep.into(), - best_finalized_block, - ), - None => One::one(), - } - } -} - -impl state_machine::ChangesTrieRootsStorage> - for DbChangesTrieStorage -where - Block: BlockT, -{ - fn build_anchor( - &self, - hash: H256, - ) -> Result>, String> { - utils::read_header::(&*self.db, columns::KEY_LOOKUP, columns::HEADER, BlockId::Hash(hash)) - .map_err(|e| e.to_string()) - .and_then(|maybe_header| maybe_header.map(|header| - 
state_machine::ChangesTrieAnchorBlockId { - hash, - number: *header.number(), - } - ).ok_or_else(|| format!("Unknown header: {}", hash))) - } - - fn root( - &self, - anchor: &state_machine::ChangesTrieAnchorBlockId>, - block: NumberFor, - ) -> Result, String> { - // check API requirement: we can't get NEXT block(s) based on anchor - if block > anchor.number { - return Err(format!("Can't get changes trie root at {} using anchor at {}", block, anchor.number)); - } - - // we need to get hash of the block to resolve changes trie root - let block_id = if block <= self.meta.read().finalized_number { - // if block is finalized, we could just read canonical hash - BlockId::Number(block) - } else { - // the block is not finalized - let mut current_num = anchor.number; - let mut current_hash: Block::Hash = convert_hash(&anchor.hash); - let maybe_anchor_header: Block::Header = utils::require_header::( - &*self.db, columns::KEY_LOOKUP, columns::HEADER, BlockId::Number(current_num) - ).map_err(|e| e.to_string())?; - if maybe_anchor_header.hash() == current_hash { - // if anchor is canonicalized, then the block is also canonicalized - BlockId::Number(block) - } else { - // else (block is not finalized + anchor is not canonicalized): - // => we should find the required block hash by traversing - // back from the anchor to the block with given number - while current_num != block { - let current_header: Block::Header = utils::require_header::( - &*self.db, columns::KEY_LOOKUP, columns::HEADER, BlockId::Hash(current_hash) - ).map_err(|e| e.to_string())?; - - current_hash = *current_header.parent_hash(); - current_num = current_num - One::one(); - } - - BlockId::Hash(current_hash) - } - }; - - Ok(utils::require_header::(&*self.db, columns::KEY_LOOKUP, columns::HEADER, block_id) - .map_err(|e| e.to_string())? 
- .digest().log(DigestItem::as_changes_trie_root) - .map(|root| H256::from_slice(root.as_ref()))) - } -} - -impl state_machine::ChangesTrieStorage> - for DbChangesTrieStorage -where - Block: BlockT, -{ - fn get(&self, key: &H256, _prefix: &[u8]) -> Result, String> { - self.db.get(columns::CHANGES_TRIE, &key[..]) - .map_err(|err| format!("{}", err)) - } -} - /// Disk backend. Keeps data in a key-value store. In archive mode, trie nodes are kept from all blocks. /// Otherwise, trie nodes are kept only from some recent blocks. pub struct Backend { @@ -746,12 +606,14 @@ impl> Backend { db: db.clone(), state_db, }; - let changes_tries_storage = DbChangesTrieStorage { + let changes_tries_storage = DbChangesTrieStorage::new( db, + columns::CHANGES_TRIE, + columns::KEY_LOOKUP, + columns::HEADER, meta, - min_blocks_to_keep: if is_archive_pruning { None } else { Some(MIN_BLOCKS_TO_KEEP_CHANGES_TRIES_FOR) }, - _phantom: Default::default(), - }; + if is_archive_pruning { None } else { Some(MIN_BLOCKS_TO_KEEP_CHANGES_TRIES_FOR) }, + ); Ok(Backend { storage: Arc::new(storage_db), @@ -1404,21 +1266,22 @@ impl client::backend::LocalBackend for Backend {} #[cfg(test)] -mod tests { +pub(crate) mod tests { use hash_db::HashDB; use super::*; use crate::columns; use client::backend::Backend as BTrait; use client::blockchain::Backend as BLBTrait; use client::backend::BlockImportOperation as Op; + use runtime_primitives::generic::DigestItem; use runtime_primitives::testing::{Header, Block as RawBlock, ExtrinsicWrapper}; use runtime_primitives::traits::{Hash, BlakeTwo256}; - use state_machine::{TrieMut, TrieDBMut, ChangesTrieRootsStorage, ChangesTrieStorage}; + use state_machine::{TrieMut, TrieDBMut}; use test_client; - type Block = RawBlock>; + pub type Block = RawBlock>; - fn prepare_changes(changes: Vec<(Vec, Vec)>) -> (H256, MemoryDB) { + pub fn prepare_changes(changes: Vec<(Vec, Vec)>) -> (H256, MemoryDB) { let mut changes_root = H256::default(); let mut changes_trie_update = 
MemoryDB::::default(); { @@ -1434,7 +1297,7 @@ mod tests { (changes_root, changes_trie_update) } - fn insert_header( + pub fn insert_header( backend: &Backend, number: u64, parent_hash: H256, @@ -1751,224 +1614,6 @@ mod tests { assert!(backend.storage.db.get(columns::STATE, key.as_bytes()).unwrap().is_none()); } - #[test] - fn changes_trie_storage_works() { - let backend = Backend::::new_test(1000, 100); - backend.changes_tries_storage.meta.write().finalized_number = 1000; - - - let check_changes = |backend: &Backend, block: u64, changes: Vec<(Vec, Vec)>| { - let (changes_root, mut changes_trie_update) = prepare_changes(changes); - let anchor = state_machine::ChangesTrieAnchorBlockId { - hash: backend.blockchain().header(BlockId::Number(block)).unwrap().unwrap().hash(), - number: block - }; - assert_eq!(backend.changes_tries_storage.root(&anchor, block), Ok(Some(changes_root))); - - for (key, (val, _)) in changes_trie_update.drain() { - assert_eq!(backend.changes_trie_storage().unwrap().get(&key, &[]), Ok(Some(val))); - } - }; - - let changes0 = vec![(b"key_at_0".to_vec(), b"val_at_0".to_vec())]; - let changes1 = vec![ - (b"key_at_1".to_vec(), b"val_at_1".to_vec()), - (b"another_key_at_1".to_vec(), b"another_val_at_1".to_vec()), - ]; - let changes2 = vec![(b"key_at_2".to_vec(), b"val_at_2".to_vec())]; - - let block0 = insert_header(&backend, 0, Default::default(), changes0.clone(), Default::default()); - let block1 = insert_header(&backend, 1, block0, changes1.clone(), Default::default()); - let _ = insert_header(&backend, 2, block1, changes2.clone(), Default::default()); - - // check that the storage contains tries for all blocks - check_changes(&backend, 0, changes0); - check_changes(&backend, 1, changes1); - check_changes(&backend, 2, changes2); - } - - #[test] - fn changes_trie_storage_works_with_forks() { - let backend = Backend::::new_test(1000, 100); - - let changes0 = vec![(b"k0".to_vec(), b"v0".to_vec())]; - let changes1 = vec![(b"k1".to_vec(), 
b"v1".to_vec())]; - let changes2 = vec![(b"k2".to_vec(), b"v2".to_vec())]; - let block0 = insert_header(&backend, 0, Default::default(), changes0.clone(), Default::default()); - let block1 = insert_header(&backend, 1, block0, changes1.clone(), Default::default()); - let block2 = insert_header(&backend, 2, block1, changes2.clone(), Default::default()); - - let changes2_1_0 = vec![(b"k3".to_vec(), b"v3".to_vec())]; - let changes2_1_1 = vec![(b"k4".to_vec(), b"v4".to_vec())]; - let block2_1_0 = insert_header(&backend, 3, block2, changes2_1_0.clone(), Default::default()); - let block2_1_1 = insert_header(&backend, 4, block2_1_0, changes2_1_1.clone(), Default::default()); - - let changes2_2_0 = vec![(b"k5".to_vec(), b"v5".to_vec())]; - let changes2_2_1 = vec![(b"k6".to_vec(), b"v6".to_vec())]; - let block2_2_0 = insert_header(&backend, 3, block2, changes2_2_0.clone(), Default::default()); - let block2_2_1 = insert_header(&backend, 4, block2_2_0, changes2_2_1.clone(), Default::default()); - - // finalize block1 - backend.changes_tries_storage.meta.write().finalized_number = 1; - - // branch1: when asking for finalized block hash - let (changes1_root, _) = prepare_changes(changes1); - let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block2_1_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 1), Ok(Some(changes1_root))); - - // branch2: when asking for finalized block hash - let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block2_2_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 1), Ok(Some(changes1_root))); - - // branch1: when asking for non-finalized block hash (search by traversal) - let (changes2_1_0_root, _) = prepare_changes(changes2_1_0); - let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block2_1_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_1_0_root))); - - // branch2: when asking for non-finalized block hash (search using 
canonicalized hint) - let (changes2_2_0_root, _) = prepare_changes(changes2_2_0); - let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block2_2_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_2_0_root))); - - // finalize first block of branch2 (block2_2_0) - backend.changes_tries_storage.meta.write().finalized_number = 3; - - // branch2: when asking for finalized block of this branch - assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_2_0_root))); - - // branch1: when asking for finalized block of other branch - // => result is incorrect (returned for the block of branch1), but this is expected, - // because the other fork is abandoned (forked before finalized header) - let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block2_1_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_2_0_root))); - } - - #[test] - fn changes_tries_with_digest_are_pruned_on_finalization() { - let mut backend = Backend::::new_test(1000, 100); - backend.changes_tries_storage.min_blocks_to_keep = Some(8); - let config = ChangesTrieConfiguration { - digest_interval: 2, - digest_levels: 2, - }; - - // insert some blocks - let block0 = insert_header(&backend, 0, Default::default(), vec![(b"key_at_0".to_vec(), b"val_at_0".to_vec())], Default::default()); - let block1 = insert_header(&backend, 1, block0, vec![(b"key_at_1".to_vec(), b"val_at_1".to_vec())], Default::default()); - let block2 = insert_header(&backend, 2, block1, vec![(b"key_at_2".to_vec(), b"val_at_2".to_vec())], Default::default()); - let block3 = insert_header(&backend, 3, block2, vec![(b"key_at_3".to_vec(), b"val_at_3".to_vec())], Default::default()); - let block4 = insert_header(&backend, 4, block3, vec![(b"key_at_4".to_vec(), b"val_at_4".to_vec())], Default::default()); - let block5 = insert_header(&backend, 5, block4, vec![(b"key_at_5".to_vec(), b"val_at_5".to_vec())], Default::default()); - 
let block6 = insert_header(&backend, 6, block5, vec![(b"key_at_6".to_vec(), b"val_at_6".to_vec())], Default::default()); - let block7 = insert_header(&backend, 7, block6, vec![(b"key_at_7".to_vec(), b"val_at_7".to_vec())], Default::default()); - let block8 = insert_header(&backend, 8, block7, vec![(b"key_at_8".to_vec(), b"val_at_8".to_vec())], Default::default()); - let block9 = insert_header(&backend, 9, block8, vec![(b"key_at_9".to_vec(), b"val_at_9".to_vec())], Default::default()); - let block10 = insert_header(&backend, 10, block9, vec![(b"key_at_10".to_vec(), b"val_at_10".to_vec())], Default::default()); - let block11 = insert_header(&backend, 11, block10, vec![(b"key_at_11".to_vec(), b"val_at_11".to_vec())], Default::default()); - let block12 = insert_header(&backend, 12, block11, vec![(b"key_at_12".to_vec(), b"val_at_12".to_vec())], Default::default()); - let block13 = insert_header(&backend, 13, block12, vec![(b"key_at_13".to_vec(), b"val_at_13".to_vec())], Default::default()); - backend.changes_tries_storage.meta.write().finalized_number = 13; - - // check that roots of all tries are in the columns::CHANGES_TRIE - let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block13, number: 13 }; - fn read_changes_trie_root(backend: &Backend, num: u64) -> H256 { - backend.blockchain().header(BlockId::Number(num)).unwrap().unwrap().digest().logs().iter() - .find(|i| i.as_changes_trie_root().is_some()).unwrap().as_changes_trie_root().unwrap().clone() - } - let root1 = read_changes_trie_root(&backend, 1); assert_eq!(backend.changes_tries_storage.root(&anchor, 1).unwrap(), Some(root1)); - let root2 = read_changes_trie_root(&backend, 2); assert_eq!(backend.changes_tries_storage.root(&anchor, 2).unwrap(), Some(root2)); - let root3 = read_changes_trie_root(&backend, 3); assert_eq!(backend.changes_tries_storage.root(&anchor, 3).unwrap(), Some(root3)); - let root4 = read_changes_trie_root(&backend, 4); assert_eq!(backend.changes_tries_storage.root(&anchor, 
4).unwrap(), Some(root4)); - let root5 = read_changes_trie_root(&backend, 5); assert_eq!(backend.changes_tries_storage.root(&anchor, 5).unwrap(), Some(root5)); - let root6 = read_changes_trie_root(&backend, 6); assert_eq!(backend.changes_tries_storage.root(&anchor, 6).unwrap(), Some(root6)); - let root7 = read_changes_trie_root(&backend, 7); assert_eq!(backend.changes_tries_storage.root(&anchor, 7).unwrap(), Some(root7)); - let root8 = read_changes_trie_root(&backend, 8); assert_eq!(backend.changes_tries_storage.root(&anchor, 8).unwrap(), Some(root8)); - let root9 = read_changes_trie_root(&backend, 9); assert_eq!(backend.changes_tries_storage.root(&anchor, 9).unwrap(), Some(root9)); - let root10 = read_changes_trie_root(&backend, 10); assert_eq!(backend.changes_tries_storage.root(&anchor, 10).unwrap(), Some(root10)); - let root11 = read_changes_trie_root(&backend, 11); assert_eq!(backend.changes_tries_storage.root(&anchor, 11).unwrap(), Some(root11)); - let root12 = read_changes_trie_root(&backend, 12); assert_eq!(backend.changes_tries_storage.root(&anchor, 12).unwrap(), Some(root12)); - - // now simulate finalization of block#12, causing prune of tries at #1..#4 - let mut tx = DBTransaction::new(); - backend.changes_tries_storage.prune(&config, &mut tx, Default::default(), 12); - backend.storage.db.write(tx).unwrap(); - assert!(backend.changes_tries_storage.get(&root1, &[]).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root2, &[]).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root3, &[]).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root4, &[]).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root5, &[]).unwrap().is_some()); - assert!(backend.changes_tries_storage.get(&root6, &[]).unwrap().is_some()); - assert!(backend.changes_tries_storage.get(&root7, &[]).unwrap().is_some()); - assert!(backend.changes_tries_storage.get(&root8, &[]).unwrap().is_some()); - - // now simulate finalization 
of block#16, causing prune of tries at #5..#8 - let mut tx = DBTransaction::new(); - backend.changes_tries_storage.prune(&config, &mut tx, Default::default(), 16); - backend.storage.db.write(tx).unwrap(); - assert!(backend.changes_tries_storage.get(&root5, &[]).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root6, &[]).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root7, &[]).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root8, &[]).unwrap().is_none()); - - // now "change" pruning mode to archive && simulate finalization of block#20 - // => no changes tries are pruned, because we never prune in archive mode - backend.changes_tries_storage.min_blocks_to_keep = None; - let mut tx = DBTransaction::new(); - backend.changes_tries_storage.prune(&config, &mut tx, Default::default(), 20); - backend.storage.db.write(tx).unwrap(); - assert!(backend.changes_tries_storage.get(&root9, &[]).unwrap().is_some()); - assert!(backend.changes_tries_storage.get(&root10, &[]).unwrap().is_some()); - assert!(backend.changes_tries_storage.get(&root11, &[]).unwrap().is_some()); - assert!(backend.changes_tries_storage.get(&root12, &[]).unwrap().is_some()); - } - - #[test] - fn changes_tries_without_digest_are_pruned_on_finalization() { - let mut backend = Backend::::new_test(1000, 100); - backend.changes_tries_storage.min_blocks_to_keep = Some(4); - let config = ChangesTrieConfiguration { - digest_interval: 0, - digest_levels: 0, - }; - - // insert some blocks - let block0 = insert_header(&backend, 0, Default::default(), vec![(b"key_at_0".to_vec(), b"val_at_0".to_vec())], Default::default()); - let block1 = insert_header(&backend, 1, block0, vec![(b"key_at_1".to_vec(), b"val_at_1".to_vec())], Default::default()); - let block2 = insert_header(&backend, 2, block1, vec![(b"key_at_2".to_vec(), b"val_at_2".to_vec())], Default::default()); - let block3 = insert_header(&backend, 3, block2, vec![(b"key_at_3".to_vec(), b"val_at_3".to_vec())], 
Default::default()); - let block4 = insert_header(&backend, 4, block3, vec![(b"key_at_4".to_vec(), b"val_at_4".to_vec())], Default::default()); - let block5 = insert_header(&backend, 5, block4, vec![(b"key_at_5".to_vec(), b"val_at_5".to_vec())], Default::default()); - let block6 = insert_header(&backend, 6, block5, vec![(b"key_at_6".to_vec(), b"val_at_6".to_vec())], Default::default()); - - // check that roots of all tries are in the columns::CHANGES_TRIE - let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block6, number: 6 }; - fn read_changes_trie_root(backend: &Backend, num: u64) -> H256 { - backend.blockchain().header(BlockId::Number(num)).unwrap().unwrap().digest().logs().iter() - .find(|i| i.as_changes_trie_root().is_some()).unwrap().as_changes_trie_root().unwrap().clone() - } - - let root1 = read_changes_trie_root(&backend, 1); assert_eq!(backend.changes_tries_storage.root(&anchor, 1).unwrap(), Some(root1)); - let root2 = read_changes_trie_root(&backend, 2); assert_eq!(backend.changes_tries_storage.root(&anchor, 2).unwrap(), Some(root2)); - let root3 = read_changes_trie_root(&backend, 3); assert_eq!(backend.changes_tries_storage.root(&anchor, 3).unwrap(), Some(root3)); - let root4 = read_changes_trie_root(&backend, 4); assert_eq!(backend.changes_tries_storage.root(&anchor, 4).unwrap(), Some(root4)); - let root5 = read_changes_trie_root(&backend, 5); assert_eq!(backend.changes_tries_storage.root(&anchor, 5).unwrap(), Some(root5)); - let root6 = read_changes_trie_root(&backend, 6); assert_eq!(backend.changes_tries_storage.root(&anchor, 6).unwrap(), Some(root6)); - - // now simulate finalization of block#5, causing prune of trie at #1 - let mut tx = DBTransaction::new(); - backend.changes_tries_storage.prune(&config, &mut tx, block5, 5); - backend.storage.db.write(tx).unwrap(); - assert!(backend.changes_tries_storage.get(&root1, &[]).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root2, &[]).unwrap().is_some()); - - // now simulate 
finalization of block#6, causing prune of tries at #2 - let mut tx = DBTransaction::new(); - backend.changes_tries_storage.prune(&config, &mut tx, block6, 6); - backend.storage.db.write(tx).unwrap(); - assert!(backend.changes_tries_storage.get(&root2, &[]).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root3, &[]).unwrap().is_some()); - } - #[test] fn tree_route_works() { let backend = Backend::::new_test(1000, 100); diff --git a/core/client/src/blockchain.rs b/core/client/src/blockchain.rs index ba31e2528065a..445be71f5d685 100644 --- a/core/client/src/blockchain.rs +++ b/core/client/src/blockchain.rs @@ -265,4 +265,7 @@ pub mod well_known_cache_keys { /// A list of authorities. pub const AUTHORITIES: Id = *b"auth"; + + /// Changes trie configuration. + pub const CHANGES_TRIE_CONFIG: Id = *b"chtr"; } From 315303aa5c0ef8f648530a07ea883039394e3046 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Fri, 28 Jun 2019 09:08:20 +0300 Subject: [PATCH 07/63] change meaning of none in blockchain cache --- core/client/db/src/cache/list_cache.rs | 405 +++++++++++-------------- core/client/db/src/cache/list_entry.rs | 78 +++-- core/client/db/src/cache/mod.rs | 2 +- 3 files changed, 223 insertions(+), 262 deletions(-) diff --git a/core/client/db/src/cache/list_cache.rs b/core/client/db/src/cache/list_cache.rs index 4f343e93fdc93..727375244d6ce 100644 --- a/core/client/db/src/cache/list_cache.rs +++ b/core/client/db/src/cache/list_cache.rs @@ -163,13 +163,15 @@ impl> ListCache }; match head { - Some(head) => head.search_best_before(&self.storage, at.number, true) - .map(|e| e.and_then(|e| e.0.value)), + Some(head) => head.search_best_before(&self.storage, at.number) + .map(|e| e.map(|e| e.0.value)), None => Ok(None), } } /// When new block is inserted into database. + /// + /// None passed as value means that the value has not changed since previous block. 
pub fn on_block_insert>( &self, tx: &mut Tx, @@ -191,6 +193,11 @@ impl> ListCache if !is_final { let mut fork_and_action = None; + // when value hasn't changed and block isn't final, there's nothing we need to do + if value.is_none() { + return Ok(None); + } + // first: try to find fork that is known to has the best block we're appending to for (index, fork) in self.unfinalized.iter().enumerate() { if fork.try_append(&parent) { @@ -231,7 +238,7 @@ impl> ListCache // it is possible that we're inserting extra (but still required) fork here let new_storage_entry = StorageEntry { prev_valid_from: Some(prev_valid_from), - value, + value: value.expect("checked above that !value.is_none(); qed"), }; tx.insert_storage_entry(&block, &new_storage_entry); @@ -250,7 +257,10 @@ impl> ListCache let new_storage_entry = match self.best_finalized_entry.as_ref() { Some(best_finalized_entry) => best_finalized_entry.try_update(value), - None if value.is_some() => Some(StorageEntry { prev_valid_from: None, value }), + None if value.is_some() => Some(StorageEntry { + prev_valid_from: None, + value: value.expect("value.is_some(); qed"), + }), None => None, }; @@ -378,8 +388,12 @@ impl> ListCache }); // destroy 'fork' ending with previous entry - Fork { best_block: None, head: Entry { valid_from: first_entry_to_truncate, value: None } } - .destroy(&self.storage, tx, None) + destroy_fork( + first_entry_to_truncate, + &self.storage, + tx, + None, + ) }; if let Err(error) = do_pruning() { @@ -491,25 +505,40 @@ impl Fork { tx: &mut Tx, best_finalized_block: Option>, ) -> ClientResult<()> { - let mut current = self.head.valid_from.clone(); - loop { - // optionally: deletion stops when we found entry at finalized block - if let Some(best_finalized_block) = best_finalized_block { - if chain::is_finalized_block(storage, &current, best_finalized_block)?
{ - return Ok(()); - } + destroy_fork( + self.head.valid_from.clone(), + storage, + tx, + best_finalized_block, + ) + } +} + +/// Destroy fork by deleting all unfinalized entries. +pub fn destroy_fork, Tx: StorageTransaction>( + head_valid_from: ComplexBlockId, + storage: &S, + tx: &mut Tx, + best_finalized_block: Option>, +) -> ClientResult<()> { + let mut current = head_valid_from; + loop { + // optionally: deletion stops when we found entry at finalized block + if let Some(best_finalized_block) = best_finalized_block { + if chain::is_finalized_block(storage, &current, best_finalized_block)? { + return Ok(()); + } + } - // read pointer to previous entry - let entry = storage.require_entry(&current)?; - tx.remove_storage_entry(&current); + // read pointer to previous entry + let entry = storage.require_entry(&current)?; + tx.remove_storage_entry(&current); - // deletion stops when there are no more entries in the list - current = match entry.prev_valid_from { - Some(prev_valid_from) => prev_valid_from, - None => return Ok(()), - }; - } + // deletion stops when there are no more entries in the list + current = match entry.prev_valid_from { + Some(prev_valid_from) => prev_valid_from, + None => return Ok(()), + }; } } @@ -639,24 +668,14 @@ pub mod tests { // ----------> [100] assert_eq!(ListCache::<_, u64, _>::new(DummyStorage::new(), 1024, test_id(100)) .value_at_block(&test_id(50)).unwrap(), None); - // when block is earlier than best finalized block AND it is finalized AND value is empty - // [30] ---- 50 ---> [100] - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(test_id(100)), Vec::new()) - .with_id(50, H256::from_low_u64_be(50)) - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: Some(100) }) - .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: None }), - 1024, test_id(100) - ).value_at_block(&test_id(50)).unwrap(), None); // when block is earlier than best finalized block AND it is finalized AND value is some // [30]
---- 50 ---> [100] assert_eq!(ListCache::new( DummyStorage::new() .with_meta(Some(test_id(100)), Vec::new()) .with_id(50, H256::from_low_u64_be(50)) - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: Some(100) }) - .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: Some(30) }), + .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }) + .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), 1024, test_id(100) ).value_at_block(&test_id(50)).unwrap(), Some(30)); // when block is the best finalized block AND value is some @@ -665,8 +684,8 @@ pub mod tests { DummyStorage::new() .with_meta(Some(test_id(100)), Vec::new()) .with_id(100, H256::from_low_u64_be(100)) - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: Some(100) }) - .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: Some(30) }), + .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }) + .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), 1024, test_id(100) ).value_at_block(&test_id(100)).unwrap(), Some(100)); // when block is parallel to the best finalized block @@ -676,45 +695,21 @@ pub mod tests { DummyStorage::new() .with_meta(Some(test_id(100)), Vec::new()) .with_id(50, H256::from_low_u64_be(50)) - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: Some(100) }) - .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: Some(30) }), + .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }) + .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), 1024, test_id(100) ).value_at_block(&ComplexBlockId::new(H256::from_low_u64_be(2), 100)).unwrap(), None); - // when block is later than last finalized block AND there are no forks AND finalized value is None - // ---> [100] --- 200 - 
assert_eq!(ListCache::<_, u64, _>::new( - DummyStorage::new() - .with_meta(Some(test_id(100)), Vec::new()) - .with_id(50, H256::from_low_u64_be(50)) - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: None }), - 1024, test_id(100) - ).value_at_block(&test_id(200)).unwrap(), None); // when block is later than last finalized block AND there are no forks AND finalized value is Some // ---> [100] --- 200 assert_eq!(ListCache::new( DummyStorage::new() .with_meta(Some(test_id(100)), Vec::new()) .with_id(50, H256::from_low_u64_be(50)) - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: Some(100) }), + .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }), 1024, test_id(100) ).value_at_block(&test_id(200)).unwrap(), Some(100)); - // when block is later than last finalized block AND there are no matching forks - // AND block is connected to finalized block AND finalized value is None - // --- 3 - // ---> [2] /---------> [4] - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: None }) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(4) }) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(fork_header(0, 2, 3)), - 1024, test_id(2) - ).value_at_block(&fork_id(0, 2, 3)).unwrap(), None); // when block is later than last finalized block AND there are no matching forks // AND block is connected to finalized block AND finalized value is Some // --- 3 @@ -722,8 +717,8 @@ pub mod tests { assert_eq!(ListCache::new( DummyStorage::new() .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 
Some(4) }) + .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) + .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 }) .with_header(test_header(2)) .with_header(test_header(3)) .with_header(test_header(4)) @@ -737,8 +732,8 @@ pub mod tests { assert_eq!(ListCache::new( DummyStorage::new() .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(4) }) + .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) + .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 }) .with_header(test_header(1)) .with_header(test_header(2)) .with_header(test_header(3)) @@ -754,52 +749,12 @@ pub mod tests { assert_eq!(ListCache::new( DummyStorage::new() .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(4) }) + .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) + .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 }) .with_header(test_header(4)) .with_header(test_header(5)), 1024, test_id(2) ).value_at_block(&correct_id(5)).unwrap(), Some(4)); - // when block is later than last finalized block AND it appends to unfinalized fork from the end - // AND unfinalized value is None - // ---> [2] ---> [4] ---> 5 - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: None }) - .with_header(test_header(4)) - .with_header(test_header(5)), - 1024, 
test_id(2) - ).value_at_block(&correct_id(5)).unwrap(), None); - // when block is later than last finalized block AND it fits to the middle of unfinalized fork - // AND unfinalized value is Some - // ---> [2] ---> [4] ---> 5 ---> [6] - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(6)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(4) }) - .with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(4)), value: None }) - .with_header(test_header(4)) - .with_header(test_header(5)) - .with_header(test_header(6)), - 1024, test_id(2) - ).value_at_block(&correct_id(5)).unwrap(), Some(4)); - // when block is later than last finalized block AND it fits to the middle of unfinalized fork - // AND unfinalized value is None - // ---> [2] ---> [4] ---> 5 ---> [6] - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(6)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: None }) - .with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(4)), value: Some(4) }) - .with_header(test_header(4)) - .with_header(test_header(5)) - .with_header(test_header(6)), - 1024, test_id(2) - ).value_at_block(&correct_id(5)).unwrap(), None); // when block is later than last finalized block AND it does not fits unfinalized fork // AND it is connected to the finalized block AND finalized value is Some // ---> [2] ----------> [4] @@ -807,29 +762,14 @@ pub mod tests { assert_eq!(ListCache::new( DummyStorage::new() .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(4) }) - .with_entry(correct_id(2), StorageEntry { 
prev_valid_from: None, value: Some(2) }) + .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 }) + .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) .with_header(test_header(2)) .with_header(test_header(3)) .with_header(test_header(4)) .with_header(fork_header(0, 2, 3)), 1024, test_id(2) ).value_at_block(&fork_id(0, 2, 3)).unwrap(), Some(2)); - // when block is later than last finalized block AND it does not fits unfinalized fork - // AND it is connected to the finalized block AND finalized value is Some - // ---> [2] ----------> [4] - // \--- 3 - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(4) }) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: None }) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(fork_header(0, 2, 3)), - 1024, test_id(2) - ).value_at_block(&fork_id(0, 2, 3)).unwrap(), None); } #[test] @@ -861,7 +801,7 @@ pub mod tests { let mut cache = ListCache::new( DummyStorage::new() .with_meta(None, vec![test_id(4)]) - .with_entry(test_id(4), StorageEntry { prev_valid_from: None, value: Some(4) }), + .with_entry(test_id(4), StorageEntry { prev_valid_from: None, value: 4 }), 1024, test_id(2) ); cache.unfinalized[0].best_block = Some(test_id(4)); @@ -875,7 +815,7 @@ pub mod tests { // AND new value is the same as in the fork' best block let mut tx = DummyTransaction::new(); assert_eq!(cache.on_block_insert(&mut tx, test_id(4), test_id(5), Some(5), nfin).unwrap(), - Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: test_id(5), value: Some(5) }))); + Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: test_id(5), value: 5 }))); assert_eq!(*tx.inserted_entries(), vec![test_id(5).hash].into_iter().collect()); 
assert!(tx.removed_entries().is_empty()); assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: None, unfinalized: vec![test_id(5)] })); @@ -885,7 +825,7 @@ pub mod tests { let cache = ListCache::new( DummyStorage::new() .with_meta(None, vec![correct_id(4)]) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: None, value: Some(4) }) + .with_entry(correct_id(4), StorageEntry { prev_valid_from: None, value: 4 }) .with_header(test_header(4)), 1024, test_id(2) ); @@ -899,7 +839,7 @@ pub mod tests { // AND new value is the same as in the fork' best block let mut tx = DummyTransaction::new(); assert_eq!(cache.on_block_insert(&mut tx, correct_id(4), correct_id(5), Some(5), nfin).unwrap(), - Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(5), value: Some(5) }))); + Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(5), value: 5 }))); assert_eq!(*tx.inserted_entries(), vec![correct_id(5).hash].into_iter().collect()); assert!(tx.removed_entries().is_empty()); assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: None, unfinalized: vec![correct_id(5)] })); @@ -908,8 +848,8 @@ pub mod tests { let cache = ListCache::new( DummyStorage::new() .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(4) }) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }) + .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 }) + .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) .with_header(test_header(2)) .with_header(test_header(3)) .with_header(test_header(4)), @@ -917,7 +857,7 @@ pub mod tests { ); let mut tx = DummyTransaction::new(); assert_eq!(cache.on_block_insert(&mut tx, correct_id(3), fork_id(0, 3, 4), Some(14), nfin).unwrap(), - Some(CommitOperation::AddNewFork(Entry { valid_from: fork_id(0, 3, 4), value: Some(14) }))); + 
Some(CommitOperation::AddNewFork(Entry { valid_from: fork_id(0, 3, 4), value: 14 }))); assert_eq!(*tx.inserted_entries(), vec![fork_id(0, 3, 4).hash].into_iter().collect()); assert!(tx.removed_entries().is_empty()); assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(2)), unfinalized: vec![correct_id(4), fork_id(0, 3, 4)] })); @@ -927,7 +867,7 @@ pub mod tests { let cache = ListCache::new( DummyStorage::new() .with_meta(Some(correct_id(2)), vec![]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }), + .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }), 1024, correct_id(2) ); let mut tx = DummyTransaction::new(); @@ -940,12 +880,12 @@ pub mod tests { let cache = ListCache::new( DummyStorage::new() .with_meta(Some(correct_id(2)), vec![]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }), + .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }), 1024, correct_id(2) ); let mut tx = DummyTransaction::new(); assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), nfin).unwrap(), - Some(CommitOperation::AddNewFork(Entry { valid_from: correct_id(3), value: Some(3) }))); + Some(CommitOperation::AddNewFork(Entry { valid_from: correct_id(3), value: 3 }))); assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect()); assert!(tx.removed_entries().is_empty()); assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(2)), unfinalized: vec![correct_id(3)] })); @@ -953,8 +893,14 @@ pub mod tests { // when inserting finalized entry AND there are no previous finalized entries let cache = ListCache::new(DummyStorage::new(), 1024, correct_id(2)); let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), fin).unwrap(), - Some(CommitOperation::BlockFinalized(correct_id(3), Some(Entry { valid_from: correct_id(3), value: 
Some(3) }), Default::default()))); + assert_eq!( + cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), fin).unwrap(), + Some(CommitOperation::BlockFinalized( + correct_id(3), + Some(Entry { valid_from: correct_id(3), value: 3 }), + Default::default(), + )), + ); assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect()); assert!(tx.removed_entries().is_empty()); assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(3)), unfinalized: vec![] })); @@ -962,7 +908,7 @@ pub mod tests { let cache = ListCache::new( DummyStorage::new() .with_meta(Some(correct_id(2)), vec![]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }), + .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }), 1024, correct_id(2) ); let mut tx = DummyTransaction::new(); @@ -973,8 +919,14 @@ pub mod tests { assert!(tx.updated_meta().is_none()); // when inserting finalized entry AND value differs from previous finalized let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), fin).unwrap(), - Some(CommitOperation::BlockFinalized(correct_id(3), Some(Entry { valid_from: correct_id(3), value: Some(3) }), Default::default()))); + assert_eq!( + cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), fin).unwrap(), + Some(CommitOperation::BlockFinalized( + correct_id(3), + Some(Entry { valid_from: correct_id(3), value: 3 }), + Default::default(), + )), + ); assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect()); assert!(tx.removed_entries().is_empty()); assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(3)), unfinalized: vec![] })); @@ -983,8 +935,8 @@ pub mod tests { let cache = ListCache::new( DummyStorage::new() .with_meta(Some(correct_id(2)), vec![fork_id(0, 1, 3)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }) - 
.with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: None, value: Some(13) }), + .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) + .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: None, value: 13 }), 1024, correct_id(2) ); let mut tx = DummyTransaction::new(); @@ -998,8 +950,8 @@ pub mod tests { let cache = ListCache::new( DummyStorage::new() .with_meta(Some(correct_id(2)), vec![correct_id(5)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }) - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(5) }), + .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) + .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }), 1024, correct_id(2) ); let mut tx = DummyTransaction::new(); @@ -1012,13 +964,19 @@ pub mod tests { let cache = ListCache::new( DummyStorage::new() .with_meta(Some(correct_id(2)), vec![correct_id(5)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }) - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(5) }), + .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) + .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }), 1024, correct_id(4) ); let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_finalize(&mut tx, correct_id(4), correct_id(5)).unwrap(), - Some(CommitOperation::BlockFinalized(correct_id(5), Some(Entry { valid_from: correct_id(5), value: Some(5) }), vec![0].into_iter().collect()))); + assert_eq!( + cache.on_block_finalize(&mut tx, correct_id(4), correct_id(5)).unwrap(), + Some(CommitOperation::BlockFinalized( + correct_id(5), + Some(Entry { valid_from: correct_id(5), value: 5 }), + vec![0].into_iter().collect(), + )), + ); assert!(tx.inserted_entries().is_empty()); assert!(tx.removed_entries().is_empty()); 
assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(5)), unfinalized: vec![] })); @@ -1026,8 +984,8 @@ pub mod tests { let cache = ListCache::new( DummyStorage::new() .with_meta(Some(correct_id(2)), vec![fork_id(0, 1, 3)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }) - .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: None, value: Some(13) }), + .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) + .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: None, value: 13 }), 1024, correct_id(2) ); let mut tx = DummyTransaction::new(); @@ -1040,9 +998,9 @@ pub mod tests { let mut cache = ListCache::new( DummyStorage::new() .with_meta(Some(correct_id(2)), vec![correct_id(5), correct_id(6)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: Some(2) }) - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(5) }) - .with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(5)), value: Some(6) }), + .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) + .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }) + .with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(5)), value: 6 }), 1024, correct_id(2) ); @@ -1050,17 +1008,21 @@ pub mod tests { cache.on_transaction_commit(CommitOperation::AppendNewBlock(0, correct_id(6))); assert_eq!(cache.unfinalized[0].best_block, Some(correct_id(6))); // when new entry is appended to unfinalized fork - cache.on_transaction_commit(CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(7), value: Some(7) })); + cache.on_transaction_commit(CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(7), value: 7 })); assert_eq!(cache.unfinalized[0].best_block, Some(correct_id(7))); - assert_eq!(cache.unfinalized[0].head, Entry { valid_from: correct_id(7), value: 
Some(7) }); + assert_eq!(cache.unfinalized[0].head, Entry { valid_from: correct_id(7), value: 7 }); // when new fork is added - cache.on_transaction_commit(CommitOperation::AddNewFork(Entry { valid_from: correct_id(10), value: Some(10) })); + cache.on_transaction_commit(CommitOperation::AddNewFork(Entry { valid_from: correct_id(10), value: 10 })); assert_eq!(cache.unfinalized[2].best_block, Some(correct_id(10))); - assert_eq!(cache.unfinalized[2].head, Entry { valid_from: correct_id(10), value: Some(10) }); + assert_eq!(cache.unfinalized[2].head, Entry { valid_from: correct_id(10), value: 10 }); // when block is finalized + entry is finalized + unfinalized forks are deleted - cache.on_transaction_commit(CommitOperation::BlockFinalized(correct_id(20), Some(Entry { valid_from: correct_id(20), value: Some(20) }), vec![0, 1, 2].into_iter().collect())); + cache.on_transaction_commit(CommitOperation::BlockFinalized( + correct_id(20), + Some(Entry { valid_from: correct_id(20), value: 20 }), + vec![0, 1, 2].into_iter().collect(), + )); assert_eq!(cache.best_finalized_block, correct_id(20)); - assert_eq!(cache.best_finalized_entry, Some(Entry { valid_from: correct_id(20), value: Some(20) })); + assert_eq!(cache.best_finalized_entry, Some(Entry { valid_from: correct_id(20), value: 20 })); assert!(cache.unfinalized.is_empty()); } @@ -1071,9 +1033,9 @@ pub mod tests { assert_eq!(ListCache::new( DummyStorage::new() .with_meta(None, vec![fork_id(0, 1, 3), correct_id(5)]) - .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: Some(correct_id(1)), value: Some(13) }) - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(5) }) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: None }) + .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: Some(correct_id(1)), value: 13 }) + .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }) + .with_entry(correct_id(2), 
StorageEntry { prev_valid_from: None, value: 2 }) .with_header(test_header(2)) .with_header(test_header(3)) .with_header(test_header(4)) @@ -1085,9 +1047,9 @@ pub mod tests { assert_eq!(ListCache::new( DummyStorage::new() .with_meta(None, vec![correct_id(5), fork_id(0, 1, 3)]) - .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: Some(correct_id(1)), value: Some(13) }) - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(5) }) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: Some(correct_id(1)), value: Some(2) }) + .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: Some(correct_id(1)), value: 13 }) + .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }) + .with_entry(correct_id(2), StorageEntry { prev_valid_from: Some(correct_id(1)), value: 2 }) .with_header(test_header(2)) .with_header(test_header(3)) .with_header(test_header(4)) @@ -1103,9 +1065,9 @@ pub mod tests { assert!(ListCache::new( DummyStorage::new() .with_meta(None, vec![correct_id(5), fork_id(0, 1, 3)]) - .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: Some(correct_id(1)), value: Some(13) }) - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: Some(5) }) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: Some(correct_id(1)), value: Some(2) }) + .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: Some(correct_id(1)), value: 13 }) + .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }) + .with_entry(correct_id(2), StorageEntry { prev_valid_from: Some(correct_id(1)), value: 2 }) .with_header(test_header(2)) .with_header(test_header(3)) .with_header(test_header(4)) @@ -1123,59 +1085,59 @@ pub mod tests { fn fork_matches_works() { // when block is not within list range let storage = DummyStorage::new() - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: 
Some(100) }) - .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: Some(50) }); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: None } } + .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }) + .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: 50 }); + assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } .matches(&storage, &test_id(20)).unwrap(), false); // when block is not connected to the begin block let storage = DummyStorage::new() - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: Some(100) }) - .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: Some(200) }) + .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }) + .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) .with_header(test_header(5)) .with_header(test_header(4)) .with_header(test_header(3)) .with_header(fork_header(0, 2, 4)) .with_header(fork_header(0, 2, 3)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: Some(100) } } + assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: 100 } } .matches(&storage, &fork_id(0, 2, 4)).unwrap(), false); // when block is not connected to the end block let storage = DummyStorage::new() - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: Some(100) }) - .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: Some(200) }) + .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }) + .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) .with_header(test_header(5)) .with_header(test_header(4)) .with_header(test_header(3)) .with_header(fork_header(0, 3, 4)); - assert_eq!(Fork::<_, 
u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: Some(100) } } + assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: 100 } } .matches(&storage, &fork_id(0, 3, 4)).unwrap(), false); // when block is connected to the begin block AND end is open let storage = DummyStorage::new() - .with_entry(correct_id(5), StorageEntry { prev_valid_from: None, value: Some(100) }) + .with_entry(correct_id(5), StorageEntry { prev_valid_from: None, value: 100 }) .with_header(test_header(5)) .with_header(test_header(6)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: Some(100) } } + assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: 100 } } .matches(&storage, &correct_id(6)).unwrap(), true); // when block is connected to the begin block AND to the end block let storage = DummyStorage::new() - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: Some(100) }) - .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: Some(200) }) + .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }) + .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) .with_header(test_header(5)) .with_header(test_header(4)) .with_header(test_header(3)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: Some(100) } } + assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: 100 } } .matches(&storage, &correct_id(4)).unwrap(), true); } #[test] fn fork_try_append_works() { // when best block is unknown - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: None } } + assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } .try_append(&test_id(100)), false); // when best block is known but 
different - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: None } } + assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } .try_append(&test_id(101)), false); // when best block is known and the same - assert_eq!(Fork::<_, u64> { best_block: Some(test_id(100)), head: Entry { valid_from: test_id(100), value: None } } + assert_eq!(Fork::<_, u64> { best_block: Some(test_id(100)), head: Entry { valid_from: test_id(100), value: 0 } } .try_append(&test_id(100)), true); } @@ -1183,49 +1145,52 @@ pub mod tests { fn fork_try_append_or_fork_works() { // when there's no entry before parent let storage = DummyStorage::new() - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: Some(100) }) - .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: Some(50) }); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: None } } + .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }) + .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: 50 }); + assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } .try_append_or_fork(&storage, &test_id(30), None).unwrap(), None); // when parent does not belong to the fork let storage = DummyStorage::new() - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: Some(100) }) - .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: Some(200) }) + .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }) + .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) .with_header(test_header(5)) .with_header(test_header(4)) .with_header(test_header(3)) .with_header(fork_header(0, 2, 4)) .with_header(fork_header(0, 2, 3)); - assert_eq!(Fork::<_, u64> { best_block: None, head: 
Entry { valid_from: correct_id(5), value: Some(100) } } + assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: 100 } } .try_append_or_fork(&storage, &fork_id(0, 2, 4), None).unwrap(), None); // when the entry before parent is the head entry let storage = DummyStorage::new() - .with_entry(ComplexBlockId::new(test_header(5).hash(), 5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: Some(100) }) + .with_entry( + ComplexBlockId::new(test_header(5).hash(), 5), + StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }, + ) .with_header(test_header(6)) .with_header(test_header(5)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: Some(100) } } + assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: 100 } } .try_append_or_fork(&storage, &correct_id(6), None).unwrap(), Some(ForkAppendResult::Append)); // when the parent located after last finalized entry let storage = DummyStorage::new() - .with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(3)), value: Some(100) }) - .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: Some(200) }) + .with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }) + .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) .with_header(test_header(6)) .with_header(test_header(5)) .with_header(test_header(4)) .with_header(test_header(3)) .with_header(fork_header(0, 4, 5)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(6), value: Some(100) } } + assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(6), value: 100 } } .try_append_or_fork(&storage, &fork_id(0, 4, 5), None).unwrap(), Some(ForkAppendResult::Fork(ComplexBlockId::new(test_header(3).hash(), 3)))); // when the parent located before last finalized entry let storage = 
DummyStorage::new() - .with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(3)), value: Some(100) }) - .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: Some(200) }) + .with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }) + .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) .with_header(test_header(6)) .with_header(test_header(5)) .with_header(test_header(4)) .with_header(test_header(3)) .with_header(fork_header(0, 4, 5)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(6), value: Some(100) } } + assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(6), value: 100 } } .try_append_or_fork(&storage, &fork_id(0, 4, 5), Some(3)).unwrap(), None); } @@ -1234,30 +1199,30 @@ pub mod tests { // when we reached finalized entry without iterations let storage = DummyStorage::new().with_id(100, H256::from_low_u64_be(100)); let mut tx = DummyTransaction::new(); - Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: None } } + Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } .destroy(&storage, &mut tx, Some(200)).unwrap(); assert!(tx.removed_entries().is_empty()); // when we reach finalized entry with iterations let storage = DummyStorage::new() .with_id(10, H256::from_low_u64_be(10)) - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: Some(100) }) - .with_entry(test_id(50), StorageEntry { prev_valid_from: Some(test_id(20)), value: Some(50) }) - .with_entry(test_id(20), StorageEntry { prev_valid_from: Some(test_id(10)), value: Some(20) }) - .with_entry(test_id(10), StorageEntry { prev_valid_from: Some(test_id(5)), value: Some(10) }) - .with_entry(test_id(5), StorageEntry { prev_valid_from: Some(test_id(3)), value: Some(5) }) - .with_entry(test_id(3), StorageEntry { prev_valid_from: None, value: None }); + 
.with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }) + .with_entry(test_id(50), StorageEntry { prev_valid_from: Some(test_id(20)), value: 50 }) + .with_entry(test_id(20), StorageEntry { prev_valid_from: Some(test_id(10)), value: 20 }) + .with_entry(test_id(10), StorageEntry { prev_valid_from: Some(test_id(5)), value: 10 }) + .with_entry(test_id(5), StorageEntry { prev_valid_from: Some(test_id(3)), value: 5 }) + .with_entry(test_id(3), StorageEntry { prev_valid_from: None, value: 0 }); let mut tx = DummyTransaction::new(); - Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: None } } + Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } .destroy(&storage, &mut tx, Some(200)).unwrap(); assert_eq!(*tx.removed_entries(), vec![test_id(100).hash, test_id(50).hash, test_id(20).hash].into_iter().collect()); // when we reach beginning of fork before finalized block let storage = DummyStorage::new() .with_id(10, H256::from_low_u64_be(10)) - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: Some(100) }) - .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: Some(50) }); + .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }) + .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: 50 }); let mut tx = DummyTransaction::new(); - Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: None } } + Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } .destroy(&storage, &mut tx, Some(200)).unwrap(); assert_eq!(*tx.removed_entries(), vec![test_id(100).hash, test_id(50).hash].into_iter().collect()); @@ -1355,14 +1320,14 @@ pub mod tests { #[test] fn read_forks_works() { let storage = DummyStorage::new() - .with_entry(test_id(10), StorageEntry { prev_valid_from: Some(test_id(1)), value: Some(11) }) - 
.with_entry(test_id(20), StorageEntry { prev_valid_from: Some(test_id(2)), value: None }) - .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: Some(33) }); + .with_entry(test_id(10), StorageEntry { prev_valid_from: Some(test_id(1)), value: 11 }) + .with_entry(test_id(20), StorageEntry { prev_valid_from: Some(test_id(2)), value: 0 }) + .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 33 }); let expected = ( - Some(Entry { valid_from: test_id(10), value: Some(11) }), + Some(Entry { valid_from: test_id(10), value: 11 }), vec![ - Fork { best_block: None, head: Entry { valid_from: test_id(20), value: None } }, - Fork { best_block: None, head: Entry { valid_from: test_id(30), value: Some(33) } }, + Fork { best_block: None, head: Entry { valid_from: test_id(20), value: 0 } }, + Fork { best_block: None, head: Entry { valid_from: test_id(30), value: 33 } }, ], ); @@ -1378,9 +1343,9 @@ pub mod tests { .with_id(10, H256::from_low_u64_be(10)) .with_id(20, H256::from_low_u64_be(20)) .with_id(30, H256::from_low_u64_be(30)) - .with_entry(test_id(10), StorageEntry { prev_valid_from: None, value: Some(10) }) - .with_entry(test_id(20), StorageEntry { prev_valid_from: Some(test_id(10)), value: Some(20) }) - .with_entry(test_id(30), StorageEntry { prev_valid_from: Some(test_id(20)), value: Some(30) }), + .with_entry(test_id(10), StorageEntry { prev_valid_from: None, value: 10 }) + .with_entry(test_id(20), StorageEntry { prev_valid_from: Some(test_id(10)), value: 20 }) + .with_entry(test_id(30), StorageEntry { prev_valid_from: Some(test_id(20)), value: 30 }), 10, test_id(9)); let mut tx = DummyTransaction::new(); diff --git a/core/client/db/src/cache/list_entry.rs b/core/client/db/src/cache/list_entry.rs index 237ae9a268026..3305b909d2b2c 100644 --- a/core/client/db/src/cache/list_entry.rs +++ b/core/client/db/src/cache/list_entry.rs @@ -27,10 +27,10 @@ use crate::cache::list_storage::{Storage}; #[derive(Debug)] #[cfg_attr(test, 
derive(PartialEq))] pub struct Entry { - /// first block, when this value became actual + /// first block, when this value became actual. pub valid_from: ComplexBlockId, - /// None means that we do not know the value starting from `valid_from` block - pub value: Option, + /// Value stored at this entry. + pub value: T, } /// Internal representation of the single list-based cache entry. The entry points to the @@ -38,21 +38,24 @@ pub struct Entry { #[derive(Debug, Encode, Decode)] #[cfg_attr(test, derive(Clone, PartialEq))] pub struct StorageEntry { - /// None if valid from the beginning + /// None if valid from the beginning. pub prev_valid_from: Option>, - /// None means that we do not know the value starting from `valid_from` block - pub value: Option, + /// Value stored at this entry. + pub value: T, } impl Entry { /// Returns Some if the entry should be updated with the new value. pub fn try_update(&self, value: Option) -> Option> { - match self.value == value { - true => None, - false => Some(StorageEntry { - prev_valid_from: Some(self.valid_from.clone()), - value, - }), + match value { + Some(value) => match self.value == value { + true => None, + false => Some(StorageEntry { + prev_valid_from: Some(self.valid_from.clone()), + value, + }), + }, + None => None, } } @@ -62,7 +65,7 @@ impl Entry { storage: &S, block: NumberFor, ) -> ClientResult, Option>)>> { - Ok(self.search_best_before(storage, block, false)? + Ok(self.search_best_before(storage, block)? 
.map(|(entry, next)| (entry.valid_from, next))) } @@ -75,13 +78,12 @@ impl Entry { &self, storage: &S, block: NumberFor, - require_value: bool, ) -> ClientResult, Option>)>> { // we're looking for the best value let mut next = None; let mut current = self.valid_from.clone(); if block >= self.valid_from.number { - let value = if require_value { self.value.clone() } else { None }; + let value = self.value.clone(); return Ok(Some((Entry { valid_from: current, value }, next))); } @@ -119,47 +121,41 @@ mod tests { #[test] fn entry_try_update_works() { - // when trying to update with the same None value - assert_eq!(Entry::<_, u64> { valid_from: test_id(1), value: None }.try_update(None), None); + // when trying to update with None value + assert_eq!(Entry::<_, u64> { valid_from: test_id(1), value: 42 }.try_update(None), None); // when trying to update with the same Some value - assert_eq!(Entry { valid_from: test_id(1), value: Some(1) }.try_update(Some(1)), None); - // when trying to update with different None value - assert_eq!(Entry { valid_from: test_id(1), value: Some(1) }.try_update(None), - Some(StorageEntry { prev_valid_from: Some(test_id(1)), value: None })); + assert_eq!(Entry { valid_from: test_id(1), value: 1 }.try_update(Some(1)), None); // when trying to update with different Some value - assert_eq!(Entry { valid_from: test_id(1), value: Some(1) }.try_update(Some(2)), - Some(StorageEntry { prev_valid_from: Some(test_id(1)), value: Some(2) })); + assert_eq!(Entry { valid_from: test_id(1), value: 1 }.try_update(Some(2)), + Some(StorageEntry { prev_valid_from: Some(test_id(1)), value: 2 })); } #[test] fn entry_search_best_before_fails() { // when storage returns error - assert!(Entry::<_, u64> { valid_from: test_id(100), value: None }.search_best_before(&FaultyStorage, 50, false).is_err()); + assert!(Entry::<_, u64> { valid_from: test_id(100), value: 42 } + .search_best_before(&FaultyStorage, 50).is_err()); } #[test] fn entry_search_best_before_works() { - // 
when block is better than our best block AND value is not required - assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: Some(100) } - .search_best_before(&DummyStorage::new(), 150, false).unwrap(), - Some((Entry::<_, u64> { valid_from: test_id(100), value: None }, None))); - // when block is better than our best block AND value is required - assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: Some(100) } - .search_best_before(&DummyStorage::new(), 150, true).unwrap(), - Some((Entry::<_, u64> { valid_from: test_id(100), value: Some(100) }, None))); + // when block is better than our best block + assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: 100 } + .search_best_before(&DummyStorage::new(), 150).unwrap(), + Some((Entry::<_, u64> { valid_from: test_id(100), value: 100 }, None))); // when block is found between two entries - assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: Some(100) } + assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: 100 } .search_best_before(&DummyStorage::new() - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: Some(100) }) - .with_entry(test_id(50), StorageEntry { prev_valid_from: Some(test_id(30)), value: Some(50) }), - 75, false).unwrap(), - Some((Entry::<_, u64> { valid_from: test_id(50), value: Some(50) }, Some(test_id(100))))); + .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }) + .with_entry(test_id(50), StorageEntry { prev_valid_from: Some(test_id(30)), value: 50 }), + 75).unwrap(), + Some((Entry::<_, u64> { valid_from: test_id(50), value: 50 }, Some(test_id(100))))); // when block is not found - assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: Some(100) } + assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: 100 } .search_best_before(&DummyStorage::new() - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: Some(100) }) - .with_entry(test_id(50), 
StorageEntry { prev_valid_from: None, value: Some(50) }), - 30, true).unwrap(), + .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }) + .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: 50 }), + 30).unwrap(), None); } } diff --git a/core/client/db/src/cache/mod.rs b/core/client/db/src/cache/mod.rs index a54668d5d7bfd..555360d16b02a 100644 --- a/core/client/db/src/cache/mod.rs +++ b/core/client/db/src/cache/mod.rs @@ -221,7 +221,7 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> { ), parent.clone(), block.clone(), - value.or(cache.value_at_block(&parent)?), + value, entry_type, )?; if let Some(op) = op { From 67642d1a2175115934cd1e5364ce488c61e5722b Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Mon, 1 Jul 2019 09:39:19 +0300 Subject: [PATCH 08/63] changes trie config (FULL) cache draft --- core/client/db/src/cache/list_cache.rs | 2 + core/client/db/src/changes_tries_storage.rs | 182 +++++++++++++++++++- core/client/db/src/lib.rs | 23 ++- core/client/db/src/utils.rs | 2 +- 4 files changed, 199 insertions(+), 10 deletions(-) diff --git a/core/client/db/src/cache/list_cache.rs b/core/client/db/src/cache/list_cache.rs index 727375244d6ce..90a32090bd6e4 100644 --- a/core/client/db/src/cache/list_cache.rs +++ b/core/client/db/src/cache/list_cache.rs @@ -184,6 +184,7 @@ impl> ListCache debug_assert!(entry_type != EntryType::Final || self.best_finalized_block.hash == parent.hash); // we do not store any values behind finalized +// TODO: how this works with CT configuration??? 
if block.number != Zero::zero() && self.best_finalized_block.number >= block.number { return Ok(None); } @@ -359,6 +360,7 @@ impl> ListCache tx: &mut Tx, block: &ComplexBlockId ) { +// TODO: do not finalize CT configuration let mut do_pruning = || -> ClientResult<()> { // calculate last ancient block number let ancient_block = match block.number.checked_sub(&self.prune_depth) { diff --git a/core/client/db/src/changes_tries_storage.rs b/core/client/db/src/changes_tries_storage.rs index 016e796960b1a..f36f1889fa285 100644 --- a/core/client/db/src/changes_tries_storage.rs +++ b/core/client/db/src/changes_tries_storage.rs @@ -16,17 +16,30 @@ //! DB-backed changes tries storage. +use std::collections::HashMap; use std::sync::Arc; use kvdb::{KeyValueDB, DBTransaction}; +use parity_codec::Encode; use parking_lot::RwLock; +use client::error::Result as ClientResult; use trie::MemoryDB; +use client::blockchain::{Cache, well_known_cache_keys}; +use parity_codec::Decode; use primitives::{H256, Blake2Hasher, ChangesTrieConfiguration, convert_hash}; use runtime_primitives::traits::{ Block as BlockT, Header as HeaderT, NumberFor, Zero, One, }; -use runtime_primitives::generic::{BlockId, DigestItem}; +use runtime_primitives::generic::{BlockId, DigestItem, ChangesTrieSignal}; use state_machine::DBValue; use crate::utils::{self, Meta}; +use crate::cache::{DbCacheSync, DbCache, DbCacheTransactionOps, ComplexBlockId, EntryType as CacheEntryType}; + +/// Extract new changes trie configuration (if available) from the header. 
+pub fn extract_new_configuration(header: &Header) -> Option<&Option> { + header.digest() + .log(DigestItem::as_changes_trie_signal) + .and_then(ChangesTrieSignal::as_new_configuration) +} pub struct DbChangesTrieStorage { db: Arc, @@ -35,6 +48,7 @@ pub struct DbChangesTrieStorage { header_column: Option, meta: Arc, Block::Hash>>>, min_blocks_to_keep: Option, + cache: DbCacheSync, _phantom: ::std::marker::PhantomData, } @@ -45,25 +59,83 @@ impl> DbChangesTrieStorage { changes_tries_column: Option, key_lookup_column: Option, header_column: Option, + cache_column: Option, meta: Arc, Block::Hash>>>, min_blocks_to_keep: Option, ) -> Self { + let (finalized_hash, finalized_number, genesis_hash) = { + let meta = meta.read(); + (meta.finalized_hash, meta.finalized_number, meta.genesis_hash) + }; Self { - db, + db: db.clone(), changes_tries_column, key_lookup_column, header_column, meta, min_blocks_to_keep, + cache: DbCacheSync(RwLock::new(DbCache::new( + db.clone(), + key_lookup_column, + header_column, + cache_column, + genesis_hash, + ComplexBlockId::new(finalized_hash, finalized_number), + ))), _phantom: Default::default(), } } + /// Get configuration at given block. + pub fn configuration_at( + &self, + at: &BlockId, + ) -> Option { + let maybe_encoded: Option> = self.cache.get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, at); + let maybe_config: Option> = + maybe_encoded.and_then(|config| Decode::decode(&mut &config[..])); + maybe_config.and_then(|v| v) + } + /// Commit new changes trie. 
- pub fn commit(&self, tx: &mut DBTransaction, mut changes_trie: MemoryDB) { + pub fn commit( + &self, + tx: &mut DBTransaction, + mut changes_trie: MemoryDB, + parent_block: ComplexBlockId, + block: ComplexBlockId, + finalized: bool, + new_configuration: Option>, + ) -> ClientResult>> { + // insert changes trie, associated with block, into DB for (key, (val, _)) in changes_trie.drain() { tx.put(self.changes_tries_column, &key[..], &val); } + + // if configuration has been changed, we need to update configuration cache as well + let new_configuration = match new_configuration { + Some(new_configuration) => new_configuration, + None => return Ok(None), + }; + + let mut cache_at = HashMap::new(); + cache_at.insert(well_known_cache_keys::CHANGES_TRIE_CONFIG, new_configuration.encode()); + + Ok(Some(self.cache.0.write().transaction(tx) + .on_block_insert( + parent_block, + block, + cache_at, + if finalized { CacheEntryType::Final } else { CacheEntryType::NonFinal }, + )? + .into_ops())) + } + + /// When transaction has been committed. + pub fn post_commit(&self, cache_ops: Option>) { + if let Some(cache_ops) = cache_ops { + self.cache.0.write().commit(cache_ops); + } } /// Prune obsolete changes tries. 
@@ -195,8 +267,10 @@ where #[cfg(test)] mod tests { - use client::backend::Backend as ClientBackend; + use client::backend::{Backend as ClientBackend, NewBlockState, BlockImportOperation}; use client::blockchain::HeaderBackend as BlockchainHeaderBackend; + use runtime_primitives::testing::Header; + use runtime_primitives::traits::{Hash, BlakeTwo256}; use state_machine::{ChangesTrieRootsStorage, ChangesTrieStorage}; use crate::Backend; use crate::tests::{Block, insert_header, prepare_changes}; @@ -419,4 +493,104 @@ mod tests { assert!(backend.changes_tries_storage.get(&root2, &[]).unwrap().is_none()); assert!(backend.changes_tries_storage.get(&root3, &[]).unwrap().is_some()); } + + #[test] + fn changes_tries_configuration_is_updated_on_block_insert() { + fn insert_header_with_configuration_change( + backend: &Backend, + number: u64, + parent_hash: H256, + changes: Vec<(Vec, Vec)>, + new_configuration: Option, + ) -> H256 { + use runtime_primitives::testing::Digest; + + let (changes_root, changes_trie_update) = prepare_changes(changes); + let digest = Digest { + logs: vec![ + DigestItem::ChangesTrieRoot(changes_root), + DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration(new_configuration)), + ], + }; + let header = Header { + number, + parent_hash, + state_root: BlakeTwo256::trie_root::<_, &[u8], &[u8]>(Vec::new()), + digest, + extrinsics_root: Default::default(), + }; + let header_hash = header.hash(); + + let block_id = if number == 0 { + BlockId::Hash(Default::default()) + } else { + BlockId::Number(number - 1) + }; + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, block_id).unwrap(); + op.set_block_data(header, None, None, NewBlockState::Best).unwrap(); + op.update_changes_trie(changes_trie_update).unwrap(); + backend.commit_operation(op).unwrap(); + + header_hash + } + + let backend = Backend::::new_test(1000, 100); + + // configurations at blocks + let config_at_1 = Some(ChangesTrieConfiguration { + 
digest_interval: 4, + digest_levels: 2, + }); + let config_at_3 = Some(ChangesTrieConfiguration { + digest_interval: 8, + digest_levels: 1, + }); + let config_at_5 = None; + let config_at_7 = Some(ChangesTrieConfiguration { + digest_interval: 8, + digest_levels: 1, + }); + + // insert some blocks + let block0 = insert_header(&backend, 0, Default::default(), Vec::new(), Default::default()); + let block1 = insert_header_with_configuration_change(&backend, 1, block0, Vec::new(), config_at_1.clone()); + let block2 = insert_header(&backend, 2, block1, Vec::new(), Default::default()); + let block3 = insert_header_with_configuration_change(&backend, 3, block2, Vec::new(), config_at_3.clone()); + let block4 = insert_header(&backend, 4, block3, Vec::new(), Default::default()); + let block5 = insert_header_with_configuration_change(&backend, 5, block4, Vec::new(), config_at_5.clone()); + let block6 = insert_header(&backend, 6, block5, Vec::new(), Default::default()); + let block7 = insert_header_with_configuration_change(&backend, 7, block6, Vec::new(), config_at_7.clone()); + + // test configuration cache + let storage = backend.changes_trie_storage().unwrap(); + assert_eq!( + storage.configuration_at(&BlockId::Hash(block1)), + config_at_1.clone(), + ); + assert_eq!( + storage.configuration_at(&BlockId::Hash(block2)), + config_at_1.clone(), + ); + assert_eq!( + storage.configuration_at(&BlockId::Hash(block3)), + config_at_3.clone(), + ); + assert_eq!( + storage.configuration_at(&BlockId::Hash(block4)), + config_at_3.clone(), + ); + assert_eq!( + storage.configuration_at(&BlockId::Hash(block5)), + config_at_5.clone(), + ); + assert_eq!( + storage.configuration_at(&BlockId::Hash(block6)), + config_at_5.clone(), + ); + assert_eq!( + storage.configuration_at(&BlockId::Hash(block7)), + config_at_7.clone(), + ); + } } diff --git a/core/client/db/src/lib.rs b/core/client/db/src/lib.rs index 3faf8bb6c7cee..7fa8d97ea4691 100644 --- a/core/client/db/src/lib.rs +++ 
b/core/client/db/src/lib.rs @@ -214,6 +214,7 @@ mod columns { pub const JUSTIFICATION: Option = Some(6); pub const CHANGES_TRIE: Option = Some(7); pub const AUX: Option = Some(8); + pub const CACHE: Option = Some(9); } struct PendingBlock { @@ -377,6 +378,7 @@ pub struct BlockImportOperation { storage_updates: StorageCollection, child_storage_updates: ChildStorageCollection, changes_trie_updates: MemoryDB, + changes_trie_config_update: Option>, pending_block: Option>, aux_ops: Vec<(Vec, Option>)>, finalized_blocks: Vec<(BlockId, Option)>, @@ -412,6 +414,7 @@ where Block: BlockT, leaf_state: NewBlockState, ) -> Result<(), client::error::Error> { assert!(self.pending_block.is_none(), "Only one block per operation is allowed"); + self.changes_trie_config_update = changes_tries_storage::extract_new_configuration(&header).cloned(); self.pending_block = Some(PendingBlock { header, body, @@ -611,6 +614,7 @@ impl> Backend { columns::CHANGES_TRIE, columns::KEY_LOOKUP, columns::HEADER, + columns::CACHE, meta, if is_archive_pruning { None } else { Some(MIN_BLOCKS_TO_KEEP_CHANGES_TRIES_FOR) }, ); @@ -920,11 +924,17 @@ impl> Backend { let header = &pending_block.header; let is_best = pending_block.leaf_state.is_best(); let changes_trie_updates = operation.changes_trie_updates; - - self.changes_tries_storage.commit(&mut transaction, changes_trie_updates); + let changes_trie_config_update = operation.changes_trie_config_update; + let changes_trie_cache_ops = self.changes_tries_storage.commit( + &mut transaction, + changes_trie_updates, + cache::ComplexBlockId::new(*header.parent_hash(), if number.is_zero() { Zero::zero() } else { number - One::one() }), + cache::ComplexBlockId::new(hash, number), + finalized, + changes_trie_config_update, + )?; let cache = operation.old_state.release(); // release state reference so that it can be finalized - if finalized { // TODO: ensure best chain contains this block. 
self.ensure_sequential_finalization(header, Some(last_finalized_hash))?; @@ -955,7 +965,7 @@ impl> Backend { meta_updates.push((hash, number, pending_block.leaf_state.is_best(), finalized)); - Some((number, hash, enacted, retracted, displaced_leaf, is_best, cache)) + Some((number, hash, enacted, retracted, displaced_leaf, is_best, cache, changes_trie_cache_ops)) } else { None }; @@ -977,7 +987,7 @@ impl> Backend { let write_result = self.storage.db.write(transaction).map_err(db_err); - if let Some((number, hash, enacted, retracted, displaced_leaf, is_best, mut cache)) = imported { + if let Some((number, hash, enacted, retracted, displaced_leaf, is_best, mut cache, changes_trie_cache_ops)) = imported { if let Err(e) = write_result { let mut leaves = self.blockchain.leaves.write(); let mut undo = leaves.undo(); @@ -992,6 +1002,8 @@ impl> Backend { return Err(e) } + self.changes_tries_storage.post_commit(changes_trie_cache_ops); + cache.sync_cache( &enacted, &retracted, @@ -1104,6 +1116,7 @@ impl client::backend::Backend for Backend whe db_updates: PrefixedMemoryDB::default(), storage_updates: Default::default(), child_storage_updates: Default::default(), + changes_trie_config_update: None, changes_trie_updates: MemoryDB::default(), aux_ops: Vec::new(), finalized_blocks: Vec::new(), diff --git a/core/client/db/src/utils.rs b/core/client/db/src/utils.rs index a4ab82b5d8b2d..0179165a11851 100644 --- a/core/client/db/src/utils.rs +++ b/core/client/db/src/utils.rs @@ -36,7 +36,7 @@ use crate::DatabaseSettings; /// Number of columns in the db. Must be the same for both full && light dbs. /// Otherwise RocksDb will fail to open database && check its type. -pub const NUM_COLUMNS: u32 = 9; +pub const NUM_COLUMNS: u32 = 10; /// Meta column. The set of keys in the column is shared by full && light storages. 
pub const COLUMN_META: Option = Some(0); From a7e240d4c3ddca8e26974f02d5dbecc9d39e508f Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Mon, 1 Jul 2019 17:25:03 +0300 Subject: [PATCH 09/63] eliminating const ChangesTrieConfiguration --- core/client/db/src/cache/list_cache.rs | 17 +++---- core/client/db/src/cache/mod.rs | 7 ++- core/client/db/src/changes_tries_storage.rs | 31 +++++++------ core/client/db/src/lib.rs | 51 +++++++++------------ core/client/db/src/light.rs | 15 ++++-- core/client/src/blockchain.rs | 5 +- 6 files changed, 66 insertions(+), 60 deletions(-) diff --git a/core/client/db/src/cache/list_cache.rs b/core/client/db/src/cache/list_cache.rs index 90a32090bd6e4..c923980dfbfd9 100644 --- a/core/client/db/src/cache/list_cache.rs +++ b/core/client/db/src/cache/list_cache.rs @@ -130,7 +130,7 @@ impl> ListCache } /// Get value valid at block. - pub fn value_at_block(&self, at: &ComplexBlockId) -> ClientResult> { + pub fn value_at_block(&self, at: &ComplexBlockId) -> ClientResult, T)>> { let head = if at.number <= self.best_finalized_block.number { // if the block is older than the best known finalized block // => we're should search for the finalized value @@ -164,7 +164,7 @@ impl> ListCache match head { Some(head) => head.search_best_before(&self.storage, at.number) - .map(|e| e.map(|e| e.0.value)), + .map(|e| e.map(|e| (e.0.valid_from, e.0.value))), None => Ok(None), } } @@ -665,6 +665,7 @@ pub mod tests { #[test] fn list_value_at_block_works() { +// TODO: check that value_at_block actually returns correct value!!! 
// when block is earlier than best finalized block AND it is not finalized // --- 50 --- // ----------> [100] @@ -679,7 +680,7 @@ pub mod tests { .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }) .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), 1024, test_id(100) - ).value_at_block(&test_id(50)).unwrap(), Some(30)); + ).value_at_block(&test_id(50)).unwrap(), Some((test_id(30), 30))); // when block is the best finalized block AND value is some // ---> [100] assert_eq!(ListCache::new( @@ -689,7 +690,7 @@ pub mod tests { .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }) .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), 1024, test_id(100) - ).value_at_block(&test_id(100)).unwrap(), Some(100)); + ).value_at_block(&test_id(100)).unwrap(), Some((test_id(100), 100))); // when block is parallel to the best finalized block // ---- 100 // ---> [100] @@ -710,7 +711,7 @@ pub mod tests { .with_id(50, H256::from_low_u64_be(50)) .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }), 1024, test_id(100) - ).value_at_block(&test_id(200)).unwrap(), Some(100)); + ).value_at_block(&test_id(200)).unwrap(), Some((test_id(100), 100))); // when block is later than last finalized block AND there are no matching forks // AND block is connected to finalized block AND finalized value is Some @@ -726,7 +727,7 @@ pub mod tests { .with_header(test_header(4)) .with_header(fork_header(0, 2, 3)), 1024, test_id(2) - ).value_at_block(&fork_id(0, 2, 3)).unwrap(), Some(2)); + ).value_at_block(&fork_id(0, 2, 3)).unwrap(), Some((correct_id(2), 2))); // when block is later than last finalized block AND there are no matching forks // AND block is not connected to finalized block // --- 2 --- 3 @@ -756,7 +757,7 @@ pub mod tests { .with_header(test_header(4)) .with_header(test_header(5)), 1024, test_id(2) - 
).value_at_block(&correct_id(5)).unwrap(), Some(4)); + ).value_at_block(&correct_id(5)).unwrap(), Some((correct_id(4), 4))); // when block is later than last finalized block AND it does not fits unfinalized fork // AND it is connected to the finalized block AND finalized value is Some // ---> [2] ----------> [4] @@ -771,7 +772,7 @@ pub mod tests { .with_header(test_header(4)) .with_header(fork_header(0, 2, 3)), 1024, test_id(2) - ).value_at_block(&fork_id(0, 2, 3)).unwrap(), Some(2)); + ).value_at_block(&fork_id(0, 2, 3)).unwrap(), Some((correct_id(2), 2))); } #[test] diff --git a/core/client/db/src/cache/mod.rs b/core/client/db/src/cache/mod.rs index 555360d16b02a..84cef9b1e5a5c 100644 --- a/core/client/db/src/cache/mod.rs +++ b/core/client/db/src/cache/mod.rs @@ -294,7 +294,7 @@ impl BlockchainCache for DbCacheSync { Ok(()) } - fn get_at(&self, key: &CacheKeyId, at: &BlockId) -> Option> { + fn get_at(&self, key: &CacheKeyId, at: &BlockId) -> Option<(Block::Hash, Vec)> { let cache = self.0.read(); let storage = cache.cache_at.get(key)?.storage(); let db = storage.db(); @@ -318,7 +318,10 @@ impl BlockchainCache for DbCacheSync { }, }; - cache.cache_at.get(key)?.value_at_block(&at).ok()? + cache.cache_at.get(key)? + .value_at_block(&at) + .map(|block_and_value| block_and_value.map(|(block, value)| (block.hash, value))) + .ok()? 
} } diff --git a/core/client/db/src/changes_tries_storage.rs b/core/client/db/src/changes_tries_storage.rs index f36f1889fa285..42ac394790ff8 100644 --- a/core/client/db/src/changes_tries_storage.rs +++ b/core/client/db/src/changes_tries_storage.rs @@ -21,7 +21,7 @@ use std::sync::Arc; use kvdb::{KeyValueDB, DBTransaction}; use parity_codec::Encode; use parking_lot::RwLock; -use client::error::Result as ClientResult; +use client::error::{Error as ClientError, Result as ClientResult}; use trie::MemoryDB; use client::blockchain::{Cache, well_known_cache_keys}; use parity_codec::Decode; @@ -90,11 +90,15 @@ impl> DbChangesTrieStorage { pub fn configuration_at( &self, at: &BlockId, - ) -> Option { - let maybe_encoded: Option> = self.cache.get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, at); - let maybe_config: Option> = - maybe_encoded.and_then(|config| Decode::decode(&mut &config[..])); - maybe_config.and_then(|v| v) + ) -> ClientResult> { + // TODO: deal with errors here - whenever cache have no value for block, or we unable to decode it - return error + let encoded = self.cache + .get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, at) + .map(|block_and_value| block_and_value.1) + .ok_or_else(|| ClientError::Backend("TODO".into()))?; // TODO: specific error + let maybe_config = Decode::decode(&mut &encoded[..]) + .ok_or_else(|| ClientError::Backend("TODO".into()))?; // TODO: specific error + Ok(maybe_config) } /// Commit new changes trie. 
@@ -281,7 +285,6 @@ mod tests { let backend = Backend::::new_test(1000, 100); backend.changes_tries_storage.meta.write().finalized_number = 1000; - let check_changes = |backend: &Backend, block: u64, changes: Vec<(Vec, Vec)>| { let (changes_root, mut changes_trie_update) = prepare_changes(changes); let anchor = state_machine::ChangesTrieAnchorBlockId { @@ -565,31 +568,31 @@ mod tests { // test configuration cache let storage = backend.changes_trie_storage().unwrap(); assert_eq!( - storage.configuration_at(&BlockId::Hash(block1)), + storage.configuration_at(&BlockId::Hash(block1)).unwrap(), config_at_1.clone(), ); assert_eq!( - storage.configuration_at(&BlockId::Hash(block2)), + storage.configuration_at(&BlockId::Hash(block2)).unwrap(), config_at_1.clone(), ); assert_eq!( - storage.configuration_at(&BlockId::Hash(block3)), + storage.configuration_at(&BlockId::Hash(block3)).unwrap(), config_at_3.clone(), ); assert_eq!( - storage.configuration_at(&BlockId::Hash(block4)), + storage.configuration_at(&BlockId::Hash(block4)).unwrap(), config_at_3.clone(), ); assert_eq!( - storage.configuration_at(&BlockId::Hash(block5)), + storage.configuration_at(&BlockId::Hash(block5)).unwrap(), config_at_5.clone(), ); assert_eq!( - storage.configuration_at(&BlockId::Hash(block6)), + storage.configuration_at(&BlockId::Hash(block6)).unwrap(), config_at_5.clone(), ); assert_eq!( - storage.configuration_at(&BlockId::Hash(block7)), + storage.configuration_at(&BlockId::Hash(block7)).unwrap(), config_at_7.clone(), ); } diff --git a/core/client/db/src/lib.rs b/core/client/db/src/lib.rs index 7fa8d97ea4691..8bacc028dfb70 100644 --- a/core/client/db/src/lib.rs +++ b/core/client/db/src/lib.rs @@ -414,7 +414,9 @@ where Block: BlockT, leaf_state: NewBlockState, ) -> Result<(), client::error::Error> { assert!(self.pending_block.is_none(), "Only one block per operation is allowed"); - self.changes_trie_config_update = changes_tries_storage::extract_new_configuration(&header).cloned(); + if let 
Some(changes_trie_config_update) = changes_tries_storage::extract_new_configuration(&header) { + self.changes_trie_config_update = Some(changes_trie_config_update.clone()); + } self.pending_block = Some(PendingBlock { header, body, @@ -453,12 +455,20 @@ where Block: BlockT, .map(|(storage_key, child_overlay)| (storage_key, child_overlay.into_iter().map(|(k, v)| (k, Some(v))))); + let mut changes_trie_config: Option = None; let (root, transaction) = self.old_state.full_storage_root( - top.into_iter().map(|(k, v)| (k, Some(v))), + top.into_iter().map(|(k, v)| { + if k == well_known_keys::CHANGES_TRIE_CONFIG { + changes_trie_config = Decode::decode(&mut &v[..]); + } + (k, Some(v)) + }), child_delta ); self.db_updates = transaction; + self.changes_trie_config_update = Some(changes_trie_config); + Ok(root) } @@ -540,9 +550,6 @@ impl state_machine::Storage for DbGenesisStorage { pub struct Backend { storage: Arc>, changes_tries_storage: DbChangesTrieStorage, - /// None<*> means that the value hasn't been cached yet. Some(*) means that the value (either None or - /// Some(*)) has been cached and is valid. - changes_trie_config: Mutex>>, blockchain: BlockchainDb, canonicalization_delay: u64, shared_cache: SharedCache, @@ -622,7 +629,6 @@ impl> Backend { Ok(Backend { storage: Arc::new(storage_db), changes_tries_storage, - changes_trie_config: Mutex::new(None), blockchain, canonicalization_delay, shared_cache: new_shared_cache( @@ -681,26 +687,6 @@ impl> Backend { inmem } - /// Read (from storage or cache) changes trie config. - /// - /// Currently changes tries configuration is set up once (at genesis) and could not - /// be changed. Thus, we'll actually read value once and then just use cached value. 
- fn changes_trie_config(&self, block: Block::Hash) -> Result, client::error::Error> { - let mut cached_changes_trie_config = self.changes_trie_config.lock(); - match cached_changes_trie_config.clone() { - Some(cached_changes_trie_config) => Ok(cached_changes_trie_config), - None => { - use client::backend::Backend; - let changes_trie_config = self - .state_at(BlockId::Hash(block))? - .storage(well_known_keys::CHANGES_TRIE_CONFIG)? - .and_then(|v| Decode::decode(&mut &*v)); - *cached_changes_trie_config = Some(changes_trie_config.clone()); - Ok(changes_trie_config) - }, - } - } - /// Handle setting head within a transaction. `route_to` should be the last /// block that existed in the database. `best_to` should be the best block /// to be set. @@ -903,6 +889,11 @@ impl> Backend { if number.is_zero() { transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, &lookup_key); transaction.put(columns::META, meta_keys::GENESIS_HASH, hash.as_ref()); + + // for tests, because config is set from within the reset_storage + if operation.changes_trie_config_update.is_none() { + operation.changes_trie_config_update = Some(None); + } } let mut changeset: state_db::ChangeSet> = state_db::ChangeSet::default(); @@ -1047,9 +1038,11 @@ impl> Backend { .map_err(|e: state_db::Error| client::error::Error::from(format!("State database error: {:?}", e)))?; apply_state_commit(transaction, commit); - let changes_trie_config = self.changes_trie_config(parent_hash)?; - if let Some(changes_trie_config) = changes_trie_config { - self.changes_tries_storage.prune(&changes_trie_config, transaction, f_hash, f_num); + if !f_num.is_zero() { + let changes_trie_config = self.changes_tries_storage.configuration_at(&BlockId::Hash(parent_hash))?; + if let Some(changes_trie_config) = changes_trie_config { + self.changes_tries_storage.prune(&changes_trie_config, transaction, f_hash, f_num); + } } } diff --git a/core/client/db/src/light.rs b/core/client/db/src/light.rs index b5e283fc33333..5aaeba3725555 
100644 --- a/core/client/db/src/light.rs +++ b/core/client/db/src/light.rs @@ -597,10 +597,10 @@ pub(crate) mod tests { header } - pub fn insert_block Header>( + pub fn insert_block Header>( db: &LightStorage, cache: HashMap>, - header: F, + mut header: F, ) -> Hash { let header = header(); let hash = header.hash(); @@ -884,7 +884,7 @@ pub(crate) mod tests { } fn get_authorities(cache: &dyn BlockchainCache, at: BlockId) -> Option> { - cache.get_at(&well_known_cache_keys::AUTHORITIES, &at).and_then(|val| Decode::decode(&mut &val[..])) + cache.get_at(&well_known_cache_keys::AUTHORITIES, &at).and_then(|(_, val)| Decode::decode(&mut &val[..])) } let auth1 = || AuthorityId::from_raw([1u8; 32]); @@ -1094,7 +1094,12 @@ pub(crate) mod tests { assert_eq!(db.cache().get_at(b"test", &BlockId::Number(0)), None); // insert genesis block (no value for cache is provided) - insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); + let mut genesis_hash = None; + insert_block(&db, HashMap::new(), || { + let header = default_header(&Default::default(), 0); + genesis_hash = Some(header.hash()); + header + }); // after genesis is inserted => None assert_eq!(db.cache().get_at(b"test", &BlockId::Number(0)), None); @@ -1103,6 +1108,6 @@ pub(crate) mod tests { db.cache().initialize(b"test", vec![42]).unwrap(); // after genesis is inserted + cache is initialized => Some - assert_eq!(db.cache().get_at(b"test", &BlockId::Number(0)), Some(vec![42])); + assert_eq!(db.cache().get_at(b"test", &BlockId::Number(0)), Some((genesis_hash.unwrap(), vec![42]))); } } diff --git a/core/client/src/blockchain.rs b/core/client/src/blockchain.rs index 445be71f5d685..5513e06a8fd68 100644 --- a/core/client/src/blockchain.rs +++ b/core/client/src/blockchain.rs @@ -104,8 +104,9 @@ pub trait Cache: Send + Sync { /// The operation should be performed once before anything else is inserted in the cache. /// Otherwise cache may end up in inconsistent state. 
fn initialize(&self, key: &well_known_cache_keys::Id, value_at_genesis: Vec) -> Result<()>; - /// Returns cached value by the given key. - fn get_at(&self, key: &well_known_cache_keys::Id, block: &BlockId) -> Option>; + /// For given key and block, returns cached value actual at this block AND block where this value + /// has been originally set. + fn get_at(&self, key: &well_known_cache_keys::Id, block: &BlockId) -> Option<(Block::Hash, Vec)>; } /// Blockchain info From 5256d038eb0da264f97a5f3f5d4e42e171db85bb Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 2 Jul 2019 11:08:04 +0300 Subject: [PATCH 10/63] delay pruning --- core/client/db/src/changes_tries_storage.rs | 59 +++++++++++--------- core/client/db/src/lib.rs | 5 +- core/state-machine/src/changes_trie/prune.rs | 1 - srml/system/src/lib.rs | 2 +- 4 files changed, 34 insertions(+), 33 deletions(-) diff --git a/core/client/db/src/changes_tries_storage.rs b/core/client/db/src/changes_tries_storage.rs index 42ac394790ff8..735c2157e6f5a 100644 --- a/core/client/db/src/changes_tries_storage.rs +++ b/core/client/db/src/changes_tries_storage.rs @@ -90,15 +90,12 @@ impl> DbChangesTrieStorage { pub fn configuration_at( &self, at: &BlockId, - ) -> ClientResult> { + ) -> ClientResult<(NumberFor, Block::Hash, Option)> { // TODO: deal with errors here - whenever cache have no value for block, or we unable to decode it - return error - let encoded = self.cache + self.cache .get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, at) - .map(|block_and_value| block_and_value.1) - .ok_or_else(|| ClientError::Backend("TODO".into()))?; // TODO: specific error - let maybe_config = Decode::decode(&mut &encoded[..]) - .ok_or_else(|| ClientError::Backend("TODO".into()))?; // TODO: specific error - Ok(maybe_config) + .and_then(|(block, encoded)| Decode::decode(&mut &encoded[..]).map(|config| (block, config))) + .ok_or_else(|| ClientError::Backend("TODO".into())) // TODO: specific error } /// Commit new changes trie. 
@@ -145,27 +142,35 @@ impl> DbChangesTrieStorage { /// Prune obsolete changes tries. pub fn prune( &self, - config: &ChangesTrieConfiguration, tx: &mut DBTransaction, + parent_hash: Block::Hash, block_hash: Block::Hash, block_num: NumberFor, - ) { + ) -> ClientResult<()> { // never prune on archive nodes let min_blocks_to_keep = match self.min_blocks_to_keep { Some(min_blocks_to_keep) => min_blocks_to_keep, - None => return, + None => return Ok(()), }; - state_machine::prune_changes_tries( - Zero::zero(), // TODO: not true - config, - &*self, - min_blocks_to_keep.into(), - &state_machine::ChangesTrieAnchorBlockId { - hash: convert_hash(&block_hash), - number: block_num, - }, - |node| tx.delete(self.changes_tries_column, node.as_ref())); + // prune changes tries that are created using newest configuration + let (mut activation_num, mut activation_hash, newest_config) = self.configuration_at(&BlockId::Hash(parent_hash))?; + if let Some(config) = newest_config { + state_machine::prune_changes_tries( + activation_num, + config, + &*self, + min_blocks_to_keep.into(), + &state_machine::ChangesTrieAnchorBlockId { + hash: convert_hash(&block_hash), + number: block_num, + }, + |node| tx.delete(self.changes_tries_column, node.as_ref())); + } + + // TODO: prune tries that were created using previous configurations + + Ok(()) } } @@ -568,31 +573,31 @@ mod tests { // test configuration cache let storage = backend.changes_trie_storage().unwrap(); assert_eq!( - storage.configuration_at(&BlockId::Hash(block1)).unwrap(), + storage.configuration_at(&BlockId::Hash(block1)).unwrap().1, config_at_1.clone(), ); assert_eq!( - storage.configuration_at(&BlockId::Hash(block2)).unwrap(), + storage.configuration_at(&BlockId::Hash(block2)).unwrap().1, config_at_1.clone(), ); assert_eq!( - storage.configuration_at(&BlockId::Hash(block3)).unwrap(), + storage.configuration_at(&BlockId::Hash(block3)).unwrap().1, config_at_3.clone(), ); assert_eq!( - 
storage.configuration_at(&BlockId::Hash(block4)).unwrap(), + storage.configuration_at(&BlockId::Hash(block4)).unwrap().1, config_at_3.clone(), ); assert_eq!( - storage.configuration_at(&BlockId::Hash(block5)).unwrap(), + storage.configuration_at(&BlockId::Hash(block5)).unwrap().1, config_at_5.clone(), ); assert_eq!( - storage.configuration_at(&BlockId::Hash(block6)).unwrap(), + storage.configuration_at(&BlockId::Hash(block6)).unwrap().1, config_at_5.clone(), ); assert_eq!( - storage.configuration_at(&BlockId::Hash(block7)).unwrap(), + storage.configuration_at(&BlockId::Hash(block7)).unwrap().1, config_at_7.clone(), ); } diff --git a/core/client/db/src/lib.rs b/core/client/db/src/lib.rs index 8bacc028dfb70..f33bbc9e962fd 100644 --- a/core/client/db/src/lib.rs +++ b/core/client/db/src/lib.rs @@ -1039,10 +1039,7 @@ impl> Backend { apply_state_commit(transaction, commit); if !f_num.is_zero() { - let changes_trie_config = self.changes_tries_storage.configuration_at(&BlockId::Hash(parent_hash))?; - if let Some(changes_trie_config) = changes_trie_config { - self.changes_tries_storage.prune(&changes_trie_config, transaction, f_hash, f_num); - } + self.changes_tries_storage.prune(transaction, parent_hash, f_hash, f_num)?; } } diff --git a/core/state-machine/src/changes_trie/prune.rs b/core/state-machine/src/changes_trie/prune.rs index f71dbeeefdba0..34c4c5675db8d 100644 --- a/core/state-machine/src/changes_trie/prune.rs +++ b/core/state-machine/src/changes_trie/prune.rs @@ -47,7 +47,6 @@ pub fn oldest_non_pruned_trie( /// level digest is created. Pruning guarantees to save changes tries for last /// `min_blocks_to_keep` blocks. We only prune changes tries at `max_digest_interval` /// ranges. -/// Returns MemoryDB that contains all deleted changes tries nodes. 
pub fn prune, H: Hasher, Number: BlockNumber, F: FnMut(H::Out)>( config_activation_block: Number, config: &Configuration, diff --git a/srml/system/src/lib.rs b/srml/system/src/lib.rs index da42ff08262f5..4abf68dcbeecf 100644 --- a/srml/system/src/lib.rs +++ b/srml/system/src/lib.rs @@ -212,7 +212,7 @@ decl_module! { } /// Set the new changes trie configuration. - pub fn set_changes_trie_onfig(changes_trie_config: Option) { + pub fn set_changes_trie_config(changes_trie_config: Option) { match changes_trie_config.clone() { Some(changes_trie_config) => storage::unhashed::put_raw( well_known_keys::CHANGES_TRIE_CONFIG, From 698e2ff97a9be1313e162d971d6d076bd5a5682a Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 3 Jul 2019 12:36:35 +0300 Subject: [PATCH 11/63] continue elimination --- core/client/db/src/cache/mod.rs | 4 +- core/client/db/src/changes_tries_storage.rs | 67 ++++++----- core/client/db/src/light.rs | 5 +- core/client/src/backend.rs | 6 + core/client/src/blockchain.rs | 2 +- core/client/src/client.rs | 36 +++--- core/client/src/in_mem.rs | 7 ++ core/client/src/light/fetcher.rs | 3 +- core/consensus/aura/src/lib.rs | 4 +- core/consensus/babe/src/lib.rs | 4 +- .../src/changes_trie/changes_iterator.rs | 104 +++++++++++++++--- 11 files changed, 170 insertions(+), 72 deletions(-) diff --git a/core/client/db/src/cache/mod.rs b/core/client/db/src/cache/mod.rs index 84cef9b1e5a5c..bdb794c7243f5 100644 --- a/core/client/db/src/cache/mod.rs +++ b/core/client/db/src/cache/mod.rs @@ -294,7 +294,7 @@ impl BlockchainCache for DbCacheSync { Ok(()) } - fn get_at(&self, key: &CacheKeyId, at: &BlockId) -> Option<(Block::Hash, Vec)> { + fn get_at(&self, key: &CacheKeyId, at: &BlockId) -> Option<(NumberFor, Block::Hash, Vec)> { let cache = self.0.read(); let storage = cache.cache_at.get(key)?.storage(); let db = storage.db(); @@ -320,7 +320,7 @@ impl BlockchainCache for DbCacheSync { cache.cache_at.get(key)? 
.value_at_block(&at) - .map(|block_and_value| block_and_value.map(|(block, value)| (block.hash, value))) + .map(|block_and_value| block_and_value.map(|(block, value)| (block.number, block.hash, value))) .ok()? } } diff --git a/core/client/db/src/changes_tries_storage.rs b/core/client/db/src/changes_tries_storage.rs index 735c2157e6f5a..75a53174668a8 100644 --- a/core/client/db/src/changes_tries_storage.rs +++ b/core/client/db/src/changes_tries_storage.rs @@ -23,6 +23,7 @@ use parity_codec::Encode; use parking_lot::RwLock; use client::error::{Error as ClientError, Result as ClientResult}; use trie::MemoryDB; +use client::backend::PrunableStateChangesTrieStorage; use client::blockchain::{Cache, well_known_cache_keys}; use parity_codec::Decode; use primitives::{H256, Blake2Hasher, ChangesTrieConfiguration, convert_hash}; @@ -86,18 +87,6 @@ impl> DbChangesTrieStorage { } } - /// Get configuration at given block. - pub fn configuration_at( - &self, - at: &BlockId, - ) -> ClientResult<(NumberFor, Block::Hash, Option)> { - // TODO: deal with errors here - whenever cache have no value for block, or we unable to decode it - return error - self.cache - .get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, at) - .and_then(|(block, encoded)| Decode::decode(&mut &encoded[..]).map(|config| (block, config))) - .ok_or_else(|| ClientError::Backend("TODO".into())) // TODO: specific error - } - /// Commit new changes trie. pub fn commit( &self, @@ -134,6 +123,7 @@ impl> DbChangesTrieStorage { /// When transaction has been committed. pub fn post_commit(&self, cache_ops: Option>) { + // TODO: hold lock between commit + post_commit!!! 
if let Some(cache_ops) = cache_ops { self.cache.0.write().commit(cache_ops); } @@ -154,11 +144,11 @@ impl> DbChangesTrieStorage { }; // prune changes tries that are created using newest configuration - let (mut activation_num, mut activation_hash, newest_config) = self.configuration_at(&BlockId::Hash(parent_hash))?; + let (activation_num, _, newest_config) = self.configuration_at(&BlockId::Hash(parent_hash))?; if let Some(config) = newest_config { state_machine::prune_changes_tries( activation_num, - config, + &config, &*self, min_blocks_to_keep.into(), &state_machine::ChangesTrieAnchorBlockId { @@ -179,6 +169,17 @@ impl client::backend::PrunableStateChangesTrieStorage, { + fn configuration_at( + &self, + at: &BlockId, + ) -> ClientResult<(NumberFor, Block::Hash, Option)> { + // TODO: deal with errors here - whenever cache have no value for block, or we unable to decode it - return error + self.cache + .get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, at) + .and_then(|(number, hash, encoded)| Decode::decode(&mut &encoded[..]).map(|config| (number, hash, config))) + .ok_or_else(|| ClientError::Backend("TODO".into())) // TODO: specific error + } + fn oldest_changes_trie_block( &self, config: &ChangesTrieConfiguration, @@ -379,12 +380,13 @@ mod tests { #[test] fn changes_tries_with_digest_are_pruned_on_finalization() { let mut backend = Backend::::new_test(1000, 100); - backend.changes_tries_storage.min_blocks_to_keep = Some(8); let config = ChangesTrieConfiguration { digest_interval: 2, digest_levels: 2, }; + backend.changes_tries_storage.min_blocks_to_keep = Some(8); + // insert some blocks let block0 = insert_header(&backend, 0, Default::default(), vec![(b"key_at_0".to_vec(), b"val_at_0".to_vec())], Default::default()); let block1 = insert_header(&backend, 1, block0, vec![(b"key_at_1".to_vec(), b"val_at_1".to_vec())], Default::default()); @@ -401,6 +403,10 @@ mod tests { let block12 = insert_header(&backend, 12, block11, vec![(b"key_at_12".to_vec(), 
b"val_at_12".to_vec())], Default::default()); let block13 = insert_header(&backend, 13, block12, vec![(b"key_at_13".to_vec(), b"val_at_13".to_vec())], Default::default()); backend.changes_tries_storage.meta.write().finalized_number = 13; + backend.changes_tries_storage.cache.initialize( + &well_known_cache_keys::CHANGES_TRIE_CONFIG, + Some(config).encode(), + ).unwrap(); // check that roots of all tries are in the columns::CHANGES_TRIE let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block13, number: 13 }; @@ -423,7 +429,7 @@ mod tests { // now simulate finalization of block#12, causing prune of tries at #1..#4 let mut tx = DBTransaction::new(); - backend.changes_tries_storage.prune(&config, &mut tx, Default::default(), 12); + backend.changes_tries_storage.prune(&mut tx, block0, Default::default(), 12).unwrap(); backend.storage.db.write(tx).unwrap(); assert!(backend.changes_tries_storage.get(&root1, &[]).unwrap().is_none()); assert!(backend.changes_tries_storage.get(&root2, &[]).unwrap().is_none()); @@ -436,7 +442,7 @@ mod tests { // now simulate finalization of block#16, causing prune of tries at #5..#8 let mut tx = DBTransaction::new(); - backend.changes_tries_storage.prune(&config, &mut tx, Default::default(), 16); + backend.changes_tries_storage.prune(&mut tx, block0, Default::default(), 16).unwrap(); backend.storage.db.write(tx).unwrap(); assert!(backend.changes_tries_storage.get(&root5, &[]).unwrap().is_none()); assert!(backend.changes_tries_storage.get(&root6, &[]).unwrap().is_none()); @@ -447,7 +453,7 @@ mod tests { // => no changes tries are pruned, because we never prune in archive mode backend.changes_tries_storage.min_blocks_to_keep = None; let mut tx = DBTransaction::new(); - backend.changes_tries_storage.prune(&config, &mut tx, Default::default(), 20); + backend.changes_tries_storage.prune(&mut tx, block0, Default::default(), 20).unwrap(); backend.storage.db.write(tx).unwrap(); assert!(backend.changes_tries_storage.get(&root9, 
&[]).unwrap().is_some()); assert!(backend.changes_tries_storage.get(&root10, &[]).unwrap().is_some()); @@ -458,12 +464,13 @@ mod tests { #[test] fn changes_tries_without_digest_are_pruned_on_finalization() { let mut backend = Backend::::new_test(1000, 100); - backend.changes_tries_storage.min_blocks_to_keep = Some(4); let config = ChangesTrieConfiguration { digest_interval: 0, digest_levels: 0, }; + backend.changes_tries_storage.min_blocks_to_keep = Some(4); + // insert some blocks let block0 = insert_header(&backend, 0, Default::default(), vec![(b"key_at_0".to_vec(), b"val_at_0".to_vec())], Default::default()); let block1 = insert_header(&backend, 1, block0, vec![(b"key_at_1".to_vec(), b"val_at_1".to_vec())], Default::default()); @@ -472,6 +479,10 @@ mod tests { let block4 = insert_header(&backend, 4, block3, vec![(b"key_at_4".to_vec(), b"val_at_4".to_vec())], Default::default()); let block5 = insert_header(&backend, 5, block4, vec![(b"key_at_5".to_vec(), b"val_at_5".to_vec())], Default::default()); let block6 = insert_header(&backend, 6, block5, vec![(b"key_at_6".to_vec(), b"val_at_6".to_vec())], Default::default()); + backend.changes_tries_storage.cache.initialize( + &well_known_cache_keys::CHANGES_TRIE_CONFIG, + Some(config).encode(), + ).unwrap(); // check that roots of all tries are in the columns::CHANGES_TRIE let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block6, number: 6 }; @@ -489,14 +500,14 @@ mod tests { // now simulate finalization of block#5, causing prune of trie at #1 let mut tx = DBTransaction::new(); - backend.changes_tries_storage.prune(&config, &mut tx, block5, 5); + backend.changes_tries_storage.prune(&mut tx, block1, block5, 5).unwrap(); backend.storage.db.write(tx).unwrap(); assert!(backend.changes_tries_storage.get(&root1, &[]).unwrap().is_none()); assert!(backend.changes_tries_storage.get(&root2, &[]).unwrap().is_some()); // now simulate finalization of block#6, causing prune of tries at #2 let mut tx = DBTransaction::new(); 
- backend.changes_tries_storage.prune(&config, &mut tx, block6, 6); + backend.changes_tries_storage.prune(&mut tx, block1, block6, 6).unwrap(); backend.storage.db.write(tx).unwrap(); assert!(backend.changes_tries_storage.get(&root2, &[]).unwrap().is_none()); assert!(backend.changes_tries_storage.get(&root3, &[]).unwrap().is_some()); @@ -573,31 +584,31 @@ mod tests { // test configuration cache let storage = backend.changes_trie_storage().unwrap(); assert_eq!( - storage.configuration_at(&BlockId::Hash(block1)).unwrap().1, + storage.configuration_at(&BlockId::Hash(block1)).unwrap().2, config_at_1.clone(), ); assert_eq!( - storage.configuration_at(&BlockId::Hash(block2)).unwrap().1, + storage.configuration_at(&BlockId::Hash(block2)).unwrap().2, config_at_1.clone(), ); assert_eq!( - storage.configuration_at(&BlockId::Hash(block3)).unwrap().1, + storage.configuration_at(&BlockId::Hash(block3)).unwrap().2, config_at_3.clone(), ); assert_eq!( - storage.configuration_at(&BlockId::Hash(block4)).unwrap().1, + storage.configuration_at(&BlockId::Hash(block4)).unwrap().2, config_at_3.clone(), ); assert_eq!( - storage.configuration_at(&BlockId::Hash(block5)).unwrap().1, + storage.configuration_at(&BlockId::Hash(block5)).unwrap().2, config_at_5.clone(), ); assert_eq!( - storage.configuration_at(&BlockId::Hash(block6)).unwrap().1, + storage.configuration_at(&BlockId::Hash(block6)).unwrap().2, config_at_5.clone(), ); assert_eq!( - storage.configuration_at(&BlockId::Hash(block7)).unwrap().1, + storage.configuration_at(&BlockId::Hash(block7)).unwrap().2, config_at_7.clone(), ); } diff --git a/core/client/db/src/light.rs b/core/client/db/src/light.rs index 5aaeba3725555..5fc2f5aadd4b6 100644 --- a/core/client/db/src/light.rs +++ b/core/client/db/src/light.rs @@ -884,7 +884,8 @@ pub(crate) mod tests { } fn get_authorities(cache: &dyn BlockchainCache, at: BlockId) -> Option> { - cache.get_at(&well_known_cache_keys::AUTHORITIES, &at).and_then(|(_, val)| Decode::decode(&mut &val[..])) + 
cache.get_at(&well_known_cache_keys::AUTHORITIES, &at) + .and_then(|(_, _, val)| Decode::decode(&mut &val[..])) } let auth1 = || AuthorityId::from_raw([1u8; 32]); @@ -1108,6 +1109,6 @@ pub(crate) mod tests { db.cache().initialize(b"test", vec![42]).unwrap(); // after genesis is inserted + cache is initialized => Some - assert_eq!(db.cache().get_at(b"test", &BlockId::Number(0)), Some((genesis_hash.unwrap(), vec![42]))); + assert_eq!(db.cache().get_at(b"test", &BlockId::Number(0)), Some((0, genesis_hash.unwrap(), vec![42]))); } } diff --git a/core/client/src/backend.rs b/core/client/src/backend.rs index f839bc1332cae..e6804f55c5a07 100644 --- a/core/client/src/backend.rs +++ b/core/client/src/backend.rs @@ -199,6 +199,12 @@ pub trait Backend: AuxStore + Send + Sync where pub trait PrunableStateChangesTrieStorage: StateChangesTrieStorage> { + /// Get coniguration at given block. + fn configuration_at(&self, at: &BlockId) -> error::Result<( + NumberFor, + Block::Hash, + Option, + )>; /// Get number block of oldest, non-pruned changes trie. fn oldest_changes_trie_block( &self, diff --git a/core/client/src/blockchain.rs b/core/client/src/blockchain.rs index 5513e06a8fd68..3baf89d24c1dc 100644 --- a/core/client/src/blockchain.rs +++ b/core/client/src/blockchain.rs @@ -106,7 +106,7 @@ pub trait Cache: Send + Sync { fn initialize(&self, key: &well_known_cache_keys::Id, value_at_genesis: Vec) -> Result<()>; /// For given key and block, returns cached value actual at this block AND block where this value /// has been originally set. 
- fn get_at(&self, key: &well_known_cache_keys::Id, block: &BlockId) -> Option<(Block::Hash, Vec)>; + fn get_at(&self, key: &well_known_cache_keys::Id, block: &BlockId) -> Option<(NumberFor, Block::Hash, Vec)>; } /// Blockchain info diff --git a/core/client/src/client.rs b/core/client/src/client.rs index dcb95f0f89478..5605a0adfd18b 100644 --- a/core/client/src/client.rs +++ b/core/client/src/client.rs @@ -503,14 +503,14 @@ impl Client where /// Get longest range within [first; last] that is possible to use in `key_changes` /// and `key_changes_proof` calls. /// Range could be shortened from the beginning if some changes tries have been pruned. - /// Returns Ok(None) if changes trues are not supported. + /// Returns Ok(None) if changes tries are not supported. pub fn max_key_changes_range( &self, first: NumberFor, last: BlockId, ) -> error::Result, BlockId)>> { - let (config, storage) = match self.require_changes_trie().ok() { - Some((config, storage)) => (config, storage), + let (activation_block, config, storage) = match self.require_changes_trie().ok() { + Some((activation_block, config, storage)) => (activation_block, config, storage), None => return Ok(None), }; let last_num = self.backend.blockchain().expect_block_number_from_id(&last)?; @@ -519,6 +519,7 @@ impl Client where } let finalized_number = self.backend.blockchain().info().finalized_number; let oldest = storage.oldest_changes_trie_block(&config, finalized_number); + let oldest = ::std::cmp::max(activation_block + One::one(), oldest); let first = ::std::cmp::max(first, oldest); Ok(Some((first, last))) } @@ -533,13 +534,14 @@ impl Client where last: BlockId, key: &StorageKey ) -> error::Result, u32)>> { - let (config, storage) = self.require_changes_trie()?; + let (activation_block, config, storage) = self.require_changes_trie()?; let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?; let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?; key_changes::<_, 
Blake2Hasher, _>( &config, &*storage, + activation_block, first, &ChangesTrieAnchorBlockId { hash: convert_hash(&last_hash), @@ -620,7 +622,7 @@ impl Client where } } - let (config, storage) = self.require_changes_trie()?; + let (activation_block, config, storage) = self.require_changes_trie()?; let min_number = self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(min))?; let recording_storage = AccessedRootsRecorder:: { @@ -642,6 +644,7 @@ impl Client where let key_changes_proof = key_changes_proof::<_, Blake2Hasher, _>( &config, &recording_storage, + activation_block, first_number, &ChangesTrieAnchorBlockId { hash: convert_hash(&last), @@ -706,12 +709,16 @@ impl Client where } /// Returns changes trie configuration and storage or an error if it is not supported. - fn require_changes_trie(&self) -> error::Result<(ChangesTrieConfiguration, &B::ChangesTrieStorage)> { - let config = self.changes_trie_config()?; - let storage = self.backend.changes_trie_storage(); - match (config, storage) { - (Some(config), Some(storage)) => Ok((config, storage)), - _ => Err(error::Error::ChangesTriesNotSupported.into()), + fn require_changes_trie(&self) -> error::Result<(NumberFor, ChangesTrieConfiguration, &B::ChangesTrieStorage)> { + let best_block = self.backend.blockchain().info().best_hash; + let storage = match self.backend.changes_trie_storage() { + Some(storage) => storage, + None => return Err(error::Error::ChangesTriesNotSupported), + }; + let (activation_block, _, config) = storage.configuration_at(&BlockId::Hash(best_block))?; + match config { + Some(config) => Ok((activation_block, config, storage)), + None => Err(error::Error::ChangesTriesNotSupported.into()), } } @@ -1305,13 +1312,6 @@ impl Client where Ok(uncles) } - fn changes_trie_config(&self) -> Result, Error> { - Ok(self.backend.state_at(BlockId::Number(self.backend.blockchain().info().best_number))? 
- .storage(well_known_keys::CHANGES_TRIE_CONFIG) - .map_err(|e| error::Error::from_state(Box::new(e)))? - .and_then(|c| Decode::decode(&mut &*c))) - } - /// Prepare in-memory header that is used in execution environment. fn prepare_environment_block(&self, parent: &BlockId) -> error::Result { let parent_header = self.backend.blockchain().expect_header(*parent)?; diff --git a/core/client/src/in_mem.rs b/core/client/src/in_mem.rs index 973fd9e1a4f75..a839554f1986d 100644 --- a/core/client/src/in_mem.rs +++ b/core/client/src/in_mem.rs @@ -714,6 +714,13 @@ where /// Prunable in-memory changes trie storage. pub struct ChangesTrieStorage(InMemoryChangesTrieStorage>); impl backend::PrunableStateChangesTrieStorage for ChangesTrieStorage { + fn configuration_at(&self, _at: &BlockId) -> error::Result<( + NumberFor, + Block::Hash, + Option, + )> { + unimplemented!("TODO: do we still need this?") + } fn oldest_changes_trie_block( &self, _config: &ChangesTrieConfiguration, diff --git a/core/client/src/light/fetcher.rs b/core/client/src/light/fetcher.rs index c77ebcd0fdd3a..cf48c14568894 100644 --- a/core/client/src/light/fetcher.rs +++ b/core/client/src/light/fetcher.rs @@ -26,7 +26,7 @@ use parity_codec::{Decode, Encode}; use primitives::{ChangesTrieConfiguration, convert_hash}; use runtime_primitives::traits::{ Block as BlockT, Header as HeaderT, Hash, HashFor, NumberFor, - SimpleArithmetic, CheckedConversion, + SimpleArithmetic, CheckedConversion, Zero, }; use state_machine::{CodeExecutor, ChangesTrieRootsStorage, ChangesTrieAnchorBlockId, TrieBackend, read_proof_check, key_changes_proof_check, @@ -291,6 +291,7 @@ impl, F> LightDataChecker(client: &C) -> Result<(), ConsensusErro let genesis_id = BlockId::Number(Zero::zero()); let genesis_authorities: Option> = cache .get_at(&well_known_cache_keys::AUTHORITIES, &genesis_id) - .and_then(|v| Decode::decode(&mut &v[..])); + .and_then(|(_, _, v)| Decode::decode(&mut &v[..])); if genesis_authorities.is_some() { return Ok(()); } @@ 
-639,7 +639,7 @@ fn authorities(client: &C, at: &BlockId) -> Result, Consensus .cache() .and_then(|cache| cache .get_at(&well_known_cache_keys::AUTHORITIES, at) - .and_then(|v| Decode::decode(&mut &v[..])) + .and_then(|(_, _, v)| Decode::decode(&mut &v[..])) ) .or_else(|| AuraApi::authorities(&*client.runtime_api(), at).ok()) .ok_or_else(|| consensus_common::Error::InvalidAuthoritiesSet.into()) diff --git a/core/consensus/babe/src/lib.rs b/core/consensus/babe/src/lib.rs index 9ec976379156e..61850b0fc6c2d 100644 --- a/core/consensus/babe/src/lib.rs +++ b/core/consensus/babe/src/lib.rs @@ -699,7 +699,7 @@ fn authorities(client: &C, at: &BlockId) -> Result< client .cache() .and_then(|cache| cache.get_at(&well_known_cache_keys::AUTHORITIES, at) - .and_then(|v| Decode::decode(&mut &v[..]))) + .and_then(|(_, _, v)| Decode::decode(&mut &v[..]))) .or_else(|| { if client.runtime_api().has_api::>(at).unwrap_or(false) { BabeApi::authorities(&*client.runtime_api(), at).ok() @@ -798,7 +798,7 @@ fn initialize_authorities_cache(client: &C) -> Result<(), ConsensusError> let genesis_id = BlockId::Number(Zero::zero()); let genesis_authorities: Option> = cache .get_at(&well_known_cache_keys::AUTHORITIES, &genesis_id) - .and_then(|v| Decode::decode(&mut &v[..])); + .and_then(|(_, _, v)| Decode::decode(&mut &v[..])); if genesis_authorities.is_some() { return Ok(()); } diff --git a/core/state-machine/src/changes_trie/changes_iterator.rs b/core/state-machine/src/changes_trie/changes_iterator.rs index 0e4716ccab98a..f2856cd7063aa 100644 --- a/core/state-machine/src/changes_trie/changes_iterator.rs +++ b/core/state-machine/src/changes_trie/changes_iterator.rs @@ -35,6 +35,7 @@ use crate::trie_backend_essence::{TrieBackendEssence}; pub fn key_changes<'a, S: Storage, H: Hasher, Number: BlockNumber>( config: &'a Configuration, storage: &'a S, + zero: Number, begin: Number, end: &'a AnchorBlockId, max: Number, @@ -50,7 +51,13 @@ pub fn key_changes<'a, S: Storage, H: Hasher, Number: 
BlockNumber>( storage, begin: begin.clone(), end, - surface: surface_iterator(config, max, begin, end.number.clone())?, + surface: surface_iterator( + config, + zero, + max, + begin, + end.number.clone(), + )?, extrinsics: Default::default(), blocks: Default::default(), @@ -65,6 +72,7 @@ pub fn key_changes<'a, S: Storage, H: Hasher, Number: BlockNumber>( pub fn key_changes_proof, H: Hasher, Number: BlockNumber>( config: &Configuration, storage: &S, + zero: Number, begin: Number, end: &AnchorBlockId, max: Number, @@ -80,7 +88,13 @@ pub fn key_changes_proof, H: Hasher, Number: BlockNumber>( storage, begin: begin.clone(), end, - surface: surface_iterator(config, max, begin, end.number.clone())?, + surface: surface_iterator( + config, + zero, + max, + begin, + end.number.clone(), + )?, extrinsics: Default::default(), blocks: Default::default(), @@ -105,6 +119,7 @@ pub fn key_changes_proof_check, H: Hasher, Number: Bl config: &Configuration, roots_storage: &S, proof: Vec>, + zero: Number, begin: Number, end: &AnchorBlockId, max: Number, @@ -126,7 +141,13 @@ pub fn key_changes_proof_check, H: Hasher, Number: Bl storage: &proof_db, begin: begin.clone(), end, - surface: surface_iterator(config, max, begin, end.number.clone())?, + surface: surface_iterator( + config, + zero, + max, + begin, + end.number.clone(), + )?, extrinsics: Default::default(), blocks: Default::default(), @@ -140,6 +161,7 @@ pub fn key_changes_proof_check, H: Hasher, Number: Bl /// all digest changes for the key. 
pub struct SurfaceIterator<'a, Number: BlockNumber> { config: &'a Configuration, + zero: Number, begin: Number, max: Number, current: Option, @@ -166,8 +188,14 @@ impl<'a, Number: BlockNumber> Iterator for SurfaceIterator<'a, Number> { else if next > self.current_begin { self.current = Some(next); } else { - let (current, current_begin, digest_step, digest_level) = match - lower_bound_max_digest(self.config, self.max.clone(), self.begin.clone(), next) { + let max_digest_interval = lower_bound_max_digest( + self.config, + self.zero.clone(), + self.max.clone(), + self.begin.clone(), + next, + ); + let (current, current_begin, digest_step, digest_level) = match max_digest_interval { Err(err) => return Some(Err(err)), Ok(range) => range, }; @@ -363,18 +391,21 @@ impl<'a, RS, S, H, Number> Iterator for ProvingDrilldownIterator<'a, RS, S, H, N /// Returns surface iterator for given range of blocks. fn surface_iterator<'a, Number: BlockNumber>( config: &'a Configuration, + zero: Number, max: Number, begin: Number, end: Number, ) -> Result, String> { let (current, current_begin, digest_step, digest_level) = lower_bound_max_digest( config, + zero.clone(), max.clone(), begin.clone(), end, )?; Ok(SurfaceIterator { config, + zero, begin, max, current: Some(current), @@ -388,6 +419,7 @@ fn surface_iterator<'a, Number: BlockNumber>( /// and tends to include the whole range. 
fn lower_bound_max_digest( config: &Configuration, + zero: Number, max: Number, begin: Number, end: Number, @@ -408,7 +440,7 @@ fn lower_bound_max_digest( let new_digest_interval = config.digest_interval * { if digest_interval == 0 { 1 } else { digest_interval } }; - let new_digest_begin = ((current.clone() - One::one()) + let new_digest_begin = zero.clone() + ((current.clone() - One::one() - zero.clone()) / new_digest_interval.into()) * new_digest_interval.into(); let new_digest_end = new_digest_begin.clone() + new_digest_interval.into(); let new_current = new_digest_begin.clone() + new_digest_interval.into(); @@ -487,31 +519,71 @@ mod tests { (config, backend) } + #[test] + fn lower_bound_max_digest_works() { + let config = Configuration { digest_interval: 4, digest_levels: 2 }; + + // when config activates at 0 + assert_eq!( + lower_bound_max_digest(&config, 0u64, 100_000u64, 20u64, 180u64).unwrap(), + (192, 176, 16, 2), + ); + + // when config activates at 30 + assert_eq!( + lower_bound_max_digest(&config, 30u64, 100_000u64, 20u64, 180u64).unwrap(), + (190, 174, 16, 2), + ); + } + + #[test] + fn surface_iterator_works() { + let config = Configuration { digest_interval: 4, digest_levels: 2 }; + + // when config activates at 0 + assert_eq!( + surface_iterator(&config, 0u64, 100_000u64, 40u64, 180u64).unwrap().collect::>(), + vec![ + Ok((192, 2)), Ok((176, 2)), Ok((160, 2)), Ok((144, 2)), Ok((128, 2)), Ok((112, 2)), + Ok((96, 2)), Ok((80, 2)), Ok((64, 2)), Ok((48, 2)), + ], + ); + + // when config activates at 30 + assert_eq!( + surface_iterator(&config, 30u64, 100_000u64, 40u64, 180u64).unwrap().collect::>(), + vec![ + Ok((190, 2)), Ok((174, 2)), Ok((158, 2)), Ok((142, 2)), Ok((126, 2)), Ok((110, 2)), + Ok((94, 2)), Ok((78, 2)), Ok((62, 2)), Ok((46, 2)), + ], + ); + } + #[test] fn drilldown_iterator_works() { let (config, storage) = prepare_for_drilldown(); let drilldown_result = key_changes::, Blake2Hasher, u64>( - &config, &storage, 0, &AnchorBlockId { hash: 
Default::default(), number: 16 }, 16, &[42]) + &config, &storage, 0, 0, &AnchorBlockId { hash: Default::default(), number: 16 }, 16, &[42]) .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(8, 2), (8, 1), (6, 3), (3, 0)])); let drilldown_result = key_changes::, Blake2Hasher, u64>( - &config, &storage, 0, &AnchorBlockId { hash: Default::default(), number: 2 }, 4, &[42]) + &config, &storage, 0, 0, &AnchorBlockId { hash: Default::default(), number: 2 }, 4, &[42]) .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![])); let drilldown_result = key_changes::, Blake2Hasher, u64>( - &config, &storage, 0, &AnchorBlockId { hash: Default::default(), number: 3 }, 4, &[42]) + &config, &storage, 0, 0, &AnchorBlockId { hash: Default::default(), number: 3 }, 4, &[42]) .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(3, 0)])); let drilldown_result = key_changes::, Blake2Hasher, u64>( - &config, &storage, 7, &AnchorBlockId { hash: Default::default(), number: 8 }, 8, &[42]) + &config, &storage, 0, 7, &AnchorBlockId { hash: Default::default(), number: 8 }, 8, &[42]) .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(8, 2), (8, 1)])); let drilldown_result = key_changes::, Blake2Hasher, u64>( - &config, &storage, 5, &AnchorBlockId { hash: Default::default(), number: 7 }, 8, &[42]) + &config, &storage, 0, 5, &AnchorBlockId { hash: Default::default(), number: 7 }, 8, &[42]) .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(6, 3)])); } @@ -522,7 +594,7 @@ mod tests { storage.clear_storage(); assert!(key_changes::, Blake2Hasher, u64>( - &config, &storage, 0, &AnchorBlockId { hash: Default::default(), number: 100 }, 1000, &[42]) + &config, &storage, 0, 0, &AnchorBlockId { hash: Default::default(), number: 100 }, 1000, &[42]) .and_then(|i| i.collect::, _>>()).is_err()); } @@ -530,9 +602,9 @@ mod tests { fn drilldown_iterator_fails_when_range_is_invalid() { let (config, storage) = prepare_for_drilldown(); 
assert!(key_changes::, Blake2Hasher, u64>( - &config, &storage, 0, &AnchorBlockId { hash: Default::default(), number: 100 }, 50, &[42]).is_err()); + &config, &storage, 0, 0, &AnchorBlockId { hash: Default::default(), number: 100 }, 50, &[42]).is_err()); assert!(key_changes::, Blake2Hasher, u64>( - &config, &storage, 20, &AnchorBlockId { hash: Default::default(), number: 10 }, 100, &[42]).is_err()); + &config, &storage, 0, 20, &AnchorBlockId { hash: Default::default(), number: 10 }, 100, &[42]).is_err()); } @@ -544,7 +616,7 @@ mod tests { let (remote_config, remote_storage) = prepare_for_drilldown(); let remote_proof = key_changes_proof::, Blake2Hasher, u64>( &remote_config, &remote_storage, - 0, &AnchorBlockId { hash: Default::default(), number: 16 }, 16, &[42]).unwrap(); + 0, 0, &AnchorBlockId { hash: Default::default(), number: 16 }, 16, &[42]).unwrap(); // happens on local light node: @@ -553,7 +625,7 @@ mod tests { local_storage.clear_storage(); let local_result = key_changes_proof_check::, Blake2Hasher, u64>( &local_config, &local_storage, remote_proof, - 0, &AnchorBlockId { hash: Default::default(), number: 16 }, 16, &[42]); + 0, 0, &AnchorBlockId { hash: Default::default(), number: 16 }, 16, &[42]); // check that drilldown result is the same as if it was happening at the full node assert_eq!(local_result, Ok(vec![(8, 2), (8, 1), (6, 3), (3, 0)])); From 04a8fa033bf0142541e22c52cfc1540132ad43f9 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 3 Jul 2019 13:58:18 +0300 Subject: [PATCH 12/63] do not prune CT config from cache --- core/client/db/src/cache/list_cache.rs | 83 ++++++++++++++++---------- core/client/db/src/cache/mod.rs | 13 +++- 2 files changed, 60 insertions(+), 36 deletions(-) diff --git a/core/client/db/src/cache/list_cache.rs b/core/client/db/src/cache/list_cache.rs index c923980dfbfd9..f38a811a135de 100644 --- a/core/client/db/src/cache/list_cache.rs +++ b/core/client/db/src/cache/list_cache.rs @@ -52,12 +52,21 @@ use 
crate::cache::{CacheItemT, ComplexBlockId, EntryType}; use crate::cache::list_entry::{Entry, StorageEntry}; use crate::cache::list_storage::{Storage, StorageTransaction, Metadata}; +/// Pruning strategy. +#[derive(Debug, Clone, Copy)] +pub enum PruningStrategy { + /// Prune entries when they're too far behind best finalized block. + ByDepth(N), + /// Do not prune old entries at all. + NeverPrune, +} + /// List-based cache. pub struct ListCache> { /// Cache storage. storage: S, - /// Prune depth. - prune_depth: NumberFor, + /// Pruning strategy. + pruning_strategy: PruningStrategy>, /// Best finalized block. best_finalized_block: ComplexBlockId, /// Best finalized entry (if exists). @@ -107,7 +116,11 @@ pub enum ForkAppendResult { impl> ListCache { /// Create new db list cache entry. - pub fn new(storage: S, prune_depth: NumberFor, best_finalized_block: ComplexBlockId) -> Self { + pub fn new( + storage: S, + pruning_strategy: PruningStrategy>, + best_finalized_block: ComplexBlockId, + ) -> Self { let (best_finalized_entry, unfinalized) = storage.read_meta() .and_then(|meta| read_forks(&storage, meta)) .unwrap_or_else(|error| { @@ -117,7 +130,7 @@ impl> ListCache ListCache { storage, - prune_depth, + pruning_strategy, best_finalized_block, best_finalized_entry, unfinalized, @@ -360,10 +373,14 @@ impl> ListCache tx: &mut Tx, block: &ComplexBlockId ) { -// TODO: do not finalize CT configuration + let prune_depth = match self.pruning_strategy { + PruningStrategy::ByDepth(prune_depth) => prune_depth, + PruningStrategy::NeverPrune => return, + }; + let mut do_pruning = || -> ClientResult<()> { // calculate last ancient block number - let ancient_block = match block.number.checked_sub(&self.prune_depth) { + let ancient_block = match block.number.checked_sub(&prune_depth) { Some(number) => match self.storage.read_id(number)? 
{ Some(hash) => ComplexBlockId::new(hash, number), None => return Ok(()), @@ -669,7 +686,7 @@ pub mod tests { // when block is earlier than best finalized block AND it is not finalized // --- 50 --- // ----------> [100] - assert_eq!(ListCache::<_, u64, _>::new(DummyStorage::new(), 1024, test_id(100)) + assert_eq!(ListCache::<_, u64, _>::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)) .value_at_block(&test_id(50)).unwrap(), None); // when block is earlier than best finalized block AND it is finalized AND value is some // [30] ---- 50 ---> [100] @@ -679,7 +696,7 @@ pub mod tests { .with_id(50, H256::from_low_u64_be(50)) .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }) .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), - 1024, test_id(100) + PruningStrategy::ByDepth(1024), test_id(100) ).value_at_block(&test_id(50)).unwrap(), Some((test_id(30), 30))); // when block is the best finalized block AND value is some // ---> [100] @@ -689,7 +706,7 @@ pub mod tests { .with_id(100, H256::from_low_u64_be(100)) .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }) .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), - 1024, test_id(100) + PruningStrategy::ByDepth(1024), test_id(100) ).value_at_block(&test_id(100)).unwrap(), Some((test_id(100), 100))); // when block is parallel to the best finalized block // ---- 100 @@ -700,7 +717,7 @@ pub mod tests { .with_id(50, H256::from_low_u64_be(50)) .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }) .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), - 1024, test_id(100) + PruningStrategy::ByDepth(1024), test_id(100) ).value_at_block(&ComplexBlockId::new(H256::from_low_u64_be(2), 100)).unwrap(), None); // when block is later than last finalized block AND there are no forks AND finalized value is Some @@ -710,7 +727,7 @@ pub 
mod tests { .with_meta(Some(test_id(100)), Vec::new()) .with_id(50, H256::from_low_u64_be(50)) .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }), - 1024, test_id(100) + PruningStrategy::ByDepth(1024), test_id(100) ).value_at_block(&test_id(200)).unwrap(), Some((test_id(100), 100))); // when block is later than last finalized block AND there are no matching forks @@ -726,7 +743,7 @@ pub mod tests { .with_header(test_header(3)) .with_header(test_header(4)) .with_header(fork_header(0, 2, 3)), - 1024, test_id(2) + PruningStrategy::ByDepth(1024), test_id(2) ).value_at_block(&fork_id(0, 2, 3)).unwrap(), Some((correct_id(2), 2))); // when block is later than last finalized block AND there are no matching forks // AND block is not connected to finalized block @@ -743,7 +760,7 @@ pub mod tests { .with_header(test_header(4)) .with_header(fork_header(0, 1, 3)) .with_header(fork_header(0, 1, 2)), - 1024, test_id(2) + PruningStrategy::ByDepth(1024), test_id(2) ).value_at_block(&fork_id(0, 1, 3)).unwrap(), None); // when block is later than last finalized block AND it appends to unfinalized fork from the end @@ -756,7 +773,7 @@ pub mod tests { .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 }) .with_header(test_header(4)) .with_header(test_header(5)), - 1024, test_id(2) + PruningStrategy::ByDepth(1024), test_id(2) ).value_at_block(&correct_id(5)).unwrap(), Some((correct_id(4), 4))); // when block is later than last finalized block AND it does not fits unfinalized fork // AND it is connected to the finalized block AND finalized value is Some @@ -771,7 +788,7 @@ pub mod tests { .with_header(test_header(3)) .with_header(test_header(4)) .with_header(fork_header(0, 2, 3)), - 1024, test_id(2) + PruningStrategy::ByDepth(1024), test_id(2) ).value_at_block(&fork_id(0, 2, 3)).unwrap(), Some((correct_id(2), 2))); } @@ -781,7 +798,7 @@ pub mod tests { let fin = EntryType::Final; // when trying to insert block 
< finalized number - assert!(ListCache::new(DummyStorage::new(), 1024, test_id(100)) + assert!(ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)) .on_block_insert( &mut DummyTransaction::new(), test_id(49), @@ -790,7 +807,7 @@ pub mod tests { nfin, ).unwrap().is_none()); // when trying to insert block @ finalized number - assert!(ListCache::new(DummyStorage::new(), 1024, test_id(100)) + assert!(ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)) .on_block_insert( &mut DummyTransaction::new(), test_id(99), @@ -805,7 +822,7 @@ pub mod tests { DummyStorage::new() .with_meta(None, vec![test_id(4)]) .with_entry(test_id(4), StorageEntry { prev_valid_from: None, value: 4 }), - 1024, test_id(2) + PruningStrategy::ByDepth(1024), test_id(2) ); cache.unfinalized[0].best_block = Some(test_id(4)); let mut tx = DummyTransaction::new(); @@ -830,7 +847,7 @@ pub mod tests { .with_meta(None, vec![correct_id(4)]) .with_entry(correct_id(4), StorageEntry { prev_valid_from: None, value: 4 }) .with_header(test_header(4)), - 1024, test_id(2) + PruningStrategy::ByDepth(1024), test_id(2) ); let mut tx = DummyTransaction::new(); assert_eq!(cache.on_block_insert(&mut tx, correct_id(4), correct_id(5), Some(4), nfin).unwrap(), @@ -856,7 +873,7 @@ pub mod tests { .with_header(test_header(2)) .with_header(test_header(3)) .with_header(test_header(4)), - 1024, correct_id(2) + PruningStrategy::ByDepth(1024), correct_id(2) ); let mut tx = DummyTransaction::new(); assert_eq!(cache.on_block_insert(&mut tx, correct_id(3), fork_id(0, 3, 4), Some(14), nfin).unwrap(), @@ -871,7 +888,7 @@ pub mod tests { DummyStorage::new() .with_meta(Some(correct_id(2)), vec![]) .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }), - 1024, correct_id(2) + PruningStrategy::ByDepth(1024), correct_id(2) ); let mut tx = DummyTransaction::new(); assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), nfin).unwrap(), 
None); @@ -884,7 +901,7 @@ pub mod tests { DummyStorage::new() .with_meta(Some(correct_id(2)), vec![]) .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }), - 1024, correct_id(2) + PruningStrategy::ByDepth(1024), correct_id(2) ); let mut tx = DummyTransaction::new(); assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), nfin).unwrap(), @@ -894,7 +911,7 @@ pub mod tests { assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(2)), unfinalized: vec![correct_id(3)] })); // when inserting finalized entry AND there are no previous finalized entries - let cache = ListCache::new(DummyStorage::new(), 1024, correct_id(2)); + let cache = ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), correct_id(2)); let mut tx = DummyTransaction::new(); assert_eq!( cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), fin).unwrap(), @@ -912,7 +929,7 @@ pub mod tests { DummyStorage::new() .with_meta(Some(correct_id(2)), vec![]) .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }), - 1024, correct_id(2) + PruningStrategy::ByDepth(1024), correct_id(2) ); let mut tx = DummyTransaction::new(); assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), fin).unwrap(), @@ -940,7 +957,7 @@ pub mod tests { .with_meta(Some(correct_id(2)), vec![fork_id(0, 1, 3)]) .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: None, value: 13 }), - 1024, correct_id(2) + PruningStrategy::ByDepth(1024), correct_id(2) ); let mut tx = DummyTransaction::new(); assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), fin).unwrap(), @@ -955,7 +972,7 @@ pub mod tests { .with_meta(Some(correct_id(2)), vec![correct_id(5)]) .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) .with_entry(correct_id(5), StorageEntry { prev_valid_from: 
Some(correct_id(2)), value: 5 }), - 1024, correct_id(2) + PruningStrategy::ByDepth(1024), correct_id(2) ); let mut tx = DummyTransaction::new(); assert_eq!(cache.on_block_finalize(&mut tx, correct_id(2), correct_id(3)).unwrap(), @@ -969,7 +986,7 @@ pub mod tests { .with_meta(Some(correct_id(2)), vec![correct_id(5)]) .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }), - 1024, correct_id(4) + PruningStrategy::ByDepth(1024), correct_id(4) ); let mut tx = DummyTransaction::new(); assert_eq!( @@ -989,7 +1006,7 @@ pub mod tests { .with_meta(Some(correct_id(2)), vec![fork_id(0, 1, 3)]) .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: None, value: 13 }), - 1024, correct_id(2) + PruningStrategy::ByDepth(1024), correct_id(2) ); let mut tx = DummyTransaction::new(); assert_eq!(cache.on_block_finalize(&mut tx, correct_id(2), correct_id(3)).unwrap(), @@ -1004,7 +1021,7 @@ pub mod tests { .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }) .with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(5)), value: 6 }), - 1024, correct_id(2) + PruningStrategy::ByDepth(1024), correct_id(2) ); // when new block is appended to unfinalized fork @@ -1043,7 +1060,7 @@ pub mod tests { .with_header(test_header(3)) .with_header(test_header(4)) .with_header(test_header(5)), - 1024, correct_id(0) + PruningStrategy::ByDepth(1024), correct_id(0) ).find_unfinalized_fork(&correct_id(4)).unwrap().unwrap().head.valid_from, correct_id(5)); // --- [2] ---------------> [5] // ----------> [3] ---> 4 @@ -1060,7 +1077,7 @@ pub mod tests { .with_header(fork_header(0, 1, 2)) .with_header(fork_header(0, 1, 3)) .with_header(fork_header(0, 1, 4)), - 1024, correct_id(0) + 
PruningStrategy::ByDepth(1024), correct_id(0) ).find_unfinalized_fork(&fork_id(0, 1, 4)).unwrap().unwrap().head.valid_from, fork_id(0, 1, 3)); // --- [2] ---------------> [5] // ----------> [3] @@ -1080,7 +1097,7 @@ pub mod tests { .with_header(fork_header(1, 1, 2)) .with_header(fork_header(1, 1, 3)) .with_header(fork_header(1, 1, 4)), - 1024, correct_id(0) + PruningStrategy::ByDepth(1024), correct_id(0) ).find_unfinalized_fork(&fork_id(1, 1, 4)).unwrap().is_none()); } @@ -1349,7 +1366,7 @@ pub mod tests { .with_entry(test_id(10), StorageEntry { prev_valid_from: None, value: 10 }) .with_entry(test_id(20), StorageEntry { prev_valid_from: Some(test_id(10)), value: 20 }) .with_entry(test_id(30), StorageEntry { prev_valid_from: Some(test_id(20)), value: 30 }), - 10, test_id(9)); + PruningStrategy::ByDepth(10), test_id(9)); let mut tx = DummyTransaction::new(); // when finalizing entry #10: no entries pruned diff --git a/core/client/db/src/cache/mod.rs b/core/client/db/src/cache/mod.rs index bdb794c7243f5..34992b5fbcf0b 100644 --- a/core/client/db/src/cache/mod.rs +++ b/core/client/db/src/cache/mod.rs @@ -21,7 +21,7 @@ use parking_lot::RwLock; use kvdb::{KeyValueDB, DBTransaction}; -use client::blockchain::Cache as BlockchainCache; +use client::blockchain::{well_known_cache_keys, Cache as BlockchainCache}; use client::well_known_cache_keys::Id as CacheKeyId; use client::error::Result as ClientResult; use parity_codec::{Encode, Decode}; @@ -29,7 +29,7 @@ use runtime_primitives::generic::BlockId; use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}; use crate::utils::{self, COLUMN_META, db_err}; -use self::list_cache::ListCache; +use self::list_cache::{ListCache, PruningStrategy}; mod list_cache; mod list_entry; @@ -166,7 +166,7 @@ fn get_cache_helper<'a, Block: BlockT>( cache, }, ), - PRUNE_DEPTH.into(), + cache_pruning_strategy(name), best_finalized_block.clone(), ) }) @@ -325,3 +325,10 @@ impl BlockchainCache for DbCacheSync { } } +/// 
Get pruning strategy for given cache. +fn cache_pruning_strategy>(cache: CacheKeyId) -> PruningStrategy { + match cache { + well_known_cache_keys::CHANGES_TRIE_CONFIG => PruningStrategy::NeverPrune, + _ => PruningStrategy::ByDepth(PRUNE_DEPTH.into()), + } +} From afda8227e47ea48542eb50fc6b4202ea78fb46c6 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 3 Jul 2019 16:03:37 +0300 Subject: [PATCH 13/63] removed redundant code --- core/client/db/src/changes_tries_storage.rs | 13 ++- core/client/db/src/lib.rs | 5 +- core/client/src/backend.rs | 8 +- core/client/src/call_executor.rs | 5 +- core/client/src/client.rs | 16 ++-- core/client/src/in_mem.rs | 68 ++------------- core/client/src/light/backend.rs | 7 +- core/client/src/light/fetcher.rs | 2 +- .../src/changes_trie/changes_iterator.rs | 82 ++++++++----------- core/state-machine/src/changes_trie/mod.rs | 2 + .../state-machine/src/changes_trie/storage.rs | 15 ++-- 11 files changed, 88 insertions(+), 135 deletions(-) diff --git a/core/client/db/src/changes_tries_storage.rs b/core/client/db/src/changes_tries_storage.rs index 75a53174668a8..13eb7908e3843 100644 --- a/core/client/db/src/changes_tries_storage.rs +++ b/core/client/db/src/changes_tries_storage.rs @@ -169,6 +169,10 @@ impl client::backend::PrunableStateChangesTrieStorage, { + fn storage(&self) -> &dyn state_machine::ChangesTrieStorage> { + self + } + fn configuration_at( &self, at: &BlockId, @@ -269,6 +273,10 @@ impl state_machine::ChangesTrieStorage> where Block: BlockT, { + fn as_roots_storage(&self) -> &dyn state_machine::ChangesTrieRootsStorage> { + self + } + fn get(&self, key: &H256, _prefix: &[u8]) -> Result, String> { self.db.get(self.changes_tries_column, &key[..]) .map_err(|err| format!("{}", err)) @@ -299,8 +307,9 @@ mod tests { }; assert_eq!(backend.changes_tries_storage.root(&anchor, block), Ok(Some(changes_root))); + let storage = backend.changes_tries_storage.storage(); for (key, (val, _)) in changes_trie_update.drain() { - 
assert_eq!(backend.changes_trie_storage().unwrap().get(&key, &[]), Ok(Some(val))); + assert_eq!(storage.get(&key, &[]), Ok(Some(val))); } }; @@ -582,7 +591,7 @@ mod tests { let block7 = insert_header_with_configuration_change(&backend, 7, block6, Vec::new(), config_at_7.clone()); // test configuration cache - let storage = backend.changes_trie_storage().unwrap(); + let storage = &backend.changes_tries_storage; assert_eq!( storage.configuration_at(&BlockId::Hash(block1)).unwrap().2, config_at_1.clone(), diff --git a/core/client/db/src/lib.rs b/core/client/db/src/lib.rs index f33bbc9e962fd..4fbe22ac4b097 100644 --- a/core/client/db/src/lib.rs +++ b/core/client/db/src/lib.rs @@ -39,7 +39,7 @@ use std::collections::HashMap; use client::backend::NewBlockState; use client::blockchain::HeaderBackend; use client::ExecutionStrategies; -use client::backend::{StorageCollection, ChildStorageCollection}; +use client::backend::{StorageCollection, ChildStorageCollection, PrunableStateChangesTrieStorage}; use parity_codec::{Decode, Encode}; use hash_db::Hasher; use kvdb::{KeyValueDB, DBTransaction}; @@ -1096,7 +1096,6 @@ impl client::backend::Backend for Backend whe type BlockImportOperation = BlockImportOperation; type Blockchain = BlockchainDb; type State = CachingState, Block>; - type ChangesTrieStorage = DbChangesTrieStorage; fn begin_operation(&self) -> Result { let old_state = self.state_at(BlockId::Hash(Default::default()))?; @@ -1167,7 +1166,7 @@ impl client::backend::Backend for Backend whe Ok(()) } - fn changes_trie_storage(&self) -> Option<&Self::ChangesTrieStorage> { + fn changes_trie_storage(&self) -> Option<&dyn PrunableStateChangesTrieStorage> { Some(&self.changes_tries_storage) } diff --git a/core/client/src/backend.rs b/core/client/src/backend.rs index e6804f55c5a07..2ab0858047553 100644 --- a/core/client/src/backend.rs +++ b/core/client/src/backend.rs @@ -138,8 +138,6 @@ pub trait Backend: AuxStore + Send + Sync where type Blockchain: crate::blockchain::Backend; 
/// Associated state backend type. type State: StateBackend; - /// Changes trie storage. - type ChangesTrieStorage: PrunableStateChangesTrieStorage; /// Begin a new block insertion transaction with given parent block id. /// When constructing the genesis, this is called with all-zero hash. @@ -156,7 +154,7 @@ pub trait Backend: AuxStore + Send + Sync where /// Returns the used state cache, if existent. fn used_state_cache_size(&self) -> Option; /// Returns reference to changes trie storage. - fn changes_trie_storage(&self) -> Option<&Self::ChangesTrieStorage>; + fn changes_trie_storage(&self) -> Option<&dyn PrunableStateChangesTrieStorage>; /// Returns true if state for given block is available. fn have_state_at(&self, hash: &Block::Hash, _number: NumberFor) -> bool { self.state_at(BlockId::Hash(hash.clone())).is_ok() @@ -199,6 +197,8 @@ pub trait Backend: AuxStore + Send + Sync where pub trait PrunableStateChangesTrieStorage: StateChangesTrieStorage> { + /// Get reference to StateChangesTrieStorage. + fn storage(&self) -> &dyn StateChangesTrieStorage>; /// Get coniguration at given block. 
fn configuration_at(&self, at: &BlockId) -> error::Result<( NumberFor, @@ -239,7 +239,7 @@ pub fn changes_tries_state_at_block<'a, B: Backend, Block: BlockT, H: H: Hasher, { let changes_trie_storage = match backend.changes_trie_storage() { - Some(changes_trie_storage) => changes_trie_storage, + Some(changes_trie_storage) => changes_trie_storage.storage(), None => return Ok(None), }; diff --git a/core/client/src/call_executor.rs b/core/client/src/call_executor.rs index fac5baf565cc1..d36c48d815505 100644 --- a/core/client/src/call_executor.rs +++ b/core/client/src/call_executor.rs @@ -319,7 +319,10 @@ where side_effects_handler: Option<&mut O>, ) -> error::Result<(NativeOrEncoded, S::Transaction, Option>)> { let changes_trie_state = match self.backend.changes_trie_storage() { - Some(changes_trie_storage) => backend::changes_tries_state_at_state::<_, Block, _>(state, changes_trie_storage)?, + Some(changes_trie_storage) => backend::changes_tries_state_at_state::<_, Block, _>( + state, + changes_trie_storage.storage(), + )?, None => None, }; state_machine::new( diff --git a/core/client/src/client.rs b/core/client/src/client.rs index 5605a0adfd18b..3424a44a92679 100644 --- a/core/client/src/client.rs +++ b/core/client/src/client.rs @@ -61,7 +61,7 @@ use hash_db::Hasher; use crate::backend::{ self, BlockImportOperation, PrunableStateChangesTrieStorage, - StorageCollection, ChildStorageCollection + StorageCollection, ChildStorageCollection, }; use crate::blockchain::{ self, Info as ChainInfo, Backend as ChainBackend, @@ -538,9 +538,9 @@ impl Client where let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?; let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?; - key_changes::<_, Blake2Hasher, _>( + key_changes::( &config, - &*storage, + storage.storage(), activation_block, first, &ChangesTrieAnchorBlockId { @@ -617,6 +617,10 @@ impl Client where } impl<'a, Block: BlockT> ChangesTrieStorage> for AccessedRootsRecorder<'a, 
Block> { + fn as_roots_storage(&self) -> &ChangesTrieRootsStorage> { + self + } + fn get(&self, key: &H256, prefix: &[u8]) -> Result, String> { self.storage.get(key, prefix) } @@ -626,7 +630,7 @@ impl Client where let min_number = self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(min))?; let recording_storage = AccessedRootsRecorder:: { - storage, + storage: storage.storage(), min: min_number, required_roots_proofs: Mutex::new(BTreeMap::new()), }; @@ -641,7 +645,7 @@ impl Client where .expect_block_number_from_id(&BlockId::Hash(first))?; let last_number = self.backend.blockchain() .expect_block_number_from_id(&BlockId::Hash(last))?; - let key_changes_proof = key_changes_proof::<_, Blake2Hasher, _>( + let key_changes_proof = key_changes_proof::( &config, &recording_storage, activation_block, @@ -709,7 +713,7 @@ impl Client where } /// Returns changes trie configuration and storage or an error if it is not supported. - fn require_changes_trie(&self) -> error::Result<(NumberFor, ChangesTrieConfiguration, &B::ChangesTrieStorage)> { + fn require_changes_trie(&self) -> error::Result<(NumberFor, ChangesTrieConfiguration, &PrunableStateChangesTrieStorage)> { let best_block = self.backend.blockchain().info().best_hash; let storage = match self.backend.changes_trie_storage() { Some(storage) => storage, diff --git a/core/client/src/in_mem.rs b/core/client/src/in_mem.rs index a839554f1986d..7053f6b9830bd 100644 --- a/core/client/src/in_mem.rs +++ b/core/client/src/in_mem.rs @@ -19,12 +19,11 @@ use std::collections::HashMap; use std::sync::Arc; use parking_lot::{RwLock, Mutex}; -use primitives::{ChangesTrieConfiguration, storage::well_known_keys}; -use runtime_primitives::generic::{BlockId, DigestItem}; +use primitives::storage::well_known_keys; +use runtime_primitives::generic::BlockId; use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, Zero, NumberFor}; use runtime_primitives::{Justification, StorageOverlay, ChildrenStorageOverlay}; -use 
state_machine::backend::{Backend as StateBackend, InMemory}; -use state_machine::{self, InMemoryChangesTrieStorage, ChangesTrieAnchorBlockId}; +use state_machine::{self, backend::{Backend as StateBackend, InMemory}}; use hash_db::Hasher; use trie::MemoryDB; @@ -539,7 +538,6 @@ where H::Out: Ord, { states: RwLock>>, - changes_trie_storage: ChangesTrieStorage, blockchain: Blockchain, import_lock: Mutex<()>, } @@ -554,7 +552,6 @@ where pub fn new() -> Backend { Backend { states: RwLock::new(HashMap::new()), - changes_trie_storage: ChangesTrieStorage(InMemoryChangesTrieStorage::new()), blockchain: Blockchain::new(), import_lock: Default::default(), } @@ -591,7 +588,6 @@ where type BlockImportOperation = BlockImportOperation; type Blockchain = Blockchain; type State = InMemory; - type ChangesTrieStorage = ChangesTrieStorage; fn begin_operation(&self) -> error::Result { let old_state = self.state_at(BlockId::Hash(Default::default()))?; @@ -627,7 +623,7 @@ where self.states.write().insert(hash, operation.new_state.unwrap_or_else(|| old_state.clone())); - let maybe_changes_trie_root = header.digest().log(DigestItem::as_changes_trie_root).cloned(); +/* let maybe_changes_trie_root = header.digest().log(DigestItem::as_changes_trie_root).cloned(); if let Some(changes_trie_root) = maybe_changes_trie_root { if let Some(changes_trie_update) = operation.changes_trie_update { self.changes_trie_storage.0.insert( @@ -636,7 +632,7 @@ where changes_trie_update ); } - } + }*/ self.blockchain.insert(hash, header, justification, body, pending_block.state)?; } @@ -664,8 +660,8 @@ where None } - fn changes_trie_storage(&self) -> Option<&Self::ChangesTrieStorage> { - Some(&self.changes_trie_storage) + fn changes_trie_storage(&self) -> Option<&dyn backend::PrunableStateChangesTrieStorage> { + None } fn state_at(&self, block: BlockId) -> error::Result { @@ -711,56 +707,6 @@ where } } -/// Prunable in-memory changes trie storage. 
-pub struct ChangesTrieStorage(InMemoryChangesTrieStorage>); -impl backend::PrunableStateChangesTrieStorage for ChangesTrieStorage { - fn configuration_at(&self, _at: &BlockId) -> error::Result<( - NumberFor, - Block::Hash, - Option, - )> { - unimplemented!("TODO: do we still need this?") - } - fn oldest_changes_trie_block( - &self, - _config: &ChangesTrieConfiguration, - _best_finalized: NumberFor, - ) -> NumberFor { - Zero::zero() - } -} - -impl state_machine::ChangesTrieRootsStorage> for ChangesTrieStorage - where - Block: BlockT, - H: Hasher, -{ - fn build_anchor( - &self, - _hash: H::Out, - ) -> Result>, String> { - Err("Dummy implementation".into()) - } - - fn root( - &self, - _anchor: &ChangesTrieAnchorBlockId>, - _block: NumberFor, - ) -> Result, String> { - Err("Dummy implementation".into()) - } -} - -impl state_machine::ChangesTrieStorage> for ChangesTrieStorage - where - Block: BlockT, - H: Hasher, -{ - fn get(&self, _key: &H::Out, _prefix: &[u8]) -> Result, String> { - Err("Dummy implementation".into()) - } -} - /// Check that genesis storage is valid. 
pub fn check_genesis_storage(top: &StorageOverlay, children: &ChildrenStorageOverlay) -> error::Result<()> { if top.iter().any(|(k, _)| well_known_keys::is_child_storage_key(k)) { diff --git a/core/client/src/light/backend.rs b/core/client/src/light/backend.rs index 87e9a4b258a09..a0b84f36ea5f8 100644 --- a/core/client/src/light/backend.rs +++ b/core/client/src/light/backend.rs @@ -25,10 +25,10 @@ use parking_lot::{RwLock, Mutex}; use runtime_primitives::{generic::BlockId, Justification, StorageOverlay, ChildrenStorageOverlay}; use state_machine::{Backend as StateBackend, TrieBackend, backend::InMemory as InMemoryState}; use runtime_primitives::traits::{Block as BlockT, NumberFor, Zero, Header}; -use crate::in_mem::{self, check_genesis_storage}; +use crate::in_mem::check_genesis_storage; use crate::backend::{ AuxStore, Backend as ClientBackend, BlockImportOperation, RemoteBackend, NewBlockState, - StorageCollection, ChildStorageCollection, + StorageCollection, ChildStorageCollection, PrunableStateChangesTrieStorage, }; use crate::blockchain::{HeaderBackend as BlockchainHeaderBackend, well_known_cache_keys}; use crate::error::{Error as ClientError, Result as ClientResult}; @@ -116,7 +116,6 @@ impl ClientBackend for Backend where type BlockImportOperation = ImportOperation; type Blockchain = Blockchain; type State = OnDemandOrGenesisState; - type ChangesTrieStorage = in_mem::ChangesTrieStorage; fn begin_operation(&self) -> ClientResult { Ok(ImportOperation { @@ -190,7 +189,7 @@ impl ClientBackend for Backend where None } - fn changes_trie_storage(&self) -> Option<&Self::ChangesTrieStorage> { + fn changes_trie_storage(&self) -> Option<&PrunableStateChangesTrieStorage> { None } diff --git a/core/client/src/light/fetcher.rs b/core/client/src/light/fetcher.rs index cf48c14568894..6f8228fa16511 100644 --- a/core/client/src/light/fetcher.rs +++ b/core/client/src/light/fetcher.rs @@ -284,7 +284,7 @@ impl, F> LightDataChecker( + key_changes_proof_check::( 
&request.changes_trie_config, &RootsStorage { roots: (request.tries_roots.0, &request.tries_roots.2), diff --git a/core/state-machine/src/changes_trie/changes_iterator.rs b/core/state-machine/src/changes_trie/changes_iterator.rs index f2856cd7063aa..30114be01865f 100644 --- a/core/state-machine/src/changes_trie/changes_iterator.rs +++ b/core/state-machine/src/changes_trie/changes_iterator.rs @@ -32,22 +32,22 @@ use crate::trie_backend_essence::{TrieBackendEssence}; /// Return changes of given key at given blocks range. /// `max` is the number of best known block. /// Changes are returned in descending order (i.e. last block comes first). -pub fn key_changes<'a, S: Storage, H: Hasher, Number: BlockNumber>( +pub fn key_changes<'a, H: Hasher, Number: BlockNumber>( config: &'a Configuration, - storage: &'a S, + storage: &'a dyn Storage, zero: Number, begin: Number, end: &'a AnchorBlockId, max: Number, key: &'a [u8], -) -> Result, String> { +) -> Result, String> { // we can't query any roots before root let max = ::std::cmp::min(max.clone(), end.number.clone()); Ok(DrilldownIterator { essence: DrilldownIteratorEssence { key, - roots_storage: storage, + roots_storage: storage.as_roots_storage(), storage, begin: begin.clone(), end, @@ -69,9 +69,9 @@ pub fn key_changes<'a, S: Storage, H: Hasher, Number: BlockNumber>( /// Returns proof of changes of given key at given blocks range. /// `max` is the number of best known block. 
-pub fn key_changes_proof, H: Hasher, Number: BlockNumber>( +pub fn key_changes_proof( config: &Configuration, - storage: &S, + storage: &dyn Storage, zero: Number, begin: Number, end: &AnchorBlockId, @@ -84,7 +84,7 @@ pub fn key_changes_proof, H: Hasher, Number: BlockNumber>( let mut iter = ProvingDrilldownIterator { essence: DrilldownIteratorEssence { key, - roots_storage: storage.clone(), + roots_storage: storage.as_roots_storage(), storage, begin: begin.clone(), end, @@ -115,9 +115,9 @@ pub fn key_changes_proof, H: Hasher, Number: BlockNumber>( /// Check key changes proog and return changes of the key at given blocks range. /// `max` is the number of best known block. /// Changes are returned in descending order (i.e. last block comes first). -pub fn key_changes_proof_check, H: Hasher, Number: BlockNumber>( +pub fn key_changes_proof_check( config: &Configuration, - roots_storage: &S, + roots_storage: &dyn RootsStorage, proof: Vec>, zero: Number, begin: Number, @@ -213,17 +213,15 @@ impl<'a, Number: BlockNumber> Iterator for SurfaceIterator<'a, Number> { /// Drilldown iterator - receives 'digest points' from surface iterator and explores /// every point until extrinsic is found. 
-pub struct DrilldownIteratorEssence<'a, RS, S, H, Number> +pub struct DrilldownIteratorEssence<'a, H, Number> where - RS: 'a + RootsStorage, - S: 'a + Storage, H: Hasher, Number: BlockNumber, H::Out: 'a, { key: &'a [u8], - roots_storage: &'a RS, - storage: &'a S, + roots_storage: &'a dyn RootsStorage, + storage: &'a dyn Storage, begin: Number, end: &'a AnchorBlockId, surface: SurfaceIterator<'a, Number>, @@ -234,17 +232,15 @@ pub struct DrilldownIteratorEssence<'a, RS, S, H, Number> _hasher: ::std::marker::PhantomData, } -impl<'a, RS, S, H, Number> DrilldownIteratorEssence<'a, RS, S, H, Number> +impl<'a, H, Number> DrilldownIteratorEssence<'a, H, Number> where - RS: 'a + RootsStorage, - S: 'a + Storage, H: Hasher, Number: BlockNumber, H::Out: 'a, { pub fn next(&mut self, trie_reader: F) -> Option> where - F: FnMut(&S, H::Out, &[u8]) -> Result>, String>, + F: FnMut(&dyn Storage, H::Out, &[u8]) -> Result>, String>, { match self.do_next(trie_reader) { Ok(Some(res)) => Some(Ok(res)), @@ -255,7 +251,7 @@ impl<'a, RS, S, H, Number> DrilldownIteratorEssence<'a, RS, S, H, Number> fn do_next(&mut self, mut trie_reader: F) -> Result, String> where - F: FnMut(&S, H::Out, &[u8]) -> Result>, String>, + F: FnMut(&dyn Storage, H::Out, &[u8]) -> Result>, String>, { loop { if let Some((block, extrinsic)) = self.extrinsics.pop_front() { @@ -275,7 +271,7 @@ impl<'a, RS, S, H, Number> DrilldownIteratorEssence<'a, RS, S, H, Number> debug_assert!(block >= self.begin, "We shall not touch digests earlier than a range' begin"); if block <= self.end.number { let extrinsics_key = ExtrinsicIndex { block: block.clone(), key: self.key.to_vec() }.encode(); - let extrinsics = trie_reader(&self.storage, trie_root, &extrinsics_key); + let extrinsics = trie_reader(self.storage, trie_root, &extrinsics_key); if let Some(extrinsics) = extrinsics? 
{ let extrinsics: Option = Decode::decode(&mut &extrinsics[..]); if let Some(extrinsics) = extrinsics { @@ -285,7 +281,7 @@ impl<'a, RS, S, H, Number> DrilldownIteratorEssence<'a, RS, S, H, Number> } let blocks_key = DigestIndex { block: block.clone(), key: self.key.to_vec() }.encode(); - let blocks = trie_reader(&self.storage, trie_root, &blocks_key); + let blocks = trie_reader(self.storage, trie_root, &blocks_key); if let Some(blocks) = blocks? { let blocks: Option> = Decode::decode(&mut &blocks[..]); if let Some(blocks) = blocks { @@ -314,19 +310,17 @@ impl<'a, RS, S, H, Number> DrilldownIteratorEssence<'a, RS, S, H, Number> } /// Exploring drilldown operator. -pub struct DrilldownIterator<'a, RS, S, H, Number> +pub struct DrilldownIterator<'a, H, Number> where Number: BlockNumber, H: Hasher, - S: 'a + Storage, - RS: 'a + RootsStorage, H::Out: 'a, { - essence: DrilldownIteratorEssence<'a, RS, S, H, Number>, + essence: DrilldownIteratorEssence<'a, H, Number>, } -impl<'a, RS: 'a + RootsStorage, S: Storage, H: Hasher, Number: BlockNumber> Iterator - for DrilldownIterator<'a, RS, S, H, Number> +impl<'a, H: Hasher, Number: BlockNumber> Iterator + for DrilldownIterator<'a, H, Number> { type Item = Result<(Number, u32), String>; @@ -337,24 +331,20 @@ impl<'a, RS: 'a + RootsStorage, S: Storage, H: Hasher, Num } /// Proving drilldown iterator. 
-struct ProvingDrilldownIterator<'a, RS, S, H, Number> +struct ProvingDrilldownIterator<'a, H, Number> where Number: BlockNumber, H: Hasher, - S: 'a + Storage, - RS: 'a + RootsStorage, H::Out: 'a, { - essence: DrilldownIteratorEssence<'a, RS, S, H, Number>, + essence: DrilldownIteratorEssence<'a, H, Number>, proof_recorder: RefCell>, } -impl<'a, RS, S, H, Number> ProvingDrilldownIterator<'a, RS, S, H, Number> +impl<'a, H, Number> ProvingDrilldownIterator<'a, H, Number> where Number: BlockNumber, H: Hasher, - S: 'a + Storage, - RS: 'a + RootsStorage, H::Out: 'a, { /// Consume the iterator, extracting the gathered proof in lexicographical order @@ -367,12 +357,10 @@ impl<'a, RS, S, H, Number> ProvingDrilldownIterator<'a, RS, S, H, Number> } } -impl<'a, RS, S, H, Number> Iterator for ProvingDrilldownIterator<'a, RS, S, H, Number> +impl<'a, H, Number> Iterator for ProvingDrilldownIterator<'a, H, Number> where Number: BlockNumber, H: Hasher, - S: 'a + Storage, - RS: 'a + RootsStorage, H::Out: 'a, { type Item = Result<(Number, u32), String>; @@ -562,27 +550,27 @@ mod tests { #[test] fn drilldown_iterator_works() { let (config, storage) = prepare_for_drilldown(); - let drilldown_result = key_changes::, Blake2Hasher, u64>( + let drilldown_result = key_changes::( &config, &storage, 0, 0, &AnchorBlockId { hash: Default::default(), number: 16 }, 16, &[42]) .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(8, 2), (8, 1), (6, 3), (3, 0)])); - let drilldown_result = key_changes::, Blake2Hasher, u64>( + let drilldown_result = key_changes::( &config, &storage, 0, 0, &AnchorBlockId { hash: Default::default(), number: 2 }, 4, &[42]) .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![])); - let drilldown_result = key_changes::, Blake2Hasher, u64>( + let drilldown_result = key_changes::( &config, &storage, 0, 0, &AnchorBlockId { hash: Default::default(), number: 3 }, 4, &[42]) .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(3, 
0)])); - let drilldown_result = key_changes::, Blake2Hasher, u64>( + let drilldown_result = key_changes::( &config, &storage, 0, 7, &AnchorBlockId { hash: Default::default(), number: 8 }, 8, &[42]) .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(8, 2), (8, 1)])); - let drilldown_result = key_changes::, Blake2Hasher, u64>( + let drilldown_result = key_changes::( &config, &storage, 0, 5, &AnchorBlockId { hash: Default::default(), number: 7 }, 8, &[42]) .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(6, 3)])); @@ -593,7 +581,7 @@ mod tests { let (config, storage) = prepare_for_drilldown(); storage.clear_storage(); - assert!(key_changes::, Blake2Hasher, u64>( + assert!(key_changes::( &config, &storage, 0, 0, &AnchorBlockId { hash: Default::default(), number: 100 }, 1000, &[42]) .and_then(|i| i.collect::, _>>()).is_err()); } @@ -601,9 +589,9 @@ mod tests { #[test] fn drilldown_iterator_fails_when_range_is_invalid() { let (config, storage) = prepare_for_drilldown(); - assert!(key_changes::, Blake2Hasher, u64>( + assert!(key_changes::( &config, &storage, 0, 0, &AnchorBlockId { hash: Default::default(), number: 100 }, 50, &[42]).is_err()); - assert!(key_changes::, Blake2Hasher, u64>( + assert!(key_changes::( &config, &storage, 0, 20, &AnchorBlockId { hash: Default::default(), number: 10 }, 100, &[42]).is_err()); } @@ -614,7 +602,7 @@ mod tests { // create drilldown iterator that records all trie nodes during drilldown let (remote_config, remote_storage) = prepare_for_drilldown(); - let remote_proof = key_changes_proof::, Blake2Hasher, u64>( + let remote_proof = key_changes_proof::( &remote_config, &remote_storage, 0, 0, &AnchorBlockId { hash: Default::default(), number: 16 }, 16, &[42]).unwrap(); @@ -623,7 +611,7 @@ mod tests { // create drilldown iterator that works the same, but only depends on trie let (local_config, local_storage) = prepare_for_drilldown(); local_storage.clear_storage(); - let local_result = 
key_changes_proof_check::, Blake2Hasher, u64>( + let local_result = key_changes_proof_check::( &local_config, &local_storage, remote_proof, 0, 0, &AnchorBlockId { hash: Default::default(), number: 16 }, 16, &[42]); diff --git a/core/state-machine/src/changes_trie/mod.rs b/core/state-machine/src/changes_trie/mod.rs index b4da513a9c77e..2b3bf618bfbd0 100644 --- a/core/state-machine/src/changes_trie/mod.rs +++ b/core/state-machine/src/changes_trie/mod.rs @@ -119,6 +119,8 @@ pub trait RootsStorage: Send + Sync { /// Changes trie storage. Provides access to trie roots and trie nodes. pub trait Storage: RootsStorage { + /// Casts from self reference to RootsStorage reference. + fn as_roots_storage(&self) -> &dyn RootsStorage; /// Get a trie node. fn get(&self, key: &H::Out, prefix: &[u8]) -> Result, String>; } diff --git a/core/state-machine/src/changes_trie/storage.rs b/core/state-machine/src/changes_trie/storage.rs index 8da205251532c..a3b0655e396ab 100644 --- a/core/state-machine/src/changes_trie/storage.rs +++ b/core/state-machine/src/changes_trie/storage.rs @@ -37,8 +37,8 @@ pub struct InMemoryStorage { } /// Adapter for using changes trie storage as a TrieBackendEssence' storage. 
-pub struct TrieBackendAdapter<'a, H: Hasher, Number: BlockNumber, S: 'a + Storage> { - storage: &'a S, +pub struct TrieBackendAdapter<'a, H: Hasher, Number: BlockNumber> { + storage: &'a Storage, _hasher: ::std::marker::PhantomData<(H, Number)>, } @@ -132,20 +132,23 @@ impl RootsStorage for InMemoryStorage } impl Storage for InMemoryStorage { + fn as_roots_storage(&self) -> &dyn RootsStorage { + self + } + fn get(&self, key: &H::Out, prefix: &[u8]) -> Result, String> { MemoryDB::::get(&self.data.read().mdb, key, prefix) } } -impl<'a, H: Hasher, Number: BlockNumber, S: 'a + Storage> TrieBackendAdapter<'a, H, Number, S> { - pub fn new(storage: &'a S) -> Self { +impl<'a, H: Hasher, Number: BlockNumber> TrieBackendAdapter<'a, H, Number> { + pub fn new(storage: &'a Storage) -> Self { Self { storage, _hasher: Default::default() } } } -impl<'a, H, Number, S> TrieBackendStorage for TrieBackendAdapter<'a, H, Number, S> +impl<'a, H, Number> TrieBackendStorage for TrieBackendAdapter<'a, H, Number> where - S: 'a + Storage, Number: BlockNumber, H: Hasher, { From e08183d3c8d5de616b9b2b205d6507d2b2612751 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 4 Jul 2019 10:19:56 +0300 Subject: [PATCH 14/63] fix some TODOs --- core/client/db/src/changes_tries_storage.rs | 164 ++++++++++++-------- core/client/src/backend.rs | 3 +- core/client/src/client.rs | 2 +- core/client/src/error.rs | 3 + 4 files changed, 107 insertions(+), 65 deletions(-) diff --git a/core/client/db/src/changes_tries_storage.rs b/core/client/db/src/changes_tries_storage.rs index 13eb7908e3843..08d2f085ba186 100644 --- a/core/client/db/src/changes_tries_storage.rs +++ b/core/client/db/src/changes_tries_storage.rs @@ -1,4 +1,4 @@ -// Copyright 2017-2019 Parity Technologies (UK) Ltd. +// Copyright 2019 Parity Technologies (UK) Ltd. // This file is part of Substrate. 
// Substrate is free software: you can redistribute it and/or modify @@ -20,7 +20,7 @@ use std::collections::HashMap; use std::sync::Arc; use kvdb::{KeyValueDB, DBTransaction}; use parity_codec::Encode; -use parking_lot::RwLock; +use parking_lot::{RwLock, RwLockWriteGuard}; use client::error::{Error as ClientError, Result as ClientResult}; use trie::MemoryDB; use client::backend::PrunableStateChangesTrieStorage; @@ -28,7 +28,7 @@ use client::blockchain::{Cache, well_known_cache_keys}; use parity_codec::Decode; use primitives::{H256, Blake2Hasher, ChangesTrieConfiguration, convert_hash}; use runtime_primitives::traits::{ - Block as BlockT, Header as HeaderT, NumberFor, Zero, One, + Block as BlockT, Header as HeaderT, NumberFor, One, }; use runtime_primitives::generic::{BlockId, DigestItem, ChangesTrieSignal}; use state_machine::DBValue; @@ -42,6 +42,17 @@ pub fn extract_new_configuration(header: &Header) -> Option<&Op .and_then(ChangesTrieSignal::as_new_configuration) } +/// Opaque configuration cache transaction. +pub struct DbChangesTrieStorageTransaction<'a, Block: BlockT> { + /// Lock needs to be held between commit and post_commit calls. + lock: RwLockWriteGuard<'a, DbCache>, + /// Cache operations that needs to be performed once tx is committed. + ops: DbCacheTransactionOps, +} + +/// Changes tries storage. +/// +/// Stores all tries in separate DB column. 
pub struct DbChangesTrieStorage { db: Arc, changes_tries_column: Option, @@ -50,7 +61,6 @@ pub struct DbChangesTrieStorage { meta: Arc, Block::Hash>>>, min_blocks_to_keep: Option, cache: DbCacheSync, - _phantom: ::std::marker::PhantomData, } impl> DbChangesTrieStorage { @@ -83,7 +93,6 @@ impl> DbChangesTrieStorage { genesis_hash, ComplexBlockId::new(finalized_hash, finalized_number), ))), - _phantom: Default::default(), } } @@ -96,7 +105,7 @@ impl> DbChangesTrieStorage { block: ComplexBlockId, finalized: bool, new_configuration: Option>, - ) -> ClientResult>> { + ) -> ClientResult>> { // insert changes trie, associated with block, into DB for (key, (val, _)) in changes_trie.drain() { tx.put(self.changes_tries_column, &key[..], &val); @@ -111,21 +120,25 @@ impl> DbChangesTrieStorage { let mut cache_at = HashMap::new(); cache_at.insert(well_known_cache_keys::CHANGES_TRIE_CONFIG, new_configuration.encode()); - Ok(Some(self.cache.0.write().transaction(tx) + let mut cache = self.cache.0.write(); + let cache_ops = cache.transaction(tx) .on_block_insert( parent_block, block, cache_at, if finalized { CacheEntryType::Final } else { CacheEntryType::NonFinal }, )? - .into_ops())) + .into_ops(); + Ok(Some(DbChangesTrieStorageTransaction { + lock: cache, + ops: cache_ops, + })) } /// When transaction has been committed. - pub fn post_commit(&self, cache_ops: Option>) { - // TODO: hold lock between commit + post_commit!!! 
- if let Some(cache_ops) = cache_ops { - self.cache.0.write().commit(cache_ops); + pub fn post_commit(&self, tx: Option>) { + if let Some(mut tx) = tx { + tx.lock.commit(tx.ops); } } @@ -177,22 +190,22 @@ where &self, at: &BlockId, ) -> ClientResult<(NumberFor, Block::Hash, Option)> { - // TODO: deal with errors here - whenever cache have no value for block, or we unable to decode it - return error self.cache .get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, at) .and_then(|(number, hash, encoded)| Decode::decode(&mut &encoded[..]).map(|config| (number, hash, config))) - .ok_or_else(|| ClientError::Backend("TODO".into())) // TODO: specific error + .ok_or_else(|| ClientError::ErrorReadingChangesTriesConfig) } fn oldest_changes_trie_block( &self, - config: &ChangesTrieConfiguration, + config_activation_block: NumberFor, + config: ChangesTrieConfiguration, best_finalized_block: NumberFor, ) -> NumberFor { match self.min_blocks_to_keep { Some(min_blocks_to_keep) => state_machine::oldest_non_pruned_changes_trie( - Zero::zero(), // TODO: not true - config, + config_activation_block, + &config, min_blocks_to_keep.into(), best_finalized_block, ), @@ -397,20 +410,20 @@ mod tests { backend.changes_tries_storage.min_blocks_to_keep = Some(8); // insert some blocks - let block0 = insert_header(&backend, 0, Default::default(), vec![(b"key_at_0".to_vec(), b"val_at_0".to_vec())], Default::default()); - let block1 = insert_header(&backend, 1, block0, vec![(b"key_at_1".to_vec(), b"val_at_1".to_vec())], Default::default()); - let block2 = insert_header(&backend, 2, block1, vec![(b"key_at_2".to_vec(), b"val_at_2".to_vec())], Default::default()); - let block3 = insert_header(&backend, 3, block2, vec![(b"key_at_3".to_vec(), b"val_at_3".to_vec())], Default::default()); - let block4 = insert_header(&backend, 4, block3, vec![(b"key_at_4".to_vec(), b"val_at_4".to_vec())], Default::default()); - let block5 = insert_header(&backend, 5, block4, vec![(b"key_at_5".to_vec(), 
b"val_at_5".to_vec())], Default::default()); - let block6 = insert_header(&backend, 6, block5, vec![(b"key_at_6".to_vec(), b"val_at_6".to_vec())], Default::default()); - let block7 = insert_header(&backend, 7, block6, vec![(b"key_at_7".to_vec(), b"val_at_7".to_vec())], Default::default()); - let block8 = insert_header(&backend, 8, block7, vec![(b"key_at_8".to_vec(), b"val_at_8".to_vec())], Default::default()); - let block9 = insert_header(&backend, 9, block8, vec![(b"key_at_9".to_vec(), b"val_at_9".to_vec())], Default::default()); - let block10 = insert_header(&backend, 10, block9, vec![(b"key_at_10".to_vec(), b"val_at_10".to_vec())], Default::default()); - let block11 = insert_header(&backend, 11, block10, vec![(b"key_at_11".to_vec(), b"val_at_11".to_vec())], Default::default()); - let block12 = insert_header(&backend, 12, block11, vec![(b"key_at_12".to_vec(), b"val_at_12".to_vec())], Default::default()); - let block13 = insert_header(&backend, 13, block12, vec![(b"key_at_13".to_vec(), b"val_at_13".to_vec())], Default::default()); + let mut blocks = Vec::new(); + let mut last_block = Default::default(); + for i in 0u64..14u64 { + let key = i.to_le_bytes().to_vec(); + let val = key.clone(); + last_block = insert_header( + &backend, + i, + last_block, + vec![(key, val)], + Default::default(), + ); + blocks.push(last_block); + } backend.changes_tries_storage.meta.write().finalized_number = 13; backend.changes_tries_storage.cache.initialize( &well_known_cache_keys::CHANGES_TRIE_CONFIG, @@ -418,27 +431,39 @@ mod tests { ).unwrap(); // check that roots of all tries are in the columns::CHANGES_TRIE - let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block13, number: 13 }; + let anchor = state_machine::ChangesTrieAnchorBlockId { hash: blocks[13], number: 13 }; fn read_changes_trie_root(backend: &Backend, num: u64) -> H256 { backend.blockchain().header(BlockId::Number(num)).unwrap().unwrap().digest().logs().iter() .find(|i| 
i.as_changes_trie_root().is_some()).unwrap().as_changes_trie_root().unwrap().clone() } - let root1 = read_changes_trie_root(&backend, 1); assert_eq!(backend.changes_tries_storage.root(&anchor, 1).unwrap(), Some(root1)); - let root2 = read_changes_trie_root(&backend, 2); assert_eq!(backend.changes_tries_storage.root(&anchor, 2).unwrap(), Some(root2)); - let root3 = read_changes_trie_root(&backend, 3); assert_eq!(backend.changes_tries_storage.root(&anchor, 3).unwrap(), Some(root3)); - let root4 = read_changes_trie_root(&backend, 4); assert_eq!(backend.changes_tries_storage.root(&anchor, 4).unwrap(), Some(root4)); - let root5 = read_changes_trie_root(&backend, 5); assert_eq!(backend.changes_tries_storage.root(&anchor, 5).unwrap(), Some(root5)); - let root6 = read_changes_trie_root(&backend, 6); assert_eq!(backend.changes_tries_storage.root(&anchor, 6).unwrap(), Some(root6)); - let root7 = read_changes_trie_root(&backend, 7); assert_eq!(backend.changes_tries_storage.root(&anchor, 7).unwrap(), Some(root7)); - let root8 = read_changes_trie_root(&backend, 8); assert_eq!(backend.changes_tries_storage.root(&anchor, 8).unwrap(), Some(root8)); - let root9 = read_changes_trie_root(&backend, 9); assert_eq!(backend.changes_tries_storage.root(&anchor, 9).unwrap(), Some(root9)); - let root10 = read_changes_trie_root(&backend, 10); assert_eq!(backend.changes_tries_storage.root(&anchor, 10).unwrap(), Some(root10)); - let root11 = read_changes_trie_root(&backend, 11); assert_eq!(backend.changes_tries_storage.root(&anchor, 11).unwrap(), Some(root11)); - let root12 = read_changes_trie_root(&backend, 12); assert_eq!(backend.changes_tries_storage.root(&anchor, 12).unwrap(), Some(root12)); + let root1 = read_changes_trie_root(&backend, 1); + assert_eq!(backend.changes_tries_storage.root(&anchor, 1).unwrap(), Some(root1)); + let root2 = read_changes_trie_root(&backend, 2); + assert_eq!(backend.changes_tries_storage.root(&anchor, 2).unwrap(), Some(root2)); + let root3 = 
read_changes_trie_root(&backend, 3); + assert_eq!(backend.changes_tries_storage.root(&anchor, 3).unwrap(), Some(root3)); + let root4 = read_changes_trie_root(&backend, 4); + assert_eq!(backend.changes_tries_storage.root(&anchor, 4).unwrap(), Some(root4)); + let root5 = read_changes_trie_root(&backend, 5); + assert_eq!(backend.changes_tries_storage.root(&anchor, 5).unwrap(), Some(root5)); + let root6 = read_changes_trie_root(&backend, 6); + assert_eq!(backend.changes_tries_storage.root(&anchor, 6).unwrap(), Some(root6)); + let root7 = read_changes_trie_root(&backend, 7); + assert_eq!(backend.changes_tries_storage.root(&anchor, 7).unwrap(), Some(root7)); + let root8 = read_changes_trie_root(&backend, 8); + assert_eq!(backend.changes_tries_storage.root(&anchor, 8).unwrap(), Some(root8)); + let root9 = read_changes_trie_root(&backend, 9); + assert_eq!(backend.changes_tries_storage.root(&anchor, 9).unwrap(), Some(root9)); + let root10 = read_changes_trie_root(&backend, 10); + assert_eq!(backend.changes_tries_storage.root(&anchor, 10).unwrap(), Some(root10)); + let root11 = read_changes_trie_root(&backend, 11); + assert_eq!(backend.changes_tries_storage.root(&anchor, 11).unwrap(), Some(root11)); + let root12 = read_changes_trie_root(&backend, 12); + assert_eq!(backend.changes_tries_storage.root(&anchor, 12).unwrap(), Some(root12)); // now simulate finalization of block#12, causing prune of tries at #1..#4 let mut tx = DBTransaction::new(); - backend.changes_tries_storage.prune(&mut tx, block0, Default::default(), 12).unwrap(); + backend.changes_tries_storage.prune(&mut tx, blocks[0], Default::default(), 12).unwrap(); backend.storage.db.write(tx).unwrap(); assert!(backend.changes_tries_storage.get(&root1, &[]).unwrap().is_none()); assert!(backend.changes_tries_storage.get(&root2, &[]).unwrap().is_none()); @@ -451,7 +476,7 @@ mod tests { // now simulate finalization of block#16, causing prune of tries at #5..#8 let mut tx = DBTransaction::new(); - 
backend.changes_tries_storage.prune(&mut tx, block0, Default::default(), 16).unwrap(); + backend.changes_tries_storage.prune(&mut tx, blocks[0], Default::default(), 16).unwrap(); backend.storage.db.write(tx).unwrap(); assert!(backend.changes_tries_storage.get(&root5, &[]).unwrap().is_none()); assert!(backend.changes_tries_storage.get(&root6, &[]).unwrap().is_none()); @@ -462,7 +487,7 @@ mod tests { // => no changes tries are pruned, because we never prune in archive mode backend.changes_tries_storage.min_blocks_to_keep = None; let mut tx = DBTransaction::new(); - backend.changes_tries_storage.prune(&mut tx, block0, Default::default(), 20).unwrap(); + backend.changes_tries_storage.prune(&mut tx, blocks[0], Default::default(), 20).unwrap(); backend.storage.db.write(tx).unwrap(); assert!(backend.changes_tries_storage.get(&root9, &[]).unwrap().is_some()); assert!(backend.changes_tries_storage.get(&root10, &[]).unwrap().is_some()); @@ -481,42 +506,55 @@ mod tests { backend.changes_tries_storage.min_blocks_to_keep = Some(4); // insert some blocks - let block0 = insert_header(&backend, 0, Default::default(), vec![(b"key_at_0".to_vec(), b"val_at_0".to_vec())], Default::default()); - let block1 = insert_header(&backend, 1, block0, vec![(b"key_at_1".to_vec(), b"val_at_1".to_vec())], Default::default()); - let block2 = insert_header(&backend, 2, block1, vec![(b"key_at_2".to_vec(), b"val_at_2".to_vec())], Default::default()); - let block3 = insert_header(&backend, 3, block2, vec![(b"key_at_3".to_vec(), b"val_at_3".to_vec())], Default::default()); - let block4 = insert_header(&backend, 4, block3, vec![(b"key_at_4".to_vec(), b"val_at_4".to_vec())], Default::default()); - let block5 = insert_header(&backend, 5, block4, vec![(b"key_at_5".to_vec(), b"val_at_5".to_vec())], Default::default()); - let block6 = insert_header(&backend, 6, block5, vec![(b"key_at_6".to_vec(), b"val_at_6".to_vec())], Default::default()); + let mut blocks = Vec::new(); + let mut last_block = 
Default::default(); + for i in 0u64..7u64 { + let key = i.to_le_bytes().to_vec(); + let val = key.clone(); + last_block = insert_header( + &backend, + i, + last_block, + vec![(key, val)], + Default::default(), + ); + blocks.push(last_block); + } backend.changes_tries_storage.cache.initialize( &well_known_cache_keys::CHANGES_TRIE_CONFIG, Some(config).encode(), ).unwrap(); // check that roots of all tries are in the columns::CHANGES_TRIE - let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block6, number: 6 }; + let anchor = state_machine::ChangesTrieAnchorBlockId { hash: blocks[6], number: 6 }; fn read_changes_trie_root(backend: &Backend, num: u64) -> H256 { backend.blockchain().header(BlockId::Number(num)).unwrap().unwrap().digest().logs().iter() .find(|i| i.as_changes_trie_root().is_some()).unwrap().as_changes_trie_root().unwrap().clone() } - let root1 = read_changes_trie_root(&backend, 1); assert_eq!(backend.changes_tries_storage.root(&anchor, 1).unwrap(), Some(root1)); - let root2 = read_changes_trie_root(&backend, 2); assert_eq!(backend.changes_tries_storage.root(&anchor, 2).unwrap(), Some(root2)); - let root3 = read_changes_trie_root(&backend, 3); assert_eq!(backend.changes_tries_storage.root(&anchor, 3).unwrap(), Some(root3)); - let root4 = read_changes_trie_root(&backend, 4); assert_eq!(backend.changes_tries_storage.root(&anchor, 4).unwrap(), Some(root4)); - let root5 = read_changes_trie_root(&backend, 5); assert_eq!(backend.changes_tries_storage.root(&anchor, 5).unwrap(), Some(root5)); - let root6 = read_changes_trie_root(&backend, 6); assert_eq!(backend.changes_tries_storage.root(&anchor, 6).unwrap(), Some(root6)); + let root1 = read_changes_trie_root(&backend, 1); + assert_eq!(backend.changes_tries_storage.root(&anchor, 1).unwrap(), Some(root1)); + let root2 = read_changes_trie_root(&backend, 2); + assert_eq!(backend.changes_tries_storage.root(&anchor, 2).unwrap(), Some(root2)); + let root3 = read_changes_trie_root(&backend, 3); + 
assert_eq!(backend.changes_tries_storage.root(&anchor, 3).unwrap(), Some(root3)); + let root4 = read_changes_trie_root(&backend, 4); + assert_eq!(backend.changes_tries_storage.root(&anchor, 4).unwrap(), Some(root4)); + let root5 = read_changes_trie_root(&backend, 5); + assert_eq!(backend.changes_tries_storage.root(&anchor, 5).unwrap(), Some(root5)); + let root6 = read_changes_trie_root(&backend, 6); + assert_eq!(backend.changes_tries_storage.root(&anchor, 6).unwrap(), Some(root6)); // now simulate finalization of block#5, causing prune of trie at #1 let mut tx = DBTransaction::new(); - backend.changes_tries_storage.prune(&mut tx, block1, block5, 5).unwrap(); + backend.changes_tries_storage.prune(&mut tx, blocks[1], blocks[5], 5).unwrap(); backend.storage.db.write(tx).unwrap(); assert!(backend.changes_tries_storage.get(&root1, &[]).unwrap().is_none()); assert!(backend.changes_tries_storage.get(&root2, &[]).unwrap().is_some()); // now simulate finalization of block#6, causing prune of tries at #2 let mut tx = DBTransaction::new(); - backend.changes_tries_storage.prune(&mut tx, block1, block6, 6).unwrap(); + backend.changes_tries_storage.prune(&mut tx, blocks[1], blocks[6], 6).unwrap(); backend.storage.db.write(tx).unwrap(); assert!(backend.changes_tries_storage.get(&root2, &[]).unwrap().is_none()); assert!(backend.changes_tries_storage.get(&root3, &[]).unwrap().is_some()); diff --git a/core/client/src/backend.rs b/core/client/src/backend.rs index 2ab0858047553..f2e79ad57ed62 100644 --- a/core/client/src/backend.rs +++ b/core/client/src/backend.rs @@ -208,7 +208,8 @@ pub trait PrunableStateChangesTrieStorage: /// Get number block of oldest, non-pruned changes trie. 
fn oldest_changes_trie_block( &self, - config: &ChangesTrieConfiguration, + config_activation_block: NumberFor, + config: ChangesTrieConfiguration, best_finalized: NumberFor, ) -> NumberFor; } diff --git a/core/client/src/client.rs b/core/client/src/client.rs index 3424a44a92679..8456a18b4258e 100644 --- a/core/client/src/client.rs +++ b/core/client/src/client.rs @@ -518,7 +518,7 @@ impl Client where return Err(error::Error::ChangesTrieAccessFailed("Invalid changes trie range".into())); } let finalized_number = self.backend.blockchain().info().finalized_number; - let oldest = storage.oldest_changes_trie_block(&config, finalized_number); + let oldest = storage.oldest_changes_trie_block(activation_block, config, finalized_number); let oldest = ::std::cmp::max(activation_block + One::one(), oldest); let first = ::std::cmp::max(first, oldest); Ok(Some((first, last))) diff --git a/core/client/src/error.rs b/core/client/src/error.rs index b807d5e11cc5d..ab2673bc1b246 100644 --- a/core/client/src/error.rs +++ b/core/client/src/error.rs @@ -82,6 +82,9 @@ pub enum Error { /// Changes tries are not supported. #[display(fmt = "Changes tries are not supported by the runtime")] ChangesTriesNotSupported, + /// Error reading changes trie configuration. + #[display(fmt = "Error reading changes tries configuration")] + ErrorReadingChangesTriesConfig, /// Key changes query has failed. 
#[display(fmt = "Failed to check changes proof: {}", _0)] ChangesTrieAccessFailed(String), From 01e37924ebe565a982e29ad1eb01ad849dfdc04f Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Mon, 29 Jul 2019 09:39:44 +0300 Subject: [PATCH 15/63] introduce ConfigurationRange --- .../src/changes_trie/changes_iterator.rs | 85 +++++++++---------- core/state-machine/src/changes_trie/mod.rs | 11 +++ 2 files changed, 52 insertions(+), 44 deletions(-) diff --git a/core/state-machine/src/changes_trie/changes_iterator.rs b/core/state-machine/src/changes_trie/changes_iterator.rs index 5ea722efda2be..733933672c436 100644 --- a/core/state-machine/src/changes_trie/changes_iterator.rs +++ b/core/state-machine/src/changes_trie/changes_iterator.rs @@ -23,7 +23,7 @@ use parity_codec::{Decode, Encode}; use hash_db::{HashDB, Hasher}; use num_traits::One; use trie::{Recorder, MemoryDB}; -use crate::changes_trie::{AnchorBlockId, Configuration, RootsStorage, Storage, BlockNumber}; +use crate::changes_trie::{AnchorBlockId, ConfigurationRange, RootsStorage, Storage, BlockNumber}; use crate::changes_trie::input::{DigestIndex, ExtrinsicIndex, DigestIndexValue, ExtrinsicIndexValue}; use crate::changes_trie::storage::{TrieBackendAdapter, InMemoryStorage}; use crate::proving_backend::ProvingBackendEssence; @@ -33,9 +33,8 @@ use crate::trie_backend_essence::{TrieBackendEssence}; /// `max` is the number of best known block. /// Changes are returned in descending order (i.e. last block comes first). 
pub fn key_changes<'a, H: Hasher, Number: BlockNumber>( - config: &'a Configuration, + config: ConfigurationRange<'a, Number>, storage: &'a dyn Storage, - zero: Number, begin: Number, end: &'a AnchorBlockId, max: Number, @@ -53,7 +52,6 @@ pub fn key_changes<'a, H: Hasher, Number: BlockNumber>( end, surface: surface_iterator( config, - zero, max, begin, end.number.clone(), @@ -69,10 +67,9 @@ pub fn key_changes<'a, H: Hasher, Number: BlockNumber>( /// Returns proof of changes of given key at given blocks range. /// `max` is the number of best known block. -pub fn key_changes_proof( - config: &Configuration, +pub fn key_changes_proof<'a, H: Hasher, Number: BlockNumber>( + config: ConfigurationRange<'a, Number>, storage: &dyn Storage, - zero: Number, begin: Number, end: &AnchorBlockId, max: Number, @@ -90,7 +87,6 @@ pub fn key_changes_proof( end, surface: surface_iterator( config, - zero, max, begin, end.number.clone(), @@ -115,11 +111,10 @@ pub fn key_changes_proof( /// Check key changes proof and return changes of the key at given blocks range. /// `max` is the number of best known block. /// Changes are returned in descending order (i.e. last block comes first). -pub fn key_changes_proof_check( - config: &Configuration, +pub fn key_changes_proof_check<'a, H: Hasher, Number: BlockNumber>( + config: ConfigurationRange<'a, Number>, roots_storage: &dyn RootsStorage, proof: Vec>, - zero: Number, begin: Number, end: &AnchorBlockId, max: Number, @@ -143,7 +138,6 @@ pub fn key_changes_proof_check( end, surface: surface_iterator( config, - zero, max, begin, end.number.clone(), @@ -160,8 +154,7 @@ pub fn key_changes_proof_check( /// Surface iterator - only traverses top-level digests from given range and tries to find /// all digest changes for the key. 
pub struct SurfaceIterator<'a, Number: BlockNumber> { - config: &'a Configuration, - zero: Number, + config: ConfigurationRange<'a, Number>, begin: Number, max: Number, current: Option, @@ -189,8 +182,7 @@ impl<'a, Number: BlockNumber> Iterator for SurfaceIterator<'a, Number> { self.current = Some(next); } else { let max_digest_interval = lower_bound_max_digest( - self.config, - self.zero.clone(), + self.config.clone(), self.max.clone(), self.begin.clone(), next, @@ -378,22 +370,19 @@ impl<'a, H, Number> Iterator for ProvingDrilldownIterator<'a, H, Number> /// Returns surface iterator for given range of blocks. fn surface_iterator<'a, Number: BlockNumber>( - config: &'a Configuration, - zero: Number, + config: ConfigurationRange<'a, Number>, max: Number, begin: Number, end: Number, ) -> Result, String> { let (current, current_begin, digest_step, digest_level) = lower_bound_max_digest( - config, - zero.clone(), + config.clone(), max.clone(), begin.clone(), end, )?; Ok(SurfaceIterator { config, - zero, begin, max, current: Some(current), @@ -405,9 +394,8 @@ fn surface_iterator<'a, Number: BlockNumber>( /// Returns parameters of highest level digest block that includes the end of given range /// and tends to include the whole range. 
-fn lower_bound_max_digest( - config: &Configuration, - zero: Number, +fn lower_bound_max_digest<'a, Number: BlockNumber>( + config: ConfigurationRange<'a, Number>, max: Number, begin: Number, end: Number, @@ -422,13 +410,13 @@ fn lower_bound_max_digest( let mut current = end.clone(); let mut current_begin = begin.clone(); if current_begin != current { - while digest_level != config.digest_levels { + while digest_level != config.config.digest_levels { let new_digest_level = digest_level + 1; - let new_digest_step = digest_step * config.digest_interval; - let new_digest_interval = config.digest_interval * { + let new_digest_step = digest_step * config.config.digest_interval; + let new_digest_interval = config.config.digest_interval * { if digest_interval == 0 { 1 } else { digest_interval } }; - let new_digest_begin = zero.clone() + ((current.clone() - One::one() - zero.clone()) + let new_digest_begin = config.zero.clone() + ((current.clone() - One::one() - config.zero.clone()) / new_digest_interval.into()) * new_digest_interval.into(); let new_digest_end = new_digest_begin.clone() + new_digest_interval.into(); let new_current = new_digest_begin.clone() + new_digest_interval.into(); @@ -464,6 +452,7 @@ fn lower_bound_max_digest( mod tests { use std::iter::FromIterator; use primitives::Blake2Hasher; + use crate::changes_trie::Configuration; use crate::changes_trie::input::InputPair; use crate::changes_trie::storage::InMemoryStorage; use super::*; @@ -507,19 +496,27 @@ mod tests { (config, backend) } + fn configuration_range<'a>(config: &'a Configuration, zero: u64) -> ConfigurationRange<'a, u64> { + ConfigurationRange { + config, + zero, + end: None, + } + } + #[test] fn lower_bound_max_digest_works() { let config = Configuration { digest_interval: 4, digest_levels: 2 }; // when config activates at 0 assert_eq!( - lower_bound_max_digest(&config, 0u64, 100_000u64, 20u64, 180u64).unwrap(), + lower_bound_max_digest(configuration_range(&config, 0u64), 100_000u64, 20u64, 
180u64).unwrap(), (192, 176, 16, 2), ); // when config activates at 30 assert_eq!( - lower_bound_max_digest(&config, 30u64, 100_000u64, 20u64, 180u64).unwrap(), + lower_bound_max_digest(configuration_range(&config, 30u64), 100_000u64, 20u64, 180u64).unwrap(), (190, 174, 16, 2), ); } @@ -530,7 +527,7 @@ mod tests { // when config activates at 0 assert_eq!( - surface_iterator(&config, 0u64, 100_000u64, 40u64, 180u64).unwrap().collect::>(), + surface_iterator(configuration_range(&config, 0u64), 100_000u64, 40u64, 180u64).unwrap().collect::>(), vec![ Ok((192, 2)), Ok((176, 2)), Ok((160, 2)), Ok((144, 2)), Ok((128, 2)), Ok((112, 2)), Ok((96, 2)), Ok((80, 2)), Ok((64, 2)), Ok((48, 2)), @@ -539,7 +536,7 @@ mod tests { // when config activates at 30 assert_eq!( - surface_iterator(&config, 30u64, 100_000u64, 40u64, 180u64).unwrap().collect::>(), + surface_iterator(configuration_range(&config, 30u64), 100_000u64, 40u64, 180u64).unwrap().collect::>(), vec![ Ok((190, 2)), Ok((174, 2)), Ok((158, 2)), Ok((142, 2)), Ok((126, 2)), Ok((110, 2)), Ok((94, 2)), Ok((78, 2)), Ok((62, 2)), Ok((46, 2)), @@ -551,27 +548,27 @@ mod tests { fn drilldown_iterator_works() { let (config, storage) = prepare_for_drilldown(); let drilldown_result = key_changes::( - &config, &storage, 0, 0, &AnchorBlockId { hash: Default::default(), number: 16 }, 16, &[42]) + configuration_range(&config, 0), &storage, 0, &AnchorBlockId { hash: Default::default(), number: 16 }, 16, &[42]) .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(8, 2), (8, 1), (6, 3), (3, 0)])); let drilldown_result = key_changes::( - &config, &storage, 0, 0, &AnchorBlockId { hash: Default::default(), number: 2 }, 4, &[42]) + configuration_range(&config, 0), &storage, 0, &AnchorBlockId { hash: Default::default(), number: 2 }, 4, &[42]) .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![])); let drilldown_result = key_changes::( - &config, &storage, 0, 0, &AnchorBlockId { hash: Default::default(), number: 3 
}, 4, &[42]) + configuration_range(&config, 0), &storage, 0, &AnchorBlockId { hash: Default::default(), number: 3 }, 4, &[42]) .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(3, 0)])); let drilldown_result = key_changes::( - &config, &storage, 0, 7, &AnchorBlockId { hash: Default::default(), number: 8 }, 8, &[42]) + configuration_range(&config, 0), &storage, 7, &AnchorBlockId { hash: Default::default(), number: 8 }, 8, &[42]) .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(8, 2), (8, 1)])); let drilldown_result = key_changes::( - &config, &storage, 0, 5, &AnchorBlockId { hash: Default::default(), number: 7 }, 8, &[42]) + configuration_range(&config, 0), &storage, 5, &AnchorBlockId { hash: Default::default(), number: 7 }, 8, &[42]) .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(6, 3)])); } @@ -582,7 +579,7 @@ mod tests { storage.clear_storage(); assert!(key_changes::( - &config, &storage, 0, 0, &AnchorBlockId { hash: Default::default(), number: 100 }, 1000, &[42]) + configuration_range(&config, 0), &storage, 0, &AnchorBlockId { hash: Default::default(), number: 100 }, 1000, &[42]) .and_then(|i| i.collect::, _>>()).is_err()); } @@ -590,9 +587,9 @@ mod tests { fn drilldown_iterator_fails_when_range_is_invalid() { let (config, storage) = prepare_for_drilldown(); assert!(key_changes::( - &config, &storage, 0, 0, &AnchorBlockId { hash: Default::default(), number: 100 }, 50, &[42]).is_err()); + configuration_range(&config, 0), &storage, 0, &AnchorBlockId { hash: Default::default(), number: 100 }, 50, &[42]).is_err()); assert!(key_changes::( - &config, &storage, 0, 20, &AnchorBlockId { hash: Default::default(), number: 10 }, 100, &[42]).is_err()); + configuration_range(&config, 0), &storage, 20, &AnchorBlockId { hash: Default::default(), number: 10 }, 100, &[42]).is_err()); } @@ -603,8 +600,8 @@ mod tests { // create drilldown iterator that records all trie nodes during drilldown let (remote_config, 
remote_storage) = prepare_for_drilldown(); let remote_proof = key_changes_proof::( - &remote_config, &remote_storage, - 0, 0, &AnchorBlockId { hash: Default::default(), number: 16 }, 16, &[42]).unwrap(); + configuration_range(&remote_config, 0), &remote_storage, + 0, &AnchorBlockId { hash: Default::default(), number: 16 }, 16, &[42]).unwrap(); // happens on local light node: @@ -612,8 +609,8 @@ mod tests { let (local_config, local_storage) = prepare_for_drilldown(); local_storage.clear_storage(); let local_result = key_changes_proof_check::( - &local_config, &local_storage, remote_proof, - 0, 0, &AnchorBlockId { hash: Default::default(), number: 16 }, 16, &[42]); + configuration_range(&local_config, 0), &local_storage, remote_proof, + 0, &AnchorBlockId { hash: Default::default(), number: 16 }, 16, &[42]); // check that drilldown result is the same as if it was happening at the full node assert_eq!(local_result, Ok(vec![(8, 2), (8, 1), (6, 3), (3, 0)])); diff --git a/core/state-machine/src/changes_trie/mod.rs b/core/state-machine/src/changes_trie/mod.rs index 91dee5f49460b..4cf65d2e2dfc6 100644 --- a/core/state-machine/src/changes_trie/mod.rs +++ b/core/state-machine/src/changes_trie/mod.rs @@ -139,6 +139,17 @@ impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorage for TrieBackend /// Changes trie configuration. pub type Configuration = primitives::ChangesTrieConfiguration; +/// Blocks range where configuration has been constant. +#[derive(Clone)] +pub struct ConfigurationRange<'a, N> { + /// Active configuration. + pub config: &'a Configuration, + /// Zero block of this configuration. The configuration is active starting from the next block. + pub zero: N, + /// End block of this configuration. It is the last block where configuration has been active. + pub end: Option, +} + impl<'a, H, Number> State<'a, H, Number> { /// Create state with given config and storage. 
pub fn new( From c1913fc2f204e7b4399355d0db1807c8137c9b28 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Mon, 29 Jul 2019 09:59:16 +0300 Subject: [PATCH 16/63] use Configuration range in build --- core/state-machine/src/changes_trie/build.rs | 32 +++++++++++--------- core/state-machine/src/changes_trie/mod.rs | 25 +++++++++++++-- 2 files changed, 39 insertions(+), 18 deletions(-) diff --git a/core/state-machine/src/changes_trie/build.rs b/core/state-machine/src/changes_trie/build.rs index 22afcd11da2bb..aa408d3b0559c 100644 --- a/core/state-machine/src/changes_trie/build.rs +++ b/core/state-machine/src/changes_trie/build.rs @@ -26,7 +26,7 @@ use crate::overlayed_changes::OverlayedChanges; use crate::trie_backend_essence::TrieBackendEssence; use crate::changes_trie::build_iterator::digest_build_iterator; use crate::changes_trie::input::{InputKey, InputPair, DigestIndex, ExtrinsicIndex}; -use crate::changes_trie::{AnchorBlockId, Configuration, Storage, BlockNumber}; +use crate::changes_trie::{AnchorBlockId, ConfigurationRange, Storage, BlockNumber}; /// Prepare input pairs for building a changes trie of given block. /// @@ -37,8 +37,7 @@ use crate::changes_trie::{AnchorBlockId, Configuration, Storage, BlockNumber}; pub fn prepare_input<'a, B, H, Number>( backend: &'a B, storage: &'a Storage, - config_activation_block: Number, - config: &'a Configuration, + config: ConfigurationRange<'a, Number>, changes: &'a OverlayedChanges, parent: &'a AnchorBlockId, ) -> Result> + 'a, String> @@ -54,7 +53,6 @@ pub fn prepare_input<'a, B, H, Number>( changes)?; let digest_input = prepare_digest_input::( parent, - config_activation_block, config, number, storage)?; @@ -116,8 +114,7 @@ fn prepare_extrinsics_input<'a, B, H, Number>( /// Prepare DigestIndex input pairs. 
fn prepare_digest_input<'a, H, Number>( parent: &'a AnchorBlockId, - config_activation_block: Number, - config: &Configuration, + config: ConfigurationRange<'a, Number>, block: Number, storage: &'a Storage, ) -> Result> + 'a, String> @@ -126,7 +123,7 @@ fn prepare_digest_input<'a, H, Number>( H::Out: 'a, Number: BlockNumber, { - digest_build_iterator(config, config_activation_block, block.clone()) + digest_build_iterator(&config.config, config.zero.clone(), block.clone()) .try_fold(BTreeMap::new(), move |mut map, digest_build_block| { let trie_root = storage.root(parent, digest_build_block.clone())?; let trie_root = trie_root.ok_or_else(|| format!("No changes trie root for block {}", digest_build_block.clone()))?; @@ -180,6 +177,7 @@ mod test { use primitives::Blake2Hasher; use primitives::storage::well_known_keys::EXTRINSIC_INDEX; use crate::backend::InMemory; + use crate::changes_trie::Configuration; use crate::changes_trie::storage::InMemoryStorage; use crate::overlayed_changes::OverlayedValue; use super::*; @@ -259,6 +257,14 @@ mod test { (backend, storage, changes, config) } + fn configuration_range<'a>(config: &'a Configuration, zero: u64) -> ConfigurationRange<'a, u64> { + ConfigurationRange { + config, + zero, + end: None, + } + } + #[test] fn build_changes_trie_nodes_on_non_digest_block() { let (backend, storage, changes, config) = prepare_for_build(); @@ -266,8 +272,7 @@ mod test { let changes_trie_nodes = prepare_input( &backend, &storage, - 0, // TODO: test other cases - &config, + configuration_range(&config, 0), // TODO: test other cases &changes, &parent, ).unwrap(); @@ -285,8 +290,7 @@ mod test { let changes_trie_nodes = prepare_input( &backend, &storage, - 0, // TODO: test other cases - &config, + configuration_range(&config, 0), // TODO: test other cases &changes, &parent, ).unwrap(); @@ -309,8 +313,7 @@ mod test { let changes_trie_nodes = prepare_input( &backend, &storage, - 0, // TODO: test other cases - &config, + configuration_range(&config, 
0), // TODO: test other cases &changes, &parent, ).unwrap(); @@ -341,8 +344,7 @@ mod test { let changes_trie_nodes = prepare_input( &backend, &storage, - 0, // TODO: test other cases - &config, + configuration_range(&config, 0), // TODO: test other cases &changes, &parent, ).unwrap(); diff --git a/core/state-machine/src/changes_trie/mod.rs b/core/state-machine/src/changes_trie/mod.rs index 4cf65d2e2dfc6..a63208c72218f 100644 --- a/core/state-machine/src/changes_trie/mod.rs +++ b/core/state-machine/src/changes_trie/mod.rs @@ -189,12 +189,31 @@ pub fn build_changes_trie<'a, B: Backend, H: Hasher, Number: BlockNumber>( None => return Ok(None), }; - // build_anchor error should not be considered fatal + // build_anchor error should not be considered fatal (passed parent_hash may be incorrect) let parent = state.storage.build_anchor(parent_hash).map_err(|_| ())?; + // prepare configuration range - we already know zero block. Current block may be the end block if configuration + // has been changed in this block + // TODO: ^^^ this won't work for forced digests + let is_config_changed = match changes.storage(primitives::storage::well_known_keys::CHANGES_TRIE_CONFIG) { + Some(Some(new_config)) => new_config != &state.config.encode()[..], + Some(None) => true, + None => false, + }; + let config_range = ConfigurationRange { + config: &state.config, + zero: state.config_activation_block.clone(), + end: if is_config_changed { Some(parent.number.clone() + One::one()) } else { None }, + }; + // storage errors are considered fatal (similar to situations when runtime fetches values from storage) - let input_pairs = prepare_input::(backend, state.storage, state.config_activation_block.clone(), &state.config, changes, &parent) - .expect("changes trie: storage access is not allowed to fail within runtime"); + let input_pairs = prepare_input::( + backend, + state.storage, + config_range, + changes, + &parent, + ).expect("changes trie: storage access is not allowed to fail within 
runtime"); let mut root = Default::default(); let mut mdb = MemoryDB::default(); { From fb6ae9c1b510b8921f0ae54f97f84c6ddc179188 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Mon, 29 Jul 2019 12:39:22 +0300 Subject: [PATCH 17/63] build skewed digest --- core/primitives/src/changes_trie.rs | 35 ++++ core/state-machine/src/changes_trie/build.rs | 51 ++++- .../src/changes_trie/build_iterator.rs | 175 ++++++++++++------ 3 files changed, 207 insertions(+), 54 deletions(-) diff --git a/core/primitives/src/changes_trie.rs b/core/primitives/src/changes_trie.rs index 057ea1dd63fa3..66166c4941cce 100644 --- a/core/primitives/src/changes_trie.rs +++ b/core/primitives/src/changes_trie.rs @@ -75,6 +75,29 @@ impl ChangesTrieConfiguration { } } + /// Returns max level digest block number that must be created at block >= passed block number. + pub fn next_max_level_digest_block( + &self, + zero: Number, + block: Number, + ) -> Option + where + Number: Clone + From + PartialEq + ::rstd::ops::Add + ::rstd::ops::Sub + ::rstd::ops::Div + ::rstd::ops::Mul, + { + if !self.is_digest_build_enabled() { + return None; + } + + let max_digest_interval: Number = self.max_digest_interval().into(); + let max_digests_since_zero = (block.clone() - zero.clone()) / max_digest_interval.clone(); + let last_max_digest_block = zero + max_digests_since_zero * max_digest_interval.clone(); + Some(if block == last_max_digest_block { + block + } else { + last_max_digest_block + max_digest_interval + }) + } + /// Returns Some if digest must be built at given block number. 
/// The tuple is: /// ( @@ -183,5 +206,17 @@ mod tests { assert_eq!(config(::std::u32::MAX, 1024).max_digest_interval(), ::std::u32::MAX); } + #[test] + fn next_max_level_digest_block_works() { + assert_eq!(config(0, 0).next_max_level_digest_block(0u64, 16), None); + assert_eq!(config(1, 1).next_max_level_digest_block(0u64, 16), None); + assert_eq!(config(2, 1).next_max_level_digest_block(0u64, 16), Some(16)); + assert_eq!(config(4, 1).next_max_level_digest_block(0u64, 16), Some(16)); + assert_eq!(config(32, 1).next_max_level_digest_block(0u64, 16), Some(32)); + assert_eq!(config(2, 3).next_max_level_digest_block(0u64, 10), Some(16)); + assert_eq!(config(2, 3).next_max_level_digest_block(0u64, 8), Some(8)); + // TODO: more test cases + } + // TODO: test that it doesn't panic when zero > block } diff --git a/core/state-machine/src/changes_trie/build.rs b/core/state-machine/src/changes_trie/build.rs index aa408d3b0559c..a49c6aebedc33 100644 --- a/core/state-machine/src/changes_trie/build.rs +++ b/core/state-machine/src/changes_trie/build.rs @@ -123,7 +123,16 @@ fn prepare_digest_input<'a, H, Number>( H::Out: 'a, Number: BlockNumber, { - digest_build_iterator(&config.config, config.zero.clone(), block.clone()) + let build_skewed_digest = config.end.as_ref() == Some(&block); + let block_for_digest = if build_skewed_digest { + config.config.next_max_level_digest_block(config.zero.clone(), block.clone()) + .unwrap_or_else(|| block.clone()) + } else { + block.clone() + }; + + digest_build_iterator(config, block_for_digest) +// .take_while(|digest_build_block| config.end.as_ref().map(|end| digest_build_block <= end).unwrap_or(true)) .try_fold(BTreeMap::new(), move |mut map, digest_build_block| { let trie_root = storage.root(parent, digest_build_block.clone())?; let trie_root = trie_root.ok_or_else(|| format!("No changes trie root for block {}", digest_build_block.clone()))?; @@ -330,6 +339,46 @@ mod test { ]); } + #[test] + fn 
build_changes_trie_nodes_on_skewed_digest_block() { + let (backend, storage, changes, config) = prepare_for_build(); + let parent = AnchorBlockId { hash: Default::default(), number: 10 }; + + let mut configuration_range = configuration_range(&config, 0); // TODO: test other cases + let changes_trie_nodes = prepare_input( + &backend, + &storage, + configuration_range.clone(), + &changes, + &parent, + ).unwrap(); + assert_eq!(changes_trie_nodes.collect::>>(), vec![ + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 11, key: vec![100] }, vec![0, 2, 3]), + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 11, key: vec![101] }, vec![1]), + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 11, key: vec![103] }, vec![0, 1]), + ]); + + configuration_range.end = Some(11); + let changes_trie_nodes = prepare_input( + &backend, + &storage, + configuration_range, + &changes, + &parent, + ).unwrap(); + assert_eq!(changes_trie_nodes.collect::>>(), vec![ + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 11, key: vec![100] }, vec![0, 2, 3]), + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 11, key: vec![101] }, vec![1]), + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 11, key: vec![103] }, vec![0, 1]), + + InputPair::DigestIndex(DigestIndex { block: 11, key: vec![100] }, vec![4]), + InputPair::DigestIndex(DigestIndex { block: 11, key: vec![101] }, vec![4]), + InputPair::DigestIndex(DigestIndex { block: 11, key: vec![102] }, vec![4]), + InputPair::DigestIndex(DigestIndex { block: 11, key: vec![103] }, vec![4]), + InputPair::DigestIndex(DigestIndex { block: 11, key: vec![105] }, vec![4, 8]), + ]); + } + #[test] fn build_changes_trie_nodes_ignores_temporary_storage_values() { let (backend, storage, mut changes, config) = prepare_for_build(); diff --git a/core/state-machine/src/changes_trie/build_iterator.rs b/core/state-machine/src/changes_trie/build_iterator.rs index afb27ff0949c4..4c23ff1a40e4b 100644 --- a/core/state-machine/src/changes_trie/build_iterator.rs +++ 
b/core/state-machine/src/changes_trie/build_iterator.rs @@ -17,60 +17,73 @@ //! Structures and functions to return blocks whose changes are to be included //! in given block's changes trie. -use crate::changes_trie::{Configuration, BlockNumber}; +use num_traits::Zero; +use crate::changes_trie::{ConfigurationRange, BlockNumber}; /// Returns iterator of OTHER blocks that are required for inclusion into /// changes trie of given block. Blocks are guaranteed to be returned in /// ascending order. -pub fn digest_build_iterator( - config: &Configuration, - zero: Number, +/// +/// Skewed digest is built IF block >= config.end. +pub fn digest_build_iterator<'a, Number: BlockNumber>( + config: ConfigurationRange<'a, Number>, block: Number, ) -> DigestBuildIterator { // prepare digest build parameters - let (_, _, digest_step) = match config.digest_level_at_block(zero, block.clone()) { + let (_, _, digest_step) = match config.config.digest_level_at_block(config.zero, block.clone()) { Some((current_level, digest_interval, digest_step)) => (current_level, digest_interval, digest_step), None => return DigestBuildIterator::empty(), }; - DigestBuildIterator::new(block, config.digest_interval, digest_step) + DigestBuildIterator::new(block.clone(), config.end.unwrap_or(block), config.config.digest_interval, digest_step) } /// Changes trie build iterator that returns numbers of OTHER blocks that are /// required for inclusion into changes trie of given block. #[derive(Debug)] pub struct DigestBuildIterator { - /// Block we're building changes trie for. + /// Block we're building changes trie for. It could (logically) be a post-end block if we are creating + /// skewed digest. block: Number, - /// Interval for creation digest blocks. + /// Block that is a last block where current configuration is active. We have never yet created anything + /// after this block => digest that we're creating can't reference any blocks that are >= end. + end: Number, + /// Interval of L1 digest blocks. 
digest_interval: u32, - /// Max step of blocks range. + /// Max step valid that could be used when digest is created max_step: u32, + + // Mutable data below: + /// Step of current blocks range. current_step: u32, /// Reverse step of current blocks range. current_step_reverse: u32, /// Current blocks range. current_range: Option>, + /// Last block that we have returned. + last_block: Option, } impl DigestBuildIterator { /// Create new digest build iterator. - pub fn new(block: Number, digest_interval: u32, max_step: u32) -> Self { + pub fn new(block: Number, end: Number, digest_interval: u32, max_step: u32) -> Self { DigestBuildIterator { block, + end, digest_interval, max_step, current_step: max_step, current_step_reverse: 0, current_range: None, + last_block: None, } } /// Create empty digest build iterator. pub fn empty() -> Self { - Self::new(0.into(), 0, 0) + Self::new(Zero::zero(), Zero::zero(), 0, 0) } } @@ -78,39 +91,42 @@ impl Iterator for DigestBuildIterator { type Item = Number; fn next(&mut self) -> Option { - if let Some(next) = self.current_range.as_mut().and_then(|iter| iter.next()) { - return Some(next); - } - - // we are safe to use non-checking mul/sub versions here because: - // DigestBuildIterator is created only by internal function that is checking - // that all multiplications/subtractions are safe within max_step limit - - let next_step_reverse = if self.current_step_reverse == 0 { - 1 - } else { - self.current_step_reverse * self.digest_interval - }; - if next_step_reverse > self.max_step { - return None; + loop { + if let Some(next) = self.current_range.as_mut().and_then(|iter| iter.next()) { + if next < self.end { + self.last_block = Some(next.clone()); + return Some(next); + } + } + + // we are safe to use non-checking mul/sub versions here because: + // DigestBuildIterator is created only by internal function that is checking + // that all multiplications/subtractions are safe within max_step limit + + let next_step_reverse = if 
self.current_step_reverse == 0 { + 1 + } else { + self.current_step_reverse * self.digest_interval + }; + if next_step_reverse > self.max_step { + return None; + } + + self.current_step_reverse = next_step_reverse; + self.current_range = Some(BlocksRange::new( + match self.last_block.clone() { + Some(last_block) => last_block + self.current_step.into(), + None => self.block.clone() - (self.current_step * self.digest_interval - self.current_step).into(), + }, + self.block.clone(), + self.current_step.into(), + )); + + self.current_step = self.current_step / self.digest_interval; + if self.current_step == 0 { + self.current_step = 1; + } } - - self.current_step_reverse = next_step_reverse; - self.current_range = Some(BlocksRange::new( - self.block.clone() - (self.current_step * self.digest_interval - self.current_step).into(), - self.block.clone(), - self.current_step.into(), - )); - - self.current_step = self.current_step / self.digest_interval; - if self.current_step == 0 { - self.current_step = 1; - } - - Some(self.current_range.as_mut() - .expect("assigned one line above; qed") - .next() - .expect("X - I^(N+1) + I^N > X when X,I,N are > 1; qed")) } } @@ -124,6 +140,7 @@ struct BlocksRange { impl BlocksRange { pub fn new(begin: Number, end: Number, step: Number) -> Self { +println!("=== {} {} {}", begin, end, step); BlocksRange { current: begin, end, @@ -148,6 +165,7 @@ impl Iterator for BlocksRange { #[cfg(test)] mod tests { + use crate::changes_trie::Configuration; use super::*; fn digest_build_iterator( @@ -155,8 +173,19 @@ mod tests { digest_levels: u32, zero: u64, block: u64, + end: Option, ) -> DigestBuildIterator { - super::digest_build_iterator(&Configuration { digest_interval, digest_levels }, zero, block) + super::digest_build_iterator( + ConfigurationRange { + config: &Configuration { + digest_interval, + digest_levels, + }, + zero, + end, + }, + block, + ) } fn digest_build_iterator_basic( @@ -165,7 +194,7 @@ mod tests { zero: u64, block: u64, ) -> 
(u64, u32, u32) { - let iter = digest_build_iterator(digest_interval, digest_levels, zero, block); + let iter = digest_build_iterator(digest_interval, digest_levels, zero, block, None); (iter.block, iter.digest_interval, iter.max_step) } @@ -174,8 +203,9 @@ mod tests { digest_levels: u32, zero: u64, block: u64, + end: Option, ) -> Vec { - digest_build_iterator(digest_interval, digest_levels, zero, block).collect() + digest_build_iterator(digest_interval, digest_levels, zero, block, end).collect() } #[test] @@ -246,13 +276,13 @@ mod tests { #[test] fn digest_iterator_returns_level1_blocks() { fn test_with_zero(zero: u64) { - assert_eq!(digest_build_iterator_blocks(16, 1, zero, zero + 16), + assert_eq!(digest_build_iterator_blocks(16, 1, zero, zero + 16, None), [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15].iter().map(|item| zero + item).collect::>()); - assert_eq!(digest_build_iterator_blocks(16, 1, zero, zero + 256), + assert_eq!(digest_build_iterator_blocks(16, 1, zero, zero + 256, None), [241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255].iter().map(|item| zero + item).collect::>()); - assert_eq!(digest_build_iterator_blocks(16, 2, zero, zero + 32), + assert_eq!(digest_build_iterator_blocks(16, 2, zero, zero + 32, None), [17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31].iter().map(|item| zero + item).collect::>()); - assert_eq!(digest_build_iterator_blocks(16, 3, zero, zero + 4080), + assert_eq!(digest_build_iterator_blocks(16, 3, zero, zero + 4080, None), [4065, 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073, 4074, 4075, 4076, 4077, 4078, 4079].iter().map(|item| zero + item).collect::>()); } @@ -264,7 +294,7 @@ mod tests { #[test] fn digest_iterator_returns_level1_and_level2_blocks() { fn test_with_zero(zero: u64) { - assert_eq!(digest_build_iterator_blocks(16, 2, zero, zero + 256), + assert_eq!(digest_build_iterator_blocks(16, 2, zero, zero + 256, None), [ // level2 points to previous 16-1 level1 digests: 16, 32, 48, 64, 
80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, @@ -272,7 +302,7 @@ mod tests { 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, ].iter().map(|item| zero + item).collect::>(), ); - assert_eq!(digest_build_iterator_blocks(16, 2, zero, zero + 4096), + assert_eq!(digest_build_iterator_blocks(16, 2, zero, zero + 4096, None), [ // level2 points to previous 16-1 level1 digests: 3856, 3872, 3888, 3904, 3920, 3936, 3952, 3968, 3984, 4000, 4016, 4032, 4048, 4064, 4080, @@ -290,7 +320,7 @@ mod tests { #[test] fn digest_iterator_returns_level1_and_level2_and_level3_blocks() { fn test_with_zero(zero: u64) { - assert_eq!(digest_build_iterator_blocks(16, 3, zero, zero + 4096), + assert_eq!(digest_build_iterator_blocks(16, 3, zero, zero + 4096, None), [ // level3 points to previous 16-1 level2 digests: 256, 512, 768, 1024, 1280, 1536, 1792, 2048, 2304, 2560, 2816, 3072, 3328, 3584, 3840, @@ -306,4 +336,43 @@ mod tests { test_with_zero(16); test_with_zero(17); } + + #[test] + fn digest_iterator_returns_skewed_digest_blocks() { + fn test_with_zero(zero: u64) { + assert_eq!(digest_build_iterator_blocks(16, 3, zero, zero + 4096, Some(zero + 1338)), + [ + // level3 MUST point to previous 16-1 level2 digests, BUT there are only 5: + 256, 512, 768, 1024, 1280, + // level3 MUST point to previous 16-1 level1 digests, BUT there are only 3: + 1296, 1312, 1328, + // level3 MUST be a level1 digest of 16-1 previous blocks, BUT there are only 9: + 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, 1337, + ].iter().map(|item| zero + item).collect::>(), + ); + } + + test_with_zero(0); + test_with_zero(16); + test_with_zero(17); + } + + #[test] + fn digest_iterator_returns_skewed_digest_blocks_skipping_level() { + fn test_with_zero(zero: u64) { + assert_eq!(digest_build_iterator_blocks(16, 3, zero, zero + 4096, Some(zero + 1284)), + [ + // level3 MUST point to previous 16-1 level2 digests, BUT there are only 5: + 256, 512, 768, 1024, 1280, + // level3 MUST point to 
previous 16-1 level1 digests, BUT there are NO ANY L1-digests: + // level3 MUST be a level1 digest of 16-1 previous blocks, BUT there are only 3: + 1281, 1282, 1283, + ].iter().map(|item| zero + item).collect::>(), + ); + } + + test_with_zero(0); + test_with_zero(16); + test_with_zero(17); + } } From e429061b2bee3a535812f76cdaabd4323c31397a Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Mon, 29 Jul 2019 12:43:41 +0300 Subject: [PATCH 18/63] remove debug print --- core/state-machine/src/changes_trie/build_iterator.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/core/state-machine/src/changes_trie/build_iterator.rs b/core/state-machine/src/changes_trie/build_iterator.rs index 4c23ff1a40e4b..1e2b5ce5986f3 100644 --- a/core/state-machine/src/changes_trie/build_iterator.rs +++ b/core/state-machine/src/changes_trie/build_iterator.rs @@ -140,7 +140,6 @@ struct BlocksRange { impl BlocksRange { pub fn new(begin: Number, end: Number, step: Number) -> Self { -println!("=== {} {} {}", begin, end, step); BlocksRange { current: begin, end, From e6acae8a17008a75fda6af219cdb6d9c8b47418d Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 30 Jul 2019 09:39:46 +0300 Subject: [PATCH 19/63] extracted surface iterator --- .../src/changes_trie/changes_iterator.rs | 181 +------------- core/state-machine/src/changes_trie/mod.rs | 1 + .../src/changes_trie/surface_iterator.rs | 222 ++++++++++++++++++ 3 files changed, 230 insertions(+), 174 deletions(-) create mode 100644 core/state-machine/src/changes_trie/surface_iterator.rs diff --git a/core/state-machine/src/changes_trie/changes_iterator.rs b/core/state-machine/src/changes_trie/changes_iterator.rs index 733933672c436..725c0f116696f 100644 --- a/core/state-machine/src/changes_trie/changes_iterator.rs +++ b/core/state-machine/src/changes_trie/changes_iterator.rs @@ -21,11 +21,11 @@ use std::cell::RefCell; use std::collections::VecDeque; use parity_codec::{Decode, Encode}; use hash_db::{HashDB, Hasher}; -use 
num_traits::One; use trie::{Recorder, MemoryDB}; use crate::changes_trie::{AnchorBlockId, ConfigurationRange, RootsStorage, Storage, BlockNumber}; use crate::changes_trie::input::{DigestIndex, ExtrinsicIndex, DigestIndexValue, ExtrinsicIndexValue}; use crate::changes_trie::storage::{TrieBackendAdapter, InMemoryStorage}; +use crate::changes_trie::surface_iterator::{surface_iterator, SurfaceIterator}; use crate::proving_backend::ProvingBackendEssence; use crate::trie_backend_essence::{TrieBackendEssence}; @@ -151,58 +151,6 @@ pub fn key_changes_proof_check<'a, H: Hasher, Number: BlockNumber>( }.collect() } -/// Surface iterator - only traverses top-level digests from given range and tries to find -/// all digest changes for the key. -pub struct SurfaceIterator<'a, Number: BlockNumber> { - config: ConfigurationRange<'a, Number>, - begin: Number, - max: Number, - current: Option, - current_begin: Number, - digest_step: u32, - digest_level: u32, -} - -impl<'a, Number: BlockNumber> Iterator for SurfaceIterator<'a, Number> { - type Item = Result<(Number, u32), String>; - - fn next(&mut self) -> Option { - let current = self.current.clone()?; - let digest_level = self.digest_level; - - if current < self.digest_step.into() { - self.current = None; - } - else { - let next = current.clone() - self.digest_step.into(); - if next.is_zero() || next < self.begin { - self.current = None; - } - else if next > self.current_begin { - self.current = Some(next); - } else { - let max_digest_interval = lower_bound_max_digest( - self.config.clone(), - self.max.clone(), - self.begin.clone(), - next, - ); - let (current, current_begin, digest_step, digest_level) = match max_digest_interval { - Err(err) => return Some(Err(err)), - Ok(range) => range, - }; - - self.current = Some(current); - self.current_begin = current_begin; - self.digest_step = digest_step; - self.digest_level = digest_level; - } - } - - Some(Ok((current, digest_level))) - } -} - /// Drilldown iterator - receives 'digest 
points' from surface iterator and explores /// every point until extrinsic is found. pub struct DrilldownIteratorEssence<'a, H, Number> @@ -293,7 +241,7 @@ impl<'a, H, Number> DrilldownIteratorEssence<'a, H, Number> } match self.surface.next() { - Some(Ok(block)) => self.blocks.push_back(block), + Some(Ok(block)) => { println!("=== XXX: {}", block.0); self.blocks.push_back(block) }, Some(Err(err)) => return Err(err), None => return Ok(None), } @@ -368,86 +316,6 @@ impl<'a, H, Number> Iterator for ProvingDrilldownIterator<'a, H, Number> } } -/// Returns surface iterator for given range of blocks. -fn surface_iterator<'a, Number: BlockNumber>( - config: ConfigurationRange<'a, Number>, - max: Number, - begin: Number, - end: Number, -) -> Result, String> { - let (current, current_begin, digest_step, digest_level) = lower_bound_max_digest( - config.clone(), - max.clone(), - begin.clone(), - end, - )?; - Ok(SurfaceIterator { - config, - begin, - max, - current: Some(current), - current_begin, - digest_step, - digest_level, - }) -} - -/// Returns parameters of highest level digest block that includes the end of given range -/// and tends to include the whole range. 
-fn lower_bound_max_digest<'a, Number: BlockNumber>( - config: ConfigurationRange<'a, Number>, - max: Number, - begin: Number, - end: Number, -) -> Result<(Number, Number, u32, u32), String> { - if end > max || begin > end { - return Err("invalid changes range".into()); - } - - let mut digest_level = 0u32; - let mut digest_step = 1u32; - let mut digest_interval = 0u32; - let mut current = end.clone(); - let mut current_begin = begin.clone(); - if current_begin != current { - while digest_level != config.config.digest_levels { - let new_digest_level = digest_level + 1; - let new_digest_step = digest_step * config.config.digest_interval; - let new_digest_interval = config.config.digest_interval * { - if digest_interval == 0 { 1 } else { digest_interval } - }; - let new_digest_begin = config.zero.clone() + ((current.clone() - One::one() - config.zero.clone()) - / new_digest_interval.into()) * new_digest_interval.into(); - let new_digest_end = new_digest_begin.clone() + new_digest_interval.into(); - let new_current = new_digest_begin.clone() + new_digest_interval.into(); - - if new_digest_end > max { - if begin < new_digest_begin { - current_begin = new_digest_begin; - } - break; - } - - digest_level = new_digest_level; - digest_step = new_digest_step; - digest_interval = new_digest_interval; - current = new_current; - current_begin = new_digest_begin; - - if current_begin <= begin && new_digest_end >= end { - break; - } - } - } - - Ok(( - current, - current_begin, - digest_step, - digest_level, - )) -} - #[cfg(test)] mod tests { use std::iter::FromIterator; @@ -504,46 +372,6 @@ mod tests { } } - #[test] - fn lower_bound_max_digest_works() { - let config = Configuration { digest_interval: 4, digest_levels: 2 }; - - // when config activates at 0 - assert_eq!( - lower_bound_max_digest(configuration_range(&config, 0u64), 100_000u64, 20u64, 180u64).unwrap(), - (192, 176, 16, 2), - ); - - // when config activates at 30 - assert_eq!( - 
lower_bound_max_digest(configuration_range(&config, 30u64), 100_000u64, 20u64, 180u64).unwrap(), - (190, 174, 16, 2), - ); - } - - #[test] - fn surface_iterator_works() { - let config = Configuration { digest_interval: 4, digest_levels: 2 }; - - // when config activates at 0 - assert_eq!( - surface_iterator(configuration_range(&config, 0u64), 100_000u64, 40u64, 180u64).unwrap().collect::>(), - vec![ - Ok((192, 2)), Ok((176, 2)), Ok((160, 2)), Ok((144, 2)), Ok((128, 2)), Ok((112, 2)), - Ok((96, 2)), Ok((80, 2)), Ok((64, 2)), Ok((48, 2)), - ], - ); - - // when config activates at 30 - assert_eq!( - surface_iterator(configuration_range(&config, 30u64), 100_000u64, 40u64, 180u64).unwrap().collect::>(), - vec![ - Ok((190, 2)), Ok((174, 2)), Ok((158, 2)), Ok((142, 2)), Ok((126, 2)), Ok((110, 2)), - Ok((94, 2)), Ok((78, 2)), Ok((62, 2)), Ok((46, 2)), - ], - ); - } - #[test] fn drilldown_iterator_works() { let (config, storage) = prepare_for_drilldown(); @@ -562,6 +390,11 @@ mod tests { .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(3, 0)])); + let drilldown_result = key_changes::( + configuration_range(&config, 0), &storage, 0, &AnchorBlockId { hash: Default::default(), number: 7 }, 7, &[42]) + .and_then(Result::from_iter); + assert_eq!(drilldown_result, Ok(vec![(6, 3), (3, 0)])); + let drilldown_result = key_changes::( configuration_range(&config, 0), &storage, 7, &AnchorBlockId { hash: Default::default(), number: 8 }, 8, &[42]) .and_then(Result::from_iter); diff --git a/core/state-machine/src/changes_trie/mod.rs b/core/state-machine/src/changes_trie/mod.rs index a63208c72218f..86313c382eef1 100644 --- a/core/state-machine/src/changes_trie/mod.rs +++ b/core/state-machine/src/changes_trie/mod.rs @@ -41,6 +41,7 @@ mod changes_iterator; mod input; mod prune; mod storage; +mod surface_iterator; pub use self::storage::InMemoryStorage; pub use self::changes_iterator::{key_changes, key_changes_proof, key_changes_proof_check}; diff --git 
a/core/state-machine/src/changes_trie/surface_iterator.rs b/core/state-machine/src/changes_trie/surface_iterator.rs new file mode 100644 index 0000000000000..bc126161e14ec --- /dev/null +++ b/core/state-machine/src/changes_trie/surface_iterator.rs @@ -0,0 +1,222 @@ +// Copyright 2017-2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! The best way to understand how this iterator works is to imagine some 2D terrain that have some mountains +//! (digest changes tries) and valleys (changes tries for regular blocks). There are gems (blocks) beneath the +//! terrain. Given the request to find all gems in the range [X1; X2] this iterator will return **minimal set** +//! of points at the terrain (mountains and valleys() inside this range that have to be drilled down to +//! search for gems. + +use num_traits::One; +use crate::changes_trie::{ConfigurationRange, BlockNumber}; + +/// Returns surface iterator for given range of blocks. 
+pub fn surface_iterator<'a, Number: BlockNumber>( + config: ConfigurationRange<'a, Number>, + max: Number, + begin: Number, + end: Number, +) -> Result, String> { + let (current, current_begin, digest_step, digest_level) = lower_bound_max_digest( + config.clone(), + max.clone(), + begin.clone(), + end, + )?; + Ok(SurfaceIterator { + config, + begin, + max, + current: Some(current), + current_begin, + digest_step, + digest_level, + }) +} + +/// Surface iterator - only traverses top-level digests from given range and tries to find +/// all valid digest changes. +pub struct SurfaceIterator<'a, Number: BlockNumber> { + config: ConfigurationRange<'a, Number>, + begin: Number, + max: Number, + current: Option, + current_begin: Number, + digest_step: u32, + digest_level: u32, +} + +impl<'a, Number: BlockNumber> Iterator for SurfaceIterator<'a, Number> { + type Item = Result<(Number, u32), String>; + + fn next(&mut self) -> Option { + let current = self.current.clone()?; + let digest_level = self.digest_level; + + if current < self.digest_step.into() { + self.current = None; + } else { + let next = current.clone() - self.digest_step.into(); + if next.is_zero() || next < self.begin { + self.current = None; + } else if next > self.current_begin { + self.current = Some(next); + } else { + let max_digest_interval = lower_bound_max_digest( + self.config.clone(), + self.max.clone(), + self.begin.clone(), + next, + ); + let (current, current_begin, digest_step, digest_level) = match max_digest_interval { + Err(err) => return Some(Err(err)), + Ok(range) => range, + }; + + self.current = Some(current); + self.current_begin = current_begin; + self.digest_step = digest_step; + self.digest_level = digest_level; + } + } + + Some(Ok((current, digest_level))) + } +} + +/// Returns parameters of highest level digest block that includes the end of given range +/// and tends to include the whole range. 
+fn lower_bound_max_digest<'a, Number: BlockNumber>( + config: ConfigurationRange<'a, Number>, + max: Number, + begin: Number, + end: Number, +) -> Result<(Number, Number, u32, u32), String> { + if end > max || begin > end { + return Err(format!("invalid changes range: {}..{}/{}", begin, end, max)); + } + + let mut digest_level = 0u32; + let mut digest_step = 1u32; + let mut digest_interval = 0u32; + let mut current = end.clone(); + let mut current_begin = begin.clone(); + if current_begin != current { + while digest_level != config.config.digest_levels { + let new_digest_level = digest_level + 1; + let new_digest_step = digest_step * config.config.digest_interval; + let new_digest_interval = config.config.digest_interval * { + if digest_interval == 0 { 1 } else { digest_interval } + }; + let new_digest_begin = config.zero.clone() + ((current.clone() - One::one() - config.zero.clone()) + / new_digest_interval.into()) * new_digest_interval.into(); + let new_digest_end = new_digest_begin.clone() + new_digest_interval.into(); + let new_current = new_digest_begin.clone() + new_digest_interval.into(); + + if new_digest_end > max { + if begin < new_digest_begin { + current_begin = new_digest_begin; + } + break; + } + + digest_level = new_digest_level; + digest_step = new_digest_step; + digest_interval = new_digest_interval; + current = new_current; + current_begin = new_digest_begin; + + if current_begin <= begin && new_digest_end >= end { + break; + } + } + } + + Ok(( + current, + current_begin, + digest_step, + digest_level, + )) +} + +#[cfg(test)] +mod tests { + use crate::changes_trie::{Configuration}; + use super::*; + + fn configuration_range<'a>(config: &'a Configuration, zero: u64) -> ConfigurationRange<'a, u64> { + ConfigurationRange { + config, + zero, + end: None, + } + } + + #[test] + fn lower_bound_max_digest_works() { + let config = Configuration { digest_interval: 4, digest_levels: 2 }; + + // when config activates at 0 + assert_eq!( + 
lower_bound_max_digest(configuration_range(&config, 0u64), 100_000u64, 20u64, 180u64).unwrap(), + (192, 176, 16, 2), + ); + + // when config activates at 30 + assert_eq!( + lower_bound_max_digest(configuration_range(&config, 30u64), 100_000u64, 20u64, 180u64).unwrap(), + (190, 174, 16, 2), + ); + } + + #[test] + fn surface_iterator_works() { + let config = Configuration { digest_interval: 4, digest_levels: 2 }; + + // when config activates at 0 + assert_eq!( + surface_iterator(configuration_range(&config, 0u64), 100_000u64, 40u64, 180u64).unwrap().collect::>(), + vec![ + Ok((192, 2)), Ok((176, 2)), Ok((160, 2)), Ok((144, 2)), Ok((128, 2)), Ok((112, 2)), + Ok((96, 2)), Ok((80, 2)), Ok((64, 2)), Ok((48, 2)), + ], + ); + + // when config activates at 30 + assert_eq!( + surface_iterator(configuration_range(&config, 30u64), 100_000u64, 40u64, 180u64).unwrap().collect::>(), + vec![ + Ok((190, 2)), Ok((174, 2)), Ok((158, 2)), Ok((142, 2)), Ok((126, 2)), Ok((110, 2)), + Ok((94, 2)), Ok((78, 2)), Ok((62, 2)), Ok((46, 2)), + ], + ); + + // when config activates at 0 AND max block is before next digest + assert_eq!( + surface_iterator(configuration_range(&config, 0u64), 183u64, 40u64, 183u64).unwrap().collect::>(), + vec![ + Ok((183, 0)), Ok((182, 0)), Ok((181, 0)), Ok((180, 1)), + Ok((176, 2)), Ok((160, 2)), Ok((144, 2)), Ok((128, 2)), Ok((112, 2)), + Ok((96, 2)), Ok((80, 2)), Ok((64, 2)), Ok((48, 2)), + ], + ); + } + + #[test] + fn surface_iterator_works_with_skewed_digest() { + } +} From 68527fb42b66dd8920dec1f2dd75f6f6f7c00c14 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 30 Jul 2019 11:52:51 +0300 Subject: [PATCH 20/63] key_changes works with skewed digests --- core/primitives/src/changes_trie.rs | 34 +++++++++++++ .../src/changes_trie/changes_iterator.rs | 50 +++++++++++++++++-- core/state-machine/src/changes_trie/mod.rs | 5 +- .../src/changes_trie/surface_iterator.rs | 34 +++++++++++++ 4 files changed, 118 insertions(+), 5 deletions(-) diff --git 
a/core/primitives/src/changes_trie.rs b/core/primitives/src/changes_trie.rs index 66166c4941cce..6506a7d73baa1 100644 --- a/core/primitives/src/changes_trie.rs +++ b/core/primitives/src/changes_trie.rs @@ -75,6 +75,28 @@ impl ChangesTrieConfiguration { } } + /// Returns max level digest block number that must be created at block <= passed block number. + pub fn prev_max_level_digest_block( + &self, + zero: Number, + block: Number, + ) -> Option + where + Number: Clone + From + PartialEq + ::rstd::ops::Add + ::rstd::ops::Sub + ::rstd::ops::Div + ::rstd::ops::Mul + Zero, + { + if !self.is_digest_build_enabled() { + return None; + } + + let max_digest_interval: Number = self.max_digest_interval().into(); + let max_digests_since_zero = (block.clone() - zero.clone()) / max_digest_interval.clone(); + let last_max_digest_block = zero + max_digests_since_zero * max_digest_interval.clone(); + if last_max_digest_block.is_zero() { + None + } else { + Some(last_max_digest_block) + } + } /// Returns max level digest block number that must be created at block >= passed block number. 
pub fn next_max_level_digest_block( &self, @@ -218,5 +240,17 @@ mod tests { // TODO: more test cases } + #[test] + fn prev_max_level_digest_block_works() { + assert_eq!(config(0, 0).prev_max_level_digest_block(0u64, 16), None); + assert_eq!(config(1, 1).prev_max_level_digest_block(0u64, 16), None); + assert_eq!(config(2, 1).prev_max_level_digest_block(0u64, 16), Some(16)); + assert_eq!(config(4, 1).prev_max_level_digest_block(0u64, 16), Some(16)); + assert_eq!(config(32, 1).prev_max_level_digest_block(0u64, 16), None); + assert_eq!(config(2, 3).prev_max_level_digest_block(0u64, 10), Some(8)); + assert_eq!(config(2, 3).prev_max_level_digest_block(0u64, 8), Some(8)); + // TODO: more test cases + } + // TODO: test that it doesn't panic when zero > block } diff --git a/core/state-machine/src/changes_trie/changes_iterator.rs b/core/state-machine/src/changes_trie/changes_iterator.rs index 725c0f116696f..dd5bfbfe55bf9 100644 --- a/core/state-machine/src/changes_trie/changes_iterator.rs +++ b/core/state-machine/src/changes_trie/changes_iterator.rs @@ -21,11 +21,12 @@ use std::cell::RefCell; use std::collections::VecDeque; use parity_codec::{Decode, Encode}; use hash_db::{HashDB, Hasher}; +use num_traits::Zero; use trie::{Recorder, MemoryDB}; use crate::changes_trie::{AnchorBlockId, ConfigurationRange, RootsStorage, Storage, BlockNumber}; use crate::changes_trie::input::{DigestIndex, ExtrinsicIndex, DigestIndexValue, ExtrinsicIndexValue}; use crate::changes_trie::storage::{TrieBackendAdapter, InMemoryStorage}; -use crate::changes_trie::surface_iterator::{surface_iterator, SurfaceIterator}; +use crate::changes_trie::surface_iterator::{surface_iterator, SurfaceIterator, SKEWED_DIGEST_LEVEL}; use crate::proving_backend::ProvingBackendEssence; use crate::trie_backend_essence::{TrieBackendEssence}; @@ -50,6 +51,7 @@ pub fn key_changes<'a, H: Hasher, Number: BlockNumber>( storage, begin: begin.clone(), end, + config: config.clone(), surface: surface_iterator( config, max, @@ 
-85,6 +87,7 @@ pub fn key_changes_proof<'a, H: Hasher, Number: BlockNumber>( storage, begin: begin.clone(), end, + config: config.clone(), surface: surface_iterator( config, max, @@ -136,6 +139,7 @@ pub fn key_changes_proof_check<'a, H: Hasher, Number: BlockNumber>( storage: &proof_db, begin: begin.clone(), end, + config: config.clone(), surface: surface_iterator( config, max, @@ -164,6 +168,7 @@ pub struct DrilldownIteratorEssence<'a, H, Number> storage: &'a dyn Storage, begin: Number, end: &'a AnchorBlockId, + config: ConfigurationRange<'a, Number>, surface: SurfaceIterator<'a, Number>, extrinsics: VecDeque<(Number, u32)>, @@ -229,10 +234,21 @@ impl<'a, H, Number> DrilldownIteratorEssence<'a, H, Number> // AND digest block changes could also include changes for out-of-range blocks let begin = self.begin.clone(); let end = self.end.number.clone(); + let is_skewed_digest = level == SKEWED_DIGEST_LEVEL; + let config = self.config.clone(); self.blocks.extend(blocks.into_iter() .rev() .filter(|b| level > 1 || (*b >= begin && *b <= end)) - .map(|b| (b, level - 1)) + .map(|b| { + let prev_level = if is_skewed_digest { + config.config.digest_level_at_block(config.zero.clone(), b.clone()) + .map(|(level, _, _)| level) + .unwrap_or_else(|| Zero::zero()) + } else { + level - 1 + }; + (b, prev_level) + }) ); } } @@ -241,7 +257,7 @@ impl<'a, H, Number> DrilldownIteratorEssence<'a, H, Number> } match self.surface.next() { - Some(Ok(block)) => { println!("=== XXX: {}", block.0); self.blocks.push_back(block) }, + Some(Ok(block)) => self.blocks.push_back(block), Some(Err(err)) => return Err(err), None => return Ok(None), } @@ -448,4 +464,32 @@ mod tests { // check that drilldown result is the same as if it was happening at the full node assert_eq!(local_result, Ok(vec![(8, 2), (8, 1), (6, 3), (3, 0)])); } + + #[test] + fn drilldown_iterator_works_with_skewed_digest() { + let config = Configuration { digest_interval: 4, digest_levels: 3 }; + let mut config_range = 
configuration_range(&config, 0); + config_range.end = Some(91); + + // when 4^3 deactivates at block 91: + // last L3 digest has been created at block#64 + // skewed digest covers: + // L2 digests at blocks: 80 + // L1 digests at blocks: 84, 88 + // regular blocks: 89, 90, 91 + let mut input = (1u64..92u64).map(|b| (b, vec![])).collect::>(); + // changed at block#63 and covered by L3 digest at block#64 + input[63 - 1].1.push(InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 63, key: vec![42] }, vec![0])); + input[64 - 1].1.push(InputPair::DigestIndex(DigestIndex { block: 64, key: vec![42] }, vec![63])); + // changed at block#79 and covered by L2 digest at block#80 + skewed digest at block#91 + input[79 - 1].1.push(InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 79, key: vec![42] }, vec![1])); + input[80 - 1].1.push(InputPair::DigestIndex(DigestIndex { block: 80, key: vec![42] }, vec![79])); + input[91 - 1].1.push(InputPair::DigestIndex(DigestIndex { block: 91, key: vec![42] }, vec![80])); + let storage = InMemoryStorage::with_inputs(input); + + let drilldown_result = key_changes::( + config_range, &storage, 0, &AnchorBlockId { hash: Default::default(), number: 91 }, 100_000u64, &[42]) + .and_then(Result::from_iter); + assert_eq!(drilldown_result, Ok(vec![(79, 1), (63, 0)])); + } } diff --git a/core/state-machine/src/changes_trie/mod.rs b/core/state-machine/src/changes_trie/mod.rs index 86313c382eef1..9010af9d00d0c 100644 --- a/core/state-machine/src/changes_trie/mod.rs +++ b/core/state-machine/src/changes_trie/mod.rs @@ -47,6 +47,7 @@ pub use self::storage::InMemoryStorage; pub use self::changes_iterator::{key_changes, key_changes_proof, key_changes_proof_check}; pub use self::prune::{prune, oldest_non_pruned_trie}; +use std::convert::TryInto; use hash_db::Hasher; use crate::backend::Backend; use num_traits::{One, Zero}; @@ -64,7 +65,7 @@ pub trait BlockNumber: Send + Sync + 'static + ::std::fmt::Display + Clone + - From + One + Zero + + From + TryInto + One + 
Zero + PartialEq + Ord + ::std::ops::Add + ::std::ops::Sub + ::std::ops::Mul + ::std::ops::Div + @@ -78,7 +79,7 @@ impl BlockNumber for T where T: Send + Sync + 'static + ::std::fmt::Display + Clone + - From + One + Zero + + From + TryInto + One + Zero + PartialEq + Ord + ::std::ops::Add + ::std::ops::Sub + ::std::ops::Mul + ::std::ops::Div + diff --git a/core/state-machine/src/changes_trie/surface_iterator.rs b/core/state-machine/src/changes_trie/surface_iterator.rs index bc126161e14ec..b8e969b59d9de 100644 --- a/core/state-machine/src/changes_trie/surface_iterator.rs +++ b/core/state-machine/src/changes_trie/surface_iterator.rs @@ -23,6 +23,8 @@ use num_traits::One; use crate::changes_trie::{ConfigurationRange, BlockNumber}; +pub const SKEWED_DIGEST_LEVEL: u32 = 0xFFFFFFFF; // TODO: replace with Option + /// Returns surface iterator for given range of blocks. pub fn surface_iterator<'a, Number: BlockNumber>( config: ConfigurationRange<'a, Number>, @@ -30,6 +32,8 @@ pub fn surface_iterator<'a, Number: BlockNumber>( begin: Number, end: Number, ) -> Result, String> { + // TODO: check that end <= config.end + let (current, current_begin, digest_step, digest_level) = lower_bound_max_digest( config.clone(), max.clone(), @@ -116,6 +120,7 @@ fn lower_bound_max_digest<'a, Number: BlockNumber>( let mut current_begin = begin.clone(); if current_begin != current { while digest_level != config.config.digest_levels { + // try to use next level digest let new_digest_level = digest_level + 1; let new_digest_step = digest_step * config.config.digest_interval; let new_digest_interval = config.config.digest_interval * { @@ -126,6 +131,21 @@ fn lower_bound_max_digest<'a, Number: BlockNumber>( let new_digest_end = new_digest_begin.clone() + new_digest_interval.into(); let new_current = new_digest_begin.clone() + new_digest_interval.into(); + // check if we met skewed digest + if let Some(skewed_digest_end) = config.end.as_ref() { + if new_digest_end > *skewed_digest_end { + let 
skewed_digest_start = config.config.prev_max_level_digest_block(config.zero.clone(), skewed_digest_end.clone()).expect("TODO"); + let skewed_digest_range = (skewed_digest_end.clone() - skewed_digest_start.clone()).try_into().ok().expect("TODO"); + return Ok(( + skewed_digest_end.clone(), + skewed_digest_start, + skewed_digest_range, + SKEWED_DIGEST_LEVEL, + )); + } + } + + // we can't use next level digest if it touches any unknown (> max) blocks if new_digest_end > max { if begin < new_digest_begin { current_begin = new_digest_begin; @@ -133,12 +153,14 @@ fn lower_bound_max_digest<'a, Number: BlockNumber>( break; } + // we can (and will) use this digest digest_level = new_digest_level; digest_step = new_digest_step; digest_interval = new_digest_interval; current = new_current; current_begin = new_digest_begin; + // if current digest covers the whole range => no need to use next level digest if current_begin <= begin && new_digest_end >= end { break; } @@ -218,5 +240,17 @@ mod tests { #[test] fn surface_iterator_works_with_skewed_digest() { + let config = Configuration { digest_interval: 4, digest_levels: 2 }; + let mut config_range = configuration_range(&config, 0u64); + + // when config activates at 0 AND ends at 170 + config_range.end = Some(170); + assert_eq!( + surface_iterator(config_range, 100_000u64, 40u64, 170u64).unwrap().collect::>(), + vec![ + Ok((170, SKEWED_DIGEST_LEVEL)), Ok((160, 2)), Ok((144, 2)), Ok((128, 2)), Ok((112, 2)), + Ok((96, 2)), Ok((80, 2)), Ok((64, 2)), Ok((48, 2)), + ], + ); } } From 5a839e5a664e662d82410d140b6f751b47163d60 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 30 Jul 2019 12:55:21 +0300 Subject: [PATCH 21/63] fix client build --- core/client/src/client.rs | 19 ++++++++++++++----- core/client/src/light/fetcher.rs | 13 +++++++++---- core/state-machine/src/lib.rs | 1 + 3 files changed, 24 insertions(+), 9 deletions(-) diff --git a/core/client/src/client.rs b/core/client/src/client.rs index 
2be6b328e9515..da0fa95be8f34 100644 --- a/core/client/src/client.rs +++ b/core/client/src/client.rs @@ -53,7 +53,8 @@ use primitives::storage::well_known_keys; use parity_codec::{Encode, Decode}; use state_machine::{ DBValue, Backend as StateBackend, CodeExecutor, ChangesTrieAnchorBlockId, - ExecutionStrategy, ExecutionManager, prove_read, prove_child_read, + ChangesTrieConfigurationRange, ExecutionStrategy, ExecutionManager, + prove_read, prove_child_read, ChangesTrieRootsStorage, ChangesTrieStorage, key_changes, key_changes_proof, OverlayedChanges, NeverOffchainExt, }; @@ -538,10 +539,14 @@ impl Client where let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?; let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?; + let config_range = ChangesTrieConfigurationRange { + config: &config, + zero: activation_block, + end: None, // TODO: wrong + }; key_changes::( - &config, + config_range, storage.storage(), - activation_block, first, &ChangesTrieAnchorBlockId { hash: convert_hash(&last_hash), @@ -645,10 +650,14 @@ impl Client where .expect_block_number_from_id(&BlockId::Hash(first))?; let last_number = self.backend.blockchain() .expect_block_number_from_id(&BlockId::Hash(last))?; + let config_range = ChangesTrieConfigurationRange { + config: &config, + zero: activation_block, + end: None, // TODO: wrong + }; let key_changes_proof = key_changes_proof::( - &config, + config_range, &recording_storage, - activation_block, first_number, &ChangesTrieAnchorBlockId { hash: convert_hash(&last), diff --git a/core/client/src/light/fetcher.rs b/core/client/src/light/fetcher.rs index 1c94d1e47d64a..166ff7587c054 100644 --- a/core/client/src/light/fetcher.rs +++ b/core/client/src/light/fetcher.rs @@ -29,8 +29,9 @@ use runtime_primitives::traits::{ SimpleArithmetic, CheckedConversion, Zero, }; use state_machine::{CodeExecutor, ChangesTrieRootsStorage, ChangesTrieAnchorBlockId, - TrieBackend, read_proof_check, key_changes_proof_check, 
- create_proof_check_backend_storage, read_child_proof_check}; + ChangesTrieConfigurationRange, TrieBackend, read_proof_check, key_changes_proof_check, + create_proof_check_backend_storage, read_child_proof_check, +}; use crate::cht; use crate::error::{Error as ClientError, Result as ClientResult}; @@ -284,14 +285,18 @@ impl, F> LightDataChecker( - &request.changes_trie_config, + config_range, &RootsStorage { roots: (request.tries_roots.0, &request.tries_roots.2), prev_roots: remote_roots, }, remote_proof, - Zero::zero(), // TODO: wrong request.first_block.0, &ChangesTrieAnchorBlockId { hash: convert_hash(&request.last_block.1), diff --git a/core/state-machine/src/lib.rs b/core/state-machine/src/lib.rs index e20c1546995b1..1fa37504288f1 100644 --- a/core/state-machine/src/lib.rs +++ b/core/state-machine/src/lib.rs @@ -47,6 +47,7 @@ pub use changes_trie::{ Storage as ChangesTrieStorage, RootsStorage as ChangesTrieRootsStorage, InMemoryStorage as InMemoryChangesTrieStorage, + ConfigurationRange as ChangesTrieConfigurationRange, key_changes, key_changes_proof, key_changes_proof_check, prune as prune_changes_tries, oldest_non_pruned_trie as oldest_non_pruned_changes_trie, From 97299123c1911a1589b3ed88562dbb3a3412e7d7 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 30 Jul 2019 12:55:38 +0300 Subject: [PATCH 22/63] add test for NeverPrune --- core/client/db/src/cache/list_cache.rs | 65 +++++++++++++++----------- 1 file changed, 39 insertions(+), 26 deletions(-) diff --git a/core/client/db/src/cache/list_cache.rs b/core/client/db/src/cache/list_cache.rs index f38a811a135de..a77ada9e74e31 100644 --- a/core/client/db/src/cache/list_cache.rs +++ b/core/client/db/src/cache/list_cache.rs @@ -1358,32 +1358,45 @@ pub mod tests { } #[test] - fn ancient_entries_are_pruned() { - let cache = ListCache::new(DummyStorage::new() - .with_id(10, H256::from_low_u64_be(10)) - .with_id(20, H256::from_low_u64_be(20)) - .with_id(30, H256::from_low_u64_be(30)) - 
.with_entry(test_id(10), StorageEntry { prev_valid_from: None, value: 10 }) - .with_entry(test_id(20), StorageEntry { prev_valid_from: Some(test_id(10)), value: 20 }) - .with_entry(test_id(30), StorageEntry { prev_valid_from: Some(test_id(20)), value: 30 }), - PruningStrategy::ByDepth(10), test_id(9)); - let mut tx = DummyTransaction::new(); + fn ancient_entries_are_pruned_when_pruning_enabled() { + fn do_test(strategy: PruningStrategy) { + let cache = ListCache::new(DummyStorage::new() + .with_id(10, H256::from_low_u64_be(10)) + .with_id(20, H256::from_low_u64_be(20)) + .with_id(30, H256::from_low_u64_be(30)) + .with_entry(test_id(10), StorageEntry { prev_valid_from: None, value: 10 }) + .with_entry(test_id(20), StorageEntry { prev_valid_from: Some(test_id(10)), value: 20 }) + .with_entry(test_id(30), StorageEntry { prev_valid_from: Some(test_id(20)), value: 30 }), + strategy, test_id(9)); + let mut tx = DummyTransaction::new(); + + // when finalizing entry #10: no entries pruned + cache.prune_finalized_entries(&mut tx, &test_id(10)); + assert!(tx.removed_entries().is_empty()); + assert!(tx.inserted_entries().is_empty()); + // when finalizing entry #19: no entries pruned + cache.prune_finalized_entries(&mut tx, &test_id(19)); + assert!(tx.removed_entries().is_empty()); + assert!(tx.inserted_entries().is_empty()); + // when finalizing entry #20: no entries pruned + cache.prune_finalized_entries(&mut tx, &test_id(20)); + assert!(tx.removed_entries().is_empty()); + assert!(tx.inserted_entries().is_empty()); + // when finalizing entry #30: entry 10 pruned + entry 20 is truncated (if pruning is enabled) + cache.prune_finalized_entries(&mut tx, &test_id(30)); + match strategy { + PruningStrategy::NeverPrune => { + assert!(tx.removed_entries().is_empty()); + assert!(tx.inserted_entries().is_empty()); + }, + PruningStrategy::ByDepth(_) => { + assert_eq!(*tx.removed_entries(), vec![test_id(10).hash].into_iter().collect()); + assert_eq!(*tx.inserted_entries(), 
vec![test_id(20).hash].into_iter().collect()); + }, + } + } - // when finalizing entry #10: no entries pruned - cache.prune_finalized_entries(&mut tx, &test_id(10)); - assert!(tx.removed_entries().is_empty()); - assert!(tx.inserted_entries().is_empty()); - // when finalizing entry #19: no entries pruned - cache.prune_finalized_entries(&mut tx, &test_id(19)); - assert!(tx.removed_entries().is_empty()); - assert!(tx.inserted_entries().is_empty()); - // when finalizing entry #20: no entries pruned - cache.prune_finalized_entries(&mut tx, &test_id(20)); - assert!(tx.removed_entries().is_empty()); - assert!(tx.inserted_entries().is_empty()); - // when finalizing entry #30: entry 10 pruned + entry 20 is truncated - cache.prune_finalized_entries(&mut tx, &test_id(30)); - assert_eq!(*tx.removed_entries(), vec![test_id(10).hash].into_iter().collect()); - assert_eq!(*tx.inserted_entries(), vec![test_id(20).hash].into_iter().collect()); + do_test(PruningStrategy::ByDepth(10)); + do_test(PruningStrategy::NeverPrune) } } From bca439a8df730a80b3d03ffe57077841e9145dab Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 30 Jul 2019 14:02:40 +0300 Subject: [PATCH 23/63] fix TODO --- core/client/src/light/fetcher.rs | 60 ++++++++++++++------------ core/network/src/protocol/on_demand.rs | 2 +- 2 files changed, 34 insertions(+), 28 deletions(-) diff --git a/core/client/src/light/fetcher.rs b/core/client/src/light/fetcher.rs index 166ff7587c054..a9d3cf1269c79 100644 --- a/core/client/src/light/fetcher.rs +++ b/core/client/src/light/fetcher.rs @@ -26,7 +26,7 @@ use parity_codec::{Decode, Encode}; use primitives::{ChangesTrieConfiguration, convert_hash}; use runtime_primitives::traits::{ Block as BlockT, Header as HeaderT, Hash, HashFor, NumberFor, - SimpleArithmetic, CheckedConversion, Zero, + SimpleArithmetic, CheckedConversion, }; use state_machine::{CodeExecutor, ChangesTrieRootsStorage, ChangesTrieAnchorBlockId, ChangesTrieConfigurationRange, TrieBackend, read_proof_check, 
key_changes_proof_check, @@ -95,8 +95,8 @@ pub struct RemoteReadChildRequest { /// Remote key changes read request. #[derive(Clone, Debug, PartialEq, Eq)] pub struct RemoteChangesRequest { - /// Changes trie configuration. - pub changes_trie_config: ChangesTrieConfiguration, + /// All changes trie configurations that are valid within [first_block; last_block]. + pub changes_trie_configs: Vec<(Header::Number, Option, ChangesTrieConfiguration)>, /// Query changes from range of blocks, starting (and including) with this hash... pub first_block: (Header::Number, Header::Hash), /// ...ending (and including) with this hash. Should come after first_block and @@ -285,26 +285,32 @@ impl, F> LightDataChecker( - config_range, - &RootsStorage { - roots: (request.tries_roots.0, &request.tries_roots.2), - prev_roots: remote_roots, - }, - remote_proof, - request.first_block.0, - &ChangesTrieAnchorBlockId { - hash: convert_hash(&request.last_block.1), - number: request.last_block.0, - }, - remote_max_block, - &request.key) - .map_err(|err| ClientError::ChangesTrieAccessFailed(err)) + let mut key_changes = Vec::new(); + for (config_zero, config_end, config) in &request.changes_trie_configs { + let config_range = ChangesTrieConfigurationRange { + config, + zero: config_zero.clone(), + end: config_end.clone(), + }; + let key_changes_range = key_changes_proof_check::( + config_range, + &RootsStorage { + roots: (request.tries_roots.0, &request.tries_roots.2), + prev_roots: &remote_roots, + }, + remote_proof.clone(), // TODO: use prepared MDB instead of creating it on every loop iter + request.first_block.0, + &ChangesTrieAnchorBlockId { + hash: convert_hash(&request.last_block.1), + number: request.last_block.0, + }, + remote_max_block, + &request.key) + .map_err(|err| ClientError::ChangesTrieAccessFailed(err))?; + key_changes.extend(key_changes_range); + } + + Ok(key_changes) } /// Check CHT-based proof for changes tries roots. 
@@ -448,7 +454,7 @@ impl FetchChecker for LightDataChecker as a changes trie roots storage. struct RootsStorage<'a, Number: SimpleArithmetic, Hash: 'a> { roots: (Number, &'a [Hash]), - prev_roots: BTreeMap, + prev_roots: &'a BTreeMap, } impl<'a, H, Number, Hash> ChangesTrieRootsStorage for RootsStorage<'a, Number, Hash> @@ -690,7 +696,7 @@ pub mod tests { // check proof on local client let local_roots_range = local_roots.clone()[(begin - 1) as usize..].to_vec(); let request = RemoteChangesRequest::
{ - changes_trie_config: runtime::changes_trie_config(), + changes_trie_configs: vec![(0, None, runtime::changes_trie_config())], first_block: (begin, begin_hash), last_block: (end, end_hash), max_block: (max, max_hash), @@ -744,7 +750,7 @@ pub mod tests { // check proof on local client let request = RemoteChangesRequest::
{ - changes_trie_config: runtime::changes_trie_config(), + changes_trie_configs: vec![(0, None, runtime::changes_trie_config())], first_block: (1, b1), last_block: (4, b4), max_block: (4, b4), @@ -784,7 +790,7 @@ pub mod tests { let local_roots_range = local_roots.clone()[(begin - 1) as usize..].to_vec(); let request = RemoteChangesRequest::
{ - changes_trie_config: runtime::changes_trie_config(), + changes_trie_configs: vec![(0, None, runtime::changes_trie_config())], first_block: (begin, begin_hash), last_block: (end, end_hash), max_block: (max, max_hash), diff --git a/core/network/src/protocol/on_demand.rs b/core/network/src/protocol/on_demand.rs index 76c926df107c8..4bf6ca75b3ab2 100644 --- a/core/network/src/protocol/on_demand.rs +++ b/core/network/src/protocol/on_demand.rs @@ -1063,7 +1063,7 @@ pub mod tests { let (tx, response) = oneshot::channel(); on_demand.add_request(&mut network_interface, RequestData::RemoteChanges(RemoteChangesRequest { - changes_trie_config: changes_trie_config(), + changes_trie_configs: vec![(0, None, changes_trie_config())], first_block: (1, Default::default()), last_block: (100, Default::default()), max_block: (100, Default::default()), From 383795aa839852818e45decb47dff072957593d6 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 30 Jul 2019 15:32:20 +0300 Subject: [PATCH 24/63] fixed some TODOs --- core/client/db/src/cache/list_cache.rs | 19 +-- core/client/db/src/cache/mod.rs | 9 +- core/client/db/src/changes_tries_storage.rs | 4 +- core/client/db/src/light.rs | 2 +- core/client/src/backend.rs | 4 +- core/client/src/blockchain.rs | 6 +- core/client/src/client.rs | 150 ++++++++++++-------- core/client/src/light/fetcher.rs | 8 +- 8 files changed, 125 insertions(+), 77 deletions(-) diff --git a/core/client/db/src/cache/list_cache.rs b/core/client/db/src/cache/list_cache.rs index a77ada9e74e31..b75a1148138fd 100644 --- a/core/client/db/src/cache/list_cache.rs +++ b/core/client/db/src/cache/list_cache.rs @@ -143,7 +143,10 @@ impl> ListCache } /// Get value valid at block. 
- pub fn value_at_block(&self, at: &ComplexBlockId) -> ClientResult, T)>> { + pub fn value_at_block( + &self, + at: &ComplexBlockId, + ) -> ClientResult, Option>, T)>> { let head = if at.number <= self.best_finalized_block.number { // if the block is older than the best known finalized block // => we're should search for the finalized value @@ -177,7 +180,7 @@ impl> ListCache match head { Some(head) => head.search_best_before(&self.storage, at.number) - .map(|e| e.map(|e| (e.0.valid_from, e.0.value))), + .map(|e| e.map(|e| (e.0.valid_from, e.1, e.0.value))), None => Ok(None), } } @@ -697,7 +700,7 @@ pub mod tests { .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }) .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), PruningStrategy::ByDepth(1024), test_id(100) - ).value_at_block(&test_id(50)).unwrap(), Some((test_id(30), 30))); + ).value_at_block(&test_id(50)).unwrap(), Some((test_id(30), Some(test_id(100)), 30))); // when block is the best finalized block AND value is some // ---> [100] assert_eq!(ListCache::new( @@ -707,7 +710,7 @@ pub mod tests { .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }) .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), PruningStrategy::ByDepth(1024), test_id(100) - ).value_at_block(&test_id(100)).unwrap(), Some((test_id(100), 100))); + ).value_at_block(&test_id(100)).unwrap(), Some((test_id(100), None, 100))); // when block is parallel to the best finalized block // ---- 100 // ---> [100] @@ -728,7 +731,7 @@ pub mod tests { .with_id(50, H256::from_low_u64_be(50)) .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }), PruningStrategy::ByDepth(1024), test_id(100) - ).value_at_block(&test_id(200)).unwrap(), Some((test_id(100), 100))); + ).value_at_block(&test_id(200)).unwrap(), Some((test_id(100), None, 100))); // when block is later than last finalized block AND there are 
no matching forks // AND block is connected to finalized block AND finalized value is Some @@ -744,7 +747,7 @@ pub mod tests { .with_header(test_header(4)) .with_header(fork_header(0, 2, 3)), PruningStrategy::ByDepth(1024), test_id(2) - ).value_at_block(&fork_id(0, 2, 3)).unwrap(), Some((correct_id(2), 2))); + ).value_at_block(&fork_id(0, 2, 3)).unwrap(), Some((correct_id(2), None, 2))); // when block is later than last finalized block AND there are no matching forks // AND block is not connected to finalized block // --- 2 --- 3 @@ -774,7 +777,7 @@ pub mod tests { .with_header(test_header(4)) .with_header(test_header(5)), PruningStrategy::ByDepth(1024), test_id(2) - ).value_at_block(&correct_id(5)).unwrap(), Some((correct_id(4), 4))); + ).value_at_block(&correct_id(5)).unwrap(), Some((correct_id(4), None, 4))); // when block is later than last finalized block AND it does not fits unfinalized fork // AND it is connected to the finalized block AND finalized value is Some // ---> [2] ----------> [4] @@ -789,7 +792,7 @@ pub mod tests { .with_header(test_header(4)) .with_header(fork_header(0, 2, 3)), PruningStrategy::ByDepth(1024), test_id(2) - ).value_at_block(&fork_id(0, 2, 3)).unwrap(), Some((correct_id(2), 2))); + ).value_at_block(&fork_id(0, 2, 3)).unwrap(), Some((correct_id(2), None, 2))); } #[test] diff --git a/core/client/db/src/cache/mod.rs b/core/client/db/src/cache/mod.rs index 34992b5fbcf0b..3cc547bfe381c 100644 --- a/core/client/db/src/cache/mod.rs +++ b/core/client/db/src/cache/mod.rs @@ -294,7 +294,11 @@ impl BlockchainCache for DbCacheSync { Ok(()) } - fn get_at(&self, key: &CacheKeyId, at: &BlockId) -> Option<(NumberFor, Block::Hash, Vec)> { + fn get_at( + &self, + key: &CacheKeyId, + at: &BlockId, + ) -> Option<((NumberFor, Block::Hash), Option<(NumberFor, Block::Hash)>, Vec)> { let cache = self.0.read(); let storage = cache.cache_at.get(key)?.storage(); let db = storage.db(); @@ -320,7 +324,8 @@ impl BlockchainCache for DbCacheSync { 
cache.cache_at.get(key)? .value_at_block(&at) - .map(|block_and_value| block_and_value.map(|(block, value)| (block.number, block.hash, value))) + .map(|block_and_value| block_and_value.map(|(begin_block, end_block, value)| + ((begin_block.number, begin_block.hash), end_block.map(|end_block| (end_block.number, end_block.hash)), value))) .ok()? } } diff --git a/core/client/db/src/changes_tries_storage.rs b/core/client/db/src/changes_tries_storage.rs index 08d2f085ba186..77ea78cf16e26 100644 --- a/core/client/db/src/changes_tries_storage.rs +++ b/core/client/db/src/changes_tries_storage.rs @@ -157,7 +157,7 @@ impl> DbChangesTrieStorage { }; // prune changes tries that are created using newest configuration - let (activation_num, _, newest_config) = self.configuration_at(&BlockId::Hash(parent_hash))?; + let ((activation_num, _), _, newest_config) = self.configuration_at(&BlockId::Hash(parent_hash))?; if let Some(config) = newest_config { state_machine::prune_changes_tries( activation_num, @@ -189,7 +189,7 @@ where fn configuration_at( &self, at: &BlockId, - ) -> ClientResult<(NumberFor, Block::Hash, Option)> { + ) -> ClientResult<((NumberFor, Block::Hash), Option<(NumberFor, Block::Hash)>, Option)> { self.cache .get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, at) .and_then(|(number, hash, encoded)| Decode::decode(&mut &encoded[..]).map(|config| (number, hash, config))) diff --git a/core/client/db/src/light.rs b/core/client/db/src/light.rs index 67569b0cbbbc8..4ec0521afa84b 100644 --- a/core/client/db/src/light.rs +++ b/core/client/db/src/light.rs @@ -1111,6 +1111,6 @@ pub(crate) mod tests { db.cache().initialize(b"test", vec![42]).unwrap(); // after genesis is inserted + cache is initialized => Some - assert_eq!(db.cache().get_at(b"test", &BlockId::Number(0)), Some((0, genesis_hash.unwrap(), vec![42]))); + assert_eq!(db.cache().get_at(b"test", &BlockId::Number(0)), Some(((0, genesis_hash.unwrap()), None, vec![42]))); } } diff --git a/core/client/src/backend.rs 
b/core/client/src/backend.rs index 6b86a3fd55fe9..84faf68fee8d8 100644 --- a/core/client/src/backend.rs +++ b/core/client/src/backend.rs @@ -225,8 +225,8 @@ pub trait PrunableStateChangesTrieStorage: fn storage(&self) -> &dyn StateChangesTrieStorage>; /// Get coniguration at given block. fn configuration_at(&self, at: &BlockId) -> error::Result<( - NumberFor, - Block::Hash, + (NumberFor, Block::Hash), + Option<(NumberFor, Block::Hash)>, Option, )>; /// Get number block of oldest, non-pruned changes trie. diff --git a/core/client/src/blockchain.rs b/core/client/src/blockchain.rs index 0fbcfc2323f23..f10f573604ea6 100644 --- a/core/client/src/blockchain.rs +++ b/core/client/src/blockchain.rs @@ -106,7 +106,11 @@ pub trait Cache: Send + Sync { fn initialize(&self, key: &well_known_cache_keys::Id, value_at_genesis: Vec) -> Result<()>; /// For given key and block, returns cached value actual at this block AND block where this value /// has been originally set. - fn get_at(&self, key: &well_known_cache_keys::Id, block: &BlockId) -> Option<(NumberFor, Block::Hash, Vec)>; + fn get_at(&self, key: &well_known_cache_keys::Id, block: &BlockId) -> Option<( + (NumberFor, Block::Hash), + Option<(NumberFor, Block::Hash)>, + Vec, + )>; } /// Blockchain info diff --git a/core/client/src/client.rs b/core/client/src/client.rs index da0fa95be8f34..d812831b46f33 100644 --- a/core/client/src/client.rs +++ b/core/client/src/client.rs @@ -17,7 +17,7 @@ //! 
Substrate Client use std::{ - marker::PhantomData, collections::{HashSet, BTreeMap, HashMap}, sync::Arc, + marker::PhantomData, collections::{HashSet, BTreeMap, HashMap, VecDeque}, sync::Arc, panic::UnwindSafe, result, cell::RefCell, rc::Rc, }; use crate::error::Error; @@ -510,17 +510,22 @@ impl Client where first: NumberFor, last: BlockId, ) -> error::Result, BlockId)>> { - let (activation_block, config, storage) = match self.require_changes_trie().ok() { - Some((activation_block, config, storage)) => (activation_block, config, storage), - None => return Ok(None), - }; - let last_num = self.backend.blockchain().expect_block_number_from_id(&last)?; - if first > last_num { + let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?; + let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?; + if first > last_number { return Err(error::Error::ChangesTrieAccessFailed("Invalid changes trie range".into())); } + + let (storage, mut configs) = match self.require_changes_trie(first, last_hash).ok() { + Some((storage, configs)) => (storage, configs), + None => return Ok(None), + }; + + // TODO: we only work with the last config range here!!! 
+ let (config_zero_number, _, config) = configs.pop_back().expect("TODO"); let finalized_number = self.backend.blockchain().info().finalized_number; - let oldest = storage.oldest_changes_trie_block(activation_block, config, finalized_number); - let oldest = ::std::cmp::max(activation_block + One::one(), oldest); + let oldest = storage.oldest_changes_trie_block(config_zero_number, config, finalized_number); + let oldest = ::std::cmp::max(config_zero_number + One::one(), oldest); let first = ::std::cmp::max(first, oldest); Ok(Some((first, last))) } @@ -535,27 +540,33 @@ impl Client where last: BlockId, key: &StorageKey ) -> error::Result, u32)>> { - let (activation_block, config, storage) = self.require_changes_trie()?; let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?; let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?; + let (storage, configs) = self.require_changes_trie(first, last_hash)?; + + let mut result = Vec::new(); + for (config_zero, config_end, config) in configs { + let config_range = ChangesTrieConfigurationRange { + config: &config, + zero: config_zero.clone(), + end: config_end.clone(), + }; + let result_range: Vec<(NumberFor, u32)> = key_changes::( + config_range, + storage.storage(), + first, + &ChangesTrieAnchorBlockId { + hash: convert_hash(&last_hash), + number: last_number, + }, + self.backend.blockchain().info().best_number, + &key.0) + .and_then(|r| r.map(|r| r.map(|(block, tx)| (block, tx))).collect::>()) + .map_err(|err| error::Error::ChangesTrieAccessFailed(err))?; + result.extend(result_range); + } - let config_range = ChangesTrieConfigurationRange { - config: &config, - zero: activation_block, - end: None, // TODO: wrong - }; - key_changes::( - config_range, - storage.storage(), - first, - &ChangesTrieAnchorBlockId { - hash: convert_hash(&last_hash), - number: last_number, - }, - self.backend.blockchain().info().best_number, - &key.0) - .and_then(|r| r.map(|r| r.map(|(block, tx)| 
(block, tx))).collect::>()) - .map_err(|err| error::Error::ChangesTrieAccessFailed(err)) + Ok(result) } /// Get proof for computation of (block, extrinsic) pairs where key has been changed at given blocks range. @@ -631,7 +642,9 @@ impl Client where } } - let (activation_block, config, storage) = self.require_changes_trie()?; + let first_number = self.backend.blockchain() + .expect_block_number_from_id(&BlockId::Hash(first))?; + let (storage, configs) = self.require_changes_trie(first_number, last)?; let min_number = self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(min))?; let recording_storage = AccessedRootsRecorder:: { @@ -646,27 +659,29 @@ impl Client where ); // fetch key changes proof - let first_number = self.backend.blockchain() - .expect_block_number_from_id(&BlockId::Hash(first))?; - let last_number = self.backend.blockchain() - .expect_block_number_from_id(&BlockId::Hash(last))?; - let config_range = ChangesTrieConfigurationRange { - config: &config, - zero: activation_block, - end: None, // TODO: wrong - }; - let key_changes_proof = key_changes_proof::( - config_range, - &recording_storage, - first_number, - &ChangesTrieAnchorBlockId { - hash: convert_hash(&last), - number: last_number, - }, - max_number, - &key.0 - ) - .map_err(|err| error::Error::from(error::Error::ChangesTrieAccessFailed(err)))?; + let mut proof = Vec::new(); + for (config_zero, config_end, config) in configs { + let last_number = self.backend.blockchain() + .expect_block_number_from_id(&BlockId::Hash(last))?; + let config_range = ChangesTrieConfigurationRange { + config: &config, + zero: config_zero, + end: config_end, + }; + let proof_range = key_changes_proof::( + config_range, + &recording_storage, + first_number, + &ChangesTrieAnchorBlockId { + hash: convert_hash(&last), + number: last_number, + }, + max_number, + &key.0 + ) + .map_err(|err| error::Error::from(error::Error::ChangesTrieAccessFailed(err)))?; + proof.extend(proof_range); + } // now gather proofs 
for all changes tries roots that were touched during key_changes_proof // execution AND are unknown (i.e. replaced with CHT) to the requester @@ -675,7 +690,7 @@ impl Client where Ok(ChangesProof { max_block: max_number, - proof: key_changes_proof, + proof, roots: roots.into_iter().map(|(n, h)| (n, convert_hash(&h))).collect(), roots_proof, }) @@ -721,18 +736,39 @@ impl Client where Ok(proof) } - /// Returns changes trie configuration and storage or an error if it is not supported. - fn require_changes_trie(&self) -> error::Result<(NumberFor, ChangesTrieConfiguration, &PrunableStateChangesTrieStorage)> { - let best_block = self.backend.blockchain().info().best_hash; + /// Returns changes trie storage and all configurations that have been active in the range [first; last]. + /// + /// Fails if or an error if it is not supported. + fn require_changes_trie( + &self, + first: NumberFor, + last: Block::Hash, + ) -> error::Result<( + &PrunableStateChangesTrieStorage, + VecDeque<(NumberFor, Option>, ChangesTrieConfiguration)>, + )> { let storage = match self.backend.changes_trie_storage() { Some(storage) => storage, None => return Err(error::Error::ChangesTriesNotSupported), }; - let (activation_block, _, config) = storage.configuration_at(&BlockId::Hash(best_block))?; - match config { - Some(config) => Ok((activation_block, config, storage)), - None => Err(error::Error::ChangesTriesNotSupported.into()), + + let mut configs = VecDeque::with_capacity(1); + let mut current = last; + loop { + let ((config_zero_number, config_zero_hash), config_end, config) = storage.configuration_at(&BlockId::Hash(current))?; + match config { + Some(config) => configs.push_front((config_zero_number, config_end.map(|(config_end_number, _)| config_end_number), config)), + None => return Err(error::Error::ChangesTriesNotSupported), + } + + if config_zero_number < first { + break; + } + + current = config_zero_hash; } + + Ok((storage, configs)) } /// Create a new block, built on the head of the 
chain. diff --git a/core/client/src/light/fetcher.rs b/core/client/src/light/fetcher.rs index a9d3cf1269c79..4b71dfc169539 100644 --- a/core/client/src/light/fetcher.rs +++ b/core/client/src/light/fetcher.rs @@ -285,14 +285,14 @@ impl, F> LightDataChecker( + let result_range = key_changes_proof_check::( config_range, &RootsStorage { roots: (request.tries_roots.0, &request.tries_roots.2), @@ -307,10 +307,10 @@ impl, F> LightDataChecker Date: Wed, 31 Jul 2019 14:15:28 +0300 Subject: [PATCH 25/63] more tests --- core/client/src/backend.rs | 31 ++-- core/client/src/call_executor.rs | 19 ++- core/client/src/client.rs | 132 +++++++++++++++--- core/client/src/genesis.rs | 6 +- core/client/src/light/call_executor.rs | 3 + core/client/src/light/fetcher.rs | 9 +- core/primitives/src/changes_trie.rs | 5 + .../src/changes_trie/changes_iterator.rs | 18 +-- .../src/changes_trie/surface_iterator.rs | 11 +- .../client/src/block_builder_ext.rs | 13 ++ core/test-runtime/client/src/lib.rs | 17 +-- core/test-runtime/src/genesismap.rs | 7 +- core/test-runtime/src/lib.rs | 13 +- core/test-runtime/src/system.rs | 31 +++- 14 files changed, 229 insertions(+), 86 deletions(-) diff --git a/core/client/src/backend.rs b/core/client/src/backend.rs index 84faf68fee8d8..2d45efd06c1ab 100644 --- a/core/client/src/backend.rs +++ b/core/client/src/backend.rs @@ -18,10 +18,9 @@ use std::collections::HashMap; use crate::error; -use parity_codec::Decode; -use primitives::{storage::well_known_keys::CHANGES_TRIE_CONFIG, ChangesTrieConfiguration}; +use primitives::ChangesTrieConfiguration; use runtime_primitives::{generic::BlockId, Justification, StorageOverlay, ChildrenStorageOverlay}; -use runtime_primitives::traits::{Block as BlockT, Zero, NumberFor}; +use runtime_primitives::traits::{Block as BlockT, NumberFor}; use state_machine::backend::Backend as StateBackend; use state_machine::{ChangesTrieStorage as StateChangesTrieStorage, ChangesTrieState}; use crate::blockchain::well_known_cache_keys; @@ 
-256,29 +255,21 @@ where } /// Return changes tries state at given block. -pub fn changes_tries_state_at_block<'a, B: Backend, Block: BlockT, H: Hasher>( - backend: &'a B, +pub fn changes_tries_state_at_block<'a, Block: BlockT, H: Hasher>( block: &BlockId, + maybe_storage: Option<&'a dyn PrunableStateChangesTrieStorage>, ) -> error::Result>>> where H: Hasher, { - let changes_trie_storage = match backend.changes_trie_storage() { - Some(changes_trie_storage) => changes_trie_storage.storage(), + let storage = match maybe_storage { + Some(storage) => storage, None => return Ok(None), }; - let state = backend.state_at(*block)?; - changes_tries_state_at_state::<_, Block, _>(&state, changes_trie_storage) -} - -/// Return changes tries state at given state. -pub fn changes_tries_state_at_state<'a, S: StateBackend, Block: BlockT, H: Hasher>( - state: &S, - storage: &'a dyn StateChangesTrieStorage>, -) -> error::Result>>> { - Ok(state.storage(CHANGES_TRIE_CONFIG) - .map_err(|e| error::Error::from_state(Box::new(e)))? 
- .and_then(|v| Decode::decode(&mut &v[..])) - .map(|config| ChangesTrieState::new(config, Zero::zero(), storage))) + let ((zero, _), _, config) = storage.configuration_at(block)?; + match config { + Some(config) => Ok(Some(ChangesTrieState::new(config, zero, storage.storage()))), + None => Ok(None), + } } diff --git a/core/client/src/call_executor.rs b/core/client/src/call_executor.rs index 5ea8c142dea3b..d4b0479e384e8 100644 --- a/core/client/src/call_executor.rs +++ b/core/client/src/call_executor.rs @@ -104,6 +104,7 @@ where NC: FnOnce() -> result::Result + UnwindSafe, >(&self, state: &S, + state_block: &BlockId, overlay: &mut OverlayedChanges, method: &str, call_data: &[u8], @@ -191,7 +192,7 @@ where let state = self.backend.state_at(*id)?; let return_data = state_machine::new( &state, - backend::changes_tries_state_at_block(&*self.backend, id)?, + backend::changes_tries_state_at_block(id, self.backend.changes_trie_storage())?, side_effects_handler, &mut changes, &self.executor, @@ -240,7 +241,7 @@ where } let mut state = self.backend.state_at(*at)?; - let changes_trie_state = backend::changes_tries_state_at_block(&*self.backend, at)?; + let changes_trie_state = backend::changes_tries_state_at_block(at, self.backend.changes_trie_storage())?; match recorder { Some(recorder) => { @@ -294,7 +295,7 @@ where fn runtime_version(&self, id: &BlockId) -> error::Result { let mut overlay = OverlayedChanges::default(); let state = self.backend.state_at(*id)?; - let changes_trie_state = backend::changes_tries_state_at_block(&*self.backend, id)?; + let changes_trie_state = backend::changes_tries_state_at_block(id, self.backend.changes_trie_storage())?; let mut ext = Ext::new(&mut overlay, &state, changes_trie_state.as_ref(), NeverOffchainExt::new()); self.executor.runtime_version(&mut ext).ok_or(error::Error::VersionInvalid.into()) } @@ -311,6 +312,7 @@ where NC: FnOnce() -> result::Result + UnwindSafe, >(&self, state: &S, + state_block: &BlockId, changes: &mut 
OverlayedChanges, method: &str, call_data: &[u8], @@ -322,13 +324,10 @@ where (S::Transaction, ::Out), Option>, )> { - let changes_trie_state = match self.backend.changes_trie_storage() { - Some(changes_trie_storage) => backend::changes_tries_state_at_state::<_, Block, _>( - state, - changes_trie_storage.storage(), - )?, - None => None, - }; + let changes_trie_state = backend::changes_tries_state_at_block::( + state_block, + self.backend.changes_trie_storage(), + )?; state_machine::new( state, changes_trie_state, diff --git a/core/client/src/client.rs b/core/client/src/client.rs index d812831b46f33..469f07865b14e 100644 --- a/core/client/src/client.rs +++ b/core/client/src/client.rs @@ -17,7 +17,7 @@ //! Substrate Client use std::{ - marker::PhantomData, collections::{HashSet, BTreeMap, HashMap, VecDeque}, sync::Arc, + marker::PhantomData, collections::{HashSet, BTreeMap, HashMap}, sync::Arc, panic::UnwindSafe, result, cell::RefCell, rc::Rc, }; use crate::error::Error; @@ -522,7 +522,7 @@ impl Client where }; // TODO: we only work with the last config range here!!! 
- let (config_zero_number, _, config) = configs.pop_back().expect("TODO"); + let (config_zero_number, _, config) = configs.pop().expect("TODO"); let finalized_number = self.backend.blockchain().info().finalized_number; let oldest = storage.oldest_changes_trie_block(config_zero_number, config, finalized_number); let oldest = ::std::cmp::max(config_zero_number + One::one(), oldest); @@ -545,21 +545,29 @@ impl Client where let (storage, configs) = self.require_changes_trie(first, last_hash)?; let mut result = Vec::new(); + let best_number = self.backend.blockchain().info().best_number; for (config_zero, config_end, config) in configs { + let range_first = ::std::cmp::max(first, config_zero + One::one()); + let range_anchor = match config_end { + Some((config_end_number, config_end_hash)) => if last_number > config_end_number { + ChangesTrieAnchorBlockId { hash: config_end_hash, number: config_end_number } + } else { + ChangesTrieAnchorBlockId { hash: convert_hash(&last_hash), number: last_number } + }, + None => ChangesTrieAnchorBlockId { hash: convert_hash(&last_hash), number: last_number }, + }; + let config_range = ChangesTrieConfigurationRange { config: &config, zero: config_zero.clone(), - end: config_end.clone(), + end: config_end.map(|(config_end_number, _)| config_end_number), }; let result_range: Vec<(NumberFor, u32)> = key_changes::( config_range, storage.storage(), - first, - &ChangesTrieAnchorBlockId { - hash: convert_hash(&last_hash), - number: last_number, - }, - self.backend.blockchain().info().best_number, + range_first, + &range_anchor, + best_number, &key.0) .and_then(|r| r.map(|r| r.map(|(block, tx)| (block, tx))).collect::>()) .map_err(|err| error::Error::ChangesTrieAccessFailed(err))?; @@ -666,7 +674,7 @@ impl Client where let config_range = ChangesTrieConfigurationRange { config: &config, zero: config_zero, - end: config_end, + end: config_end.map(|(config_end_number, _)| config_end_number), }; let proof_range = key_changes_proof::( config_range, 
@@ -745,19 +753,19 @@ impl Client where last: Block::Hash, ) -> error::Result<( &PrunableStateChangesTrieStorage, - VecDeque<(NumberFor, Option>, ChangesTrieConfiguration)>, + Vec<(NumberFor, Option<(NumberFor, Block::Hash)>, ChangesTrieConfiguration)>, )> { let storage = match self.backend.changes_trie_storage() { Some(storage) => storage, None => return Err(error::Error::ChangesTriesNotSupported), }; - let mut configs = VecDeque::with_capacity(1); + let mut configs = Vec::with_capacity(1); let mut current = last; loop { let ((config_zero_number, config_zero_hash), config_end, config) = storage.configuration_at(&BlockId::Hash(current))?; match config { - Some(config) => configs.push_front((config_zero_number, config_end.map(|(config_end_number, _)| config_end_number), config)), + Some(config) => configs.push((config_zero_number, config_end, config)), None => return Err(error::Error::ChangesTriesNotSupported), } @@ -765,7 +773,7 @@ impl Client where break; } - current = config_zero_hash; + current = *self.backend.blockchain().expect_header(BlockId::Hash(config_zero_hash))?.parent_hash(); } Ok((storage, configs)) @@ -978,7 +986,7 @@ impl Client where } // FIXME #1232: correct path logic for when to execute this function - let (storage_update,changes_update,storage_changes) = self.block_execution(&operation.op, &import_headers, origin, hash, body.clone())?; + let (storage_update,changes_update,storage_changes) = self.block_execution(&operation.op, &import_headers, origin, hash, parent_hash, body.clone())?; let is_new_best = finalized || match fork_choice { ForkChoiceStrategy::LongestChain => import_headers.post().number() > &last_best_number, @@ -1031,6 +1039,7 @@ impl Client where import_headers: &PrePostHeader, origin: BlockOrigin, hash: Block::Hash, + parent_hash: Block::Hash, body: Option>, ) -> error::Result<( Option>, @@ -1068,6 +1077,7 @@ impl Client where }; let (_, storage_update, changes_update) = self.executor.call_at_state::<_, _, _, NeverNativeValue, 
fn() -> _>( transaction_state, + &BlockId::Hash(parent_hash), &mut overlay, "Core_execute_block", &::new(import_headers.pre().clone(), body.unwrap_or_default()).encode(), @@ -1927,7 +1937,8 @@ pub(crate) mod tests { // prepare client ang import blocks let mut local_roots = Vec::new(); - let remote_client = TestClientBuilder::new().set_support_changes_trie(true).build(); + let config = Some(ChangesTrieConfiguration::new(4, 2)); + let remote_client = TestClientBuilder::new().changes_trie_config(config).build(); let mut nonces: HashMap<_, u64> = Default::default(); for (i, block_transfers) in blocks_transfers.into_iter().enumerate() { let mut builder = remote_client.new_block(Default::default()).unwrap(); @@ -2739,4 +2750,93 @@ pub(crate) mod tests { let id = BlockId::::Number(72340207214430721); client.header(&id).expect_err("invalid block number overflows u32"); } + + #[test] + fn imports_blocks_with_changes_tries_config_change() { + // create client with initial 4^2 configuration + let client = TestClientBuilder::with_default_backend() + .changes_trie_config(Some(ChangesTrieConfiguration { + digest_interval: 4, + digest_levels: 2, + })).build(); + + // =================================================================== + // blocks 1,2,3,4,5,6,7,8,9,10 are empty + // block 11 changes the key + // block 12 is the L1 digest that covers this change + // blocks 13,14,15,16,17,18,19,20,21,22 are empty + // block 23 changes the configuration to 5^1 AND is skewed digest + // =================================================================== + // blocks 24,25 are changing the key + // block 26 is empty + // block 27 changes the key + // block 28 is the L1 digest (NOT SKEWED!!!) 
that covers changes AND changes configuration to 3^1 + // =================================================================== + // block 29 is empty + // block 30 changes the key + // block 31 is L1 digest that covers this change + // =================================================================== + (1..11).for_each(|number| { + let block = client.new_block_at(&BlockId::Number(number - 1), Default::default()).unwrap().bake().unwrap(); + client.import(BlockOrigin::Own, block).unwrap(); + }); + (11..12).for_each(|number| { + let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default()).unwrap(); + block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); + client.import(BlockOrigin::Own, block.bake().unwrap()).unwrap(); + }); + (12..23).for_each(|number| { + let block = client.new_block_at(&BlockId::Number(number - 1), Default::default()).unwrap().bake().unwrap(); + client.import(BlockOrigin::Own, block).unwrap(); + }); + (23..24).for_each(|number| { + let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default()).unwrap(); + block.push_changes_trie_configuration_update(Some(ChangesTrieConfiguration { + digest_interval: 5, + digest_levels: 1, + })).unwrap(); + client.import(BlockOrigin::Own, block.bake().unwrap()).unwrap(); + }); + (24..26).for_each(|number| { + let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default()).unwrap(); + block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); + client.import(BlockOrigin::Own, block.bake().unwrap()).unwrap(); + }); + (26..27).for_each(|number| { + let block = client.new_block_at(&BlockId::Number(number - 1), Default::default()).unwrap().bake().unwrap(); + client.import(BlockOrigin::Own, block).unwrap(); + }); + (27..28).for_each(|number| { + let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default()).unwrap(); + block.push_storage_change(vec![42], 
Some(number.to_le_bytes().to_vec())).unwrap(); + client.import(BlockOrigin::Own, block.bake().unwrap()).unwrap(); + }); + (28..29).for_each(|number| { + let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default()).unwrap(); + block.push_changes_trie_configuration_update(Some(ChangesTrieConfiguration { + digest_interval: 3, + digest_levels: 1, + })).unwrap(); + client.import(BlockOrigin::Own, block.bake().unwrap()).unwrap(); + }); + (29..30).for_each(|number| { + let block = client.new_block_at(&BlockId::Number(number - 1), Default::default()).unwrap().bake().unwrap(); + client.import(BlockOrigin::Own, block).unwrap(); + }); + (30..31).for_each(|number| { + let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default()).unwrap(); + block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); + client.import(BlockOrigin::Own, block.bake().unwrap()).unwrap(); + }); + (31..32).for_each(|number| { + let block = client.new_block_at(&BlockId::Number(number - 1), Default::default()).unwrap().bake().unwrap(); + client.import(BlockOrigin::Own, block).unwrap(); + }); + + // now check that configuration cache works + assert_eq!( + client.key_changes(1, BlockId::Number(31), &StorageKey(vec![42])).unwrap(), + vec![(30, 0), (27, 0), (25, 0), (24, 0), (11, 0)] + ); + } } diff --git a/core/client/src/genesis.rs b/core/client/src/genesis.rs index a26ed5f950e56..a956da34d5222 100644 --- a/core/client/src/genesis.rs +++ b/core/client/src/genesis.rs @@ -146,7 +146,7 @@ mod tests { #[test] fn construct_genesis_should_work_with_native() { - let mut storage = GenesisConfig::new(false, + let mut storage = GenesisConfig::new(None, vec![AuthorityKeyring::One.into(), AuthorityKeyring::Two.into()], vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], 1000 @@ -175,7 +175,7 @@ mod tests { #[test] fn construct_genesis_should_work_with_wasm() { - let mut storage = GenesisConfig::new(false, + let mut storage = 
GenesisConfig::new(None, vec![AuthorityKeyring::One.into(), AuthorityKeyring::Two.into()], vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], 1000 @@ -204,7 +204,7 @@ mod tests { #[test] fn construct_genesis_with_bad_transaction_should_panic() { - let mut storage = GenesisConfig::new(false, + let mut storage = GenesisConfig::new(None, vec![AuthorityKeyring::One.into(), AuthorityKeyring::Two.into()], vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], 68 diff --git a/core/client/src/light/call_executor.rs b/core/client/src/light/call_executor.rs index faa7c10def070..1887f96dfa643 100644 --- a/core/client/src/light/call_executor.rs +++ b/core/client/src/light/call_executor.rs @@ -163,6 +163,7 @@ where NC: FnOnce() -> result::Result, >(&self, _state: &S, + _state_block: &BlockId, _changes: &mut OverlayedChanges, _method: &str, _call_data: &[u8], @@ -340,6 +341,7 @@ impl CallExecutor for NC: FnOnce() -> result::Result + UnwindSafe, >(&self, state: &S, + state_block: &BlockId, changes: &mut OverlayedChanges, method: &str, call_data: &[u8], @@ -366,6 +368,7 @@ impl CallExecutor for >( &self.remote, state, + state_block, changes, method, call_data, diff --git a/core/client/src/light/fetcher.rs b/core/client/src/light/fetcher.rs index 4b71dfc169539..e7b0835e92b63 100644 --- a/core/client/src/light/fetcher.rs +++ b/core/client/src/light/fetcher.rs @@ -695,8 +695,9 @@ pub mod tests { // check proof on local client let local_roots_range = local_roots.clone()[(begin - 1) as usize..].to_vec(); + let config = ChangesTrieConfiguration::new(4, 2); let request = RemoteChangesRequest::
{ - changes_trie_configs: vec![(0, None, runtime::changes_trie_config())], + changes_trie_configs: vec![(0, None, config)], first_block: (begin, begin_hash), last_block: (end, end_hash), max_block: (max, max_hash), @@ -749,8 +750,9 @@ pub mod tests { ); // check proof on local client + let config = ChangesTrieConfiguration::new(4, 2); let request = RemoteChangesRequest::
{ - changes_trie_configs: vec![(0, None, runtime::changes_trie_config())], + changes_trie_configs: vec![(0, None, config)], first_block: (1, b1), last_block: (4, b4), max_block: (4, b4), @@ -789,8 +791,9 @@ pub mod tests { begin_hash, end_hash, begin_hash, max_hash, &key).unwrap(); let local_roots_range = local_roots.clone()[(begin - 1) as usize..].to_vec(); + let config = ChangesTrieConfiguration::new(4, 2); let request = RemoteChangesRequest::
{ - changes_trie_configs: vec![(0, None, runtime::changes_trie_config())], + changes_trie_configs: vec![(0, None, config)], first_block: (begin, begin_hash), last_block: (end, end_hash), max_block: (max, max_hash), diff --git a/core/primitives/src/changes_trie.rs b/core/primitives/src/changes_trie.rs index 6506a7d73baa1..05ac13a9f1f32 100644 --- a/core/primitives/src/changes_trie.rs +++ b/core/primitives/src/changes_trie.rs @@ -39,6 +39,11 @@ pub struct ChangesTrieConfiguration { } impl ChangesTrieConfiguration { + /// Create new configuration given digest interval and levels. + pub fn new(digest_interval: u32, digest_levels: u32) -> Self { + Self { digest_interval, digest_levels } + } + /// Is digest build enabled? pub fn is_digest_build_enabled(&self) -> bool { self.digest_interval > 1 && self.digest_levels > 0 diff --git a/core/state-machine/src/changes_trie/changes_iterator.rs b/core/state-machine/src/changes_trie/changes_iterator.rs index dd5bfbfe55bf9..aadecaf53ff4e 100644 --- a/core/state-machine/src/changes_trie/changes_iterator.rs +++ b/core/state-machine/src/changes_trie/changes_iterator.rs @@ -392,22 +392,22 @@ mod tests { fn drilldown_iterator_works() { let (config, storage) = prepare_for_drilldown(); let drilldown_result = key_changes::( - configuration_range(&config, 0), &storage, 0, &AnchorBlockId { hash: Default::default(), number: 16 }, 16, &[42]) + configuration_range(&config, 0), &storage, 1, &AnchorBlockId { hash: Default::default(), number: 16 }, 16, &[42]) .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(8, 2), (8, 1), (6, 3), (3, 0)])); let drilldown_result = key_changes::( - configuration_range(&config, 0), &storage, 0, &AnchorBlockId { hash: Default::default(), number: 2 }, 4, &[42]) + configuration_range(&config, 0), &storage, 1, &AnchorBlockId { hash: Default::default(), number: 2 }, 4, &[42]) .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![])); let drilldown_result = key_changes::( - 
configuration_range(&config, 0), &storage, 0, &AnchorBlockId { hash: Default::default(), number: 3 }, 4, &[42]) + configuration_range(&config, 0), &storage, 1, &AnchorBlockId { hash: Default::default(), number: 3 }, 4, &[42]) .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(3, 0)])); let drilldown_result = key_changes::( - configuration_range(&config, 0), &storage, 0, &AnchorBlockId { hash: Default::default(), number: 7 }, 7, &[42]) + configuration_range(&config, 0), &storage, 1, &AnchorBlockId { hash: Default::default(), number: 7 }, 7, &[42]) .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(6, 3), (3, 0)])); @@ -428,7 +428,7 @@ mod tests { storage.clear_storage(); assert!(key_changes::( - configuration_range(&config, 0), &storage, 0, &AnchorBlockId { hash: Default::default(), number: 100 }, 1000, &[42]) + configuration_range(&config, 0), &storage, 1, &AnchorBlockId { hash: Default::default(), number: 100 }, 1000, &[42]) .and_then(|i| i.collect::, _>>()).is_err()); } @@ -436,7 +436,7 @@ mod tests { fn drilldown_iterator_fails_when_range_is_invalid() { let (config, storage) = prepare_for_drilldown(); assert!(key_changes::( - configuration_range(&config, 0), &storage, 0, &AnchorBlockId { hash: Default::default(), number: 100 }, 50, &[42]).is_err()); + configuration_range(&config, 0), &storage, 1, &AnchorBlockId { hash: Default::default(), number: 100 }, 50, &[42]).is_err()); assert!(key_changes::( configuration_range(&config, 0), &storage, 20, &AnchorBlockId { hash: Default::default(), number: 10 }, 100, &[42]).is_err()); } @@ -450,7 +450,7 @@ mod tests { let (remote_config, remote_storage) = prepare_for_drilldown(); let remote_proof = key_changes_proof::( configuration_range(&remote_config, 0), &remote_storage, - 0, &AnchorBlockId { hash: Default::default(), number: 16 }, 16, &[42]).unwrap(); + 1, &AnchorBlockId { hash: Default::default(), number: 16 }, 16, &[42]).unwrap(); // happens on local light node: @@ -459,7 +459,7 @@ 
mod tests { local_storage.clear_storage(); let local_result = key_changes_proof_check::( configuration_range(&local_config, 0), &local_storage, remote_proof, - 0, &AnchorBlockId { hash: Default::default(), number: 16 }, 16, &[42]); + 1, &AnchorBlockId { hash: Default::default(), number: 16 }, 16, &[42]); // check that drilldown result is the same as if it was happening at the full node assert_eq!(local_result, Ok(vec![(8, 2), (8, 1), (6, 3), (3, 0)])); @@ -488,7 +488,7 @@ mod tests { let storage = InMemoryStorage::with_inputs(input); let drilldown_result = key_changes::( - config_range, &storage, 0, &AnchorBlockId { hash: Default::default(), number: 91 }, 100_000u64, &[42]) + config_range, &storage, 1, &AnchorBlockId { hash: Default::default(), number: 91 }, 100_000u64, &[42]) .and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(79, 1), (63, 0)])); } diff --git a/core/state-machine/src/changes_trie/surface_iterator.rs b/core/state-machine/src/changes_trie/surface_iterator.rs index b8e969b59d9de..2947048ab655e 100644 --- a/core/state-machine/src/changes_trie/surface_iterator.rs +++ b/core/state-machine/src/changes_trie/surface_iterator.rs @@ -112,6 +112,13 @@ fn lower_bound_max_digest<'a, Number: BlockNumber>( if end > max || begin > end { return Err(format!("invalid changes range: {}..{}/{}", begin, end, max)); } + if begin <= config.zero || config.end.as_ref().map(|config_end| end > *config_end).unwrap_or(false) { + return Err(format!("changes trie range is not covered by configuration: {}..{}/{}..{}", + begin, end, config.zero, match config.end.as_ref() { + Some(config_end) => format!("{}", config_end), + None => "None".into(), + })); + } let mut digest_level = 0u32; let mut digest_step = 1u32; @@ -200,8 +207,8 @@ mod tests { // when config activates at 30 assert_eq!( - lower_bound_max_digest(configuration_range(&config, 30u64), 100_000u64, 20u64, 180u64).unwrap(), - (190, 174, 16, 2), + lower_bound_max_digest(configuration_range(&config, 30u64), 
100_000u64, 50u64, 210u64).unwrap(), + (222, 206, 16, 2), ); } diff --git a/core/test-runtime/client/src/block_builder_ext.rs b/core/test-runtime/client/src/block_builder_ext.rs index 9b7d343f02f88..728574d543aa1 100644 --- a/core/test-runtime/client/src/block_builder_ext.rs +++ b/core/test-runtime/client/src/block_builder_ext.rs @@ -16,6 +16,7 @@ //! Block Builder extensions for tests. +use primitives::ChangesTrieConfiguration; use runtime; use runtime_primitives::traits::ProvideRuntimeApi; use generic_test_client::client; @@ -27,6 +28,11 @@ pub trait BlockBuilderExt { fn push_transfer(&mut self, transfer: runtime::Transfer) -> Result<(), client::error::Error>; /// Add storage change extrinsic to the block. fn push_storage_change(&mut self, key: Vec, value: Option>) -> Result<(), client::error::Error>; + /// Add changes trie configuration update extrinsic to the block. + fn push_changes_trie_configuration_update( + &mut self, + new_config: Option, + ) -> Result<(), client::error::Error>; } impl<'a, A> BlockBuilderExt for client::block_builder::BlockBuilder<'a, runtime::Block, A> where @@ -40,4 +46,11 @@ impl<'a, A> BlockBuilderExt for client::block_builder::BlockBuilder<'a, runtime: fn push_storage_change(&mut self, key: Vec, value: Option>) -> Result<(), client::error::Error> { self.push(runtime::Extrinsic::StorageChange(key, value)) } + + fn push_changes_trie_configuration_update( + &mut self, + new_config: Option, + ) -> Result<(), client::error::Error> { + self.push(runtime::Extrinsic::ChangesTrieConfigUpdate(new_config)) + } } diff --git a/core/test-runtime/client/src/lib.rs b/core/test-runtime/client/src/lib.rs index 104ffac820c50..28fdd4c78c131 100644 --- a/core/test-runtime/client/src/lib.rs +++ b/core/test-runtime/client/src/lib.rs @@ -28,6 +28,7 @@ pub use runtime; use runtime::genesismap::{GenesisConfig, additional_storage_with_genesis}; use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, Hash as HashT}; +use 
primitives::ChangesTrieConfiguration; /// A prelude to import in tests. pub mod prelude { @@ -94,12 +95,12 @@ pub type LightExecutor = client::light::call_executor::RemoteOrLocalCallExecutor /// Parameters of test-client builder with test-runtime. #[derive(Default)] pub struct GenesisParameters { - support_changes_trie: bool, + changes_trie_config: Option, } impl generic_test_client::GenesisInit for GenesisParameters { fn genesis_storage(&self) -> (StorageOverlay, ChildrenStorageOverlay) { - let mut storage = genesis_config(self.support_changes_trie).genesis_map(); + let mut storage = genesis_config(self.changes_trie_config.clone()).genesis_map(); let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( storage.clone().into_iter() @@ -142,8 +143,8 @@ impl DefaultTestClientBuilderExt for TestClientBuilder< /// A `test-runtime` extensions to `TestClientBuilder`. pub trait TestClientBuilderExt: Sized { - /// Enable or disable support for changes trie in genesis. - fn set_support_changes_trie(self, support_changes_trie: bool) -> Self; + /// Set changes trie configuration for genesis. + fn changes_trie_config(self, config: Option) -> Self; /// Build the test client. 
fn build(self) -> Client { @@ -160,8 +161,8 @@ impl TestClientBuilderExt for TestClientBuilder< > where B: client::backend::Backend, { - fn set_support_changes_trie(mut self, support_changes_trie: bool) -> Self { - self.genesis_init_mut().support_changes_trie = support_changes_trie; + fn changes_trie_config(mut self, config: Option) -> Self { + self.genesis_init_mut().changes_trie_config = config; self } @@ -170,8 +171,8 @@ impl TestClientBuilderExt for TestClientBuilder< } } -fn genesis_config(support_changes_trie: bool) -> GenesisConfig { - GenesisConfig::new(support_changes_trie, vec![ +fn genesis_config(changes_trie_config: Option) -> GenesisConfig { + GenesisConfig::new(changes_trie_config, vec![ AuthorityKeyring::Alice.into(), AuthorityKeyring::Bob.into(), AuthorityKeyring::Charlie.into(), diff --git a/core/test-runtime/src/genesismap.rs b/core/test-runtime/src/genesismap.rs index 21d7aae0a1a06..02451f2d792ef 100644 --- a/core/test-runtime/src/genesismap.rs +++ b/core/test-runtime/src/genesismap.rs @@ -32,16 +32,13 @@ pub struct GenesisConfig { impl GenesisConfig { pub fn new( - support_changes_trie: bool, + changes_trie_config: Option, authorities: Vec, endowed_accounts: Vec, balance: u64 ) -> Self { GenesisConfig { - changes_trie_config: match support_changes_trie { - true => Some(super::changes_trie_config()), - false => None, - }, + changes_trie_config, authorities: authorities.clone(), balances: endowed_accounts.into_iter().map(|a| (a, balance)).collect(), } diff --git a/core/test-runtime/src/lib.rs b/core/test-runtime/src/lib.rs index 13b8a9eac86c6..4d0af0866aa02 100644 --- a/core/test-runtime/src/lib.rs +++ b/core/test-runtime/src/lib.rs @@ -25,7 +25,7 @@ pub mod system; use rstd::{prelude::*, marker::PhantomData}; use parity_codec::{Encode, Decode, Input}; -use primitives::Blake2Hasher; +use primitives::{Blake2Hasher, ChangesTrieConfiguration}; use trie_db::{TrieMut, Trie}; use substrate_trie::{TrieDB, TrieDBMut, PrefixedMemoryDB}; @@ -111,6 +111,7 @@ 
pub enum Extrinsic { Transfer(Transfer, AccountSignature), IncludeData(Vec), StorageChange(Vec, Option>), + ChangesTrieConfigUpdate(Option), } #[cfg(feature = "std")] @@ -135,6 +136,8 @@ impl BlindCheckable for Extrinsic { }, Extrinsic::IncludeData(_) => Err(runtime_primitives::BAD_SIGNATURE), Extrinsic::StorageChange(key, value) => Ok(Extrinsic::StorageChange(key, value)), + Extrinsic::ChangesTrieConfigUpdate(new_config) => + Ok(Extrinsic::ChangesTrieConfigUpdate(new_config)), } } } @@ -195,14 +198,6 @@ pub fn run_tests(mut input: &[u8]) -> Vec { [stxs.len() as u8].encode() } -/// Changes trie configuration (optionally) used in tests. -pub fn changes_trie_config() -> primitives::ChangesTrieConfiguration { - primitives::ChangesTrieConfiguration { - digest_interval: 4, - digest_levels: 2, - } -} - /// A type that can not be decoded. #[derive(PartialEq)] pub struct DecodeFails { diff --git a/core/test-runtime/src/system.rs b/core/test-runtime/src/system.rs index 01b032f59925a..1043148cfdbad 100644 --- a/core/test-runtime/src/system.rs +++ b/core/test-runtime/src/system.rs @@ -29,7 +29,7 @@ use parity_codec::{KeyedVec, Encode}; use super::{ AccountId, BlockNumber, Extrinsic, Transfer, H256 as Hash, Block, Header, Digest, AuthorityId }; -use primitives::{Blake2Hasher, storage::well_known_keys}; +use primitives::{Blake2Hasher, storage::well_known_keys, ChangesTrieConfiguration}; const NONCE_OF: &[u8] = b"nonce:"; const BALANCE_OF: &[u8] = b"balance:"; @@ -40,6 +40,7 @@ storage_items! 
{ Number: b"sys:num" => BlockNumber; ParentHash: b"sys:pha" => required Hash; NewAuthorities: b"sys:new_auth" => Vec; + NewChangesTrieConfig: b"sys:new_changes_trie_config" => Option; StorageDigest: b"sys:digest" => Digest; Authorities get(authorities): b"sys:auth" => default Vec; } @@ -112,6 +113,7 @@ fn execute_block_with_state_root_handler( }); let o_new_authorities = ::take(); + let new_changes_trie_config = ::take(); if let Mode::Overwrite = mode { header.state_root = storage_root().into(); @@ -131,6 +133,11 @@ fn execute_block_with_state_root_handler( digest.push(generic::DigestItem::Consensus(*b"aura", new_authorities.encode())); digest.push(generic::DigestItem::Consensus(*b"babe", new_authorities.encode())); } + if let Some(new_config) = new_changes_trie_config { + digest.push(generic::DigestItem::ChangesTrieSignal( + generic::ChangesTrieSignal::NewConfiguration(new_config) + )); + } } /// The block executor. @@ -207,6 +214,8 @@ pub fn finalize_block() -> Header { let mut digest = ::take().expect("StorageDigest is set by `initialize_block`"); let o_new_authorities = ::take(); + let new_changes_trie_config = ::take(); + // This MUST come after all changes to storage are done. Otherwise we will fail the // “Storage root does not match that calculated” assertion. 
let storage_root = BlakeTwo256::storage_root(); @@ -221,6 +230,12 @@ pub fn finalize_block() -> Header { digest.push(generic::DigestItem::Consensus(*b"babe", new_authorities.encode())); } + if let Some(new_config) = new_changes_trie_config { + digest.push(generic::DigestItem::ChangesTrieSignal( + generic::ChangesTrieSignal::NewConfiguration(new_config) + )); + } + Header { number, extrinsics_root, @@ -244,6 +259,8 @@ fn execute_transaction_backend(utx: &Extrinsic) -> ApplyResult { Extrinsic::AuthoritiesChange(ref new_auth) => execute_new_authorities_backend(new_auth), Extrinsic::IncludeData(_) => Ok(ApplyOutcome::Success), Extrinsic::StorageChange(key, value) => execute_storage_change(key, value.as_ref().map(|v| &**v)), + Extrinsic::ChangesTrieConfigUpdate(ref new_config) => + execute_changes_trie_config_update(new_config.clone()), } } @@ -287,6 +304,18 @@ fn execute_storage_change(key: &[u8], value: Option<&[u8]>) -> ApplyResult { Ok(ApplyOutcome::Success) } +fn execute_changes_trie_config_update(new_config: Option) -> ApplyResult { + match new_config.clone() { + Some(new_config) => storage::unhashed::put_raw( + well_known_keys::CHANGES_TRIE_CONFIG, + &new_config.encode(), + ), + None => storage::unhashed::kill(well_known_keys::CHANGES_TRIE_CONFIG), + } + ::put(new_config); + Ok(ApplyOutcome::Success) +} + #[cfg(feature = "std")] fn info_expect_equal_hash(given: &Hash, expected: &Hash) { use primitives::hexdisplay::HexDisplay; From 8b70450b5dd00b5e71d185c06243785f67127aba Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 1 Aug 2019 11:16:56 +0300 Subject: [PATCH 26/63] fixing TODOs --- core/client/db/src/cache/list_cache.rs | 2 - core/client/src/client.rs | 2 +- core/client/src/light/fetcher.rs | 8 +- core/primitives/src/changes_trie.rs | 5 +- core/state-machine/src/changes_trie/build.rs | 318 ++++++++++-------- .../src/changes_trie/build_iterator.rs | 2 +- .../src/changes_trie/changes_iterator.rs | 53 +-- core/state-machine/src/changes_trie/mod.rs | 6 
+- .../state-machine/src/changes_trie/storage.rs | 17 +- .../src/changes_trie/surface_iterator.rs | 61 ++-- core/state-machine/src/lib.rs | 3 +- 11 files changed, 275 insertions(+), 202 deletions(-) diff --git a/core/client/db/src/cache/list_cache.rs b/core/client/db/src/cache/list_cache.rs index b75a1148138fd..b0f513f10eb54 100644 --- a/core/client/db/src/cache/list_cache.rs +++ b/core/client/db/src/cache/list_cache.rs @@ -200,7 +200,6 @@ impl> ListCache debug_assert!(entry_type != EntryType::Final || self.best_finalized_block.hash == parent.hash); // we do not store any values behind finalized -// TODO: how this works with CT configuration??? if block.number != Zero::zero() && self.best_finalized_block.number >= block.number { return Ok(None); } @@ -685,7 +684,6 @@ pub mod tests { #[test] fn list_value_at_block_works() { -// TODO: check that value_at_block actually returns correct value!!! // when block is earlier than best finalized block AND it is not finalized // --- 50 --- // ----------> [100] diff --git a/core/client/src/client.rs b/core/client/src/client.rs index 469f07865b14e..3faa5443d55f2 100644 --- a/core/client/src/client.rs +++ b/core/client/src/client.rs @@ -521,7 +521,7 @@ impl Client where None => return Ok(None), }; - // TODO: we only work with the last config range here!!! + // TODO: we only work with the last config range here!!! Need to stabilize pruning before fixing this. 
let (config_zero_number, _, config) = configs.pop().expect("TODO"); let finalized_number = self.backend.blockchain().info().finalized_number; let oldest = storage.oldest_changes_trie_block(config_zero_number, config, finalized_number); diff --git a/core/client/src/light/fetcher.rs b/core/client/src/light/fetcher.rs index e7b0835e92b63..a4cb29e1f7156 100644 --- a/core/client/src/light/fetcher.rs +++ b/core/client/src/light/fetcher.rs @@ -29,7 +29,8 @@ use runtime_primitives::traits::{ SimpleArithmetic, CheckedConversion, }; use state_machine::{CodeExecutor, ChangesTrieRootsStorage, ChangesTrieAnchorBlockId, - ChangesTrieConfigurationRange, TrieBackend, read_proof_check, key_changes_proof_check, + ChangesTrieConfigurationRange, TrieBackend, InMemoryChangesTrieStorage, + read_proof_check, key_changes_proof_check_with_db, create_proof_check_backend_storage, read_child_proof_check, }; @@ -286,19 +287,20 @@ impl, F> LightDataChecker( + let result_range = key_changes_proof_check_with_db::( config_range, &RootsStorage { roots: (request.tries_roots.0, &request.tries_roots.2), prev_roots: &remote_roots, }, - remote_proof.clone(), // TODO: use prepared MDB instead of creating it on every loop iter + &proof_storage, request.first_block.0, &ChangesTrieAnchorBlockId { hash: convert_hash(&request.last_block.1), diff --git a/core/primitives/src/changes_trie.rs b/core/primitives/src/changes_trie.rs index 05ac13a9f1f32..97cbfed4752be 100644 --- a/core/primitives/src/changes_trie.rs +++ b/core/primitives/src/changes_trie.rs @@ -80,7 +80,10 @@ impl ChangesTrieConfiguration { } } - /// Returns max level digest block number that must be created at block <= passed block number. + /// Returns max level digest block number that has been created at block <= passed block number. + /// + /// Returns None if digests are not created at all. + /// This could return Some(zero), even though changes trie isn't ever created at this block. 
pub fn prev_max_level_digest_block( &self, zero: Number, diff --git a/core/state-machine/src/changes_trie/build.rs b/core/state-machine/src/changes_trie/build.rs index a49c6aebedc33..c60aa8d25ccc5 100644 --- a/core/state-machine/src/changes_trie/build.rs +++ b/core/state-machine/src/changes_trie/build.rs @@ -132,7 +132,6 @@ fn prepare_digest_input<'a, H, Number>( }; digest_build_iterator(config, block_for_digest) -// .take_while(|digest_build_block| config.end.as_ref().map(|end| digest_build_block <= end).unwrap_or(true)) .try_fold(BTreeMap::new(), move |mut map, digest_build_block| { let trie_root = storage.root(parent, digest_build_block.clone())?; let trie_root = trie_root.ok_or_else(|| format!("No changes trie root for block {}", digest_build_block.clone()))?; @@ -191,7 +190,12 @@ mod test { use crate::overlayed_changes::OverlayedValue; use super::*; - fn prepare_for_build() -> (InMemory, InMemoryStorage, OverlayedChanges, Configuration) { + fn prepare_for_build(zero: u64) -> ( + InMemory, + InMemoryStorage, + OverlayedChanges, + Configuration, + ) { let backend: InMemory<_> = vec![ (vec![100], vec![255]), (vec![101], vec![255]), @@ -201,38 +205,38 @@ mod test { (vec![105], vec![255]), ].into_iter().collect::<::std::collections::HashMap<_, _>>().into(); let storage = InMemoryStorage::with_inputs(vec![ - (1, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 1, key: vec![100] }, vec![1, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 1, key: vec![101] }, vec![0, 2]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 1, key: vec![105] }, vec![0, 2, 4]), + (zero + 1, vec![ + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![100] }, vec![1, 3]), + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![101] }, vec![0, 2]), + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![105] }, vec![0, 2, 4]), ]), - (2, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 2, key: vec![102] }, vec![0]), 
+ (zero + 2, vec![ + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 2, key: vec![102] }, vec![0]), ]), - (3, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 3, key: vec![100] }, vec![0]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 3, key: vec![105] }, vec![1]), + (zero + 3, vec![ + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 3, key: vec![100] }, vec![0]), + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 3, key: vec![105] }, vec![1]), ]), - (4, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 4, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 4, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 4, key: vec![103] }, vec![0, 1]), - - InputPair::DigestIndex(DigestIndex { block: 4, key: vec![100] }, vec![1, 3]), - InputPair::DigestIndex(DigestIndex { block: 4, key: vec![101] }, vec![1]), - InputPair::DigestIndex(DigestIndex { block: 4, key: vec![102] }, vec![2]), - InputPair::DigestIndex(DigestIndex { block: 4, key: vec![105] }, vec![1, 3]), + (zero + 4, vec![ + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![100] }, vec![0, 2, 3]), + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![101] }, vec![1]), + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![103] }, vec![0, 1]), + + InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![100] }, vec![zero + 1, zero + 3]), + InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![101] }, vec![zero + 1]), + InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![102] }, vec![zero + 2]), + InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![105] }, vec![zero + 1, zero + 3]), ]), - (5, Vec::new()), - (6, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 6, key: vec![105] }, vec![2]), + (zero + 5, Vec::new()), + (zero + 6, vec![ + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 6, key: 
vec![105] }, vec![2]), ]), - (7, Vec::new()), - (8, vec![ - InputPair::DigestIndex(DigestIndex { block: 8, key: vec![105] }, vec![6]), + (zero + 7, Vec::new()), + (zero + 8, vec![ + InputPair::DigestIndex(DigestIndex { block: zero + 8, key: vec![105] }, vec![zero + 6]), ]), - (9, Vec::new()), (10, Vec::new()), (11, Vec::new()), (12, Vec::new()), (13, Vec::new()), - (14, Vec::new()), (15, Vec::new()), + (zero + 9, Vec::new()), (zero + 10, Vec::new()), (zero + 11, Vec::new()), (zero + 12, Vec::new()), + (zero + 13, Vec::new()), (zero + 14, Vec::new()), (zero + 15, Vec::new()), ]); let changes = OverlayedChanges { prospective: vec![ @@ -276,136 +280,166 @@ mod test { #[test] fn build_changes_trie_nodes_on_non_digest_block() { - let (backend, storage, changes, config) = prepare_for_build(); - let parent = AnchorBlockId { hash: Default::default(), number: 4 }; - let changes_trie_nodes = prepare_input( - &backend, - &storage, - configuration_range(&config, 0), // TODO: test other cases - &changes, - &parent, - ).unwrap(); - assert_eq!(changes_trie_nodes.collect::>>(), vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 5, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 5, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 5, key: vec![103] }, vec![0, 1]), - ]); + fn test_with_zero(zero: u64) { + let (backend, storage, changes, config) = prepare_for_build(zero); + let parent = AnchorBlockId { hash: Default::default(), number: zero + 4 }; + let changes_trie_nodes = prepare_input( + &backend, + &storage, + configuration_range(&config, zero), + &changes, + &parent, + ).unwrap(); + assert_eq!(changes_trie_nodes.collect::>>(), vec![ + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 5, key: vec![100] }, vec![0, 2, 3]), + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 5, key: vec![101] }, vec![1]), + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 5, key: vec![103] }, vec![0, 
1]), + ]); + } + + test_with_zero(0); + test_with_zero(16); + test_with_zero(17); } #[test] fn build_changes_trie_nodes_on_digest_block_l1() { - let (backend, storage, changes, config) = prepare_for_build(); - let parent = AnchorBlockId { hash: Default::default(), number: 3 }; - let changes_trie_nodes = prepare_input( - &backend, - &storage, - configuration_range(&config, 0), // TODO: test other cases - &changes, - &parent, - ).unwrap(); - assert_eq!(changes_trie_nodes.collect::>>(), vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 4, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 4, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 4, key: vec![103] }, vec![0, 1]), - - InputPair::DigestIndex(DigestIndex { block: 4, key: vec![100] }, vec![1, 3]), - InputPair::DigestIndex(DigestIndex { block: 4, key: vec![101] }, vec![1]), - InputPair::DigestIndex(DigestIndex { block: 4, key: vec![102] }, vec![2]), - InputPair::DigestIndex(DigestIndex { block: 4, key: vec![105] }, vec![1, 3]), - ]); + fn test_with_zero(zero: u64) { + let (backend, storage, changes, config) = prepare_for_build(zero); + let parent = AnchorBlockId { hash: Default::default(), number: zero + 3 }; + let changes_trie_nodes = prepare_input( + &backend, + &storage, + configuration_range(&config, zero), + &changes, + &parent, + ).unwrap(); + assert_eq!(changes_trie_nodes.collect::>>(), vec![ + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![100] }, vec![0, 2, 3]), + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![101] }, vec![1]), + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![103] }, vec![0, 1]), + + InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![100] }, vec![zero + 1, zero + 3]), + InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![101] }, vec![zero + 1]), + InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![102] }, 
vec![zero + 2]), + InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![105] }, vec![zero + 1, zero + 3]), + ]); + } + + test_with_zero(0); + test_with_zero(16); + test_with_zero(17); } #[test] fn build_changes_trie_nodes_on_digest_block_l2() { - let (backend, storage, changes, config) = prepare_for_build(); - let parent = AnchorBlockId { hash: Default::default(), number: 15 }; - let changes_trie_nodes = prepare_input( - &backend, - &storage, - configuration_range(&config, 0), // TODO: test other cases - &changes, - &parent, - ).unwrap(); - assert_eq!(changes_trie_nodes.collect::>>(), vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![103] }, vec![0, 1]), - - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![100] }, vec![4]), - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![101] }, vec![4]), - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![102] }, vec![4]), - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![103] }, vec![4]), - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![105] }, vec![4, 8]), - ]); + fn test_with_zero(zero: u64) { + let (backend, storage, changes, config) = prepare_for_build(zero); + let parent = AnchorBlockId { hash: Default::default(), number: zero + 15 }; + let changes_trie_nodes = prepare_input( + &backend, + &storage, + configuration_range(&config, zero), + &changes, + &parent, + ).unwrap(); + assert_eq!(changes_trie_nodes.collect::>>(), vec![ + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 16, key: vec![100] }, vec![0, 2, 3]), + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 16, key: vec![101] }, vec![1]), + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 16, key: vec![103] }, vec![0, 1]), + + InputPair::DigestIndex(DigestIndex { block: zero + 16, key: 
vec![100] }, vec![zero + 4]), + InputPair::DigestIndex(DigestIndex { block: zero + 16, key: vec![101] }, vec![zero + 4]), + InputPair::DigestIndex(DigestIndex { block: zero + 16, key: vec![102] }, vec![zero + 4]), + InputPair::DigestIndex(DigestIndex { block: zero + 16, key: vec![103] }, vec![zero + 4]), + InputPair::DigestIndex(DigestIndex { block: zero + 16, key: vec![105] }, vec![zero + 4, zero + 8]), + ]); + } + + test_with_zero(0); + test_with_zero(16); + test_with_zero(17); } #[test] fn build_changes_trie_nodes_on_skewed_digest_block() { - let (backend, storage, changes, config) = prepare_for_build(); - let parent = AnchorBlockId { hash: Default::default(), number: 10 }; - - let mut configuration_range = configuration_range(&config, 0); // TODO: test other cases - let changes_trie_nodes = prepare_input( - &backend, - &storage, - configuration_range.clone(), - &changes, - &parent, - ).unwrap(); - assert_eq!(changes_trie_nodes.collect::>>(), vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 11, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 11, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 11, key: vec![103] }, vec![0, 1]), - ]); + fn test_with_zero(zero: u64) { + let (backend, storage, changes, config) = prepare_for_build(zero); + let parent = AnchorBlockId { hash: Default::default(), number: zero + 10 }; + + let mut configuration_range = configuration_range(&config, zero); + let changes_trie_nodes = prepare_input( + &backend, + &storage, + configuration_range.clone(), + &changes, + &parent, + ).unwrap(); + assert_eq!(changes_trie_nodes.collect::>>(), vec![ + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 11, key: vec![100] }, vec![0, 2, 3]), + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 11, key: vec![101] }, vec![1]), + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 11, key: vec![103] }, vec![0, 1]), + ]); + + configuration_range.end = Some(zero + 
11); + let changes_trie_nodes = prepare_input( + &backend, + &storage, + configuration_range, + &changes, + &parent, + ).unwrap(); + assert_eq!(changes_trie_nodes.collect::>>(), vec![ + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 11, key: vec![100] }, vec![0, 2, 3]), + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 11, key: vec![101] }, vec![1]), + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 11, key: vec![103] }, vec![0, 1]), + + InputPair::DigestIndex(DigestIndex { block: zero + 11, key: vec![100] }, vec![zero + 4]), + InputPair::DigestIndex(DigestIndex { block: zero + 11, key: vec![101] }, vec![zero + 4]), + InputPair::DigestIndex(DigestIndex { block: zero + 11, key: vec![102] }, vec![zero + 4]), + InputPair::DigestIndex(DigestIndex { block: zero + 11, key: vec![103] }, vec![zero + 4]), + InputPair::DigestIndex(DigestIndex { block: zero + 11, key: vec![105] }, vec![zero + 4, zero + 8]), + ]); + } - configuration_range.end = Some(11); - let changes_trie_nodes = prepare_input( - &backend, - &storage, - configuration_range, - &changes, - &parent, - ).unwrap(); - assert_eq!(changes_trie_nodes.collect::>>(), vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 11, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 11, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 11, key: vec![103] }, vec![0, 1]), - - InputPair::DigestIndex(DigestIndex { block: 11, key: vec![100] }, vec![4]), - InputPair::DigestIndex(DigestIndex { block: 11, key: vec![101] }, vec![4]), - InputPair::DigestIndex(DigestIndex { block: 11, key: vec![102] }, vec![4]), - InputPair::DigestIndex(DigestIndex { block: 11, key: vec![103] }, vec![4]), - InputPair::DigestIndex(DigestIndex { block: 11, key: vec![105] }, vec![4, 8]), - ]); + test_with_zero(0); + test_with_zero(16); + test_with_zero(17); } #[test] fn build_changes_trie_nodes_ignores_temporary_storage_values() { - let (backend, storage, mut 
changes, config) = prepare_for_build(); - - // 110: missing from backend, set to None in overlay - changes.prospective.top.insert(vec![110], OverlayedValue { - value: None, - extrinsics: Some(vec![1].into_iter().collect()) - }); - - let parent = AnchorBlockId { hash: Default::default(), number: 3 }; - let changes_trie_nodes = prepare_input( - &backend, - &storage, - configuration_range(&config, 0), // TODO: test other cases - &changes, - &parent, - ).unwrap(); - assert_eq!(changes_trie_nodes.collect::>>(), vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 4, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 4, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 4, key: vec![103] }, vec![0, 1]), - - InputPair::DigestIndex(DigestIndex { block: 4, key: vec![100] }, vec![1, 3]), - InputPair::DigestIndex(DigestIndex { block: 4, key: vec![101] }, vec![1]), - InputPair::DigestIndex(DigestIndex { block: 4, key: vec![102] }, vec![2]), - InputPair::DigestIndex(DigestIndex { block: 4, key: vec![105] }, vec![1, 3]), - ]); + fn test_with_zero(zero: u64) { + let (backend, storage, mut changes, config) = prepare_for_build(zero); + + // 110: missing from backend, set to None in overlay + changes.prospective.top.insert(vec![110], OverlayedValue { + value: None, + extrinsics: Some(vec![1].into_iter().collect()) + }); + + let parent = AnchorBlockId { hash: Default::default(), number: zero + 3 }; + let changes_trie_nodes = prepare_input( + &backend, + &storage, + configuration_range(&config, zero), + &changes, + &parent, + ).unwrap(); + assert_eq!(changes_trie_nodes.collect::>>(), vec![ + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![100] }, vec![0, 2, 3]), + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![101] }, vec![1]), + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![103] }, vec![0, 1]), + + InputPair::DigestIndex(DigestIndex { block: zero + 
4, key: vec![100] }, vec![zero + 1, zero + 3]), + InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![101] }, vec![zero + 1]), + InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![102] }, vec![zero + 2]), + InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![105] }, vec![zero + 1, zero + 3]), + ]); + } + + test_with_zero(0); + test_with_zero(16); + test_with_zero(17); } } diff --git a/core/state-machine/src/changes_trie/build_iterator.rs b/core/state-machine/src/changes_trie/build_iterator.rs index 1e2b5ce5986f3..0180e75f5812f 100644 --- a/core/state-machine/src/changes_trie/build_iterator.rs +++ b/core/state-machine/src/changes_trie/build_iterator.rs @@ -51,7 +51,7 @@ pub struct DigestBuildIterator { end: Number, /// Interval of L1 digest blocks. digest_interval: u32, - /// Max step valid that could be used when digest is created + /// Max step that could be used when digest is created. max_step: u32, // Mutable data below: diff --git a/core/state-machine/src/changes_trie/changes_iterator.rs b/core/state-machine/src/changes_trie/changes_iterator.rs index aadecaf53ff4e..8210e2dd95ecb 100644 --- a/core/state-machine/src/changes_trie/changes_iterator.rs +++ b/core/state-machine/src/changes_trie/changes_iterator.rs @@ -20,13 +20,13 @@ use std::cell::RefCell; use std::collections::VecDeque; use parity_codec::{Decode, Encode}; -use hash_db::{HashDB, Hasher}; +use hash_db::Hasher; use num_traits::Zero; -use trie::{Recorder, MemoryDB}; +use trie::Recorder; use crate::changes_trie::{AnchorBlockId, ConfigurationRange, RootsStorage, Storage, BlockNumber}; use crate::changes_trie::input::{DigestIndex, ExtrinsicIndex, DigestIndexValue, ExtrinsicIndexValue}; use crate::changes_trie::storage::{TrieBackendAdapter, InMemoryStorage}; -use crate::changes_trie::surface_iterator::{surface_iterator, SurfaceIterator, SKEWED_DIGEST_LEVEL}; +use crate::changes_trie::surface_iterator::{surface_iterator, SurfaceIterator}; use 
crate::proving_backend::ProvingBackendEssence; use crate::trie_backend_essence::{TrieBackendEssence}; @@ -122,21 +122,36 @@ pub fn key_changes_proof_check<'a, H: Hasher, Number: BlockNumber>( end: &AnchorBlockId, max: Number, key: &[u8] +) -> Result, String> { + key_changes_proof_check_with_db( + config, + roots_storage, + &InMemoryStorage::with_proof(proof), + begin, + end, + max, + key, + ) +} + +/// Similar to the `key_changes_proof_check` function, but works with prepared proof storage. +pub fn key_changes_proof_check_with_db<'a, H: Hasher, Number: BlockNumber>( + config: ConfigurationRange<'a, Number>, + roots_storage: &dyn RootsStorage, + proof_db: &InMemoryStorage, + begin: Number, + end: &AnchorBlockId, + max: Number, + key: &[u8] ) -> Result, String> { // we can't query any roots before root let max = ::std::cmp::min(max.clone(), end.number.clone()); - let mut proof_db = MemoryDB::::default(); - for item in proof { - proof_db.insert(&[], &item); - } - - let proof_db = InMemoryStorage::with_db(proof_db); DrilldownIterator { essence: DrilldownIteratorEssence { key, roots_storage, - storage: &proof_db, + storage: proof_db, begin: begin.clone(), end, config: config.clone(), @@ -172,7 +187,7 @@ pub struct DrilldownIteratorEssence<'a, H, Number> surface: SurfaceIterator<'a, Number>, extrinsics: VecDeque<(Number, u32)>, - blocks: VecDeque<(Number, u32)>, + blocks: VecDeque<(Number, Option)>, _hasher: ::std::marker::PhantomData, } @@ -234,19 +249,17 @@ impl<'a, H, Number> DrilldownIteratorEssence<'a, H, Number> // AND digest block changes could also include changes for out-of-range blocks let begin = self.begin.clone(); let end = self.end.number.clone(); - let is_skewed_digest = level == SKEWED_DIGEST_LEVEL; let config = self.config.clone(); self.blocks.extend(blocks.into_iter() .rev() - .filter(|b| level > 1 || (*b >= begin && *b <= end)) + .filter(|b| level.map(|level| level > 1).unwrap_or(true) || (*b >= begin && *b <= end)) .map(|b| { - let prev_level = if 
is_skewed_digest { - config.config.digest_level_at_block(config.zero.clone(), b.clone()) - .map(|(level, _, _)| level) - .unwrap_or_else(|| Zero::zero()) - } else { - level - 1 - }; + let prev_level = level + .map(|level| Some(level - 1)) + .unwrap_or_else(|| + Some(config.config.digest_level_at_block(config.zero.clone(), b.clone()) + .map(|(level, _, _)| level) + .unwrap_or_else(|| Zero::zero()))); (b, prev_level) }) ); diff --git a/core/state-machine/src/changes_trie/mod.rs b/core/state-machine/src/changes_trie/mod.rs index 9010af9d00d0c..eca3d55bc3c9c 100644 --- a/core/state-machine/src/changes_trie/mod.rs +++ b/core/state-machine/src/changes_trie/mod.rs @@ -44,7 +44,10 @@ mod storage; mod surface_iterator; pub use self::storage::InMemoryStorage; -pub use self::changes_iterator::{key_changes, key_changes_proof, key_changes_proof_check}; +pub use self::changes_iterator::{ + key_changes, key_changes_proof, + key_changes_proof_check, key_changes_proof_check_with_db, +}; pub use self::prune::{prune, oldest_non_pruned_trie}; use std::convert::TryInto; @@ -196,7 +199,6 @@ pub fn build_changes_trie<'a, B: Backend, H: Hasher, Number: BlockNumber>( // prepare configuration range - we already know zero block. Current block may be the end block if configuration // has been changed in this block - // TODO: ^^^ this won't work for forced digests let is_config_changed = match changes.storage(primitives::storage::well_known_keys::CHANGES_TRIE_CONFIG) { Some(Some(new_config)) => new_config != &state.config.encode()[..], Some(None) => true, diff --git a/core/state-machine/src/changes_trie/storage.rs b/core/state-machine/src/changes_trie/storage.rs index a3b0655e396ab..38cd90e86a5c2 100644 --- a/core/state-machine/src/changes_trie/storage.rs +++ b/core/state-machine/src/changes_trie/storage.rs @@ -48,7 +48,7 @@ struct InMemoryStorageData { } impl InMemoryStorage { - /// Create the storage from given in-memory database. + /// Creates storage from given in-memory database. 
pub fn with_db(mdb: MemoryDB) -> Self { Self { data: RwLock::new(InMemoryStorageData { @@ -58,12 +58,23 @@ impl InMemoryStorage { } } - /// Create the storage with empty database. + /// Creates storage with empty database. pub fn new() -> Self { Self::with_db(Default::default()) } - /// Create the storage with given blocks. + /// Creates storage with given proof. + pub fn with_proof(proof: Vec>) -> Self { + use hash_db::HashDB; + + let mut proof_db = MemoryDB::::default(); + for item in proof { + proof_db.insert(&[], &item); + } + Self::with_db(proof_db) + } + + /// Creates storage with given blocks. pub fn with_blocks(blocks: Vec<(Number, H::Out)>) -> Self { Self { data: RwLock::new(InMemoryStorageData { diff --git a/core/state-machine/src/changes_trie/surface_iterator.rs b/core/state-machine/src/changes_trie/surface_iterator.rs index 2947048ab655e..3711fac569770 100644 --- a/core/state-machine/src/changes_trie/surface_iterator.rs +++ b/core/state-machine/src/changes_trie/surface_iterator.rs @@ -23,8 +23,6 @@ use num_traits::One; use crate::changes_trie::{ConfigurationRange, BlockNumber}; -pub const SKEWED_DIGEST_LEVEL: u32 = 0xFFFFFFFF; // TODO: replace with Option - /// Returns surface iterator for given range of blocks. pub fn surface_iterator<'a, Number: BlockNumber>( config: ConfigurationRange<'a, Number>, @@ -32,8 +30,6 @@ pub fn surface_iterator<'a, Number: BlockNumber>( begin: Number, end: Number, ) -> Result, String> { - // TODO: check that end <= config.end - let (current, current_begin, digest_step, digest_level) = lower_bound_max_digest( config.clone(), max.clone(), @@ -53,6 +49,10 @@ pub fn surface_iterator<'a, Number: BlockNumber>( /// Surface iterator - only traverses top-level digests from given range and tries to find /// all valid digest changes. +/// +/// Iterator item is the tuple of (last block of the current point + digest level of the current point). 
+/// Digest level is Some(0) when it is regular block, is Some(non-zero) when it is digest block and None +/// if it is skewed digest block. pub struct SurfaceIterator<'a, Number: BlockNumber> { config: ConfigurationRange<'a, Number>, begin: Number, @@ -60,11 +60,11 @@ pub struct SurfaceIterator<'a, Number: BlockNumber> { current: Option, current_begin: Number, digest_step: u32, - digest_level: u32, + digest_level: Option, } impl<'a, Number: BlockNumber> Iterator for SurfaceIterator<'a, Number> { - type Item = Result<(Number, u32), String>; + type Item = Result<(Number, Option), String>; fn next(&mut self) -> Option { let current = self.current.clone()?; @@ -108,7 +108,7 @@ fn lower_bound_max_digest<'a, Number: BlockNumber>( max: Number, begin: Number, end: Number, -) -> Result<(Number, Number, u32, u32), String> { +) -> Result<(Number, Number, u32, Option), String> { if end > max || begin > end { return Err(format!("invalid changes range: {}..{}/{}", begin, end, max)); } @@ -141,14 +141,22 @@ fn lower_bound_max_digest<'a, Number: BlockNumber>( // check if we met skewed digest if let Some(skewed_digest_end) = config.end.as_ref() { if new_digest_end > *skewed_digest_end { - let skewed_digest_start = config.config.prev_max_level_digest_block(config.zero.clone(), skewed_digest_end.clone()).expect("TODO"); - let skewed_digest_range = (skewed_digest_end.clone() - skewed_digest_start.clone()).try_into().ok().expect("TODO"); - return Ok(( + let skewed_digest_start = config.config.prev_max_level_digest_block( + config.zero.clone(), skewed_digest_end.clone(), - skewed_digest_start, - skewed_digest_range, - SKEWED_DIGEST_LEVEL, - )); + ); + if let Some(skewed_digest_start) = skewed_digest_start { + let skewed_digest_range = (skewed_digest_end.clone() - skewed_digest_start.clone()) + .try_into().ok() + .expect("skewed digest range is always <= max level digest range;\ + max level digest range always fits u32; qed"); + return Ok(( + skewed_digest_end.clone(), + 
skewed_digest_start, + skewed_digest_range, + None, + )); + } } } @@ -178,7 +186,7 @@ fn lower_bound_max_digest<'a, Number: BlockNumber>( current, current_begin, digest_step, - digest_level, + Some(digest_level), )) } @@ -202,13 +210,13 @@ mod tests { // when config activates at 0 assert_eq!( lower_bound_max_digest(configuration_range(&config, 0u64), 100_000u64, 20u64, 180u64).unwrap(), - (192, 176, 16, 2), + (192, 176, 16, Some(2)), ); // when config activates at 30 assert_eq!( lower_bound_max_digest(configuration_range(&config, 30u64), 100_000u64, 50u64, 210u64).unwrap(), - (222, 206, 16, 2), + (222, 206, 16, Some(2)), ); } @@ -220,8 +228,9 @@ mod tests { assert_eq!( surface_iterator(configuration_range(&config, 0u64), 100_000u64, 40u64, 180u64).unwrap().collect::>(), vec![ - Ok((192, 2)), Ok((176, 2)), Ok((160, 2)), Ok((144, 2)), Ok((128, 2)), Ok((112, 2)), - Ok((96, 2)), Ok((80, 2)), Ok((64, 2)), Ok((48, 2)), + Ok((192, Some(2))), Ok((176, Some(2))), Ok((160, Some(2))), Ok((144, Some(2))), + Ok((128, Some(2))), Ok((112, Some(2))), Ok((96, Some(2))), Ok((80, Some(2))), + Ok((64, Some(2))), Ok((48, Some(2))), ], ); @@ -229,8 +238,8 @@ mod tests { assert_eq!( surface_iterator(configuration_range(&config, 30u64), 100_000u64, 40u64, 180u64).unwrap().collect::>(), vec![ - Ok((190, 2)), Ok((174, 2)), Ok((158, 2)), Ok((142, 2)), Ok((126, 2)), Ok((110, 2)), - Ok((94, 2)), Ok((78, 2)), Ok((62, 2)), Ok((46, 2)), + Ok((190, Some(2))), Ok((174, Some(2))), Ok((158, Some(2))), Ok((142, Some(2))), Ok((126, Some(2))), + Ok((110, Some(2))), Ok((94, Some(2))), Ok((78, Some(2))), Ok((62, Some(2))), Ok((46, Some(2))), ], ); @@ -238,9 +247,9 @@ mod tests { assert_eq!( surface_iterator(configuration_range(&config, 0u64), 183u64, 40u64, 183u64).unwrap().collect::>(), vec![ - Ok((183, 0)), Ok((182, 0)), Ok((181, 0)), Ok((180, 1)), - Ok((176, 2)), Ok((160, 2)), Ok((144, 2)), Ok((128, 2)), Ok((112, 2)), - Ok((96, 2)), Ok((80, 2)), Ok((64, 2)), Ok((48, 2)), + Ok((183, Some(0))), Ok((182, 
Some(0))), Ok((181, Some(0))), Ok((180, Some(1))), + Ok((176, Some(2))), Ok((160, Some(2))), Ok((144, Some(2))), Ok((128, Some(2))), Ok((112, Some(2))), + Ok((96, Some(2))), Ok((80, Some(2))), Ok((64, Some(2))), Ok((48, Some(2))), ], ); } @@ -255,8 +264,8 @@ mod tests { assert_eq!( surface_iterator(config_range, 100_000u64, 40u64, 170u64).unwrap().collect::>(), vec![ - Ok((170, SKEWED_DIGEST_LEVEL)), Ok((160, 2)), Ok((144, 2)), Ok((128, 2)), Ok((112, 2)), - Ok((96, 2)), Ok((80, 2)), Ok((64, 2)), Ok((48, 2)), + Ok((170, None)), Ok((160, Some(2))), Ok((144, Some(2))), Ok((128, Some(2))), Ok((112, Some(2))), + Ok((96, Some(2))), Ok((80, Some(2))), Ok((64, Some(2))), Ok((48, Some(2))), ], ); } diff --git a/core/state-machine/src/lib.rs b/core/state-machine/src/lib.rs index 1fa37504288f1..6815967bc2ff0 100644 --- a/core/state-machine/src/lib.rs +++ b/core/state-machine/src/lib.rs @@ -48,7 +48,8 @@ pub use changes_trie::{ RootsStorage as ChangesTrieRootsStorage, InMemoryStorage as InMemoryChangesTrieStorage, ConfigurationRange as ChangesTrieConfigurationRange, - key_changes, key_changes_proof, key_changes_proof_check, + key_changes, key_changes_proof, + key_changes_proof_check, key_changes_proof_check_with_db, prune as prune_changes_tries, oldest_non_pruned_trie as oldest_non_pruned_changes_trie, disabled_state as disabled_changes_trie_state, From 1b3b5652b8320d44ec17e062abec84e9d0d6014f Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 1 Aug 2019 11:44:40 +0300 Subject: [PATCH 27/63] fixed compilation --- core/network/src/protocol/on_demand.rs | 5 +++-- core/rpc/src/state/tests.rs | 6 ++++-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/core/network/src/protocol/on_demand.rs b/core/network/src/protocol/on_demand.rs index 4bf6ca75b3ab2..7560e9ff73969 100644 --- a/core/network/src/protocol/on_demand.rs +++ b/core/network/src/protocol/on_demand.rs @@ -637,6 +637,7 @@ pub mod tests { use std::sync::Arc; use std::time::Instant; use 
futures::{Future, sync::oneshot}; + use primitives::ChangesTrieConfiguration; use runtime_primitives::traits::{Block as BlockT, NumberFor, Header as HeaderT}; use client::{error::{Error as ClientError, Result as ClientResult}}; use client::light::fetcher::{FetchChecker, RemoteHeaderRequest, @@ -646,7 +647,7 @@ pub mod tests { use crate::message::{self, BlockAttributes, Direction, FromBlock, RequestId}; use libp2p::PeerId; use super::{REQUEST_TIMEOUT, OnDemandCore, OnDemandNetwork, RequestData}; - use test_client::runtime::{changes_trie_config, Block, Extrinsic, Header}; + use test_client::runtime::{Block, Extrinsic, Header}; struct DummyFetchChecker { ok: bool } @@ -1063,7 +1064,7 @@ pub mod tests { let (tx, response) = oneshot::channel(); on_demand.add_request(&mut network_interface, RequestData::RemoteChanges(RemoteChangesRequest { - changes_trie_configs: vec![(0, None, changes_trie_config())], + changes_trie_configs: vec![(0, None, ChangesTrieConfiguration::new(4, 2))], first_block: (1, Default::default()), last_block: (100, Default::default()), max_block: (100, Default::default()), diff --git a/core/rpc/src/state/tests.rs b/core/rpc/src/state/tests.rs index 6a8eefa10b660..d28b3aec3bc73 100644 --- a/core/rpc/src/state/tests.rs +++ b/core/rpc/src/state/tests.rs @@ -18,7 +18,7 @@ use super::*; use self::error::Error; use assert_matches::assert_matches; -use primitives::storage::well_known_keys; +use primitives::{ChangesTrieConfiguration, storage::well_known_keys}; use sr_io::blake2_256; use test_client::{ prelude::*, @@ -236,7 +236,9 @@ fn should_query_storage() { } run_tests(Arc::new(test_client::new())); - run_tests(Arc::new(TestClientBuilder::new().set_support_changes_trie(true).build())); + run_tests(Arc::new(TestClientBuilder::new() + .changes_trie_config(Some(ChangesTrieConfiguration::new(4, 2))) + .build())); } #[test] From 1de7f77e4fafcf7993c007babd48e3e1022eaa42 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 1 Aug 2019 14:08:01 +0300 
Subject: [PATCH 28/63] update runtime version --- node/runtime/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/node/runtime/src/lib.rs b/node/runtime/src/lib.rs index d178eeff18fef..c57a6633a7283 100644 --- a/node/runtime/src/lib.rs +++ b/node/runtime/src/lib.rs @@ -79,8 +79,8 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to equal spec_version. If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. - spec_version: 125, - impl_version: 126, + spec_version: 126, + impl_version: 127, apis: RUNTIME_API_VERSIONS, }; From ce78f66dcd87b80fcf42c0eb9a3bc9ccc83e5082 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 1 Aug 2019 14:21:43 +0300 Subject: [PATCH 29/63] git rid of large tuple --- core/client/db/src/changes_tries_storage.rs | 30 ++++++++++----------- core/client/src/backend.rs | 22 +++++++++------ core/client/src/client.rs | 10 +++---- 3 files changed, 33 insertions(+), 29 deletions(-) diff --git a/core/client/db/src/changes_tries_storage.rs b/core/client/db/src/changes_tries_storage.rs index cbe2b840d41e9..bd7a98efe1fc8 100644 --- a/core/client/db/src/changes_tries_storage.rs +++ b/core/client/db/src/changes_tries_storage.rs @@ -23,7 +23,7 @@ use parity_codec::Encode; use parking_lot::{RwLock, RwLockWriteGuard}; use client::error::{Error as ClientError, Result as ClientResult}; use trie::MemoryDB; -use client::backend::PrunableStateChangesTrieStorage; +use client::backend::{PrunableStateChangesTrieStorage, ChangesTrieConfigurationRange}; use client::blockchain::{Cache, well_known_cache_keys}; use parity_codec::Decode; use primitives::{H256, Blake2Hasher, ChangesTrieConfiguration, convert_hash}; @@ -157,10 +157,10 @@ impl> DbChangesTrieStorage { }; // prune changes tries that are created using newest configuration - let ((activation_num, _), _, newest_config) = self.configuration_at(&BlockId::Hash(parent_hash))?; - if 
let Some(config) = newest_config { + let config_range = self.configuration_at(&BlockId::Hash(parent_hash))?; + if let Some(config) = config_range.config { state_machine::prune_changes_tries( - activation_num, + config_range.zero.0, &config, &*self, min_blocks_to_keep.into(), @@ -186,13 +186,11 @@ where self } - fn configuration_at( - &self, - at: &BlockId, - ) -> ClientResult<((NumberFor, Block::Hash), Option<(NumberFor, Block::Hash)>, Option)> { + fn configuration_at(&self, at: &BlockId) -> ClientResult> { self.cache .get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, at) - .and_then(|(number, hash, encoded)| Decode::decode(&mut &encoded[..]).map(|config| (number, hash, config))) + .and_then(|(zero, end, encoded)| Decode::decode(&mut &encoded[..]) + .map(|config| ChangesTrieConfigurationRange { zero, end, config })) .ok_or_else(|| ClientError::ErrorReadingChangesTriesConfig) } @@ -631,31 +629,31 @@ mod tests { // test configuration cache let storage = &backend.changes_tries_storage; assert_eq!( - storage.configuration_at(&BlockId::Hash(block1)).unwrap().2, + storage.configuration_at(&BlockId::Hash(block1)).unwrap().config, config_at_1.clone(), ); assert_eq!( - storage.configuration_at(&BlockId::Hash(block2)).unwrap().2, + storage.configuration_at(&BlockId::Hash(block2)).unwrap().config, config_at_1.clone(), ); assert_eq!( - storage.configuration_at(&BlockId::Hash(block3)).unwrap().2, + storage.configuration_at(&BlockId::Hash(block3)).unwrap().config, config_at_3.clone(), ); assert_eq!( - storage.configuration_at(&BlockId::Hash(block4)).unwrap().2, + storage.configuration_at(&BlockId::Hash(block4)).unwrap().config, config_at_3.clone(), ); assert_eq!( - storage.configuration_at(&BlockId::Hash(block5)).unwrap().2, + storage.configuration_at(&BlockId::Hash(block5)).unwrap().config, config_at_5.clone(), ); assert_eq!( - storage.configuration_at(&BlockId::Hash(block6)).unwrap().2, + storage.configuration_at(&BlockId::Hash(block6)).unwrap().config, config_at_5.clone(), 
); assert_eq!( - storage.configuration_at(&BlockId::Hash(block7)).unwrap().2, + storage.configuration_at(&BlockId::Hash(block7)).unwrap().config, config_at_7.clone(), ); } diff --git a/core/client/src/backend.rs b/core/client/src/backend.rs index 53511f5173ea6..22377e345222c 100644 --- a/core/client/src/backend.rs +++ b/core/client/src/backend.rs @@ -216,6 +216,16 @@ pub trait OffchainStorage: Clone + Send + Sync { ) -> bool; } +/// Changes trie configuration range. +pub struct ChangesTrieConfigurationRange { + /// Zero block of this configuration. First trie that uses this configuration is build at the next block. + pub zero: (NumberFor, Block::Hash), + /// End block where last trie that uses this configuration has been build. None if configuration is active. + pub end: Option<(NumberFor, Block::Hash)>, + /// Configuration itself. None if changes tries are disabled within this range. + pub config: Option, +} + /// Changes trie storage that supports pruning. pub trait PrunableStateChangesTrieStorage: StateChangesTrieStorage> @@ -223,11 +233,7 @@ pub trait PrunableStateChangesTrieStorage: /// Get reference to StateChangesTrieStorage. fn storage(&self) -> &dyn StateChangesTrieStorage>; /// Get coniguration at given block. - fn configuration_at(&self, at: &BlockId) -> error::Result<( - (NumberFor, Block::Hash), - Option<(NumberFor, Block::Hash)>, - Option, - )>; + fn configuration_at(&self, at: &BlockId) -> error::Result>; /// Get number block of oldest, non-pruned changes trie. 
fn oldest_changes_trie_block( &self, @@ -267,9 +273,9 @@ pub fn changes_tries_state_at_block<'a, Block: BlockT, H: Hasher>( None => return Ok(None), }; - let ((zero, _), _, config) = storage.configuration_at(block)?; - match config { - Some(config) => Ok(Some(ChangesTrieState::new(config, zero, storage.storage()))), + let config_range = storage.configuration_at(block)?; + match config_range.config { + Some(config) => Ok(Some(ChangesTrieState::new(config, config_range.zero.0, storage.storage()))), None => Ok(None), } } diff --git a/core/client/src/client.rs b/core/client/src/client.rs index 9ec9440b8a553..9cf28eb327984 100644 --- a/core/client/src/client.rs +++ b/core/client/src/client.rs @@ -763,17 +763,17 @@ impl Client where let mut configs = Vec::with_capacity(1); let mut current = last; loop { - let ((config_zero_number, config_zero_hash), config_end, config) = storage.configuration_at(&BlockId::Hash(current))?; - match config { - Some(config) => configs.push((config_zero_number, config_end, config)), + let config_range = storage.configuration_at(&BlockId::Hash(current))?; + match config_range.config { + Some(config) => configs.push((config_range.zero.0, config_range.end, config)), None => return Err(error::Error::ChangesTriesNotSupported), } - if config_zero_number < first { + if config_range.zero.0 < first { break; } - current = *self.backend.blockchain().expect_header(BlockId::Hash(config_zero_hash))?.parent_hash(); + current = *self.backend.blockchain().expect_header(BlockId::Hash(config_range.zero.1))?.parent_hash(); } Ok((storage, configs)) From 82e74d502d5082a7a95d5cff3aed1294ca0b4176 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 1 Aug 2019 14:31:17 +0300 Subject: [PATCH 30/63] too long lines --- core/client/db/src/lib.rs | 16 +++- core/client/src/client.rs | 3 +- core/primitives/src/changes_trie.rs | 16 +++- .../src/changes_trie/build_iterator.rs | 84 +++++++++++++++---- .../src/changes_trie/changes_iterator.rs | 79 +++++++++++++---- 
core/state-machine/src/changes_trie/prune.rs | 24 ++++-- core/test-runtime/client/src/lib.rs | 5 +- 7 files changed, 181 insertions(+), 46 deletions(-) diff --git a/core/client/db/src/lib.rs b/core/client/db/src/lib.rs index f7303ecd89387..791176e8218a1 100644 --- a/core/client/db/src/lib.rs +++ b/core/client/db/src/lib.rs @@ -929,7 +929,10 @@ impl> Backend { let changes_trie_cache_ops = self.changes_tries_storage.commit( &mut transaction, changes_trie_updates, - cache::ComplexBlockId::new(*header.parent_hash(), if number.is_zero() { Zero::zero() } else { number - One::one() }), + cache::ComplexBlockId::new( + *header.parent_hash(), + if number.is_zero() { Zero::zero() } else { number - One::one() }, + ), cache::ComplexBlockId::new(hash, number), finalized, changes_trie_config_update, @@ -992,7 +995,16 @@ impl> Backend { let write_result = self.storage.db.write(transaction).map_err(db_err); - if let Some((number, hash, enacted, retracted, displaced_leaf, is_best, mut cache, changes_trie_cache_ops)) = imported { + if let Some(( + number, + hash, + enacted, + retracted, + displaced_leaf, + is_best, + mut cache, + changes_trie_cache_ops, + )) = imported { if let Err(e) = write_result { let mut leaves = self.blockchain.leaves.write(); let mut undo = leaves.undo(); diff --git a/core/client/src/client.rs b/core/client/src/client.rs index 9cf28eb327984..072509b94a4b0 100644 --- a/core/client/src/client.rs +++ b/core/client/src/client.rs @@ -986,7 +986,8 @@ impl Client where } // FIXME #1232: correct path logic for when to execute this function - let (storage_update,changes_update,storage_changes) = self.block_execution(&operation.op, &import_headers, origin, hash, parent_hash, body.clone())?; + let (storage_update,changes_update,storage_changes) = + self.block_execution(&operation.op, &import_headers, origin, hash, parent_hash, body.clone())?; let is_new_best = finalized || match fork_choice { ForkChoiceStrategy::LongestChain => import_headers.post().number() > 
&last_best_number, diff --git a/core/primitives/src/changes_trie.rs b/core/primitives/src/changes_trie.rs index 97cbfed4752be..660f14abe4236 100644 --- a/core/primitives/src/changes_trie.rs +++ b/core/primitives/src/changes_trie.rs @@ -56,7 +56,9 @@ impl ChangesTrieConfiguration { block: Number, ) -> bool where - Number: From + PartialEq + ::rstd::ops::Rem + ::rstd::ops::Sub + ::rstd::cmp::PartialOrd + Zero, + Number: From + PartialEq + + ::rstd::ops::Rem + ::rstd::ops::Sub + + ::rstd::cmp::PartialOrd + Zero, { block > zero && self.is_digest_build_enabled() @@ -90,7 +92,9 @@ impl ChangesTrieConfiguration { block: Number, ) -> Option where - Number: Clone + From + PartialEq + ::rstd::ops::Add + ::rstd::ops::Sub + ::rstd::ops::Div + ::rstd::ops::Mul + Zero, + Number: Clone + From + PartialEq + + ::rstd::ops::Add + ::rstd::ops::Sub + + ::rstd::ops::Div + ::rstd::ops::Mul + Zero, { if !self.is_digest_build_enabled() { return None; @@ -112,7 +116,9 @@ impl ChangesTrieConfiguration { block: Number, ) -> Option where - Number: Clone + From + PartialEq + ::rstd::ops::Add + ::rstd::ops::Sub + ::rstd::ops::Div + ::rstd::ops::Mul, + Number: Clone + From + PartialEq + + ::rstd::ops::Add + ::rstd::ops::Sub + + ::rstd::ops::Div + ::rstd::ops::Mul, { if !self.is_digest_build_enabled() { return None; @@ -137,7 +143,9 @@ impl ChangesTrieConfiguration { /// ) pub fn digest_level_at_block(&self, zero: Number, block: Number) -> Option<(u32, u32, u32)> where - Number: Clone + From + PartialEq + ::rstd::ops::Rem + ::rstd::ops::Sub + ::rstd::cmp::PartialOrd + Zero, + Number: Clone + From + PartialEq + + ::rstd::ops::Rem + ::rstd::ops::Sub + + ::rstd::cmp::PartialOrd + Zero, { if !self.is_digest_build_required_at_block(zero.clone(), block.clone()) { return None; diff --git a/core/state-machine/src/changes_trie/build_iterator.rs b/core/state-machine/src/changes_trie/build_iterator.rs index 0180e75f5812f..3a1ed738c18bc 100644 --- a/core/state-machine/src/changes_trie/build_iterator.rs +++ 
b/core/state-machine/src/changes_trie/build_iterator.rs @@ -215,10 +215,26 @@ mod tests { assert_eq!(digest_build_iterator_basic(0, 16, zero, zero + 64), empty, "digest_interval is 0"); assert_eq!(digest_build_iterator_basic(1, 16, zero, zero + 64), empty, "digest_interval is 1"); assert_eq!(digest_build_iterator_basic(4, 0, zero, zero + 64), empty, "digest_levels is 0"); - assert_eq!(digest_build_iterator_basic(4, 16, zero, zero + 1), empty, "digest is not required for this block"); - assert_eq!(digest_build_iterator_basic(4, 16, zero, zero + 2), empty, "digest is not required for this block"); - assert_eq!(digest_build_iterator_basic(4, 16, zero, zero + 15), empty, "digest is not required for this block"); - assert_eq!(digest_build_iterator_basic(4, 16, zero, zero + 17), empty, "digest is not required for this block"); + assert_eq!( + digest_build_iterator_basic(4, 16, zero, zero + 1), + empty, + "digest is not required for this block", + ); + assert_eq!( + digest_build_iterator_basic(4, 16, zero, zero + 2), + empty, + "digest is not required for this block", + ); + assert_eq!( + digest_build_iterator_basic(4, 16, zero, zero + 15), + empty, + "digest is not required for this block", + ); + assert_eq!( + digest_build_iterator_basic(4, 16, zero, zero + 17), + empty, + "digest is not required for this block", + ); assert_eq!(digest_build_iterator_basic( ::std::u32::MAX / 2 + 1, 16, @@ -237,10 +253,26 @@ mod tests { #[test] fn suggest_digest_inclusion_returns_level1_iterator() { fn test_with_zero(zero: u64) { - assert_eq!(digest_build_iterator_basic(16, 1, zero, zero + 16), (zero + 16, 16, 1), "!(block % interval) && first digest level == block"); - assert_eq!(digest_build_iterator_basic(16, 1, zero, zero + 256), (zero + 256, 16, 1), "!(block % interval^2), but there's only 1 digest level"); - assert_eq!(digest_build_iterator_basic(16, 2, zero, zero + 32), (zero + 32, 16, 1), "second level digest is not required for this block"); - 
assert_eq!(digest_build_iterator_basic(16, 3, zero, zero + 4080), (zero + 4080, 16, 1), "second && third level digest are not required for this block"); + assert_eq!( + digest_build_iterator_basic(16, 1, zero, zero + 16), + (zero + 16, 16, 1), + "!(block % interval) && first digest level == block", + ); + assert_eq!( + digest_build_iterator_basic(16, 1, zero, zero + 256), + (zero + 256, 16, 1), + "!(block % interval^2), but there's only 1 digest level", + ); + assert_eq!( + digest_build_iterator_basic(16, 2, zero, zero + 32), + (zero + 32, 16, 1), + "second level digest is not required for this block", + ); + assert_eq!( + digest_build_iterator_basic(16, 3, zero, zero + 4080), + (zero + 4080, 16, 1), + "second && third level digest are not required for this block", + ); } test_with_zero(0); @@ -251,8 +283,16 @@ mod tests { #[test] fn suggest_digest_inclusion_returns_level2_iterator() { fn test_with_zero(zero: u64) { - assert_eq!(digest_build_iterator_basic(16, 2, zero, zero + 256), (zero + 256, 16, 16), "second level digest"); - assert_eq!(digest_build_iterator_basic(16, 2, zero, zero + 4096), (zero + 4096, 16, 16), "!(block % interval^3), but there's only 2 digest levels"); + assert_eq!( + digest_build_iterator_basic(16, 2, zero, zero + 256), + (zero + 256, 16, 16), + "second level digest", + ); + assert_eq!( + digest_build_iterator_basic(16, 2, zero, zero + 4096), + (zero + 4096, 16, 16), + "!(block % interval^3), but there's only 2 digest levels", + ); } test_with_zero(0); @@ -263,8 +303,16 @@ mod tests { #[test] fn suggest_digest_inclusion_returns_level3_iterator() { fn test_with_zero(zero: u64) { - assert_eq!(digest_build_iterator_basic(16, 3, zero, zero + 4096), (zero + 4096, 16, 256), "third level digest: beginning"); - assert_eq!(digest_build_iterator_basic(16, 3, zero, zero + 8192), (zero + 8192, 16, 256), "third level digest: next"); + assert_eq!( + digest_build_iterator_basic(16, 3, zero, zero + 4096), + (zero + 4096, 16, 256), + "third level digest: 
beginning", + ); + assert_eq!( + digest_build_iterator_basic(16, 3, zero, zero + 8192), + (zero + 8192, 16, 256), + "third level digest: next", + ); } test_with_zero(0); @@ -276,13 +324,17 @@ mod tests { fn digest_iterator_returns_level1_blocks() { fn test_with_zero(zero: u64) { assert_eq!(digest_build_iterator_blocks(16, 1, zero, zero + 16, None), - [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15].iter().map(|item| zero + item).collect::>()); + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + .iter().map(|item| zero + item).collect::>()); assert_eq!(digest_build_iterator_blocks(16, 1, zero, zero + 256, None), - [241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255].iter().map(|item| zero + item).collect::>()); + [241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255] + .iter().map(|item| zero + item).collect::>()); assert_eq!(digest_build_iterator_blocks(16, 2, zero, zero + 32, None), - [17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31].iter().map(|item| zero + item).collect::>()); + [17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31] + .iter().map(|item| zero + item).collect::>()); assert_eq!(digest_build_iterator_blocks(16, 3, zero, zero + 4080, None), - [4065, 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073, 4074, 4075, 4076, 4077, 4078, 4079].iter().map(|item| zero + item).collect::>()); + [4065, 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073, 4074, 4075, 4076, 4077, 4078, 4079] + .iter().map(|item| zero + item).collect::>()); } test_with_zero(0); diff --git a/core/state-machine/src/changes_trie/changes_iterator.rs b/core/state-machine/src/changes_trie/changes_iterator.rs index 8210e2dd95ecb..cf3b18ddd23cf 100644 --- a/core/state-machine/src/changes_trie/changes_iterator.rs +++ b/core/state-machine/src/changes_trie/changes_iterator.rs @@ -405,33 +405,63 @@ mod tests { fn drilldown_iterator_works() { let (config, storage) = prepare_for_drilldown(); let drilldown_result = key_changes::( - 
configuration_range(&config, 0), &storage, 1, &AnchorBlockId { hash: Default::default(), number: 16 }, 16, &[42]) - .and_then(Result::from_iter); + configuration_range(&config, 0), + &storage, + 1, + &AnchorBlockId { hash: Default::default(), number: 16 }, + 16, + &[42], + ).and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(8, 2), (8, 1), (6, 3), (3, 0)])); let drilldown_result = key_changes::( - configuration_range(&config, 0), &storage, 1, &AnchorBlockId { hash: Default::default(), number: 2 }, 4, &[42]) - .and_then(Result::from_iter); + configuration_range(&config, 0), + &storage, + 1, + &AnchorBlockId { hash: Default::default(), number: 2 }, + 4, + &[42], + ).and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![])); let drilldown_result = key_changes::( - configuration_range(&config, 0), &storage, 1, &AnchorBlockId { hash: Default::default(), number: 3 }, 4, &[42]) - .and_then(Result::from_iter); + configuration_range(&config, 0), + &storage, + 1, + &AnchorBlockId { hash: Default::default(), number: 3 }, + 4, + &[42], + ).and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(3, 0)])); let drilldown_result = key_changes::( - configuration_range(&config, 0), &storage, 1, &AnchorBlockId { hash: Default::default(), number: 7 }, 7, &[42]) - .and_then(Result::from_iter); + configuration_range(&config, 0), + &storage, + 1, + &AnchorBlockId { hash: Default::default(), number: 7 }, + 7, + &[42], + ).and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(6, 3), (3, 0)])); let drilldown_result = key_changes::( - configuration_range(&config, 0), &storage, 7, &AnchorBlockId { hash: Default::default(), number: 8 }, 8, &[42]) - .and_then(Result::from_iter); + configuration_range(&config, 0), + &storage, + 7, + &AnchorBlockId { hash: Default::default(), number: 8 }, + 8, + &[42], + ).and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(8, 2), (8, 1)])); let drilldown_result = key_changes::( - 
configuration_range(&config, 0), &storage, 5, &AnchorBlockId { hash: Default::default(), number: 7 }, 8, &[42]) - .and_then(Result::from_iter); + configuration_range(&config, 0), + &storage, + 5, + &AnchorBlockId { hash: Default::default(), number: 7 }, + 8, + &[42], + ).and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(6, 3)])); } @@ -441,17 +471,34 @@ mod tests { storage.clear_storage(); assert!(key_changes::( - configuration_range(&config, 0), &storage, 1, &AnchorBlockId { hash: Default::default(), number: 100 }, 1000, &[42]) - .and_then(|i| i.collect::, _>>()).is_err()); + configuration_range(&config, 0), + &storage, + 1, + &AnchorBlockId { hash: Default::default(), number: 100 }, + 1000, + &[42], + ).and_then(|i| i.collect::, _>>()).is_err()); } #[test] fn drilldown_iterator_fails_when_range_is_invalid() { let (config, storage) = prepare_for_drilldown(); assert!(key_changes::( - configuration_range(&config, 0), &storage, 1, &AnchorBlockId { hash: Default::default(), number: 100 }, 50, &[42]).is_err()); + configuration_range(&config, 0), + &storage, + 1, + &AnchorBlockId { hash: Default::default(), number: 100 }, + 50, + &[42], + ).is_err()); assert!(key_changes::( - configuration_range(&config, 0), &storage, 20, &AnchorBlockId { hash: Default::default(), number: 10 }, 100, &[42]).is_err()); + configuration_range(&config, 0), + &storage, + 20, + &AnchorBlockId { hash: Default::default(), number: 10 }, + 100, + &[42], + ).is_err()); } diff --git a/core/state-machine/src/changes_trie/prune.rs b/core/state-machine/src/changes_trie/prune.rs index 34c4c5675db8d..2a4fa9a9c73bc 100644 --- a/core/state-machine/src/changes_trie/prune.rs +++ b/core/state-machine/src/changes_trie/prune.rs @@ -35,7 +35,8 @@ pub fn oldest_non_pruned_trie( best_finalized_block: Number, ) -> Number { let max_digest_interval = config.max_digest_interval(); - let best_finalized_block_rem = (best_finalized_block.clone() - config_activation_block.clone()) % 
max_digest_interval.into(); + let best_finalized_block_rem = + (best_finalized_block.clone() - config_activation_block.clone()) % max_digest_interval.into(); let max_digest_block = best_finalized_block - best_finalized_block_rem; match pruning_range(config_activation_block.clone(), config, min_blocks_to_keep, max_digest_block) { Some((_, last_pruned_block)) => last_pruned_block + One::one(), @@ -56,7 +57,8 @@ pub fn prune, H: Hasher, Number: BlockNumber, F: FnMut(H:: mut remove_trie_node: F, ) { // select range for pruning - let (first, last) = match pruning_range(config_activation_block, config, min_blocks_to_keep, current_block.number.clone()) { + let range = pruning_range(config_activation_block, config, min_blocks_to_keep, current_block.number.clone()); + let (first, last) = match range { Some((first, last)) => (first, last), None => return, }; @@ -193,7 +195,8 @@ mod tests { current_block: u64, ) -> HashSet { let mut pruned_trie_nodes = HashSet::new(); - prune(zero, config, storage, min_blocks_to_keep, &AnchorBlockId { hash: Default::default(), number: current_block }, + let anchor = AnchorBlockId { hash: Default::default(), number: current_block }; + prune(zero, config, storage, min_blocks_to_keep, &anchor, |node| { pruned_trie_nodes.insert(node); }); pruned_trie_nodes } @@ -204,11 +207,20 @@ mod tests { let mut mdb1 = MemoryDB::::default(); let root1 = insert_into_memory_db::(&mut mdb1, vec![(vec![10], vec![20])]).unwrap(); let mut mdb2 = MemoryDB::::default(); - let root2 = insert_into_memory_db::(&mut mdb2, vec![(vec![11], vec![21]), (vec![12], vec![22])]).unwrap(); + let root2 = insert_into_memory_db::( + &mut mdb2, + vec![(vec![11], vec![21]), (vec![12], vec![22])], + ).unwrap(); let mut mdb3 = MemoryDB::::default(); - let root3 = insert_into_memory_db::(&mut mdb3, vec![(vec![13], vec![23]), (vec![14], vec![24])]).unwrap(); + let root3 = insert_into_memory_db::( + &mut mdb3, + vec![(vec![13], vec![23]), (vec![14], vec![24])], + ).unwrap(); let mut mdb4 = 
MemoryDB::::default(); - let root4 = insert_into_memory_db::(&mut mdb4, vec![(vec![15], vec![25])]).unwrap(); + let root4 = insert_into_memory_db::( + &mut mdb4, + vec![(vec![15], vec![25])], + ).unwrap(); let storage = InMemoryStorage::new(); storage.insert(zero + 65, root1, mdb1); storage.insert(zero + 66, root2, mdb2); diff --git a/core/test-runtime/client/src/lib.rs b/core/test-runtime/client/src/lib.rs index 47564c83f2ec4..1be36649e5bce 100644 --- a/core/test-runtime/client/src/lib.rs +++ b/core/test-runtime/client/src/lib.rs @@ -180,7 +180,10 @@ impl TestClientBuilderExt for TestClientBuilder< } } -fn genesis_config(changes_trie_config: Option, heap_pages_override: Option) -> GenesisConfig { +fn genesis_config( + changes_trie_config: Option, + heap_pages_override: Option, +) -> GenesisConfig { GenesisConfig::new( changes_trie_config, vec![ From cfb1cf1e02bc74da4499e8355d37940ea48914f3 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 1 Aug 2019 14:42:38 +0300 Subject: [PATCH 31/63] config_activation_block -> zero --- core/client/db/src/changes_tries_storage.rs | 4 ++-- core/client/src/backend.rs | 2 +- core/state-machine/src/changes_trie/mod.rs | 8 ++++---- core/state-machine/src/changes_trie/prune.rs | 18 +++++++++--------- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/core/client/db/src/changes_tries_storage.rs b/core/client/db/src/changes_tries_storage.rs index bd7a98efe1fc8..09e419a693732 100644 --- a/core/client/db/src/changes_tries_storage.rs +++ b/core/client/db/src/changes_tries_storage.rs @@ -196,13 +196,13 @@ where fn oldest_changes_trie_block( &self, - config_activation_block: NumberFor, + zero: NumberFor, config: ChangesTrieConfiguration, best_finalized_block: NumberFor, ) -> NumberFor { match self.min_blocks_to_keep { Some(min_blocks_to_keep) => state_machine::oldest_non_pruned_changes_trie( - config_activation_block, + zero, &config, min_blocks_to_keep.into(), best_finalized_block, diff --git 
a/core/client/src/backend.rs b/core/client/src/backend.rs index 22377e345222c..ed276709bf209 100644 --- a/core/client/src/backend.rs +++ b/core/client/src/backend.rs @@ -237,7 +237,7 @@ pub trait PrunableStateChangesTrieStorage: /// Get number block of oldest, non-pruned changes trie. fn oldest_changes_trie_block( &self, - config_activation_block: NumberFor, + zero: NumberFor, config: ChangesTrieConfiguration, best_finalized: NumberFor, ) -> NumberFor; diff --git a/core/state-machine/src/changes_trie/mod.rs b/core/state-machine/src/changes_trie/mod.rs index eca3d55bc3c9c..efc444ce19e82 100644 --- a/core/state-machine/src/changes_trie/mod.rs +++ b/core/state-machine/src/changes_trie/mod.rs @@ -108,7 +108,7 @@ pub struct State<'a, H, Number> { /// Configuration activation block number. Zero if it is the first coonfiguration on the chain, /// or number of the block that have emit NewConfiguration signal (thus activating configuration /// starting from the **next** block). - pub config_activation_block: Number, + pub zero: Number, /// Underlying changes tries storage reference. pub storage: &'a dyn Storage, } @@ -159,12 +159,12 @@ impl<'a, H, Number> State<'a, H, Number> { /// Create state with given config and storage. 
pub fn new( config: Configuration, - config_activation_block: Number, + zero: Number, storage: &'a dyn Storage, ) -> Self { Self { config, - config_activation_block, + zero, storage, } } @@ -206,7 +206,7 @@ pub fn build_changes_trie<'a, B: Backend, H: Hasher, Number: BlockNumber>( }; let config_range = ConfigurationRange { config: &state.config, - zero: state.config_activation_block.clone(), + zero: state.zero.clone(), end: if is_config_changed { Some(parent.number.clone() + One::one()) } else { None }, }; diff --git a/core/state-machine/src/changes_trie/prune.rs b/core/state-machine/src/changes_trie/prune.rs index 2a4fa9a9c73bc..52e351bf8423e 100644 --- a/core/state-machine/src/changes_trie/prune.rs +++ b/core/state-machine/src/changes_trie/prune.rs @@ -29,18 +29,18 @@ use crate::changes_trie::storage::TrieBackendAdapter; /// given changes trie configuration, pruning parameter and number of /// best finalized block. pub fn oldest_non_pruned_trie( - config_activation_block: Number, + zero: Number, config: &Configuration, min_blocks_to_keep: Number, best_finalized_block: Number, ) -> Number { let max_digest_interval = config.max_digest_interval(); let best_finalized_block_rem = - (best_finalized_block.clone() - config_activation_block.clone()) % max_digest_interval.into(); + (best_finalized_block.clone() - zero.clone()) % max_digest_interval.into(); let max_digest_block = best_finalized_block - best_finalized_block_rem; - match pruning_range(config_activation_block.clone(), config, min_blocks_to_keep, max_digest_block) { + match pruning_range(zero.clone(), config, min_blocks_to_keep, max_digest_block) { Some((_, last_pruned_block)) => last_pruned_block + One::one(), - None => config_activation_block + One::one(), + None => zero + One::one(), } } @@ -49,7 +49,7 @@ pub fn oldest_non_pruned_trie( /// `min_blocks_to_keep` blocks. We only prune changes tries at `max_digest_interval` /// ranges. 
pub fn prune, H: Hasher, Number: BlockNumber, F: FnMut(H::Out)>( - config_activation_block: Number, + zero: Number, config: &Configuration, storage: &S, min_blocks_to_keep: Number, @@ -57,7 +57,7 @@ pub fn prune, H: Hasher, Number: BlockNumber, F: FnMut(H:: mut remove_trie_node: F, ) { // select range for pruning - let range = pruning_range(config_activation_block, config, min_blocks_to_keep, current_block.number.clone()); + let range = pruning_range(zero, config, min_blocks_to_keep, current_block.number.clone()); let (first, last) = match range { Some((first, last)) => (first, last), None => return, @@ -105,7 +105,7 @@ pub fn prune, H: Hasher, Number: BlockNumber, F: FnMut(H:: /// Select blocks range (inclusive from both ends) for pruning changes tries in. fn pruning_range( - config_activation_block: Number, + zero: Number, config: &Configuration, min_blocks_to_keep: Number, block: Number, @@ -113,7 +113,7 @@ fn pruning_range( // compute number of changes tries we actually want to keep let (prune_interval, blocks_to_keep) = if config.is_digest_build_enabled() { // we only CAN prune at block where max-level-digest is created - let max_digest_interval = match config.digest_level_at_block(config_activation_block.clone(), block.clone()) { + let max_digest_interval = match config.digest_level_at_block(zero.clone(), block.clone()) { Some((digest_level, digest_interval, _)) if digest_level == config.digest_levels => digest_interval, _ => return None, @@ -136,7 +136,7 @@ fn pruning_range( // last block for which changes trie is pruned let last_block_to_prune = match blocks_to_keep.and_then(|b| block.checked_sub(&b)) { - Some(last_block_to_prune) => if last_block_to_prune > config_activation_block { + Some(last_block_to_prune) => if last_block_to_prune > zero { last_block_to_prune } else { return None; From 94979f8974821852528eabab7021e1bbae4e3dd2 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 1 Aug 2019 14:42:53 +0300 Subject: [PATCH 32/63] obsolete TODO 
--- core/client/src/call_executor.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/core/client/src/call_executor.rs b/core/client/src/call_executor.rs index af73daea10b3c..b464a29c8f2a8 100644 --- a/core/client/src/call_executor.rs +++ b/core/client/src/call_executor.rs @@ -300,7 +300,6 @@ where self.executor.runtime_version(&mut ext).ok_or(error::Error::VersionInvalid.into()) } - // TODO: probably remove this method??? fn call_at_state< O: offchain::Externalities, S: state_machine::Backend, From dd5b6021ca51521084417d3190ab771629668f6a Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 1 Aug 2019 14:55:31 +0300 Subject: [PATCH 33/63] removed unjustified expect --- core/client/src/client.rs | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/core/client/src/client.rs b/core/client/src/client.rs index 072509b94a4b0..88a2728fc699c 100644 --- a/core/client/src/client.rs +++ b/core/client/src/client.rs @@ -522,12 +522,17 @@ impl Client where }; // TODO: we only work with the last config range here!!! Need to stabilize pruning before fixing this. - let (config_zero_number, _, config) = configs.pop().expect("TODO"); - let finalized_number = self.backend.blockchain().info().finalized_number; - let oldest = storage.oldest_changes_trie_block(config_zero_number, config, finalized_number); - let oldest = ::std::cmp::max(config_zero_number + One::one(), oldest); - let first = ::std::cmp::max(first, oldest); - Ok(Some((first, last))) + match configs.pop() { + Some((zero, _, config)) => { + let finalized_number = self.backend.blockchain().info().finalized_number; + let oldest = storage.oldest_changes_trie_block(zero, config, finalized_number); + let oldest = ::std::cmp::max(zero + One::one(), oldest); + let first = ::std::cmp::max(first, oldest); + Ok(Some((first, last))) + }, + None => Ok(None), + } + } /// Get pairs of (block, extrinsic) where key has been changed at given blocks range. 
From 7260719bb8566c7249df23c14084051fb649e4a0 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 1 Aug 2019 15:03:30 +0300 Subject: [PATCH 34/63] update TODOs with issue number --- core/client/db/src/changes_tries_storage.rs | 2 +- core/client/src/client.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/client/db/src/changes_tries_storage.rs b/core/client/db/src/changes_tries_storage.rs index 09e419a693732..db706a5e0085e 100644 --- a/core/client/db/src/changes_tries_storage.rs +++ b/core/client/db/src/changes_tries_storage.rs @@ -171,7 +171,7 @@ impl> DbChangesTrieStorage { |node| tx.delete(self.changes_tries_column, node.as_ref())); } - // TODO: prune tries that were created using previous configurations + // TODO (#3282): prune tries that were created using previous configurations Ok(()) } diff --git a/core/client/src/client.rs b/core/client/src/client.rs index 88a2728fc699c..c9c1ec8e39eb4 100644 --- a/core/client/src/client.rs +++ b/core/client/src/client.rs @@ -521,7 +521,7 @@ impl Client where None => return Ok(None), }; - // TODO: we only work with the last config range here!!! Need to stabilize pruning before fixing this. + // TODO (#3282): we only work with the last config range here!!! Need to stabilize pruning before fixing this. 
match configs.pop() { Some((zero, _, config)) => { let finalized_number = self.backend.blockchain().info().finalized_number; From 4552586adbf75542d6a8fa3c25d96546f0f33ed6 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 7 Aug 2019 17:46:03 +0300 Subject: [PATCH 35/63] new CT pruning algorithm fixed cache + multiple blocks finalization track CT configuration on light clients support CT configuration change revert revert CT config test new CT pruning algorithm fixed cache + multiple blocks finalization track CT configuration on light clients support CT configuration change revert revert CT config test --- core/client/db/src/cache/list_cache.rs | 542 +++++++++--- core/client/db/src/cache/list_storage.rs | 8 + core/client/db/src/cache/mod.rs | 309 +++++-- core/client/db/src/changes_tries_storage.rs | 862 +++++++++++++------ core/client/db/src/lib.rs | 106 ++- core/client/db/src/light.rs | 57 +- core/client/db/src/utils.rs | 2 + core/client/src/backend.rs | 12 +- core/client/src/client.rs | 5 +- core/client/src/light/backend.rs | 14 + core/client/src/light/blockchain.rs | 4 +- core/primitives/src/changes_trie.rs | 56 +- core/state-machine/src/changes_trie/build.rs | 3 +- core/state-machine/src/changes_trie/mod.rs | 2 +- core/state-machine/src/changes_trie/prune.rs | 274 +----- core/state-machine/src/lib.rs | 1 - 16 files changed, 1445 insertions(+), 812 deletions(-) diff --git a/core/client/db/src/cache/list_cache.rs b/core/client/db/src/cache/list_cache.rs index 9095b80fb6728..a040004dbf70c 100644 --- a/core/client/db/src/cache/list_cache.rs +++ b/core/client/db/src/cache/list_cache.rs @@ -39,16 +39,14 @@ //! Finalized entry E1 is pruned when block B is finalized so that: //!
EntryAt(B.number - prune_depth).points_to(E1) -use std::collections::BTreeSet; +use std::collections::{BTreeSet, BTreeMap}; use log::warn; use client::error::{Error as ClientError, Result as ClientResult}; -use sr_primitives::traits::{ - Block as BlockT, NumberFor, Zero, Bounded, CheckedSub -}; +use sr_primitives::traits::{Block as BlockT, NumberFor, Zero, Bounded, CheckedSub}; -use crate::cache::{CacheItemT, ComplexBlockId, EntryType}; +use crate::cache::{CacheItemT, ComplexBlockId, BlockIdOrHeader, EntryType}; use crate::cache::list_entry::{Entry, StorageEntry}; use crate::cache::list_storage::{Storage, StorageTransaction, Metadata}; @@ -89,6 +87,8 @@ pub enum CommitOperation { /// - new entry is finalized AND/OR /// - some forks are destroyed BlockFinalized(ComplexBlockId, Option>, BTreeSet), + /// When best block is reverted - contains the forks that have to be updated. + BlockReverted(BTreeMap>>), } /// Single fork of list-based cache. @@ -114,27 +114,23 @@ pub enum ForkAppendResult { Fork(ComplexBlockId), } -impl> ListCache { +impl> ListCache { /// Create new db list cache entry. pub fn new( storage: S, pruning_strategy: PruningStrategy>, best_finalized_block: ComplexBlockId, - ) -> Self { + ) -> ClientResult { let (best_finalized_entry, unfinalized) = storage.read_meta() - .and_then(|meta| read_forks(&storage, meta)) - .unwrap_or_else(|error| { - warn!(target: "db", "Unable to initialize list cache: {}. Restarting", error); - (None, Vec::new()) - }); + .and_then(|meta| read_forks(&storage, meta))?; - ListCache { + Ok(ListCache { storage, pruning_strategy, best_finalized_block, best_finalized_entry, unfinalized, - } + }) } /// Get reference to the storage. @@ -142,18 +138,24 @@ impl> ListCache &self.storage } + /// Get unfinalized forks reference. + #[cfg(test)] + pub fn unfinalized(&self) -> &[Fork] { + &self.unfinalized + } + /// Get value valid at block. 
pub fn value_at_block( &self, - at: &ComplexBlockId, + at: BlockIdOrHeader, ) -> ClientResult, Option>, T)>> { - let head = if at.number <= self.best_finalized_block.number { + let head = if at.number() <= self.best_finalized_block.number { // if the block is older than the best known finalized block // => we're should search for the finalized value // BUT since we're not guaranteeing to provide correct values for forks // behind the finalized block, check if the block is finalized first - if !chain::is_finalized_block(&self.storage, at, Bounded::max_value())? { + if !chain::is_finalized_block(&self.storage, &at.id(), Bounded::max_value())? { return Ok(None); } @@ -168,18 +170,21 @@ impl> ListCache // IF there's no matching fork, ensure that this isn't a block from a fork that has forked // behind the best finalized block and search at finalized fork - match self.find_unfinalized_fork(at)? { + match self.find_unfinalized_fork(at.clone())? { Some(fork) => Some(&fork.head), None => match self.best_finalized_entry.as_ref() { - Some(best_finalized_entry) if chain::is_connected_to_block(&self.storage, &best_finalized_entry.valid_from, at)? => - Some(best_finalized_entry), + Some(best_finalized_entry) if chain::is_connected_to_block( + &self.storage, + at.clone(), + &best_finalized_entry.valid_from, + )? 
=> Some(best_finalized_entry), _ => None, }, } }; match head { - Some(head) => head.search_best_before(&self.storage, at.number) + Some(head) => head.search_best_before(&self.storage, at.number()) .map(|e| e.map(|e| (e.0.valid_from, e.1, e.0.value))), None => Ok(None), } @@ -195,9 +200,18 @@ impl> ListCache block: ComplexBlockId, value: Option, entry_type: EntryType, + prev_operation: Option<&CommitOperation>, ) -> ClientResult>> { // this guarantee is currently provided by LightStorage && we're relying on it here - debug_assert!(entry_type != EntryType::Final || self.best_finalized_block.hash == parent.hash); + debug_assert!( + entry_type != EntryType::Final || + self.best_finalized_block.hash == parent.hash || + match prev_operation { + Some(&CommitOperation::BlockFinalized(ref best_finalized_block, _, _)) + => best_finalized_block.hash == parent.hash, + _ => false, + } + ); // we do not store any values behind finalized if block.number != Zero::zero() && self.best_finalized_block.number >= block.number { @@ -293,7 +307,7 @@ impl> ListCache } // cleanup database from abandoned unfinalized forks and obsolete finalized entries - let abandoned_forks = self.destroy_abandoned_forks(tx, &block); + let abandoned_forks = self.destroy_abandoned_forks(tx, &block, prev_operation); self.prune_finalized_entries(tx, &block); match new_storage_entry { @@ -313,59 +327,104 @@ impl> ListCache tx: &mut Tx, parent: ComplexBlockId, block: ComplexBlockId, + prev_operation: Option<&CommitOperation>, ) -> ClientResult>> { - // this guarantee is currently provided by LightStorage && we're relying on it here - debug_assert_eq!(self.best_finalized_block.hash, parent.hash); + // this guarantee is currently provided by db backend && we're relying on it here + debug_assert!( + self.best_finalized_block.hash == parent.hash || + match prev_operation { + Some(&CommitOperation::BlockFinalized(ref best_finalized_block, _, _)) + => best_finalized_block.hash == parent.hash, + _ => false, + } + ); // 
there could be at most one entry that is finalizing let finalizing_entry = self.storage.read_entry(&block)? .map(|entry| entry.into_entry(block.clone())); // cleanup database from abandoned unfinalized forks and obsolete finalized entries - let abandoned_forks = self.destroy_abandoned_forks(tx, &block); + let abandoned_forks = self.destroy_abandoned_forks(tx, &block, prev_operation); self.prune_finalized_entries(tx, &block); - let update_meta = finalizing_entry.is_some(); let operation = CommitOperation::BlockFinalized(block, finalizing_entry, abandoned_forks); - if update_meta { - tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation); - } + tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation); + Ok(Some(operation)) } + /// When previously inserted block is finalized. + pub fn on_block_revert>( + &self, + tx: &mut Tx, + block: NumberFor, + ) -> ClientResult> { + // can't revert finalized blocks + debug_assert!(self.best_finalized_block.number < block); + + // iterate all unfinalized forks and truncate/destroy if required + let mut updated = BTreeMap::new(); + for (index, fork) in self.unfinalized.iter().enumerate() { + if fork.head.valid_from.number >= block { + let updated_fork = fork.truncate( + &self.storage, + tx, + block, + self.best_finalized_block.number, + )?; + updated.insert(index, updated_fork); + } + } + + let operation = CommitOperation::BlockReverted(updated); + tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation); + + Ok(operation) + } + /// When transaction is committed. 
- pub fn on_transaction_commit(&mut self, op: CommitOperation) { - match op { - CommitOperation::AppendNewBlock(index, best_block) => { - let mut fork = self.unfinalized.get_mut(index) - .expect("ListCache is a crate-private type; - internal clients of ListCache are committing transaction while cache is locked; - CommitOperation holds valid references while cache is locked; qed"); - fork.best_block = Some(best_block); - }, - CommitOperation::AppendNewEntry(index, entry) => { - let mut fork = self.unfinalized.get_mut(index) - .expect("ListCache is a crate-private type; - internal clients of ListCache are committing transaction while cache is locked; - CommitOperation holds valid references while cache is locked; qed"); - fork.best_block = Some(entry.valid_from.clone()); - fork.head = entry; - }, - CommitOperation::AddNewFork(entry) => { - self.unfinalized.push(Fork { - best_block: Some(entry.valid_from.clone()), - head: entry, - }); - }, - CommitOperation::BlockFinalized(block, finalizing_entry, forks) => { - self.best_finalized_block = block; - if let Some(finalizing_entry) = finalizing_entry { - self.best_finalized_entry = Some(finalizing_entry); - } - for fork_index in forks.iter().rev() { - self.unfinalized.remove(*fork_index); - } - }, + pub fn on_transaction_commit(&mut self, ops: Vec>) { + for op in ops { + match op { + CommitOperation::AppendNewBlock(index, best_block) => { + let mut fork = self.unfinalized.get_mut(index) + .expect("ListCache is a crate-private type; + internal clients of ListCache are committing transaction while cache is locked; + CommitOperation holds valid references while cache is locked; qed"); + fork.best_block = Some(best_block); + }, + CommitOperation::AppendNewEntry(index, entry) => { + let mut fork = self.unfinalized.get_mut(index) + .expect("ListCache is a crate-private type; + internal clients of ListCache are committing transaction while cache is locked; + CommitOperation holds valid references while cache is locked; qed"); + 
fork.best_block = Some(entry.valid_from.clone()); + fork.head = entry; + }, + CommitOperation::AddNewFork(entry) => { + self.unfinalized.push(Fork { + best_block: Some(entry.valid_from.clone()), + head: entry, + }); + }, + CommitOperation::BlockFinalized(block, finalizing_entry, forks) => { + self.best_finalized_block = block; + if let Some(finalizing_entry) = finalizing_entry { + self.best_finalized_entry = Some(finalizing_entry); + } + for fork_index in forks.into_iter().rev() { + self.unfinalized.remove(fork_index); + } + }, + CommitOperation::BlockReverted(forks) => { + for (fork_index, updated_fork) in forks.into_iter().rev() { + match updated_fork { + Some(updated_fork) => self.unfinalized[fork_index] = updated_fork, + None => { self.unfinalized.remove(fork_index); }, + } + } + }, + } } } @@ -426,10 +485,22 @@ impl> ListCache fn destroy_abandoned_forks>( &self, tx: &mut Tx, - block: &ComplexBlockId + block: &ComplexBlockId, + prev_operation: Option<&CommitOperation>, ) -> BTreeSet { - let mut destroyed = BTreeSet::new(); - for (index, fork) in self.unfinalized.iter().enumerate() { + // if some block has been finalized already => take it into account + let prev_abandoned_forks = match prev_operation { + Some(&CommitOperation::BlockFinalized(_, _, ref abandoned_forks)) => Some(abandoned_forks), + _ => None, + }; + + let mut destroyed = prev_abandoned_forks.cloned().unwrap_or_else(|| BTreeSet::new()); + let live_unfinalized = self.unfinalized.iter() + .enumerate() + .filter(|(idx, _)| prev_abandoned_forks + .map(|prev_abandoned_forks| !prev_abandoned_forks.contains(idx)) + .unwrap_or(true)); + for (index, fork) in live_unfinalized { if fork.head.valid_from.number == block.number { destroyed.insert(index); if fork.head.valid_from.hash != block.hash { @@ -444,9 +515,12 @@ impl> ListCache } /// Search unfinalized fork where given block belongs. 
- fn find_unfinalized_fork(&self, block: &ComplexBlockId) -> ClientResult>> { + fn find_unfinalized_fork( + &self, + block: BlockIdOrHeader, + ) -> ClientResult>> { for unfinalized in &self.unfinalized { - if unfinalized.matches(&self.storage, block)? { + if unfinalized.matches(&self.storage, block.clone())? { return Ok(Some(&unfinalized)); } } @@ -465,9 +539,9 @@ impl Fork { pub fn matches>( &self, storage: &S, - block: &ComplexBlockId, + block: BlockIdOrHeader, ) -> ClientResult { - let range = self.head.search_best_range_before(storage, block.number)?; + let range = self.head.search_best_range_before(storage, block.number())?; match range { None => Ok(false), Some((begin, end)) => chain::is_connected_to_range(storage, block, (&begin, end.as_ref())), @@ -500,7 +574,7 @@ impl Fork { }; // check if the parent is connected to the beginning of the range - if !chain::is_connected_to_block(storage, &parent, &begin)? { + if !chain::is_connected_to_block(storage, BlockIdOrHeader::Id(parent), &begin)? { return Ok(None); } @@ -533,6 +607,74 @@ impl Fork { best_finalized_block, ) } + + /// Truncate fork by deleting all entries that are descendants of given block. + pub fn truncate, Tx: StorageTransaction>( + &self, + storage: &S, + tx: &mut Tx, + truncating_block: NumberFor, + best_finalized_block: NumberFor, + ) -> ClientResult>> { + let mut current = self.head.valid_from.clone(); + loop { + // read pointer to previous entry + let entry = storage.require_entry(¤t)?; + + // truncation stops when we have reached the ancestor of truncated block + if current.number < truncating_block { + // if we have reached finalized block => destroy fork + if chain::is_finalized_block(storage, ¤t, best_finalized_block)? 
{ + return Ok(None); + } + + // else fork needs to be updated + return Ok(Some(Fork { + best_block: None, + head: entry.into_entry(current), + })); + } + + tx.remove_storage_entry(¤t); + + // truncation also stops when there are no more entries in the list + current = match entry.prev_valid_from { + Some(prev_valid_from) => prev_valid_from, + None => return Ok(None), + }; + } + } +} + +impl CommitOperation { + /// Try to merge two ops into single one. + pub fn merge_with(self, other: CommitOperation) -> (Option, Option) { + match self { + CommitOperation::BlockFinalized(old_finalized_block, old_finalized_entry, old_abandoned_forks) => { + match other { + CommitOperation::BlockFinalized(new_finalized_block, new_finalized_entry, new_abandoned_forks) => { + ( + Some(CommitOperation::BlockFinalized( + new_finalized_block, + new_finalized_entry, + new_abandoned_forks, + )), + None, + ) + }, + _ => ( + Some(CommitOperation::BlockFinalized( + old_finalized_block, + old_finalized_entry, + old_abandoned_forks, + )), + Some(other), + ), + } + }, + _ => (Some(self), Some(other)), + } + } } /// Destroy fork by deleting all unfinalized entries. @@ -571,11 +713,11 @@ mod chain { /// Is the block1 connected both ends of the range. pub fn is_connected_to_range>( storage: &S, - block: &ComplexBlockId, + block: BlockIdOrHeader, range: (&ComplexBlockId, Option<&ComplexBlockId>), ) -> ClientResult { let (begin, end) = range; - Ok(is_connected_to_block(storage, block, begin)? + Ok(is_connected_to_block(storage, block.clone(), begin)? && match end { Some(end) => is_connected_to_block(storage, block, end)?, None => true, @@ -585,12 +727,18 @@ mod chain { /// Is the block1 directly connected (i.e. part of the same fork) to block2? 
pub fn is_connected_to_block>( storage: &S, - block1: &ComplexBlockId, + block1: BlockIdOrHeader, block2: &ComplexBlockId, ) -> ClientResult { - let (begin, end) = if block1 > block2 { (block2, block1) } else { (block1, block2) }; - let mut current = storage.read_header(&end.hash)? - .ok_or_else(|| ClientError::UnknownBlock(format!("{}", end.hash)))?; + let block1_id = block1.id(); + let (begin, end) = if block1_id > *block2 { (block2, &block1_id) } else { (&block1_id, block2) }; + let mut current = match end.hash == block1_id.hash { + true => match block1 { + BlockIdOrHeader::Id(_) => storage.read_header(&end.hash)?, + BlockIdOrHeader::Header(header) => Some(header.clone()), + }, + false => storage.read_header(&end.hash)?, + }.ok_or_else(|| ClientError::UnknownBlock(format!("{}", end.hash)))?; while *current.number() > begin.number { current = storage.read_header(current.parent_hash())? .ok_or_else(|| ClientError::UnknownBlock(format!("{}", current.parent_hash())))?; @@ -688,7 +836,7 @@ pub mod tests { // --- 50 --- // ----------> [100] assert_eq!(ListCache::<_, u64, _>::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)) - .value_at_block(&test_id(50)).unwrap(), None); + .unwrap().value_at_block((&test_id(50)).into()).unwrap(), None); // when block is earlier than best finalized block AND it is finalized AND value is some // [30] ---- 50 ---> [100] assert_eq!(ListCache::new( @@ -698,7 +846,7 @@ pub mod tests { .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }) .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), PruningStrategy::ByDepth(1024), test_id(100) - ).value_at_block(&test_id(50)).unwrap(), Some((test_id(30), Some(test_id(100)), 30))); + ).unwrap().value_at_block((&test_id(50)).into()).unwrap(), Some((test_id(30), Some(test_id(100)), 30))); // when block is the best finalized block AND value is some // ---> [100] assert_eq!(ListCache::new( @@ -708,7 +856,7 @@ pub mod tests 
{ .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }) .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), PruningStrategy::ByDepth(1024), test_id(100) - ).value_at_block(&test_id(100)).unwrap(), Some((test_id(100), None, 100))); + ).unwrap().value_at_block((&test_id(100)).into()).unwrap(), Some((test_id(100), None, 100))); // when block is parallel to the best finalized block // ---- 100 // ---> [100] @@ -719,7 +867,7 @@ pub mod tests { .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }) .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), PruningStrategy::ByDepth(1024), test_id(100) - ).value_at_block(&ComplexBlockId::new(H256::from_low_u64_be(2), 100)).unwrap(), None); + ).unwrap().value_at_block((&ComplexBlockId::new(H256::from_low_u64_be(2), 100)).into()).unwrap(), None); // when block is later than last finalized block AND there are no forks AND finalized value is Some // ---> [100] --- 200 @@ -729,7 +877,7 @@ pub mod tests { .with_id(50, H256::from_low_u64_be(50)) .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }), PruningStrategy::ByDepth(1024), test_id(100) - ).value_at_block(&test_id(200)).unwrap(), Some((test_id(100), None, 100))); + ).unwrap().value_at_block((&test_id(200)).into()).unwrap(), Some((test_id(100), None, 100))); // when block is later than last finalized block AND there are no matching forks // AND block is connected to finalized block AND finalized value is Some @@ -745,7 +893,7 @@ pub mod tests { .with_header(test_header(4)) .with_header(fork_header(0, 2, 3)), PruningStrategy::ByDepth(1024), test_id(2) - ).value_at_block(&fork_id(0, 2, 3)).unwrap(), Some((correct_id(2), None, 2))); + ).unwrap().value_at_block((&fork_id(0, 2, 3)).into()).unwrap(), Some((correct_id(2), None, 2))); // when block is later than last finalized block AND there are no matching forks // AND block is 
not connected to finalized block // --- 2 --- 3 @@ -762,7 +910,7 @@ pub mod tests { .with_header(fork_header(0, 1, 3)) .with_header(fork_header(0, 1, 2)), PruningStrategy::ByDepth(1024), test_id(2) - ).value_at_block(&fork_id(0, 1, 3)).unwrap(), None); + ).unwrap().value_at_block((&fork_id(0, 1, 3)).into()).unwrap(), None); // when block is later than last finalized block AND it appends to unfinalized fork from the end // AND unfinalized value is Some @@ -775,7 +923,7 @@ pub mod tests { .with_header(test_header(4)) .with_header(test_header(5)), PruningStrategy::ByDepth(1024), test_id(2) - ).value_at_block(&correct_id(5)).unwrap(), Some((correct_id(4), None, 4))); + ).unwrap().value_at_block((&correct_id(5)).into()).unwrap(), Some((correct_id(4), None, 4))); // when block is later than last finalized block AND it does not fits unfinalized fork // AND it is connected to the finalized block AND finalized value is Some // ---> [2] ----------> [4] @@ -790,7 +938,7 @@ pub mod tests { .with_header(test_header(4)) .with_header(fork_header(0, 2, 3)), PruningStrategy::ByDepth(1024), test_id(2) - ).value_at_block(&fork_id(0, 2, 3)).unwrap(), Some((correct_id(2), None, 2))); + ).unwrap().value_at_block((&fork_id(0, 2, 3)).into()).unwrap(), Some((correct_id(2), None, 2))); } #[test] @@ -799,22 +947,24 @@ pub mod tests { let fin = EntryType::Final; // when trying to insert block < finalized number - assert!(ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)) + assert!(ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)).unwrap() .on_block_insert( &mut DummyTransaction::new(), test_id(49), test_id(50), Some(50), nfin, + None, ).unwrap().is_none()); // when trying to insert block @ finalized number - assert!(ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)) + assert!(ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)).unwrap() .on_block_insert( &mut 
DummyTransaction::new(), test_id(99), test_id(100), Some(100), nfin, + None, ).unwrap().is_none()); // when trying to insert non-final block AND it appends to the best block of unfinalized fork @@ -824,10 +974,10 @@ pub mod tests { .with_meta(None, vec![test_id(4)]) .with_entry(test_id(4), StorageEntry { prev_valid_from: None, value: 4 }), PruningStrategy::ByDepth(1024), test_id(2) - ); + ).unwrap(); cache.unfinalized[0].best_block = Some(test_id(4)); let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, test_id(4), test_id(5), Some(4), nfin).unwrap(), + assert_eq!(cache.on_block_insert(&mut tx, test_id(4), test_id(5), Some(4), nfin, None).unwrap(), Some(CommitOperation::AppendNewBlock(0, test_id(5)))); assert!(tx.inserted_entries().is_empty()); assert!(tx.removed_entries().is_empty()); @@ -835,7 +985,7 @@ pub mod tests { // when trying to insert non-final block AND it appends to the best block of unfinalized fork // AND new value is the same as in the fork' best block let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, test_id(4), test_id(5), Some(5), nfin).unwrap(), + assert_eq!(cache.on_block_insert(&mut tx, test_id(4), test_id(5), Some(5), nfin, None).unwrap(), Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: test_id(5), value: 5 }))); assert_eq!(*tx.inserted_entries(), vec![test_id(5).hash].into_iter().collect()); assert!(tx.removed_entries().is_empty()); @@ -849,9 +999,9 @@ pub mod tests { .with_entry(correct_id(4), StorageEntry { prev_valid_from: None, value: 4 }) .with_header(test_header(4)), PruningStrategy::ByDepth(1024), test_id(2) - ); + ).unwrap(); let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, correct_id(4), correct_id(5), Some(4), nfin).unwrap(), + assert_eq!(cache.on_block_insert(&mut tx, correct_id(4), correct_id(5), Some(4), nfin, None).unwrap(), Some(CommitOperation::AppendNewBlock(0, correct_id(5)))); assert!(tx.inserted_entries().is_empty()); 
assert!(tx.removed_entries().is_empty()); @@ -859,7 +1009,7 @@ pub mod tests { // when trying to insert non-final block AND it is the first block that appends to the best block of unfinalized fork // AND new value is the same as in the fork' best block let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, correct_id(4), correct_id(5), Some(5), nfin).unwrap(), + assert_eq!(cache.on_block_insert(&mut tx, correct_id(4), correct_id(5), Some(5), nfin, None).unwrap(), Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(5), value: 5 }))); assert_eq!(*tx.inserted_entries(), vec![correct_id(5).hash].into_iter().collect()); assert!(tx.removed_entries().is_empty()); @@ -875,9 +1025,9 @@ pub mod tests { .with_header(test_header(3)) .with_header(test_header(4)), PruningStrategy::ByDepth(1024), correct_id(2) - ); + ).unwrap(); let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, correct_id(3), fork_id(0, 3, 4), Some(14), nfin).unwrap(), + assert_eq!(cache.on_block_insert(&mut tx, correct_id(3), fork_id(0, 3, 4), Some(14), nfin, None).unwrap(), Some(CommitOperation::AddNewFork(Entry { valid_from: fork_id(0, 3, 4), value: 14 }))); assert_eq!(*tx.inserted_entries(), vec![fork_id(0, 3, 4).hash].into_iter().collect()); assert!(tx.removed_entries().is_empty()); @@ -890,9 +1040,9 @@ pub mod tests { .with_meta(Some(correct_id(2)), vec![]) .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }), PruningStrategy::ByDepth(1024), correct_id(2) - ); + ).unwrap(); let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), nfin).unwrap(), None); + assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), nfin, None).unwrap(), None); assert!(tx.inserted_entries().is_empty()); assert!(tx.removed_entries().is_empty()); assert!(tx.updated_meta().is_none()); @@ -903,19 +1053,19 @@ pub mod tests { 
.with_meta(Some(correct_id(2)), vec![]) .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }), PruningStrategy::ByDepth(1024), correct_id(2) - ); + ).unwrap(); let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), nfin).unwrap(), + assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), nfin, None).unwrap(), Some(CommitOperation::AddNewFork(Entry { valid_from: correct_id(3), value: 3 }))); assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect()); assert!(tx.removed_entries().is_empty()); assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(2)), unfinalized: vec![correct_id(3)] })); // when inserting finalized entry AND there are no previous finalized entries - let cache = ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), correct_id(2)); + let cache = ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), correct_id(2)).unwrap(); let mut tx = DummyTransaction::new(); assert_eq!( - cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), fin).unwrap(), + cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), fin, None).unwrap(), Some(CommitOperation::BlockFinalized( correct_id(3), Some(Entry { valid_from: correct_id(3), value: 3 }), @@ -931,9 +1081,9 @@ pub mod tests { .with_meta(Some(correct_id(2)), vec![]) .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }), PruningStrategy::ByDepth(1024), correct_id(2) - ); + ).unwrap(); let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), fin).unwrap(), + assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), fin, None).unwrap(), Some(CommitOperation::BlockFinalized(correct_id(3), None, Default::default()))); assert!(tx.inserted_entries().is_empty()); assert!(tx.removed_entries().is_empty()); @@ 
-941,7 +1091,7 @@ pub mod tests { // when inserting finalized entry AND value differs from previous finalized let mut tx = DummyTransaction::new(); assert_eq!( - cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), fin).unwrap(), + cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), fin, None).unwrap(), Some(CommitOperation::BlockFinalized( correct_id(3), Some(Entry { valid_from: correct_id(3), value: 3 }), @@ -959,9 +1109,9 @@ pub mod tests { .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: None, value: 13 }), PruningStrategy::ByDepth(1024), correct_id(2) - ); + ).unwrap(); let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), fin).unwrap(), + assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), fin, None).unwrap(), Some(CommitOperation::BlockFinalized(correct_id(3), None, vec![0].into_iter().collect()))); } @@ -974,13 +1124,13 @@ pub mod tests { .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }), PruningStrategy::ByDepth(1024), correct_id(2) - ); + ).unwrap(); let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_finalize(&mut tx, correct_id(2), correct_id(3)).unwrap(), + assert_eq!(cache.on_block_finalize(&mut tx, correct_id(2), correct_id(3), None).unwrap(), Some(CommitOperation::BlockFinalized(correct_id(3), None, Default::default()))); assert!(tx.inserted_entries().is_empty()); assert!(tx.removed_entries().is_empty()); - assert!(tx.updated_meta().is_none()); + assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: None, unfinalized: vec![correct_id(5)] })); // finalization finalizes entry let cache = ListCache::new( DummyStorage::new() @@ -988,10 +1138,10 @@ pub mod tests { .with_entry(correct_id(2), StorageEntry 
{ prev_valid_from: None, value: 2 }) .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }), PruningStrategy::ByDepth(1024), correct_id(4) - ); + ).unwrap(); let mut tx = DummyTransaction::new(); assert_eq!( - cache.on_block_finalize(&mut tx, correct_id(4), correct_id(5)).unwrap(), + cache.on_block_finalize(&mut tx, correct_id(4), correct_id(5), None).unwrap(), Some(CommitOperation::BlockFinalized( correct_id(5), Some(Entry { valid_from: correct_id(5), value: 5 }), @@ -1008,9 +1158,9 @@ pub mod tests { .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: None, value: 13 }), PruningStrategy::ByDepth(1024), correct_id(2) - ); + ).unwrap(); let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_finalize(&mut tx, correct_id(2), correct_id(3)).unwrap(), + assert_eq!(cache.on_block_finalize(&mut tx, correct_id(2), correct_id(3), None).unwrap(), Some(CommitOperation::BlockFinalized(correct_id(3), None, vec![0].into_iter().collect()))); } @@ -1023,25 +1173,29 @@ pub mod tests { .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }) .with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(5)), value: 6 }), PruningStrategy::ByDepth(1024), correct_id(2) - ); + ).unwrap(); // when new block is appended to unfinalized fork - cache.on_transaction_commit(CommitOperation::AppendNewBlock(0, correct_id(6))); + cache.on_transaction_commit(vec![CommitOperation::AppendNewBlock(0, correct_id(6))]); assert_eq!(cache.unfinalized[0].best_block, Some(correct_id(6))); // when new entry is appended to unfinalized fork - cache.on_transaction_commit(CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(7), value: 7 })); + cache.on_transaction_commit(vec![ + CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(7), value: 7 }), + ]); assert_eq!(cache.unfinalized[0].best_block, 
Some(correct_id(7))); assert_eq!(cache.unfinalized[0].head, Entry { valid_from: correct_id(7), value: 7 }); // when new fork is added - cache.on_transaction_commit(CommitOperation::AddNewFork(Entry { valid_from: correct_id(10), value: 10 })); + cache.on_transaction_commit(vec![ + CommitOperation::AddNewFork(Entry { valid_from: correct_id(10), value: 10 }), + ]); assert_eq!(cache.unfinalized[2].best_block, Some(correct_id(10))); assert_eq!(cache.unfinalized[2].head, Entry { valid_from: correct_id(10), value: 10 }); // when block is finalized + entry is finalized + unfinalized forks are deleted - cache.on_transaction_commit(CommitOperation::BlockFinalized( + cache.on_transaction_commit(vec![CommitOperation::BlockFinalized( correct_id(20), Some(Entry { valid_from: correct_id(20), value: 20 }), vec![0, 1, 2].into_iter().collect(), - )); + )]); assert_eq!(cache.best_finalized_block, correct_id(20)); assert_eq!(cache.best_finalized_entry, Some(Entry { valid_from: correct_id(20), value: 20 })); assert!(cache.unfinalized.is_empty()); @@ -1062,7 +1216,7 @@ pub mod tests { .with_header(test_header(4)) .with_header(test_header(5)), PruningStrategy::ByDepth(1024), correct_id(0) - ).find_unfinalized_fork(&correct_id(4)).unwrap().unwrap().head.valid_from, correct_id(5)); + ).unwrap().find_unfinalized_fork((&correct_id(4)).into()).unwrap().unwrap().head.valid_from, correct_id(5)); // --- [2] ---------------> [5] // ----------> [3] ---> 4 assert_eq!(ListCache::new( @@ -1079,7 +1233,8 @@ pub mod tests { .with_header(fork_header(0, 1, 3)) .with_header(fork_header(0, 1, 4)), PruningStrategy::ByDepth(1024), correct_id(0) - ).find_unfinalized_fork(&fork_id(0, 1, 4)).unwrap().unwrap().head.valid_from, fork_id(0, 1, 3)); + ).unwrap() + .find_unfinalized_fork((&fork_id(0, 1, 4)).into()).unwrap().unwrap().head.valid_from, fork_id(0, 1, 3)); // --- [2] ---------------> [5] // ----------> [3] // -----------------> 4 @@ -1099,7 +1254,7 @@ pub mod tests { .with_header(fork_header(1, 1, 3)) 
.with_header(fork_header(1, 1, 4)), PruningStrategy::ByDepth(1024), correct_id(0) - ).find_unfinalized_fork(&fork_id(1, 1, 4)).unwrap().is_none()); + ).unwrap().find_unfinalized_fork((&fork_id(1, 1, 4)).into()).unwrap().is_none()); } #[test] @@ -1109,7 +1264,7 @@ pub mod tests { .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }) .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: 50 }); assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } - .matches(&storage, &test_id(20)).unwrap(), false); + .matches(&storage, (&test_id(20)).into()).unwrap(), false); // when block is not connected to the begin block let storage = DummyStorage::new() .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }) @@ -1120,7 +1275,7 @@ pub mod tests { .with_header(fork_header(0, 2, 4)) .with_header(fork_header(0, 2, 3)); assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: 100 } } - .matches(&storage, &fork_id(0, 2, 4)).unwrap(), false); + .matches(&storage, (&fork_id(0, 2, 4)).into()).unwrap(), false); // when block is not connected to the end block let storage = DummyStorage::new() .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }) @@ -1130,14 +1285,14 @@ pub mod tests { .with_header(test_header(3)) .with_header(fork_header(0, 3, 4)); assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: 100 } } - .matches(&storage, &fork_id(0, 3, 4)).unwrap(), false); + .matches(&storage, (&fork_id(0, 3, 4)).into()).unwrap(), false); // when block is connected to the begin block AND end is open let storage = DummyStorage::new() .with_entry(correct_id(5), StorageEntry { prev_valid_from: None, value: 100 }) .with_header(test_header(5)) .with_header(test_header(6)); assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: 
correct_id(5), value: 100 } } - .matches(&storage, &correct_id(6)).unwrap(), true); + .matches(&storage, (&correct_id(6)).into()).unwrap(), true); // when block is connected to the begin block AND to the end block let storage = DummyStorage::new() .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }) @@ -1146,7 +1301,7 @@ pub mod tests { .with_header(test_header(4)) .with_header(test_header(3)); assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: 100 } } - .matches(&storage, &correct_id(4)).unwrap(), true); + .matches(&storage, (&correct_id(4)).into()).unwrap(), true); } #[test] @@ -1252,9 +1407,9 @@ pub mod tests { #[test] fn is_connected_to_block_fails() { // when storage returns error - assert!(chain::is_connected_to_block::<_, u64, _>(&FaultyStorage, &test_id(1), &test_id(100)).is_err()); + assert!(chain::is_connected_to_block::<_, u64, _>(&FaultyStorage, (&test_id(1)).into(), &test_id(100)).is_err()); // when there's no header in the storage - assert!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new(), &test_id(1), &test_id(100)).is_err()); + assert!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new(), (&test_id(1)).into(), &test_id(100)).is_err()); } #[test] @@ -1262,35 +1417,35 @@ pub mod tests { // when without iterations we end up with different block assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new() .with_header(test_header(1)), - &test_id(1), &correct_id(1)).unwrap(), false); + (&test_id(1)).into(), &correct_id(1)).unwrap(), false); // when with ASC iterations we end up with different block assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new() .with_header(test_header(0)) .with_header(test_header(1)) .with_header(test_header(2)), - &test_id(0), &correct_id(2)).unwrap(), false); + (&test_id(0)).into(), &correct_id(2)).unwrap(), false); // when with DESC iterations we end up with different block 
assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new() .with_header(test_header(0)) .with_header(test_header(1)) .with_header(test_header(2)), - &correct_id(2), &test_id(0)).unwrap(), false); + (&correct_id(2)).into(), &test_id(0)).unwrap(), false); // when without iterations we end up with the same block assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new() .with_header(test_header(1)), - &correct_id(1), &correct_id(1)).unwrap(), true); + (&correct_id(1)).into(), &correct_id(1)).unwrap(), true); // when with ASC iterations we end up with the same block assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new() .with_header(test_header(0)) .with_header(test_header(1)) .with_header(test_header(2)), - &correct_id(0), &correct_id(2)).unwrap(), true); + (&correct_id(0)).into(), &correct_id(2)).unwrap(), true); // when with DESC iterations we end up with the same block assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new() .with_header(test_header(0)) .with_header(test_header(1)) .with_header(test_header(2)), - &correct_id(2), &correct_id(0)).unwrap(), true); + (&correct_id(2)).into(), &correct_id(0)).unwrap(), true); } #[test] @@ -1368,7 +1523,7 @@ pub mod tests { .with_entry(test_id(10), StorageEntry { prev_valid_from: None, value: 10 }) .with_entry(test_id(20), StorageEntry { prev_valid_from: Some(test_id(10)), value: 20 }) .with_entry(test_id(30), StorageEntry { prev_valid_from: Some(test_id(20)), value: 30 }), - strategy, test_id(9)); + strategy, test_id(9)).unwrap(); let mut tx = DummyTransaction::new(); // when finalizing entry #10: no entries pruned @@ -1400,4 +1555,119 @@ pub mod tests { do_test(PruningStrategy::ByDepth(10)); do_test(PruningStrategy::NeverPrune) } + + #[test] + fn value_at_not_yet_inserted_block_works() { + fn make_storage() -> DummyStorage { + // 3 -> 5 -> 7_1 -> 10_1 + // \-> 9_2 -> 12_2 + let mut storage = DummyStorage::new() + // entries 3 && 5 are finalized + 
.with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 5 }) + .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 3 }) + // entries 7_1 and 10_1 is unfinalied fork#1 + .with_entry(correct_id(10), StorageEntry { prev_valid_from: Some(correct_id(7)), value: 110 }) + .with_entry(correct_id(7), StorageEntry { prev_valid_from: Some(correct_id(5)), value: 107 }) + // entries 7_1 and 10_1 is unfinalied fork#1 + .with_entry(fork_id(1, 6, 12), StorageEntry { prev_valid_from: Some(fork_id(1, 6, 9)), value: 210 }) + .with_entry(fork_id(1, 6, 9), StorageEntry { prev_valid_from: Some(correct_id(5)), value: 207 }); + for i in 0..=12 { + storage = storage.with_header(test_header(i)); + } + for i in 7..=12 { + storage = storage.with_header(fork_header(1, 6, i)); + } + storage + } + + // when there are no unfinalized forks + let header9 = test_header(9); + let storage = make_storage().with_meta(Some(correct_id(5)), Vec::new()); + assert_eq!( + ListCache::new(storage, PruningStrategy::NeverPrune, correct_id(5)) + .unwrap().value_at_block(BlockIdOrHeader::Header(&header9)).unwrap(), + Some((correct_id(5), None, 5)), + ); + + // when there's single unfinalized fork + let header11 = test_header(11); + let storage = make_storage().with_meta(Some(correct_id(5)), vec![correct_id(10)]); + assert_eq!( + ListCache::new(storage, PruningStrategy::NeverPrune, correct_id(5)) + .unwrap().value_at_block(BlockIdOrHeader::Header(&header11)).unwrap(), + Some((correct_id(10), None, 110)), + ); + + // when there are several unfinalized forks: fork1 + let header13 = test_header(13); + let storage = make_storage().with_meta(Some(correct_id(5)), vec![correct_id(10), fork_id(1, 6, 12)]); + assert_eq!( + ListCache::new(storage, PruningStrategy::NeverPrune, correct_id(5)) + .unwrap().value_at_block(BlockIdOrHeader::Header(&header13)).unwrap(), + Some((correct_id(10), None, 110)), + ); + + // when there are several unfinalized forks: fork2 + let header13 = 
fork_header(1, 6, 13); + let storage = make_storage().with_meta(Some(correct_id(5)), vec![correct_id(10), fork_id(1, 6, 12)]); + assert_eq!( + ListCache::new(storage, PruningStrategy::NeverPrune, correct_id(5)) + .unwrap().value_at_block(BlockIdOrHeader::Header(&header13)).unwrap(), + Some((fork_id(1, 6, 12), None, 210)), + ); + } + + #[test] + fn revert_block_works() { + let mut cache = ListCache::new( + DummyStorage::new() + .with_meta(Some(test_id(10)), vec![test_id(15), fork_id(1, 11, 17)]) + .with_id(10, test_id(10).hash) + .with_entry(test_id(10), StorageEntry { prev_valid_from: None, value: 10 }) + .with_entry(test_id(15), StorageEntry { prev_valid_from: Some(test_id(10)), value: 15 }) + .with_entry(fork_id(1, 11,13), StorageEntry { prev_valid_from: Some(test_id(10)), value: 13 }) + .with_entry(fork_id(1, 11,17), StorageEntry { prev_valid_from: Some(fork_id(1, 11, 13)), value: 17 }), + PruningStrategy::NeverPrune, test_id(10) + ).unwrap(); + + let op = cache.on_block_revert(&mut DummyTransaction::new(), 17).unwrap(); + assert_eq!(op, CommitOperation::BlockReverted(vec![ + (1, Some(Fork { best_block: None, head: Entry { valid_from: fork_id(1, 11, 13), value: 13 } })), + ].into_iter().collect())); + cache.on_transaction_commit(vec![op]); + + let op = cache.on_block_revert(&mut DummyTransaction::new(), 14).unwrap(); + assert_eq!(op, CommitOperation::BlockReverted(vec![ + (0, None), + ].into_iter().collect())); + cache.on_transaction_commit(vec![op]); + + let op = cache.on_block_revert(&mut DummyTransaction::new(), 11).unwrap(); + assert_eq!(op, CommitOperation::BlockReverted(vec![ + (0, None), // it is actually 1, because 0 has been destroyed already + ].into_iter().collect())); + cache.on_transaction_commit(vec![op]); + } + + #[test] + fn merge_commit_operation_works() { + let op1 = CommitOperation::BlockFinalized( + test_id(10), + Some(Entry { valid_from: test_id(10), value: 10 }), + vec![5].into_iter().collect(), + ); + let op2 = 
CommitOperation::BlockFinalized( + test_id(20), + Some(Entry { valid_from: test_id(20), value: 20 }), + vec![5, 6].into_iter().collect(), + ); + assert_eq!( + op1.merge_with(op2), + (Some(CommitOperation::BlockFinalized( + test_id(20), + Some(Entry { valid_from: test_id(20), value: 20 }), + vec![5, 6].into_iter().collect(), + )), None), + ); + } } diff --git a/core/client/db/src/cache/list_storage.rs b/core/client/db/src/cache/list_storage.rs index 3c4a7252f30d9..45272b4ce7736 100644 --- a/core/client/db/src/cache/list_storage.rs +++ b/core/client/db/src/cache/list_storage.rs @@ -227,6 +227,14 @@ mod meta { unfinalized.remove(*fork_index); } }, + CommitOperation::BlockReverted(ref forks) => { + for (fork_index, updated_fork) in forks.iter().rev() { + match updated_fork { + Some(updated_fork) => unfinalized[*fork_index] = &updated_fork.head().valid_from, + None => { unfinalized.remove(*fork_index); }, + } + } + }, } (finalized, unfinalized).encode() diff --git a/core/client/db/src/cache/mod.rs b/core/client/db/src/cache/mod.rs index d0abf3d0e1a84..dd0b942957e05 100644 --- a/core/client/db/src/cache/mod.rs +++ b/core/client/db/src/cache/mod.rs @@ -16,7 +16,7 @@ //! DB-backed cache of blockchain data. -use std::{sync::Arc, collections::HashMap}; +use std::{sync::Arc, collections::{HashMap, hash_map::Entry}}; use parking_lot::RwLock; use kvdb::{KeyValueDB, DBTransaction}; @@ -52,8 +52,10 @@ pub enum EntryType { /// Block identifier that holds both hash and number. #[derive(Clone, Debug, Encode, Decode, PartialEq)] pub struct ComplexBlockId { - hash: Block::Hash, - number: NumberFor, + /// Hash of the block. + pub(crate) hash: Block::Hash, + /// Number of the block. + pub(crate) number: NumberFor, } impl ComplexBlockId { @@ -69,6 +71,39 @@ impl ::std::cmp::PartialOrd for ComplexBlockId { } } +/// Block id or header. +#[derive(Debug, Clone)] +pub enum BlockIdOrHeader<'a, Block: BlockT> { + /// Block id. + Id(&'a ComplexBlockId), + /// Block header. 
+ Header(&'a Block::Header), +} + +impl<'a, Block: BlockT> From<&'a ComplexBlockId> for BlockIdOrHeader<'a, Block> { + fn from(id: &'a ComplexBlockId) -> Self { + BlockIdOrHeader::Id(id) + } +} + +impl<'a, Block: BlockT> BlockIdOrHeader<'a, Block> { + /// Get id of refrenced block. + pub fn id(&self) -> ComplexBlockId { + match *self { + BlockIdOrHeader::Id(id) => id.clone(), + BlockIdOrHeader::Header(header) => ComplexBlockId::new(header.hash(), *header.number()), + } + } + + /// Get number of refrenced block. + pub fn number(&self) -> NumberFor { + match *self { + BlockIdOrHeader::Id(id) => id.number, + BlockIdOrHeader::Header(header) => *header.number(), + } + } +} + /// All cache items must implement this trait. pub trait CacheItemT: Clone + Decode + Encode + PartialEq {} @@ -116,23 +151,41 @@ impl DbCache { DbCacheTransaction { cache: self, tx, - cache_at_op: HashMap::new(), + cache_at_ops: HashMap::new(), best_finalized_block: None, } } + /// Begin cache transaction with given ops. + pub fn transaction_with_ops<'a>( + &'a mut self, + tx: &'a mut DBTransaction, + ops: DbCacheTransactionOps, + ) -> DbCacheTransaction<'a, Block> { + DbCacheTransaction { + cache: self, + tx, + cache_at_ops: ops.cache_at_ops, + best_finalized_block: ops.best_finalized_block, + } + } + /// Run post-commit cache operations. - pub fn commit(&mut self, ops: DbCacheTransactionOps) { - for (name, op) in ops.cache_at_op.into_iter() { - self.get_cache(name).on_transaction_commit(op); + pub fn commit(&mut self, ops: DbCacheTransactionOps) -> ClientResult<()> { + for (name, ops) in ops.cache_at_ops.into_iter() { + self.get_cache(name)?.on_transaction_commit(ops); } if let Some(best_finalized_block) = ops.best_finalized_block { self.best_finalized_block = best_finalized_block; } + Ok(()) } /// Creates `ListCache` with the given name or returns a reference to the existing. 
- fn get_cache(&mut self, name: CacheKeyId) -> &mut ListCache, self::list_storage::DbStorage> { + pub(crate) fn get_cache( + &mut self, + name: CacheKeyId, + ) -> ClientResult<&mut ListCache, self::list_storage::DbStorage>> { get_cache_helper( &mut self.cache_at, name, @@ -155,34 +208,49 @@ fn get_cache_helper<'a, Block: BlockT>( header: Option, cache: Option, best_finalized_block: &ComplexBlockId, -) -> &'a mut ListCache, self::list_storage::DbStorage> { - cache_at.entry(name).or_insert_with(|| { - ListCache::new( - self::list_storage::DbStorage::new(name.to_vec(), db.clone(), - self::list_storage::DbColumns { - meta: COLUMN_META, - key_lookup, - header, - cache, - }, - ), - cache_pruning_strategy(name), - best_finalized_block.clone(), - ) - }) +) -> ClientResult<&'a mut ListCache, self::list_storage::DbStorage>> { + match cache_at.entry(name) { + Entry::Occupied(entry) => Ok(entry.into_mut()), + Entry::Vacant(entry) => { + let cache = ListCache::new( + self::list_storage::DbStorage::new(name.to_vec(), db.clone(), + self::list_storage::DbColumns { + meta: COLUMN_META, + key_lookup, + header, + cache, + }, + ), + cache_pruning_strategy(name), + best_finalized_block.clone(), + )?; + Ok(entry.insert(cache)) + } + } } /// Cache operations that are to be committed after database transaction is committed. +#[derive(Default)] pub struct DbCacheTransactionOps { - cache_at_op: HashMap>>, + cache_at_ops: HashMap>>>, best_finalized_block: Option>, } +impl DbCacheTransactionOps { + /// Empty transaction ops. + pub fn empty() -> DbCacheTransactionOps { + DbCacheTransactionOps { + cache_at_ops: HashMap::new(), + best_finalized_block: None, + } + } +} + /// Database-backed blockchain data cache transaction valid for single block import. 
pub struct DbCacheTransaction<'a, Block: BlockT> { cache: &'a mut DbCache, tx: &'a mut DBTransaction, - cache_at_op: HashMap>>, + cache_at_ops: HashMap>>>, best_finalized_block: Option>, } @@ -190,7 +258,7 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> { /// Convert transaction into post-commit operations set. pub fn into_ops(self) -> DbCacheTransactionOps { DbCacheTransactionOps { - cache_at_op: self.cache_at_op, + cache_at_ops: self.cache_at_ops, best_finalized_block: self.best_finalized_block, } } @@ -203,8 +271,6 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> { data_at: HashMap>, entry_type: EntryType, ) -> ClientResult { - assert!(self.cache_at_op.is_empty()); - // prepare list of caches that are not update // (we might still need to do some cache maintenance in this case) let missed_caches = self.cache.cache_at.keys() @@ -213,7 +279,8 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> { .collect::>(); let mut insert_op = |name: CacheKeyId, value: Option>| -> Result<(), client::error::Error> { - let cache = self.cache.get_cache(name); + let cache = self.cache.get_cache(name)?; + let mut cache_ops = self.cache_at_ops.remove(&name).unwrap_or_default(); let op = cache.on_block_insert( &mut self::list_storage::DbStorageTransaction::new( cache.storage(), @@ -223,10 +290,11 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> { block.clone(), value, entry_type, + cache_ops.last(), )?; - if let Some(op) = op { - self.cache_at_op.insert(name, op); - } + + push_cache_op(&mut cache_ops, op); + self.cache_at_ops.insert(name, cache_ops); Ok(()) }; @@ -246,37 +314,117 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> { pub fn on_block_finalize( mut self, parent: ComplexBlockId, - block: ComplexBlockId + block: ComplexBlockId, ) -> ClientResult { - assert!(self.cache_at_op.is_empty()); - - for (name, cache_at) in self.cache.cache_at.iter() { - let op = cache_at.on_block_finalize( + for (name, cache) in self.cache.cache_at.iter() { + let 
mut cache_ops = self.cache_at_ops.remove(name).unwrap_or_default(); + let op = cache.on_block_finalize( &mut self::list_storage::DbStorageTransaction::new( - cache_at.storage(), + cache.storage(), &mut self.tx ), parent.clone(), block.clone(), + cache_ops.last(), )?; - if let Some(op) = op { - self.cache_at_op.insert(name.to_owned(), op); - } + push_cache_op(&mut cache_ops, op); + self.cache_at_ops.insert(*name, cache_ops); } self.best_finalized_block = Some(block); Ok(self) } + + /// When block is reverted. + pub fn on_block_revert( + mut self, + block: NumberFor, + ) -> ClientResult { + for (name, cache) in self.cache.cache_at.iter() { + let mut cache_ops = self.cache_at_ops.remove(name).unwrap_or_default(); + let op = cache.on_block_revert( + &mut self::list_storage::DbStorageTransaction::new( + cache.storage(), + &mut self.tx + ), + block, + )?; + + cache_ops.push(op); + self.cache_at_ops.insert(*name, cache_ops); + } + + Ok(self) + } } /// Synchronous implementation of database-backed blockchain data cache. -pub struct DbCacheSync(pub RwLock>); +pub struct DbCacheSync { + db: Arc, + key_lookup_column: Option, + header_column: Option, + cache: RwLock>, +} + +impl DbCacheSync { + /// Create new sync cache. + pub fn new(cache: DbCache) -> Self { + Self { + db: cache.db.clone(), + key_lookup_column: cache.key_lookup_column.clone(), + header_column: cache.header_column.clone(), + cache: RwLock::new(cache), + } + } + + /// Get reference to the cache. + pub(crate) fn cache(&self) -> &RwLock> { + &self.cache + } + + /// Convert block id into complex block id. 
+ pub fn to_complex_id(&self, block: &BlockId) -> ClientResult> { + Ok(match *block { + BlockId::Hash(hash) => { + let header = utils::require_header::( + &*self.db, + self.key_lookup_column, + self.header_column, + BlockId::Hash(hash.clone()) + )?; + ComplexBlockId::new(hash, *header.number()) + }, + BlockId::Number(number) => { + let header = utils::require_header::( + &*self.db, + self.key_lookup_column, + self.header_column, + BlockId::Number(number.clone()) + )?; + ComplexBlockId::new(header.hash(), number) + }, + }) + } + + /// Get value at inserted or not yet inserted block. + pub fn get_at_block( + &self, + key: &CacheKeyId, + at: BlockIdOrHeader, + ) -> Option<((NumberFor, Block::Hash), Option<(NumberFor, Block::Hash)>, Vec)> { + self.cache.read().cache_at.get(key)? + .value_at_block(at) + .map(|block_and_value| block_and_value.map(|(begin_block, end_block, value)| + ((begin_block.number, begin_block.hash), end_block.map(|end_block| (end_block.number, end_block.hash)), value))) + .ok()? 
+ } +} impl BlockchainCache for DbCacheSync { fn initialize(&self, key: &CacheKeyId, data: Vec) -> ClientResult<()> { - let mut cache = self.0.write(); + let mut cache = self.cache.write(); let genesis_hash = cache.genesis_hash; let cache_contents = vec![(*key, data)].into_iter().collect(); let db = cache.db.clone(); @@ -290,7 +438,7 @@ impl BlockchainCache for DbCacheSync { )?; let tx_ops = tx.into_ops(); db.write(dbtx).map_err(db_err)?; - cache.commit(tx_ops); + cache.commit(tx_ops)?; Ok(()) } @@ -299,34 +447,32 @@ impl BlockchainCache for DbCacheSync { key: &CacheKeyId, at: &BlockId, ) -> Option<((NumberFor, Block::Hash), Option<(NumberFor, Block::Hash)>, Vec)> { - let cache = self.0.read(); - let storage = cache.cache_at.get(key)?.storage(); - let db = storage.db(); - let columns = storage.columns(); - let at = match *at { - BlockId::Hash(hash) => { - let header = utils::read_header::( - &**db, - columns.key_lookup, - columns.header, - BlockId::Hash(hash.clone())).ok()??; - ComplexBlockId::new(hash, *header.number()) - }, - BlockId::Number(number) => { - let hash = utils::read_header::( - &**db, - columns.key_lookup, - columns.header, - BlockId::Number(number.clone())).ok()??.hash(); - ComplexBlockId::new(hash, number) - }, + let id = { + let cache = self.cache.read(); + let storage = cache.cache_at.get(key)?.storage(); + let db = storage.db(); + let columns = storage.columns(); + match *at { + BlockId::Hash(hash) => { + let header = utils::read_header::( + &**db, + columns.key_lookup, + columns.header, + BlockId::Hash(hash.clone())).ok()??; + ComplexBlockId::new(hash, *header.number()) + }, + BlockId::Number(number) => { + let hash = utils::read_header::( + &**db, + columns.key_lookup, + columns.header, + BlockId::Number(number.clone())).ok()??.hash(); + ComplexBlockId::new(hash, number) + }, + } }; - cache.cache_at.get(key)? 
- .value_at_block(&at) - .map(|block_and_value| block_and_value.map(|(begin_block, end_block, value)| - ((begin_block.number, begin_block.hash), end_block.map(|end_block| (end_block.number, end_block.hash)), value))) - .ok()? + self.get_at_block(key, (&id).into()) } } @@ -337,3 +483,26 @@ fn cache_pruning_strategy>(cache: CacheKeyId) -> PruningStrategy _ => PruningStrategy::ByDepth(PRUNE_DEPTH.into()), } } + +/// Push new operation to the operations vec. +fn push_cache_op( + cache_ops: &mut Vec>>, + new_op: Option>>, +) { + if let Some(new_op) = new_op { + if let Some(prev_op) = cache_ops.pop() { + match prev_op.merge_with(new_op) { + (Some(merged_op), None) => { + cache_ops.push(merged_op); + }, + (Some(prev_op), Some(new_op)) => { + cache_ops.push(prev_op); + cache_ops.push(new_op); + }, + _ => unreachable!("merge of 2 ops can never lead to noop; qed"), + } + } else { + cache_ops.push(new_op); + } + } +} diff --git a/core/client/db/src/changes_tries_storage.rs b/core/client/db/src/changes_tries_storage.rs index 9d8a90ced3271..801b73e536c65 100644 --- a/core/client/db/src/changes_tries_storage.rs +++ b/core/client/db/src/changes_tries_storage.rs @@ -16,25 +16,30 @@ //! DB-backed changes tries storage. +// TODO: use last non-pruned digest block!!! 
+ use std::collections::HashMap; use std::sync::Arc; use hash_db::Prefix; use kvdb::{KeyValueDB, DBTransaction}; use parity_codec::Encode; -use parking_lot::{RwLock, RwLockWriteGuard}; +use parking_lot::RwLock; use client::error::{Error as ClientError, Result as ClientResult}; use trie::MemoryDB; -use client::backend::{PrunableStateChangesTrieStorage, ChangesTrieConfigurationRange}; -use client::blockchain::{Cache, well_known_cache_keys}; +use client::backend::ChangesTrieConfigurationRange; +use client::blockchain::well_known_cache_keys; use parity_codec::Decode; use primitives::{H256, Blake2Hasher, ChangesTrieConfiguration, convert_hash}; use sr_primitives::traits::{ - Block as BlockT, Header as HeaderT, NumberFor, One, + Block as BlockT, Header as HeaderT, NumberFor, One, Zero, CheckedSub, }; use sr_primitives::generic::{BlockId, DigestItem, ChangesTrieSignal}; use state_machine::DBValue; -use crate::utils::{self, Meta}; -use crate::cache::{DbCacheSync, DbCache, DbCacheTransactionOps, ComplexBlockId, EntryType as CacheEntryType}; +use crate::utils::{self, Meta, meta_keys, db_err}; +use crate::cache::{ + DbCacheSync, DbCache, DbCacheTransactionOps, + ComplexBlockId, BlockIdOrHeader, EntryType as CacheEntryType, +}; /// Extract new changes trie configuration (if available) from the header. pub fn extract_new_configuration(header: &Header) -> Option<&Option> { @@ -43,58 +48,77 @@ pub fn extract_new_configuration(header: &Header) -> Option<&Op .and_then(ChangesTrieSignal::as_new_configuration) } -/// Opaque configuration cache transaction. -pub struct DbChangesTrieStorageTransaction<'a, Block: BlockT> { - /// Lock needs to be held between commit and post_commit calls. - lock: RwLockWriteGuard<'a, DbCache>, - /// Cache operations that needs to be performed once tx is committed. - ops: DbCacheTransactionOps, -} +/// Opaque configuration cache transaction. During its lifetime, noone should modify cache. 
This is currently +/// guaranteed because import lock is held during block import/finalization. +pub type DbChangesTrieStorageTransaction = DbCacheTransactionOps; /// Changes tries storage. /// /// Stores all tries in separate DB column. +/// Lock order: meta, tries_meta, cache. pub struct DbChangesTrieStorage { db: Arc, + meta_column: Option, changes_tries_column: Option, key_lookup_column: Option, header_column: Option, meta: Arc, Block::Hash>>>, + tries_meta: RwLock>, min_blocks_to_keep: Option, cache: DbCacheSync, } +/// Persistent struct that contains all the changes tries metadata. +#[derive(Decode, Encode, Debug)] +struct ChangesTriesMeta { + /// Oldest unpruned max-level (or skewed) digest trie blocks range. + /// The range is inclusive from both sides. + /// Is None only if: + /// 1) we haven't yet finalized any blocks (except genesis) + /// 2) if best_finalized_block - min_blocks_to_keep points to the range where changes tries are disabled + /// 3) changes tries pruning is disabled + pub oldest_digest_range: Option<(NumberFor, NumberFor)>, + /// End block (inclusive) of oldest pruned max-level (or skewed) digest trie blocks range. + /// It is guaranteed that we have no any changes tries before (and including) this block. + /// It is guaranteed that all existing changes tries after this block are not yet pruned (if created). + pub oldest_pruned_digest_range_end: NumberFor, +} + impl> DbChangesTrieStorage { /// Create new changes trie storage. 
pub fn new( db: Arc, + meta_column: Option, changes_tries_column: Option, key_lookup_column: Option, header_column: Option, cache_column: Option, meta: Arc, Block::Hash>>>, min_blocks_to_keep: Option, - ) -> Self { + ) -> ClientResult { let (finalized_hash, finalized_number, genesis_hash) = { let meta = meta.read(); (meta.finalized_hash, meta.finalized_number, meta.genesis_hash) }; - Self { + let tries_meta = read_tries_meta(&*db, meta_column)?; + Ok(Self { db: db.clone(), + meta_column, changes_tries_column, key_lookup_column, header_column, meta, min_blocks_to_keep, - cache: DbCacheSync(RwLock::new(DbCache::new( + cache: DbCacheSync::new(DbCache::new( db.clone(), key_lookup_column, header_column, cache_column, genesis_hash, ComplexBlockId::new(finalized_hash, finalized_number), - ))), - } + )), + tries_meta: RwLock::new(tries_meta), + }) } /// Commit new changes trie. @@ -104,52 +128,124 @@ impl> DbChangesTrieStorage { mut changes_trie: MemoryDB, parent_block: ComplexBlockId, block: ComplexBlockId, + new_header: &Block::Header, finalized: bool, new_configuration: Option>, - ) -> ClientResult>> { + cache_tx: Option>, + ) -> ClientResult> { // insert changes trie, associated with block, into DB for (key, (val, _)) in changes_trie.drain() { tx.put(self.changes_tries_column, &key[..], &val); } - // if configuration has been changed, we need to update configuration cache as well + // if configuration has not been changed AND block is not finalized => nothing to do here let new_configuration = match new_configuration { Some(new_configuration) => new_configuration, - None => return Ok(None), + None if !finalized => return Ok(DbChangesTrieStorageTransaction::empty()), + None => return self.finalize(tx, parent_block.hash, block.hash, block.number, Some(new_header), cache_tx), }; + // update configuration cache let mut cache_at = HashMap::new(); cache_at.insert(well_known_cache_keys::CHANGES_TRIE_CONFIG, new_configuration.encode()); + Ok(match cache_tx { + Some(cache_tx) 
=> { + self.cache.cache().write().transaction_with_ops(tx, cache_tx) + .on_block_insert( + parent_block, + block, + cache_at, + if finalized { CacheEntryType::Final } else { CacheEntryType::NonFinal }, + )? + .into_ops() + }, + None => { + self.cache.cache().write().transaction(tx) + .on_block_insert( + parent_block, + block, + cache_at, + if finalized { CacheEntryType::Final } else { CacheEntryType::NonFinal }, + )? + .into_ops() + }, + }) + } + + /// Called when block is finalized. + pub fn finalize( + &self, + tx: &mut DBTransaction, + parent_block_hash: Block::Hash, + block_hash: Block::Hash, + block_num: NumberFor, + new_header: Option<&Block::Header>, + cache_tx: Option>, + ) -> ClientResult> { + // prune obsolete changes tries + self.prune(tx, block_hash, block_num, new_header.clone())?; + + // if we have inserted the block that we're finalizing in the same transaction + // => then we have already finalized it from the commit() call + if cache_tx.is_some() { + if let Some(new_header) = new_header { + if new_header.hash() == block_hash { + return Ok(cache_tx.expect("guarded by cache_tx.is_some(); qed")); + } + } + } - let mut cache = self.cache.0.write(); - let cache_ops = cache.transaction(tx) - .on_block_insert( - parent_block, - block, - cache_at, - if finalized { CacheEntryType::Final } else { CacheEntryType::NonFinal }, - )? - .into_ops(); - Ok(Some(DbChangesTrieStorageTransaction { - lock: cache, - ops: cache_ops, - })) + // and finalize configuration cache entries + let block = ComplexBlockId::new(block_hash, block_num); + let parent_block_num = block_num.checked_sub(&One::one()).unwrap_or_else(|| Zero::zero()); + let parent_block = ComplexBlockId::new(parent_block_hash, parent_block_num); + Ok(match cache_tx { + Some(cache_tx) => { + self.cache.cache().write().transaction_with_ops(tx, cache_tx) + .on_block_finalize( + parent_block, + block, + )? 
+ .into_ops() + }, + None => { + self.cache.cache().write().transaction(tx) + .on_block_finalize( + parent_block, + block, + )? + .into_ops() + }, + }) + } + + /// When block is reverted. + pub fn revert( + &self, + tx: &mut DBTransaction, + block: NumberFor, + ) -> ClientResult> { + Ok(self.cache.cache().write().transaction(tx) + .on_block_revert(block)? + .into_ops()) } /// When transaction has been committed. pub fn post_commit(&self, tx: Option>) { - if let Some(mut tx) = tx { - tx.lock.commit(tx.ops); + if let Some(tx) = tx { + self.cache.cache().write().commit(tx) + .expect("only fails if cache with given name isn't loaded yet;\ + cache is already loaded because there is tx; qed"); } } /// Prune obsolete changes tries. - pub fn prune( + fn prune( &self, tx: &mut DBTransaction, - parent_hash: Block::Hash, block_hash: Block::Hash, block_num: NumberFor, + new_header: Option<&Block::Header>, ) -> ClientResult<()> { // never prune on archive nodes let min_blocks_to_keep = match self.min_blocks_to_keep { @@ -157,25 +253,81 @@ impl> DbChangesTrieStorage { None => return Ok(()), }; - // prune changes tries that are created using newest configuration - let config_range = self.configuration_at(&BlockId::Hash(parent_hash))?; - if let Some(config) = config_range.config { - state_machine::prune_changes_tries( - config_range.zero.0, - &config, - &*self, - min_blocks_to_keep.into(), - &state_machine::ChangesTrieAnchorBlockId { - hash: convert_hash(&block_hash), - number: block_num, - }, - |node| tx.delete(self.changes_tries_column, node.as_ref())); - } + let mut tries_meta = self.tries_meta.write(); + let mut next_digest_range_start = block_num; + loop { + // prune oldest digest if it is known + // it could be unknown if: + // 1) either we're finalizing block#1 + // 2) or we are (or were) in period where changes tries are disabled + if let Some((begin, end)) = tries_meta.oldest_digest_range { + if block_num <= end || block_num - end <= min_blocks_to_keep.into() { + break; + 
} + + tries_meta.oldest_pruned_digest_range_end = end; + state_machine::prune_changes_tries( + &*self, + begin, + end, + &state_machine::ChangesTrieAnchorBlockId { + hash: convert_hash(&block_hash), + number: block_num, + }, + |node| tx.delete(self.changes_tries_column, node.as_ref()), + ); + + next_digest_range_start = end + One::one(); + } - // TODO (#3282): prune tries that were created using previous configurations + // proceed to the next configuration range + let next_digest_range_start_hash = match block_num == next_digest_range_start { + true => block_hash, + false => utils::require_header::( + &*self.db, + self.key_lookup_column, + self.header_column, + BlockId::Number(next_digest_range_start), + )?.hash(), + }; + let block_id = ComplexBlockId::new(next_digest_range_start_hash, next_digest_range_start); + let block_id_or_header = match new_header { + Some(ref new_header) if *new_header.number() == next_digest_range_start + => BlockIdOrHeader::Header(*new_header), + _ => BlockIdOrHeader::Id(&block_id), + }; + let next_config = self.configuration_at_block(block_id_or_header)?; + if let Some(config) = next_config.config { + let mut oldest_digest_range = config + .next_max_level_digest_range(next_config.zero.0, next_digest_range_start) + .unwrap_or_else(|| (next_digest_range_start, next_digest_range_start)); + + if let Some(end) = next_config.end { + if end.0 < oldest_digest_range.1 { + oldest_digest_range.1 = end.0; + } + } + + tries_meta.oldest_digest_range = Some(oldest_digest_range); + continue; + } + + tries_meta.oldest_digest_range = None; + break; + } + write_tries_meta(tx, self.meta_column, &*tries_meta); Ok(()) } + + /// Return configuration at given block id or header. 
+ fn configuration_at_block(&self, at: BlockIdOrHeader) -> ClientResult> { + self.cache + .get_at_block(&well_known_cache_keys::CHANGES_TRIE_CONFIG, at) + .and_then(|(zero, end, encoded)| Decode::decode(&mut &encoded[..]) + .map(|config| ChangesTrieConfigurationRange { zero, end, config })) + .ok_or_else(|| ClientError::ErrorReadingChangesTriesConfig) + } } impl client::backend::PrunableStateChangesTrieStorage @@ -188,28 +340,12 @@ where } fn configuration_at(&self, at: &BlockId) -> ClientResult> { - self.cache - .get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, at) - .and_then(|(zero, end, encoded)| Decode::decode(&mut &encoded[..]) - .map(|config| ChangesTrieConfigurationRange { zero, end, config })) - .ok_or_else(|| ClientError::ErrorReadingChangesTriesConfig) + self.cache.to_complex_id(at) + .and_then(|at| self.configuration_at_block((&at).into())) } - fn oldest_changes_trie_block( - &self, - zero: NumberFor, - config: ChangesTrieConfiguration, - best_finalized_block: NumberFor, - ) -> NumberFor { - match self.min_blocks_to_keep { - Some(min_blocks_to_keep) => state_machine::oldest_non_pruned_changes_trie( - zero, - &config, - min_blocks_to_keep.into(), - best_finalized_block, - ), - None => One::one(), - } + fn oldest_pruned_digest_range_end(&self) -> NumberFor { + self.tries_meta.read().oldest_pruned_digest_range_end } } @@ -295,18 +431,89 @@ where } } +/// Read changes tries metadata from database. +fn read_tries_meta( + db: &dyn KeyValueDB, + meta_column: Option, +) -> ClientResult> { + match db.get(meta_column, meta_keys::CHANGES_TRIES_META).map_err(db_err)? { + Some(h) => match Decode::decode(&mut &h[..]) { + Some(h) => Ok(h), + None => Err(client::error::Error::Backend("Error decoding changes tries metadata".into())), + }, + None => Ok(ChangesTriesMeta { + oldest_digest_range: None, + oldest_pruned_digest_range_end: Zero::zero(), + }), + } +} + +/// Write changes tries metadata from database. 
+fn write_tries_meta( + tx: &mut DBTransaction, + meta_column: Option, + meta: &ChangesTriesMeta, +) { + tx.put(meta_column, meta_keys::CHANGES_TRIES_META, &meta.encode()); +} + #[cfg(test)] mod tests { use hash_db::EMPTY_PREFIX; - use client::backend::{Backend as ClientBackend, NewBlockState, BlockImportOperation}; + use client::backend::{ + Backend as ClientBackend, NewBlockState, BlockImportOperation, PrunableStateChangesTrieStorage, + }; use client::blockchain::HeaderBackend as BlockchainHeaderBackend; - use sr_primitives::testing::Header; + use sr_primitives::testing::{Digest, Header}; use sr_primitives::traits::{Hash, BlakeTwo256}; use state_machine::{ChangesTrieRootsStorage, ChangesTrieStorage}; use crate::Backend; use crate::tests::{Block, insert_header, prepare_changes}; use super::*; + fn changes(number: u64) -> Option, Vec)>> { + Some(vec![(number.to_le_bytes().to_vec(), number.to_le_bytes().to_vec())]) + } + + fn insert_header_with_configuration_change( + backend: &Backend, + number: u64, + parent_hash: H256, + changes: Option, Vec)>>, + new_configuration: Option, + ) -> H256 { + let mut digest = Digest::default(); + let mut changes_trie_update = Default::default(); + if let Some(changes) = changes { + let (root, update) = prepare_changes(changes); + digest.push(DigestItem::ChangesTrieRoot(root)); + changes_trie_update = update; + } + digest.push(DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration(new_configuration))); + + let header = Header { + number, + parent_hash, + state_root: BlakeTwo256::trie_root::<_, &[u8], &[u8]>(Vec::new()), + digest, + extrinsics_root: Default::default(), + }; + let header_hash = header.hash(); + + let block_id = if number == 0 { + BlockId::Hash(Default::default()) + } else { + BlockId::Number(number - 1) + }; + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, block_id).unwrap(); + op.set_block_data(header, None, None, NewBlockState::Best).unwrap(); + 
op.update_changes_trie(changes_trie_update).unwrap(); + backend.commit_operation(op).unwrap(); + + header_hash + } + #[test] fn changes_trie_storage_works() { let backend = Backend::::new_test(1000, 100); @@ -333,9 +540,9 @@ mod tests { ]; let changes2 = vec![(b"key_at_2".to_vec(), b"val_at_2".to_vec())]; - let block0 = insert_header(&backend, 0, Default::default(), changes0.clone(), Default::default()); - let block1 = insert_header(&backend, 1, block0, changes1.clone(), Default::default()); - let _ = insert_header(&backend, 2, block1, changes2.clone(), Default::default()); + let block0 = insert_header(&backend, 0, Default::default(), Some(changes0.clone()), Default::default()); + let block1 = insert_header(&backend, 1, block0, Some(changes1.clone()), Default::default()); + let _ = insert_header(&backend, 2, block1, Some(changes2.clone()), Default::default()); // check that the storage contains tries for all blocks check_changes(&backend, 0, changes0); @@ -350,19 +557,19 @@ mod tests { let changes0 = vec![(b"k0".to_vec(), b"v0".to_vec())]; let changes1 = vec![(b"k1".to_vec(), b"v1".to_vec())]; let changes2 = vec![(b"k2".to_vec(), b"v2".to_vec())]; - let block0 = insert_header(&backend, 0, Default::default(), changes0.clone(), Default::default()); - let block1 = insert_header(&backend, 1, block0, changes1.clone(), Default::default()); - let block2 = insert_header(&backend, 2, block1, changes2.clone(), Default::default()); + let block0 = insert_header(&backend, 0, Default::default(), Some(changes0.clone()), Default::default()); + let block1 = insert_header(&backend, 1, block0, Some(changes1.clone()), Default::default()); + let block2 = insert_header(&backend, 2, block1, Some(changes2.clone()), Default::default()); let changes2_1_0 = vec![(b"k3".to_vec(), b"v3".to_vec())]; let changes2_1_1 = vec![(b"k4".to_vec(), b"v4".to_vec())]; - let block2_1_0 = insert_header(&backend, 3, block2, changes2_1_0.clone(), Default::default()); - let block2_1_1 = insert_header(&backend, 
4, block2_1_0, changes2_1_1.clone(), Default::default()); + let block2_1_0 = insert_header(&backend, 3, block2, Some(changes2_1_0.clone()), Default::default()); + let block2_1_1 = insert_header(&backend, 4, block2_1_0, Some(changes2_1_1.clone()), Default::default()); let changes2_2_0 = vec![(b"k5".to_vec(), b"v5".to_vec())]; let changes2_2_1 = vec![(b"k6".to_vec(), b"v6".to_vec())]; - let block2_2_0 = insert_header(&backend, 3, block2, changes2_2_0.clone(), Default::default()); - let block2_2_1 = insert_header(&backend, 4, block2_2_0, changes2_2_1.clone(), Default::default()); + let block2_2_0 = insert_header(&backend, 3, block2, Some(changes2_2_0.clone()), Default::default()); + let block2_2_1 = insert_header(&backend, 4, block2_2_0, Some(changes2_2_1.clone()), Default::default()); // finalize block1 backend.changes_tries_storage.meta.write().finalized_number = 1; @@ -400,207 +607,157 @@ mod tests { } #[test] - fn changes_tries_with_digest_are_pruned_on_finalization() { + fn changes_tries_are_pruned_on_finalization() { let mut backend = Backend::::new_test(1000, 100); - let config = ChangesTrieConfiguration { - digest_interval: 2, - digest_levels: 2, - }; - backend.changes_tries_storage.min_blocks_to_keep = Some(8); - // insert some blocks - let mut blocks = Vec::new(); - let mut last_block = Default::default(); - for i in 0u64..14u64 { - let key = i.to_le_bytes().to_vec(); - let val = key.clone(); - last_block = insert_header( + let parent_hash = |number| { + if number == 0 { + Default::default() + } else { + backend.blockchain().header(BlockId::Number(number - 1)).unwrap().unwrap().hash() + } + }; + + let insert_regular_header = |with_changes, number| { + insert_header( &backend, - i, - last_block, - vec![(key, val)], + number, + parent_hash(number), + if with_changes { changes(number) } else { None }, Default::default(), ); - blocks.push(last_block); - } - backend.changes_tries_storage.meta.write().finalized_number = 13; - 
backend.changes_tries_storage.cache.initialize( - &well_known_cache_keys::CHANGES_TRIE_CONFIG, - Some(config).encode(), - ).unwrap(); - - // check that roots of all tries are in the columns::CHANGES_TRIE - let anchor = state_machine::ChangesTrieAnchorBlockId { hash: blocks[13], number: 13 }; - fn read_changes_trie_root(backend: &Backend, num: u64) -> H256 { - backend.blockchain().header(BlockId::Number(num)).unwrap().unwrap().digest().logs().iter() - .find(|i| i.as_changes_trie_root().is_some()).unwrap().as_changes_trie_root().unwrap().clone() - } - let root1 = read_changes_trie_root(&backend, 1); - assert_eq!(backend.changes_tries_storage.root(&anchor, 1).unwrap(), Some(root1)); - let root2 = read_changes_trie_root(&backend, 2); - assert_eq!(backend.changes_tries_storage.root(&anchor, 2).unwrap(), Some(root2)); - let root3 = read_changes_trie_root(&backend, 3); - assert_eq!(backend.changes_tries_storage.root(&anchor, 3).unwrap(), Some(root3)); - let root4 = read_changes_trie_root(&backend, 4); - assert_eq!(backend.changes_tries_storage.root(&anchor, 4).unwrap(), Some(root4)); - let root5 = read_changes_trie_root(&backend, 5); - assert_eq!(backend.changes_tries_storage.root(&anchor, 5).unwrap(), Some(root5)); - let root6 = read_changes_trie_root(&backend, 6); - assert_eq!(backend.changes_tries_storage.root(&anchor, 6).unwrap(), Some(root6)); - let root7 = read_changes_trie_root(&backend, 7); - assert_eq!(backend.changes_tries_storage.root(&anchor, 7).unwrap(), Some(root7)); - let root8 = read_changes_trie_root(&backend, 8); - assert_eq!(backend.changes_tries_storage.root(&anchor, 8).unwrap(), Some(root8)); - let root9 = read_changes_trie_root(&backend, 9); - assert_eq!(backend.changes_tries_storage.root(&anchor, 9).unwrap(), Some(root9)); - let root10 = read_changes_trie_root(&backend, 10); - assert_eq!(backend.changes_tries_storage.root(&anchor, 10).unwrap(), Some(root10)); - let root11 = read_changes_trie_root(&backend, 11); - 
assert_eq!(backend.changes_tries_storage.root(&anchor, 11).unwrap(), Some(root11)); - let root12 = read_changes_trie_root(&backend, 12); - assert_eq!(backend.changes_tries_storage.root(&anchor, 12).unwrap(), Some(root12)); - - // now simulate finalization of block#12, causing prune of tries at #1..#4 - let mut tx = DBTransaction::new(); - backend.changes_tries_storage.prune(&mut tx, blocks[0], Default::default(), 12).unwrap(); - backend.storage.db.write(tx).unwrap(); - assert!(backend.changes_tries_storage.get(&root1, EMPTY_PREFIX).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root2, EMPTY_PREFIX).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root3, EMPTY_PREFIX).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root4, EMPTY_PREFIX).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root5, EMPTY_PREFIX).unwrap().is_some()); - assert!(backend.changes_tries_storage.get(&root6, EMPTY_PREFIX).unwrap().is_some()); - assert!(backend.changes_tries_storage.get(&root7, EMPTY_PREFIX).unwrap().is_some()); - assert!(backend.changes_tries_storage.get(&root8, EMPTY_PREFIX).unwrap().is_some()); - - // now simulate finalization of block#16, causing prune of tries at #5..#8 - let mut tx = DBTransaction::new(); - backend.changes_tries_storage.prune(&mut tx, blocks[0], Default::default(), 16).unwrap(); - backend.storage.db.write(tx).unwrap(); - assert!(backend.changes_tries_storage.get(&root5, EMPTY_PREFIX).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root6, EMPTY_PREFIX).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root7, EMPTY_PREFIX).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root8, EMPTY_PREFIX).unwrap().is_none()); - - // now "change" pruning mode to archive && simulate finalization of block#20 - // => no changes tries are pruned, because we never prune in archive mode - backend.changes_tries_storage.min_blocks_to_keep = None; - let mut tx = 
DBTransaction::new(); - backend.changes_tries_storage.prune(&mut tx, blocks[0], Default::default(), 20).unwrap(); - backend.storage.db.write(tx).unwrap(); - assert!(backend.changes_tries_storage.get(&root9, EMPTY_PREFIX).unwrap().is_some()); - assert!(backend.changes_tries_storage.get(&root10, EMPTY_PREFIX).unwrap().is_some()); - assert!(backend.changes_tries_storage.get(&root11, EMPTY_PREFIX).unwrap().is_some()); - assert!(backend.changes_tries_storage.get(&root12, EMPTY_PREFIX).unwrap().is_some()); - } - - #[test] - fn changes_tries_without_digest_are_pruned_on_finalization() { - let mut backend = Backend::::new_test(1000, 100); - let config = ChangesTrieConfiguration { - digest_interval: 0, - digest_levels: 0, }; - backend.changes_tries_storage.min_blocks_to_keep = Some(4); + let is_pruned = |number| { + let trie_root = backend + .blockchain() + .header(BlockId::Number(number)) + .unwrap().unwrap() + .digest() + .log(DigestItem::as_changes_trie_root) + .cloned(); + match trie_root { + Some(trie_root) => backend.changes_tries_storage.get(&trie_root, EMPTY_PREFIX).unwrap().is_none(), + None => true, + } + }; - // insert some blocks - let mut blocks = Vec::new(); - let mut last_block = Default::default(); - for i in 0u64..7u64 { - let key = i.to_le_bytes().to_vec(); - let val = key.clone(); - last_block = insert_header( - &backend, - i, - last_block, - vec![(key, val)], - Default::default(), - ); - blocks.push(last_block); - } - backend.changes_tries_storage.cache.initialize( - &well_known_cache_keys::CHANGES_TRIE_CONFIG, - Some(config).encode(), - ).unwrap(); - - // check that roots of all tries are in the columns::CHANGES_TRIE - let anchor = state_machine::ChangesTrieAnchorBlockId { hash: blocks[6], number: 6 }; - fn read_changes_trie_root(backend: &Backend, num: u64) -> H256 { - backend.blockchain().header(BlockId::Number(num)).unwrap().unwrap().digest().logs().iter() - .find(|i| 
i.as_changes_trie_root().is_some()).unwrap().as_changes_trie_root().unwrap().clone() - } + let finalize_block = |number| { + let header = backend.blockchain().header(BlockId::Number(number)).unwrap().unwrap(); + let mut tx = DBTransaction::new(); + let cache_ops = backend.changes_tries_storage.finalize( + &mut tx, + *header.parent_hash(), + header.hash(), + number, + None, + None, + ).unwrap(); + backend.storage.db.write(tx).unwrap(); + backend.changes_tries_storage.post_commit(Some(cache_ops)); + }; - let root1 = read_changes_trie_root(&backend, 1); - assert_eq!(backend.changes_tries_storage.root(&anchor, 1).unwrap(), Some(root1)); - let root2 = read_changes_trie_root(&backend, 2); - assert_eq!(backend.changes_tries_storage.root(&anchor, 2).unwrap(), Some(root2)); - let root3 = read_changes_trie_root(&backend, 3); - assert_eq!(backend.changes_tries_storage.root(&anchor, 3).unwrap(), Some(root3)); - let root4 = read_changes_trie_root(&backend, 4); - assert_eq!(backend.changes_tries_storage.root(&anchor, 4).unwrap(), Some(root4)); - let root5 = read_changes_trie_root(&backend, 5); - assert_eq!(backend.changes_tries_storage.root(&anchor, 5).unwrap(), Some(root5)); - let root6 = read_changes_trie_root(&backend, 6); - assert_eq!(backend.changes_tries_storage.root(&anchor, 6).unwrap(), Some(root6)); - - // now simulate finalization of block#5, causing prune of trie at #1 - let mut tx = DBTransaction::new(); - backend.changes_tries_storage.prune(&mut tx, blocks[1], blocks[5], 5).unwrap(); - backend.storage.db.write(tx).unwrap(); - assert!(backend.changes_tries_storage.get(&root1, EMPTY_PREFIX).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root2, EMPTY_PREFIX).unwrap().is_some()); - - // now simulate finalization of block#6, causing prune of tries at #2 - let mut tx = DBTransaction::new(); - backend.changes_tries_storage.prune(&mut tx, blocks[1], blocks[6], 6).unwrap(); - backend.storage.db.write(tx).unwrap(); - 
assert!(backend.changes_tries_storage.get(&root2, EMPTY_PREFIX).unwrap().is_none()); - assert!(backend.changes_tries_storage.get(&root3, EMPTY_PREFIX).unwrap().is_some()); + // configuration ranges: + // (0; 6] - None + // [7; 17] - Some(2^2): D2 is built at #10, #14; SD is built at #17 + // [18; 21] - None + // [22; 32] - Some(8^1): D1 is built at #29; SD is built at #32 + // [33; ... - Some(1) + let config_at_6 = Some(ChangesTrieConfiguration::new(2, 2)); + let config_at_17 = None; + let config_at_21 = Some(ChangesTrieConfiguration::new(8, 1)); + let config_at_32 = Some(ChangesTrieConfiguration::new(1, 0)); + + (0..6).for_each(|number| insert_regular_header(false, number)); + insert_header_with_configuration_change(&backend, 6, parent_hash(6), None, config_at_6); + (7..17).for_each(|number| insert_regular_header(true, number)); + insert_header_with_configuration_change(&backend, 17, parent_hash(17), changes(17), config_at_17); + (18..21).for_each(|number| insert_regular_header(false, number)); + insert_header_with_configuration_change(&backend, 21, parent_hash(21), None, config_at_21); + (22..32).for_each(|number| insert_regular_header(true, number)); + insert_header_with_configuration_change(&backend, 32, parent_hash(32), changes(32), config_at_32); + (33..50).for_each(|number| insert_regular_header(true, number)); + + // when only genesis is finalized, nothing is pruned + (0..=6).for_each(|number| assert!(is_pruned(number))); + (7..=17).for_each(|number| assert!(!is_pruned(number))); + (18..=21).for_each(|number| assert!(is_pruned(number))); + (22..50).for_each(|number| assert!(!is_pruned(number))); + + // when blocks [1; 18] are finalized, nothing is pruned + (1..=18).for_each(|number| finalize_block(number)); + (0..=6).for_each(|number| assert!(is_pruned(number))); + (7..=17).for_each(|number| assert!(!is_pruned(number))); + (18..=21).for_each(|number| assert!(is_pruned(number))); + (22..50).for_each(|number| assert!(!is_pruned(number))); + + // when block 19 
is finalized, changes tries for blocks [7; 10] are pruned + finalize_block(19); + (0..=10).for_each(|number| assert!(is_pruned(number))); + (11..=17).for_each(|number| assert!(!is_pruned(number))); + (18..=21).for_each(|number| assert!(is_pruned(number))); + (22..50).for_each(|number| assert!(!is_pruned(number))); + + // when blocks [20; 22] are finalized, nothing is pruned + (20..=22).for_each(|number| finalize_block(number)); + (0..=10).for_each(|number| assert!(is_pruned(number))); + (11..=17).for_each(|number| assert!(!is_pruned(number))); + (18..=21).for_each(|number| assert!(is_pruned(number))); + (22..50).for_each(|number| assert!(!is_pruned(number))); + + // when block 23 is finalized, changes tries for blocks [11; 14] are pruned + finalize_block(23); + (0..=14).for_each(|number| assert!(is_pruned(number))); + (15..=17).for_each(|number| assert!(!is_pruned(number))); + (18..=21).for_each(|number| assert!(is_pruned(number))); + (22..50).for_each(|number| assert!(!is_pruned(number))); + + // when blocks [24; 25] are finalized, nothing is pruned + (24..=25).for_each(|number| finalize_block(number)); + (0..=14).for_each(|number| assert!(is_pruned(number))); + (15..=17).for_each(|number| assert!(!is_pruned(number))); + (18..=21).for_each(|number| assert!(is_pruned(number))); + (22..50).for_each(|number| assert!(!is_pruned(number))); + + // when block 26 is finalized, changes tries for blocks [15; 17] are pruned + finalize_block(26); + (0..=21).for_each(|number| assert!(is_pruned(number))); + (22..50).for_each(|number| assert!(!is_pruned(number))); + + // when blocks [27; 37] are finalized, nothing is pruned + (27..=37).for_each(|number| finalize_block(number)); + (0..=21).for_each(|number| assert!(is_pruned(number))); + (22..50).for_each(|number| assert!(!is_pruned(number))); + + // when block 38 is finalized, changes tries for blocks [22; 29] are pruned + finalize_block(38); + (0..=29).for_each(|number| assert!(is_pruned(number))); + (30..50).for_each(|number| 
assert!(!is_pruned(number))); + + // when blocks [39; 40] are finalized, nothing is pruned + (39..=40).for_each(|number| finalize_block(number)); + (0..=29).for_each(|number| assert!(is_pruned(number))); + (30..50).for_each(|number| assert!(!is_pruned(number))); + + // when block 41 is finalized, changes tries for blocks [30; 32] are pruned + finalize_block(41); + (0..=32).for_each(|number| assert!(is_pruned(number))); + (33..50).for_each(|number| assert!(!is_pruned(number))); + + // when block 42 is finalized, changes trie for block 33 is pruned + finalize_block(42); + (0..=33).for_each(|number| assert!(is_pruned(number))); + (34..50).for_each(|number| assert!(!is_pruned(number))); + + // when block 43 is finalized, changes trie for block 34 is pruned + finalize_block(43); + (0..=34).for_each(|number| assert!(is_pruned(number))); + (35..50).for_each(|number| assert!(!is_pruned(number))); } #[test] fn changes_tries_configuration_is_updated_on_block_insert() { - fn insert_header_with_configuration_change( - backend: &Backend, - number: u64, - parent_hash: H256, - changes: Vec<(Vec, Vec)>, - new_configuration: Option, - ) -> H256 { - use sr_primitives::testing::Digest; - - let (changes_root, changes_trie_update) = prepare_changes(changes); - let digest = Digest { - logs: vec![ - DigestItem::ChangesTrieRoot(changes_root), - DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration(new_configuration)), - ], - }; - let header = Header { - number, - parent_hash, - state_root: BlakeTwo256::trie_root::<_, &[u8], &[u8]>(Vec::new()), - digest, - extrinsics_root: Default::default(), - }; - let header_hash = header.hash(); - - let block_id = if number == 0 { - BlockId::Hash(Default::default()) - } else { - BlockId::Number(number - 1) - }; - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, block_id).unwrap(); - op.set_block_data(header, None, None, NewBlockState::Best).unwrap(); - 
op.update_changes_trie(changes_trie_update).unwrap(); - backend.commit_operation(op).unwrap(); - - header_hash - } - let backend = Backend::::new_test(1000, 100); // configurations at blocks @@ -619,14 +776,14 @@ mod tests { }); // insert some blocks - let block0 = insert_header(&backend, 0, Default::default(), Vec::new(), Default::default()); - let block1 = insert_header_with_configuration_change(&backend, 1, block0, Vec::new(), config_at_1.clone()); - let block2 = insert_header(&backend, 2, block1, Vec::new(), Default::default()); - let block3 = insert_header_with_configuration_change(&backend, 3, block2, Vec::new(), config_at_3.clone()); - let block4 = insert_header(&backend, 4, block3, Vec::new(), Default::default()); - let block5 = insert_header_with_configuration_change(&backend, 5, block4, Vec::new(), config_at_5.clone()); - let block6 = insert_header(&backend, 6, block5, Vec::new(), Default::default()); - let block7 = insert_header_with_configuration_change(&backend, 7, block6, Vec::new(), config_at_7.clone()); + let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); + let block1 = insert_header_with_configuration_change(&backend, 1, block0, None, config_at_1.clone()); + let block2 = insert_header(&backend, 2, block1, None, Default::default()); + let block3 = insert_header_with_configuration_change(&backend, 3, block2, None, config_at_3.clone()); + let block4 = insert_header(&backend, 4, block3, None, Default::default()); + let block5 = insert_header_with_configuration_change(&backend, 5, block4, None, config_at_5.clone()); + let block6 = insert_header(&backend, 6, block5, None, Default::default()); + let block7 = insert_header_with_configuration_change(&backend, 7, block6, None, config_at_7.clone()); // test configuration cache let storage = &backend.changes_tries_storage; @@ -659,4 +816,149 @@ mod tests { config_at_7.clone(), ); } + + #[test] + fn test_finalize_several_configuration_change_blocks_in_single_operation() { + 
let mut backend = Backend::::new_test(10, 10); + backend.changes_tries_storage.min_blocks_to_keep = Some(8); + + let configs = (0..=7).map(|i| Some(ChangesTrieConfiguration::new(2, i))).collect::>(); + + // insert unfinalized headers + let block0 = insert_header_with_configuration_change(&backend, 0, Default::default(), None, configs[0].clone()); + let block1 = insert_header_with_configuration_change(&backend, 1, block0, changes(1), configs[1].clone()); + let block2 = insert_header_with_configuration_change(&backend, 2, block1, changes(2), configs[2].clone()); + + let side_config2_1 = Some(ChangesTrieConfiguration::new(3, 2)); + let side_config2_2 = Some(ChangesTrieConfiguration::new(3, 3)); + let block2_1 = insert_header_with_configuration_change(&backend, 2, block1, changes(8), side_config2_1.clone()); + let _ = insert_header_with_configuration_change(&backend, 3, block2_1, changes(9), side_config2_2.clone()); + + // insert finalized header => 4 headers are finalized at once + let header3 = Header { + number: 3, + parent_hash: block2, + state_root: Default::default(), + digest: Digest { + logs: vec![ + DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration(configs[3].clone())), + ], + }, + extrinsics_root: Default::default(), + }; + let block3 = header3.hash(); + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(block2)).unwrap(); + op.mark_finalized(BlockId::Hash(block1), None).unwrap(); + op.mark_finalized(BlockId::Hash(block2), None).unwrap(); + op.set_block_data(header3, None, None, NewBlockState::Final).unwrap(); + backend.commit_operation(op).unwrap(); + + // insert more unfinalized headers + let block4 = insert_header_with_configuration_change(&backend, 4, block3, changes(4), configs[4].clone()); + let block5 = insert_header_with_configuration_change(&backend, 5, block4, changes(5), configs[5].clone()); + let block6 = insert_header_with_configuration_change(&backend, 6, block5, changes(6), 
configs[6].clone()); + + // insert finalized header => 4 headers are finalized at once + let header7 = Header { + number: 7, + parent_hash: block6, + state_root: Default::default(), + digest: Digest { + logs: vec![ + DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration(configs[7].clone())), + ], + }, + extrinsics_root: Default::default(), + }; + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(block6)).unwrap(); + op.mark_finalized(BlockId::Hash(block4), None).unwrap(); + op.mark_finalized(BlockId::Hash(block5), None).unwrap(); + op.mark_finalized(BlockId::Hash(block6), None).unwrap(); + op.set_block_data(header7, None, None, NewBlockState::Final).unwrap(); + backend.commit_operation(op).unwrap(); + } + + #[test] + fn changes_tries_configuration_is_reverted() { + let backend = Backend::::new_test(10, 10); + + let config0 = Some(ChangesTrieConfiguration::new(2, 5)); + let block0 = insert_header_with_configuration_change(&backend, 0, Default::default(), None, config0); + let config1 = Some(ChangesTrieConfiguration::new(2, 6)); + let block1 = insert_header_with_configuration_change(&backend, 1, block0, changes(0), config1); + backend.finalize_block(BlockId::Number(1), Some(vec![42])).unwrap(); + let config2 = Some(ChangesTrieConfiguration::new(2, 7)); + let block2 = insert_header_with_configuration_change(&backend, 2, block1, changes(1), config2); + let config2_1 = Some(ChangesTrieConfiguration::new(2, 8)); + let _ = insert_header_with_configuration_change(&backend, 3, block2, changes(10), config2_1); + let config2_2 = Some(ChangesTrieConfiguration::new(2, 9)); + let block2_2 = insert_header_with_configuration_change(&backend, 3, block2, changes(20), config2_2); + let config2_3 = Some(ChangesTrieConfiguration::new(2, 10)); + let _ = insert_header_with_configuration_change(&backend, 4, block2_2, changes(30), config2_3); + + // before truncate there are 2 unfinalized forks - block2_1+block2_3 + 
assert_eq!( + backend.changes_tries_storage.cache.cache().write() + .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) + .unwrap() + .unfinalized() + .iter() + .map(|fork| fork.head().valid_from.number) + .collect::>(), + vec![3, 4], + ); + + // after truncating block2_3 - there are 2 unfinalized forks - block2_1+block2_2 + backend.revert(1).unwrap(); + assert_eq!( + backend.changes_tries_storage.cache.cache().write() + .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) + .unwrap() + .unfinalized() + .iter() + .map(|fork| fork.head().valid_from.number) + .collect::>(), + vec![3, 3], + ); + + // after truncating block2_1 && block2_2 - there are still two unfinalized forks (cache impl specifics), + // though they're pointing to the same block + backend.revert(1).unwrap(); + assert_eq!( + backend.changes_tries_storage.cache.cache().write() + .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) + .unwrap() + .unfinalized() + .iter() + .map(|fork| fork.head().valid_from.number) + .collect::>(), + vec![2, 2], + ); + assert_eq!( + backend.changes_tries_storage.cache.cache().write() + .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) + .unwrap() + .unfinalized() + .iter() + .map(|fork| fork.head().valid_from.number) + .collect::<::std::collections::HashSet<_>>() + .len(), + 1, + ); + + // after truncating block2 - there are no unfinalized forks + backend.revert(1).unwrap(); + assert!( + backend.changes_tries_storage.cache.cache().write() + .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) + .unwrap() + .unfinalized() + .iter() + .map(|fork| fork.head().valid_from.number) + .collect::>() + .is_empty(), + ); + } } diff --git a/core/client/db/src/lib.rs b/core/client/db/src/lib.rs index 1869505a65310..f8aa0ce70445e 100644 --- a/core/client/db/src/lib.rs +++ b/core/client/db/src/lib.rs @@ -61,7 +61,7 @@ use state_machine::backend::Backend as StateBackend; use executor::RuntimeInfo; use state_machine::{CodeExecutor, DBValue}; use crate::utils::{Meta, db_err, 
meta_keys, read_db, block_id_to_lookup_key, read_meta}; -use crate::changes_tries_storage::DbChangesTrieStorage; +use crate::changes_tries_storage::{DbChangesTrieStorage, DbChangesTrieStorageTransaction}; use client::leaves::{LeafSet, FinalizationDisplaced}; use client::{children, well_known_cache_keys}; use state_db::StateDb; @@ -468,7 +468,10 @@ where Block: BlockT, let (root, transaction) = self.old_state.full_storage_root( top.into_iter().map(|(k, v)| { if k == well_known_keys::CHANGES_TRIE_CONFIG { - changes_trie_config = Decode::decode(&mut &v[..]); + changes_trie_config = Some( + Decode::decode(&mut &v[..]) + .expect("changes trie configuration is encoded properly at genesis") + ); } (k, Some(v)) }), @@ -627,13 +630,14 @@ impl> Backend { let offchain_storage = offchain::LocalStorage::new(db.clone()); let changes_tries_storage = DbChangesTrieStorage::new( db, + columns::META, columns::CHANGES_TRIE, columns::KEY_LOOKUP, columns::HEADER, columns::CACHE, meta, if is_archive_pruning { None } else { Some(MIN_BLOCKS_TO_KEEP_CHANGES_TRIES_FOR) }, - ); + )?; Ok(Backend { storage: Arc::new(storage_db), @@ -783,6 +787,7 @@ impl> Backend { header: &Block::Header, last_finalized: Option, justification: Option, + changes_trie_cache_ops: &mut Option>, finalization_displaced: &mut Option>>, ) -> Result<(Block::Hash, ::Number, bool, bool), client::error::Error> { // TODO: ensure best chain contains this block. 
@@ -790,8 +795,10 @@ impl> Backend { self.ensure_sequential_finalization(header, last_finalized)?; self.note_finalized( transaction, + false, header, *hash, + changes_trie_cache_ops, finalization_displaced, )?; @@ -850,6 +857,7 @@ impl> Backend { let mut meta_updates = Vec::new(); let mut last_finalized_hash = self.blockchain.meta.read().finalized_hash; + let mut changes_trie_cache_ops = None; if !operation.finalized_blocks.is_empty() { for (block, justification) in operation.finalized_blocks { let block_hash = self.blockchain.expect_block_hash_from_id(&block)?; @@ -861,6 +869,7 @@ impl> Backend { &block_header, Some(last_finalized_hash), justification, + &mut changes_trie_cache_ops, &mut finalization_displaced_leaves, )?); last_finalized_hash = block_hash; @@ -926,7 +935,7 @@ impl> Backend { let is_best = pending_block.leaf_state.is_best(); let changes_trie_updates = operation.changes_trie_updates; let changes_trie_config_update = operation.changes_trie_config_update; - let changes_trie_cache_ops = self.changes_tries_storage.commit( + changes_trie_cache_ops = Some(self.changes_tries_storage.commit( &mut transaction, changes_trie_updates, cache::ComplexBlockId::new( @@ -934,9 +943,11 @@ impl> Backend { if number.is_zero() { Zero::zero() } else { number - One::one() }, ), cache::ComplexBlockId::new(hash, number), + header, finalized, changes_trie_config_update, - )?; + changes_trie_cache_ops, + )?); let cache = operation.old_state.release(); // release state reference so that it can be finalized if finalized { @@ -944,8 +955,10 @@ impl> Backend { self.ensure_sequential_finalization(header, Some(last_finalized_hash))?; self.note_finalized( &mut transaction, + true, header, hash, + &mut changes_trie_cache_ops, &mut finalization_displaced_leaves, )?; } else { @@ -1050,17 +1063,17 @@ impl> Backend { fn note_finalized( &self, transaction: &mut DBTransaction, + is_inserted: bool, f_header: &Block::Header, f_hash: Block::Hash, - displaced: &mut Option>> + 
changes_trie_cache_ops: &mut Option>, + displaced: &mut Option>>, ) -> Result<(), client::error::Error> where Block: BlockT, { let f_num = f_header.number().clone(); if self.storage.state_db.best_canonical().map(|c| f_num.saturated_into::() > c).unwrap_or(true) { - let parent_hash = f_header.parent_hash().clone(); - let lookup_key = utils::number_and_hash_to_lookup_key(f_num, f_hash.clone())?; transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, &lookup_key); @@ -1069,7 +1082,15 @@ impl> Backend { apply_state_commit(transaction, commit); if !f_num.is_zero() { - self.changes_tries_storage.prune(transaction, parent_hash, f_hash, f_num)?; + let new_changes_trie_cache_ops = self.changes_tries_storage.finalize( + transaction, + *f_header.parent_hash(), + f_hash, + f_num, + if is_inserted { Some(&f_header) } else { None }, + changes_trie_cache_ops.take(), + )?; + *changes_trie_cache_ops = Some(new_changes_trie_cache_ops); } } @@ -1172,16 +1193,19 @@ impl client::backend::Backend for Backend whe let header = self.blockchain.expect_header(block)?; let mut displaced = None; let commit = |displaced| { + let mut changes_trie_cache_ops = None; let (hash, number, is_best, is_finalized) = self.finalize_block_with_transaction( &mut transaction, &hash, &header, None, justification, + &mut changes_trie_cache_ops, displaced, )?; self.storage.db.write(transaction).map_err(db_err)?; self.blockchain.update_meta(hash, number, is_best, is_finalized); + self.changes_tries_storage.post_commit(changes_trie_cache_ops); Ok(()) }; match commit(&mut displaced) { @@ -1219,6 +1243,7 @@ impl client::backend::Backend for Backend whe match self.storage.state_db.revert_one() { Some(commit) => { apply_state_commit(&mut transaction, commit); + let removed_number = best; let removed = self.blockchain.header(BlockId::Number(best))?.ok_or_else( || client::error::Error::UnknownBlock( format!("Error reverting to {}. 
Block hash not found.", best)))?; @@ -1228,10 +1253,12 @@ impl client::backend::Backend for Backend whe || client::error::Error::UnknownBlock( format!("Error reverting to {}. Block hash not found.", best)))?; let key = utils::number_and_hash_to_lookup_key(best.clone(), &hash)?; + let changes_trie_cache_ops = self.changes_tries_storage.revert(&mut transaction, removed_number)?; transaction.put(columns::META, meta_keys::BEST_BLOCK, &key); transaction.delete(columns::KEY_LOOKUP, removed.hash().as_ref()); children::remove_children(&mut transaction, columns::META, meta_keys::CHILDREN_PREFIX, hash); self.storage.db.write(transaction).map_err(db_err)?; + self.changes_tries_storage.post_commit(Some(changes_trie_cache_ops)); self.blockchain.update_meta(hash, best, true, false); self.blockchain.leaves.write().revert(removed.hash().clone(), removed.number().clone(), removed.parent_hash().clone()); } @@ -1338,17 +1365,18 @@ pub(crate) mod tests { backend: &Backend, number: u64, parent_hash: H256, - changes: Vec<(Vec, Vec)>, + changes: Option, Vec)>>, extrinsics_root: H256, ) -> H256 { use sr_primitives::testing::Digest; - let (changes_root, changes_trie_update) = prepare_changes(changes); - let digest = Digest { - logs: vec![ - DigestItem::ChangesTrieRoot(changes_root), - ], - }; + let mut digest = Digest::default(); + let mut changes_trie_update = Default::default(); + if let Some(changes) = changes { + let (root, update) = prepare_changes(changes); + digest.push(DigestItem::ChangesTrieRoot(root)); + changes_trie_update = update; + } let header = Header { number, parent_hash, @@ -1667,16 +1695,16 @@ pub(crate) mod tests { #[test] fn tree_route_works() { let backend = Backend::::new_test(1000, 100); - let block0 = insert_header(&backend, 0, Default::default(), Vec::new(), Default::default()); + let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); // fork from genesis: 3 prong. 
- let a1 = insert_header(&backend, 1, block0, Vec::new(), Default::default()); - let a2 = insert_header(&backend, 2, a1, Vec::new(), Default::default()); - let a3 = insert_header(&backend, 3, a2, Vec::new(), Default::default()); + let a1 = insert_header(&backend, 1, block0, None, Default::default()); + let a2 = insert_header(&backend, 2, a1, None, Default::default()); + let a3 = insert_header(&backend, 3, a2, None, Default::default()); // fork from genesis: 2 prong. - let b1 = insert_header(&backend, 1, block0, Vec::new(), H256::from([1; 32])); - let b2 = insert_header(&backend, 2, b1, Vec::new(), Default::default()); + let b1 = insert_header(&backend, 1, block0, None, H256::from([1; 32])); + let b2 = insert_header(&backend, 2, b1, None, Default::default()); { let tree_route = ::client::blockchain::tree_route( @@ -1731,8 +1759,8 @@ pub(crate) mod tests { fn tree_route_child() { let backend = Backend::::new_test(1000, 100); - let block0 = insert_header(&backend, 0, Default::default(), Vec::new(), Default::default()); - let block1 = insert_header(&backend, 1, block0, Vec::new(), Default::default()); + let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); + let block1 = insert_header(&backend, 1, block0, None, Default::default()); { let tree_route = ::client::blockchain::tree_route( @@ -1768,17 +1796,17 @@ pub(crate) mod tests { #[test] fn test_leaves_pruned_on_finality() { let backend: Backend = Backend::new_test(10, 10); - let block0 = insert_header(&backend, 0, Default::default(), Default::default(), Default::default()); + let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); - let block1_a = insert_header(&backend, 1, block0, Default::default(), Default::default()); - let block1_b = insert_header(&backend, 1, block0, Default::default(), [1; 32].into()); - let block1_c = insert_header(&backend, 1, block0, Default::default(), [2; 32].into()); + let block1_a = insert_header(&backend, 1, block0, None, 
Default::default()); + let block1_b = insert_header(&backend, 1, block0, None, [1; 32].into()); + let block1_c = insert_header(&backend, 1, block0, None, [2; 32].into()); assert_eq!(backend.blockchain().leaves().unwrap(), vec![block1_a, block1_b, block1_c]); - let block2_a = insert_header(&backend, 2, block1_a, Default::default(), Default::default()); - let block2_b = insert_header(&backend, 2, block1_b, Default::default(), Default::default()); - let block2_c = insert_header(&backend, 2, block1_b, Default::default(), [1; 32].into()); + let block2_a = insert_header(&backend, 2, block1_a, None, Default::default()); + let block2_b = insert_header(&backend, 2, block1_b, None, Default::default()); + let block2_c = insert_header(&backend, 2, block1_b, None, [1; 32].into()); assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a, block2_b, block2_c, block1_c]); @@ -1805,8 +1833,8 @@ pub(crate) mod tests { let backend = Backend::::new_test(10, 10); - let block0 = insert_header(&backend, 0, Default::default(), Default::default(), Default::default()); - let _ = insert_header(&backend, 1, block0, Default::default(), Default::default()); + let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); + let _ = insert_header(&backend, 1, block0, None, Default::default()); let justification = Some(vec![1, 2, 3]); backend.finalize_block(BlockId::Number(1), justification.clone()).unwrap(); @@ -1821,9 +1849,9 @@ pub(crate) mod tests { fn test_finalize_multiple_blocks_in_single_op() { let backend = Backend::::new_test(10, 10); - let block0 = insert_header(&backend, 0, Default::default(), Default::default(), Default::default()); - let block1 = insert_header(&backend, 1, block0, Default::default(), Default::default()); - let block2 = insert_header(&backend, 2, block1, Default::default(), Default::default()); + let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); + let block1 = insert_header(&backend, 1, block0, None, 
Default::default()); + let block2 = insert_header(&backend, 2, block1, None, Default::default()); { let mut op = backend.begin_operation().unwrap(); backend.begin_state_operation(&mut op, BlockId::Hash(block0)).unwrap(); @@ -1837,9 +1865,9 @@ pub(crate) mod tests { fn test_finalize_non_sequential() { let backend = Backend::::new_test(10, 10); - let block0 = insert_header(&backend, 0, Default::default(), Default::default(), Default::default()); - let block1 = insert_header(&backend, 1, block0, Default::default(), Default::default()); - let block2 = insert_header(&backend, 2, block1, Default::default(), Default::default()); + let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); + let block1 = insert_header(&backend, 1, block0, None, Default::default()); + let block2 = insert_header(&backend, 2, block1, None, Default::default()); { let mut op = backend.begin_operation().unwrap(); backend.begin_state_operation(&mut op, BlockId::Hash(block0)).unwrap(); diff --git a/core/client/db/src/light.rs b/core/client/db/src/light.rs index def898b5a14be..f530ffe871102 100644 --- a/core/client/db/src/light.rs +++ b/core/client/db/src/light.rs @@ -108,7 +108,7 @@ impl LightStorage Ok(LightStorage { db, meta: RwLock::new(meta), - cache: Arc::new(DbCacheSync(RwLock::new(cache))), + cache: Arc::new(DbCacheSync::new(cache)), leaves: RwLock::new(leaves), }) } @@ -394,7 +394,7 @@ impl LightBlockchainStorage for LightStorage fn import_header( &self, header: Block::Header, - cache_at: HashMap>, + mut cache_at: HashMap>, leaf_state: NewBlockState, aux_ops: Vec<(Vec, Option>)>, ) -> ClientResult<()> { @@ -429,7 +429,7 @@ impl LightBlockchainStorage for LightStorage let is_genesis = number.is_zero(); if is_genesis { - self.cache.0.write().set_genesis_hash(hash); + self.cache.cache().write().set_genesis_hash(hash); transaction.put(columns::META, meta_keys::GENESIS_HASH, hash.as_ref()); } @@ -448,11 +448,18 @@ impl LightBlockchainStorage for LightStorage )?; } + 
// update changes trie configuration cache + if !cache_at.contains_key(&well_known_cache_keys::CHANGES_TRIE_CONFIG) { + if let Some(new_configuration) = crate::changes_tries_storage::extract_new_configuration(&header) { + cache_at.insert(well_known_cache_keys::CHANGES_TRIE_CONFIG, new_configuration.encode()); + } + } + { let mut leaves = self.leaves.write(); let displaced_leaf = leaves.import(hash, number, parent_hash); - let mut cache = self.cache.0.write(); + let mut cache = self.cache.cache().write(); let cache_ops = cache.transaction(&mut transaction) .on_block_insert( ComplexBlockId::new(*header.parent_hash(), if number.is_zero() { Zero::zero() } else { number - One::one() }), @@ -480,7 +487,9 @@ impl LightBlockchainStorage for LightStorage return Err(e); } - cache.commit(cache_ops); + cache.commit(cache_ops) + .expect("only fails if cache with given name isn't loaded yet;\ + cache is already loaded because there are cache_ops; qed"); } self.update_meta(hash, number, leaf_state.is_best(), finalized); @@ -527,7 +536,7 @@ impl LightBlockchainStorage for LightStorage let number = *header.number(); self.note_finalized(&mut transaction, &header, hash.clone(), &mut displaced)?; { - let mut cache = self.cache.0.write(); + let mut cache = self.cache.cache().write(); let cache_ops = cache.transaction(&mut transaction) .on_block_finalize( ComplexBlockId::new(*header.parent_hash(), if number.is_zero() { Zero::zero() } else { number - One::one() }), @@ -541,7 +550,9 @@ impl LightBlockchainStorage for LightStorage } return Err(e); } - cache.commit(cache_ops); + cache.commit(cache_ops) + .expect("only fails if cache with given name isn't loaded yet;\ + cache is already loaded because there are cache_ops; qed"); } self.update_meta(hash, header.number().clone(), false, true); @@ -570,7 +581,8 @@ fn cht_key>(cht_type: u8, block: N) -> ClientResult<[u8; 5]> { #[cfg(test)] pub(crate) mod tests { use client::cht; - use sr_primitives::generic::DigestItem; + use 
primitives::ChangesTrieConfiguration; + use sr_primitives::generic::{BlockId, DigestItem, ChangesTrieSignal}; use sr_primitives::testing::{H256 as Hash, Header, Block as RawBlock, ExtrinsicWrapper}; use super::*; @@ -1113,4 +1125,33 @@ pub(crate) mod tests { // after genesis is inserted + cache is initialized => Some assert_eq!(db.cache().get_at(b"test", &BlockId::Number(0)), Some(((0, genesis_hash.unwrap()), None, vec![42]))); } + + #[test] + fn changes_trie_configuration_is_tracked_on_light_client() { + let db = LightStorage::::new_test(); + + let new_config = Some(ChangesTrieConfiguration::new(2, 2)); + + // insert block#0 && block#1 (no value for cache is provided) + let hash0 = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); + assert_eq!( + db.cache().get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, &BlockId::Number(0)) + .map(|(_, _, v)| ChangesTrieConfiguration::decode(&mut &v[..]).unwrap()), + None, + ); + + // insert configuration at block#1 (starts from block#2) + insert_block(&db, HashMap::new(), || { + let mut header = default_header(&hash0, 1); + header.digest_mut().push( + DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration(new_config.clone())) + ); + header + }); + assert_eq!( + db.cache().get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, &BlockId::Number(1)) + .map(|(_, _, v)| Option::::decode(&mut &v[..]).unwrap()), + Some(new_config), + ); + } } diff --git a/core/client/db/src/utils.rs b/core/client/db/src/utils.rs index 0dd42a5c63f3a..a5850f6e56df1 100644 --- a/core/client/db/src/utils.rs +++ b/core/client/db/src/utils.rs @@ -53,6 +53,8 @@ pub mod meta_keys { pub const FINALIZED_BLOCK: &[u8; 5] = b"final"; /// Meta information prefix for list-based caches. pub const CACHE_META_PREFIX: &[u8; 5] = b"cache"; + /// Meta information for changes tries key. + pub const CHANGES_TRIES_META: &[u8; 5] = b"ctrie"; /// Genesis block hash. pub const GENESIS_HASH: &[u8; 3] = b"gen"; /// Leaves prefix list key. 
diff --git a/core/client/src/backend.rs b/core/client/src/backend.rs index ed276709bf209..2a3c4a3b2731f 100644 --- a/core/client/src/backend.rs +++ b/core/client/src/backend.rs @@ -217,6 +217,7 @@ pub trait OffchainStorage: Clone + Send + Sync { } /// Changes trie configuration range. +#[derive(Debug)] pub struct ChangesTrieConfigurationRange { /// Zero block of this configuration. First trie that uses this configuration is build at the next block. pub zero: (NumberFor, Block::Hash), @@ -234,13 +235,10 @@ pub trait PrunableStateChangesTrieStorage: fn storage(&self) -> &dyn StateChangesTrieStorage>; /// Get coniguration at given block. fn configuration_at(&self, at: &BlockId) -> error::Result>; - /// Get number block of oldest, non-pruned changes trie. - fn oldest_changes_trie_block( - &self, - zero: NumberFor, - config: ChangesTrieConfiguration, - best_finalized: NumberFor, - ) -> NumberFor; + /// Get end block (inclusive) of oldest pruned max-level (or skewed) digest trie blocks range. + /// It is guaranteed that we have no any changes tries before (and including) this block. + /// It is guaranteed that all existing changes tries after this block are not yet pruned (if created). + fn oldest_pruned_digest_range_end(&self) -> NumberFor; } /// Mark for all Backend implementations, that are making use of state data, stored locally. diff --git a/core/client/src/client.rs b/core/client/src/client.rs index 75eda8aada839..907bb88640adb 100644 --- a/core/client/src/client.rs +++ b/core/client/src/client.rs @@ -522,9 +522,8 @@ impl Client where // TODO (#3282): we only work with the last config range here!!! Need to stabilize pruning before fixing this. 
match configs.pop() { - Some((zero, _, config)) => { - let finalized_number = self.backend.blockchain().info().finalized_number; - let oldest = storage.oldest_changes_trie_block(zero, config, finalized_number); + Some((zero, _, _)) => { + let oldest = storage.oldest_pruned_digest_range_end(); let oldest = ::std::cmp::max(zero + One::one(), oldest); let first = ::std::cmp::max(first, oldest); Ok(Some((first, last))) diff --git a/core/client/src/light/backend.rs b/core/client/src/light/backend.rs index 55551179f8cab..9f473e1a889a0 100644 --- a/core/client/src/light/backend.rs +++ b/core/client/src/light/backend.rs @@ -21,6 +21,8 @@ use std::collections::HashMap; use std::sync::{Arc, Weak}; use parking_lot::{RwLock, Mutex}; +use parity_codec::{Decode, Encode}; +use primitives::{ChangesTrieConfiguration, storage::well_known_keys}; use sr_primitives::{generic::BlockId, Justification, StorageOverlay, ChildrenStorageOverlay}; use state_machine::{Backend as StateBackend, TrieBackend, backend::InMemory as InMemoryState}; use sr_primitives::traits::{Block as BlockT, NumberFor, Zero, Header}; @@ -54,6 +56,7 @@ pub struct ImportOperation { finalized_blocks: Vec>, set_head: Option>, storage_update: Option>, + changes_trie_config_update: Option>, _phantom: ::std::marker::PhantomData<(S, F)>, } @@ -126,6 +129,7 @@ impl ClientBackend for Backend where finalized_blocks: Vec::new(), set_head: None, storage_update: None, + changes_trie_config_update: None, _phantom: Default::default(), }) } @@ -147,6 +151,9 @@ impl ClientBackend for Backend where if let Some(header) = operation.header { let is_genesis_import = header.number().is_zero(); + if let Some(new_config) = operation.changes_trie_config_update { + operation.cache.insert(well_known_cache_keys::CHANGES_TRIE_CONFIG, new_config.encode()); + } self.blockchain.storage().import_header( header, operation.cache, @@ -286,6 +293,13 @@ where fn reset_storage(&mut self, top: StorageOverlay, children: ChildrenStorageOverlay) -> ClientResult 
{ check_genesis_storage(&top, &children)?; + // changes trie configuration + let changes_trie_config = top.iter() + .find(|(k, _)| &k[..] == well_known_keys::CHANGES_TRIE_CONFIG) + .map(|(_, v)| Decode::decode(&mut &v[..]) + .expect("changes trie configuration is encoded properly at genesis")); + self.changes_trie_config_update = Some(changes_trie_config); + // this is only called when genesis block is imported => shouldn't be performance bottleneck let mut storage: HashMap>, StorageOverlay> = HashMap::new(); storage.insert(None, top); diff --git a/core/client/src/light/blockchain.rs b/core/client/src/light/blockchain.rs index 9b1fe0ae9122f..c0d9810ab432b 100644 --- a/core/client/src/light/blockchain.rs +++ b/core/client/src/light/blockchain.rs @@ -36,9 +36,7 @@ use crate::light::fetcher::{Fetcher, RemoteBodyRequest, RemoteHeaderRequest}; /// Light client blockchain storage. pub trait Storage: AuxStore + BlockchainHeaderBackend { /// Store new header. Should refuse to revert any finalized blocks. - /// - /// Takes new authorities, the leaf state of the new block, and - /// any auxiliary storage updates to place in the same operation. 
+ fn import_header( &self, header: Block::Header, diff --git a/core/primitives/src/changes_trie.rs b/core/primitives/src/changes_trie.rs index 660f14abe4236..a963ea73859f5 100644 --- a/core/primitives/src/changes_trie.rs +++ b/core/primitives/src/changes_trie.rs @@ -92,7 +92,7 @@ impl ChangesTrieConfiguration { block: Number, ) -> Option where - Number: Clone + From + PartialEq + + Number: Clone + From + PartialOrd + PartialEq + ::rstd::ops::Add + ::rstd::ops::Sub + ::rstd::ops::Div + ::rstd::ops::Mul + Zero, { @@ -100,23 +100,27 @@ impl ChangesTrieConfiguration { return None; } + if block <= zero { + return None; + } + let max_digest_interval: Number = self.max_digest_interval().into(); let max_digests_since_zero = (block.clone() - zero.clone()) / max_digest_interval.clone(); - let last_max_digest_block = zero + max_digests_since_zero * max_digest_interval.clone(); - if last_max_digest_block.is_zero() { + let last_max_digest_block = zero.clone() + max_digests_since_zero * max_digest_interval.clone(); + if last_max_digest_block == zero { None } else { Some(last_max_digest_block) } } /// Returns max level digest block number that must be created at block >= passed block number. 
- pub fn next_max_level_digest_block( + pub fn next_max_level_digest_range( &self, zero: Number, - block: Number, - ) -> Option + mut block: Number, + ) -> Option<(Number, Number)> where - Number: Clone + From + PartialEq + + Number: Clone + From + PartialOrd + PartialEq + ::rstd::ops::Add + ::rstd::ops::Sub + ::rstd::ops::Div + ::rstd::ops::Mul, { @@ -124,13 +128,20 @@ impl ChangesTrieConfiguration { return None; } + if block <= zero { + block = zero.clone() + 1.into(); + } + let max_digest_interval: Number = self.max_digest_interval().into(); let max_digests_since_zero = (block.clone() - zero.clone()) / max_digest_interval.clone(); + if max_digests_since_zero == 0.into() { + return Some((zero.clone() + 1.into(), zero + max_digest_interval)); + } let last_max_digest_block = zero + max_digests_since_zero * max_digest_interval.clone(); Some(if block == last_max_digest_block { - block + (block.clone() - max_digest_interval + 1.into(), block) } else { - last_max_digest_block + max_digest_interval + (last_max_digest_block.clone() + 1.into(), last_max_digest_block + max_digest_interval) }) } @@ -245,15 +256,18 @@ mod tests { } #[test] - fn next_max_level_digest_block_works() { - assert_eq!(config(0, 0).next_max_level_digest_block(0u64, 16), None); - assert_eq!(config(1, 1).next_max_level_digest_block(0u64, 16), None); - assert_eq!(config(2, 1).next_max_level_digest_block(0u64, 16), Some(16)); - assert_eq!(config(4, 1).next_max_level_digest_block(0u64, 16), Some(16)); - assert_eq!(config(32, 1).next_max_level_digest_block(0u64, 16), Some(32)); - assert_eq!(config(2, 3).next_max_level_digest_block(0u64, 10), Some(16)); - assert_eq!(config(2, 3).next_max_level_digest_block(0u64, 8), Some(8)); - // TODO: more test cases + fn next_max_level_digest_range_works() { + assert_eq!(config(0, 0).next_max_level_digest_range(0u64, 16), None); + assert_eq!(config(1, 1).next_max_level_digest_range(0u64, 16), None); + assert_eq!(config(2, 1).next_max_level_digest_range(0u64, 16), 
Some((15, 16))); + assert_eq!(config(4, 1).next_max_level_digest_range(0u64, 16), Some((13, 16))); + assert_eq!(config(32, 1).next_max_level_digest_range(0u64, 16), Some((1, 32))); + assert_eq!(config(2, 3).next_max_level_digest_range(0u64, 10), Some((9, 16))); + assert_eq!(config(2, 3).next_max_level_digest_range(0u64, 8), Some((1, 8))); + assert_eq!(config(2, 1).next_max_level_digest_range(1u64, 1), Some((2, 3))); + assert_eq!(config(2, 2).next_max_level_digest_range(7u64, 9), Some((8, 11))); + + assert_eq!(config(2, 2).next_max_level_digest_range(7u64, 5), Some((8, 11))); } #[test] @@ -265,8 +279,8 @@ mod tests { assert_eq!(config(32, 1).prev_max_level_digest_block(0u64, 16), None); assert_eq!(config(2, 3).prev_max_level_digest_block(0u64, 10), Some(8)); assert_eq!(config(2, 3).prev_max_level_digest_block(0u64, 8), Some(8)); - // TODO: more test cases - } + assert_eq!(config(2, 2).prev_max_level_digest_block(7u64, 8), None); - // TODO: test that it doesn't panic when zero > block + assert_eq!(config(2, 2).prev_max_level_digest_block(7u64, 5), None); + } } diff --git a/core/state-machine/src/changes_trie/build.rs b/core/state-machine/src/changes_trie/build.rs index c60aa8d25ccc5..c7205acbf7b1a 100644 --- a/core/state-machine/src/changes_trie/build.rs +++ b/core/state-machine/src/changes_trie/build.rs @@ -125,7 +125,8 @@ fn prepare_digest_input<'a, H, Number>( { let build_skewed_digest = config.end.as_ref() == Some(&block); let block_for_digest = if build_skewed_digest { - config.config.next_max_level_digest_block(config.zero.clone(), block.clone()) + config.config.next_max_level_digest_range(config.zero.clone(), block.clone()) + .map(|(_, end)| end) .unwrap_or_else(|| block.clone()) } else { block.clone() diff --git a/core/state-machine/src/changes_trie/mod.rs b/core/state-machine/src/changes_trie/mod.rs index c4c666b432173..3d96210cf5ee0 100644 --- a/core/state-machine/src/changes_trie/mod.rs +++ b/core/state-machine/src/changes_trie/mod.rs @@ -48,7 +48,7 @@ pub 
use self::changes_iterator::{ key_changes, key_changes_proof, key_changes_proof_check, key_changes_proof_check_with_db, }; -pub use self::prune::{prune, oldest_non_pruned_trie}; +pub use self::prune::prune; use std::convert::TryInto; use hash_db::{Hasher, Prefix}; diff --git a/core/state-machine/src/changes_trie/prune.rs b/core/state-machine/src/changes_trie/prune.rs index 52e351bf8423e..fab0f6ac7f975 100644 --- a/core/state-machine/src/changes_trie/prune.rs +++ b/core/state-machine/src/changes_trie/prune.rs @@ -22,47 +22,20 @@ use log::warn; use num_traits::One; use crate::proving_backend::ProvingBackendEssence; use crate::trie_backend_essence::TrieBackendEssence; -use crate::changes_trie::{AnchorBlockId, Configuration, Storage, BlockNumber}; +use crate::changes_trie::{AnchorBlockId, Storage, BlockNumber}; use crate::changes_trie::storage::TrieBackendAdapter; -/// Get number of oldest block for which changes trie is not pruned -/// given changes trie configuration, pruning parameter and number of -/// best finalized block. -pub fn oldest_non_pruned_trie( - zero: Number, - config: &Configuration, - min_blocks_to_keep: Number, - best_finalized_block: Number, -) -> Number { - let max_digest_interval = config.max_digest_interval(); - let best_finalized_block_rem = - (best_finalized_block.clone() - zero.clone()) % max_digest_interval.into(); - let max_digest_block = best_finalized_block - best_finalized_block_rem; - match pruning_range(zero.clone(), config, min_blocks_to_keep, max_digest_block) { - Some((_, last_pruned_block)) => last_pruned_block + One::one(), - None => zero + One::one(), - } -} - /// Prune obsolete changes tries. Pruning happens at the same block, where highest /// level digest is created. Pruning guarantees to save changes tries for last /// `min_blocks_to_keep` blocks. We only prune changes tries at `max_digest_interval` /// ranges. 
-pub fn prune, H: Hasher, Number: BlockNumber, F: FnMut(H::Out)>( - zero: Number, - config: &Configuration, - storage: &S, - min_blocks_to_keep: Number, +pub fn prune( + storage: &Storage, + first: Number, + last: Number, current_block: &AnchorBlockId, mut remove_trie_node: F, ) { - // select range for pruning - let range = pruning_range(zero, config, min_blocks_to_keep, current_block.number.clone()); - let (first, last) = match range { - Some((first, last)) => (first, last), - None => return, - }; - // delete changes trie for every block in range let mut block = first; loop { @@ -103,107 +76,31 @@ pub fn prune, H: Hasher, Number: BlockNumber, F: FnMut(H:: } } -/// Select blocks range (inclusive from both ends) for pruning changes tries in. -fn pruning_range( - zero: Number, - config: &Configuration, - min_blocks_to_keep: Number, - block: Number, -) -> Option<(Number, Number)> { - // compute number of changes tries we actually want to keep - let (prune_interval, blocks_to_keep) = if config.is_digest_build_enabled() { - // we only CAN prune at block where max-level-digest is created - let max_digest_interval = match config.digest_level_at_block(zero.clone(), block.clone()) { - Some((digest_level, digest_interval, _)) if digest_level == config.digest_levels => - digest_interval, - _ => return None, - }; - - // compute maximal number of high-level digests to keep - let max_digest_intervals_to_keep = max_digest_intervals_to_keep(min_blocks_to_keep, max_digest_interval); - - // number of blocks BEFORE current block where changes tries are not pruned - ( - max_digest_interval, - max_digest_intervals_to_keep.checked_mul(&max_digest_interval.into()) - ) - } else { - ( - 1, - Some(min_blocks_to_keep) - ) - }; - - // last block for which changes trie is pruned - let last_block_to_prune = match blocks_to_keep.and_then(|b| block.checked_sub(&b)) { - Some(last_block_to_prune) => if last_block_to_prune > zero { - last_block_to_prune - } else { - return None; - }, - _ => return 
None, - }; - let first_block_to_prune = last_block_to_prune.checked_sub(&prune_interval.into()); - - first_block_to_prune.map(|first| (first + One::one(), last_block_to_prune)) -} - -/// Select pruning delay for the changes tries. To make sure we could build a changes -/// trie at block B, we need an access to previous: -/// max_digest_interval = config.digest_interval ^ config.digest_levels -/// blocks. So we can only prune blocks that are earlier than B - max_digest_interval. -/// The pruning_delay stands for number of max_digest_interval-s that we want to keep: -/// 0 or 1: means that only last changes trie is guaranteed to exists; -/// 2: the last changes trie + previous changes trie -/// ... -fn max_digest_intervals_to_keep( - min_blocks_to_keep: Number, - max_digest_interval: u32, -) -> Number { - // config.digest_level_at_block ensures that it is not zero - debug_assert!(max_digest_interval != 0); - - let max_digest_intervals_to_keep = min_blocks_to_keep / max_digest_interval.into(); - if max_digest_intervals_to_keep.is_zero() { - One::one() - } else { - max_digest_intervals_to_keep - } -} - #[cfg(test)] mod tests { use std::collections::HashSet; use trie::MemoryDB; - use primitives::Blake2Hasher; + use primitives::{H256, Blake2Hasher}; use crate::backend::insert_into_memory_db; use crate::changes_trie::storage::InMemoryStorage; use super::*; - fn config(interval: u32, levels: u32) -> Configuration { - Configuration { - digest_interval: interval, - digest_levels: levels, - } - } - - fn prune_by_collect, H: Hasher>( - config: &Configuration, - storage: &S, - min_blocks_to_keep: u64, - zero: u64, + fn prune_by_collect( + storage: &Storage, + first: u64, + last: u64, current_block: u64, - ) -> HashSet { + ) -> HashSet { let mut pruned_trie_nodes = HashSet::new(); let anchor = AnchorBlockId { hash: Default::default(), number: current_block }; - prune(zero, config, storage, min_blocks_to_keep, &anchor, + prune(storage, first, last, &anchor, |node| { 
pruned_trie_nodes.insert(node); }); pruned_trie_nodes } #[test] fn prune_works() { - fn prepare_storage(zero: u64) -> InMemoryStorage { + fn prepare_storage() -> InMemoryStorage { let mut mdb1 = MemoryDB::::default(); let root1 = insert_into_memory_db::(&mut mdb1, vec![(vec![10], vec![20])]).unwrap(); let mut mdb2 = MemoryDB::::default(); @@ -222,135 +119,28 @@ mod tests { vec![(vec![15], vec![25])], ).unwrap(); let storage = InMemoryStorage::new(); - storage.insert(zero + 65, root1, mdb1); - storage.insert(zero + 66, root2, mdb2); - storage.insert(zero + 67, root3, mdb3); - storage.insert(zero + 68, root4, mdb4); + storage.insert(65, root1, mdb1); + storage.insert(66, root2, mdb2); + storage.insert(67, root3, mdb3); + storage.insert(68, root4, mdb4); storage } - fn test_with_zero(zero: u64) { - // l1-digest is created every 2 blocks - // l2-digest is created every 4 blocks - // we do not want to keep any additional changes tries - // => only one l2-digest is saved AND it is pruned once next is created - let config = Configuration { digest_interval: 2, digest_levels: 2 }; - let storage = prepare_storage(zero); - assert!(prune_by_collect(&config, &storage, 0, zero, zero + 69).is_empty()); - assert!(prune_by_collect(&config, &storage, 0, zero, zero + 70).is_empty()); - assert!(prune_by_collect(&config, &storage, 0, zero, zero + 71).is_empty()); - let non_empty = prune_by_collect(&config, &storage, 0, zero, zero + 72); - assert!(!non_empty.is_empty()); - storage.remove_from_storage(&non_empty); - assert!(storage.into_mdb().drain().is_empty()); - - // l1-digest is created every 2 blocks - // l2-digest is created every 4 blocks - // we want keep 1 additional changes tries - let config = Configuration { digest_interval: 2, digest_levels: 2 }; - let storage = prepare_storage(zero); - assert!(prune_by_collect(&config, &storage, 8, zero, zero + 69).is_empty()); - assert!(prune_by_collect(&config, &storage, 8, zero, zero + 70).is_empty()); - assert!(prune_by_collect(&config, 
&storage, 8, zero, zero + 71).is_empty()); - assert!(prune_by_collect(&config, &storage, 8, zero, zero + 72).is_empty()); - assert!(prune_by_collect(&config, &storage, 8, zero, zero + 73).is_empty()); - assert!(prune_by_collect(&config, &storage, 8, zero, zero + 74).is_empty()); - assert!(prune_by_collect(&config, &storage, 8, zero, zero + 75).is_empty()); - let non_empty = prune_by_collect(&config, &storage, 8, zero, zero + 76); - assert!(!non_empty.is_empty()); - storage.remove_from_storage(&non_empty); - assert!(storage.into_mdb().drain().is_empty()); - - // l1-digest is created every 2 blocks - // we want keep 2 additional changes tries - let config = Configuration { digest_interval: 2, digest_levels: 1 }; - let storage = prepare_storage(zero); - assert!(prune_by_collect(&config, &storage, 4, zero, zero + 69).is_empty()); - let non_empty = prune_by_collect(&config, &storage, 4, zero, zero + 70); - assert!(!non_empty.is_empty()); - storage.remove_from_storage(&non_empty); - assert!(prune_by_collect(&config, &storage, 4, zero, zero + 71).is_empty()); - let non_empty = prune_by_collect(&config, &storage, 4, zero, zero + 72); - assert!(!non_empty.is_empty()); - storage.remove_from_storage(&non_empty); - assert!(storage.into_mdb().drain().is_empty()); - } - - test_with_zero(0); - test_with_zero(1023); - test_with_zero(1024); - } - - #[test] - fn pruning_range_works() { - fn test_with_zero(zero: u64) { - // DIGESTS ARE NOT CREATED + NO TRIES ARE PRUNED - assert_eq!(pruning_range(zero, &config(10, 0), 2u64, zero + 2u64), None); - - // DIGESTS ARE NOT CREATED + SOME TRIES ARE PRUNED - assert_eq!(pruning_range(zero, &config(10, 0), 100u64, zero + 110u64), Some((zero + 10, zero + 10))); - assert_eq!(pruning_range(zero, &config(10, 0), 100u64, zero + 210u64), Some((zero + 110, zero + 110))); - - // DIGESTS ARE CREATED + NO TRIES ARE PRUNED - - assert_eq!(pruning_range(zero, &config(10, 2), 2u64, zero + 0u64), None); - assert_eq!(pruning_range(zero, &config(10, 2), 30u64, 
zero + 100u64), None); - assert_eq!(pruning_range(zero, &config(::std::u32::MAX, 2), 1u64, zero + 1024u64), None); - assert_eq!(pruning_range(zero, &config(::std::u32::MAX, 2), ::std::u64::MAX, zero + 1024u64), None); - assert_eq!(pruning_range(zero, &config(32, 2), 2048u64, zero + 512u64), None); - assert_eq!(pruning_range(zero, &config(32, 2), 2048u64, zero + 1024u64), None); - - // DIGESTS ARE CREATED + SOME TRIES ARE PRUNED - - // when we do not want to keep any highest-level-digests - // (system forces to keep at least one) - assert_eq!(pruning_range(zero, &config(4, 2), 0u64, zero + 32u64), Some((zero + 1, zero + 16))); - assert_eq!(pruning_range(zero, &config(4, 2), 0u64, zero + 64u64), Some((zero + 33, zero + 48))); - // when we want to keep 1 (last) highest-level-digest - assert_eq!(pruning_range(zero, &config(4, 2), 16u64, zero + 32u64), Some((zero + 1, zero + 16))); - assert_eq!(pruning_range(zero, &config(4, 2), 16u64, zero + 64u64), Some((zero + 33, zero + 48))); - // when we want to keep 1 (last) + 1 additional level digests - assert_eq!(pruning_range(zero, &config(32, 2), 4096u64, zero + 5120u64), Some((zero + 1, zero + 1024))); - assert_eq!(pruning_range(zero, &config(32, 2), 4096u64, zero + 6144u64), Some((zero + 1025, zero + 2048))); - } - - test_with_zero(0); - test_with_zero(1023); - test_with_zero(1024); - } - - #[test] - fn max_digest_intervals_to_keep_works() { - assert_eq!(max_digest_intervals_to_keep(1024u64, 1025), 1u64); - assert_eq!(max_digest_intervals_to_keep(1024u64, 1023), 1u64); - assert_eq!(max_digest_intervals_to_keep(1024u64, 512), 2u64); - assert_eq!(max_digest_intervals_to_keep(1024u64, 511), 2u64); - assert_eq!(max_digest_intervals_to_keep(1024u64, 100), 10u64); - } - - #[test] - fn oldest_non_pruned_trie_works() { - fn test_with_zero(zero: u64) { - // when digests are not created at all - assert_eq!(oldest_non_pruned_trie(zero, &config(0, 0), 100u64, zero + 10u64), zero + 1); - assert_eq!(oldest_non_pruned_trie(zero, 
&config(0, 0), 100u64, zero + 110u64), zero + 11); - - // when only l1 digests are created - assert_eq!(oldest_non_pruned_trie(zero, &config(100, 1), 100u64, zero +50u64), zero + 1); - assert_eq!(oldest_non_pruned_trie(zero, &config(100, 1), 100u64, zero +110u64), zero + 1); - assert_eq!(oldest_non_pruned_trie(zero, &config(100, 1), 100u64, zero +210u64), zero + 101); - - // when l2 digests are created - assert_eq!(oldest_non_pruned_trie(zero, &config(100, 2), 100u64, zero + 50u64), zero + 1); - assert_eq!(oldest_non_pruned_trie(zero, &config(100, 2), 100u64, zero + 110u64), zero + 1); - assert_eq!(oldest_non_pruned_trie(zero, &config(100, 2), 100u64, zero + 210u64), zero + 1); - assert_eq!(oldest_non_pruned_trie(zero, &config(100, 2), 100u64, zero + 10110u64), zero + 1); - assert_eq!(oldest_non_pruned_trie(zero, &config(100, 2), 100u64, zero + 20110u64), zero + 10001); - } - - test_with_zero(0); - test_with_zero(100); - test_with_zero(101); + let storage = prepare_storage(); + assert!(prune_by_collect(&storage, 20, 30, 90).is_empty()); + assert!(!storage.into_mdb().drain().is_empty()); + + let storage = prepare_storage(); + let prune60_65 = prune_by_collect(&storage, 60, 65, 90); + assert!(!prune60_65.is_empty()); + storage.remove_from_storage(&prune60_65); + assert!(!storage.into_mdb().drain().is_empty()); + + let storage = prepare_storage(); + let prune60_70 = prune_by_collect(&storage, 60, 70, 90); + assert!(!prune60_70.is_empty()); + storage.remove_from_storage(&prune60_70); + assert!(storage.into_mdb().drain().is_empty()); } } diff --git a/core/state-machine/src/lib.rs b/core/state-machine/src/lib.rs index e5bcbe6842e6d..d6c0e2b5be136 100644 --- a/core/state-machine/src/lib.rs +++ b/core/state-machine/src/lib.rs @@ -52,7 +52,6 @@ pub use changes_trie::{ key_changes, key_changes_proof, key_changes_proof_check, key_changes_proof_check_with_db, prune as prune_changes_tries, - oldest_non_pruned_trie as oldest_non_pruned_changes_trie, disabled_state as 
disabled_changes_trie_state, }; pub use overlayed_changes::OverlayedChanges; From 27eecae465d0ae41205c373a10335b16a48f09ab Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 14 Aug 2019 16:46:58 +0300 Subject: [PATCH 36/63] BlockIdOrHeader isn't really required --- core/client/db/src/cache/list_cache.rs | 121 ++++------------ core/client/db/src/cache/mod.rs | 104 ++------------ core/client/db/src/changes_tries_storage.rs | 151 ++++++++++++-------- core/client/db/src/light.rs | 8 +- 4 files changed, 129 insertions(+), 255 deletions(-) diff --git a/core/client/db/src/cache/list_cache.rs b/core/client/db/src/cache/list_cache.rs index a040004dbf70c..10942d926f228 100644 --- a/core/client/db/src/cache/list_cache.rs +++ b/core/client/db/src/cache/list_cache.rs @@ -46,7 +46,7 @@ use log::warn; use client::error::{Error as ClientError, Result as ClientResult}; use sr_primitives::traits::{Block as BlockT, NumberFor, Zero, Bounded, CheckedSub}; -use crate::cache::{CacheItemT, ComplexBlockId, BlockIdOrHeader, EntryType}; +use crate::cache::{CacheItemT, ComplexBlockId, EntryType}; use crate::cache::list_entry::{Entry, StorageEntry}; use crate::cache::list_storage::{Storage, StorageTransaction, Metadata}; @@ -147,15 +147,15 @@ impl> Lis /// Get value valid at block. pub fn value_at_block( &self, - at: BlockIdOrHeader, + at: ComplexBlockId, ) -> ClientResult, Option>, T)>> { - let head = if at.number() <= self.best_finalized_block.number { + let head = if at.number <= self.best_finalized_block.number { // if the block is older than the best known finalized block // => we're should search for the finalized value // BUT since we're not guaranteeing to provide correct values for forks // behind the finalized block, check if the block is finalized first - if !chain::is_finalized_block(&self.storage, &at.id(), Bounded::max_value())? { + if !chain::is_finalized_block(&self.storage, &at, Bounded::max_value())? 
{ return Ok(None); } @@ -170,12 +170,12 @@ impl> Lis // IF there's no matching fork, ensure that this isn't a block from a fork that has forked // behind the best finalized block and search at finalized fork - match self.find_unfinalized_fork(at.clone())? { + match self.find_unfinalized_fork(&at)? { Some(fork) => Some(&fork.head), None => match self.best_finalized_entry.as_ref() { Some(best_finalized_entry) if chain::is_connected_to_block( &self.storage, - at.clone(), + &at, &best_finalized_entry.valid_from, )? => Some(best_finalized_entry), _ => None, @@ -184,7 +184,7 @@ impl> Lis }; match head { - Some(head) => head.search_best_before(&self.storage, at.number()) + Some(head) => head.search_best_before(&self.storage, at.number) .map(|e| e.map(|e| (e.0.valid_from, e.1, e.0.value))), None => Ok(None), } @@ -517,10 +517,10 @@ impl> Lis /// Search unfinalized fork where given block belongs. fn find_unfinalized_fork( &self, - block: BlockIdOrHeader, + block: &ComplexBlockId, ) -> ClientResult>> { for unfinalized in &self.unfinalized { - if unfinalized.matches(&self.storage, block.clone())? { + if unfinalized.matches(&self.storage, block)? { return Ok(Some(&unfinalized)); } } @@ -539,9 +539,9 @@ impl Fork { pub fn matches>( &self, storage: &S, - block: BlockIdOrHeader, + block: &ComplexBlockId, ) -> ClientResult { - let range = self.head.search_best_range_before(storage, block.number())?; + let range = self.head.search_best_range_before(storage, block.number)?; match range { None => Ok(false), Some((begin, end)) => chain::is_connected_to_range(storage, block, (&begin, end.as_ref())), @@ -574,7 +574,7 @@ impl Fork { }; // check if the parent is connected to the beginning of the range - if !chain::is_connected_to_block(storage, BlockIdOrHeader::Id(parent), &begin)? { + if !chain::is_connected_to_block(storage, parent, &begin)? { return Ok(None); } @@ -713,11 +713,11 @@ mod chain { /// Is the block1 connected both ends of the range. 
pub fn is_connected_to_range>( storage: &S, - block: BlockIdOrHeader, + block: &ComplexBlockId, range: (&ComplexBlockId, Option<&ComplexBlockId>), ) -> ClientResult { let (begin, end) = range; - Ok(is_connected_to_block(storage, block.clone(), begin)? + Ok(is_connected_to_block(storage, block, begin)? && match end { Some(end) => is_connected_to_block(storage, block, end)?, None => true, @@ -727,18 +727,12 @@ mod chain { /// Is the block1 directly connected (i.e. part of the same fork) to block2? pub fn is_connected_to_block>( storage: &S, - block1: BlockIdOrHeader, + block1: &ComplexBlockId, block2: &ComplexBlockId, ) -> ClientResult { - let block1_id = block1.id(); - let (begin, end) = if block1_id > *block2 { (block2, &block1_id) } else { (&block1_id, block2) }; - let mut current = match end.hash == block1_id.hash { - true => match block1 { - BlockIdOrHeader::Id(_) => storage.read_header(&end.hash)?, - BlockIdOrHeader::Header(header) => Some(header.clone()), - }, - false => storage.read_header(&end.hash)?, - }.ok_or_else(|| ClientError::UnknownBlock(format!("{}", end.hash)))?; + let (begin, end) = if *block1 > *block2 { (block2, block1) } else { (block1, block2) }; + let mut current = storage.read_header(&end.hash)? + .ok_or_else(|| ClientError::UnknownBlock(format!("{}", end.hash)))?; while *current.number() > begin.number { current = storage.read_header(current.parent_hash())? 
.ok_or_else(|| ClientError::UnknownBlock(format!("{}", current.parent_hash())))?; @@ -836,7 +830,7 @@ pub mod tests { // --- 50 --- // ----------> [100] assert_eq!(ListCache::<_, u64, _>::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)) - .unwrap().value_at_block((&test_id(50)).into()).unwrap(), None); + .unwrap().value_at_block(test_id(50)).unwrap(), None); // when block is earlier than best finalized block AND it is finalized AND value is some // [30] ---- 50 ---> [100] assert_eq!(ListCache::new( @@ -846,7 +840,7 @@ pub mod tests { .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }) .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), PruningStrategy::ByDepth(1024), test_id(100) - ).unwrap().value_at_block((&test_id(50)).into()).unwrap(), Some((test_id(30), Some(test_id(100)), 30))); + ).unwrap().value_at_block(test_id(50)).unwrap(), Some((test_id(30), Some(test_id(100)), 30))); // when block is the best finalized block AND value is some // ---> [100] assert_eq!(ListCache::new( @@ -856,7 +850,7 @@ pub mod tests { .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }) .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), PruningStrategy::ByDepth(1024), test_id(100) - ).unwrap().value_at_block((&test_id(100)).into()).unwrap(), Some((test_id(100), None, 100))); + ).unwrap().value_at_block(test_id(100)).unwrap(), Some((test_id(100), None, 100))); // when block is parallel to the best finalized block // ---- 100 // ---> [100] @@ -867,7 +861,7 @@ pub mod tests { .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }) .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), PruningStrategy::ByDepth(1024), test_id(100) - ).unwrap().value_at_block((&ComplexBlockId::new(H256::from_low_u64_be(2), 100)).into()).unwrap(), None); + 
).unwrap().value_at_block(ComplexBlockId::new(H256::from_low_u64_be(2), 100)).unwrap(), None); // when block is later than last finalized block AND there are no forks AND finalized value is Some // ---> [100] --- 200 @@ -877,7 +871,7 @@ pub mod tests { .with_id(50, H256::from_low_u64_be(50)) .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }), PruningStrategy::ByDepth(1024), test_id(100) - ).unwrap().value_at_block((&test_id(200)).into()).unwrap(), Some((test_id(100), None, 100))); + ).unwrap().value_at_block(test_id(200)).unwrap(), Some((test_id(100), None, 100))); // when block is later than last finalized block AND there are no matching forks // AND block is connected to finalized block AND finalized value is Some @@ -893,7 +887,7 @@ pub mod tests { .with_header(test_header(4)) .with_header(fork_header(0, 2, 3)), PruningStrategy::ByDepth(1024), test_id(2) - ).unwrap().value_at_block((&fork_id(0, 2, 3)).into()).unwrap(), Some((correct_id(2), None, 2))); + ).unwrap().value_at_block(fork_id(0, 2, 3)).unwrap(), Some((correct_id(2), None, 2))); // when block is later than last finalized block AND there are no matching forks // AND block is not connected to finalized block // --- 2 --- 3 @@ -910,7 +904,7 @@ pub mod tests { .with_header(fork_header(0, 1, 3)) .with_header(fork_header(0, 1, 2)), PruningStrategy::ByDepth(1024), test_id(2) - ).unwrap().value_at_block((&fork_id(0, 1, 3)).into()).unwrap(), None); + ).unwrap().value_at_block(fork_id(0, 1, 3)).unwrap(), None); // when block is later than last finalized block AND it appends to unfinalized fork from the end // AND unfinalized value is Some @@ -923,7 +917,7 @@ pub mod tests { .with_header(test_header(4)) .with_header(test_header(5)), PruningStrategy::ByDepth(1024), test_id(2) - ).unwrap().value_at_block((&correct_id(5)).into()).unwrap(), Some((correct_id(4), None, 4))); + ).unwrap().value_at_block(correct_id(5)).unwrap(), Some((correct_id(4), None, 4))); // when block is 
later than last finalized block AND it does not fits unfinalized fork // AND it is connected to the finalized block AND finalized value is Some // ---> [2] ----------> [4] @@ -938,7 +932,7 @@ pub mod tests { .with_header(test_header(4)) .with_header(fork_header(0, 2, 3)), PruningStrategy::ByDepth(1024), test_id(2) - ).unwrap().value_at_block((&fork_id(0, 2, 3)).into()).unwrap(), Some((correct_id(2), None, 2))); + ).unwrap().value_at_block(fork_id(0, 2, 3)).unwrap(), Some((correct_id(2), None, 2))); } #[test] @@ -1556,67 +1550,6 @@ pub mod tests { do_test(PruningStrategy::NeverPrune) } - #[test] - fn value_at_not_yet_inserted_block_works() { - fn make_storage() -> DummyStorage { - // 3 -> 5 -> 7_1 -> 10_1 - // \-> 9_2 -> 12_2 - let mut storage = DummyStorage::new() - // entries 3 && 5 are finalized - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 5 }) - .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 3 }) - // entries 7_1 and 10_1 is unfinalied fork#1 - .with_entry(correct_id(10), StorageEntry { prev_valid_from: Some(correct_id(7)), value: 110 }) - .with_entry(correct_id(7), StorageEntry { prev_valid_from: Some(correct_id(5)), value: 107 }) - // entries 7_1 and 10_1 is unfinalied fork#1 - .with_entry(fork_id(1, 6, 12), StorageEntry { prev_valid_from: Some(fork_id(1, 6, 9)), value: 210 }) - .with_entry(fork_id(1, 6, 9), StorageEntry { prev_valid_from: Some(correct_id(5)), value: 207 }); - for i in 0..=12 { - storage = storage.with_header(test_header(i)); - } - for i in 7..=12 { - storage = storage.with_header(fork_header(1, 6, i)); - } - storage - } - - // when there are no unfinalized forks - let header9 = test_header(9); - let storage = make_storage().with_meta(Some(correct_id(5)), Vec::new()); - assert_eq!( - ListCache::new(storage, PruningStrategy::NeverPrune, correct_id(5)) - .unwrap().value_at_block(BlockIdOrHeader::Header(&header9)).unwrap(), - Some((correct_id(5), None, 5)), - ); - - // when 
there's single unfinalized fork - let header11 = test_header(11); - let storage = make_storage().with_meta(Some(correct_id(5)), vec![correct_id(10)]); - assert_eq!( - ListCache::new(storage, PruningStrategy::NeverPrune, correct_id(5)) - .unwrap().value_at_block(BlockIdOrHeader::Header(&header11)).unwrap(), - Some((correct_id(10), None, 110)), - ); - - // when there are several unfinalized forks: fork1 - let header13 = test_header(13); - let storage = make_storage().with_meta(Some(correct_id(5)), vec![correct_id(10), fork_id(1, 6, 12)]); - assert_eq!( - ListCache::new(storage, PruningStrategy::NeverPrune, correct_id(5)) - .unwrap().value_at_block(BlockIdOrHeader::Header(&header13)).unwrap(), - Some((correct_id(10), None, 110)), - ); - - // when there are several unfinalized forks: fork2 - let header13 = fork_header(1, 6, 13); - let storage = make_storage().with_meta(Some(correct_id(5)), vec![correct_id(10), fork_id(1, 6, 12)]); - assert_eq!( - ListCache::new(storage, PruningStrategy::NeverPrune, correct_id(5)) - .unwrap().value_at_block(BlockIdOrHeader::Header(&header13)).unwrap(), - Some((fork_id(1, 6, 12), None, 210)), - ); - } - #[test] fn revert_block_works() { let mut cache = ListCache::new( diff --git a/core/client/db/src/cache/mod.rs b/core/client/db/src/cache/mod.rs index 2830d9d436bf3..04d90a34ac5d9 100644 --- a/core/client/db/src/cache/mod.rs +++ b/core/client/db/src/cache/mod.rs @@ -71,39 +71,6 @@ impl ::std::cmp::PartialOrd for ComplexBlockId { } } -/// Block id or header. -#[derive(Debug, Clone)] -pub enum BlockIdOrHeader<'a, Block: BlockT> { - /// Block id. - Id(&'a ComplexBlockId), - /// Block header. - Header(&'a Block::Header), -} - -impl<'a, Block: BlockT> From<&'a ComplexBlockId> for BlockIdOrHeader<'a, Block> { - fn from(id: &'a ComplexBlockId) -> Self { - BlockIdOrHeader::Id(id) - } -} - -impl<'a, Block: BlockT> BlockIdOrHeader<'a, Block> { - /// Get id of refrenced block. 
- pub fn id(&self) -> ComplexBlockId { - match *self { - BlockIdOrHeader::Id(id) => id.clone(), - BlockIdOrHeader::Header(header) => ComplexBlockId::new(header.hash(), *header.number()), - } - } - - /// Get number of refrenced block. - pub fn number(&self) -> NumberFor { - match *self { - BlockIdOrHeader::Id(id) => id.number, - BlockIdOrHeader::Header(header) => *header.number(), - } - } -} - /// All cache items must implement this trait. pub trait CacheItemT: Clone + Decode + Encode + PartialEq {} @@ -361,70 +328,11 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> { } /// Synchronous implementation of database-backed blockchain data cache. -pub struct DbCacheSync { - db: Arc, - key_lookup_column: Option, - header_column: Option, - cache: RwLock>, -} - -impl DbCacheSync { - /// Create new sync cache. - pub fn new(cache: DbCache) -> Self { - Self { - db: cache.db.clone(), - key_lookup_column: cache.key_lookup_column.clone(), - header_column: cache.header_column.clone(), - cache: RwLock::new(cache), - } - } - - /// Get reference to the cache. - pub(crate) fn cache(&self) -> &RwLock> { - &self.cache - } - - /// Convert block id into complex block id. - pub fn to_complex_id(&self, block: &BlockId) -> ClientResult> { - Ok(match *block { - BlockId::Hash(hash) => { - let header = utils::require_header::( - &*self.db, - self.key_lookup_column, - self.header_column, - BlockId::Hash(hash.clone()) - )?; - ComplexBlockId::new(hash, *header.number()) - }, - BlockId::Number(number) => { - let header = utils::require_header::( - &*self.db, - self.key_lookup_column, - self.header_column, - BlockId::Number(number.clone()) - )?; - ComplexBlockId::new(header.hash(), number) - }, - }) - } - - /// Get value at inserted or not yet inserted block. - pub fn get_at_block( - &self, - key: &CacheKeyId, - at: BlockIdOrHeader, - ) -> Option<((NumberFor, Block::Hash), Option<(NumberFor, Block::Hash)>, Vec)> { - self.cache.read().cache_at.get(key)? 
- .value_at_block(at) - .map(|block_and_value| block_and_value.map(|(begin_block, end_block, value)| - ((begin_block.number, begin_block.hash), end_block.map(|end_block| (end_block.number, end_block.hash)), value))) - .ok()? - } -} +pub struct DbCacheSync(pub RwLock>); impl BlockchainCache for DbCacheSync { fn initialize(&self, key: &CacheKeyId, data: Vec) -> ClientResult<()> { - let mut cache = self.cache.write(); + let mut cache = self.0.write(); let genesis_hash = cache.genesis_hash; let cache_contents = vec![(*key, data)].into_iter().collect(); let db = cache.db.clone(); @@ -448,7 +356,7 @@ impl BlockchainCache for DbCacheSync { at: &BlockId, ) -> Option<((NumberFor, Block::Hash), Option<(NumberFor, Block::Hash)>, Vec)> { let id = { - let cache = self.cache.read(); + let cache = self.0.read(); let storage = cache.cache_at.get(key)?.storage(); let db = storage.db(); let columns = storage.columns(); @@ -472,7 +380,11 @@ impl BlockchainCache for DbCacheSync { } }; - self.get_at_block(key, (&id).into()) + self.0.read().cache_at.get(key)? + .value_at_block(id) + .map(|block_and_value| block_and_value.map(|(begin_block, end_block, value)| + ((begin_block.number, begin_block.hash), end_block.map(|end_block| (end_block.number, end_block.hash)), value))) + .ok()? 
} } diff --git a/core/client/db/src/changes_tries_storage.rs b/core/client/db/src/changes_tries_storage.rs index df03e81a5bb34..78e7bea870b7a 100644 --- a/core/client/db/src/changes_tries_storage.rs +++ b/core/client/db/src/changes_tries_storage.rs @@ -24,8 +24,8 @@ use codec::{Decode, Encode}; use parking_lot::RwLock; use client::error::{Error as ClientError, Result as ClientResult}; use trie::MemoryDB; -use client::backend::ChangesTrieConfigurationRange; -use client::blockchain::well_known_cache_keys; +use client::backend::{PrunableStateChangesTrieStorage, ChangesTrieConfigurationRange}; +use client::blockchain::{well_known_cache_keys, Cache as BlockchainCache}; use primitives::{H256, Blake2Hasher, ChangesTrieConfiguration, convert_hash}; use sr_primitives::traits::{ Block as BlockT, Header as HeaderT, NumberFor, One, Zero, CheckedSub, @@ -35,7 +35,7 @@ use state_machine::DBValue; use crate::utils::{self, Meta, meta_keys, db_err}; use crate::cache::{ DbCacheSync, DbCache, DbCacheTransactionOps, - ComplexBlockId, BlockIdOrHeader, EntryType as CacheEntryType, + ComplexBlockId, EntryType as CacheEntryType, }; /// Extract new changes trie configuration (if available) from the header. @@ -47,7 +47,29 @@ pub fn extract_new_configuration(header: &Header) -> Option<&Op /// Opaque configuration cache transaction. During its lifetime, noone should modify cache. This is currently /// guaranteed because import lock is held during block import/finalization. -pub type DbChangesTrieStorageTransaction = DbCacheTransactionOps; +pub struct DbChangesTrieStorageTransaction { + /// Cache operations that must be performed after db transaction is comitted. + cache_ops: DbCacheTransactionOps, + /// New configuration (if changed at current block). + new_config: Option>, +} + +impl DbChangesTrieStorageTransaction { + /// Consume self and return transaction with given new configuration. 
+ pub fn with_new_config(mut self, new_config: Option>) -> Self { + self.new_config = new_config; + self + } +} + +impl From> for DbChangesTrieStorageTransaction { + fn from(cache_ops: DbCacheTransactionOps) -> Self { + DbChangesTrieStorageTransaction { + cache_ops, + new_config: None, + } + } +} /// Changes tries storage. /// @@ -106,14 +128,14 @@ impl> DbChangesTrieStorage { header_column, meta, min_blocks_to_keep, - cache: DbCacheSync::new(DbCache::new( + cache: DbCacheSync(RwLock::new(DbCache::new( db.clone(), key_lookup_column, header_column, cache_column, genesis_hash, ComplexBlockId::new(finalized_hash, finalized_number), - )), + ))), tries_meta: RwLock::new(tries_meta), }) } @@ -138,35 +160,33 @@ impl> DbChangesTrieStorage { // if configuration has not been changed AND block is not finalized => nothing to do here let new_configuration = match new_configuration { Some(new_configuration) => new_configuration, - None if !finalized => return Ok(DbChangesTrieStorageTransaction::empty()), + None if !finalized => return Ok(DbCacheTransactionOps::empty().into()), None => return self.finalize(tx, parent_block.hash, block.hash, block.number, Some(new_header), cache_tx), }; // update configuration cache let mut cache_at = HashMap::new(); cache_at.insert(well_known_cache_keys::CHANGES_TRIE_CONFIG, new_configuration.encode()); - Ok(match cache_tx { - Some(cache_tx) => { - self.cache.cache().write().transaction_with_ops(tx, cache_tx) - .on_block_insert( - parent_block, - block, - cache_at, - if finalized { CacheEntryType::Final } else { CacheEntryType::NonFinal }, - )? - .into_ops() - }, - None => { - self.cache.cache().write().transaction(tx) - .on_block_insert( - parent_block, - block, - cache_at, - if finalized { CacheEntryType::Final } else { CacheEntryType::NonFinal }, - )? 
- .into_ops() - }, - }) + Ok(DbChangesTrieStorageTransaction::from(match cache_tx { + Some(cache_tx) => self.cache.0.write() + .transaction_with_ops(tx, cache_tx.cache_ops) + .on_block_insert( + parent_block, + block, + cache_at, + if finalized { CacheEntryType::Final } else { CacheEntryType::NonFinal }, + )? + .into_ops(), + None => self.cache.0.write() + .transaction(tx) + .on_block_insert( + parent_block, + block, + cache_at, + if finalized { CacheEntryType::Final } else { CacheEntryType::NonFinal }, + )? + .into_ops(), + }).with_new_config(Some(new_configuration))) } /// Called when block is finalized. @@ -180,7 +200,7 @@ impl> DbChangesTrieStorage { cache_tx: Option>, ) -> ClientResult> { // prune obsolete changes tries - self.prune(tx, block_hash, block_num, new_header.clone())?; + self.prune(tx, block_hash, block_num, new_header.clone(), cache_tx.as_ref())?; // if we have inserted the block that we're finalizing in the same transaction // => then we have already finalized it from the commit() call @@ -197,22 +217,24 @@ impl> DbChangesTrieStorage { let parent_block_num = block_num.checked_sub(&One::one()).unwrap_or_else(|| Zero::zero()); let parent_block = ComplexBlockId::new(parent_block_hash, parent_block_num); Ok(match cache_tx { - Some(cache_tx) => { - self.cache.cache().write().transaction_with_ops(tx, cache_tx) + Some(cache_tx) => DbChangesTrieStorageTransaction::from( + self.cache.0.write() + .transaction_with_ops(tx, cache_tx.cache_ops) .on_block_finalize( parent_block, block, )? .into_ops() - }, - None => { - self.cache.cache().write().transaction(tx) + ).with_new_config(cache_tx.new_config), + None => DbChangesTrieStorageTransaction::from( + self.cache.0.write() + .transaction(tx) .on_block_finalize( parent_block, block, )? 
.into_ops() - }, + ), }) } @@ -222,15 +244,16 @@ impl> DbChangesTrieStorage { tx: &mut DBTransaction, block: NumberFor, ) -> ClientResult> { - Ok(self.cache.cache().write().transaction(tx) + Ok(self.cache.0.write().transaction(tx) .on_block_revert(block)? - .into_ops()) + .into_ops() + .into()) } /// When transaction has been committed. pub fn post_commit(&self, tx: Option>) { if let Some(tx) = tx { - self.cache.cache().write().commit(tx) + self.cache.0.write().commit(tx.cache_ops) .expect("only fails if cache with given name isn't loaded yet;\ cache is already loaded because there is tx; qed"); } @@ -243,6 +266,7 @@ impl> DbChangesTrieStorage { block_hash: Block::Hash, block_num: NumberFor, new_header: Option<&Block::Header>, + cache_tx: Option<&DbChangesTrieStorageTransaction>, ) -> ClientResult<()> { // never prune on archive nodes let min_blocks_to_keep = match self.min_blocks_to_keep { @@ -287,13 +311,24 @@ impl> DbChangesTrieStorage { BlockId::Number(next_digest_range_start), )?.hash(), }; - let block_id = ComplexBlockId::new(next_digest_range_start_hash, next_digest_range_start); - let block_id_or_header = match new_header { - Some(ref new_header) if *new_header.number() == next_digest_range_start - => BlockIdOrHeader::Header(*new_header), - _ => BlockIdOrHeader::Id(&block_id), + + let config_for_new_block = new_header + .map(|header| *header.number() == next_digest_range_start) + .unwrap_or(false); + let next_config = match cache_tx { + Some(cache_tx) if config_for_new_block && cache_tx.new_config.is_some() => { + let config = cache_tx + .new_config + .clone() + .expect("guarded by is_some(); qed"); + ChangesTrieConfigurationRange { + zero: (block_num, block_hash), + end: None, + config, + } + }, + _ => self.configuration_at(&BlockId::Hash(next_digest_range_start_hash))?, }; - let next_config = self.configuration_at_block(block_id_or_header)?; if let Some(config) = next_config.config { let mut oldest_digest_range = config 
.next_max_level_digest_range(next_config.zero.0, next_digest_range_start) @@ -316,15 +351,6 @@ impl> DbChangesTrieStorage { write_tries_meta(tx, self.meta_column, &*tries_meta); Ok(()) } - - /// Return configuration at given block id or header. - fn configuration_at_block(&self, at: BlockIdOrHeader) -> ClientResult> { - self.cache - .get_at_block(&well_known_cache_keys::CHANGES_TRIE_CONFIG, at) - .and_then(|(zero, end, encoded)| Decode::decode(&mut &encoded[..]).ok() - .map(|config| ChangesTrieConfigurationRange { zero, end, config })) - .ok_or_else(|| ClientError::ErrorReadingChangesTriesConfig) - } } impl client::backend::PrunableStateChangesTrieStorage @@ -337,8 +363,11 @@ where } fn configuration_at(&self, at: &BlockId) -> ClientResult> { - self.cache.to_complex_id(at) - .and_then(|at| self.configuration_at_block((&at).into())) + self.cache + .get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, at) + .and_then(|(zero, end, encoded)| Decode::decode(&mut &encoded[..]).ok() + .map(|config| ChangesTrieConfigurationRange { zero, end, config })) + .ok_or_else(|| ClientError::ErrorReadingChangesTriesConfig) } fn oldest_pruned_digest_range_end(&self) -> NumberFor { @@ -897,7 +926,7 @@ mod tests { // before truncate there are 2 unfinalized forks - block2_1+block2_3 assert_eq!( - backend.changes_tries_storage.cache.cache().write() + backend.changes_tries_storage.cache.0.write() .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) .unwrap() .unfinalized() @@ -910,7 +939,7 @@ mod tests { // after truncating block2_3 - there are 2 unfinalized forks - block2_1+block2_2 backend.revert(1).unwrap(); assert_eq!( - backend.changes_tries_storage.cache.cache().write() + backend.changes_tries_storage.cache.0.write() .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) .unwrap() .unfinalized() @@ -924,7 +953,7 @@ mod tests { // though they're pointing to the same block backend.revert(1).unwrap(); assert_eq!( - backend.changes_tries_storage.cache.cache().write() + 
backend.changes_tries_storage.cache.0.write() .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) .unwrap() .unfinalized() @@ -934,7 +963,7 @@ mod tests { vec![2, 2], ); assert_eq!( - backend.changes_tries_storage.cache.cache().write() + backend.changes_tries_storage.cache.0.write() .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) .unwrap() .unfinalized() @@ -948,7 +977,7 @@ mod tests { // after truncating block2 - there are no unfinalized forks backend.revert(1).unwrap(); assert!( - backend.changes_tries_storage.cache.cache().write() + backend.changes_tries_storage.cache.0.write() .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) .unwrap() .unfinalized() diff --git a/core/client/db/src/light.rs b/core/client/db/src/light.rs index a0ab0804e282d..415f9e811906c 100644 --- a/core/client/db/src/light.rs +++ b/core/client/db/src/light.rs @@ -105,7 +105,7 @@ impl LightStorage Ok(LightStorage { db, meta: RwLock::new(meta), - cache: Arc::new(DbCacheSync::new(cache)), + cache: Arc::new(DbCacheSync(RwLock::new(cache))), }) } @@ -417,7 +417,7 @@ impl LightBlockchainStorage for LightStorage let is_genesis = number.is_zero(); if is_genesis { - self.cache.cache().write().set_genesis_hash(hash); + self.cache.0.write().set_genesis_hash(hash); transaction.put(columns::META, meta_keys::GENESIS_HASH, hash.as_ref()); } @@ -443,7 +443,7 @@ impl LightBlockchainStorage for LightStorage } { - let mut cache = self.cache.cache().write(); + let mut cache = self.cache.0.write(); let cache_ops = cache.transaction(&mut transaction) .on_block_insert( ComplexBlockId::new(*header.parent_hash(), if number.is_zero() { Zero::zero() } else { number - One::one() }), @@ -504,7 +504,7 @@ impl LightBlockchainStorage for LightStorage let number = *header.number(); self.note_finalized(&mut transaction, &header, hash.clone())?; { - let mut cache = self.cache.cache().write(); + let mut cache = self.cache.0.write(); let cache_ops = cache.transaction(&mut transaction) .on_block_finalize( 
ComplexBlockId::new(*header.parent_hash(), if number.is_zero() { Zero::zero() } else { number - One::one() }), From 9da96a5a81ad227546f6271c3c6723f4c7992668 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 12 Nov 2019 09:33:05 +0300 Subject: [PATCH 37/63] removed debug leftovers + some docs --- core/client/db/src/cache/list_cache.rs | 4 ++-- core/client/db/src/lib.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/core/client/db/src/cache/list_cache.rs b/core/client/db/src/cache/list_cache.rs index 3e65c80b9925f..682b019e354fb 100644 --- a/core/client/db/src/cache/list_cache.rs +++ b/core/client/db/src/cache/list_cache.rs @@ -115,7 +115,7 @@ pub enum ForkAppendResult { Fork(ComplexBlockId), } -impl> ListCache { +impl> ListCache { /// Create new db list cache entry. pub fn new( storage: S, @@ -394,7 +394,6 @@ impl> Lis /// When transaction is committed. pub fn on_transaction_commit(&mut self, ops: Vec>) { -println!("=== on_transaction_commit: {:?}", ops); for op in ops { match op { CommitOperation::AppendNewBlock(index, best_block) => { @@ -660,6 +659,7 @@ impl Fork { impl CommitOperation { /// Try to merge two ops into single one. 
pub fn merge_with(self, other: CommitOperation) -> (Option, Option) { + // we only able to merge two consequent block finalization operations match self { CommitOperation::BlockFinalized(old_finalized_block, old_finalized_entry, old_abandoned_forks) => { match other { diff --git a/core/client/db/src/lib.rs b/core/client/db/src/lib.rs index 5db94fe06041b..9fe6e13b5f53c 100644 --- a/core/client/db/src/lib.rs +++ b/core/client/db/src/lib.rs @@ -1443,7 +1443,7 @@ pub(crate) mod tests { use sr_primitives::generic::DigestItem; use sr_primitives::testing::{Header, Block as RawBlock, ExtrinsicWrapper}; use sr_primitives::traits::{Hash, BlakeTwo256}; - use state_machine::{TrieMut, TrieDBMut, ChangesTrieRootsStorage, ChangesTrieStorage}; + use state_machine::{TrieMut, TrieDBMut}; use header_metadata::{lowest_common_ancestor, tree_route}; use test_client; From f8a14f5e2e78389c08463c34b618a43533885f17 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 12 Nov 2019 09:40:39 +0300 Subject: [PATCH 38/63] more docs --- core/client/db/src/changes_tries_storage.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/core/client/db/src/changes_tries_storage.rs b/core/client/db/src/changes_tries_storage.rs index ffb946fed7353..bd332aeee42cf 100644 --- a/core/client/db/src/changes_tries_storage.rs +++ b/core/client/db/src/changes_tries_storage.rs @@ -74,7 +74,7 @@ impl From> for DbChangesTrieStorageT /// Changes tries storage. /// /// Stores all tries in separate DB column. -/// Lock order: meta, tries_meta, cache. +/// Lock order: meta, tries_meta, cache, build_cache. pub struct DbChangesTrieStorage { db: Arc, meta_column: Option, @@ -84,7 +84,11 @@ pub struct DbChangesTrieStorage { meta: Arc, Block::Hash>>>, tries_meta: RwLock>, min_blocks_to_keep: Option, + /// The cache stores all ever existing changes tries configurations. cache: DbCacheSync, + /// Build cache is a map of block => set of storage keys changed at this block. 
+ /// They're used to build digest blocks - instead of reading+parsing tries from db + /// we just use keys sets from the cache. build_cache: RwLock>>, } From 68c4db0082d9dce3c3ae62f43359746aeac4c683 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 12 Nov 2019 10:11:49 +0300 Subject: [PATCH 39/63] more post-merge fixes --- core/client/db/src/cache/list_cache.rs | 16 ++++- core/client/db/src/lib.rs | 10 +-- core/client/db/src/utils.rs | 2 +- core/client/src/client.rs | 18 +++--- core/consensus/babe/src/lib.rs | 3 +- core/service/src/lib.rs | 87 -------------------------- 6 files changed, 31 insertions(+), 105 deletions(-) diff --git a/core/client/db/src/cache/list_cache.rs b/core/client/db/src/cache/list_cache.rs index 682b019e354fb..a68a279fb7f4d 100644 --- a/core/client/db/src/cache/list_cache.rs +++ b/core/client/db/src/cache/list_cache.rs @@ -1412,9 +1412,21 @@ pub mod tests { #[test] fn is_connected_to_block_fails() { // when storage returns error - assert!(chain::is_connected_to_block::<_, u64, _>(&FaultyStorage, (&test_id(1)).into(), &test_id(100)).is_err()); + assert!( + chain::is_connected_to_block::<_, u64, _>( + &FaultyStorage, + (&test_id(1)).into(), + &test_id(100), + ).is_err(), + ); // when there's no header in the storage - assert!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new(), (&test_id(1)).into(), &test_id(100)).is_err()); + assert!( + chain::is_connected_to_block::<_, u64, _>( + &DummyStorage::new(), + (&test_id(1)).into(), + &test_id(100), + ).is_err(), + ); } #[test] diff --git a/core/client/db/src/lib.rs b/core/client/db/src/lib.rs index 9fe6e13b5f53c..7ab513025555f 100644 --- a/core/client/db/src/lib.rs +++ b/core/client/db/src/lib.rs @@ -455,7 +455,7 @@ pub struct BlockImportOperation { storage_updates: StorageCollection, child_storage_updates: ChildStorageCollection, changes_trie_updates: MemoryDB, - changes_trie_cache_update: Option>>, + changes_trie_build_cache_update: Option>>, changes_trie_config_update: 
Option>, pending_block: Option>, aux_ops: Vec<(Vec, Option>)>, @@ -558,7 +558,7 @@ impl client::backend::BlockImportOperation update: ChangesTrieTransaction>, ) -> ClientResult<()> { self.changes_trie_updates = update.0; - self.changes_trie_cache_update = Some(update.1); + self.changes_trie_build_cache_update = Some(update.1); Ok(()) } @@ -1082,8 +1082,8 @@ impl> Backend { let write_result = self.storage.db.write(transaction).map_err(db_err); - if let Some(changes_trie_cache_update) = operation.changes_trie_cache_update { - self.changes_tries_storage.commit_build_cache(changes_trie_cache_update); + if let Some(changes_trie_build_cache_update) = operation.changes_trie_build_cache_update { + self.changes_tries_storage.commit_build_cache(changes_trie_build_cache_update); } if let Some(( @@ -1237,7 +1237,7 @@ impl client::backend::Backend for Backend whe child_storage_updates: Default::default(), changes_trie_config_update: None, changes_trie_updates: MemoryDB::default(), - changes_trie_cache_update: None, + changes_trie_build_cache_update: None, aux_ops: Vec::new(), finalized_blocks: Vec::new(), set_head: None, diff --git a/core/client/db/src/utils.rs b/core/client/db/src/utils.rs index aeaa61a6f9fd7..b8622745aea79 100644 --- a/core/client/db/src/utils.rs +++ b/core/client/db/src/utils.rs @@ -37,7 +37,7 @@ use crate::{DatabaseSettings, DatabaseSettingsSrc}; /// Number of columns in the db. Must be the same for both full && light dbs. /// Otherwise RocksDb will fail to open database && check its type. -pub const NUM_COLUMNS: u32 = 11; +pub const NUM_COLUMNS: u32 = 10; /// Meta column. The set of keys in the column is shared by full && light storages. 
pub const COLUMN_META: Option = Some(0); diff --git a/core/client/src/client.rs b/core/client/src/client.rs index 099c8e7d7978a..7c77f4b840de2 100644 --- a/core/client/src/client.rs +++ b/core/client/src/client.rs @@ -514,17 +514,18 @@ impl Client where None => return Ok(None), }; - // TODO (#3282): we only work with the last config range here!!! Need to stabilize pruning before fixing this. - match configs.pop() { - Some((zero, _, _)) => { - let oldest = storage.oldest_pruned_digest_range_end(); - let oldest = ::std::cmp::max(zero + One::one(), oldest); - let first = ::std::cmp::max(first, oldest); + let first_available_changes_trie = configs.into_iter().rev() + .take_while(|config| config.config.is_some()) + .map(|config| config.zero + One::one()) + .next(); + match first_available_changes_trie { + Some(first_available_changes_trie) => { + let oldest_unpruned = storage.oldest_pruned_digest_range_end(); + let first = std::cmp::max(first_available_changes_trie, oldest_unpruned); Ok(Some((first, last))) }, - None => Ok(None), + None => Ok(None) } - } /// Get pairs of (block, extrinsic) where key has been changed at given blocks range. @@ -757,6 +758,7 @@ impl Client where /// Returns changes trie storage and all configurations that have been active in the range [first; last]. /// + /// Configurations are returned in descending order (and obviously never overlap). /// Fails if or an error if it is not supported. 
fn require_changes_trie( &self, diff --git a/core/consensus/babe/src/lib.rs b/core/consensus/babe/src/lib.rs index d39919a42b38f..801b47d0ec53d 100644 --- a/core/consensus/babe/src/lib.rs +++ b/core/consensus/babe/src/lib.rs @@ -89,13 +89,12 @@ use srml_babe::{ timestamp::{TimestampInherentData, InherentType as TimestampInherent} }; use consensus_common::SelectChain; -use consensus_common::import_queue::{Verifier, BasicQueue}; +use consensus_common::import_queue::{Verifier, BasicQueue, CacheKeyId}; use client::{ block_builder::api::BlockBuilder as BlockBuilderApi, blockchain::{self, HeaderBackend, ProvideCache}, BlockchainEvents, CallExecutor, Client, error::Result as ClientResult, error::Error as ClientError, backend::{AuxStore, Backend}, ProvideUncles, - well_known_cache_keys::{self, Id as CacheKeyId}, }; use slots::{CheckedHeader, check_equivocation}; use futures::prelude::*; diff --git a/core/service/src/lib.rs b/core/service/src/lib.rs index 299876a15b7bd..b267e0a635b23 100644 --- a/core/service/src/lib.rs +++ b/core/service/src/lib.rs @@ -663,93 +663,6 @@ where } } -/// Constructs a service factory with the given name that implements the `ServiceFactory` trait. -/// The required parameters are required to be given in the exact order. Some parameters are followed -/// by `{}` blocks. These blocks are required and used to initialize the given parameter. -/// In these block it is required to write a closure that takes the same number of arguments, -/// the corresponding function in the `ServiceFactory` trait provides. 
-/// -/// # Example -/// -/// ``` -/// # use substrate_service::{ -/// # construct_service_factory, Service, FullBackend, FullExecutor, LightBackend, LightExecutor, -/// # FullComponents, LightComponents, FactoryFullConfiguration, FullClient -/// # }; -/// # use transaction_pool::{self, txpool::{Pool as TransactionPool}}; -/// # use network::{config::DummyFinalityProofRequestBuilder, construct_simple_protocol}; -/// # use client::{self, well_known_cache_keys::Id as CacheKeyId, LongestChain}; -/// # use consensus_common::import_queue::{BasicQueue, Verifier}; -/// # use consensus_common::{BlockOrigin, BlockImportParams}; -/// # use node_runtime::{GenesisConfig, RuntimeApi}; -/// # use std::sync::Arc; -/// # use node_primitives::Block; -/// # use babe_primitives::AuthorityPair as BabePair; -/// # use grandpa_primitives::AuthorityPair as GrandpaPair; -/// # use sr_primitives::Justification; -/// # use sr_primitives::traits::Block as BlockT; -/// # use grandpa; -/// # construct_simple_protocol! { -/// # pub struct NodeProtocol where Block = Block { } -/// # } -/// # struct MyVerifier; -/// # impl Verifier for MyVerifier { -/// # fn verify( -/// # &mut self, -/// # origin: BlockOrigin, -/// # header: B::Header, -/// # justification: Option, -/// # body: Option>, -/// # ) -> Result<(BlockImportParams, Option)>>), String> { -/// # unimplemented!(); -/// # } -/// # } -/// type FullChainApi = transaction_pool::ChainApi< -/// client::Client, FullExecutor, Block, RuntimeApi>, Block>; -/// type LightChainApi = transaction_pool::ChainApi< -/// client::Client, LightExecutor, Block, RuntimeApi>, Block>; -/// -/// construct_service_factory! { -/// struct Factory { -/// // Declare the block type -/// Block = Block, -/// RuntimeApi = RuntimeApi, -/// // Declare the network protocol and give an initializer. 
-/// NetworkProtocol = NodeProtocol { |config| Ok(NodeProtocol::new()) }, -/// RuntimeDispatch = node_executor::Executor, -/// FullTransactionPoolApi = FullChainApi -/// { |config, client| Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) }, -/// LightTransactionPoolApi = LightChainApi -/// { |config, client| Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) }, -/// Genesis = GenesisConfig, -/// Configuration = (), -/// FullService = FullComponents -/// { |config| >::new(config) }, -/// // Setup as Consensus Authority (if the role and key are given) -/// AuthoritySetup = { -/// |service: Self::FullService| { -/// Ok(service) -/// }}, -/// LightService = LightComponents -/// { |config| >::new(config) }, -/// FullImportQueue = BasicQueue -/// { |_, client, _, _| Ok(BasicQueue::new(MyVerifier, Box::new(client), None, None)) }, -/// LightImportQueue = BasicQueue -/// { |_, client| { -/// let fprb = Box::new(DummyFinalityProofRequestBuilder::default()) as Box<_>; -/// Ok((BasicQueue::new(MyVerifier, Box::new(client), None, None), fprb)) -/// }}, -/// SelectChain = LongestChain, Self::Block> -/// { |config: &FactoryFullConfiguration, client: Arc>| { -/// #[allow(deprecated)] -/// Ok(LongestChain::new(client.backend().clone())) -/// }}, -/// FinalityProofProvider = { |client: Arc>| { -/// Ok(Some(Arc::new(grandpa::FinalityProofProvider::new(client.clone(), client)) as _)) -/// }}, -/// } -/// } -/// ``` #[cfg(test)] mod tests { use super::*; From d63d65ef612800d3ff0e2e9bb027c088cbf438f7 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 12 Nov 2019 11:46:40 +0300 Subject: [PATCH 40/63] more post-merge fixes --- core/client/db/src/utils.rs | 2 +- core/client/src/call_executor.rs | 2 +- core/client/src/client.rs | 15 +- core/executor/src/integration_tests/mod.rs | 529 +++++++++--------- .../executor/src/integration_tests/sandbox.rs | 507 ++++++++--------- core/executor/src/lib.rs | 21 +- 
core/state-machine/src/changes_trie/mod.rs | 10 + core/state-machine/src/ext.rs | 10 +- core/state-machine/src/lib.rs | 8 +- core/state-machine/src/overlayed_changes.rs | 3 +- core/state-machine/src/testing.rs | 61 +- core/test-runtime/src/system.rs | 12 +- node/executor/benches/bench.rs | 26 +- node/executor/src/lib.rs | 13 +- 14 files changed, 597 insertions(+), 622 deletions(-) diff --git a/core/client/db/src/utils.rs b/core/client/db/src/utils.rs index b8622745aea79..aeaa61a6f9fd7 100644 --- a/core/client/db/src/utils.rs +++ b/core/client/db/src/utils.rs @@ -37,7 +37,7 @@ use crate::{DatabaseSettings, DatabaseSettingsSrc}; /// Number of columns in the db. Must be the same for both full && light dbs. /// Otherwise RocksDb will fail to open database && check its type. -pub const NUM_COLUMNS: u32 = 10; +pub const NUM_COLUMNS: u32 = 11; /// Meta column. The set of keys in the column is shared by full && light storages. pub const COLUMN_META: Option = Some(0); diff --git a/core/client/src/call_executor.rs b/core/client/src/call_executor.rs index 153800f2fdf0b..01aae80e3e2a3 100644 --- a/core/client/src/call_executor.rs +++ b/core/client/src/call_executor.rs @@ -322,7 +322,7 @@ where let mut ext = Ext::new( &mut overlay, &state, - changes_trie_state.as_ref(), + changes_trie_state, None, ); let version = self.executor.runtime_version(&mut ext); diff --git a/core/client/src/client.rs b/core/client/src/client.rs index 7c77f4b840de2..3c3aa603edf7b 100644 --- a/core/client/src/client.rs +++ b/core/client/src/client.rs @@ -509,15 +509,12 @@ impl Client where return Err(error::Error::ChangesTrieAccessFailed("Invalid changes trie range".into())); } - let (storage, mut configs) = match self.require_changes_trie(first, last_hash).ok() { + let (storage, configs) = match self.require_changes_trie(first, last_hash, true).ok() { Some((storage, configs)) => (storage, configs), None => return Ok(None), }; - let first_available_changes_trie = configs.into_iter().rev() - 
.take_while(|config| config.config.is_some()) - .map(|config| config.zero + One::one()) - .next(); + let first_available_changes_trie = configs.last().map(|config| config.0); match first_available_changes_trie { Some(first_available_changes_trie) => { let oldest_unpruned = storage.oldest_pruned_digest_range_end(); @@ -541,7 +538,7 @@ impl Client where ) -> error::Result, u32)>> { let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?; let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?; - let (storage, configs) = self.require_changes_trie(first, last_hash)?; + let (storage, configs) = self.require_changes_trie(first, last_hash, false)?; let mut result = Vec::new(); let best_number = self.backend.blockchain().info().best_number; @@ -663,7 +660,7 @@ impl Client where let first_number = self.backend.blockchain() .expect_block_number_from_id(&BlockId::Hash(first))?; - let (storage, configs) = self.require_changes_trie(first_number, last)?; + let (storage, configs) = self.require_changes_trie(first_number, last, false)?; let min_number = self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(min))?; let recording_storage = AccessedRootsRecorder:: { @@ -759,11 +756,14 @@ impl Client where /// Returns changes trie storage and all configurations that have been active in the range [first; last]. /// /// Configurations are returned in descending order (and obviously never overlap). + /// If prefer_configs is true, returns maximal consequent configurations ranges, starting from last and + /// stopping on either first, or when CT have been disabled. /// Fails if or an error if it is not supported. 
fn require_changes_trie( &self, first: NumberFor, last: Block::Hash, + prefer_configs: bool, ) -> error::Result<( &dyn PrunableStateChangesTrieStorage, Vec<(NumberFor, Option<(NumberFor, Block::Hash)>, ChangesTrieConfiguration)>, @@ -779,6 +779,7 @@ impl Client where let config_range = storage.configuration_at(&BlockId::Hash(current))?; match config_range.config { Some(config) => configs.push((config_range.zero.0, config_range.end, config)), + None if prefer_configs => return Ok((storage, configs)), None => return Err(error::Error::ChangesTriesNotSupported), } diff --git a/core/executor/src/integration_tests/mod.rs b/core/executor/src/integration_tests/mod.rs index 35a6f39ff06e2..e907aec6d6224 100644 --- a/core/executor/src/integration_tests/mod.rs +++ b/core/executor/src/integration_tests/mod.rs @@ -54,58 +54,56 @@ fn call_in_wasm( #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn returning_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); - ext.with_ext(|mut ext| { - let test_code = WASM_BINARY; + let mut ext = ext.ext(); + let test_code = WASM_BINARY; - let output = call_in_wasm( - "test_empty_return", - &[], - wasm_method, - &mut ext, - &test_code[..], - 8, - ).unwrap(); - assert_eq!(output, vec![0u8; 0]); - }); + let output = call_in_wasm( + "test_empty_return", + &[], + wasm_method, + &mut ext, + &test_code[..], + 8, + ).unwrap(); + assert_eq!(output, vec![0u8; 0]); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn panicking_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); - ext.with_ext(|mut ext| { - let test_code = WASM_BINARY; - - let output = call_in_wasm( - "test_panic", - &[], - wasm_method, - &mut ext, - &test_code[..], - 8, - ); - assert!(output.is_err()); - - let output = call_in_wasm( - "test_conditional_panic", - &[0], - wasm_method, - &mut ext, - 
&test_code[..], - 8, - ); - assert_eq!(Decode::decode(&mut &output.unwrap()[..]), Ok(Vec::::new())); + let mut ext = ext.ext(); + let test_code = WASM_BINARY; - let output = call_in_wasm( - "test_conditional_panic", - &vec![2].encode(), - wasm_method, - &mut ext, - &test_code[..], - 8, - ); - assert!(output.is_err()); - }); + let output = call_in_wasm( + "test_panic", + &[], + wasm_method, + &mut ext, + &test_code[..], + 8, + ); + assert!(output.is_err()); + + let output = call_in_wasm( + "test_conditional_panic", + &[0], + wasm_method, + &mut ext, + &test_code[..], + 8, + ); + assert_eq!(Decode::decode(&mut &output.unwrap()[..]), Ok(Vec::::new())); + + let output = call_in_wasm( + "test_conditional_panic", + &vec![2].encode(), + wasm_method, + &mut ext, + &test_code[..], + 8, + ); + assert!(output.is_err()); } #[test_case(WasmExecutionMethod::Interpreted)] @@ -113,7 +111,8 @@ fn panicking_should_work(wasm_method: WasmExecutionMethod) { fn storage_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); - ext.with_ext(|mut ext| { + { + let mut ext = ext.ext(); ext.set_storage(b"foo".to_vec(), b"bar".to_vec()); let test_code = WASM_BINARY; @@ -127,7 +126,7 @@ fn storage_should_work(wasm_method: WasmExecutionMethod) { ).unwrap(); assert_eq!(output, b"all ok!".to_vec().encode()); - }); + } let expected = TestExternalities::new((map![ b"input".to_vec() => b"Hello world".to_vec(), @@ -141,8 +140,8 @@ fn storage_should_work(wasm_method: WasmExecutionMethod) { #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn clear_prefix_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); - - ext.with_ext(|mut ext| { + { + let mut ext = ext.ext(); ext.set_storage(b"aaa".to_vec(), b"1".to_vec()); ext.set_storage(b"aab".to_vec(), b"2".to_vec()); ext.set_storage(b"aba".to_vec(), b"3".to_vec()); @@ -161,7 +160,7 @@ fn clear_prefix_should_work(wasm_method: WasmExecutionMethod) { ).unwrap(); 
assert_eq!(output, b"all ok!".to_vec().encode()); - }); + } let expected = TestExternalities::new((map![ b"aaa".to_vec() => b"1".to_vec(), @@ -175,235 +174,228 @@ fn clear_prefix_should_work(wasm_method: WasmExecutionMethod) { #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn blake2_256_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); - ext.with_ext(|mut ext| { - let test_code = WASM_BINARY; - assert_eq!( - call_in_wasm( - "test_blake2_256", - &[0], - wasm_method, - &mut ext, - &test_code[..], - 8, - ).unwrap(), - blake2_256(&b""[..]).to_vec().encode(), - ); - assert_eq!( - call_in_wasm( - "test_blake2_256", - &b"Hello world!".to_vec().encode(), - wasm_method, - &mut ext, - &test_code[..], - 8, - ).unwrap(), - blake2_256(&b"Hello world!"[..]).to_vec().encode(), - ); - }); + let mut ext = ext.ext(); + let test_code = WASM_BINARY; + assert_eq!( + call_in_wasm( + "test_blake2_256", + &[0], + wasm_method, + &mut ext, + &test_code[..], + 8, + ).unwrap(), + blake2_256(&b""[..]).to_vec().encode(), + ); + assert_eq!( + call_in_wasm( + "test_blake2_256", + &b"Hello world!".to_vec().encode(), + wasm_method, + &mut ext, + &test_code[..], + 8, + ).unwrap(), + blake2_256(&b"Hello world!"[..]).to_vec().encode(), + ); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn blake2_128_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); - ext.with_ext(|mut ext| { - let test_code = WASM_BINARY; - assert_eq!( - call_in_wasm( - "test_blake2_128", - &[0], - wasm_method, - &mut ext, - &test_code[..], - 8, - ).unwrap(), - blake2_128(&b""[..]).to_vec().encode(), - ); - assert_eq!( - call_in_wasm( - "test_blake2_128", - &b"Hello world!".to_vec().encode(), - wasm_method, - &mut ext, - &test_code[..], - 8, - ).unwrap(), - blake2_128(&b"Hello world!"[..]).to_vec().encode(), - ); - }); + let mut ext = ext.ext(); 
+ let test_code = WASM_BINARY; + assert_eq!( + call_in_wasm( + "test_blake2_128", + &[0], + wasm_method, + &mut ext, + &test_code[..], + 8, + ).unwrap(), + blake2_128(&b""[..]).to_vec().encode(), + ); + assert_eq!( + call_in_wasm( + "test_blake2_128", + &b"Hello world!".to_vec().encode(), + wasm_method, + &mut ext, + &test_code[..], + 8, + ).unwrap(), + blake2_128(&b"Hello world!"[..]).to_vec().encode(), + ); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn twox_256_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); - ext.with_ext(|mut ext| { - let test_code = WASM_BINARY; - assert_eq!( - call_in_wasm( - "test_twox_256", - &[0], - wasm_method, - &mut ext, - &test_code[..], - 8, - ).unwrap(), - hex!( - "99e9d85137db46ef4bbea33613baafd56f963c64b1f3685a4eb4abd67ff6203a" - ).to_vec().encode(), - ); - assert_eq!( - call_in_wasm( - "test_twox_256", - &b"Hello world!".to_vec().encode(), - wasm_method, - &mut ext, - &test_code[..], - 8, - ).unwrap(), - hex!( - "b27dfd7f223f177f2a13647b533599af0c07f68bda23d96d059da2b451a35a74" - ).to_vec().encode(), - ); - }); + let mut ext = ext.ext(); + let test_code = WASM_BINARY; + assert_eq!( + call_in_wasm( + "test_twox_256", + &[0], + wasm_method, + &mut ext, + &test_code[..], + 8, + ).unwrap(), + hex!( + "99e9d85137db46ef4bbea33613baafd56f963c64b1f3685a4eb4abd67ff6203a" + ).to_vec().encode(), + ); + assert_eq!( + call_in_wasm( + "test_twox_256", + &b"Hello world!".to_vec().encode(), + wasm_method, + &mut ext, + &test_code[..], + 8, + ).unwrap(), + hex!( + "b27dfd7f223f177f2a13647b533599af0c07f68bda23d96d059da2b451a35a74" + ).to_vec().encode(), + ); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn twox_128_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); - ext.with_ext(|mut ext| { - let test_code 
= WASM_BINARY; - assert_eq!( - call_in_wasm( - "test_twox_128", - &[0], - wasm_method, - &mut ext, - &test_code[..], - 8, - ).unwrap(), - hex!("99e9d85137db46ef4bbea33613baafd5").to_vec().encode(), - ); - assert_eq!( - call_in_wasm( - "test_twox_128", - &b"Hello world!".to_vec().encode(), - wasm_method, - &mut ext, - &test_code[..], - 8, - ).unwrap(), - hex!("b27dfd7f223f177f2a13647b533599af").to_vec().encode(), - ); - }); + let mut ext = ext.ext(); + let test_code = WASM_BINARY; + assert_eq!( + call_in_wasm( + "test_twox_128", + &[0], + wasm_method, + &mut ext, + &test_code[..], + 8, + ).unwrap(), + hex!("99e9d85137db46ef4bbea33613baafd5").to_vec().encode(), + ); + assert_eq!( + call_in_wasm( + "test_twox_128", + &b"Hello world!".to_vec().encode(), + wasm_method, + &mut ext, + &test_code[..], + 8, + ).unwrap(), + hex!("b27dfd7f223f177f2a13647b533599af").to_vec().encode(), + ); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn ed25519_verify_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); - ext.with_ext(|mut ext| { - let test_code = WASM_BINARY; - let key = ed25519::Pair::from_seed(&blake2_256(b"test")); - let sig = key.sign(b"all ok!"); - let mut calldata = vec![]; - calldata.extend_from_slice(key.public().as_ref()); - calldata.extend_from_slice(sig.as_ref()); - - assert_eq!( - call_in_wasm( - "test_ed25519_verify", - &calldata.encode(), - wasm_method, - &mut ext, - &test_code[..], - 8, - ).unwrap(), - true.encode(), - ); - - let other_sig = key.sign(b"all is not ok!"); - let mut calldata = vec![]; - calldata.extend_from_slice(key.public().as_ref()); - calldata.extend_from_slice(other_sig.as_ref()); - - assert_eq!( - call_in_wasm( - "test_ed25519_verify", - &calldata.encode(), - wasm_method, - &mut ext, - &test_code[..], - 8, - ).unwrap(), - false.encode(), - ); - }); + let mut ext = ext.ext(); + let test_code = WASM_BINARY; + let key = 
ed25519::Pair::from_seed(&blake2_256(b"test")); + let sig = key.sign(b"all ok!"); + let mut calldata = vec![]; + calldata.extend_from_slice(key.public().as_ref()); + calldata.extend_from_slice(sig.as_ref()); + + assert_eq!( + call_in_wasm( + "test_ed25519_verify", + &calldata.encode(), + wasm_method, + &mut ext, + &test_code[..], + 8, + ).unwrap(), + true.encode(), + ); + + let other_sig = key.sign(b"all is not ok!"); + let mut calldata = vec![]; + calldata.extend_from_slice(key.public().as_ref()); + calldata.extend_from_slice(other_sig.as_ref()); + + assert_eq!( + call_in_wasm( + "test_ed25519_verify", + &calldata.encode(), + wasm_method, + &mut ext, + &test_code[..], + 8, + ).unwrap(), + false.encode(), + ); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn sr25519_verify_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); - ext.with_ext(|mut ext| { - let test_code = WASM_BINARY; - let key = sr25519::Pair::from_seed(&blake2_256(b"test")); - let sig = key.sign(b"all ok!"); - let mut calldata = vec![]; - calldata.extend_from_slice(key.public().as_ref()); - calldata.extend_from_slice(sig.as_ref()); - - assert_eq!( - call_in_wasm( - "test_sr25519_verify", - &calldata.encode(), - wasm_method, - &mut ext, - &test_code[..], - 8, - ).unwrap(), - true.encode(), - ); - - let other_sig = key.sign(b"all is not ok!"); - let mut calldata = vec![]; - calldata.extend_from_slice(key.public().as_ref()); - calldata.extend_from_slice(other_sig.as_ref()); - - assert_eq!( - call_in_wasm( - "test_sr25519_verify", - &calldata.encode(), - wasm_method, - &mut ext, - &test_code[..], - 8, - ).unwrap(), - false.encode(), - ); - }); + let mut ext = ext.ext(); + let test_code = WASM_BINARY; + let key = sr25519::Pair::from_seed(&blake2_256(b"test")); + let sig = key.sign(b"all ok!"); + let mut calldata = vec![]; + calldata.extend_from_slice(key.public().as_ref()); + 
calldata.extend_from_slice(sig.as_ref()); + + assert_eq!( + call_in_wasm( + "test_sr25519_verify", + &calldata.encode(), + wasm_method, + &mut ext, + &test_code[..], + 8, + ).unwrap(), + true.encode(), + ); + + let other_sig = key.sign(b"all is not ok!"); + let mut calldata = vec![]; + calldata.extend_from_slice(key.public().as_ref()); + calldata.extend_from_slice(other_sig.as_ref()); + + assert_eq!( + call_in_wasm( + "test_sr25519_verify", + &calldata.encode(), + wasm_method, + &mut ext, + &test_code[..], + 8, + ).unwrap(), + false.encode(), + ); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn ordered_trie_root_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); - ext.with_ext(|mut ext| { - let trie_input = vec![b"zero".to_vec(), b"one".to_vec(), b"two".to_vec()]; - let test_code = WASM_BINARY; - assert_eq!( - call_in_wasm( - "test_ordered_trie_root", - &[0], - wasm_method, - &mut ext, - &test_code[..], - 8, - ).unwrap(), - Layout::::ordered_trie_root(trie_input.iter()).as_bytes().encode(), - ); - }); + let mut ext = ext.ext(); + let trie_input = vec![b"zero".to_vec(), b"one".to_vec(), b"two".to_vec()]; + let test_code = WASM_BINARY; + assert_eq!( + call_in_wasm( + "test_ordered_trie_root", + &[0], + wasm_method, + &mut ext, + &test_code[..], + 8, + ).unwrap(), + Layout::::ordered_trie_root(trie_input.iter()).as_bytes().encode(), + ); } #[test_case(WasmExecutionMethod::Interpreted)] @@ -415,20 +407,19 @@ fn offchain_local_storage_should_work(wasm_method: WasmExecutionMethod) { let (offchain, state) = testing::TestOffchainExt::new(); ext.register_extension(OffchainExt::new(offchain)); let test_code = WASM_BINARY; - ext.with_ext(|mut ext| { - assert_eq!( - call_in_wasm( - "test_offchain_local_storage", - &[0], - wasm_method, - &mut ext, - &test_code[..], - 8, - ).unwrap(), - true.encode(), - ); - assert_eq!(state.read().persistent_storage.get(b"", 
b"test"), Some(vec![])); - }); + let mut ext = ext.ext(); + assert_eq!( + call_in_wasm( + "test_offchain_local_storage", + &[0], + wasm_method, + &mut ext, + &test_code[..], + 8, + ).unwrap(), + true.encode(), + ); + assert_eq!(state.read().persistent_storage.get(b"", b"test"), Some(vec![])); } #[test_case(WasmExecutionMethod::Interpreted)] @@ -452,18 +443,16 @@ fn offchain_http_should_work(wasm_method: WasmExecutionMethod) { ); let test_code = WASM_BINARY; - ext.with_ext(|mut ext| { - assert_eq!( - call_in_wasm( - "test_offchain_http", - &[0], - wasm_method, - &mut ext, - &test_code[..], - 8, - ).unwrap(), - true.encode(), - ); - }); + let mut ext = ext.ext(); + assert_eq!( + call_in_wasm( + "test_offchain_http", + &[0], + wasm_method, + &mut ext, + &test_code[..], + 8, + ).unwrap(), + true.encode(), + ); } - diff --git a/core/executor/src/integration_tests/sandbox.rs b/core/executor/src/integration_tests/sandbox.rs index 1a41e212d69db..a4bdf093a7045 100644 --- a/core/executor/src/integration_tests/sandbox.rs +++ b/core/executor/src/integration_tests/sandbox.rs @@ -26,73 +26,71 @@ use wabt; #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn sandbox_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); - ext.with_ext(|mut ext| { - let test_code = WASM_BINARY; - - let code = wabt::wat2wasm(r#" - (module - (import "env" "assert" (func $assert (param i32))) - (import "env" "inc_counter" (func $inc_counter (param i32) (result i32))) - (func (export "call") - (drop - (call $inc_counter (i32.const 5)) - ) + let mut ext = ext.ext(); + let test_code = WASM_BINARY; + + let code = wabt::wat2wasm(r#" + (module + (import "env" "assert" (func $assert (param i32))) + (import "env" "inc_counter" (func $inc_counter (param i32) (result i32))) + (func (export "call") + (drop + (call $inc_counter (i32.const 5)) + ) - (call $inc_counter (i32.const 3)) - ;; current counter value is on the stack + (call $inc_counter 
(i32.const 3)) + ;; current counter value is on the stack - ;; check whether current == 8 - i32.const 8 - i32.eq + ;; check whether current == 8 + i32.const 8 + i32.eq - call $assert - ) + call $assert ) - "#).unwrap().encode(); - - assert_eq!( - call_in_wasm( - "test_sandbox", - &code, - wasm_method, - &mut ext, - &test_code[..], - 8, - ).unwrap(), - true.encode(), - ); - }); + ) + "#).unwrap().encode(); + + assert_eq!( + call_in_wasm( + "test_sandbox", + &code, + wasm_method, + &mut ext, + &test_code[..], + 8, + ).unwrap(), + true.encode(), + ); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn sandbox_trap(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); - ext.with_ext(|mut ext| { - let test_code = WASM_BINARY; - - let code = wabt::wat2wasm(r#" - (module - (import "env" "assert" (func $assert (param i32))) - (func (export "call") - i32.const 0 - call $assert - ) + let mut ext = ext.ext(); + let test_code = WASM_BINARY; + + let code = wabt::wat2wasm(r#" + (module + (import "env" "assert" (func $assert (param i32))) + (func (export "call") + i32.const 0 + call $assert ) - "#).unwrap(); - - assert_eq!( - call_in_wasm( - "test_sandbox", - &code, - wasm_method, - &mut ext, - &test_code[..], - 8, - ).unwrap(), - vec![0], - ); - }); + ) + "#).unwrap(); + + assert_eq!( + call_in_wasm( + "test_sandbox", + &code, + wasm_method, + &mut ext, + &test_code[..], + 8, + ).unwrap(), + vec![0], + ); } #[test_case(WasmExecutionMethod::Interpreted)] @@ -100,272 +98,263 @@ fn sandbox_trap(wasm_method: WasmExecutionMethod) { #[should_panic(expected = "Allocator ran out of space")] fn sandbox_should_trap_when_heap_exhausted(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); - ext.with_ext(|mut ext| { - let test_code = WASM_BINARY; - - let code = wabt::wat2wasm(r#" - (module - (import "env" "assert" (func $assert (param i32))) - (func (export "call") 
- i32.const 0 - call $assert - ) + let mut ext = ext.ext(); + let test_code = WASM_BINARY; + + let code = wabt::wat2wasm(r#" + (module + (import "env" "assert" (func $assert (param i32))) + (func (export "call") + i32.const 0 + call $assert ) ) "#).unwrap().encode(); - call_in_wasm( - "test_exhaust_heap", - &code, - wasm_method, - &mut ext, - &test_code[..], - 8, - ).unwrap(); - }); + call_in_wasm( + "test_exhaust_heap", + &code, + wasm_method, + &mut ext, + &test_code[..], + 8, + ).unwrap(); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn start_called(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); - ext.with_ext(|mut ext| { - let test_code = WASM_BINARY; - - let code = wabt::wat2wasm(r#" - (module - (import "env" "assert" (func $assert (param i32))) - (import "env" "inc_counter" (func $inc_counter (param i32) (result i32))) - - ;; Start function - (start $start) - (func $start - ;; Increment counter by 1 - (drop - (call $inc_counter (i32.const 1)) - ) + let mut ext = ext.ext(); + let test_code = WASM_BINARY; + + let code = wabt::wat2wasm(r#" + (module + (import "env" "assert" (func $assert (param i32))) + (import "env" "inc_counter" (func $inc_counter (param i32) (result i32))) + + ;; Start function + (start $start) + (func $start + ;; Increment counter by 1 + (drop + (call $inc_counter (i32.const 1)) ) + ) - (func (export "call") - ;; Increment counter by 1. The current value is placed on the stack. - (call $inc_counter (i32.const 1)) + (func (export "call") + ;; Increment counter by 1. The current value is placed on the stack. + (call $inc_counter (i32.const 1)) - ;; Counter is incremented twice by 1, once there and once in `start` func. - ;; So check the returned value is equal to 2. - i32.const 2 - i32.eq - call $assert - ) + ;; Counter is incremented twice by 1, once there and once in `start` func. + ;; So check the returned value is equal to 2. 
+ i32.const 2 + i32.eq + call $assert ) - "#).unwrap().encode(); - - assert_eq!( - call_in_wasm( - "test_sandbox", - &code, - wasm_method, - &mut ext, - &test_code[..], - 8, - ).unwrap(), - true.encode(), - ); - }); + ) + "#).unwrap().encode(); + + assert_eq!( + call_in_wasm( + "test_sandbox", + &code, + wasm_method, + &mut ext, + &test_code[..], + 8, + ).unwrap(), + true.encode(), + ); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn invoke_args(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); - ext.with_ext(|mut ext| { - let test_code = WASM_BINARY; - - let code = wabt::wat2wasm(r#" - (module - (import "env" "assert" (func $assert (param i32))) - - (func (export "call") (param $x i32) (param $y i64) - ;; assert that $x = 0x12345678 - (call $assert - (i32.eq - (get_local $x) - (i32.const 0x12345678) - ) + let mut ext = ext.ext(); + let test_code = WASM_BINARY; + + let code = wabt::wat2wasm(r#" + (module + (import "env" "assert" (func $assert (param i32))) + + (func (export "call") (param $x i32) (param $y i64) + ;; assert that $x = 0x12345678 + (call $assert + (i32.eq + (get_local $x) + (i32.const 0x12345678) ) + ) - (call $assert - (i64.eq - (get_local $y) - (i64.const 0x1234567887654321) - ) + (call $assert + (i64.eq + (get_local $y) + (i64.const 0x1234567887654321) ) ) ) - "#).unwrap().encode(); - - assert_eq!( - call_in_wasm( - "test_sandbox_args", - &code, - wasm_method, - &mut ext, - &test_code[..], - 8, - ).unwrap(), - true.encode(), - ); - }); + ) + "#).unwrap().encode(); + + assert_eq!( + call_in_wasm( + "test_sandbox_args", + &code, + wasm_method, + &mut ext, + &test_code[..], + 8, + ).unwrap(), + true.encode(), + ); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn return_val(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); - 
ext.with_ext(|mut ext| { - let test_code = WASM_BINARY; - - let code = wabt::wat2wasm(r#" - (module - (func (export "call") (param $x i32) (result i32) - (i32.add - (get_local $x) - (i32.const 1) - ) + let mut ext = ext.ext(); + let test_code = WASM_BINARY; + + let code = wabt::wat2wasm(r#" + (module + (func (export "call") (param $x i32) (result i32) + (i32.add + (get_local $x) + (i32.const 1) ) ) - "#).unwrap().encode(); - - assert_eq!( - call_in_wasm( - "test_sandbox_return_val", - &code, - wasm_method, - &mut ext, - &test_code[..], - 8, - ).unwrap(), - true.encode(), - ); - }); + ) + "#).unwrap().encode(); + + assert_eq!( + call_in_wasm( + "test_sandbox_return_val", + &code, + wasm_method, + &mut ext, + &test_code[..], + 8, + ).unwrap(), + true.encode(), + ); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn unlinkable_module(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); - ext.with_ext(|mut ext| { - let test_code = WASM_BINARY; + let mut ext = ext.ext(); + let test_code = WASM_BINARY; - let code = wabt::wat2wasm(r#" - (module - (import "env" "non-existent" (func)) + let code = wabt::wat2wasm(r#" + (module + (import "env" "non-existent" (func)) - (func (export "call") - ) + (func (export "call") ) - "#).unwrap().encode(); - - assert_eq!( - call_in_wasm( - "test_sandbox_instantiate", - &code, - wasm_method, - &mut ext, - &test_code[..], - 8, - ).unwrap(), - 1u8.encode(), - ); - }); + ) + "#).unwrap().encode(); + + assert_eq!( + call_in_wasm( + "test_sandbox_instantiate", + &code, + wasm_method, + &mut ext, + &test_code[..], + 8, + ).unwrap(), + 1u8.encode(), + ); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn corrupted_module(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); - ext.with_ext(|mut ext| { - let test_code = WASM_BINARY; - - // 
Corrupted wasm file - let code = vec![0u8, 0, 0, 0, 1, 0, 0, 0].encode(); - - assert_eq!( - call_in_wasm( - "test_sandbox_instantiate", - &code, - wasm_method, - &mut ext, - &test_code[..], - 8, - ).unwrap(), - 1u8.encode(), - ); - }); + let mut ext = ext.ext(); + let test_code = WASM_BINARY; + + // Corrupted wasm file + let code = vec![0u8, 0, 0, 0, 1, 0, 0, 0].encode(); + + assert_eq!( + call_in_wasm( + "test_sandbox_instantiate", + &code, + wasm_method, + &mut ext, + &test_code[..], + 8, + ).unwrap(), + 1u8.encode(), + ); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn start_fn_ok(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); - ext.with_ext(|mut ext| { - let test_code = WASM_BINARY; + let mut ext = ext.ext(); + let test_code = WASM_BINARY; - let code = wabt::wat2wasm(r#" - (module - (func (export "call") - ) - - (func $start - ) + let code = wabt::wat2wasm(r#" + (module + (func (export "call") + ) - (start $start) + (func $start ) - "#).unwrap().encode(); - - assert_eq!( - call_in_wasm( - "test_sandbox_instantiate", - &code, - wasm_method, - &mut ext, - &test_code[..], - 8, - ).unwrap(), - 0u8.encode(), - ); - }); + + (start $start) + ) + "#).unwrap().encode(); + + assert_eq!( + call_in_wasm( + "test_sandbox_instantiate", + &code, + wasm_method, + &mut ext, + &test_code[..], + 8, + ).unwrap(), + 0u8.encode(), + ); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn start_fn_traps(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); - ext.with_ext(|mut ext| { - let test_code = WASM_BINARY; - - let code = wabt::wat2wasm(r#" - (module - (func (export "call") - ) + let mut ext = ext.ext(); + let test_code = WASM_BINARY; - (func $start - unreachable - ) + let code = wabt::wat2wasm(r#" + (module + (func (export "call") + ) - (start $start) + (func $start 
+ unreachable ) - "#).unwrap().encode(); - - assert_eq!( - call_in_wasm( - "test_sandbox_instantiate", - &code, - wasm_method, - &mut ext, - &test_code[..], - 8, - ).unwrap(), - 2u8.encode(), - ); - }); -} + + (start $start) + ) + "#).unwrap().encode(); + + assert_eq!( + call_in_wasm( + "test_sandbox_instantiate", + &code, + wasm_method, + &mut ext, + &test_code[..], + 8, + ).unwrap(), + 2u8.encode(), + ); +} \ No newline at end of file diff --git a/core/executor/src/lib.rs b/core/executor/src/lib.rs index 3b3d907a87f8e..0638a71d1c873 100644 --- a/core/executor/src/lib.rs +++ b/core/executor/src/lib.rs @@ -102,16 +102,15 @@ mod tests { #[test] fn call_in_interpreted_wasm_works() { let mut ext = TestExternalities::default(); - ext.with_ext(|mut ext| { - let res = call_in_wasm::<_, runtime_io::SubstrateHostFunctions>( - "test_empty_return", - &[], - WasmExecutionMethod::Interpreted, - &mut ext, - &WASM_BINARY, - 8, - ).unwrap(); - assert_eq!(res, vec![0u8; 0]); - }); + let mut ext = ext.ext(); + let res = call_in_wasm::<_, runtime_io::SubstrateHostFunctions>( + "test_empty_return", + &[], + WasmExecutionMethod::Interpreted, + &mut ext, + &WASM_BINARY, + 8, + ).unwrap(); + assert_eq!(res, vec![0u8; 0]); } } diff --git a/core/state-machine/src/changes_trie/mod.rs b/core/state-machine/src/changes_trie/mod.rs index 9d88352cb7465..46e084bb76abd 100644 --- a/core/state-machine/src/changes_trie/mod.rs +++ b/core/state-machine/src/changes_trie/mod.rs @@ -197,6 +197,16 @@ impl<'a, H, Number> State<'a, H, Number> { } } +impl<'a, H, Number: Clone> Clone for State<'a, H, Number> { + fn clone(&self) -> Self { + State { + config: self.config.clone(), + zero: self.zero.clone(), + storage: self.storage, + } + } +} + /// Create state where changes tries are disabled. 
pub fn disabled_state<'a, H, Number>() -> Option> { None diff --git a/core/state-machine/src/ext.rs b/core/state-machine/src/ext.rs index ac8fece23b3ac..b58534f59aa37 100644 --- a/core/state-machine/src/ext.rs +++ b/core/state-machine/src/ext.rs @@ -76,7 +76,7 @@ pub struct Ext<'a, H, N, B> where H: Hasher, B: 'a + Backend { /// `storage_root` is called and the cache is cleared on every subsequent change. storage_transaction: Option<(B::Transaction, H::Out)>, /// Changes trie state to read from. - changes_trie_state: Option<&'a ChangesTrieState<'a, H, N>>, + changes_trie_state: Option>, /// The changes trie transaction necessary to commit to the changes trie backend. /// Set to Some when `storage_changes_root` is called. Could be replaced later /// by calling `storage_changes_root` again => never used as cache. @@ -103,7 +103,7 @@ where pub fn new( overlay: &'a mut OverlayedChanges, backend: &'a B, - changes_trie_state: Option<&'a ChangesTrieState<'a, H, N>>, + changes_trie_state: Option>, extensions: Option<&'a mut Extensions>, ) -> Self { Ext { @@ -496,7 +496,7 @@ where let _guard = panic_handler::AbortGuard::force_abort(); self.changes_trie_transaction = build_changes_trie::<_, H, N>( self.backend, - self.changes_trie_state.clone(), + self.changes_trie_state.as_ref(), self.overlay, parent_hash, )?; @@ -584,7 +584,7 @@ mod tests { let storage = TestChangesTrieStorage::with_blocks(vec![(99, Default::default())]); let state = Some(ChangesTrieState::new(changes_trie_config(), Zero::zero(), &storage)); let backend = TestBackend::default(); - let mut ext = TestExt::new(&mut overlay, &backend, state.as_ref(), None); + let mut ext = TestExt::new(&mut overlay, &backend, state, None); let root = hex!("bb0c2ef6e1d36d5490f9766cfcc7dfe2a6ca804504c3bb206053890d6dd02376").into(); assert_eq!( ext.storage_changes_root(Default::default()).unwrap(), @@ -599,7 +599,7 @@ mod tests { let storage = TestChangesTrieStorage::with_blocks(vec![(99, Default::default())]); let state = 
Some(ChangesTrieState::new(changes_trie_config(), Zero::zero(), &storage)); let backend = TestBackend::default(); - let mut ext = TestExt::new(&mut overlay, &backend, state.as_ref(), None); + let mut ext = TestExt::new(&mut overlay, &backend, state, None); let root = hex!("96f5aae4690e7302737b6f9b7f8567d5bbb9eac1c315f80101235a92d9ec27f4").into(); assert_eq!( ext.storage_changes_root(Default::default()).unwrap(), diff --git a/core/state-machine/src/lib.rs b/core/state-machine/src/lib.rs index eedd677237d1c..a8d8f8e52cd09 100644 --- a/core/state-machine/src/lib.rs +++ b/core/state-machine/src/lib.rs @@ -258,7 +258,7 @@ impl<'a, B, H, N, Exec> StateMachine<'a, B, H, N, Exec> where let mut ext = Ext::new( self.overlay, self.backend, - self.changes_trie_state.as_ref(), + self.changes_trie_state.clone(), Some(&mut self.extensions), ); @@ -906,11 +906,10 @@ mod tests { }; { - let state = changes_trie::disabled_state::<_, u64>(); let mut ext = Ext::new( &mut overlay, backend, - state.as_ref(), + changes_trie::disabled_state::<_, u64>(), None, ); ext.clear_prefix(b"ab"); @@ -936,11 +935,10 @@ mod tests { let mut state = InMemory::::default(); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); - let changes_trie_state = changes_trie::disabled_state::<_, u64>(); let mut ext = Ext::new( &mut overlay, backend, - changes_trie_state.as_ref(), + changes_trie::disabled_state::<_, u64>(), None, ); diff --git a/core/state-machine/src/overlayed_changes.rs b/core/state-machine/src/overlayed_changes.rs index 1395c17656100..b795f6b59037d 100644 --- a/core/state-machine/src/overlayed_changes.rs +++ b/core/state-machine/src/overlayed_changes.rs @@ -402,11 +402,10 @@ mod tests { ..Default::default() }; - let changes_trie_state = crate::changes_trie::disabled_state::<_, u64>(); let mut ext = Ext::new( &mut overlay, &backend, - changes_trie_state.as_ref(), + crate::changes_trie::disabled_state::<_, u64>(), None, ); const ROOT: [u8; 32] = 
hex!("39245109cef3758c2eed2ccba8d9b370a917850af3824bc8348d505df2c298fa"); diff --git a/core/state-machine/src/testing.rs b/core/state-machine/src/testing.rs index 4a989a0707ce6..6a94ab0e25807 100644 --- a/core/state-machine/src/testing.rs +++ b/core/state-machine/src/testing.rs @@ -51,26 +51,20 @@ pub struct TestExternalities=Blake2Hasher, N: ChangesTrieBlo impl, N: ChangesTrieBlockNumber> TestExternalities { /// Get externalities implementation. - pub fn with_ext<'a, F: FnOnce(Ext>) -> T, T>(&'a mut self, f: F) -> T { - let overlay = &mut self.overlay; - let backend = &self.backend; - let extensions = &mut self.extensions; - let changes_trie_storage = &self.changes_trie_storage; - let changes_trie_state = match self.changes_trie_config.clone() { - Some(changes_trie_config) => Some(ChangesTrieState { - config: changes_trie_config, - zero: 0.into(), - storage: changes_trie_storage, - }), - None => None, - }; - - f(Ext::new( - overlay, - backend, - changes_trie_state.as_ref(), - Some(extensions), - )) + pub fn ext(&mut self) -> Ext> { + Ext::new( + &mut self.overlay, + &self.backend, + match self.changes_trie_config.clone() { + Some(config) => Some(ChangesTrieState { + config, + zero: 0.into(), + storage: &self.changes_trie_storage, + }), + None => None, + }, + Some(&mut self.extensions), + ) } /// Create a new instance of `TestExternalities` with storage. @@ -141,9 +135,8 @@ impl, N: ChangesTrieBlockNumber> TestExternalities { /// /// Returns the result of the given closure. 
pub fn execute_with(&mut self, execute: impl FnOnce() -> R) -> R { - self.with_ext(|mut ext| { - externalities::set_and_run_with_externalities(&mut ext, execute) - }) + let mut ext = self.ext(); + externalities::set_and_run_with_externalities(&mut ext, execute) } } @@ -189,24 +182,22 @@ mod tests { #[test] fn commit_should_work() { let mut ext = TestExternalities::::default(); - ext.with_ext(|mut ext| { - ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); - ext.set_storage(b"dog".to_vec(), b"puppy".to_vec()); - ext.set_storage(b"dogglesworth".to_vec(), b"cat".to_vec()); - const ROOT: [u8; 32] = hex!("2a340d3dfd52f5992c6b117e9e45f479e6da5afffafeb26ab619cf137a95aeb8"); - assert_eq!(ext.storage_root(), H256::from(ROOT)); - }); + let mut ext = ext.ext(); + ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); + ext.set_storage(b"dog".to_vec(), b"puppy".to_vec()); + ext.set_storage(b"dogglesworth".to_vec(), b"cat".to_vec()); + const ROOT: [u8; 32] = hex!("2a340d3dfd52f5992c6b117e9e45f479e6da5afffafeb26ab619cf137a95aeb8"); + assert_eq!(ext.storage_root(), H256::from(ROOT)); } #[test] fn set_and_retrieve_code() { let mut ext = TestExternalities::::default(); - ext.with_ext(|mut ext| { - let code = vec![1, 2, 3]; - ext.set_storage(CODE.to_vec(), code.clone()); + let mut ext = ext.ext(); + let code = vec![1, 2, 3]; + ext.set_storage(CODE.to_vec(), code.clone()); - assert_eq!(&ext.storage(CODE).unwrap(), &code); - }); + assert_eq!(&ext.storage(CODE).unwrap(), &code); } #[test] diff --git a/core/test-runtime/src/system.rs b/core/test-runtime/src/system.rs index 8c8077a7364f3..3c7b81b6462cd 100644 --- a/core/test-runtime/src/system.rs +++ b/core/test-runtime/src/system.rs @@ -33,7 +33,7 @@ use srml_system::Trait; use crate::{ AccountId, BlockNumber, Extrinsic, Transfer, H256 as Hash, Block, Header, Digest, AuthorityId }; -use primitives::{Blake2Hasher, storage::well_known_keys, ChangesTrieConfiguration}; +use primitives::{storage::well_known_keys, 
ChangesTrieConfiguration}; const NONCE_OF: &[u8] = b"nonce:"; const BALANCE_OF: &[u8] = b"balance:"; @@ -418,7 +418,8 @@ mod tests { #[test] fn block_import_works_wasm() { - block_import_works(|b, ext| ext.with_ext(|mut ext| { + block_import_works(|b, ext| { + let mut ext = ext.ext(); executor().call::<_, NeverNativeValue, fn() -> _>( &mut ext, "Core_execute_block", @@ -426,7 +427,7 @@ mod tests { false, None, ).0.unwrap(); - })) + }) } fn block_import_with_transaction_works(block_executor: F) @@ -510,7 +511,8 @@ mod tests { #[test] fn block_import_with_transaction_works_wasm() { - block_import_with_transaction_works(|b, ext| ext.with_ext(|mut ext| { + block_import_with_transaction_works(|b, ext| { + let mut ext = ext.ext(); executor().call::<_, NeverNativeValue, fn() -> _>( &mut ext, "Core_execute_block", @@ -518,6 +520,6 @@ mod tests { false, None, ).0.unwrap(); - })) + }) } } diff --git a/node/executor/benches/bench.rs b/node/executor/benches/bench.rs index 53e544ee0e969..01c2b05c51f39 100644 --- a/node/executor/benches/bench.rs +++ b/node/executor/benches/bench.rs @@ -59,9 +59,7 @@ fn new_test_ext(genesis_config: &GenesisConfig) -> TestExternalities _>( - &mut ext, - "Core_execute_block", - &block.0, - use_native, - None, - ).0.unwrap() - ); + executor.call::<_, NeverNativeValue, fn() -> _>( + &mut test_ext.ext(), + "Core_execute_block", + &block.0, + use_native, + None, + ).0.unwrap(); } }, BatchSize::LargeInput, diff --git a/node/executor/src/lib.rs b/node/executor/src/lib.rs index 3e7363293f22f..65ffc774f9f8c 100644 --- a/node/executor/src/lib.rs +++ b/node/executor/src/lib.rs @@ -128,13 +128,14 @@ mod tests { use_native: bool, native_call: Option, ) -> (Result>, bool) { - t.with_ext(|mut ext| executor().call::<_, R, NC>( - &mut ext, + let mut t = t.ext(); + executor().call::<_, R, NC>( + &mut t, method, data, use_native, native_call, - )) + ) } #[test] @@ -771,7 +772,7 @@ mod tests { #[test] fn wasm_big_block_import_fails() { let mut t = 
new_test_ext(COMPACT_CODE, false); - t.with_ext(|mut ext| set_heap_pages(&mut ext, 4)); + set_heap_pages(&mut t.ext(), 4); let result = executor_call:: _>( &mut t, @@ -898,7 +899,7 @@ mod tests { None, ).0.unwrap(); - assert!(t.with_ext(|mut ext| ext.storage_changes_root(GENESIS_HASH.into()).unwrap()).is_some()); + assert!(t.ext().storage_changes_root(GENESIS_HASH.into()).unwrap().is_some()); } #[test] @@ -914,7 +915,7 @@ mod tests { None, ).0.unwrap(); - assert!(t.with_ext(|mut ext| ext.storage_changes_root(GENESIS_HASH.into()).unwrap()).is_some()); + assert!(t.ext().storage_changes_root(GENESIS_HASH.into()).unwrap().is_some()); } #[test] From 6da8b23abc8c615fa2128781fda020653690389f Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 12 Nov 2019 12:06:09 +0300 Subject: [PATCH 41/63] revertes some unnecessary changes --- core/executor/src/integration_tests/mod.rs | 1 + core/executor/src/integration_tests/sandbox.rs | 2 +- core/primitives/src/changes_trie.rs | 2 -- node/executor/src/lib.rs | 1 + 4 files changed, 3 insertions(+), 3 deletions(-) diff --git a/core/executor/src/integration_tests/mod.rs b/core/executor/src/integration_tests/mod.rs index e907aec6d6224..6db9911d44ab4 100644 --- a/core/executor/src/integration_tests/mod.rs +++ b/core/executor/src/integration_tests/mod.rs @@ -456,3 +456,4 @@ fn offchain_http_should_work(wasm_method: WasmExecutionMethod) { true.encode(), ); } + diff --git a/core/executor/src/integration_tests/sandbox.rs b/core/executor/src/integration_tests/sandbox.rs index a4bdf093a7045..c18b848acce7e 100644 --- a/core/executor/src/integration_tests/sandbox.rs +++ b/core/executor/src/integration_tests/sandbox.rs @@ -357,4 +357,4 @@ fn start_fn_traps(wasm_method: WasmExecutionMethod) { ).unwrap(), 2u8.encode(), ); -} \ No newline at end of file +} diff --git a/core/primitives/src/changes_trie.rs b/core/primitives/src/changes_trie.rs index 4ad087c3811c9..5e88485a03994 100644 --- a/core/primitives/src/changes_trie.rs +++ 
b/core/primitives/src/changes_trie.rs @@ -281,11 +281,9 @@ mod tests { assert_eq!(config(1, 1).prev_max_level_digest_block(0u64, 16), None); assert_eq!(config(2, 1).prev_max_level_digest_block(0u64, 16), Some(16)); assert_eq!(config(4, 1).prev_max_level_digest_block(0u64, 16), Some(16)); - assert_eq!(config(4, 2).prev_max_level_digest_block(0u64, 16), Some(16)); assert_eq!(config(4, 2).prev_max_level_digest_block(0u64, 17), Some(16)); assert_eq!(config(4, 2).prev_max_level_digest_block(0u64, 33), Some(32)); - assert_eq!(config(32, 1).prev_max_level_digest_block(0u64, 16), None); assert_eq!(config(2, 3).prev_max_level_digest_block(0u64, 10), Some(8)); assert_eq!(config(2, 3).prev_max_level_digest_block(0u64, 8), Some(8)); diff --git a/node/executor/src/lib.rs b/node/executor/src/lib.rs index 65ffc774f9f8c..ebff2da1a4b96 100644 --- a/node/executor/src/lib.rs +++ b/node/executor/src/lib.rs @@ -772,6 +772,7 @@ mod tests { #[test] fn wasm_big_block_import_fails() { let mut t = new_test_ext(COMPACT_CODE, false); + set_heap_pages(&mut t.ext(), 4); let result = executor_call:: _>( From d302d1e88eb37a667c05cdcc585092a91f822148 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 12 Nov 2019 12:10:58 +0300 Subject: [PATCH 42/63] reverted unnecessary changes --- core/client/src/light/blockchain.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/core/client/src/light/blockchain.rs b/core/client/src/light/blockchain.rs index 510d6f5ae3a0a..202f94cd74fed 100644 --- a/core/client/src/light/blockchain.rs +++ b/core/client/src/light/blockchain.rs @@ -29,7 +29,7 @@ use crate::backend::{AuxStore, NewBlockState}; use crate::blockchain::{ Backend as BlockchainBackend, BlockStatus, Cache as BlockchainCache, HeaderBackend as BlockchainHeaderBackend, Info as BlockchainInfo, ProvideCache, - well_known_cache_keys, + well_known_cache_keys }; use crate::cht; use crate::error::{Error as ClientError, Result as ClientResult}; @@ -38,7 +38,9 @@ use 
crate::light::fetcher::{Fetcher, RemoteHeaderRequest}; /// Light client blockchain storage. pub trait Storage: AuxStore + BlockchainHeaderBackend + HeaderMetadata { /// Store new header. Should refuse to revert any finalized blocks. - + /// + /// Takes new authorities, the leaf state of the new block, and + /// any auxiliary storage updates to place in the same operation. fn import_header( &self, header: Block::Header, From f4a82deee2befd4b87582012c76411064a3b5305 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 12 Nov 2019 12:37:03 +0300 Subject: [PATCH 43/63] fix compilation + unnecessary changes --- core/client/src/light/backend.rs | 2 +- core/state-machine/src/testing.rs | 1 + node/executor/benches/bench.rs | 6 +++--- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/core/client/src/light/backend.rs b/core/client/src/light/backend.rs index e1db5cec9cdd0..4b546953b1218 100644 --- a/core/client/src/light/backend.rs +++ b/core/client/src/light/backend.rs @@ -56,7 +56,7 @@ pub struct ImportOperation { set_head: Option>, storage_update: Option>, changes_trie_config_update: Option>, - _phantom: ::std::marker::PhantomData<(S)>, + _phantom: ::std::marker::PhantomData, } /// Either in-memory genesis state, or locally-unavailable state. 
diff --git a/core/state-machine/src/testing.rs b/core/state-machine/src/testing.rs index 6a94ab0e25807..cd31d85d116a5 100644 --- a/core/state-machine/src/testing.rs +++ b/core/state-machine/src/testing.rs @@ -194,6 +194,7 @@ mod tests { fn set_and_retrieve_code() { let mut ext = TestExternalities::::default(); let mut ext = ext.ext(); + let code = vec![1, 2, 3]; ext.set_storage(CODE.to_vec(), code.clone()); diff --git a/node/executor/benches/bench.rs b/node/executor/benches/bench.rs index 01c2b05c51f39..e72c28467fa7f 100644 --- a/node/executor/benches/bench.rs +++ b/node/executor/benches/bench.rs @@ -140,9 +140,9 @@ fn test_blocks(genesis_config: &GenesisConfig, executor: &NativeExecutor Date: Tue, 12 Nov 2019 14:42:57 +0300 Subject: [PATCH 44/63] (restart CI) --- core/client/db/src/changes_tries_storage.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/core/client/db/src/changes_tries_storage.rs b/core/client/db/src/changes_tries_storage.rs index bd332aeee42cf..774d036dde7b1 100644 --- a/core/client/db/src/changes_tries_storage.rs +++ b/core/client/db/src/changes_tries_storage.rs @@ -167,7 +167,14 @@ impl> DbChangesTrieStorage { let new_configuration = match new_configuration { Some(new_configuration) => new_configuration, None if !finalized => return Ok(DbCacheTransactionOps::empty().into()), - None => return self.finalize(tx, parent_block.hash, block.hash, block.number, Some(new_header), cache_tx), + None => return self.finalize( + tx, + parent_block.hash, + block.hash, + block.number, + Some(new_header), + cache_tx, + ), }; // update configuration cache From eae183bffd0fb888cac9f9266b40a243dfec9124 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 12 Nov 2019 15:42:27 +0300 Subject: [PATCH 45/63] fix cache update when finalizing multiple blocks --- core/client/db/src/lib.rs | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/core/client/db/src/lib.rs b/core/client/db/src/lib.rs index 
7ab513025555f..4f0ddcff5a22e 100644 --- a/core/client/db/src/lib.rs +++ b/core/client/db/src/lib.rs @@ -1056,7 +1056,7 @@ impl> Backend { meta_updates.push((hash, number, pending_block.leaf_state.is_best(), finalized)); - Some((number, hash, enacted, retracted, displaced_leaf, is_best, cache, changes_trie_cache_ops)) + Some((number, hash, enacted, retracted, displaced_leaf, is_best, cache)) } else { None }; @@ -1082,10 +1082,6 @@ impl> Backend { let write_result = self.storage.db.write(transaction).map_err(db_err); - if let Some(changes_trie_build_cache_update) = operation.changes_trie_build_cache_update { - self.changes_tries_storage.commit_build_cache(changes_trie_build_cache_update); - } - if let Some(( number, hash, @@ -1094,7 +1090,6 @@ impl> Backend { displaced_leaf, is_best, mut cache, - changes_trie_cache_ops, )) = imported { if let Err(e) = write_result { let mut leaves = self.blockchain.leaves.write(); @@ -1110,8 +1105,6 @@ impl> Backend { return Err(e) } - self.changes_tries_storage.post_commit(changes_trie_cache_ops); - cache.sync_cache( &enacted, &retracted, @@ -1123,6 +1116,11 @@ impl> Backend { ); } + if let Some(changes_trie_build_cache_update) = operation.changes_trie_build_cache_update { + self.changes_tries_storage.commit_build_cache(changes_trie_build_cache_update); + } + self.changes_tries_storage.post_commit(changes_trie_cache_ops); + if let Some((enacted, retracted)) = cache_update { self.shared_cache.lock().sync(&enacted, &retracted); } @@ -2035,6 +2033,8 @@ pub(crate) mod tests { let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); let block1 = insert_header(&backend, 1, block0, None, Default::default()); let block2 = insert_header(&backend, 2, block1, None, Default::default()); + let block3 = insert_header(&backend, 3, block2, None, Default::default()); + let block4 = insert_header(&backend, 4, block3, None, Default::default()); { let mut op = backend.begin_operation().unwrap(); 
backend.begin_state_operation(&mut op, BlockId::Hash(block0)).unwrap(); @@ -2042,6 +2042,13 @@ pub(crate) mod tests { op.mark_finalized(BlockId::Hash(block2), None).unwrap(); backend.commit_operation(op).unwrap(); } + { + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(block2)).unwrap(); + op.mark_finalized(BlockId::Hash(block3), None).unwrap(); + op.mark_finalized(BlockId::Hash(block4), None).unwrap(); + backend.commit_operation(op).unwrap(); + } } #[test] From 6d2b4dbb1e9d040320ee883f4bac4ab5490638ae Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 12 Nov 2019 19:09:41 +0300 Subject: [PATCH 46/63] fixed tests --- core/client/db/src/cache/list_cache.rs | 10 +++++----- core/client/db/src/cache/mod.rs | 18 ++++++++---------- core/client/db/src/changes_tries_storage.rs | 2 +- core/client/db/src/light.rs | 16 ++++++++-------- core/client/src/blockchain.rs | 2 +- core/client/src/client.rs | 8 +++++--- core/consensus/aura/src/lib.rs | 14 ++++++++------ 7 files changed, 36 insertions(+), 34 deletions(-) diff --git a/core/client/db/src/cache/list_cache.rs b/core/client/db/src/cache/list_cache.rs index a68a279fb7f4d..1a7b045309689 100644 --- a/core/client/db/src/cache/list_cache.rs +++ b/core/client/db/src/cache/list_cache.rs @@ -157,7 +157,7 @@ impl> ListCache // BUT since we're not guaranteeing to provide correct values for forks // behind the finalized block, check if the block is finalized first if !chain::is_finalized_block(&self.storage, &at, Bounded::max_value())? 
{ - return Ok(None); + return Err(ClientError::NotInFinalizedChain); } self.best_finalized_entry.as_ref() @@ -840,8 +840,8 @@ pub mod tests { // when block is earlier than best finalized block AND it is not finalized // --- 50 --- // ----------> [100] - assert_eq!(ListCache::<_, u64, _>::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)) - .unwrap().value_at_block(&test_id(50)).unwrap(), None); + assert!(ListCache::<_, u64, _>::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)) + .unwrap().value_at_block(&test_id(50)).is_err()); // when block is earlier than best finalized block AND it is finalized AND value is some // [30] ---- 50 ---> [100] assert_eq!(ListCache::new( @@ -865,14 +865,14 @@ pub mod tests { // when block is parallel to the best finalized block // ---- 100 // ---> [100] - assert_eq!(ListCache::new( + assert!(ListCache::new( DummyStorage::new() .with_meta(Some(test_id(100)), Vec::new()) .with_id(50, H256::from_low_u64_be(50)) .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }) .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), PruningStrategy::ByDepth(1024), test_id(100) - ).unwrap().value_at_block(&ComplexBlockId::new(H256::from_low_u64_be(2), 100)).unwrap(), None); + ).unwrap().value_at_block(&ComplexBlockId::new(H256::from_low_u64_be(2), 100)).is_err()); // when block is later than last finalized block AND there are no forks AND finalized value is Some // ---> [100] --- 200 diff --git a/core/client/db/src/cache/mod.rs b/core/client/db/src/cache/mod.rs index 5529c008961de..10a116a0812e6 100644 --- a/core/client/db/src/cache/mod.rs +++ b/core/client/db/src/cache/mod.rs @@ -353,40 +353,38 @@ impl BlockchainCache for DbCacheSync { &self, key: &CacheKeyId, at: &BlockId, - ) -> Option<((NumberFor, Block::Hash), Option<(NumberFor, Block::Hash)>, Vec)> { + ) -> ClientResult, Block::Hash), Option<(NumberFor, Block::Hash)>, Vec)>> { let mut cache = 
self.0.write(); - let storage = cache.get_cache(*key).ok()?.storage(); + let cache = cache.get_cache(*key)?; + let storage = cache.storage(); let db = storage.db(); let columns = storage.columns(); let at = match *at { BlockId::Hash(hash) => { - let header = utils::read_header::( + let header = utils::require_header::( &**db, columns.key_lookup, columns.header, - BlockId::Hash(hash.clone())).ok()??; + BlockId::Hash(hash.clone()))?; ComplexBlockId::new(hash, *header.number()) }, BlockId::Number(number) => { - let hash = utils::read_header::( + let hash = utils::require_header::( &**db, columns.key_lookup, columns.header, - BlockId::Number(number.clone())).ok()??.hash(); + BlockId::Number(number.clone()))?.hash(); ComplexBlockId::new(hash, number) }, }; - cache.cache_at - .get(key)? - .value_at_block(&at) + cache.value_at_block(&at) .map(|block_and_value| block_and_value.map(|(begin_block, end_block, value)| ( (begin_block.number, begin_block.hash), end_block.map(|end_block| (end_block.number, end_block.hash)), value, ))) - .ok()? } } diff --git a/core/client/db/src/changes_tries_storage.rs b/core/client/db/src/changes_tries_storage.rs index 774d036dde7b1..5f94c61735dbf 100644 --- a/core/client/db/src/changes_tries_storage.rs +++ b/core/client/db/src/changes_tries_storage.rs @@ -387,7 +387,7 @@ where fn configuration_at(&self, at: &BlockId) -> ClientResult> { self.cache - .get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, at) + .get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, at)? 
.and_then(|(zero, end, encoded)| Decode::decode(&mut &encoded[..]).ok() .map(|config| ChangesTrieConfigurationRange { zero, end, config })) .ok_or_else(|| ClientError::ErrorReadingChangesTriesConfig) diff --git a/core/client/db/src/light.rs b/core/client/db/src/light.rs index e0adc7e59c647..c6f813bc32c57 100644 --- a/core/client/db/src/light.rs +++ b/core/client/db/src/light.rs @@ -929,7 +929,7 @@ pub(crate) mod tests { } fn get_authorities(cache: &dyn BlockchainCache, at: BlockId) -> Option> { - cache.get_at(&well_known_cache_keys::AUTHORITIES, &at) + cache.get_at(&well_known_cache_keys::AUTHORITIES, &at).unwrap_or(None) .and_then(|(_, _, val)| Decode::decode(&mut &val[..]).ok()) } @@ -1113,8 +1113,8 @@ pub(crate) mod tests { let (genesis_hash, storage) = { let db = LightStorage::::new_test(); - // before cache is initialized => None - assert_eq!(db.cache().get_at(b"test", &BlockId::Number(0)), None); + // before cache is initialized => Err + assert!(db.cache().get_at(b"test", &BlockId::Number(0)).is_err()); // insert genesis block (no value for cache is provided) let mut genesis_hash = None; @@ -1125,14 +1125,14 @@ pub(crate) mod tests { }); // after genesis is inserted => None - assert_eq!(db.cache().get_at(b"test", &BlockId::Number(0)), None); + assert_eq!(db.cache().get_at(b"test", &BlockId::Number(0)).unwrap(), None); // initialize cache db.cache().initialize(b"test", vec![42]).unwrap(); // after genesis is inserted + cache is initialized => Some assert_eq!( - db.cache().get_at(b"test", &BlockId::Number(0)), + db.cache().get_at(b"test", &BlockId::Number(0)).unwrap(), Some(((0, genesis_hash.unwrap()), None, vec![42])), ); @@ -1142,7 +1142,7 @@ pub(crate) mod tests { // restart && check that after restart value is read from the cache let db = LightStorage::::from_kvdb(storage as Arc<_>).expect("failed to create test-db"); assert_eq!( - db.cache().get_at(b"test", &BlockId::Number(0)), + db.cache().get_at(b"test", &BlockId::Number(0)).unwrap(), Some(((0, 
genesis_hash.unwrap()), None, vec![42])), ); } @@ -1156,7 +1156,7 @@ pub(crate) mod tests { // insert block#0 && block#1 (no value for cache is provided) let hash0 = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); assert_eq!( - db.cache().get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, &BlockId::Number(0)) + db.cache().get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, &BlockId::Number(0)).unwrap() .map(|(_, _, v)| ChangesTrieConfiguration::decode(&mut &v[..]).unwrap()), None, ); @@ -1170,7 +1170,7 @@ pub(crate) mod tests { header }); assert_eq!( - db.cache().get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, &BlockId::Number(1)) + db.cache().get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, &BlockId::Number(1)).unwrap() .map(|(_, _, v)| Option::::decode(&mut &v[..]).unwrap()), Some(new_config), ); diff --git a/core/client/src/blockchain.rs b/core/client/src/blockchain.rs index 73b7c138d020e..27587b3e8aaa1 100644 --- a/core/client/src/blockchain.rs +++ b/core/client/src/blockchain.rs @@ -232,7 +232,7 @@ pub trait Cache: Send + Sync { &self, key: &well_known_cache_keys::Id, block: &BlockId, - ) -> Option<((NumberFor, Block::Hash), Option<(NumberFor, Block::Hash)>, Vec)>; + ) -> Result, Block::Hash), Option<(NumberFor, Block::Hash)>, Vec)>>; } /// Blockchain info diff --git a/core/client/src/client.rs b/core/client/src/client.rs index ce8365635878b..244e98250d446 100644 --- a/core/client/src/client.rs +++ b/core/client/src/client.rs @@ -2898,13 +2898,15 @@ pub(crate) mod tests { .unwrap().bake().unwrap(); client.import(BlockOrigin::Own, b2.clone()).unwrap(); + // prepare B3 before we finalize A2, because otherwise we won't be able to + // read changes trie configuration after A2 is finalized + let b3 = client.new_block_at(&BlockId::Hash(b2.hash()), Default::default()) + .unwrap().bake().unwrap(); + // we will finalize A2 which should make it impossible to import a new // B3 at the same height but that doesnt't include it 
client.finalize_block(BlockId::Hash(a2.hash()), None).unwrap(); - let b3 = client.new_block_at(&BlockId::Hash(b2.hash()), Default::default()) - .unwrap().bake().unwrap(); - let import_err = client.import(BlockOrigin::Own, b3).err().unwrap(); let expected_err = ConsensusError::ClientImport( error::Error::NotInFinalizedChain.to_string() diff --git a/core/consensus/aura/src/lib.rs b/core/consensus/aura/src/lib.rs index 0953ea6c973a3..1207eb3876609 100644 --- a/core/consensus/aura/src/lib.rs +++ b/core/consensus/aura/src/lib.rs @@ -608,19 +608,20 @@ fn initialize_authorities_cache(client: &C) -> Result<(), ConsensusErro }; // check if we already have initialized the cache + let map_err = |error| consensus_common::Error::from(consensus_common::Error::ClientImport( + format!( + "Error initializing authorities cache: {}", + error, + ))); + let genesis_id = BlockId::Number(Zero::zero()); let genesis_authorities: Option> = cache - .get_at(&well_known_cache_keys::AUTHORITIES, &genesis_id) + .get_at(&well_known_cache_keys::AUTHORITIES, &genesis_id).map_err(map_err)? 
.and_then(|(_, _, v)| Decode::decode(&mut &v[..]).ok()); if genesis_authorities.is_some() { return Ok(()); } - let map_err = |error| consensus_common::Error::from(consensus_common::Error::ClientImport( - format!( - "Error initializing authorities cache: {}", - error, - ))); let genesis_authorities = authorities(client, &genesis_id)?; cache.initialize(&well_known_cache_keys::AUTHORITIES, genesis_authorities.encode()) .map_err(map_err)?; @@ -639,6 +640,7 @@ fn authorities(client: &C, at: &BlockId) -> Result, Consensus .cache() .and_then(|cache| cache .get_at(&well_known_cache_keys::AUTHORITIES, at) + .unwrap_or(None) .and_then(|(_, _, v)| Decode::decode(&mut &v[..]).ok()) ) .or_else(|| AuraApi::authorities(&*client.runtime_api(), at).ok()) From af3c925c8780b79dbaffc950a29cfc212316dd81 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Sun, 5 Jan 2020 12:12:24 +0300 Subject: [PATCH 47/63] collect_extrinsics -> set_collect_extrinsics --- primitives/state-machine/src/lib.rs | 2 +- primitives/state-machine/src/overlayed_changes.rs | 4 ++-- primitives/state-machine/src/testing.rs | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 03492c3d098f6..ccca3357f1f01 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -389,7 +389,7 @@ impl<'a, B, H, N, Exec> StateMachine<'a, B, H, N, Exec> where ) -> CallResult { let changes_tries_enabled = self.changes_trie_state.is_some(); - self.overlay.collect_extrinsics(changes_tries_enabled); + self.overlay.set_collect_extrinsics(changes_tries_enabled); let result = { let orig_prospective = self.overlay.prospective.clone(); diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index 2dc80fdb12e8d..32413f0894e8a 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -89,7 
+89,7 @@ impl OverlayedChanges { } /// Ask to collect/not to collect extrinsics indices where key(s) has been changed. - pub fn collect_extrinsics(&mut self, collect_extrinsics: bool) { + pub fn set_collect_extrinsics(&mut self, collect_extrinsics: bool) { self.collect_extrinsics = collect_extrinsics; } @@ -509,7 +509,7 @@ mod tests { #[test] fn extrinsic_changes_are_collected() { let mut overlay = OverlayedChanges::default(); - overlay.collect_extrinsics(true); + overlay.set_collect_extrinsics(true); overlay.set_storage(vec![100], Some(vec![101])); diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index de988617838b2..fa4e5d018875f 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -76,7 +76,7 @@ impl, N: ChangesTrieBlockNumber> TestExternalities { let mut overlay = OverlayedChanges::default(); let changes_trie_config = storage.top.get(CHANGES_TRIE_CONFIG) .and_then(|v| Decode::decode(&mut &v[..]).ok()); - overlay.collect_extrinsics(changes_trie_config.is_some()); + overlay.set_collect_extrinsics(changes_trie_config.is_some()); assert!(storage.top.keys().all(|key| !is_child_storage_key(key))); assert!(storage.children.keys().all(|key| is_child_storage_key(key))); From 935fb7b846eca6ae815b30afe485899e50dc2724 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Sun, 5 Jan 2020 12:20:54 +0300 Subject: [PATCH 48/63] restore lost test --- primitives/state-machine/src/lib.rs | 34 +++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index ccca3357f1f01..c996e58200886 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -1010,4 +1010,38 @@ mod tests { vec![(b"value2".to_vec(), None)], ); } + + #[test] + fn child_storage_uuid() { + const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); + const CHILD_INFO_2: 
ChildInfo<'static> = ChildInfo::new_default(b"unique_id_2"); + use crate::trie_backend::tests::test_trie; + let mut overlay = OverlayedChanges::default(); + + let subtrie1 = ChildStorageKey::from_slice(b":child_storage:default:sub_test1").unwrap(); + let subtrie2 = ChildStorageKey::from_slice(b":child_storage:default:sub_test2").unwrap(); + let mut transaction = { + let backend = test_trie(); + let changes_trie_storage = InMemoryChangesTrieStorage::::new(); + let mut ext = Ext::new( + &mut overlay, + &backend, + Some(&changes_trie_storage), + None, + ); + ext.set_child_storage(subtrie1, CHILD_INFO_1, b"abc".to_vec(), b"def".to_vec()); + ext.set_child_storage(subtrie2, CHILD_INFO_2, b"abc".to_vec(), b"def".to_vec()); + ext.storage_root(); + (ext.transaction().0).0 + }; + let mut duplicate = false; + for (k, (value, rc)) in transaction.drain().iter() { + // look for a key inserted twice: transaction rc is 2 + if *rc == 2 { + duplicate = true; + println!("test duplicate for {:?} {:?}", k, value); + } + } + assert!(!duplicate); + } } From 9da28c8e22ad94cf0dcd61c29f2240e08a9cb66b Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Sun, 5 Jan 2020 12:21:05 +0300 Subject: [PATCH 49/63] do not calculate block number twice --- primitives/state-machine/src/changes_trie/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index 3223cd4765dca..b98bf30e36979 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -245,7 +245,7 @@ pub fn build_changes_trie<'a, B: Backend, H: Hasher, Number: BlockNumber>( let config_range = ConfigurationRange { config: &state.config, zero: state.zero.clone(), - end: if is_config_changed { Some(parent.number.clone() + One::one()) } else { None }, + end: if is_config_changed { Some(block.clone()) } else { None }, }; // storage errors are considered fatal (similar 
to situations when runtime fetches values from storage) From bf7fea2703500d1945ad3e0f94f2ea1209f12c5c Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Sun, 5 Jan 2020 12:21:36 +0300 Subject: [PATCH 50/63] Update primitives/blockchain/src/error.rs Co-Authored-By: cheme --- primitives/blockchain/src/error.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/primitives/blockchain/src/error.rs b/primitives/blockchain/src/error.rs index 996fc51cfc2da..0044a6d20f46b 100644 --- a/primitives/blockchain/src/error.rs +++ b/primitives/blockchain/src/error.rs @@ -106,7 +106,7 @@ pub enum Error { /// Changes tries are not supported. #[display(fmt = "Changes tries are not supported by the runtime")] ChangesTriesNotSupported, - /// Error reading changes trie configuration. + /// Error reading changes tries configuration. #[display(fmt = "Error reading changes tries configuration")] ErrorReadingChangesTriesConfig, /// Key changes query has failed. From 3d570f193a97bc7ea303ac4e4b2dd59eb48b629b Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Sun, 5 Jan 2020 12:27:50 +0300 Subject: [PATCH 51/63] map_err -> unwrap_or --- client/consensus/aura/src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 38c38f049d31b..6f92a9dce80ec 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -661,7 +661,8 @@ fn initialize_authorities_cache(client: &C) -> Result<(), ConsensusErro let genesis_id = BlockId::Number(Zero::zero()); let genesis_authorities: Option> = cache - .get_at(&well_known_cache_keys::AUTHORITIES, &genesis_id).map_err(map_err)? 
+ .get_at(&well_known_cache_keys::AUTHORITIES, &genesis_id) + .unwrap_or(None) .and_then(|(_, _, v)| Decode::decode(&mut &v[..]).ok()); if genesis_authorities.is_some() { return Ok(()); From b89819163d9231e66c9eeee321ba3ab2d3ffced4 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Sun, 5 Jan 2020 12:34:31 +0300 Subject: [PATCH 52/63] document get_at Result --- primitives/blockchain/src/backend.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/primitives/blockchain/src/backend.rs b/primitives/blockchain/src/backend.rs index a290c4a7964dc..b79d0110b3427 100644 --- a/primitives/blockchain/src/backend.rs +++ b/primitives/blockchain/src/backend.rs @@ -228,6 +228,8 @@ pub trait Cache: Send + Sync { /// Returns cached value by the given key. /// /// Returned tuple is the range where value has been active and the value itself. + /// Fails if read from cache storage fails or if the value for block is discarded + /// (i.e. if block is earlier that best finalized, but it is not in canonical chain). fn get_at( &self, key: &well_known_cache_keys::Id, From 7db559694afcf1e42e77f34b09c4f19bf581a36f Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Sun, 5 Jan 2020 12:40:09 +0300 Subject: [PATCH 53/63] delete abandoned file --- core/client/db/src/changes_tries_storage.rs | 1010 ------------------- 1 file changed, 1010 deletions(-) delete mode 100644 core/client/db/src/changes_tries_storage.rs diff --git a/core/client/db/src/changes_tries_storage.rs b/core/client/db/src/changes_tries_storage.rs deleted file mode 100644 index 5f94c61735dbf..0000000000000 --- a/core/client/db/src/changes_tries_storage.rs +++ /dev/null @@ -1,1010 +0,0 @@ -// Copyright 2019 Parity Technologies (UK) Ltd. -// This file is part of Substrate. 
- -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! DB-backed changes tries storage. - -use std::collections::{HashMap, HashSet}; -use std::sync::Arc; -use hash_db::Prefix; -use kvdb::{KeyValueDB, DBTransaction}; -use codec::{Decode, Encode}; -use parking_lot::RwLock; -use client::error::{Error as ClientError, Result as ClientResult}; -use trie::MemoryDB; -use client::backend::{PrunableStateChangesTrieStorage, ChangesTrieConfigurationRange}; -use client::blockchain::{well_known_cache_keys, Cache as BlockchainCache}; -use primitives::{H256, Blake2Hasher, ChangesTrieConfiguration, convert_hash}; -use sr_primitives::traits::{ - Block as BlockT, Header as HeaderT, NumberFor, One, Zero, CheckedSub, -}; -use sr_primitives::generic::{BlockId, DigestItem, ChangesTrieSignal}; -use state_machine::{DBValue, ChangesTrieBuildCache, ChangesTrieCacheAction}; -use crate::utils::{self, Meta, meta_keys, db_err}; -use crate::cache::{ - DbCacheSync, DbCache, DbCacheTransactionOps, - ComplexBlockId, EntryType as CacheEntryType, -}; - -/// Extract new changes trie configuration (if available) from the header. -pub fn extract_new_configuration(header: &Header) -> Option<&Option> { - header.digest() - .log(DigestItem::as_changes_trie_signal) - .and_then(ChangesTrieSignal::as_new_configuration) -} - -/// Opaque configuration cache transaction. During its lifetime, noone should modify cache. 
This is currently -/// guaranteed because import lock is held during block import/finalization. -pub struct DbChangesTrieStorageTransaction { - /// Cache operations that must be performed after db transaction is comitted. - cache_ops: DbCacheTransactionOps, - /// New configuration (if changed at current block). - new_config: Option>, -} - -impl DbChangesTrieStorageTransaction { - /// Consume self and return transaction with given new configuration. - pub fn with_new_config(mut self, new_config: Option>) -> Self { - self.new_config = new_config; - self - } -} - -impl From> for DbChangesTrieStorageTransaction { - fn from(cache_ops: DbCacheTransactionOps) -> Self { - DbChangesTrieStorageTransaction { - cache_ops, - new_config: None, - } - } -} - -/// Changes tries storage. -/// -/// Stores all tries in separate DB column. -/// Lock order: meta, tries_meta, cache, build_cache. -pub struct DbChangesTrieStorage { - db: Arc, - meta_column: Option, - changes_tries_column: Option, - key_lookup_column: Option, - header_column: Option, - meta: Arc, Block::Hash>>>, - tries_meta: RwLock>, - min_blocks_to_keep: Option, - /// The cache stores all ever existing changes tries configurations. - cache: DbCacheSync, - /// Build cache is a map of block => set of storage keys changed at this block. - /// They're used to build digest blocks - instead of reading+parsing tries from db - /// we just use keys sets from the cache. - build_cache: RwLock>>, -} - -/// Persistent struct that contains all the changes tries metadata. -#[derive(Decode, Encode, Debug)] -struct ChangesTriesMeta { - /// Oldest unpruned max-level (or skewed) digest trie blocks range. - /// The range is inclusive from both sides. 
- /// Is None only if: - /// 1) we haven't yet finalized any blocks (except genesis) - /// 2) if best_finalized_block - min_blocks_to_keep points to the range where changes tries are disabled - /// 3) changes tries pruning is disabled - pub oldest_digest_range: Option<(NumberFor, NumberFor)>, - /// End block (inclusive) of oldest pruned max-level (or skewed) digest trie blocks range. - /// It is guaranteed that we have no any changes tries before (and including) this block. - /// It is guaranteed that all existing changes tries after this block are not yet pruned (if created). - pub oldest_pruned_digest_range_end: NumberFor, -} - -impl> DbChangesTrieStorage { - /// Create new changes trie storage. - pub fn new( - db: Arc, - meta_column: Option, - changes_tries_column: Option, - key_lookup_column: Option, - header_column: Option, - cache_column: Option, - meta: Arc, Block::Hash>>>, - min_blocks_to_keep: Option, - ) -> ClientResult { - let (finalized_hash, finalized_number, genesis_hash) = { - let meta = meta.read(); - (meta.finalized_hash, meta.finalized_number, meta.genesis_hash) - }; - let tries_meta = read_tries_meta(&*db, meta_column)?; - Ok(Self { - db: db.clone(), - meta_column, - changes_tries_column, - key_lookup_column, - header_column, - meta, - min_blocks_to_keep, - cache: DbCacheSync(RwLock::new(DbCache::new( - db.clone(), - key_lookup_column, - header_column, - cache_column, - genesis_hash, - ComplexBlockId::new(finalized_hash, finalized_number), - ))), - build_cache: RwLock::new(ChangesTrieBuildCache::new()), - tries_meta: RwLock::new(tries_meta), - }) - } - - /// Commit new changes trie. 
- pub fn commit( - &self, - tx: &mut DBTransaction, - mut changes_trie: MemoryDB, - parent_block: ComplexBlockId, - block: ComplexBlockId, - new_header: &Block::Header, - finalized: bool, - new_configuration: Option>, - cache_tx: Option>, - ) -> ClientResult> { - // insert changes trie, associated with block, into DB - for (key, (val, _)) in changes_trie.drain() { - tx.put(self.changes_tries_column, &key[..], &val); - } - - // if configuration has not been changed AND block is not finalized => nothing to do here - let new_configuration = match new_configuration { - Some(new_configuration) => new_configuration, - None if !finalized => return Ok(DbCacheTransactionOps::empty().into()), - None => return self.finalize( - tx, - parent_block.hash, - block.hash, - block.number, - Some(new_header), - cache_tx, - ), - }; - - // update configuration cache - let mut cache_at = HashMap::new(); - cache_at.insert(well_known_cache_keys::CHANGES_TRIE_CONFIG, new_configuration.encode()); - Ok(DbChangesTrieStorageTransaction::from(match cache_tx { - Some(cache_tx) => self.cache.0.write() - .transaction_with_ops(tx, cache_tx.cache_ops) - .on_block_insert( - parent_block, - block, - cache_at, - if finalized { CacheEntryType::Final } else { CacheEntryType::NonFinal }, - )? - .into_ops(), - None => self.cache.0.write() - .transaction(tx) - .on_block_insert( - parent_block, - block, - cache_at, - if finalized { CacheEntryType::Final } else { CacheEntryType::NonFinal }, - )? - .into_ops(), - }).with_new_config(Some(new_configuration))) - } - - /// Called when block is finalized. 
- pub fn finalize( - &self, - tx: &mut DBTransaction, - parent_block_hash: Block::Hash, - block_hash: Block::Hash, - block_num: NumberFor, - new_header: Option<&Block::Header>, - cache_tx: Option>, - ) -> ClientResult> { - // prune obsolete changes tries - self.prune(tx, block_hash, block_num, new_header.clone(), cache_tx.as_ref())?; - - // if we have inserted the block that we're finalizing in the same transaction - // => then we have already finalized it from the commit() call - if cache_tx.is_some() { - if let Some(new_header) = new_header { - if new_header.hash() == block_hash { - return Ok(cache_tx.expect("guarded by cache_tx.is_some(); qed")); - } - } - } - - // and finalize configuration cache entries - let block = ComplexBlockId::new(block_hash, block_num); - let parent_block_num = block_num.checked_sub(&One::one()).unwrap_or_else(|| Zero::zero()); - let parent_block = ComplexBlockId::new(parent_block_hash, parent_block_num); - Ok(match cache_tx { - Some(cache_tx) => DbChangesTrieStorageTransaction::from( - self.cache.0.write() - .transaction_with_ops(tx, cache_tx.cache_ops) - .on_block_finalize( - parent_block, - block, - )? - .into_ops() - ).with_new_config(cache_tx.new_config), - None => DbChangesTrieStorageTransaction::from( - self.cache.0.write() - .transaction(tx) - .on_block_finalize( - parent_block, - block, - )? - .into_ops() - ), - }) - } - - /// When block is reverted. - pub fn revert( - &self, - tx: &mut DBTransaction, - block: &ComplexBlockId, - ) -> ClientResult> { - Ok(self.cache.0.write().transaction(tx) - .on_block_revert(block)? - .into_ops() - .into()) - } - - /// When transaction has been committed. - pub fn post_commit(&self, tx: Option>) { - if let Some(tx) = tx { - self.cache.0.write().commit(tx.cache_ops) - .expect("only fails if cache with given name isn't loaded yet;\ - cache is already loaded because there is tx; qed"); - } - } - - /// Commit changes into changes trie build cache. 
- pub fn commit_build_cache(&self, cache_update: ChangesTrieCacheAction>) { - self.build_cache.write().perform(cache_update); - } - - /// Prune obsolete changes tries. - fn prune( - &self, - tx: &mut DBTransaction, - block_hash: Block::Hash, - block_num: NumberFor, - new_header: Option<&Block::Header>, - cache_tx: Option<&DbChangesTrieStorageTransaction>, - ) -> ClientResult<()> { - // never prune on archive nodes - let min_blocks_to_keep = match self.min_blocks_to_keep { - Some(min_blocks_to_keep) => min_blocks_to_keep, - None => return Ok(()), - }; - - let mut tries_meta = self.tries_meta.write(); - let mut next_digest_range_start = block_num; - loop { - // prune oldest digest if it is known - // it could be unknown if: - // 1) either we're finalizing block#1 - // 2) or we are (or were) in period where changes tries are disabled - if let Some((begin, end)) = tries_meta.oldest_digest_range { - if block_num <= end || block_num - end <= min_blocks_to_keep.into() { - break; - } - - tries_meta.oldest_pruned_digest_range_end = end; - state_machine::prune_changes_tries( - &*self, - begin, - end, - &state_machine::ChangesTrieAnchorBlockId { - hash: convert_hash(&block_hash), - number: block_num, - }, - |node| tx.delete(self.changes_tries_column, node.as_ref()), - ); - - next_digest_range_start = end + One::one(); - } - - // proceed to the next configuration range - let next_digest_range_start_hash = match block_num == next_digest_range_start { - true => block_hash, - false => utils::require_header::( - &*self.db, - self.key_lookup_column, - self.header_column, - BlockId::Number(next_digest_range_start), - )?.hash(), - }; - - let config_for_new_block = new_header - .map(|header| *header.number() == next_digest_range_start) - .unwrap_or(false); - let next_config = match cache_tx { - Some(cache_tx) if config_for_new_block && cache_tx.new_config.is_some() => { - let config = cache_tx - .new_config - .clone() - .expect("guarded by is_some(); qed"); - 
ChangesTrieConfigurationRange { - zero: (block_num, block_hash), - end: None, - config, - } - }, - _ if config_for_new_block => { - self.configuration_at(&BlockId::Hash(*new_header.expect( - "config_for_new_block is only true when new_header is passed; qed" - ).parent_hash()))? - }, - _ => self.configuration_at(&BlockId::Hash(next_digest_range_start_hash))?, - }; - if let Some(config) = next_config.config { - let mut oldest_digest_range = config - .next_max_level_digest_range(next_config.zero.0, next_digest_range_start) - .unwrap_or_else(|| (next_digest_range_start, next_digest_range_start)); - - if let Some(end) = next_config.end { - if end.0 < oldest_digest_range.1 { - oldest_digest_range.1 = end.0; - } - } - - tries_meta.oldest_digest_range = Some(oldest_digest_range); - continue; - } - - tries_meta.oldest_digest_range = None; - break; - } - - write_tries_meta(tx, self.meta_column, &*tries_meta); - Ok(()) - } -} - -impl client::backend::PrunableStateChangesTrieStorage - for DbChangesTrieStorage -where - Block: BlockT, -{ - fn storage(&self) -> &dyn state_machine::ChangesTrieStorage> { - self - } - - fn configuration_at(&self, at: &BlockId) -> ClientResult> { - self.cache - .get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, at)? 
- .and_then(|(zero, end, encoded)| Decode::decode(&mut &encoded[..]).ok() - .map(|config| ChangesTrieConfigurationRange { zero, end, config })) - .ok_or_else(|| ClientError::ErrorReadingChangesTriesConfig) - } - - fn oldest_pruned_digest_range_end(&self) -> NumberFor { - self.tries_meta.read().oldest_pruned_digest_range_end - } -} - -impl state_machine::ChangesTrieRootsStorage> - for DbChangesTrieStorage -where - Block: BlockT, -{ - fn build_anchor( - &self, - hash: H256, - ) -> Result>, String> { - utils::read_header::(&*self.db, self.key_lookup_column, self.header_column, BlockId::Hash(hash)) - .map_err(|e| e.to_string()) - .and_then(|maybe_header| maybe_header.map(|header| - state_machine::ChangesTrieAnchorBlockId { - hash, - number: *header.number(), - } - ).ok_or_else(|| format!("Unknown header: {}", hash))) - } - - fn root( - &self, - anchor: &state_machine::ChangesTrieAnchorBlockId>, - block: NumberFor, - ) -> Result, String> { - // check API requirement: we can't get NEXT block(s) based on anchor - if block > anchor.number { - return Err(format!("Can't get changes trie root at {} using anchor at {}", block, anchor.number)); - } - - // we need to get hash of the block to resolve changes trie root - let block_id = if block <= self.meta.read().finalized_number { - // if block is finalized, we could just read canonical hash - BlockId::Number(block) - } else { - // the block is not finalized - let mut current_num = anchor.number; - let mut current_hash: Block::Hash = convert_hash(&anchor.hash); - let maybe_anchor_header: Block::Header = utils::require_header::( - &*self.db, self.key_lookup_column, self.header_column, BlockId::Number(current_num) - ).map_err(|e| e.to_string())?; - if maybe_anchor_header.hash() == current_hash { - // if anchor is canonicalized, then the block is also canonicalized - BlockId::Number(block) - } else { - // else (block is not finalized + anchor is not canonicalized): - // => we should find the required block hash by traversing - // 
back from the anchor to the block with given number - while current_num != block { - let current_header: Block::Header = utils::require_header::( - &*self.db, self.key_lookup_column, self.header_column, BlockId::Hash(current_hash) - ).map_err(|e| e.to_string())?; - - current_hash = *current_header.parent_hash(); - current_num = current_num - One::one(); - } - - BlockId::Hash(current_hash) - } - }; - - Ok(utils::require_header::(&*self.db, self.key_lookup_column, self.header_column, block_id) - .map_err(|e| e.to_string())? - .digest().log(DigestItem::as_changes_trie_root) - .map(|root| H256::from_slice(root.as_ref()))) - } -} - -impl state_machine::ChangesTrieStorage> - for DbChangesTrieStorage -where - Block: BlockT, -{ - fn as_roots_storage(&self) -> &dyn state_machine::ChangesTrieRootsStorage> { - self - } - - fn with_cached_changed_keys( - &self, - root: &H256, - functor: &mut dyn FnMut(&HashMap>, HashSet>>), - ) -> bool { - self.build_cache.read().with_changed_keys(root, functor) - } - - fn get(&self, key: &H256, _prefix: Prefix) -> Result, String> { - self.db.get(self.changes_tries_column, &key[..]) - .map_err(|err| format!("{}", err)) - } -} - -/// Read changes tries metadata from database. -fn read_tries_meta( - db: &dyn KeyValueDB, - meta_column: Option, -) -> ClientResult> { - match db.get(meta_column, meta_keys::CHANGES_TRIES_META).map_err(db_err)? { - Some(h) => match Decode::decode(&mut &h[..]) { - Ok(h) => Ok(h), - Err(err) => Err(ClientError::Backend(format!("Error decoding changes tries metadata: {}", err))), - }, - None => Ok(ChangesTriesMeta { - oldest_digest_range: None, - oldest_pruned_digest_range_end: Zero::zero(), - }), - } -} - -/// Write changes tries metadata from database. 
-fn write_tries_meta( - tx: &mut DBTransaction, - meta_column: Option, - meta: &ChangesTriesMeta, -) { - tx.put(meta_column, meta_keys::CHANGES_TRIES_META, &meta.encode()); -} - -#[cfg(test)] -mod tests { - use hash_db::EMPTY_PREFIX; - use client::backend::{ - Backend as ClientBackend, NewBlockState, BlockImportOperation, PrunableStateChangesTrieStorage, - }; - use client::blockchain::HeaderBackend as BlockchainHeaderBackend; - use sr_primitives::testing::{Digest, Header}; - use sr_primitives::traits::{Hash, BlakeTwo256}; - use state_machine::{ChangesTrieRootsStorage, ChangesTrieStorage}; - use crate::Backend; - use crate::tests::{Block, insert_header, prepare_changes}; - use super::*; - - fn changes(number: u64) -> Option, Vec)>> { - Some(vec![(number.to_le_bytes().to_vec(), number.to_le_bytes().to_vec())]) - } - - fn insert_header_with_configuration_change( - backend: &Backend, - number: u64, - parent_hash: H256, - changes: Option, Vec)>>, - new_configuration: Option, - ) -> H256 { - let mut digest = Digest::default(); - let mut changes_trie_update = Default::default(); - if let Some(changes) = changes { - let (root, update) = prepare_changes(changes); - digest.push(DigestItem::ChangesTrieRoot(root)); - changes_trie_update = update; - } - digest.push(DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration(new_configuration))); - - let header = Header { - number, - parent_hash, - state_root: BlakeTwo256::trie_root(Vec::new()), - digest, - extrinsics_root: Default::default(), - }; - let header_hash = header.hash(); - - let block_id = if number == 0 { - BlockId::Hash(Default::default()) - } else { - BlockId::Number(number - 1) - }; - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, block_id).unwrap(); - op.set_block_data(header, None, None, NewBlockState::Best).unwrap(); - op.update_changes_trie((changes_trie_update, ChangesTrieCacheAction::Clear)).unwrap(); - backend.commit_operation(op).unwrap(); - - header_hash - 
} - - #[test] - fn changes_trie_storage_works() { - let backend = Backend::::new_test(1000, 100); - backend.changes_tries_storage.meta.write().finalized_number = 1000; - - let check_changes = |backend: &Backend, block: u64, changes: Vec<(Vec, Vec)>| { - let (changes_root, mut changes_trie_update) = prepare_changes(changes); - let anchor = state_machine::ChangesTrieAnchorBlockId { - hash: backend.blockchain().header(BlockId::Number(block)).unwrap().unwrap().hash(), - number: block - }; - assert_eq!(backend.changes_tries_storage.root(&anchor, block), Ok(Some(changes_root))); - - let storage = backend.changes_tries_storage.storage(); - for (key, (val, _)) in changes_trie_update.drain() { - assert_eq!(storage.get(&key, EMPTY_PREFIX), Ok(Some(val))); - } - }; - - let changes0 = vec![(b"key_at_0".to_vec(), b"val_at_0".to_vec())]; - let changes1 = vec![ - (b"key_at_1".to_vec(), b"val_at_1".to_vec()), - (b"another_key_at_1".to_vec(), b"another_val_at_1".to_vec()), - ]; - let changes2 = vec![(b"key_at_2".to_vec(), b"val_at_2".to_vec())]; - - let block0 = insert_header(&backend, 0, Default::default(), Some(changes0.clone()), Default::default()); - let block1 = insert_header(&backend, 1, block0, Some(changes1.clone()), Default::default()); - let _ = insert_header(&backend, 2, block1, Some(changes2.clone()), Default::default()); - - // check that the storage contains tries for all blocks - check_changes(&backend, 0, changes0); - check_changes(&backend, 1, changes1); - check_changes(&backend, 2, changes2); - } - - #[test] - fn changes_trie_storage_works_with_forks() { - let backend = Backend::::new_test(1000, 100); - - let changes0 = vec![(b"k0".to_vec(), b"v0".to_vec())]; - let changes1 = vec![(b"k1".to_vec(), b"v1".to_vec())]; - let changes2 = vec![(b"k2".to_vec(), b"v2".to_vec())]; - let block0 = insert_header(&backend, 0, Default::default(), Some(changes0.clone()), Default::default()); - let block1 = insert_header(&backend, 1, block0, Some(changes1.clone()), 
Default::default()); - let block2 = insert_header(&backend, 2, block1, Some(changes2.clone()), Default::default()); - - let changes2_1_0 = vec![(b"k3".to_vec(), b"v3".to_vec())]; - let changes2_1_1 = vec![(b"k4".to_vec(), b"v4".to_vec())]; - let block2_1_0 = insert_header(&backend, 3, block2, Some(changes2_1_0.clone()), Default::default()); - let block2_1_1 = insert_header(&backend, 4, block2_1_0, Some(changes2_1_1.clone()), Default::default()); - - let changes2_2_0 = vec![(b"k5".to_vec(), b"v5".to_vec())]; - let changes2_2_1 = vec![(b"k6".to_vec(), b"v6".to_vec())]; - let block2_2_0 = insert_header(&backend, 3, block2, Some(changes2_2_0.clone()), Default::default()); - let block2_2_1 = insert_header(&backend, 4, block2_2_0, Some(changes2_2_1.clone()), Default::default()); - - // finalize block1 - backend.changes_tries_storage.meta.write().finalized_number = 1; - - // branch1: when asking for finalized block hash - let (changes1_root, _) = prepare_changes(changes1); - let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block2_1_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 1), Ok(Some(changes1_root))); - - // branch2: when asking for finalized block hash - let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block2_2_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 1), Ok(Some(changes1_root))); - - // branch1: when asking for non-finalized block hash (search by traversal) - let (changes2_1_0_root, _) = prepare_changes(changes2_1_0); - let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block2_1_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_1_0_root))); - - // branch2: when asking for non-finalized block hash (search using canonicalized hint) - let (changes2_2_0_root, _) = prepare_changes(changes2_2_0); - let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block2_2_1, number: 4 }; - 
assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_2_0_root))); - - // finalize first block of branch2 (block2_2_0) - backend.changes_tries_storage.meta.write().finalized_number = 3; - - // branch2: when asking for finalized block of this branch - assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_2_0_root))); - - // branch1: when asking for finalized block of other branch - // => result is incorrect (returned for the block of branch1), but this is expected, - // because the other fork is abandoned (forked before finalized header) - let anchor = state_machine::ChangesTrieAnchorBlockId { hash: block2_1_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_2_0_root))); - } - - #[test] - fn changes_tries_are_pruned_on_finalization() { - let mut backend = Backend::::new_test(1000, 100); - backend.changes_tries_storage.min_blocks_to_keep = Some(8); - - let parent_hash = |number| { - if number == 0 { - Default::default() - } else { - backend.blockchain().header(BlockId::Number(number - 1)).unwrap().unwrap().hash() - } - }; - - let insert_regular_header = |with_changes, number| { - insert_header( - &backend, - number, - parent_hash(number), - if with_changes { changes(number) } else { None }, - Default::default(), - ); - }; - - let is_pruned = |number| { - let trie_root = backend - .blockchain() - .header(BlockId::Number(number)) - .unwrap().unwrap() - .digest() - .log(DigestItem::as_changes_trie_root) - .cloned(); - match trie_root { - Some(trie_root) => backend.changes_tries_storage.get(&trie_root, EMPTY_PREFIX).unwrap().is_none(), - None => true, - } - }; - - let finalize_block = |number| { - let header = backend.blockchain().header(BlockId::Number(number)).unwrap().unwrap(); - let mut tx = DBTransaction::new(); - let cache_ops = backend.changes_tries_storage.finalize( - &mut tx, - *header.parent_hash(), - header.hash(), - number, - None, - None, - ).unwrap(); - 
backend.storage.db.write(tx).unwrap(); - backend.changes_tries_storage.post_commit(Some(cache_ops)); - }; - - // configuration ranges: - // (0; 6] - None - // [7; 17] - Some(2^2): D2 is built at #10, #14; SD is built at #17 - // [18; 21] - None - // [22; 32] - Some(8^1): D1 is built at #29; SD is built at #32 - // [33; ... - Some(1) - let config_at_6 = Some(ChangesTrieConfiguration::new(2, 2)); - let config_at_17 = None; - let config_at_21 = Some(ChangesTrieConfiguration::new(8, 1)); - let config_at_32 = Some(ChangesTrieConfiguration::new(1, 0)); - - (0..6).for_each(|number| insert_regular_header(false, number)); - insert_header_with_configuration_change(&backend, 6, parent_hash(6), None, config_at_6); - (7..17).for_each(|number| insert_regular_header(true, number)); - insert_header_with_configuration_change(&backend, 17, parent_hash(17), changes(17), config_at_17); - (18..21).for_each(|number| insert_regular_header(false, number)); - insert_header_with_configuration_change(&backend, 21, parent_hash(21), None, config_at_21); - (22..32).for_each(|number| insert_regular_header(true, number)); - insert_header_with_configuration_change(&backend, 32, parent_hash(32), changes(32), config_at_32); - (33..50).for_each(|number| insert_regular_header(true, number)); - - // when only genesis is finalized, nothing is pruned - (0..=6).for_each(|number| assert!(is_pruned(number))); - (7..=17).for_each(|number| assert!(!is_pruned(number))); - (18..=21).for_each(|number| assert!(is_pruned(number))); - (22..50).for_each(|number| assert!(!is_pruned(number))); - - // when blocks [1; 18] are finalized, nothing is pruned - (1..=18).for_each(|number| finalize_block(number)); - (0..=6).for_each(|number| assert!(is_pruned(number))); - (7..=17).for_each(|number| assert!(!is_pruned(number))); - (18..=21).for_each(|number| assert!(is_pruned(number))); - (22..50).for_each(|number| assert!(!is_pruned(number))); - - // when block 19 is finalized, changes tries for blocks [7; 10] are pruned - 
finalize_block(19); - (0..=10).for_each(|number| assert!(is_pruned(number))); - (11..=17).for_each(|number| assert!(!is_pruned(number))); - (18..=21).for_each(|number| assert!(is_pruned(number))); - (22..50).for_each(|number| assert!(!is_pruned(number))); - - // when blocks [20; 22] are finalized, nothing is pruned - (20..=22).for_each(|number| finalize_block(number)); - (0..=10).for_each(|number| assert!(is_pruned(number))); - (11..=17).for_each(|number| assert!(!is_pruned(number))); - (18..=21).for_each(|number| assert!(is_pruned(number))); - (22..50).for_each(|number| assert!(!is_pruned(number))); - - // when block 23 is finalized, changes tries for blocks [11; 14] are pruned - finalize_block(23); - (0..=14).for_each(|number| assert!(is_pruned(number))); - (15..=17).for_each(|number| assert!(!is_pruned(number))); - (18..=21).for_each(|number| assert!(is_pruned(number))); - (22..50).for_each(|number| assert!(!is_pruned(number))); - - // when blocks [24; 25] are finalized, nothing is pruned - (24..=25).for_each(|number| finalize_block(number)); - (0..=14).for_each(|number| assert!(is_pruned(number))); - (15..=17).for_each(|number| assert!(!is_pruned(number))); - (18..=21).for_each(|number| assert!(is_pruned(number))); - (22..50).for_each(|number| assert!(!is_pruned(number))); - - // when block 26 is finalized, changes tries for blocks [15; 17] are pruned - finalize_block(26); - (0..=21).for_each(|number| assert!(is_pruned(number))); - (22..50).for_each(|number| assert!(!is_pruned(number))); - - // when blocks [27; 37] are finalized, nothing is pruned - (27..=37).for_each(|number| finalize_block(number)); - (0..=21).for_each(|number| assert!(is_pruned(number))); - (22..50).for_each(|number| assert!(!is_pruned(number))); - - // when block 38 is finalized, changes tries for blocks [22; 29] are pruned - finalize_block(38); - (0..=29).for_each(|number| assert!(is_pruned(number))); - (30..50).for_each(|number| assert!(!is_pruned(number))); - - // when blocks [39; 40] 
are finalized, nothing is pruned - (39..=40).for_each(|number| finalize_block(number)); - (0..=29).for_each(|number| assert!(is_pruned(number))); - (30..50).for_each(|number| assert!(!is_pruned(number))); - - // when block 41 is finalized, changes tries for blocks [30; 32] are pruned - finalize_block(41); - (0..=32).for_each(|number| assert!(is_pruned(number))); - (33..50).for_each(|number| assert!(!is_pruned(number))); - - // when block 42 is finalized, changes trie for block 33 is pruned - finalize_block(42); - (0..=33).for_each(|number| assert!(is_pruned(number))); - (34..50).for_each(|number| assert!(!is_pruned(number))); - - // when block 43 is finalized, changes trie for block 34 is pruned - finalize_block(43); - (0..=34).for_each(|number| assert!(is_pruned(number))); - (35..50).for_each(|number| assert!(!is_pruned(number))); - } - - #[test] - fn changes_tries_configuration_is_updated_on_block_insert() { - let backend = Backend::::new_test(1000, 100); - - // configurations at blocks - let config_at_1 = Some(ChangesTrieConfiguration { - digest_interval: 4, - digest_levels: 2, - }); - let config_at_3 = Some(ChangesTrieConfiguration { - digest_interval: 8, - digest_levels: 1, - }); - let config_at_5 = None; - let config_at_7 = Some(ChangesTrieConfiguration { - digest_interval: 8, - digest_levels: 1, - }); - - // insert some blocks - let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); - let block1 = insert_header_with_configuration_change(&backend, 1, block0, None, config_at_1.clone()); - let block2 = insert_header(&backend, 2, block1, None, Default::default()); - let block3 = insert_header_with_configuration_change(&backend, 3, block2, None, config_at_3.clone()); - let block4 = insert_header(&backend, 4, block3, None, Default::default()); - let block5 = insert_header_with_configuration_change(&backend, 5, block4, None, config_at_5.clone()); - let block6 = insert_header(&backend, 6, block5, None, Default::default()); - let 
block7 = insert_header_with_configuration_change(&backend, 7, block6, None, config_at_7.clone()); - - // test configuration cache - let storage = &backend.changes_tries_storage; - assert_eq!( - storage.configuration_at(&BlockId::Hash(block1)).unwrap().config, - config_at_1.clone(), - ); - assert_eq!( - storage.configuration_at(&BlockId::Hash(block2)).unwrap().config, - config_at_1.clone(), - ); - assert_eq!( - storage.configuration_at(&BlockId::Hash(block3)).unwrap().config, - config_at_3.clone(), - ); - assert_eq!( - storage.configuration_at(&BlockId::Hash(block4)).unwrap().config, - config_at_3.clone(), - ); - assert_eq!( - storage.configuration_at(&BlockId::Hash(block5)).unwrap().config, - config_at_5.clone(), - ); - assert_eq!( - storage.configuration_at(&BlockId::Hash(block6)).unwrap().config, - config_at_5.clone(), - ); - assert_eq!( - storage.configuration_at(&BlockId::Hash(block7)).unwrap().config, - config_at_7.clone(), - ); - } - - #[test] - fn test_finalize_several_configuration_change_blocks_in_single_operation() { - let mut backend = Backend::::new_test(10, 10); - backend.changes_tries_storage.min_blocks_to_keep = Some(8); - - let configs = (0..=7).map(|i| Some(ChangesTrieConfiguration::new(2, i))).collect::>(); - - // insert unfinalized headers - let block0 = insert_header_with_configuration_change(&backend, 0, Default::default(), None, configs[0].clone()); - let block1 = insert_header_with_configuration_change(&backend, 1, block0, changes(1), configs[1].clone()); - let block2 = insert_header_with_configuration_change(&backend, 2, block1, changes(2), configs[2].clone()); - - let side_config2_1 = Some(ChangesTrieConfiguration::new(3, 2)); - let side_config2_2 = Some(ChangesTrieConfiguration::new(3, 3)); - let block2_1 = insert_header_with_configuration_change(&backend, 2, block1, changes(8), side_config2_1.clone()); - let _ = insert_header_with_configuration_change(&backend, 3, block2_1, changes(9), side_config2_2.clone()); - - // insert finalized 
header => 4 headers are finalized at once - let header3 = Header { - number: 3, - parent_hash: block2, - state_root: Default::default(), - digest: Digest { - logs: vec![ - DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration(configs[3].clone())), - ], - }, - extrinsics_root: Default::default(), - }; - let block3 = header3.hash(); - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Hash(block2)).unwrap(); - op.mark_finalized(BlockId::Hash(block1), None).unwrap(); - op.mark_finalized(BlockId::Hash(block2), None).unwrap(); - op.set_block_data(header3, None, None, NewBlockState::Final).unwrap(); - backend.commit_operation(op).unwrap(); - - // insert more unfinalized headers - let block4 = insert_header_with_configuration_change(&backend, 4, block3, changes(4), configs[4].clone()); - let block5 = insert_header_with_configuration_change(&backend, 5, block4, changes(5), configs[5].clone()); - let block6 = insert_header_with_configuration_change(&backend, 6, block5, changes(6), configs[6].clone()); - - // insert finalized header => 4 headers are finalized at once - let header7 = Header { - number: 7, - parent_hash: block6, - state_root: Default::default(), - digest: Digest { - logs: vec![ - DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration(configs[7].clone())), - ], - }, - extrinsics_root: Default::default(), - }; - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Hash(block6)).unwrap(); - op.mark_finalized(BlockId::Hash(block4), None).unwrap(); - op.mark_finalized(BlockId::Hash(block5), None).unwrap(); - op.mark_finalized(BlockId::Hash(block6), None).unwrap(); - op.set_block_data(header7, None, None, NewBlockState::Final).unwrap(); - backend.commit_operation(op).unwrap(); - } - - #[test] - fn changes_tries_configuration_is_reverted() { - let backend = Backend::::new_test(10, 10); - - let config0 = Some(ChangesTrieConfiguration::new(2, 5)); - let 
block0 = insert_header_with_configuration_change(&backend, 0, Default::default(), None, config0); - let config1 = Some(ChangesTrieConfiguration::new(2, 6)); - let block1 = insert_header_with_configuration_change(&backend, 1, block0, changes(0), config1); - backend.finalize_block(BlockId::Number(1), Some(vec![42])).unwrap(); - let config2 = Some(ChangesTrieConfiguration::new(2, 7)); - let block2 = insert_header_with_configuration_change(&backend, 2, block1, changes(1), config2); - let config2_1 = Some(ChangesTrieConfiguration::new(2, 8)); - let _ = insert_header_with_configuration_change(&backend, 3, block2, changes(10), config2_1); - let config2_2 = Some(ChangesTrieConfiguration::new(2, 9)); - let block2_2 = insert_header_with_configuration_change(&backend, 3, block2, changes(20), config2_2); - let config2_3 = Some(ChangesTrieConfiguration::new(2, 10)); - let _ = insert_header_with_configuration_change(&backend, 4, block2_2, changes(30), config2_3); - - // before truncate there are 2 unfinalized forks - block2_1+block2_3 - assert_eq!( - backend.changes_tries_storage.cache.0.write() - .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) - .unwrap() - .unfinalized() - .iter() - .map(|fork| fork.head().valid_from.number) - .collect::>(), - vec![3, 4], - ); - - // after truncating block2_3 - there are 2 unfinalized forks - block2_1+block2_2 - backend.revert(1).unwrap(); - assert_eq!( - backend.changes_tries_storage.cache.0.write() - .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) - .unwrap() - .unfinalized() - .iter() - .map(|fork| fork.head().valid_from.number) - .collect::>(), - vec![3, 3], - ); - - // after truncating block2_1 && block2_2 - there are still two unfinalized forks (cache impl specifics), - // the 1st one points to the block #3 because it isn't truncated - backend.revert(1).unwrap(); - assert_eq!( - backend.changes_tries_storage.cache.0.write() - .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) - .unwrap() - .unfinalized() - .iter() - 
.map(|fork| fork.head().valid_from.number) - .collect::>(), - vec![3, 2], - ); - - // after truncating block2 - there are no unfinalized forks - backend.revert(1).unwrap(); - assert!( - backend.changes_tries_storage.cache.0.write() - .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) - .unwrap() - .unfinalized() - .iter() - .map(|fork| fork.head().valid_from.number) - .collect::>() - .is_empty(), - ); - } -} From 9481f59a13e330c9dcb526f79376f232eaa0c15f Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Sun, 5 Jan 2020 12:52:30 +0300 Subject: [PATCH 54/63] added weight for set_changes_trie_config --- frame/system/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index cb2a411c4de5d..67baa05b321f0 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -256,6 +256,7 @@ decl_module! { } /// Set the new changes trie configuration. + #[weight = SimpleDispatchInfo::FixedOperational(20_000)] pub fn set_changes_trie_config(origin, changes_trie_config: Option) { ensure_root(origin)?; match changes_trie_config.clone() { From 06755b036f4dcc57dc9fa12442edc47ad9f3b6ae Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Sun, 5 Jan 2020 13:24:23 +0300 Subject: [PATCH 55/63] prefer_configs -> fail_if_disabled --- client/src/client.rs | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/client/src/client.rs b/client/src/client.rs index e88f65b002acd..81ca0dce4a350 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -413,7 +413,7 @@ impl Client where return Err(sp_blockchain::Error::ChangesTrieAccessFailed("Invalid changes trie range".into())); } - let (storage, configs) = match self.require_changes_trie(first, last_hash, true).ok() { + let (storage, configs) = match self.require_changes_trie(first, last_hash, false).ok() { Some((storage, configs)) => (storage, configs), None => return Ok(None), }; @@ -442,7 +442,7 @@ impl Client where ) -> 
sp_blockchain::Result, u32)>> { let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?; let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?; - let (storage, configs) = self.require_changes_trie(first, last_hash, false)?; + let (storage, configs) = self.require_changes_trie(first, last_hash, true)?; let mut result = Vec::new(); let best_number = self.backend.blockchain().info().best_number; @@ -564,7 +564,7 @@ impl Client where let first_number = self.backend.blockchain() .expect_block_number_from_id(&BlockId::Hash(first))?; - let (storage, configs) = self.require_changes_trie(first_number, last, false)?; + let (storage, configs) = self.require_changes_trie(first_number, last, true)?; let min_number = self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(min))?; let recording_storage = AccessedRootsRecorder:: { @@ -660,14 +660,15 @@ impl Client where /// Returns changes trie storage and all configurations that have been active in the range [first; last]. /// /// Configurations are returned in descending order (and obviously never overlap). - /// If prefer_configs is true, returns maximal consequent configurations ranges, starting from last and + /// If fail_if_disabled is false, returns maximal consequent configurations ranges, starting from last and /// stopping on either first, or when CT have been disabled. - /// Fails if or an error if it is not supported. + /// If fail_if_disabled is true, fails when there's a subrange where CT have been disabled + /// inside first..last blocks range. 
fn require_changes_trie( &self, first: NumberFor, last: Block::Hash, - prefer_configs: bool, + fail_if_disabled: bool, ) -> sp_blockchain::Result<( &dyn PrunableStateChangesTrieStorage, Vec<(NumberFor, Option<(NumberFor, Block::Hash)>, ChangesTrieConfiguration)>, @@ -683,7 +684,7 @@ impl Client where let config_range = storage.configuration_at(&BlockId::Hash(current))?; match config_range.config { Some(config) => configs.push((config_range.zero.0, config_range.end, config)), - None if prefer_configs => return Ok((storage, configs)), + None if !fail_if_disabled => return Ok((storage, configs)), None => return Err(sp_blockchain::Error::ChangesTriesNotSupported), } From d0b2de2789659fb28d1fe738ac16c9dd21474802 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Sun, 5 Jan 2020 13:27:52 +0300 Subject: [PATCH 56/63] Update client/api/src/backend.rs Co-Authored-By: cheme --- client/api/src/backend.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 6e00a549551cc..84866a0e2cbd2 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -328,7 +328,7 @@ pub trait PrunableStateChangesTrieStorage: { /// Get reference to StateChangesTrieStorage. fn storage(&self) -> &dyn StateChangesTrieStorage>; - /// Get coniguration at given block. + /// Get configuration at given block. fn configuration_at(&self, at: &BlockId) -> sp_blockchain::Result>; /// Get end block (inclusive) of oldest pruned max-level (or skewed) digest trie blocks range. /// It is guaranteed that we have no any changes tries before (and including) this block. 
From c4c887156650221a456ed8ab2f67a95ed2d2180f Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Mon, 6 Jan 2020 11:12:18 +0300 Subject: [PATCH 57/63] Update client/db/src/changes_tries_storage.rs Co-Authored-By: cheme --- client/db/src/changes_tries_storage.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/db/src/changes_tries_storage.rs b/client/db/src/changes_tries_storage.rs index 3aad3ae019143..721e0538534d2 100644 --- a/client/db/src/changes_tries_storage.rs +++ b/client/db/src/changes_tries_storage.rs @@ -45,7 +45,7 @@ pub fn extract_new_configuration(header: &Header) -> Option<&Op .and_then(ChangesTrieSignal::as_new_configuration) } -/// Opaque configuration cache transaction. During its lifetime, noone should modify cache. This is currently +/// Opaque configuration cache transaction. During its lifetime, no-one should modify cache. This is currently /// guaranteed because import lock is held during block import/finalization. pub struct DbChangesTrieStorageTransaction { /// Cache operations that must be performed after db transaction is comitted. From 51fa091cf5c1bf4daf838bd022322ecd353e4a03 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Mon, 6 Jan 2020 13:33:45 +0300 Subject: [PATCH 58/63] CommitOperation+merge -> CommitOperations --- client/db/src/cache/list_cache.rs | 356 ++++++++++++++++++++---------- client/db/src/cache/mod.rs | 52 +---- 2 files changed, 250 insertions(+), 158 deletions(-) diff --git a/client/db/src/cache/list_cache.rs b/client/db/src/cache/list_cache.rs index c8cf63efc61c0..b52aff8457b3c 100644 --- a/client/db/src/cache/list_cache.rs +++ b/client/db/src/cache/list_cache.rs @@ -94,6 +94,12 @@ pub enum CommitOperation { BlockReverted(BTreeMap>>), } +/// A set of commit operations. +#[derive(Debug)] +pub struct CommitOperations { + operations: Vec>, +} + /// Single fork of list-based cache. 
#[derive(Debug)] #[cfg_attr(test, derive(PartialEq))] @@ -203,9 +209,89 @@ impl> ListCache block: ComplexBlockId, value: Option, entry_type: EntryType, - prev_operation: Option<&CommitOperation>, + operations: &mut CommitOperations, + ) -> ClientResult<()> { + Ok(operations.append(self.do_on_block_insert(tx, parent, block, value, entry_type, operations)?)) + } + + /// When previously inserted block is finalized. + pub fn on_block_finalize>( + &self, + tx: &mut Tx, + parent: ComplexBlockId, + block: ComplexBlockId, + operations: &mut CommitOperations, + ) -> ClientResult<()> { + Ok(operations.append(self.do_on_block_finalize(tx, parent, block, operations)?)) + } + + /// When block is reverted. + pub fn on_block_revert>( + &self, + tx: &mut Tx, + reverted_block: &ComplexBlockId, + operations: &mut CommitOperations, + ) -> ClientResult<()> { + Ok(operations.append(Some(self.do_on_block_revert(tx, reverted_block)?))) + } + + /// When transaction is committed. + pub fn on_transaction_commit(&mut self, ops: CommitOperations) { + for op in ops.operations { + match op { + CommitOperation::AppendNewBlock(index, best_block) => { + let mut fork = self.unfinalized.get_mut(index) + .expect("ListCache is a crate-private type; + internal clients of ListCache are committing transaction while cache is locked; + CommitOperation holds valid references while cache is locked; qed"); + fork.best_block = Some(best_block); + }, + CommitOperation::AppendNewEntry(index, entry) => { + let mut fork = self.unfinalized.get_mut(index) + .expect("ListCache is a crate-private type; + internal clients of ListCache are committing transaction while cache is locked; + CommitOperation holds valid references while cache is locked; qed"); + fork.best_block = Some(entry.valid_from.clone()); + fork.head = entry; + }, + CommitOperation::AddNewFork(entry) => { + self.unfinalized.push(Fork { + best_block: Some(entry.valid_from.clone()), + head: entry, + }); + }, + CommitOperation::BlockFinalized(block, 
finalizing_entry, forks) => { + self.best_finalized_block = block; + if let Some(finalizing_entry) = finalizing_entry { + self.best_finalized_entry = Some(finalizing_entry); + } + for fork_index in forks.iter().rev() { + self.unfinalized.remove(*fork_index); + } + }, + CommitOperation::BlockReverted(forks) => { + for (fork_index, updated_fork) in forks.into_iter().rev() { + match updated_fork { + Some(updated_fork) => self.unfinalized[fork_index] = updated_fork, + None => { self.unfinalized.remove(fork_index); }, + } + } + }, + } + } + } + + fn do_on_block_insert>( + &self, + tx: &mut Tx, + parent: ComplexBlockId, + block: ComplexBlockId, + value: Option, + entry_type: EntryType, + operations: &CommitOperations, ) -> ClientResult>> { // this guarantee is currently provided by LightStorage && we're relying on it here + let prev_operation = operations.operations.last(); debug_assert!( entry_type != EntryType::Final || self.best_finalized_block.hash == parent.hash || @@ -324,15 +410,15 @@ impl> ListCache } } - /// When previously inserted block is finalized. - pub fn on_block_finalize>( + fn do_on_block_finalize>( &self, tx: &mut Tx, parent: ComplexBlockId, block: ComplexBlockId, - prev_operation: Option<&CommitOperation>, + operations: &CommitOperations, ) -> ClientResult>> { // this guarantee is currently provided by db backend && we're relying on it here + let prev_operation = operations.operations.last(); debug_assert!( self.best_finalized_block.hash == parent.hash || match prev_operation { @@ -356,8 +442,7 @@ impl> ListCache Ok(Some(operation)) } - /// When block is reverted. - pub fn on_block_revert>( + fn do_on_block_revert>( &self, tx: &mut Tx, reverted_block: &ComplexBlockId, @@ -394,52 +479,6 @@ impl> ListCache Ok(operation) } - /// When transaction is committed. 
- pub fn on_transaction_commit(&mut self, ops: Vec>) { - for op in ops { - match op { - CommitOperation::AppendNewBlock(index, best_block) => { - let mut fork = self.unfinalized.get_mut(index) - .expect("ListCache is a crate-private type; - internal clients of ListCache are committing transaction while cache is locked; - CommitOperation holds valid references while cache is locked; qed"); - fork.best_block = Some(best_block); - }, - CommitOperation::AppendNewEntry(index, entry) => { - let mut fork = self.unfinalized.get_mut(index) - .expect("ListCache is a crate-private type; - internal clients of ListCache are committing transaction while cache is locked; - CommitOperation holds valid references while cache is locked; qed"); - fork.best_block = Some(entry.valid_from.clone()); - fork.head = entry; - }, - CommitOperation::AddNewFork(entry) => { - self.unfinalized.push(Fork { - best_block: Some(entry.valid_from.clone()), - head: entry, - }); - }, - CommitOperation::BlockFinalized(block, finalizing_entry, forks) => { - self.best_finalized_block = block; - if let Some(finalizing_entry) = finalizing_entry { - self.best_finalized_entry = Some(finalizing_entry); - } - for fork_index in forks.iter().rev() { - self.unfinalized.remove(*fork_index); - } - }, - CommitOperation::BlockReverted(forks) => { - for (fork_index, updated_fork) in forks.into_iter().rev() { - match updated_fork { - Some(updated_fork) => self.unfinalized[fork_index] = updated_fork, - None => { self.unfinalized.remove(fork_index); }, - } - } - }, - } - } - } - /// Prune old finalized entries. fn prune_finalized_entries>( &self, @@ -658,34 +697,61 @@ impl Fork { } } -impl CommitOperation { - /// Try to merge two ops into single one. 
- pub fn merge_with(self, other: CommitOperation) -> (Option, Option) { - // we only able to merge two consequent block finalization operations - match self { +impl Default for CommitOperations { + fn default() -> Self { + CommitOperations { operations: Vec::new() } + } +} + +// This should never be allowed for non-test code to avoid revealing its internals. +#[cfg(test)] +impl From>> for CommitOperations { + fn from(operations: Vec>) -> Self { + CommitOperations { operations } + } +} + +impl CommitOperations { + /// Append operation to the set. + fn append(&mut self, new_operation: Option>) { + let new_operation = match new_operation { + Some(new_operation) => new_operation, + None => return, + }; + + let last_operation = match self.operations.pop() { + Some(last_operation) => last_operation, + None => { + self.operations.push(new_operation); + return; + }, + }; + + // we are able (and obliged to) to merge two consequent block finalization operations + match last_operation { CommitOperation::BlockFinalized(old_finalized_block, old_finalized_entry, old_abandoned_forks) => { - match other { + match new_operation { CommitOperation::BlockFinalized(new_finalized_block, new_finalized_entry, new_abandoned_forks) => { - ( - Some(CommitOperation::BlockFinalized( - new_finalized_block, - new_finalized_entry, - new_abandoned_forks, - )), - None, - ) + self.operations.push(CommitOperation::BlockFinalized( + new_finalized_block, + new_finalized_entry, + new_abandoned_forks, + )); }, - _ => ( - Some(CommitOperation::BlockFinalized( + _ => { + self.operations.push(CommitOperation::BlockFinalized( old_finalized_block, old_finalized_entry, old_abandoned_forks, - )), - Some(other), - ), + )); + self.operations.push(new_operation); + }, } }, - _ => (Some(self), Some(other)), + _ => { + self.operations.push(last_operation); + self.operations.push(new_operation); + }, } } } @@ -954,24 +1020,25 @@ pub mod tests { let fin = EntryType::Final; // when trying to insert block < finalized 
number + let mut ops = Default::default(); assert!(ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)).unwrap() - .on_block_insert( + .do_on_block_insert( &mut DummyTransaction::new(), test_id(49), test_id(50), Some(50), nfin, - None, + &mut ops, ).unwrap().is_none()); // when trying to insert block @ finalized number assert!(ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)).unwrap() - .on_block_insert( + .do_on_block_insert( &mut DummyTransaction::new(), test_id(99), test_id(100), Some(100), nfin, - None, + &Default::default(), ).unwrap().is_none()); // when trying to insert non-final block AND it appends to the best block of unfinalized fork @@ -984,16 +1051,20 @@ pub mod tests { ).unwrap(); cache.unfinalized[0].best_block = Some(test_id(4)); let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, test_id(4), test_id(5), Some(4), nfin, None).unwrap(), - Some(CommitOperation::AppendNewBlock(0, test_id(5)))); + assert_eq!( + cache.do_on_block_insert(&mut tx, test_id(4), test_id(5), Some(4), nfin, &Default::default()).unwrap(), + Some(CommitOperation::AppendNewBlock(0, test_id(5))), + ); assert!(tx.inserted_entries().is_empty()); assert!(tx.removed_entries().is_empty()); assert!(tx.updated_meta().is_none()); // when trying to insert non-final block AND it appends to the best block of unfinalized fork // AND new value is the same as in the fork' best block let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, test_id(4), test_id(5), Some(5), nfin, None).unwrap(), - Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: test_id(5), value: 5 }))); + assert_eq!( + cache.do_on_block_insert(&mut tx, test_id(4), test_id(5), Some(5), nfin, &Default::default()).unwrap(), + Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: test_id(5), value: 5 })), + ); assert_eq!(*tx.inserted_entries(), vec![test_id(5).hash].into_iter().collect()); 
assert!(tx.removed_entries().is_empty()); assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: None, unfinalized: vec![test_id(5)] })); @@ -1008,16 +1079,34 @@ pub mod tests { PruningStrategy::ByDepth(1024), test_id(2) ).unwrap(); let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, correct_id(4), correct_id(5), Some(4), nfin, None).unwrap(), - Some(CommitOperation::AppendNewBlock(0, correct_id(5)))); + assert_eq!( + cache.do_on_block_insert( + &mut tx, + correct_id(4), + correct_id(5), + Some(4), + nfin, + &Default::default(), + ).unwrap(), + Some(CommitOperation::AppendNewBlock(0, correct_id(5))), + ); assert!(tx.inserted_entries().is_empty()); assert!(tx.removed_entries().is_empty()); assert!(tx.updated_meta().is_none()); // when trying to insert non-final block AND it is the first block that appends to the best block of unfinalized fork // AND new value is the same as in the fork' best block let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, correct_id(4), correct_id(5), Some(5), nfin, None).unwrap(), - Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(5), value: 5 }))); + assert_eq!( + cache.do_on_block_insert( + &mut tx, + correct_id(4), + correct_id(5), + Some(5), + nfin, + &Default::default(), + ).unwrap(), + Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(5), value: 5 })), + ); assert_eq!(*tx.inserted_entries(), vec![correct_id(5).hash].into_iter().collect()); assert!(tx.removed_entries().is_empty()); assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: None, unfinalized: vec![correct_id(5)] })); @@ -1034,8 +1123,11 @@ pub mod tests { PruningStrategy::ByDepth(1024), correct_id(2) ).unwrap(); let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, correct_id(3), fork_id(0, 3, 4), Some(14), nfin, None).unwrap(), - Some(CommitOperation::AddNewFork(Entry { valid_from: fork_id(0, 3, 4), value: 14 }))); + assert_eq!( + 
cache.do_on_block_insert(&mut tx, correct_id(3), fork_id(0, 3, 4), Some(14), nfin, &Default::default()) + .unwrap(), + Some(CommitOperation::AddNewFork(Entry { valid_from: fork_id(0, 3, 4), value: 14 })), + ); assert_eq!(*tx.inserted_entries(), vec![fork_id(0, 3, 4).hash].into_iter().collect()); assert!(tx.removed_entries().is_empty()); assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(2)), unfinalized: vec![correct_id(4), fork_id(0, 3, 4)] })); @@ -1049,7 +1141,11 @@ pub mod tests { PruningStrategy::ByDepth(1024), correct_id(2) ).unwrap(); let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), nfin, None).unwrap(), None); + assert_eq!( + cache.do_on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), nfin, &Default::default()) + .unwrap(), + None, + ); assert!(tx.inserted_entries().is_empty()); assert!(tx.removed_entries().is_empty()); assert!(tx.updated_meta().is_none()); @@ -1062,8 +1158,11 @@ pub mod tests { PruningStrategy::ByDepth(1024), correct_id(2) ).unwrap(); let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), nfin, None).unwrap(), - Some(CommitOperation::AddNewFork(Entry { valid_from: correct_id(3), value: 3 }))); + assert_eq!( + cache.do_on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), nfin, &Default::default()) + .unwrap(), + Some(CommitOperation::AddNewFork(Entry { valid_from: correct_id(3), value: 3 })), + ); assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect()); assert!(tx.removed_entries().is_empty()); assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(2)), unfinalized: vec![correct_id(3)] })); @@ -1072,7 +1171,8 @@ pub mod tests { let cache = ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), correct_id(2)).unwrap(); let mut tx = DummyTransaction::new(); assert_eq!( - cache.on_block_insert(&mut tx, 
correct_id(2), correct_id(3), Some(3), fin, None).unwrap(), + cache.do_on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), fin, &Default::default()) + .unwrap(), Some(CommitOperation::BlockFinalized( correct_id(3), Some(Entry { valid_from: correct_id(3), value: 3 }), @@ -1090,15 +1190,17 @@ pub mod tests { PruningStrategy::ByDepth(1024), correct_id(2) ).unwrap(); let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), fin, None).unwrap(), - Some(CommitOperation::BlockFinalized(correct_id(3), None, Default::default()))); + assert_eq!( + cache.do_on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), fin, &Default::default()).unwrap(), + Some(CommitOperation::BlockFinalized(correct_id(3), None, Default::default())), + ); assert!(tx.inserted_entries().is_empty()); assert!(tx.removed_entries().is_empty()); assert!(tx.updated_meta().is_none()); // when inserting finalized entry AND value differs from previous finalized let mut tx = DummyTransaction::new(); assert_eq!( - cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), fin, None).unwrap(), + cache.do_on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), fin, &Default::default()).unwrap(), Some(CommitOperation::BlockFinalized( correct_id(3), Some(Entry { valid_from: correct_id(3), value: 3 }), @@ -1118,8 +1220,10 @@ pub mod tests { PruningStrategy::ByDepth(1024), correct_id(2) ).unwrap(); let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), fin, None).unwrap(), - Some(CommitOperation::BlockFinalized(correct_id(3), None, vec![0].into_iter().collect()))); + assert_eq!( + cache.do_on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), fin, &Default::default()).unwrap(), + Some(CommitOperation::BlockFinalized(correct_id(3), None, vec![0].into_iter().collect())), + ); } #[test] @@ -1133,8 +1237,10 @@ pub mod tests { 
PruningStrategy::ByDepth(1024), correct_id(2) ).unwrap(); let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_finalize(&mut tx, correct_id(2), correct_id(3), None).unwrap(), - Some(CommitOperation::BlockFinalized(correct_id(3), None, Default::default()))); + assert_eq!( + cache.do_on_block_finalize(&mut tx, correct_id(2), correct_id(3), &Default::default()).unwrap(), + Some(CommitOperation::BlockFinalized(correct_id(3), None, Default::default())), + ); assert!(tx.inserted_entries().is_empty()); assert!(tx.removed_entries().is_empty()); assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: None, unfinalized: vec![correct_id(5)] })); @@ -1148,7 +1254,7 @@ pub mod tests { ).unwrap(); let mut tx = DummyTransaction::new(); assert_eq!( - cache.on_block_finalize(&mut tx, correct_id(4), correct_id(5), None).unwrap(), + cache.do_on_block_finalize(&mut tx, correct_id(4), correct_id(5), &Default::default()).unwrap(), Some(CommitOperation::BlockFinalized( correct_id(5), Some(Entry { valid_from: correct_id(5), value: 5 }), @@ -1167,8 +1273,10 @@ pub mod tests { PruningStrategy::ByDepth(1024), correct_id(2) ).unwrap(); let mut tx = DummyTransaction::new(); - assert_eq!(cache.on_block_finalize(&mut tx, correct_id(2), correct_id(3), None).unwrap(), - Some(CommitOperation::BlockFinalized(correct_id(3), None, vec![0].into_iter().collect()))); + assert_eq!( + cache.do_on_block_finalize(&mut tx, correct_id(2), correct_id(3), &Default::default()).unwrap(), + Some(CommitOperation::BlockFinalized(correct_id(3), None, vec![0].into_iter().collect())), + ); } #[test] @@ -1183,18 +1291,18 @@ pub mod tests { ).unwrap(); // when new block is appended to unfinalized fork - cache.on_transaction_commit(vec![CommitOperation::AppendNewBlock(0, correct_id(6))]); + cache.on_transaction_commit(vec![CommitOperation::AppendNewBlock(0, correct_id(6))].into()); assert_eq!(cache.unfinalized[0].best_block, Some(correct_id(6))); // when new entry is appended to unfinalized fork 
cache.on_transaction_commit(vec![ CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(7), value: 7 }), - ]); + ].into()); assert_eq!(cache.unfinalized[0].best_block, Some(correct_id(7))); assert_eq!(cache.unfinalized[0].head, Entry { valid_from: correct_id(7), value: 7 }); // when new fork is added cache.on_transaction_commit(vec![ CommitOperation::AddNewFork(Entry { valid_from: correct_id(10), value: 10 }), - ]); + ].into()); assert_eq!(cache.unfinalized[2].best_block, Some(correct_id(10))); assert_eq!(cache.unfinalized[2].head, Entry { valid_from: correct_id(10), value: 10 }); // when block is finalized + entry is finalized + unfinalized forks are deleted @@ -1202,7 +1310,7 @@ pub mod tests { correct_id(20), Some(Entry { valid_from: correct_id(20), value: 20 }), vec![0, 1, 2].into_iter().collect(), - )]); + )].into()); assert_eq!(cache.best_finalized_block, correct_id(20)); assert_eq!(cache.best_finalized_entry, Some(Entry { valid_from: correct_id(20), value: 20 })); assert!(cache.unfinalized.is_empty()); @@ -1606,47 +1714,61 @@ pub mod tests { ).unwrap(); // when 5 is reverted: entry 5 is truncated - let op = cache.on_block_revert(&mut DummyTransaction::new(), &correct_id(5)).unwrap(); + let op = cache.do_on_block_revert(&mut DummyTransaction::new(), &correct_id(5)).unwrap(); assert_eq!(op, CommitOperation::BlockReverted(vec![ (0, Some(Fork { best_block: None, head: Entry { valid_from: correct_id(4), value: 4 } })), ].into_iter().collect())); - cache.on_transaction_commit(vec![op]); + cache.on_transaction_commit(vec![op].into()); // when 3 is reverted: entries 4+5' are truncated - let op = cache.on_block_revert(&mut DummyTransaction::new(), &correct_id(3)).unwrap(); + let op = cache.do_on_block_revert(&mut DummyTransaction::new(), &correct_id(3)).unwrap(); assert_eq!(op, CommitOperation::BlockReverted(vec![ (0, None), (2, None), ].into_iter().collect())); - cache.on_transaction_commit(vec![op]); + cache.on_transaction_commit(vec![op].into()); // 
when 2 is reverted: entries 4'+5' are truncated - let op = cache.on_block_revert(&mut DummyTransaction::new(), &correct_id(2)).unwrap(); + let op = cache.do_on_block_revert(&mut DummyTransaction::new(), &correct_id(2)).unwrap(); assert_eq!(op, CommitOperation::BlockReverted(vec![ (0, None), ].into_iter().collect())); - cache.on_transaction_commit(vec![op]); + cache.on_transaction_commit(vec![op].into()); } #[test] - fn merge_commit_operation_works() { - let op1 = CommitOperation::BlockFinalized( + fn append_commit_operation_works() { + let mut ops = CommitOperations::default(); + ops.append(None); + assert_eq!(ops.operations, Vec::new()); + + ops.append(Some(CommitOperation::BlockFinalized( test_id(10), Some(Entry { valid_from: test_id(10), value: 10 }), vec![5].into_iter().collect(), + ))); + assert_eq!( + ops.operations, + vec![CommitOperation::BlockFinalized( + test_id(10), + Some(Entry { valid_from: test_id(10), value: 10 }), + vec![5].into_iter().collect(), + )], ); - let op2 = CommitOperation::BlockFinalized( + + ops.append(Some(CommitOperation::BlockFinalized( test_id(20), Some(Entry { valid_from: test_id(20), value: 20 }), vec![5, 6].into_iter().collect(), - ); + ))); + assert_eq!( - op1.merge_with(op2), - (Some(CommitOperation::BlockFinalized( + ops.operations, + vec![CommitOperation::BlockFinalized( test_id(20), Some(Entry { valid_from: test_id(20), value: 20 }), vec![5, 6].into_iter().collect(), - )), None), + )], ); } } diff --git a/client/db/src/cache/mod.rs b/client/db/src/cache/mod.rs index 17b9fc86d4be1..4be3d242cebd8 100644 --- a/client/db/src/cache/mod.rs +++ b/client/db/src/cache/mod.rs @@ -198,7 +198,7 @@ fn get_cache_helper<'a, Block: BlockT>( /// Cache operations that are to be committed after database transaction is committed. 
#[derive(Default)] pub struct DbCacheTransactionOps { - cache_at_ops: HashMap>>>, + cache_at_ops: HashMap>>, best_finalized_block: Option>, } @@ -216,7 +216,7 @@ impl DbCacheTransactionOps { pub struct DbCacheTransaction<'a, Block: BlockT> { cache: &'a mut DbCache, tx: &'a mut DBTransaction, - cache_at_ops: HashMap>>>, + cache_at_ops: HashMap>>, best_finalized_block: Option>, } @@ -246,8 +246,8 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> { let mut insert_op = |name: CacheKeyId, value: Option>| -> Result<(), sp_blockchain::Error> { let cache = self.cache.get_cache(name)?; - let mut cache_ops = self.cache_at_ops.remove(&name).unwrap_or_default(); - let op = cache.on_block_insert( + let cache_ops = self.cache_at_ops.entry(name).or_default(); + cache.on_block_insert( &mut self::list_storage::DbStorageTransaction::new( cache.storage(), &mut self.tx, @@ -256,11 +256,9 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> { block.clone(), value, entry_type, - cache_ops.last(), + cache_ops, )?; - push_cache_op(&mut cache_ops, op); - self.cache_at_ops.insert(name, cache_ops); Ok(()) }; @@ -283,19 +281,16 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> { block: ComplexBlockId, ) -> ClientResult { for (name, cache) in self.cache.cache_at.iter() { - let mut cache_ops = self.cache_at_ops.remove(name).unwrap_or_default(); - let op = cache.on_block_finalize( + let cache_ops = self.cache_at_ops.entry(*name).or_default(); + cache.on_block_finalize( &mut self::list_storage::DbStorageTransaction::new( cache.storage(), &mut self.tx ), parent.clone(), block.clone(), - cache_ops.last(), + cache_ops, )?; - - push_cache_op(&mut cache_ops, op); - self.cache_at_ops.insert(*name, cache_ops); } self.best_finalized_block = Some(block); @@ -309,17 +304,15 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> { reverted_block: &ComplexBlockId, ) -> ClientResult { for (name, cache) in self.cache.cache_at.iter() { - let mut cache_ops = 
self.cache_at_ops.remove(name).unwrap_or_default(); - let op = cache.on_block_revert( + let cache_ops = self.cache_at_ops.entry(*name).or_default(); + cache.on_block_revert( &mut self::list_storage::DbStorageTransaction::new( cache.storage(), &mut self.tx ), reverted_block, + cache_ops, )?; - - cache_ops.push(op); - self.cache_at_ops.insert(*name, cache_ops); } Ok(self) @@ -402,26 +395,3 @@ fn cache_pruning_strategy>(cache: CacheKeyId) -> PruningStrategy _ => PruningStrategy::ByDepth(PRUNE_DEPTH.into()), } } - -/// Push new operation to the operations vec. -fn push_cache_op( - cache_ops: &mut Vec>>, - new_op: Option>>, -) { - if let Some(new_op) = new_op { - if let Some(prev_op) = cache_ops.pop() { - match prev_op.merge_with(new_op) { - (Some(merged_op), None) => { - cache_ops.push(merged_op); - }, - (Some(prev_op), Some(new_op)) => { - cache_ops.push(prev_op); - cache_ops.push(new_op); - }, - _ => unreachable!("merge of 2 ops can never lead to noop; qed"), - } - } else { - cache_ops.push(new_op); - } - } -} From 46d472100596d0d7d1f267ad23eb45fbb61d2e04 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 7 Jan 2020 11:48:51 +0300 Subject: [PATCH 59/63] fixed test compilation --- primitives/state-machine/src/lib.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index c996e58200886..d032ca5f500c9 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -1022,11 +1022,10 @@ mod tests { let subtrie2 = ChildStorageKey::from_slice(b":child_storage:default:sub_test2").unwrap(); let mut transaction = { let backend = test_trie(); - let changes_trie_storage = InMemoryChangesTrieStorage::::new(); let mut ext = Ext::new( &mut overlay, &backend, - Some(&changes_trie_storage), + changes_trie::disabled_state::<_, u64>(), None, ); ext.set_child_storage(subtrie1, CHILD_INFO_1, b"abc".to_vec(), b"def".to_vec()); From 
39c5f64576bde149480ba0c3be4a17aff676396a Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 9 Jan 2020 10:57:10 +0300 Subject: [PATCH 60/63] merged two different CTRange structs --- client/api/src/backend.rs | 17 +++------- client/api/src/light.rs | 4 +-- client/db/src/changes_tries_storage.rs | 8 +++-- client/network/src/protocol/light_dispatch.rs | 6 +++- client/src/light/fetcher.rs | 31 +++++++++++++------ primitives/core/src/changes_trie.rs | 11 +++++++ primitives/core/src/lib.rs | 2 +- 7 files changed, 49 insertions(+), 30 deletions(-) diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 84866a0e2cbd2..c691ee920f76f 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -18,7 +18,7 @@ use std::sync::Arc; use std::collections::HashMap; -use sp_core::ChangesTrieConfiguration; +use sp_core::ChangesTrieConfigurationRange; use sp_core::offchain::OffchainStorage; use sp_runtime::{generic::BlockId, Justification, Storage}; use sp_runtime::traits::{Block as BlockT, NumberFor}; @@ -311,17 +311,6 @@ pub trait Backend: AuxStore + Send + Sync where fn get_import_lock(&self) -> &RwLock<()>; } -/// Changes trie configuration range. -#[derive(Debug)] -pub struct ChangesTrieConfigurationRange { - /// Zero block of this configuration. First trie that uses this configuration is build at the next block. - pub zero: (NumberFor, Block::Hash), - /// End block where last trie that uses this configuration has been build. None if configuration is active. - pub end: Option<(NumberFor, Block::Hash)>, - /// Configuration itself. None if changes tries are disabled within this range. - pub config: Option, -} - /// Changes trie storage that supports pruning. pub trait PrunableStateChangesTrieStorage: StateChangesTrieStorage> @@ -329,7 +318,9 @@ pub trait PrunableStateChangesTrieStorage: /// Get reference to StateChangesTrieStorage. fn storage(&self) -> &dyn StateChangesTrieStorage>; /// Get configuration at given block. 
- fn configuration_at(&self, at: &BlockId) -> sp_blockchain::Result>; + fn configuration_at(&self, at: &BlockId) -> sp_blockchain::Result< + ChangesTrieConfigurationRange, Block::Hash> + >; /// Get end block (inclusive) of oldest pruned max-level (or skewed) digest trie blocks range. /// It is guaranteed that we have no any changes tries before (and including) this block. /// It is guaranteed that all existing changes tries after this block are not yet pruned (if created). diff --git a/client/api/src/light.rs b/client/api/src/light.rs index 234832d743c08..80eaf61b5f1f6 100644 --- a/client/api/src/light.rs +++ b/client/api/src/light.rs @@ -26,7 +26,7 @@ use sp_runtime::{ }, generic::BlockId }; -use sp_core::ChangesTrieConfiguration; +use sp_core::ChangesTrieConfigurationRange; use sp_state_machine::StorageProof; use sp_blockchain::{ HeaderMetadata, well_known_cache_keys, HeaderBackend, Cache as BlockchainCache, @@ -96,7 +96,7 @@ pub struct RemoteReadChildRequest { #[derive(Clone, Debug, PartialEq, Eq)] pub struct RemoteChangesRequest { /// All changes trie configurations that are valid within [first_block; last_block]. - pub changes_trie_configs: Vec<(Header::Number, Option, ChangesTrieConfiguration)>, + pub changes_trie_configs: Vec>, /// Query changes from range of blocks, starting (and including) with this hash... pub first_block: (Header::Number, Header::Hash), /// ...ending (and including) with this hash. 
Should come after first_block and diff --git a/client/db/src/changes_tries_storage.rs b/client/db/src/changes_tries_storage.rs index 721e0538534d2..8adaa6b6677fa 100644 --- a/client/db/src/changes_tries_storage.rs +++ b/client/db/src/changes_tries_storage.rs @@ -24,9 +24,9 @@ use codec::{Decode, Encode}; use parking_lot::RwLock; use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sp_trie::MemoryDB; -use sc_client_api::backend::{PrunableStateChangesTrieStorage, ChangesTrieConfigurationRange}; +use sc_client_api::backend::PrunableStateChangesTrieStorage; use sp_blockchain::{well_known_cache_keys, Cache as BlockchainCache}; -use sp_core::{H256, Blake2Hasher, ChangesTrieConfiguration, convert_hash}; +use sp_core::{H256, Blake2Hasher, ChangesTrieConfiguration, ChangesTrieConfigurationRange, convert_hash}; use sp_runtime::traits::{ Block as BlockT, Header as HeaderT, NumberFor, One, Zero, CheckedSub, }; @@ -385,7 +385,9 @@ where self } - fn configuration_at(&self, at: &BlockId) -> ClientResult> { + fn configuration_at(&self, at: &BlockId) -> ClientResult< + ChangesTrieConfigurationRange, Block::Hash> + > { self.cache .get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, at)? 
.and_then(|(zero, end, encoded)| Decode::decode(&mut &encoded[..]).ok() diff --git a/client/network/src/protocol/light_dispatch.rs b/client/network/src/protocol/light_dispatch.rs index 93d9c27efaabf..2d9196d00b6a8 100644 --- a/client/network/src/protocol/light_dispatch.rs +++ b/client/network/src/protocol/light_dispatch.rs @@ -1092,7 +1092,11 @@ pub mod tests { let (tx, response) = oneshot::channel(); light_dispatch.add_request(&mut network_interface, RequestData::RemoteChanges(RemoteChangesRequest { - changes_trie_configs: vec![(0, None, sp_core::ChangesTrieConfiguration::new(4, 2))], + changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { + zero: (0, Default::default()), + end: None, + config: Some(sp_core::ChangesTrieConfiguration::new(4, 2)), + }], first_block: (1, Default::default()), last_block: (100, Default::default()), max_block: (100, Default::default()), diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index 44ab5a8469637..a7b740af621b0 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -116,14 +116,13 @@ impl> LightDataChecker { // and now check the key changes proof + get the changes let mut result = Vec::new(); let proof_storage = InMemoryChangesTrieStorage::with_proof(remote_proof); - for (config_zero, config_end, config) in &request.changes_trie_configs { - let config_range = ChangesTrieConfigurationRange { - config, - zero: config_zero.clone(), - end: config_end.clone(), - }; + for config_range in &request.changes_trie_configs { let result_range = key_changes_proof_check_with_db::( - config_range, + ChangesTrieConfigurationRange { + config: config_range.config.as_ref().ok_or(ClientError::ChangesTriesNotSupported)?, + zero: config_range.zero.0, + end: config_range.end.map(|(n, _)| n), + }, &RootsStorage { roots: (request.tries_roots.0, &request.tries_roots.2), prev_roots: &remote_roots, @@ -572,7 +571,11 @@ pub mod tests { let local_roots_range = local_roots.clone()[(begin - 1) as 
usize..].to_vec(); let config = ChangesTrieConfiguration::new(4, 2); let request = RemoteChangesRequest::
{ - changes_trie_configs: vec![(0, None, config)], + changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { + zero: (0, Default::default()), + end: None, + config: Some(config), + }], first_block: (begin, begin_hash), last_block: (end, end_hash), max_block: (max, max_hash), @@ -628,7 +631,11 @@ pub mod tests { // check proof on local client let config = ChangesTrieConfiguration::new(4, 2); let request = RemoteChangesRequest::
{ - changes_trie_configs: vec![(0, None, config)], + changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { + zero: (0, Default::default()), + end: None, + config: Some(config), + }], first_block: (1, b1), last_block: (4, b4), max_block: (4, b4), @@ -670,7 +677,11 @@ pub mod tests { let local_roots_range = local_roots.clone()[(begin - 1) as usize..].to_vec(); let config = ChangesTrieConfiguration::new(4, 2); let request = RemoteChangesRequest::
{ - changes_trie_configs: vec![(0, None, config)], + changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { + zero: (0, Default::default()), + end: None, + config: Some(config), + }], first_block: (begin, begin_hash), last_block: (end, end_hash), max_block: (max, max_hash), diff --git a/primitives/core/src/changes_trie.rs b/primitives/core/src/changes_trie.rs index f746e1dc8d7b2..2fee131ee6929 100644 --- a/primitives/core/src/changes_trie.rs +++ b/primitives/core/src/changes_trie.rs @@ -38,6 +38,17 @@ pub struct ChangesTrieConfiguration { pub digest_levels: u32, } +/// Substrate changes trie configuration range. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct ChangesTrieConfigurationRange { + /// Zero block of configuration. + pub zero: (Number, Hash), + /// Last block of configuration (if configuration has been deactivated at some point). + pub end: Option<(Number, Hash)>, + /// The configuration itself. None if changes tries were disabled in this range. + pub config: Option, +} + impl ChangesTrieConfiguration { /// Create new configuration given digest interval and levels. 
pub fn new(digest_interval: u32, digest_levels: u32) -> Self { diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index 00343c0e7fd74..3dcf2a74e7ac5 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -75,7 +75,7 @@ mod tests; pub use self::hash::{H160, H256, H512, convert_hash}; pub use self::uint::U256; -pub use changes_trie::ChangesTrieConfiguration; +pub use changes_trie::{ChangesTrieConfiguration, ChangesTrieConfigurationRange}; #[cfg(feature = "full_crypto")] pub use crypto::{DeriveJunction, Pair, Public}; From 25452a513ecada4b5892157a60c03100074b26da Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 9 Jan 2020 11:46:00 +0300 Subject: [PATCH 61/63] lost file --- client/db/src/changes_tries_storage.rs | 28 +++++++++++++------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/client/db/src/changes_tries_storage.rs b/client/db/src/changes_tries_storage.rs index 8adaa6b6677fa..c30d66ac462c7 100644 --- a/client/db/src/changes_tries_storage.rs +++ b/client/db/src/changes_tries_storage.rs @@ -77,10 +77,10 @@ impl From> for DbChangesTrieStorageT /// Lock order: meta, tries_meta, cache, build_cache. pub struct DbChangesTrieStorage { db: Arc, - meta_column: Option, - changes_tries_column: Option, - key_lookup_column: Option, - header_column: Option, + meta_column: u32, + changes_tries_column: u32, + key_lookup_column: u32, + header_column: u32, meta: Arc, Block::Hash>>>, tries_meta: RwLock>, min_blocks_to_keep: Option, @@ -112,11 +112,11 @@ impl> DbChangesTrieStorage { /// Create new changes trie storage. 
pub fn new( db: Arc, - meta_column: Option, - changes_tries_column: Option, - key_lookup_column: Option, - header_column: Option, - cache_column: Option, + meta_column: u32, + changes_tries_column: u32, + key_lookup_column: u32, + header_column: u32, + cache_column: u32, meta: Arc, Block::Hash>>>, min_blocks_to_keep: Option, ) -> ClientResult { @@ -493,7 +493,7 @@ where /// Read changes tries metadata from database. fn read_tries_meta( db: &dyn KeyValueDB, - meta_column: Option, + meta_column: u32, ) -> ClientResult> { match db.get(meta_column, meta_keys::CHANGES_TRIES_META).map_err(db_err)? { Some(h) => match Decode::decode(&mut &h[..]) { @@ -510,7 +510,7 @@ fn read_tries_meta( /// Write changes tries metadata from database. fn write_tries_meta( tx: &mut DBTransaction, - meta_column: Option, + meta_column: u32, meta: &ChangesTriesMeta, ) { tx.put(meta_column, meta_keys::CHANGES_TRIES_META, &meta.encode()); @@ -970,7 +970,7 @@ mod tests { ); // after truncating block2_3 - there are 2 unfinalized forks - block2_1+block2_2 - backend.revert(1).unwrap(); + backend.revert(1, false).unwrap(); assert_eq!( backend.changes_tries_storage.cache.0.write() .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) @@ -984,7 +984,7 @@ mod tests { // after truncating block2_1 && block2_2 - there are still two unfinalized forks (cache impl specifics), // the 1st one points to the block #3 because it isn't truncated - backend.revert(1).unwrap(); + backend.revert(1, false).unwrap(); assert_eq!( backend.changes_tries_storage.cache.0.write() .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) @@ -997,7 +997,7 @@ mod tests { ); // after truncating block2 - there are no unfinalized forks - backend.revert(1).unwrap(); + backend.revert(1, false).unwrap(); assert!( backend.changes_tries_storage.cache.0.write() .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) From 233afad98bf82c93d6e38aaa5deb03a75beaf4c6 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Tue, 14 Jan 2020 16:05:58 
+0300 Subject: [PATCH 62/63] uggrade db from v0 to v1 (init CT cache + add column) --- Cargo.lock | 11 ++ client/db/Cargo.toml | 2 + client/db/src/cache/list_cache.rs | 5 +- client/db/src/cache/list_storage.rs | 4 +- client/db/src/cache/mod.rs | 8 +- client/db/src/lib.rs | 8 +- client/db/src/light.rs | 11 +- client/db/src/upgrade.rs | 198 ++++++++++++++++++++++++++++ client/db/src/utils.rs | 84 +++++++++--- 9 files changed, 294 insertions(+), 37 deletions(-) create mode 100644 client/db/src/upgrade.rs diff --git a/Cargo.lock b/Cargo.lock index d649721e431fd..9576c4a1147cf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5213,6 +5213,7 @@ dependencies = [ "sp-state-machine 2.0.0", "sp-trie 2.0.0", "substrate-test-runtime-client 2.0.0", + "tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -7059,6 +7060,15 @@ name = "target_info" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "tempdir" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "tempfile" version = "3.1.0" @@ -8681,6 +8691,7 @@ dependencies = [ "checksum take_mut 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f764005d11ee5f36500a149ace24e00e3da98b0158b3e2d53a7495660d3f4d60" "checksum target-lexicon 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6f4c118a7a38378f305a9e111fcb2f7f838c0be324bfb31a77ea04f7f6e684b4" "checksum target_info 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c63f48baada5c52e65a29eef93ab4f8982681b67f9e8d29c7b05abcfec2b9ffe" +"checksum tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "15f2b5fb00ccdf689e0149d1b1b3c03fead81c2b37735d812fa8bddbbf41b6d8" "checksum tempfile 3.1.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" "checksum termcolor 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "96d6098003bde162e4277c70665bd87c326f5a0c3f3fbfb285787fa482d54e6e" "checksum test-case 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a605baa797821796a751f4a959e1206079b24a4b7e1ed302b7d785d81a9276c9" diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index d34b04a17edd3..817e84178bc47 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -31,6 +31,8 @@ sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } env_logger = "0.7.0" quickcheck = "0.9" +kvdb-rocksdb = "0.4" +tempdir = "0.3" [features] default = [] diff --git a/client/db/src/cache/list_cache.rs b/client/db/src/cache/list_cache.rs index b4acca76c6df4..72278a1e85e6d 100644 --- a/client/db/src/cache/list_cache.rs +++ b/client/db/src/cache/list_cache.rs @@ -1243,7 +1243,10 @@ pub mod tests { ); assert!(tx.inserted_entries().is_empty()); assert!(tx.removed_entries().is_empty()); - assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: None, unfinalized: vec![correct_id(5)] })); + assert_eq!( + *tx.updated_meta(), + Some(Metadata { finalized: Some(correct_id(2)), unfinalized: vec![correct_id(5)] }), + ); // finalization finalizes entry let cache = ListCache::new( DummyStorage::new() diff --git a/client/db/src/cache/list_storage.rs b/client/db/src/cache/list_storage.rs index 9cd3b1049a4f8..606090ee1401d 100644 --- a/client/db/src/cache/list_storage.rs +++ b/client/db/src/cache/list_storage.rs @@ -222,7 +222,9 @@ mod meta { unfinalized.push(&entry.valid_from); }, CommitOperation::BlockFinalized(_, ref finalizing_entry, ref forks) => { - finalized = finalizing_entry.as_ref().map(|entry| &entry.valid_from); + if let Some(finalizing_entry) = 
finalizing_entry.as_ref() { + finalized = Some(&finalizing_entry.valid_from); + } for fork_index in forks.iter().rev() { unfinalized.remove(*fork_index); } diff --git a/client/db/src/cache/mod.rs b/client/db/src/cache/mod.rs index 7b52c7a35aa2d..8fd1adc094ae4 100644 --- a/client/db/src/cache/mod.rs +++ b/client/db/src/cache/mod.rs @@ -81,7 +81,7 @@ pub struct DbCache { db: Arc, key_lookup_column: u32, header_column: u32, - authorities_column: u32, + cache_column: u32, genesis_hash: Block::Hash, best_finalized_block: ComplexBlockId, } @@ -92,7 +92,7 @@ impl DbCache { db: Arc, key_lookup_column: u32, header_column: u32, - authorities_column: u32, + cache_column: u32, genesis_hash: Block::Hash, best_finalized_block: ComplexBlockId, ) -> Self { @@ -101,7 +101,7 @@ impl DbCache { db, key_lookup_column, header_column, - authorities_column, + cache_column, genesis_hash, best_finalized_block, } @@ -158,7 +158,7 @@ impl DbCache { &self.db, self.key_lookup_column, self.header_column, - self.authorities_column, + self.cache_column, &self.best_finalized_block ) } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 288681a848ab5..f2d08808e0088 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -33,6 +33,8 @@ mod children; mod cache; mod changes_tries_storage; mod storage_cache; +#[cfg(any(feature = "kvdb-rocksdb", test))] +mod upgrade; mod utils; mod stats; @@ -67,7 +69,7 @@ use sp_state_machine::{ DBValue, ChangesTrieTransaction, ChangesTrieCacheAction, backend::Backend as StateBackend, UsageInfo as StateUsageInfo, }; -use crate::utils::{Meta, db_err, meta_keys, read_db, read_meta}; +use crate::utils::{DatabaseType, Meta, db_err, meta_keys, read_db, read_meta}; use crate::changes_tries_storage::{DbChangesTrieStorage, DbChangesTrieStorageTransaction}; use sc_client::leaves::{LeafSet, FinalizationDisplaced}; use sc_state_db::StateDb; @@ -355,7 +357,7 @@ pub struct BlockchainDb { impl BlockchainDb { fn new(db: Arc) -> ClientResult { - let meta = 
read_meta::(&*db, columns::META, columns::HEADER)?; + let meta = read_meta::(&*db, columns::HEADER)?; let leaves = LeafSet::read_from_db(&*db, columns::META, meta_keys::LEAF_PREFIX)?; Ok(BlockchainDb { db, @@ -752,7 +754,7 @@ impl Backend { /// /// The pruning window is how old a block must be before the state is pruned. pub fn new(config: DatabaseSettings, canonicalization_delay: u64) -> ClientResult { - let db = crate::utils::open_database(&config, columns::META, "full")?; + let db = crate::utils::open_database::(&config, DatabaseType::Full)?; Self::from_kvdb(db as Arc<_>, canonicalization_delay, &config) } diff --git a/client/db/src/light.rs b/client/db/src/light.rs index 4c7f983800540..e663fc5699490 100644 --- a/client/db/src/light.rs +++ b/client/db/src/light.rs @@ -38,7 +38,7 @@ use codec::{Decode, Encode}; use sp_runtime::generic::{DigestItem, BlockId}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Zero, One, NumberFor, HasherFor}; use crate::cache::{DbCacheSync, DbCache, ComplexBlockId, EntryType as CacheEntryType}; -use crate::utils::{self, meta_keys, Meta, db_err, read_db, block_id_to_lookup_key, read_meta}; +use crate::utils::{self, meta_keys, DatabaseType, Meta, db_err, read_db, block_id_to_lookup_key, read_meta}; use crate::{DatabaseSettings, FrozenForDuration}; use log::{trace, warn, debug}; @@ -68,13 +68,10 @@ pub struct LightStorage { io_stats: FrozenForDuration, } -impl LightStorage - where - Block: BlockT, -{ +impl LightStorage { /// Create new storage with given settings. 
pub fn new(config: DatabaseSettings) -> ClientResult { - let db = crate::utils::open_database(&config, columns::META, "light")?; + let db = crate::utils::open_database::(&config, DatabaseType::Light)?; Self::from_kvdb(db as Arc<_>) } @@ -89,7 +86,7 @@ impl LightStorage } fn from_kvdb(db: Arc) -> ClientResult { - let meta = read_meta::(&*db, columns::META, columns::HEADER)?; + let meta = read_meta::(&*db, columns::HEADER)?; let cache = DbCache::new( db.clone(), columns::KEY_LOOKUP, diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs new file mode 100644 index 0000000000000..ab2d4bbf799b2 --- /dev/null +++ b/client/db/src/upgrade.rs @@ -0,0 +1,198 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Database upgrade logic. + +use std::fs; +use std::io::{Read, Write, ErrorKind}; +use std::path::{Path, PathBuf}; +use std::sync::Arc; + +use codec::Encode; +use kvdb_rocksdb::{Database, DatabaseConfig}; +use parking_lot::RwLock; +use sp_blockchain::{well_known_cache_keys, Cache}; +use sp_core::ChangesTrieConfiguration; +use sp_runtime::traits::Block as BlockT; +use crate::{ + cache::{ComplexBlockId, DbCache, DbCacheSync}, + utils::{DatabaseType, check_database_type, db_err, read_genesis_hash}, +}; + +/// Version file name. +const VERSION_FILE_NAME: &'static str = "db_version"; + +/// Current db version. 
+const CURRENT_VERSION: u32 = 1; + +/// Number of columns in v0. +const V0_NUM_COLUMNS: u32 = 10; + +/// Upgrade database to current version. +pub fn upgrade_db(db_path: &Path, db_type: DatabaseType) -> sp_blockchain::Result<()> { + let db_version = current_version(db_path)?; + match db_version { + 0 => migrate_0_to_1::(db_path, db_type)?, + 1 => (), + _ => Err(sp_blockchain::Error::Backend(format!("Future database version: {}", db_version)))?, + } + + update_version(db_path) +} + +/// Migration from version0 to version1: +/// 1) the number of columns has changed from 10 to 11; +/// 2) changes tries configuration are now cached. +fn migrate_0_to_1(db_path: &Path, db_type: DatabaseType) -> sp_blockchain::Result<()> { + { + let db = open_database(db_path, db_type, V0_NUM_COLUMNS)?; + db.add_column().map_err(db_err)?; + db.flush().map_err(db_err)?; + } + + let db = open_database(db_path, db_type, V0_NUM_COLUMNS + 1)?; + + const V0_FULL_KEY_LOOKUP_COLUMN: u32 = 3; + const V0_FULL_HEADER_COLUMN: u32 = 4; + const V0_FULL_CACHE_COLUMN: u32 = 10; // that's the column we have just added + const V0_LIGHT_KEY_LOOKUP_COLUMN: u32 = 1; + const V0_LIGHT_HEADER_COLUMN: u32 = 2; + const V0_LIGHT_CACHE_COLUMN: u32 = 3; + + let (key_lookup_column, header_column, cache_column) = match db_type { + DatabaseType::Full => ( + V0_FULL_KEY_LOOKUP_COLUMN, + V0_FULL_HEADER_COLUMN, + V0_FULL_CACHE_COLUMN, + ), + DatabaseType::Light => ( + V0_LIGHT_KEY_LOOKUP_COLUMN, + V0_LIGHT_HEADER_COLUMN, + V0_LIGHT_CACHE_COLUMN, + ), + }; + + let genesis_hash: Option = read_genesis_hash(&db)?; + if let Some(genesis_hash) = genesis_hash { + let cache: DbCacheSync = DbCacheSync(RwLock::new(DbCache::new( + Arc::new(db), + key_lookup_column, + header_column, + cache_column, + genesis_hash, + ComplexBlockId::new(genesis_hash, 0.into()), + ))); + let changes_trie_config: Option = None; + cache.initialize(&well_known_cache_keys::CHANGES_TRIE_CONFIG, changes_trie_config.encode())?; + } + + Ok(()) +} + +/// Reads 
current database version from the file at given path. +/// If the file does not exist returns 0. +fn current_version(path: &Path) -> sp_blockchain::Result { + let unknown_version_err = || sp_blockchain::Error::Backend("Unknown database version".into()); + + match fs::File::open(version_file_path(path)) { + Err(ref err) if err.kind() == ErrorKind::NotFound => Ok(0), + Err(_) => Err(unknown_version_err()), + Ok(mut file) => { + let mut s = String::new(); + file.read_to_string(&mut s).map_err(|_| unknown_version_err())?; + u32::from_str_radix(&s, 10).map_err(|_| unknown_version_err()) + }, + } +} + +/// Opens database of givent type with given number of columns. +fn open_database(db_path: &Path, db_type: DatabaseType, db_columns: u32) -> sp_blockchain::Result { + let db_path = db_path.to_str() + .ok_or_else(|| sp_blockchain::Error::Backend("Invalid database path".into()))?; + let db_cfg = DatabaseConfig::with_columns(db_columns); + let db = Database::open(&db_cfg, db_path).map_err(db_err)?; + check_database_type(&db, db_type)?; + Ok(db) +} + +/// Writes current database version to the file. +/// Creates a new file if the version file does not exist yet. +fn update_version(path: &Path) -> sp_blockchain::Result<()> { + fs::create_dir_all(path).map_err(db_err)?; + let mut file = fs::File::create(version_file_path(path)).map_err(db_err)?; + file.write_all(format!("{}", CURRENT_VERSION).as_bytes()).map_err(db_err)?; + Ok(()) +} + +/// Returns the version file path. 
+fn version_file_path(path: &Path) -> PathBuf { + let mut file_path = path.to_owned(); + file_path.push(VERSION_FILE_NAME); + file_path +} + +#[cfg(test)] +mod tests { + use sc_state_db::PruningMode; + use crate::{DatabaseSettings, DatabaseSettingsSrc}; + use crate::tests::Block; + use super::*; + + fn create_db(db_path: &Path, version: Option) { + let db_cfg = DatabaseConfig::with_columns(V0_NUM_COLUMNS); + Database::open(&db_cfg, db_path.to_str().unwrap()).unwrap(); + if let Some(version) = version { + fs::create_dir_all(db_path).unwrap(); + let mut file = fs::File::create(version_file_path(db_path)).unwrap(); + file.write_all(format!("{}", version).as_bytes()).unwrap(); + } + } + + fn open_database(db_path: &Path) -> sp_blockchain::Result<()> { + crate::utils::open_database::(&DatabaseSettings { + state_cache_size: 0, + state_cache_child_ratio: None, + pruning: PruningMode::ArchiveAll, + source: DatabaseSettingsSrc::Path { path: db_path.to_owned(), cache_size: None }, + }, DatabaseType::Full).map(|_| ()) + } + + #[test] + fn downgrade_never_happens() { + let db_dir = tempdir::TempDir::new("").unwrap(); + create_db(db_dir.path(), Some(CURRENT_VERSION + 1)); + assert!(open_database(db_dir.path()).is_err()); + } + + #[test] + fn open_empty_database_works() { + let db_dir = tempdir::TempDir::new("").unwrap(); + open_database(db_dir.path()).unwrap(); + open_database(db_dir.path()).unwrap(); + assert_eq!(current_version(db_dir.path()).unwrap(), CURRENT_VERSION); + } + + #[test] + fn upgrade_from_0_to_1_works() { + for version_from_file in &[None, Some(0)] { + let db_dir = tempdir::TempDir::new("").unwrap(); + let db_path = db_dir.path(); + create_db(db_path, *version_from_file); + open_database(db_path).unwrap(); + assert_eq!(current_version(db_path).unwrap(), CURRENT_VERSION); + } + } +} diff --git a/client/db/src/utils.rs b/client/db/src/utils.rs index f2a7195e742c0..f7f51d7f6de5d 100644 --- a/client/db/src/utils.rs +++ b/client/db/src/utils.rs @@ -21,7 +21,7 @@ use 
std::sync::Arc; use std::{io, convert::TryInto}; use kvdb::{KeyValueDB, DBTransaction}; -#[cfg(feature = "kvdb-rocksdb")] +#[cfg(any(feature = "kvdb-rocksdb", test))] use kvdb_rocksdb::{Database, DatabaseConfig}; use log::debug; @@ -78,6 +78,15 @@ pub struct Meta { /// A block lookup key: used for canonical lookup from block number to hash pub type NumberIndexKey = [u8; 4]; +/// Database type. +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum DatabaseType { + /// Full node database. + Full, + /// Light node database. + Light, +} + /// Convert block number into short lookup key (LE representation) for /// blocks that are in the canonical chain. /// @@ -205,14 +214,17 @@ pub fn db_err(err: io::Error) -> sp_blockchain::Error { } /// Open RocksDB database. -pub fn open_database( +pub fn open_database( config: &DatabaseSettings, - col_meta: u32, - db_type: &str + db_type: DatabaseType, ) -> sp_blockchain::Result> { let db: Arc = match &config.source { - #[cfg(feature = "kvdb-rocksdb")] + #[cfg(any(feature = "kvdb-rocksdb", test))] DatabaseSettingsSrc::Path { path, cache_size } => { + // first upgrade database to required version + crate::upgrade::upgrade_db::(&path, db_type)?; + + // and now open database assuming that it has the latest version let mut db_config = DatabaseConfig::with_columns(NUM_COLUMNS); if let Some(cache_size) = cache_size { @@ -234,7 +246,7 @@ pub fn open_database( .ok_or_else(|| sp_blockchain::Error::Backend("Invalid database path".into()))?; Arc::new(Database::open(&db_config, &path).map_err(db_err)?) }, - #[cfg(not(feature = "kvdb-rocksdb"))] + #[cfg(not(any(feature = "kvdb-rocksdb", test)))] DatabaseSettingsSrc::Path { .. } => { let msg = "Try to open RocksDB database with RocksDB disabled".into(); return Err(sp_blockchain::Error::Backend(msg)); @@ -242,22 +254,28 @@ pub fn open_database( DatabaseSettingsSrc::Custom(db) => db.clone(), }; - // check database type - match db.get(col_meta, meta_keys::TYPE).map_err(db_err)? 
{ + check_database_type(&*db, db_type)?; + + Ok(db) +} + +/// Check database type. +pub fn check_database_type(db: &dyn KeyValueDB, db_type: DatabaseType) -> sp_blockchain::Result<()> { + match db.get(COLUMN_META, meta_keys::TYPE).map_err(db_err)? { Some(stored_type) => { - if db_type.as_bytes() != &*stored_type { + if db_type.as_str().as_bytes() != &*stored_type { return Err(sp_blockchain::Error::Backend( - format!("Unexpected database type. Expected: {}", db_type)).into()); + format!("Unexpected database type. Expected: {}", db_type.as_str())).into()); } }, None => { let mut transaction = DBTransaction::new(); - transaction.put(col_meta, meta_keys::TYPE, db_type.as_bytes()); + transaction.put(COLUMN_META, meta_keys::TYPE, db_type.as_str().as_bytes()); db.write(transaction).map_err(db_err)?; }, } - Ok(db) + Ok(()) } /// Read database column entry for the given block. @@ -306,20 +324,15 @@ pub fn require_header( } /// Read meta from the database. -pub fn read_meta(db: &dyn KeyValueDB, col_meta: u32, col_header: u32) -> Result< +pub fn read_meta(db: &dyn KeyValueDB, col_header: u32) -> Result< Meta<<::Header as HeaderT>::Number, Block::Hash>, sp_blockchain::Error, > where Block: BlockT, { - let genesis_hash: Block::Hash = match db.get(col_meta, meta_keys::GENESIS_HASH).map_err(db_err)? { - Some(h) => match Decode::decode(&mut &h[..]) { - Ok(h) => h, - Err(err) => return Err(sp_blockchain::Error::Backend( - format!("Error decoding genesis hash: {}", err) - )), - }, + let genesis_hash: Block::Hash = match read_genesis_hash(db)? 
{ + Some(genesis_hash) => genesis_hash, None => return Ok(Meta { best_hash: Default::default(), best_number: Zero::zero(), @@ -330,7 +343,7 @@ pub fn read_meta(db: &dyn KeyValueDB, col_meta: u32, col_header: u32) -> }; let load_meta_block = |desc, key| -> Result<_, sp_blockchain::Error> { - if let Some(Some(header)) = db.get(col_meta, key).and_then(|id| + if let Some(Some(header)) = db.get(COLUMN_META, key).and_then(|id| match id { Some(id) => db.get(col_header, &id).map(|h| h.map(|b| Block::Header::decode(&mut &b[..]).ok())), None => Ok(None), @@ -356,6 +369,29 @@ pub fn read_meta(db: &dyn KeyValueDB, col_meta: u32, col_header: u32) -> }) } +/// Read genesis hash from database. +pub fn read_genesis_hash(db: &dyn KeyValueDB) -> sp_blockchain::Result> { + match db.get(COLUMN_META, meta_keys::GENESIS_HASH).map_err(db_err)? { + Some(h) => match Decode::decode(&mut &h[..]) { + Ok(h) => Ok(Some(h)), + Err(err) => Err(sp_blockchain::Error::Backend( + format!("Error decoding genesis hash: {}", err) + )), + }, + None => Ok(None), + } +} + +impl DatabaseType { + /// Returns str representation of the type. 
+ pub fn as_str(&self) -> &'static str { + match *self { + DatabaseType::Full => "full", + DatabaseType::Light => "light", + } + } +} + #[cfg(test)] mod tests { use super::*; @@ -370,4 +406,10 @@ mod tests { _ => unreachable!(), }; } + + #[test] + fn database_type_as_str_works() { + assert_eq!(DatabaseType::Full.as_str(), "full"); + assert_eq!(DatabaseType::Light.as_str(), "light"); + } } From 76580402c7f89f4a42f987e0ad9a44dc9ac92550 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Thu, 16 Jan 2020 16:10:30 +0300 Subject: [PATCH 63/63] fix after merge --- primitives/state-machine/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 11bfa2dc0a168..b27bf47050f64 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -23,7 +23,7 @@ use log::{warn, trace}; use hash_db::Hasher; use codec::{Decode, Encode, Codec}; use sp_core::{ - storage::{well_known_keys, ChildInfo}, NativeOrEncoded, NeverNativeValue + storage::ChildInfo, NativeOrEncoded, NeverNativeValue, traits::{CodeExecutor, CallInWasmExt}, hexdisplay::HexDisplay, }; use overlayed_changes::OverlayedChangeSet;