From 32c32ecfdac7787ab8d4271b5245661bf253d471 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Andr=C3=A9=20Silva?=
Date: Mon, 7 May 2018 11:57:03 +0100
Subject: [PATCH 01/11] ethcore, rpc, machine: refactor block reward
 application and tracing (#8490)

---
 ethcore/src/engines/authority_round/mod.rs |  9 ++++--
 ethcore/src/engines/block_reward.rs        | 34 +++++++++++++++++-----
 ethcore/src/engines/mod.rs                 |  2 +-
 ethcore/src/engines/null_engine.rs         | 20 +++++--------
 ethcore/src/engines/tendermint/mod.rs      | 10 +++++--
 ethcore/src/ethereum/ethash.rs             | 25 ++++++++--------
 ethcore/src/machine.rs                     | 24 ++++++++++-----
 ethcore/src/trace/types/trace.rs           |  8 +++++
 machine/src/lib.rs                         |  8 -----
 rpc/src/v1/types/trace.rs                  |  8 +++++
 10 files changed, 93 insertions(+), 55 deletions(-)

diff --git a/ethcore/src/engines/authority_round/mod.rs b/ethcore/src/engines/authority_round/mod.rs
index 4807d6c3fbb..c2aee7c6efc 100644
--- a/ethcore/src/engines/authority_round/mod.rs
+++ b/ethcore/src/engines/authority_round/mod.rs
@@ -1015,8 +1015,10 @@ impl Engine for AuthorityRound {
 		let author = *block.header().author();
 		benefactors.push((author, RewardKind::Author));
 
-		let rewards = match self.block_reward_contract {
+		let rewards: Vec<_> = match self.block_reward_contract {
 			Some(ref c) if block.header().number() >= self.block_reward_contract_transition => {
+				// NOTE: this logic should be moved to a function when another
+				// engine needs support for block reward contract.
 				let mut call = |to, data| {
 					let result = self.machine.execute_as_system(
 						block,
@@ -1027,10 +1029,11 @@ impl Engine for AuthorityRound {
 					result.map_err(|e| format!("{}", e))
 				};
 
-				c.reward(&benefactors, &mut call)?
+				let rewards = c.reward(&benefactors, &mut call)?;
+				rewards.into_iter().map(|(author, amount)| (author, RewardKind::External, amount)).collect()
 			},
 			_ => {
-				benefactors.into_iter().map(|(author, _)| (author, self.block_reward)).collect()
+				benefactors.into_iter().map(|(author, reward_kind)| (author, reward_kind, self.block_reward)).collect()
 			},
 		};
 
diff --git a/ethcore/src/engines/block_reward.rs b/ethcore/src/engines/block_reward.rs
index 510a5255f5f..9a9d54e4af1 100644
--- a/ethcore/src/engines/block_reward.rs
+++ b/ethcore/src/engines/block_reward.rs
@@ -14,13 +14,17 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity.  If not, see <http://www.gnu.org/licenses/>.
 
+//! A module with types for declaring block rewards and a client interface for interacting with a
+//! block reward contract.
+
 use ethabi;
 use ethabi::ParamType;
 use ethereum_types::{H160, Address, U256};
 
-use block::ExecutedBlock;
 use error::Error;
-use machine::EthereumMachine;
+use machine::WithRewards;
+use parity_machine::{Machine, WithBalances};
+use trace;
 use super::SystemCall;
 
 use_contract!(block_reward_contract, "BlockReward", "res/contracts/block_reward.json");
@@ -37,6 +41,8 @@ pub enum RewardKind {
 	Uncle = 1,
 	/// Reward attributed to the author(s) of empty step(s) included in the block (AuthorityRound engine).
 	EmptyStep = 2,
+	/// Reward attributed by an external protocol (e.g. block reward contract).
+	External = 3,
 }
 
 impl From<RewardKind> for u16 {
@@ -45,6 +51,17 @@
 	}
 }
 
+impl Into<trace::RewardType> for RewardKind {
+	fn into(self) -> trace::RewardType {
+		match self {
+			RewardKind::Author => trace::RewardType::Block,
+			RewardKind::Uncle => trace::RewardType::Uncle,
+			RewardKind::EmptyStep => trace::RewardType::EmptyStep,
+			RewardKind::External => trace::RewardType::External,
+		}
+	}
+}
+
 /// A client for the block reward contract.
 pub struct BlockRewardContract {
 	/// Address of the contract.
@@ -112,14 +129,17 @@ impl BlockRewardContract {
 
 /// Applies the given block rewards, i.e. adds the given balance to each benefactors' address.
 /// If tracing is enabled the operations are recorded.
-pub fn apply_block_rewards(rewards: &[(Address, U256)], block: &mut ExecutedBlock, machine: &EthereumMachine) -> Result<(), Error> {
-	use parity_machine::WithBalances;
-
-	for &(ref author, ref block_reward) in rewards {
+pub fn apply_block_rewards<M: Machine + WithBalances + WithRewards>(
+	rewards: &[(Address, RewardKind, U256)],
+	block: &mut M::LiveBlock,
+	machine: &M,
+) -> Result<(), M::Error> {
+	for &(ref author, _, ref block_reward) in rewards {
 		machine.add_balance(block, author, block_reward)?;
 	}
 
-	machine.note_rewards(block, &rewards, &[])
+	let rewards: Vec<_> = rewards.into_iter().map(|&(a, k, r)| (a, k.into(), r)).collect();
+	machine.note_rewards(block, &rewards)
 }
 
 #[cfg(test)]
diff --git a/ethcore/src/engines/mod.rs b/ethcore/src/engines/mod.rs
index a17ae356e6f..e019636f53a 100644
--- a/ethcore/src/engines/mod.rs
+++ b/ethcore/src/engines/mod.rs
@@ -18,7 +18,6 @@
 
 mod authority_round;
 mod basic_authority;
-mod block_reward;
 mod instant_seal;
 mod null_engine;
 mod signer;
@@ -27,6 +26,7 @@ mod transition;
 mod validator_set;
 mod vote_collector;
 
+pub mod block_reward;
 pub mod epoch;
 
 pub use self::authority_round::AuthorityRound;
diff --git a/ethcore/src/engines/null_engine.rs b/ethcore/src/engines/null_engine.rs
index f20a9cdfd9d..278eb037c06 100644
--- a/ethcore/src/engines/null_engine.rs
+++ b/ethcore/src/engines/null_engine.rs
@@ -16,7 +16,9 @@
 
 use ethereum_types::U256;
 use engines::Engine;
+use engines::block_reward::{self, RewardKind};
 use header::BlockNumber;
+use machine::WithRewards;
 use parity_machine::{Header, LiveBlock, WithBalances};
 
 /// Params for a null engine.
@@ -56,7 +58,7 @@ impl Default for NullEngine {
 	}
 }
 
-impl<M: WithBalances> Engine<M> for NullEngine<M> {
+impl<M: WithBalances + WithRewards> Engine<M> for NullEngine<M> {
 	fn name(&self) -> &str {
 		"NullEngine"
 	}
@@ -74,26 +76,20 @@ impl Engine for NullEngine {
 
 		let n_uncles = LiveBlock::uncles(&*block).len();
 
+		let mut rewards = Vec::new();
+
 		// Bestow block reward
 		let result_block_reward = reward + reward.shr(5) * U256::from(n_uncles);
-		let mut uncle_rewards = Vec::with_capacity(n_uncles);
-
-		self.machine.add_balance(block, &author, &result_block_reward)?;
+		rewards.push((author, RewardKind::Author, result_block_reward));
 
 		// bestow uncle rewards.
 		for u in LiveBlock::uncles(&*block) {
 			let uncle_author = u.author();
 			let result_uncle_reward = (reward * U256::from(8 + u.number() - number)).shr(3);
-
-			uncle_rewards.push((*uncle_author, result_uncle_reward));
-		}
-
-		for &(ref a, ref reward) in &uncle_rewards {
-			self.machine.add_balance(block, a, reward)?;
+			rewards.push((*uncle_author, RewardKind::Uncle, result_uncle_reward));
 		}
 
-		// note and trace.
-		self.machine.note_rewards(block, &[(author, result_block_reward)], &uncle_rewards)
+		block_reward::apply_block_rewards(&rewards, block, &self.machine)
 	}
 
 	fn maximum_uncle_count(&self, _block: BlockNumber) -> usize { 2 }
diff --git a/ethcore/src/engines/tendermint/mod.rs b/ethcore/src/engines/tendermint/mod.rs
index 289beaad0cc..d80a5e182f1 100644
--- a/ethcore/src/engines/tendermint/mod.rs
+++ b/ethcore/src/engines/tendermint/mod.rs
@@ -41,6 +41,7 @@ use ethkey::{self, Message, Signature};
 use account_provider::AccountProvider;
 use block::*;
 use engines::{Engine, Seal, EngineError, ConstructedVerifier};
+use engines::block_reward::{self, RewardKind};
 use io::IoService;
 use super::signer::EngineSigner;
 use super::validator_set::{ValidatorSet, SimpleList};
@@ -550,10 +551,13 @@ impl Engine for Tendermint {
 	/// Apply the block reward on finalisation of the block.
 	fn on_close_block(&self, block: &mut ExecutedBlock) -> Result<(), Error>{
-		use parity_machine::WithBalances;
 		let author = *block.header().author();
-		self.machine.add_balance(block, &author, &self.block_reward)?;
-		self.machine.note_rewards(block, &[(author, self.block_reward)], &[])
+
+		block_reward::apply_block_rewards(
+			&[(author, RewardKind::Author, self.block_reward)],
+			block,
+			&self.machine,
+		)
 	}
 
 	fn verify_local_seal(&self, _header: &Header) -> Result<(), Error> {
diff --git a/ethcore/src/ethereum/ethash.rs b/ethcore/src/ethereum/ethash.rs
index 09e9caf727b..09151415c87 100644
--- a/ethcore/src/ethereum/ethash.rs
+++ b/ethcore/src/ethereum/ethash.rs
@@ -19,6 +19,7 @@ use std::cmp;
 use std::collections::BTreeMap;
 use std::sync::Arc;
 use hash::{KECCAK_EMPTY_LIST_RLP};
+use engines::block_reward::{self, RewardKind};
 use ethash::{quick_get_difficulty, slow_hash_block_number, EthashManager, OptimizeFor};
 use ethereum_types::{H256, H64, U256, Address};
 use unexpected::{OutOfBounds, Mismatch};
@@ -233,11 +234,13 @@ impl Engine for Arc {
 	/// This assumes that all uncles are valid uncles (i.e. of at least one generation before the current).
 	fn on_close_block(&self, block: &mut ExecutedBlock) -> Result<(), Error> {
 		use std::ops::Shr;
-		use parity_machine::{LiveBlock, WithBalances};
+		use parity_machine::LiveBlock;
 
 		let author = *LiveBlock::header(&*block).author();
 		let number = LiveBlock::header(&*block).number();
 
+		let mut rewards = Vec::new();
+
 		// Applies EIP-649 reward.
 		let reward = if number >= self.ethash_params.eip649_transition {
 			self.ethash_params.eip649_reward.unwrap_or(self.ethash_params.block_reward)
@@ -253,20 +256,21 @@ impl Engine for Arc {
 
 		// Bestow block rewards.
 		let mut result_block_reward = reward + reward.shr(5) * U256::from(n_uncles);
-		let mut uncle_rewards = Vec::with_capacity(n_uncles);
 
 		if number >= self.ethash_params.mcip3_transition {
 			result_block_reward = self.ethash_params.mcip3_miner_reward;
+
 			let ubi_contract = self.ethash_params.mcip3_ubi_contract;
 			let ubi_reward = self.ethash_params.mcip3_ubi_reward;
 			let dev_contract = self.ethash_params.mcip3_dev_contract;
 			let dev_reward = self.ethash_params.mcip3_dev_reward;
 
-			self.machine.add_balance(block, &author, &result_block_reward)?;
-			self.machine.add_balance(block, &ubi_contract, &ubi_reward)?;
-			self.machine.add_balance(block, &dev_contract, &dev_reward)?;
+			rewards.push((author, RewardKind::Author, result_block_reward));
+			rewards.push((ubi_contract, RewardKind::External, ubi_reward));
+			rewards.push((dev_contract, RewardKind::External, dev_reward));
+
 		} else {
-			self.machine.add_balance(block, &author, &result_block_reward)?;
+			rewards.push((author, RewardKind::Author, result_block_reward));
 		}
 
 		// Bestow uncle rewards.
@@ -278,15 +282,10 @@ impl Engine for Arc {
 				reward.shr(5)
 			};
 
-			uncle_rewards.push((*uncle_author, result_uncle_reward));
-		}
-
-		for &(ref a, ref reward) in &uncle_rewards {
-			self.machine.add_balance(block, a, reward)?;
+			rewards.push((*uncle_author, RewardKind::Uncle, result_uncle_reward));
 		}
 
-		// Note and trace.
-		self.machine.note_rewards(block, &[(author, result_block_reward)], &uncle_rewards)
+		block_reward::apply_block_rewards(&rewards, block, &self.machine)
 	}
 
 	fn verify_local_seal(&self, header: &Header) -> Result<(), Error> {
diff --git a/ethcore/src/machine.rs b/ethcore/src/machine.rs
index e3bf7d340ca..4aa72b50d92 100644
--- a/ethcore/src/machine.rs
+++ b/ethcore/src/machine.rs
@@ -437,22 +437,30 @@ impl ::parity_machine::WithBalances for EthereumMachine {
 	fn add_balance(&self, live: &mut ExecutedBlock, address: &Address, amount: &U256) -> Result<(), Error> {
 		live.state_mut().add_balance(address, amount, CleanupMode::NoEmpty).map_err(Into::into)
 	}
+}
+
+/// A state machine that uses block rewards.
+pub trait WithRewards: ::parity_machine::Machine {
+	/// Note block rewards, traces each reward storing information about benefactor, amount and type
+	/// of reward.
+	fn note_rewards(
+		&self,
+		live: &mut Self::LiveBlock,
+		rewards: &[(Address, RewardType, U256)],
+	) -> Result<(), Self::Error>;
+}
 
+impl WithRewards for EthereumMachine {
 	fn note_rewards(
 		&self,
 		live: &mut Self::LiveBlock,
-		direct: &[(Address, U256)],
-		indirect: &[(Address, U256)],
+		rewards: &[(Address, RewardType, U256)],
 	) -> Result<(), Self::Error> {
 		if let Tracing::Enabled(ref mut traces) = *live.traces_mut() {
 			let mut tracer = ExecutiveTracer::default();
 
-			for &(address, amount) in direct {
-				tracer.trace_reward(address, amount, RewardType::Block);
-			}
-
-			for &(address, amount) in indirect {
-				tracer.trace_reward(address, amount, RewardType::Uncle);
+			for &(address, ref reward_type, amount) in rewards {
+				tracer.trace_reward(address, amount, reward_type.clone());
 			}
 
 			traces.push(tracer.drain().into());
diff --git a/ethcore/src/trace/types/trace.rs b/ethcore/src/trace/types/trace.rs
index 06f24efac38..cdb00a52294 100644
--- a/ethcore/src/trace/types/trace.rs
+++ b/ethcore/src/trace/types/trace.rs
@@ -141,6 +141,10 @@ pub enum RewardType {
 	Block,
 	/// Uncle
 	Uncle,
+	/// Empty step (AuthorityRound)
+	EmptyStep,
+	/// A reward directly attributed by an external protocol (e.g. block reward contract)
+	External,
 }
 
 impl Encodable for RewardType {
@@ -148,6 +152,8 @@ impl Encodable for RewardType {
 		let v = match *self {
 			RewardType::Block => 0u32,
 			RewardType::Uncle => 1,
+			RewardType::EmptyStep => 2,
+			RewardType::External => 3,
 		};
 		Encodable::rlp_append(&v, s);
 	}
@@ -158,6 +164,8 @@ impl Decodable for RewardType {
 		rlp.as_val().and_then(|v| Ok(match v {
 			0u32 => RewardType::Block,
 			1 => RewardType::Uncle,
+			2 => RewardType::EmptyStep,
+			3 => RewardType::External,
 			_ => return Err(DecoderError::Custom("Invalid value of RewardType item")),
 		}))
 	}
diff --git a/machine/src/lib.rs b/machine/src/lib.rs
index 3a45c38d2ef..54ee403d954 100644
--- a/machine/src/lib.rs
+++ b/machine/src/lib.rs
@@ -106,12 +106,4 @@ pub trait WithBalances: Machine {
 
 	/// Increment the balance of an account in the state of the live block.
 	fn add_balance(&self, live: &mut Self::LiveBlock, address: &Address, amount: &U256) -> Result<(), Self::Error>;
-
-	/// Note block rewards. "direct" rewards are for authors, "indirect" are for e.g. uncles.
-	fn note_rewards(
-		&self,
-		_live: &mut Self::LiveBlock,
-		_direct: &[(Address, U256)],
-		_indirect: &[(Address, U256)],
-	) -> Result<(), Self::Error> { Ok(()) }
 }
diff --git a/rpc/src/v1/types/trace.rs b/rpc/src/v1/types/trace.rs
index a984c64ba80..6eb222f5e63 100644
--- a/rpc/src/v1/types/trace.rs
+++ b/rpc/src/v1/types/trace.rs
@@ -308,6 +308,12 @@ pub enum RewardType {
 	/// Uncle
 	#[serde(rename="uncle")]
 	Uncle,
+	/// EmptyStep (AuthorityRound)
+	#[serde(rename="emptyStep")]
+	EmptyStep,
+	/// External (attributed as part of an external protocol)
+	#[serde(rename="external")]
+	External,
 }
 
 impl From for RewardType {
@@ -315,6 +321,8 @@ impl From for RewardType {
 		match c {
 			trace::RewardType::Block => RewardType::Block,
 			trace::RewardType::Uncle => RewardType::Uncle,
+			trace::RewardType::EmptyStep => RewardType::EmptyStep,
+			trace::RewardType::External => RewardType::External,
 		}
 	}
 }

From 528497b86a93c64f317dd869da47010c2675e4ed Mon Sep 17 00:00:00 2001
From: Wei Tang
Date: Mon, 7 May 2018 18:58:25 +0800
Subject: [PATCH 02/11] Keep all enacted blocks notify in order (#8524)

* Keep all enacted blocks notify in order
* Collect is unnecessary
* Update ChainNotify to use ChainRouteType
* Fix all ethcore fn defs
* Wrap the type within ChainRoute
* Fix private-tx and sync api
* Fix secret_store API
* Fix updater API
* Fix rpc api
* Fix informant api
* Eagerly cache enacted/retracted and remove contain_enacted/retracted
* Fix indent
* tests: should use full expr form for struct constructor
* Use into_enacted_retracted to further avoid copy
* typo: not a function
* rpc/tests: ChainRoute -> ChainRoute::new
---
 ethcore/private-tx/src/lib.rs                 |  4 +-
 ethcore/src/blockchain/import_route.rs        |  2 +-
 ethcore/src/client/chain_notify.rs            | 88 ++++++++++++++++++-
 ethcore/src/client/client.rs                  | 49 +++--------
 ethcore/src/client/mod.rs                     |  2 +-
 ethcore/src/snapshot/watcher.rs               | 10 +--
 ethcore/sync/src/api.rs                       |  9 +-
 ethcore/sync/src/tests/helpers.rs             |  7 +-
 parity/informant.rs                           |  4 +-
 rpc/src/v1/impls/eth_pubsub.rs                | 49 ++++++-----
 rpc/src/v1/tests/mocked/eth_pubsub.rs         | 10 +--
 secret_store/src/acl_storage.rs               |  6 +-
 secret_store/src/key_server_set.rs            |  6 +-
 .../src/listener/service_contract_listener.rs |  6 +-
 updater/src/updater.rs                        |  4 +-
 15 files changed, 158 insertions(+), 98 deletions(-)

diff --git a/ethcore/private-tx/src/lib.rs b/ethcore/private-tx/src/lib.rs
index 26a31fc7ae3..723d4918298 100644
--- a/ethcore/private-tx/src/lib.rs
+++ b/ethcore/private-tx/src/lib.rs
@@ -79,7 +79,7 @@ use ethcore::executed::{Executed};
 use transaction::{SignedTransaction, Transaction, Action, UnverifiedTransaction};
 use ethcore::{contract_address as ethcore_contract_address};
 use ethcore::client::{
-	Client, ChainNotify, ChainMessageType, ClientIoMessage, BlockId, CallContract
+	Client, ChainNotify, ChainRoute, ChainMessageType, ClientIoMessage, BlockId, CallContract
 };
 use ethcore::account_provider::AccountProvider;
 use ethcore::miner::{self, Miner, MinerService};
@@ -668,7 +668,7 @@ fn find_account_password(passwords: &Vec<String>, account_provider: &AccountProv
 }
 
 impl ChainNotify for Provider {
-	fn new_blocks(&self, imported: Vec<H256>, _invalid: Vec<H256>, _enacted: Vec<H256>, _retracted: Vec<H256>, _sealed: Vec<H256>, _proposed: Vec<Bytes>, _duration: Duration) {
+	fn new_blocks(&self, imported: Vec<H256>, _invalid: Vec<H256>, _route: ChainRoute, _sealed: Vec<H256>, _proposed: Vec<Bytes>, _duration: Duration) {
 		if !imported.is_empty() {
 			trace!("New blocks imported, try to prune the queue");
 			if let Err(err) = self.process_queue() {
diff --git a/ethcore/src/blockchain/import_route.rs b/ethcore/src/blockchain/import_route.rs
index cf5d3ca1e78..080d3b06824 100644
--- a/ethcore/src/blockchain/import_route.rs
+++ b/ethcore/src/blockchain/import_route.rs
@@ -20,7 +20,7 @@ use ethereum_types::H256;
 use blockchain::block_info::{BlockInfo, BlockLocation};
 
 /// Import route for newly inserted block.
-#[derive(Debug, PartialEq)]
+#[derive(Debug, PartialEq, Clone)]
 pub struct ImportRoute {
 	/// Blocks that were invalidated by new block.
 	pub retracted: Vec<H256>,
diff --git a/ethcore/src/client/chain_notify.rs b/ethcore/src/client/chain_notify.rs
index a1f84d2a139..8330fb40d9e 100644
--- a/ethcore/src/client/chain_notify.rs
+++ b/ethcore/src/client/chain_notify.rs
@@ -17,7 +17,9 @@
 use bytes::Bytes;
 use ethereum_types::H256;
 use transaction::UnverifiedTransaction;
+use blockchain::ImportRoute;
 use std::time::Duration;
+use std::collections::HashMap;
 
 /// Messages to broadcast via chain
 pub enum ChainMessageType {
@@ -29,6 +31,89 @@ pub enum ChainMessageType {
 	SignedPrivateTransaction(Vec<u8>),
 }
 
+/// Route type to indicate whether it is enacted or retracted.
+#[derive(Clone)]
+pub enum ChainRouteType {
+	/// Enacted block
+	Enacted,
+	/// Retracted block
+	Retracted
+}
+
+/// A complete chain enacted retracted route.
+#[derive(Default, Clone)]
+pub struct ChainRoute {
+	route: Vec<(H256, ChainRouteType)>,
+	enacted: Vec<H256>,
+	retracted: Vec<H256>,
+}
+
+impl<'a> From<&'a [ImportRoute]> for ChainRoute {
+	fn from(import_results: &'a [ImportRoute]) -> ChainRoute {
+		ChainRoute::new(import_results.iter().flat_map(|route| {
+			route.retracted.iter().map(|h| (*h, ChainRouteType::Retracted))
+				.chain(route.enacted.iter().map(|h| (*h, ChainRouteType::Enacted)))
+		}).collect())
+	}
+}
+
+impl ChainRoute {
+	/// Create a new ChainRoute based on block hash and route type pairs.
+	pub fn new(route: Vec<(H256, ChainRouteType)>) -> Self {
+		let (enacted, retracted) = Self::to_enacted_retracted(&route);
+
+		Self { route, enacted, retracted }
+	}
+
+	/// Gather all non-duplicate enacted and retracted blocks.
+	fn to_enacted_retracted(route: &[(H256, ChainRouteType)]) -> (Vec<H256>, Vec<H256>) {
+		fn map_to_vec(map: Vec<(H256, bool)>) -> Vec<H256> {
+			map.into_iter().map(|(k, _v)| k).collect()
+		}
+
+		// Because we are doing multiple inserts some of the blocks that were enacted in import `k`
+		// could be retracted in import `k+1`. This is why, to understand if after all inserts
+		// the block is enacted or retracted, we iterate over all routes; at the end the final state
+		// will be in the hashmap
+		let map = route.iter().fold(HashMap::new(), |mut map, route| {
+			match &route.1 {
+				&ChainRouteType::Enacted => {
+					map.insert(route.0, true);
+				},
+				&ChainRouteType::Retracted => {
+					map.insert(route.0, false);
+				},
+			}
+			map
+		});
+
+		// Split to enacted retracted (using hashmap value)
+		let (enacted, retracted) = map.into_iter().partition(|&(_k, v)| v);
+		// And convert tuples to keys
+		(map_to_vec(enacted), map_to_vec(retracted))
+	}
+
+	/// Consume route and return the enacted retracted form.
+	pub fn into_enacted_retracted(self) -> (Vec<H256>, Vec<H256>) {
+		(self.enacted, self.retracted)
+	}
+
+	/// All non-duplicate enacted blocks.
+	pub fn enacted(&self) -> &[H256] {
+		&self.enacted
+	}
+
+	/// All non-duplicate retracted blocks.
+	pub fn retracted(&self) -> &[H256] {
+		&self.retracted
+	}
+
+	/// All blocks in the route.
+	pub fn route(&self) -> &[(H256, ChainRouteType)] {
+		&self.route
+	}
+}
+
 /// Represents what has to be handled by actor listening to chain events
 pub trait ChainNotify : Send + Sync {
 	/// fires when chain has new blocks.
@@ -36,8 +121,7 @@ pub trait ChainNotify : Send + Sync {
 		&self,
 		_imported: Vec<H256>,
 		_invalid: Vec<H256>,
-		_enacted: Vec<H256>,
-		_retracted: Vec<H256>,
+		_route: ChainRoute,
 		_sealed: Vec<H256>,
 		// Block bytes.
 		_proposed: Vec<Bytes>,
diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs
index a37d62a5911..8119ebd35f6 100644
--- a/ethcore/src/client/client.rs
+++ b/ethcore/src/client/client.rs
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity.  If not, see <http://www.gnu.org/licenses/>.
 
-use std::collections::{HashSet, HashMap, BTreeMap, BTreeSet, VecDeque};
+use std::collections::{HashSet, BTreeMap, BTreeSet, VecDeque};
 use std::str::FromStr;
 use std::sync::{Arc, Weak};
 use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering};
@@ -45,7 +45,7 @@ use client::{
 use client::{
 	BlockId, TransactionId, UncleId, TraceId, ClientConfig,
 	BlockChainClient, TraceFilter, CallAnalytics, BlockImportError, Mode,
-	ChainNotify, PruningInfo, ProvingBlockChainClient, EngineInfo, ChainMessageType
+	ChainNotify, ChainRoute, PruningInfo, ProvingBlockChainClient, EngineInfo, ChainMessageType
 };
 use encoded;
 use engines::{EthEngine, EpochTransition};
@@ -245,32 +245,6 @@ impl Importer {
 		})
 	}
 
-	fn calculate_enacted_retracted(&self, import_results: &[ImportRoute]) -> (Vec<H256>, Vec<H256>) {
-		fn map_to_vec(map: Vec<(H256, bool)>) -> Vec<H256> {
-			map.into_iter().map(|(k, _v)| k).collect()
-		}
-
-		// In ImportRoute we get all the blocks that have been enacted and retracted by single insert.
-		// Because we are doing multiple inserts some of the blocks that were enacted in import `k`
-		// could be retracted in import `k+1`. This is why to understand if after all inserts
-		// the block is enacted or retracted we iterate over all routes and at the end final state
-		// will be in the hashmap
-		let map = import_results.iter().fold(HashMap::new(), |mut map, route| {
-			for hash in &route.enacted {
-				map.insert(hash.clone(), true);
-			}
-			for hash in &route.retracted {
-				map.insert(hash.clone(), false);
-			}
-			map
-		});
-
-		// Split to enacted retracted (using hashmap value)
-		let (enacted, retracted) = map.into_iter().partition(|&(_k, v)| v);
-		// And convert tuples to keys
-		(map_to_vec(enacted), map_to_vec(retracted))
-	}
-
 	/// This is triggered by a message coming from a block queue when the block is ready for insertion
 	pub fn import_verified_blocks(&self, client: &Client) -> usize {
@@ -336,18 +310,17 @@ impl Importer {
 		{
 			if !imported_blocks.is_empty() && is_empty {
-				let (enacted, retracted) = self.calculate_enacted_retracted(&import_results);
+				let route = ChainRoute::from(import_results.as_ref());
 
 				if is_empty {
-					self.miner.chain_new_blocks(client, &imported_blocks, &invalid_blocks, &enacted, &retracted, false);
+					self.miner.chain_new_blocks(client, &imported_blocks, &invalid_blocks, route.enacted(), route.retracted(), false);
 				}
 
 				client.notify(|notify| {
 					notify.new_blocks(
 						imported_blocks.clone(),
 						invalid_blocks.clone(),
-						enacted.clone(),
-						retracted.clone(),
+						route.clone(),
 						Vec::new(),
 						proposed_blocks.clone(),
 						duration,
@@ -1421,7 +1394,7 @@ impl ImportBlock for Client {
 	}
 
 	fn import_block_with_receipts(&self, block_bytes: Bytes, receipts_bytes: Bytes) -> Result<H256, BlockImportError> {
 		let header: Header = ::rlp::Rlp::new(&block_bytes).val_at(0)?;
 		{
 			// check block order
 			if self.chain.read().is_known(&header.hash()) {
@@ -2155,14 +2128,13 @@ impl ImportSealedBlock for Client {
 			self.state_db.write().sync_cache(&route.enacted, &route.retracted, false);
 			route
 		};
-		let (enacted, retracted) = self.importer.calculate_enacted_retracted(&[route]);
-		self.importer.miner.chain_new_blocks(self, &[h.clone()], &[], &enacted, &retracted, true);
+		let route = ChainRoute::from([route].as_ref());
+		self.importer.miner.chain_new_blocks(self, &[h.clone()], &[], route.enacted(), route.retracted(), true);
 		self.notify(|notify| {
 			notify.new_blocks(
 				vec![h.clone()],
 				vec![],
-				enacted.clone(),
-				retracted.clone(),
+				route.clone(),
 				vec![h.clone()],
 				vec![],
 				start.elapsed(),
@@ -2180,8 +2152,7 @@ impl BroadcastProposalBlock for Client {
 			notify.new_blocks(
 				vec![],
 				vec![],
-				vec![],
-				vec![],
+				ChainRoute::default(),
 				vec![],
 				vec![block.rlp_bytes()],
 				DURATION_ZERO,
diff --git a/ethcore/src/client/mod.rs b/ethcore/src/client/mod.rs
index 1e4ccba585f..05e2018258f 100644
--- a/ethcore/src/client/mod.rs
+++ b/ethcore/src/client/mod.rs
@@ -31,7 +31,7 @@ pub use self::error::Error;
 pub use self::evm_test_client::{EvmTestClient, EvmTestError, TransactResult};
 pub use self::io_message::ClientIoMessage;
 pub use self::test_client::{TestBlockChainClient, EachBlockWith};
-pub use self::chain_notify::{ChainNotify, ChainMessageType};
+pub use self::chain_notify::{ChainNotify, ChainRoute, ChainRouteType, ChainMessageType};
 pub use self::traits::{
 	Nonce, Balance, ChainInfo, BlockInfo, ReopenBlock, PrepareOpenBlock, CallContract, TransactionInfo, RegistryInfo, ScheduleInfo, ImportSealedBlock, BroadcastProposalBlock, ImportBlock,
 	StateOrBlock, StateClient, Call, EngineInfo, AccountData, BlockChain, BlockProducer, SealedBlockImporter
diff --git a/ethcore/src/snapshot/watcher.rs b/ethcore/src/snapshot/watcher.rs
index 936feaefba8..6e04fe6d16d 100644
--- a/ethcore/src/snapshot/watcher.rs
+++ b/ethcore/src/snapshot/watcher.rs
@@ -17,7 +17,7 @@
 //! Watcher for snapshot-related chain events.
 
 use parking_lot::Mutex;
-use client::{BlockInfo, Client, ChainNotify, ClientIoMessage};
+use client::{BlockInfo, Client, ChainNotify, ChainRoute, ClientIoMessage};
 use ids::BlockId;
 use io::IoChannel;
@@ -103,8 +103,7 @@ impl ChainNotify for Watcher {
 		&self,
 		imported: Vec<H256>,
 		_: Vec<H256>,
-		_: Vec<H256>,
-		_: Vec<H256>,
+		_: ChainRoute,
 		_: Vec<H256>,
 		_: Vec<Bytes>,
 		_duration: Duration)
@@ -131,7 +130,7 @@
 mod tests {
 	use super::{Broadcast, Oracle, Watcher};
-	use client::ChainNotify;
+	use client::{ChainNotify, ChainRoute};
 
 	use ethereum_types::{H256, U256};
@@ -174,8 +173,7 @@
 		watcher.new_blocks(
 			hashes,
 			vec![],
-			vec![],
-			vec![],
+			ChainRoute::default(),
 			vec![],
 			vec![],
 			DURATION_ZERO,
diff --git a/ethcore/sync/src/api.rs b/ethcore/sync/src/api.rs
index 5e96f11cf37..7690eeb864b 100644
--- a/ethcore/sync/src/api.rs
+++ b/ethcore/sync/src/api.rs
@@ -25,7 +25,7 @@ use network::{NetworkProtocolHandler, NetworkContext, HostInfo, PeerId, Protocol
 use ethereum_types::{H256, H512, U256};
 use io::{TimerToken};
 use ethcore::ethstore::ethkey::Secret;
-use ethcore::client::{BlockChainClient, ChainNotify, ChainMessageType};
+use ethcore::client::{BlockChainClient, ChainNotify, ChainRoute, ChainMessageType};
 use ethcore::snapshot::SnapshotService;
 use ethcore::header::BlockNumber;
 use sync_io::NetSyncIo;
@@ -409,8 +409,7 @@ impl ChainNotify for EthSync {
 	fn new_blocks(&self,
 		imported: Vec<H256>,
 		invalid: Vec<H256>,
-		enacted: Vec<H256>,
-		retracted: Vec<H256>,
+		route: ChainRoute,
 		sealed: Vec<H256>,
 		proposed: Vec<Bytes>,
 		_duration: Duration)
@@ -424,8 +423,8 @@ impl ChainNotify for EthSync {
 				&mut sync_io,
 				&imported,
 				&invalid,
-				&enacted,
-				&retracted,
+				route.enacted(),
+				route.retracted(),
 				&sealed,
 				&proposed);
 		});
diff --git a/ethcore/sync/src/tests/helpers.rs b/ethcore/sync/src/tests/helpers.rs
index 54467adb775..dc52fdd8b85 100644
--- a/ethcore/sync/src/tests/helpers.rs
+++ b/ethcore/sync/src/tests/helpers.rs
@@ -23,7 +23,7 @@ use bytes::Bytes;
 use network::{self, PeerId, ProtocolId, PacketId, SessionInfo};
 use tests::snapshot::*;
 use ethcore::client::{TestBlockChainClient, BlockChainClient, Client as EthcoreClient,
-	ClientConfig, ChainNotify, ChainMessageType, ClientIoMessage};
+	ClientConfig, ChainNotify, ChainRoute, ChainMessageType, ClientIoMessage};
 use ethcore::header::BlockNumber;
 use ethcore::snapshot::SnapshotService;
 use ethcore::spec::Spec;
@@ -535,12 +535,13 @@ impl ChainNotify for EthPeer {
 	fn new_blocks(&self,
 		imported: Vec<H256>,
 		invalid: Vec<H256>,
-		enacted: Vec<H256>,
-		retracted: Vec<H256>,
+		route: ChainRoute,
 		sealed: Vec<H256>,
 		proposed: Vec<Bytes>,
 		_duration: Duration)
 	{
+		let (enacted, retracted) = route.into_enacted_retracted();
+
 		self.new_blocks_queue.write().push_back(NewBlockMessage {
 			imported,
 			invalid,
diff --git a/parity/informant.rs b/parity/informant.rs
index 5c2e0ab89d3..beeb258b522 100644
--- a/parity/informant.rs
+++ b/parity/informant.rs
@@ -25,7 +25,7 @@ use std::time::{Instant, Duration};
 use atty;
 use ethcore::client::{
 	BlockId, BlockChainClient, ChainInfo, BlockInfo, BlockChainInfo,
-	BlockQueueInfo, ChainNotify, ClientReport, Client, ClientIoMessage
+	BlockQueueInfo, ChainNotify, ChainRoute, ClientReport, Client, ClientIoMessage
 };
 use ethcore::header::BlockNumber;
 use ethcore::snapshot::{RestorationStatus, SnapshotService as SS};
@@ -351,7 +351,7 @@ impl Informant {
 }
 
 impl ChainNotify for Informant {
-	fn new_blocks(&self, imported: Vec<H256>, _invalid: Vec<H256>, _enacted: Vec<H256>, _retracted: Vec<H256>, _sealed: Vec<H256>, _proposed: Vec<Bytes>, duration: Duration) {
+	fn new_blocks(&self, imported: Vec<H256>, _invalid: Vec<H256>, _route: ChainRoute, _sealed: Vec<H256>, _proposed: Vec<Bytes>, duration: Duration) {
 		let mut last_import = self.last_import.lock();
 		let client = &self.target.client;
diff --git a/rpc/src/v1/impls/eth_pubsub.rs b/rpc/src/v1/impls/eth_pubsub.rs
index 1a872f5600a..30459594466 100644
--- a/rpc/src/v1/impls/eth_pubsub.rs
+++ b/rpc/src/v1/impls/eth_pubsub.rs
@@ -34,7 +34,7 @@ use v1::types::{pubsub, RichHeader, Log};
 
 use ethcore::encoded;
 use ethcore::filter::Filter as EthFilter;
-use ethcore::client::{BlockChainClient, ChainNotify, BlockId};
+use ethcore::client::{BlockChainClient, ChainNotify, ChainRoute, ChainRouteType, BlockId};
 use sync::LightSync;
 use light::cache::Cache;
 use light::on_demand::OnDemand;
@@ -141,19 +141,20 @@ impl ChainNotificationHandler {
 		}
 	}
 
-	fn notify_logs<F, T>(&self, enacted: &[H256], logs: F) where
-		F: Fn(EthFilter) -> T,
+	fn notify_logs<F, T, Ex>(&self, enacted: &[(H256, Ex)], logs: F) where
+		F: Fn(EthFilter, &Ex) -> T,
+		Ex: Send,
 		T: IntoFuture<Item = Vec<Log>, Error = Error>,
 		T::Future: Send + 'static,
 	{
 		for &(ref subscriber, ref filter) in self.logs_subscribers.read().values() {
 			let logs = futures::future::join_all(enacted
 				.iter()
-				.map(|hash| {
+				.map(|&(hash, ref ex)| {
 					let mut filter = filter.clone();
-					filter.from_block = BlockId::Hash(*hash);
+					filter.from_block = BlockId::Hash(hash);
 					filter.to_block = filter.from_block.clone();
-					logs(filter).into_future()
+					logs(filter, ex).into_future()
 				})
 				.collect::<Vec<_>>()
 			);
@@ -214,7 +215,7 @@ impl LightChainNotify for ChainNotificationHandler {
 			.collect::<Vec<_>>();
 
 		self.notify_heads(&headers);
-		self.notify_logs(&enacted, |filter| self.client.logs(filter))
+		self.notify_logs(&enacted.iter().map(|h| (*h, ())).collect::<Vec<_>>(), |filter, _| self.client.logs(filter))
 	}
 }
 
@@ -223,17 +224,21 @@ impl ChainNotify for ChainNotificationHandler {
 		&self,
 		_imported: Vec<H256>,
 		_invalid: Vec<H256>,
-		enacted: Vec<H256>,
-		retracted: Vec<H256>,
+		route: ChainRoute,
 		_sealed: Vec<H256>,
 		// Block bytes.
 		_proposed: Vec<Bytes>,
 		_duration: Duration,
 	) {
 		const EXTRA_INFO_PROOF: &'static str = "Object exists in in blockchain (fetched earlier), extra_info is always available if object exists; qed";
-		let headers = enacted
+		let headers = route.route()
 			.iter()
-			.filter_map(|hash| self.client.block_header(BlockId::Hash(*hash)))
+			.filter_map(|&(hash, ref typ)| {
+				match typ {
+					&ChainRouteType::Retracted => None,
+					&ChainRouteType::Enacted => self.client.block_header(BlockId::Hash(hash))
+				}
+			})
 			.map(|header| {
 				let hash = header.hash();
 				(header, self.client.block_extra_info(BlockId::Hash(hash)).expect(EXTRA_INFO_PROOF))
 			})
@@ -243,17 +248,17 @@
 		// Headers
 		self.notify_heads(&headers);
 
-		// Enacted logs
-		self.notify_logs(&enacted, |filter| {
-			Ok(self.client.logs(filter).into_iter().map(Into::into).collect())
-		});
-
-		// Retracted logs
-		self.notify_logs(&retracted, |filter| {
-			Ok(self.client.logs(filter).into_iter().map(Into::into).map(|mut log: Log| {
-				log.log_type = "removed".into();
-				log
-			}).collect())
+		// We notify logs enacting and retracting in the order given by the route.
+		self.notify_logs(route.route(), |filter, ex| {
+			match ex {
+				&ChainRouteType::Enacted =>
+					Ok(self.client.logs(filter).into_iter().map(Into::into).collect()),
+				&ChainRouteType::Retracted =>
+					Ok(self.client.logs(filter).into_iter().map(Into::into).map(|mut log: Log| {
+						log.log_type = "removed".into();
+						log
+					}).collect()),
+			}
 		});
 	}
 }
diff --git a/rpc/src/v1/tests/mocked/eth_pubsub.rs b/rpc/src/v1/tests/mocked/eth_pubsub.rs
index fb28ba31274..936695a9a13 100644
--- a/rpc/src/v1/tests/mocked/eth_pubsub.rs
+++ b/rpc/src/v1/tests/mocked/eth_pubsub.rs
@@ -24,7 +24,7 @@ use std::time::Duration;
 
 use v1::{EthPubSub, EthPubSubClient, Metadata};
 
-use ethcore::client::{TestBlockChainClient, EachBlockWith, ChainNotify};
+use ethcore::client::{TestBlockChainClient, EachBlockWith, ChainNotify, ChainRoute, ChainRouteType};
 use parity_reactor::EventLoop;
 
 const DURATION_ZERO: Duration = Duration::from_millis(0);
@@ -57,13 +57,13 @@ fn should_subscribe_to_new_heads() {
 	assert_eq!(io.handle_request_sync(request, metadata.clone()), Some(response.to_owned()));
 
 	// Check notifications
-	handler.new_blocks(vec![], vec![], vec![h1], vec![], vec![], vec![], DURATION_ZERO);
+	handler.new_blocks(vec![], vec![], ChainRoute::new(vec![(h1, ChainRouteType::Enacted)]), vec![], vec![], DURATION_ZERO);
 	let (res, receiver) = receiver.into_future().wait().unwrap();
 	let response = r#"{"jsonrpc":"2.0","method":"eth_subscription","params":{"result":{"author":"0x0000000000000000000000000000000000000000","difficulty":"0x1","extraData":"0x","gasLimit":"0xf4240","gasUsed":"0x0","hash":"0x3457d2fa2e3dd33c78ac681cf542e429becf718859053448748383af67e23218","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","number":"0x1","parentHash":"0x0cd786a2425d16f152c658316c423e6ce1181e15c3295826d7c9904cba9ce303","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sealFields":[],"sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x1c9","stateRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","timestamp":"0x0","transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"},"subscription":"0x416d77337e24399d"}}"#;
 	assert_eq!(res, Some(response.into()));
 
 	// Notify about two blocks
-	handler.new_blocks(vec![], vec![], vec![h2, h3], vec![], vec![], vec![], DURATION_ZERO);
+	handler.new_blocks(vec![], vec![], ChainRoute::new(vec![(h2, ChainRouteType::Enacted), (h3, ChainRouteType::Enacted)]), vec![], vec![], DURATION_ZERO);
 
 	// Receive both
 	let (res, receiver) = receiver.into_future().wait().unwrap();
@@ -129,7 +129,7 @@ fn should_subscribe_to_logs() {
 	assert_eq!(io.handle_request_sync(request, metadata.clone()), Some(response.to_owned()));
 
 	// Check notifications (enacted)
-	handler.new_blocks(vec![], vec![], vec![h1], vec![], vec![], vec![], DURATION_ZERO);
+	handler.new_blocks(vec![], vec![], ChainRoute::new(vec![(h1, ChainRouteType::Enacted)]), vec![], vec![], DURATION_ZERO);
 	let (res, receiver) = receiver.into_future().wait().unwrap();
 	let response = r#"{"jsonrpc":"2.0","method":"eth_subscription","params":{"result":{"address":"0x0000000000000000000000000000000000000005","blockHash":"0x3457d2fa2e3dd33c78ac681cf542e429becf718859053448748383af67e23218","blockNumber":"0x1","data":"0x","logIndex":"0x0","topics":["0x0000000000000000000000000000000000000000000000000000000000000001","0x0000000000000000000000000000000000000000000000000000000000000002","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000"],"transactionHash":""#.to_owned()
 		+ &format!("0x{:x}", tx_hash)
 	assert_eq!(res, Some(response.into()));
 
@@ -137,7 +137,7 @@
 	// Check notifications (retracted)
-	handler.new_blocks(vec![], vec![], vec![], vec![h1], vec![], vec![], DURATION_ZERO);
+	handler.new_blocks(vec![], vec![], ChainRoute::new(vec![(h1, ChainRouteType::Retracted)]), vec![], vec![], DURATION_ZERO);
 	let (res, receiver) = receiver.into_future().wait().unwrap();
 	let response = r#"{"jsonrpc":"2.0","method":"eth_subscription","params":{"result":{"address":"0x0000000000000000000000000000000000000005","blockHash":"0x3457d2fa2e3dd33c78ac681cf542e429becf718859053448748383af67e23218","blockNumber":"0x1","data":"0x","logIndex":"0x0","topics":["0x0000000000000000000000000000000000000000000000000000000000000001","0x0000000000000000000000000000000000000000000000000000000000000002","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000000"],"transactionHash":""#.to_owned()
 		+ &format!("0x{:x}", tx_hash)
diff --git a/secret_store/src/acl_storage.rs b/secret_store/src/acl_storage.rs
index 50414eff26c..b3427fa1b20 100644
--- a/secret_store/src/acl_storage.rs
+++ b/secret_store/src/acl_storage.rs
@@ -18,7 +18,7 @@ use std::sync::Arc;
 use std::collections::{HashMap, HashSet};
 use std::time::Duration;
 use parking_lot::{Mutex, RwLock};
-use ethcore::client::{BlockId, ChainNotify, CallContract, RegistryInfo};
+use ethcore::client::{BlockId, ChainNotify, ChainRoute, CallContract, RegistryInfo};
 use ethereum_types::{H256, Address};
 use bytes::Bytes;
 use trusted_client::TrustedClient;
@@ -76,8 +76,8 @@ impl AclStorage for OnChainAclStorage {
 }
 
 impl ChainNotify for OnChainAclStorage {
-	fn new_blocks(&self, _imported: Vec<H256>, _invalid: Vec<H256>, enacted: Vec<H256>, retracted: Vec<H256>, _sealed: Vec<H256>, _proposed: Vec<Bytes>, _duration: Duration) {
-		if !enacted.is_empty() || !retracted.is_empty() {
+	fn new_blocks(&self, _imported: Vec<H256>, _invalid: Vec<H256>, route: ChainRoute, _sealed: Vec<H256>, _proposed: Vec<Bytes>, _duration: Duration) {
+		if !route.enacted().is_empty() || !route.retracted().is_empty() {
 			self.contract.lock().update()
 		}
 	}
diff --git a/secret_store/src/key_server_set.rs b/secret_store/src/key_server_set.rs
index 25651bb4cad..d13017261c4 100644
--- a/secret_store/src/key_server_set.rs
+++ b/secret_store/src/key_server_set.rs
@@ -19,7 +19,7 @@ use std::net::SocketAddr;
 use std::collections::{BTreeMap, HashSet};
 use std::time::Duration;
 use parking_lot::Mutex;
-use ethcore::client::{Client, BlockChainClient, BlockId, ChainNotify, CallContract, RegistryInfo};
+use ethcore::client::{Client, BlockChainClient, BlockId, ChainNotify, ChainRoute, CallContract, RegistryInfo};
 use ethcore::filter::Filter;
 use ethkey::public_to_address;
 use hash::keccak;
@@ -163,7 +163,9 @@ impl KeyServerSet for OnChainKeyServerSet {
 }
 
 impl ChainNotify for OnChainKeyServerSet {
-	fn new_blocks(&self, _imported: Vec<H256>, _invalid: Vec<H256>, enacted: Vec<H256>, retracted: Vec<H256>, _sealed: Vec<H256>, _proposed: Vec<Bytes>, _duration: Duration) {
+	fn new_blocks(&self, _imported: Vec<H256>, _invalid: Vec<H256>, route: ChainRoute, _sealed: Vec<H256>, _proposed: Vec<Bytes>, _duration: Duration) {
+		let (enacted, retracted) = route.into_enacted_retracted();
+
 		if !enacted.is_empty() || !retracted.is_empty() {
 			self.contract.lock().update(enacted, retracted)
 		}
diff --git a/secret_store/src/listener/service_contract_listener.rs b/secret_store/src/listener/service_contract_listener.rs
index 0e273b3eeee..214235210fa 100644
--- a/secret_store/src/listener/service_contract_listener.rs
+++ b/secret_store/src/listener/service_contract_listener.rs
@@ -20,7 +20,7 @@ use std::sync::atomic::{AtomicUsize, Ordering};
 use std::time::Duration;
 use std::thread;
 use parking_lot::Mutex;
-use ethcore::client::ChainNotify;
+use ethcore::client::{ChainNotify, ChainRoute};
 use ethkey::{Public, public_to_address};
 use bytes::Bytes;
 use ethereum_types::{H256, U256, Address};
@@ -428,8 +428,8 @@ impl Drop for ServiceContractListener {
 }
 
 impl ChainNotify for ServiceContractListener {
-	fn new_blocks(&self, _imported: Vec<H256>, _invalid: Vec<H256>, enacted: Vec<H256>, _retracted: Vec<H256>, _sealed: Vec<H256>, _proposed: Vec<Bytes>, _duration: Duration) {
-		let enacted_len = enacted.len();
+	fn new_blocks(&self, _imported: Vec<H256>, _invalid: Vec<H256>, route: ChainRoute, _sealed: Vec<H256>, _proposed: Vec<Bytes>, _duration: Duration) {
+		let enacted_len = route.enacted().len();
 		if enacted_len == 0 {
 			return;
 		}
diff --git a/updater/src/updater.rs b/updater/src/updater.rs
index c5f45c7658a..f8a98f3b0f4 100644
--- a/updater/src/updater.rs
+++ b/updater/src/updater.rs
@@ -28,7 +28,7 @@ use target_info::Target;
 use bytes::Bytes;
 use ethcore::BlockNumber;
 use ethcore::filter::Filter;
-use ethcore::client::{BlockId, BlockChainClient, ChainNotify};
+use ethcore::client::{BlockId, BlockChainClient, ChainNotify, ChainRoute};
 use ethereum_types::H256;
 use sync::{SyncProvider};
 use hash_fetch::{self as fetch, HashFetch};
@@ -660,7 +660,7 @@ impl Updater
-	fn new_blocks(&self, _imported: Vec<H256>, _invalid: Vec<H256>, _enacted: Vec<H256>, _retracted: Vec<H256>, _sealed: Vec<H256>, _proposed: Vec<Bytes>, _duration: Duration) {
+	fn new_blocks(&self, _imported: Vec<H256>, _invalid: Vec<H256>, _route: ChainRoute, _sealed: Vec<H256>, _proposed: Vec<Bytes>, _duration: Duration) {
 		match (self.client.upgrade(), self.sync.as_ref().and_then(Weak::upgrade)) {
 			(Some(ref c), Some(ref s)) if !s.status().is_syncing(c.queue_info()) => self.poll(),
 			_ => {},

From a7a46f425369fd662042cb4a1c9ffc301b3b5b75 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Andr=C3=A9=20Silva?=
Date: Mon, 7 May 2018 12:11:12 +0100
Subject: [PATCH 03/11] Node table sorting according to last contact data
 (#8541)

* network-devp2p: sort nodes in node table using last contact data
* network-devp2p: rename node contact types in node table json output
* network-devp2p: fix node table tests
* network-devp2p: note node failure when failed to establish connection
* network-devp2p: handle UselessPeer error
* network-devp2p: note failure when marking node as useless
---
 util/network-devp2p/src/host.rs       |  28 +++-
 util/network-devp2p/src/node_table.rs | 230 +++++++++++++++++++-------
 2 files changed, 188 insertions(+), 70 deletions(-)

diff --git a/util/network-devp2p/src/host.rs b/util/network-devp2p/src/host.rs
index 73ca2aca4bd..2de9a1884c8 100644
--- a/util/network-devp2p/src/host.rs
+++ b/util/network-devp2p/src/host.rs
@@ -105,10 +105,13 @@ pub struct NetworkContext<'s> {
 
 impl<'s> NetworkContext<'s> {
 	/// Create a new network IO access point. Takes references to all the data that can be updated within the IO handler.
-	fn new(io: &'s IoContext<NetworkIoMessage>,
+	fn new(
+		io: &'s IoContext<NetworkIoMessage>,
 		protocol: ProtocolId,
-		session: Option<SharedSession>, sessions: Arc<RwLock<Slab<SharedSession>>>,
-		reserved_peers: &'s HashSet<NodeId>) -> NetworkContext<'s> {
+		session: Option<SharedSession>,
+		sessions: Arc<RwLock<Slab<SharedSession>>>,
+		reserved_peers: &'s HashSet<NodeId>,
+	) -> NetworkContext<'s> {
 		let id = session.as_ref().map(|s| s.lock().token());
 		NetworkContext {
 			io: io,
@@ -585,10 +588,8 @@ impl Host {
 		let address = {
 			let mut nodes = self.nodes.write();
 			if let Some(node) = nodes.get_mut(id) {
-				node.attempts += 1;
 				node.endpoint.address
-			}
-			else {
+			} else {
 				debug!(target: "network", "Connection to expired node aborted");
 				return;
 			}
@@ -600,6 +601,7 @@ impl Host {
 			},
 			Err(e) => {
 				debug!(target: "network", "{}: Can't connect to address {:?}: {:?}", id, address, e);
+				self.nodes.write().note_failure(&id);
 				return;
 			}
 		}
@@ -685,10 +687,12 @@ impl Host {
 			Err(e) => {
 				let s = session.lock();
 				trace!(target: "network", "Session read error: {}:{:?} ({:?}) {:?}", token, s.id(), s.remote_addr(), e);
-				if let ErrorKind::Disconnect(DisconnectReason::IncompatibleProtocol) = *e.kind() {
+				if let ErrorKind::Disconnect(DisconnectReason::UselessPeer) = *e.kind() {
 					if let Some(id) = s.id() {
 						if !self.reserved_nodes.read().contains(id) {
-							self.nodes.write().mark_as_useless(id);
+							let mut nodes = self.nodes.write();
+							nodes.note_failure(&id);
+							nodes.mark_as_useless(id);
 						}
 					}
 				}
@@ -754,6 +758,10 @@ impl Host {
 				}
 			}
 		}
+
+		// Note connection success
+		self.nodes.write().note_success(&id);
+
 		for (p, _) in self.handlers.read().iter() {
 			if s.have_capability(*p) {
 				ready_data.push(*p);
@@ -1024,7 +1032,9 @@ impl IoHandler for Host {
 			if let Some(session) = session {
 				session.lock().disconnect(io, DisconnectReason::DisconnectRequested);
 				if let Some(id) = session.lock().id() {
-					self.nodes.write().mark_as_useless(id)
+					let mut nodes = self.nodes.write();
+					nodes.note_failure(&id);
+					nodes.mark_as_useless(id);
 				}
 			}
 			trace!(target: "network", "Disabling peer {}", peer);
diff --git a/util/network-devp2p/src/node_table.rs b/util/network-devp2p/src/node_table.rs
index fd18c10a12c..5079455866c 100644
--- a/util/network-devp2p/src/node_table.rs
+++ b/util/network-devp2p/src/node_table.rs
@@ -21,6 +21,8 @@ use std::net::{SocketAddr, ToSocketAddrs, SocketAddrV4, SocketAddrV6, Ipv4Addr,
 use std::path::PathBuf;
 use std::str::FromStr;
 use std::{fs, mem, slice};
+use std::time::{self, Duration, SystemTime};
+use rand::{self, Rng};
 use ethereum_types::H512;
 use rlp::{Rlp, RlpStream, DecoderError};
 use network::{Error, ErrorKind, AllowIP, IpFilter};
@@ -128,40 +130,64 @@ impl FromStr for NodeEndpoint {
 	}
 }
 
-#[derive(PartialEq, Eq, Copy, Clone)]
+#[derive(Debug, PartialEq, Eq, Copy, Clone)]
 pub enum PeerType {
 	_Required,
 	Optional
 }
 
+/// A type for representing an interaction (contact) with a node at a given time
+/// that was either a success or a failure.
+#[derive(Clone, Copy, Debug)]
+pub enum NodeContact {
+	Success(SystemTime),
+	Failure(SystemTime),
+}
+
+impl NodeContact {
+	fn success() -> NodeContact {
+		NodeContact::Success(SystemTime::now())
+	}
+
+	fn failure() -> NodeContact {
+		NodeContact::Failure(SystemTime::now())
+	}
+
+	fn time(&self) -> SystemTime {
+		match *self {
+			NodeContact::Success(t) | NodeContact::Failure(t) => t
+		}
+	}
+
+	/// Filters an old contact, returning `None` if it happened longer than a
+	/// week ago.
+	fn recent(&self) -> Option<&NodeContact> {
+		let t = self.time();
+		if let Ok(d) = t.elapsed() {
+			if d < Duration::from_secs(60 * 60 * 24 * 7) {
+				return Some(self);
+			}
+		}
+
+		None
+	}
+}
+
+#[derive(Debug)]
 pub struct Node {
 	pub id: NodeId,
 	pub endpoint: NodeEndpoint,
 	pub peer_type: PeerType,
-	pub attempts: u32,
-	pub failures: u32,
+	pub last_contact: Option<NodeContact>,
 }
 
-const DEFAULT_FAILURE_PERCENTAGE: usize = 50;
-
 impl Node {
 	pub fn new(id: NodeId, endpoint: NodeEndpoint) -> Node {
 		Node {
 			id: id,
 			endpoint: endpoint,
 			peer_type: PeerType::Optional,
-			attempts: 0,
-			failures: 0,
-		}
-	}
-
-	/// Returns the node's failure percentage (0..100) in buckets of 5%. If there are 0 connection attempts for this
-	/// node the default failure percentage is returned (50%).
-	pub fn failure_percentage(&self) -> usize {
-		if self.attempts == 0 {
-			DEFAULT_FAILURE_PERCENTAGE
-		} else {
-			(self.failures * 100 / self.attempts / 5 * 5) as usize
+			last_contact: None,
 		}
 	}
 }
@@ -191,8 +217,7 @@ impl FromStr for Node {
 			id: id,
 			endpoint: endpoint,
 			peer_type: PeerType::Optional,
-			attempts: 0,
-			failures: 0,
+			last_contact: None,
 		})
 	}
 }
@@ -231,28 +256,61 @@ impl NodeTable {
 
 	/// Add a node to table
 	pub fn add_node(&mut self, mut node: Node) {
-		// preserve attempts and failure counter
-		let (attempts, failures) =
-			self.nodes.get(&node.id).map_or((0, 0), |n| (n.attempts, n.failures));
-
-		node.attempts = attempts;
-		node.failures = failures;
-
+		// preserve node last_contact
+		node.last_contact = self.nodes.get(&node.id).and_then(|n| n.last_contact);
 		self.nodes.insert(node.id.clone(), node);
 	}
 
+	/// Returns a list of ordered nodes according to their most recent contact
+	/// and filtering useless nodes. The algorithm for creating the sorted nodes
+	/// is:
+	/// - Contacts that aren't recent (older than 1 week) are discarded
+	/// - (1) Nodes with a successful contact are ordered (most recent success first)
+	/// - (2) Nodes with unknown contact (older than 1 week or new nodes) are randomly shuffled
+	/// - (3) Nodes with a failed contact are ordered (oldest failure first)
+	/// - The final result is the concatenation of (1), (2) and (3)
 	fn ordered_entries(&self) -> Vec<&Node> {
-		let mut refs: Vec<&Node> = self.nodes.values()
-			.filter(|n| !self.useless_nodes.contains(&n.id))
-			.collect();
+		let mut success = Vec::new();
+		let mut failures = Vec::new();
+		let mut unknown = Vec::new();
+
+		let nodes = self.nodes.values()
+			.filter(|n| !self.useless_nodes.contains(&n.id));
+
+		for node in nodes {
+			// discard contact points that aren't recent
+			match node.last_contact.as_ref().and_then(|c| c.recent()) {
+				Some(&NodeContact::Success(_)) => {
+					success.push(node);
+				},
+				Some(&NodeContact::Failure(_)) => {
+					failures.push(node);
+				},
+				None => {
+					unknown.push(node);
+				},
+			}
+		}
 
-		refs.sort_by(|a, b| {
-			a.failure_percentage().cmp(&b.failure_percentage())
-				.then_with(|| a.failures.cmp(&b.failures))
-				.then_with(|| b.attempts.cmp(&a.attempts)) // we use reverse ordering for number of attempts
+		success.sort_by(|a, b| {
+			let a = a.last_contact.expect("vector only contains values with defined last_contact; qed");
+			let b = b.last_contact.expect("vector only contains values with defined last_contact; qed");
+			// inverse ordering, most recent successes come first
+			b.time().cmp(&a.time())
 		});
 
-		refs
+		failures.sort_by(|a, b| {
+			let a = a.last_contact.expect("vector only contains values with defined last_contact; qed");
+			let b = b.last_contact.expect("vector only contains values with defined last_contact; qed");
+			// normal ordering, most distant failures come first
+			a.time().cmp(&b.time())
+		});
+
+		rand::thread_rng().shuffle(&mut unknown);
+
+		success.append(&mut unknown);
+		success.append(&mut failures);
+		success
 	}
 
 	/// Returns node ids sorted by failure percentage, for nodes with the same failure percentage the absolute number of
@@ -296,10 +354,17 @@ impl NodeTable {
 		}
 	}
 
-	/// Increase failure counte for a node
+	/// Set last contact as failure for a node
 	pub fn note_failure(&mut self, id: &NodeId) {
 		if let Some(node) = self.nodes.get_mut(id) {
-			node.failures += 1;
+			node.last_contact = Some(NodeContact::failure());
+		}
+	}
+
+	/// Set last contact as success for a node
+	pub fn note_success(&mut self, id: &NodeId) {
+		if let Some(node) = self.nodes.get_mut(id) {
+			node.last_contact = Some(NodeContact::success());
 		}
 	}
@@ -396,19 +461,38 @@ mod json {
 		pub nodes: Vec<Node>,
 	}
 
+	#[derive(Serialize, Deserialize)]
+	pub enum NodeContact {
+		#[serde(rename = "success")]
+		Success(u64),
+		#[serde(rename = "failure")]
+		Failure(u64),
+	}
+
+	impl NodeContact {
+		pub fn into_node_contact(self) -> super::NodeContact {
+			match self {
+				NodeContact::Success(s) => super::NodeContact::Success(
+					time::UNIX_EPOCH + Duration::from_secs(s)
+				),
+				NodeContact::Failure(s) => super::NodeContact::Failure(
+					time::UNIX_EPOCH + Duration::from_secs(s)
+				),
+			}
+		}
+	}
+
 	#[derive(Serialize, Deserialize)]
 	pub struct Node {
 		pub url: String,
-		pub attempts: u32,
-		pub failures: u32,
+		pub last_contact: Option<NodeContact>,
 	}
 
 	impl Node {
		pub fn into_node(self) -> Option<super::Node> {
 			match super::Node::from_str(&self.url) {
 				Ok(mut node) => {
-					node.attempts = self.attempts;
-					node.failures = self.failures;
+					node.last_contact = self.last_contact.map(|c| c.into_node_contact());
 					Some(node)
 				},
 				_ => None,
@@ -418,10 +502,18 @@ mod json {
 
 	impl<'a> From<&'a super::Node> for Node {
 		fn from(node: &'a super::Node) -> Self {
+			let last_contact = node.last_contact.and_then(|c| {
+				match c {
+					super::NodeContact::Success(t) =>
+						t.duration_since(time::UNIX_EPOCH).ok().map(|d| NodeContact::Success(d.as_secs())),
+					super::NodeContact::Failure(t) =>
+						t.duration_since(time::UNIX_EPOCH).ok().map(|d| NodeContact::Failure(d.as_secs())),
+				}
+			});
+
 			Node {
 				url: format!("{}", node),
-				attempts: node.attempts,
-				failures: node.failures,
+				last_contact
 			}
 		}
 	}
@@ -464,42 +556,54 @@ mod tests {
 	}
 
 	#[test]
-	fn table_failure_percentage_order() {
+	fn table_last_contact_order() {
 		let node1 = Node::from_str("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap();
 		let node2 = Node::from_str("enode://b979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap();
 		let node3 = Node::from_str("enode://c979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap();
 		let node4 = Node::from_str("enode://d979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap();
+		let node5 = Node::from_str("enode://e979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap();
+		let node6 = Node::from_str("enode://f979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap();
 		let id1 = H512::from_str("a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap();
 		let id2 = H512::from_str("b979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap();
 		let id3 = H512::from_str("c979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap();
 		let id4 = H512::from_str("d979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap();
+		let id5 = H512::from_str("e979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap();
+		let id6 = H512::from_str("f979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap();
 		let mut table = NodeTable::new(None);
 		table.add_node(node1);
 		table.add_node(node2);
 		table.add_node(node3);
 		table.add_node(node4);
+		table.add_node(node5);
+		table.add_node(node6);
 
-		// node 1 - failure percentage 100%
-		table.get_mut(&id1).unwrap().attempts = 2;
-		table.note_failure(&id1);
+		// failures - nodes 1 & 2
 		table.note_failure(&id1);
-
-		// node2 - failure percentage 33%
-		table.get_mut(&id2).unwrap().attempts = 3;
 		table.note_failure(&id2);
 
-		// node3 - failure percentage 0%
-		table.get_mut(&id3).unwrap().attempts = 1;
+		// success - nodes 3 & 4
+		table.note_success(&id3);
+		table.note_success(&id4);
 
-		// node4 - failure percentage 50% (default when no attempts)
+		// success - node 5 (old contact)
+		table.get_mut(&id5).unwrap().last_contact = Some(NodeContact::Success(time::UNIX_EPOCH));
+
+		// unknown - node 6
 
 		let r = table.nodes(IpFilter::default());
 
-		assert_eq!(r[0][..], id3[..]);
-		assert_eq!(r[1][..], id2[..]);
-		assert_eq!(r[2][..], id4[..]);
-		assert_eq!(r[3][..], id1[..]);
+		assert_eq!(r[0][..], id4[..]); // most recent success
+		assert_eq!(r[1][..], id3[..]);
+
+		// unknown (old contacts and new nodes), randomly shuffled
+		assert!(
+			r[2][..] == id5[..] && r[3][..] == id6[..] ||
+			r[2][..] == id6[..] && r[3][..] == id5[..]
+		);
+
+		assert_eq!(r[4][..], id1[..]); // oldest failure
+		assert_eq!(r[5][..], id2[..]);
 	}
 
 	#[test]
@@ -507,23 +611,27 @@
 		let tempdir = TempDir::new("").unwrap();
 		let node1 = Node::from_str("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap();
 		let node2 = Node::from_str("enode://b979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap();
+		let node3 = Node::from_str("enode://c979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@22.99.55.44:7770").unwrap();
 		let id1 = H512::from_str("a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap();
 		let id2 = H512::from_str("b979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap();
+		let id3 = H512::from_str("c979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c").unwrap();
+
 		{
 			let mut table = NodeTable::new(Some(tempdir.path().to_str().unwrap().to_owned()));
 			table.add_node(node1);
 			table.add_node(node2);
+			table.add_node(node3);
 
-			table.get_mut(&id1).unwrap().attempts = 1;
-			table.get_mut(&id2).unwrap().attempts = 1;
-			table.note_failure(&id2);
+			table.note_success(&id2);
+			table.note_failure(&id3);
 		}
 
 		{
 			let table = NodeTable::new(Some(tempdir.path().to_str().unwrap().to_owned()));
 			let r = table.nodes(IpFilter::default());
-			assert_eq!(r[0][..], id1[..]);
-			assert_eq!(r[1][..], id2[..]);
+			assert_eq!(r[0][..], id2[..]); // latest success
+			assert_eq!(r[1][..], id1[..]); // unknown
+			assert_eq!(r[2][..], id3[..]); // oldest failure
 		}
 	}

From 28c731881f2da0ceca4752dbcc1a8f9ad041f988 Mon Sep 17 00:00:00 2001
From: David
Date: Tue, 8 May 2018 11:22:12 +0200
Subject: [PATCH 04/11] Rlp decode returns Result (#8527)

rlp::decode returns Result

Make a best effort to handle decoding errors gracefully throughout the
code, using `expect` where the value is guaranteed to be valid (and in
other places where it makes sense).
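
[Editor's note: the sketch below is illustrative only and is not part of the patch. It shows the calling convention the new rlp::decode API implies, as evidenced by the diffs that follow; the type `MyValue` and the error texts are hypothetical stand-ins.]

	// Before this change, ::rlp::decode panicked internally on malformed input:
	//     let v: MyValue = ::rlp::decode(&bytes);
	//
	// After it, ::rlp::decode returns Result<MyValue, DecoderError>, so the
	// caller chooses the failure mode. Where the bytes are trusted (e.g. values
	// previously written to our own database), unwrap with a justification:
	let v: MyValue = ::rlp::decode(&bytes).expect("decoding value from db failed");
	// Where the input is untrusted, propagate the DecoderError instead:
	let v: MyValue = ::rlp::decode(&bytes)?;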
--- ethcore/light/src/client/header_chain.rs | 27 +++++++++++++----------- ethcore/light/src/net/request_credits.rs | 2 +- ethcore/light/src/types/request/mod.rs | 2 +- ethcore/src/blockchain/blockchain.rs | 4 ++-- ethcore/src/db.rs | 13 +++++------- ethcore/src/encoded.rs | 11 +++++----- ethcore/src/engines/tendermint/mod.rs | 6 ++++-- ethcore/src/header.rs | 4 ++-- ethcore/src/snapshot/account.rs | 6 +++--- ethcore/src/snapshot/mod.rs | 6 +++--- ethcore/src/snapshot/tests/helpers.rs | 2 +- ethcore/src/snapshot/tests/state.rs | 2 +- ethcore/src/state/account.rs | 23 +++++++++++--------- ethcore/src/state/mod.rs | 17 ++++++++++----- ethcore/src/trace/types/flat.rs | 2 +- ethcore/transaction/src/transaction.rs | 5 +++-- ethcore/types/src/receipt.rs | 4 ++-- ethcore/vm/src/call_type.rs | 2 +- rpc/src/v1/tests/mocked/eth.rs | 3 ++- util/journaldb/src/archivedb.rs | 13 ++++++------ util/journaldb/src/earlymergedb.rs | 2 +- util/journaldb/src/overlaydb.rs | 2 +- util/journaldb/src/overlayrecentdb.rs | 4 ++-- util/journaldb/src/refcounteddb.rs | 15 +++++++------ util/rlp/src/lib.rs | 6 +++--- util/rlp/tests/tests.rs | 6 ++++-- util/rlp_derive/tests/rlp.rs | 4 ++-- whisper/src/message.rs | 4 ++-- 28 files changed, 107 insertions(+), 90 deletions(-) diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index abcb04c3662..02a18a60dfe 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -228,7 +228,7 @@ impl HeaderChain { let decoded_header = spec.genesis_header(); let chain = if let Some(current) = db.get(col, CURRENT_KEY)? { - let curr : BestAndLatest = ::rlp::decode(¤t); + let curr : BestAndLatest = ::rlp::decode(¤t).expect("decoding db value failed"); let mut cur_number = curr.latest_num; let mut candidates = BTreeMap::new(); @@ -236,7 +236,7 @@ impl HeaderChain { // load all era entries, referenced headers within them, // and live epoch proofs. while let Some(entry) = db.get(col, era_key(cur_number).as_bytes())? { - let entry: Entry = ::rlp::decode(&entry); + let entry: Entry = ::rlp::decode(&entry).expect("decoding db value failed"); trace!(target: "chain", "loaded header chain entry for era {} with {} candidates", cur_number, entry.candidates.len()); @@ -524,7 +524,10 @@ impl HeaderChain { None } Ok(None) => panic!("stored candidates always have corresponding headers; qed"), - Ok(Some(header)) => Some((epoch_transition, ::rlp::decode(&header))), + Ok(Some(header)) => Some(( + epoch_transition, + ::rlp::decode(&header).expect("decoding value from db failed") + )), }; } } @@ -591,7 +594,7 @@ impl HeaderChain { in an inconsistent state", h_num); ErrorKind::Database(msg.into()) })?; - ::rlp::decode(&bytes) + ::rlp::decode(&bytes).expect("decoding db value failed") }; let total_difficulty = entry.candidates.iter() @@ -604,9 +607,9 @@ impl HeaderChain { .total_difficulty; break Ok(Some(SpecHardcodedSync { - header: header, - total_difficulty: total_difficulty, - chts: chts, + header, + total_difficulty, + chts, })); }, None => { @@ -742,7 +745,7 @@ impl HeaderChain { /// so including it within a CHT would be redundant. 
pub fn cht_root(&self, n: usize) -> Option { match self.db.get(self.col, cht_key(n as u64).as_bytes()) { - Ok(val) => val.map(|x| ::rlp::decode(&x)), + Ok(db_fetch) => db_fetch.map(|bytes| ::rlp::decode(&bytes).expect("decoding value from db failed")), Err(e) => { warn!(target: "chain", "Error reading from database: {}", e); None @@ -793,7 +796,7 @@ impl HeaderChain { pub fn pending_transition(&self, hash: H256) -> Option { let key = pending_transition_key(hash); match self.db.get(self.col, &*key) { - Ok(val) => val.map(|x| ::rlp::decode(&x)), + Ok(db_fetch) => db_fetch.map(|bytes| ::rlp::decode(&bytes).expect("decoding value from db failed")), Err(e) => { warn!(target: "chain", "Error reading from database: {}", e); None @@ -1192,7 +1195,7 @@ mod tests { let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::from_secs(6 * 3600)))); - let chain = HeaderChain::new(db.clone(), None, &spec, cache, HardcodedSync::Allow).unwrap(); + let chain = HeaderChain::new(db.clone(), None, &spec, cache, HardcodedSync::Allow).expect("failed to instantiate a new HeaderChain"); let mut parent_hash = genesis_header.hash(); let mut rolling_timestamp = genesis_header.timestamp(); @@ -1211,14 +1214,14 @@ mod tests { parent_hash = header.hash(); let mut tx = db.transaction(); - let pending = chain.insert(&mut tx, header, None).unwrap(); + let pending = chain.insert(&mut tx, header, None).expect("failed inserting a transaction"); db.write(tx).unwrap(); chain.apply_pending(pending); rolling_timestamp += 10; } - let hardcoded_sync = chain.read_hardcoded_sync().unwrap().unwrap(); + let hardcoded_sync = chain.read_hardcoded_sync().expect("failed reading hardcoded sync").expect("failed unwrapping hardcoded sync"); assert_eq!(hardcoded_sync.chts.len(), 3); assert_eq!(hardcoded_sync.total_difficulty, total_difficulty); let decoded: Header = hardcoded_sync.header.decode(); diff --git a/ethcore/light/src/net/request_credits.rs b/ethcore/light/src/net/request_credits.rs index abe609dabbd..29570b613cf 100644 --- a/ethcore/light/src/net/request_credits.rs +++ b/ethcore/light/src/net/request_credits.rs @@ -407,7 +407,7 @@ mod tests { let costs = CostTable::default(); let serialized = ::rlp::encode(&costs); - let new_costs: CostTable = ::rlp::decode(&*serialized); + let new_costs: CostTable = ::rlp::decode(&*serialized).unwrap(); assert_eq!(costs, new_costs); } diff --git a/ethcore/light/src/types/request/mod.rs b/ethcore/light/src/types/request/mod.rs index 8d911d3f555..bda992df975 100644 --- a/ethcore/light/src/types/request/mod.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -1642,7 +1642,7 @@ mod tests { { // check as single value. let bytes = ::rlp::encode(&val); - let new_val: T = ::rlp::decode(&bytes); + let new_val: T = ::rlp::decode(&bytes).unwrap(); assert_eq!(val, new_val); // check as list containing single value. diff --git a/ethcore/src/blockchain/blockchain.rs b/ethcore/src/blockchain/blockchain.rs index 57bcdf2bc28..0e18c5f889c 100644 --- a/ethcore/src/blockchain/blockchain.rs +++ b/ethcore/src/blockchain/blockchain.rs @@ -438,7 +438,7 @@ impl<'a> Iterator for EpochTransitionIter<'a> { return None } - let transitions: EpochTransitions = ::rlp::decode(&val[..]); + let transitions: EpochTransitions = ::rlp::decode(&val[..]).expect("decode error: the db is corrupted or the data structure has changed"); // if there are multiple candidates, at most one will be on the // canon chain. 
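[Editorial note] The pattern repeated in `cht_root` and `pending_transition` above is worth calling out: a database I/O error degrades to `None` with a warning, while a decode failure on a value this process wrote itself is treated as corruption and panics via `expect`. A hedged sketch of that two-tier policy as a free-standing helper; the patch inlines this logic at each call site, and `read_decoded` is an invented name:

#[macro_use]
extern crate log;
extern crate kvdb;
extern crate rlp;

use kvdb::KeyValueDB;

fn read_decoded<T: rlp::Decodable>(db: &KeyValueDB, col: Option<u32>, key: &[u8]) -> Option<T> {
    match db.get(col, key) {
        // A missing key is an ordinary miss; a value that is present must
        // decode, since this process wrote it, hence the `expect`.
        Ok(db_fetch) => db_fetch.map(|bytes| {
            ::rlp::decode(&bytes).expect("decoding value from db failed")
        }),
        // An I/O error degrades to `None` with a warning, matching the
        // call sites above.
        Err(e) => {
            warn!(target: "chain", "Error reading from database: {}", e);
            None
        }
    }
}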
@@ -462,7 +462,7 @@ impl<'a> Iterator for EpochTransitionIter<'a> { impl BlockChain { /// Create new instance of blockchain from given Genesis. pub fn new(config: Config, genesis: &[u8], db: Arc) -> BlockChain { - // 400 is the avarage size of the key + // 400 is the average size of the key let cache_man = CacheManager::new(config.pref_cache_size, config.max_cache_size, 400); let mut bc = BlockChain { diff --git a/ethcore/src/db.rs b/ethcore/src/db.rs index d11adc7710d..a1c7d6b0f5e 100644 --- a/ethcore/src/db.rs +++ b/ethcore/src/db.rs @@ -218,15 +218,12 @@ impl Writable for DBTransaction { } impl Readable for KVDB { - fn read(&self, col: Option, key: &Key) -> Option where T: rlp::Decodable, R: Deref { - let result = self.get(col, &key.key()); + fn read(&self, col: Option, key: &Key) -> Option + where T: rlp::Decodable, R: Deref { + self.get(col, &key.key()) + .expect(&format!("db get failed, key: {:?}", &key.key() as &[u8])) + .map(|v| rlp::decode(&v).expect("decode db value failed") ) - match result { - Ok(option) => option.map(|v| rlp::decode(&v)), - Err(err) => { - panic!("db get failed, key: {:?}, err: {:?}", &key.key() as &[u8], err); - } - } } fn exists(&self, col: Option, key: &Key) -> bool where R: Deref { diff --git a/ethcore/src/encoded.rs b/ethcore/src/encoded.rs index 1f627666a90..01df386cc2e 100644 --- a/ethcore/src/encoded.rs +++ b/ethcore/src/encoded.rs @@ -24,13 +24,12 @@ //! decoded object where parts like the hash can be saved. use block::Block as FullBlock; -use header::{BlockNumber, Header as FullHeader}; -use transaction::UnverifiedTransaction; - +use ethereum_types::{H256, Bloom, U256, Address}; use hash::keccak; +use header::{BlockNumber, Header as FullHeader}; use heapsize::HeapSizeOf; -use ethereum_types::{H256, Bloom, U256, Address}; use rlp::{Rlp, RlpStream}; +use transaction::UnverifiedTransaction; use views::{self, BlockView, HeaderView, BodyView}; /// Owning header view. @@ -48,7 +47,7 @@ impl Header { pub fn new(encoded: Vec) -> Self { Header(encoded) } /// Upgrade this encoded view to a fully owned `Header` object. - pub fn decode(&self) -> FullHeader { ::rlp::decode(&self.0) } + pub fn decode(&self) -> FullHeader { ::rlp::decode(&self.0).expect("decoding failure") } /// Get a borrowed header view onto the data. #[inline] @@ -205,7 +204,7 @@ impl Block { pub fn header_view(&self) -> HeaderView { self.view().header_view() } /// Decode to a full block. - pub fn decode(&self) -> FullBlock { ::rlp::decode(&self.0) } + pub fn decode(&self) -> FullBlock { ::rlp::decode(&self.0).expect("decoding failure") } /// Decode the header. pub fn decode_header(&self) -> FullHeader { self.view().rlp().val_at(0) } diff --git a/ethcore/src/engines/tendermint/mod.rs b/ethcore/src/engines/tendermint/mod.rs index d80a5e182f1..93fc347e942 100644 --- a/ethcore/src/engines/tendermint/mod.rs +++ b/ethcore/src/engines/tendermint/mod.rs @@ -143,8 +143,10 @@ impl super::EpochVerifier for EpochVerifier } fn check_finality_proof(&self, proof: &[u8]) -> Option> { - let header: Header = ::rlp::decode(proof); - self.verify_light(&header).ok().map(|_| vec![header.hash()]) + match ::rlp::decode(proof) { + Ok(header) => self.verify_light(&header).ok().map(|_| vec![header.hash()]), + Err(_) => None // REVIEW: log perhaps? Not sure what the policy is. 
+ } } } diff --git a/ethcore/src/header.rs b/ethcore/src/header.rs index a31aa029b31..ba71eb30497 100644 --- a/ethcore/src/header.rs +++ b/ethcore/src/header.rs @@ -398,7 +398,7 @@ mod tests { let nonce = "88ab4e252a7e8c2a23".from_hex().unwrap(); let nonce_decoded = "ab4e252a7e8c2a23".from_hex().unwrap(); - let header: Header = rlp::decode(&header_rlp); + let header: Header = rlp::decode(&header_rlp).expect("error decoding header"); let seal_fields = header.seal.clone(); assert_eq!(seal_fields.len(), 2); assert_eq!(seal_fields[0], mix_hash); @@ -415,7 +415,7 @@ mod tests { // that's rlp of block header created with ethash engine. let header_rlp = "f901f9a0d405da4e66f1445d455195229624e133f5baafe72b5cf7b3c36c12c8146e98b7a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a05fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25a088d2ec6b9860aae1a2c3b299f72b6a5d70d7f7ba4722c78f2c49ba96273c2158a007c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefba82524d84568e932a80a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd88ab4e252a7e8c2a23".from_hex().unwrap(); - let header: Header = rlp::decode(&header_rlp); + let header: Header = rlp::decode(&header_rlp).expect("error decoding header"); let encoded_header = rlp::encode(&header).into_vec(); assert_eq!(header_rlp, encoded_header); diff --git a/ethcore/src/snapshot/account.rs b/ethcore/src/snapshot/account.rs index 6c9e0f3d6e8..49f45136e9b 100644 --- a/ethcore/src/snapshot/account.rs +++ b/ethcore/src/snapshot/account.rs @@ -236,7 +236,7 @@ mod tests { }; let thin_rlp = ::rlp::encode(&account); - assert_eq!(::rlp::decode::(&thin_rlp), account); + assert_eq!(::rlp::decode::(&thin_rlp).unwrap(), account); let fat_rlps = to_fat_rlps(&keccak(&addr), &account, &AccountDB::new(db.as_hashdb(), &addr), &mut Default::default(), usize::max_value(), usize::max_value()).unwrap(); let fat_rlp = Rlp::new(&fat_rlps[0]).at(1).unwrap(); @@ -261,7 +261,7 @@ mod tests { }; let thin_rlp = ::rlp::encode(&account); - assert_eq!(::rlp::decode::(&thin_rlp), account); + assert_eq!(::rlp::decode::(&thin_rlp).unwrap(), account); let fat_rlp = to_fat_rlps(&keccak(&addr), &account, &AccountDB::new(db.as_hashdb(), &addr), &mut Default::default(), usize::max_value(), usize::max_value()).unwrap(); let fat_rlp = Rlp::new(&fat_rlp[0]).at(1).unwrap(); @@ -286,7 +286,7 @@ mod tests { }; let thin_rlp = ::rlp::encode(&account); - assert_eq!(::rlp::decode::(&thin_rlp), account); + assert_eq!(::rlp::decode::(&thin_rlp).unwrap(), account); let fat_rlps = to_fat_rlps(&keccak(addr), &account, &AccountDB::new(db.as_hashdb(), &addr), &mut Default::default(), 500, 1000).unwrap(); let mut root = KECCAK_NULL_RLP; diff --git a/ethcore/src/snapshot/mod.rs b/ethcore/src/snapshot/mod.rs index fbf0c5ca5ec..94236e9e95d 100644 --- a/ethcore/src/snapshot/mod.rs +++ b/ethcore/src/snapshot/mod.rs @@ -281,7 +281,7 @@ pub fn chunk_state<'a>(db: &HashDB, root: &H256, writer: 
&Mutex(rlp).storage_root); + known_storage_roots.insert(*hash, ::rlp::decode::(rlp)?.storage_root); } if let Some(&(ref hash, ref rlp)) = out_chunk.iter().next() { - known_storage_roots.insert(*hash, ::rlp::decode::(rlp).storage_root); + known_storage_roots.insert(*hash, ::rlp::decode::(rlp)?.storage_root); } Ok(status) } diff --git a/ethcore/src/snapshot/tests/helpers.rs b/ethcore/src/snapshot/tests/helpers.rs index 51f417149bf..067a3abab07 100644 --- a/ethcore/src/snapshot/tests/helpers.rs +++ b/ethcore/src/snapshot/tests/helpers.rs @@ -75,7 +75,7 @@ impl StateProducer { // sweep once to alter storage tries. for &mut (ref mut address_hash, ref mut account_data) in &mut accounts_to_modify { - let mut account: BasicAccount = ::rlp::decode(&*account_data); + let mut account: BasicAccount = ::rlp::decode(&*account_data).expect("error decoding basic account"); let acct_db = AccountDBMut::from_hash(db, *address_hash); fill_storage(acct_db, &mut account.storage_root, &mut self.storage_seed); *account_data = DBValue::from_vec(::rlp::encode(&account).into_vec()); diff --git a/ethcore/src/snapshot/tests/state.rs b/ethcore/src/snapshot/tests/state.rs index f17fa7dde5a..05926a7e662 100644 --- a/ethcore/src/snapshot/tests/state.rs +++ b/ethcore/src/snapshot/tests/state.rs @@ -114,7 +114,7 @@ fn get_code_from_prev_chunk() { // first one will have code inlined, // second will just have its hash. let thin_rlp = acc_stream.out(); - let acc: BasicAccount = ::rlp::decode(&thin_rlp); + let acc: BasicAccount = ::rlp::decode(&thin_rlp).expect("error decoding basic account"); let mut make_chunk = |acc, hash| { let mut db = MemoryDB::new(); diff --git a/ethcore/src/state/account.rs b/ethcore/src/state/account.rs index f9c8f258e60..ff7d70bd3aa 100644 --- a/ethcore/src/state/account.rs +++ b/ethcore/src/state/account.rs @@ -21,6 +21,7 @@ use std::sync::Arc; use std::collections::{HashMap, BTreeMap}; use hash::{KECCAK_EMPTY, KECCAK_NULL_RLP, keccak}; use ethereum_types::{H256, U256, Address}; +use error::Error; use hashdb::HashDB; use kvdb::DBValue; use bytes::{Bytes, ToPretty}; @@ -144,9 +145,10 @@ impl Account { } /// Create a new account from RLP. - pub fn from_rlp(rlp: &[u8]) -> Account { - let basic: BasicAccount = ::rlp::decode(rlp); - basic.into() + pub fn from_rlp(rlp: &[u8]) -> Result { + ::rlp::decode::(rlp) + .map(|ba| ba.into()) + .map_err(|e| e.into()) } /// Create a new contract account. 
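[Editorial note] One knock-on effect: trie queries such as `get_with` take a closure returning the decoded value directly, so the now-fallible `Account::from_rlp` can no longer be passed as a bare function without changing the query's result type to a nested `Result`. The hunks below resolve this with small `expect`ing closures; here is a self-contained toy illustrating the shape (every name is a stand-in, not a real ethcore API):

// `u64` plays the role of `Account`, `fallible_decode` the role of the
// new `Account::from_rlp`, and `get_with` mimics the shape of the trie
// query API.
fn get_with<T, F: FnOnce(&[u8]) -> T>(bytes: &[u8], query: F) -> T {
    query(bytes)
}

fn fallible_decode(b: &[u8]) -> Result<u64, String> {
    ::std::str::from_utf8(b)
        .map_err(|e| e.to_string())?
        .parse()
        .map_err(|e: ::std::num::ParseIntError| e.to_string())
}

fn main() {
    // Passing `fallible_decode` directly would make the query yield a
    // `Result`, so the call site wraps it in a closure that states the
    // invariant, mirroring the patch's `from_rlp` closures below.
    let from_rlp = |b: &[u8]| fallible_decode(b).expect("decoding db value failed");
    assert_eq!(get_with(b"42", from_rlp), 42);
}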
@@ -202,8 +204,8 @@ impl Account { return Ok(value); } let db = SecTrieDB::new(db, &self.storage_root)?; - - let item: U256 = db.get_with(key, ::rlp::decode)?.unwrap_or_else(U256::zero); + let panicky_decoder = |bytes:&[u8]| ::rlp::decode(&bytes).expect("decoding db value failed"); + let item: U256 = db.get_with(key, panicky_decoder)?.unwrap_or_else(U256::zero); let value: H256 = item.into(); self.storage_cache.borrow_mut().insert(key.clone(), value.clone()); Ok(value) @@ -478,7 +480,8 @@ impl Account { let trie = TrieDB::new(db, &self.storage_root)?; let item: U256 = { - let query = (&mut recorder, ::rlp::decode); + let panicky_decoder = |bytes:&[u8]| ::rlp::decode(bytes).expect("decoding db value failed"); + let query = (&mut recorder, panicky_decoder); trie.get_with(&storage_key, query)?.unwrap_or_else(U256::zero) }; @@ -528,7 +531,7 @@ mod tests { a.rlp() }; - let a = Account::from_rlp(&rlp); + let a = Account::from_rlp(&rlp).expect("decoding db value failed"); assert_eq!(*a.storage_root().unwrap(), "c57e1afb758b07f8d2c8f13a3b6e44fa5ff94ab266facc5a4fd3f062426e50b2".into()); assert_eq!(a.storage_at(&db.immutable(), &0x00u64.into()).unwrap(), 0x1234u64.into()); assert_eq!(a.storage_at(&db.immutable(), &0x01u64.into()).unwrap(), H256::default()); @@ -546,10 +549,10 @@ mod tests { a.rlp() }; - let mut a = Account::from_rlp(&rlp); + let mut a = Account::from_rlp(&rlp).expect("decoding db value failed"); assert!(a.cache_code(&db.immutable()).is_some()); - let mut a = Account::from_rlp(&rlp); + let mut a = Account::from_rlp(&rlp).expect("decoding db value failed"); assert_eq!(a.note_code(vec![0x55, 0x44, 0xffu8]), Ok(())); } @@ -609,7 +612,7 @@ mod tests { #[test] fn rlpio() { let a = Account::new(69u8.into(), 0u8.into(), HashMap::new(), Bytes::new()); - let b = Account::from_rlp(&a.rlp()); + let b = Account::from_rlp(&a.rlp()).unwrap(); assert_eq!(a.balance(), b.balance()); assert_eq!(a.nonce(), b.nonce()); assert_eq!(a.code_hash(), b.code_hash()); diff --git a/ethcore/src/state/mod.rs b/ethcore/src/state/mod.rs index 255dce5b5de..20d564588c5 100644 --- a/ethcore/src/state/mod.rs +++ b/ethcore/src/state/mod.rs @@ -605,7 +605,8 @@ impl State { // account is not found in the global cache, get from the DB and insert into local let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR); - let maybe_acc = db.get_with(address, Account::from_rlp)?; + let from_rlp = |b: &[u8]| Account::from_rlp(b).expect("decoding db value failed"); + let maybe_acc = db.get_with(address, from_rlp)?; let r = maybe_acc.as_ref().map_or(Ok(H256::new()), |a| { let account_db = self.factories.accountdb.readonly(self.db.as_hashdb(), a.address_hash(address)); a.storage_at(account_db.as_hashdb(), key) @@ -983,7 +984,8 @@ impl State { // not found in the global cache, get from the DB and insert into local let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root)?; - let mut maybe_acc = db.get_with(a, Account::from_rlp)?; + let from_rlp = |b: &[u8]| Account::from_rlp(b).expect("decoding db value failed"); + let mut maybe_acc = db.get_with(a, from_rlp)?; if let Some(ref mut account) = maybe_acc.as_mut() { let accountdb = self.factories.accountdb.readonly(self.db.as_hashdb(), account.address_hash(a)); Self::update_account_cache(require, account, &self.db, accountdb.as_hashdb()); @@ -1012,7 +1014,8 @@ impl State { None => { let maybe_acc = if !self.db.is_known_null(a) { let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root)?; - 
AccountEntry::new_clean(db.get_with(a, Account::from_rlp)?) + let from_rlp = |b:&[u8]| { Account::from_rlp(b).expect("decoding db value failed") }; + AccountEntry::new_clean(db.get_with(a, from_rlp)?) } else { AccountEntry::new_clean(None) }; @@ -1064,7 +1067,10 @@ impl State { let mut recorder = Recorder::new(); let trie = TrieDB::new(self.db.as_hashdb(), &self.root)?; let maybe_account: Option = { - let query = (&mut recorder, ::rlp::decode); + let panicky_decoder = |bytes: &[u8]| { + ::rlp::decode(bytes).expect(&format!("prove_account, could not query trie for account key={}", &account_key)) + }; + let query = (&mut recorder, panicky_decoder); trie.get_with(&account_key, query)? }; let account = maybe_account.unwrap_or_else(|| BasicAccount { @@ -1086,7 +1092,8 @@ impl State { // TODO: probably could look into cache somehow but it's keyed by // address, not keccak(address). let trie = TrieDB::new(self.db.as_hashdb(), &self.root)?; - let acc = match trie.get_with(&account_key, Account::from_rlp)? { + let from_rlp = |b: &[u8]| Account::from_rlp(b).expect("decoding db value failed"); + let acc = match trie.get_with(&account_key, from_rlp)? { Some(acc) => acc, None => return Ok((Vec::new(), H256::new())), }; diff --git a/ethcore/src/trace/types/flat.rs b/ethcore/src/trace/types/flat.rs index e2746ca7f7d..00cf517df80 100644 --- a/ethcore/src/trace/types/flat.rs +++ b/ethcore/src/trace/types/flat.rs @@ -244,7 +244,7 @@ mod tests { ]); let encoded = ::rlp::encode(&block_traces); - let decoded = ::rlp::decode(&encoded); + let decoded = ::rlp::decode(&encoded).expect("error decoding block traces"); assert_eq!(block_traces, decoded); } } diff --git a/ethcore/transaction/src/transaction.rs b/ethcore/transaction/src/transaction.rs index 571dec3faeb..6152e61acb6 100644 --- a/ethcore/transaction/src/transaction.rs +++ b/ethcore/transaction/src/transaction.rs @@ -576,7 +576,8 @@ mod tests { #[test] fn sender_test() { - let t: UnverifiedTransaction = rlp::decode(&::rustc_hex::FromHex::from_hex("f85f800182520894095e7baea6a6c7c4c2dfeb977efac326af552d870a801ba048b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353a0efffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804").unwrap()); + let bytes = ::rustc_hex::FromHex::from_hex("f85f800182520894095e7baea6a6c7c4c2dfeb977efac326af552d870a801ba048b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353a0efffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804").unwrap(); + let t: UnverifiedTransaction = rlp::decode(&bytes).expect("decoding UnverifiedTransaction failed"); assert_eq!(t.data, b""); assert_eq!(t.gas, U256::from(0x5208u64)); assert_eq!(t.gas_price, U256::from(0x01u64)); @@ -645,7 +646,7 @@ mod tests { use rustc_hex::FromHex; let test_vector = |tx_data: &str, address: &'static str| { - let signed = rlp::decode(&FromHex::from_hex(tx_data).unwrap()); + let signed = rlp::decode(&FromHex::from_hex(tx_data).unwrap()).expect("decoding tx data failed"); let signed = SignedTransaction::new(signed).unwrap(); assert_eq!(signed.sender(), address.into()); println!("chainid: {:?}", signed.chain_id()); diff --git a/ethcore/types/src/receipt.rs b/ethcore/types/src/receipt.rs index c1defbc151f..8846d27c027 100644 --- a/ethcore/types/src/receipt.rs +++ b/ethcore/types/src/receipt.rs @@ -193,7 +193,7 @@ mod tests { ); let encoded = ::rlp::encode(&r); assert_eq!(&encoded[..], &expected[..]); - let decoded: Receipt = ::rlp::decode(&encoded); + let decoded: Receipt = ::rlp::decode(&encoded).expect("decoding receipt failed"); 
assert_eq!(decoded, r); } @@ -211,7 +211,7 @@ mod tests { ); let encoded = ::rlp::encode(&r); assert_eq!(&encoded[..], &expected[..]); - let decoded: Receipt = ::rlp::decode(&encoded); + let decoded: Receipt = ::rlp::decode(&encoded).expect("decoding receipt failed"); assert_eq!(decoded, r); } } diff --git a/ethcore/vm/src/call_type.rs b/ethcore/vm/src/call_type.rs index 83260825f3c..dc00b2b8392 100644 --- a/ethcore/vm/src/call_type.rs +++ b/ethcore/vm/src/call_type.rs @@ -64,7 +64,7 @@ mod tests { fn should_encode_and_decode_call_type() { let original = CallType::Call; let encoded = encode(&original); - let decoded = decode(&encoded); + let decoded = decode(&encoded).expect("failure decoding CallType"); assert_eq!(original, decoded); } } diff --git a/rpc/src/v1/tests/mocked/eth.rs b/rpc/src/v1/tests/mocked/eth.rs index badf5bff728..39a2e842db1 100644 --- a/rpc/src/v1/tests/mocked/eth.rs +++ b/rpc/src/v1/tests/mocked/eth.rs @@ -566,7 +566,8 @@ fn rpc_eth_pending_transaction_by_hash() { let tester = EthTester::default(); { - let tx = rlp::decode(&FromHex::from_hex("f85f800182520894095e7baea6a6c7c4c2dfeb977efac326af552d870a801ba048b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353a0efffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804").unwrap()); + let bytes = FromHex::from_hex("f85f800182520894095e7baea6a6c7c4c2dfeb977efac326af552d870a801ba048b55bfa915ac795c431978d8a6a992b628d557da5ff759b307d495a36649353a0efffd310ac743f371de3b9f7f9cb56c0b28ad43601b4ab949f53faa07bd2c804").unwrap(); + let tx = rlp::decode(&bytes).expect("decoding failure"); let tx = SignedTransaction::new(tx).unwrap(); tester.miner.pending_transactions.lock().insert(H256::zero(), tx); } diff --git a/util/journaldb/src/archivedb.rs b/util/journaldb/src/archivedb.rs index e00b37d3c4a..b58558a332a 100644 --- a/util/journaldb/src/archivedb.rs +++ b/util/journaldb/src/archivedb.rs @@ -45,14 +45,15 @@ pub struct ArchiveDB { impl ArchiveDB { /// Create a new instance from a key-value db. - pub fn new(backing: Arc, col: Option) -> ArchiveDB { - let latest_era = backing.get(col, &LATEST_ERA_KEY).expect("Low-level database error.") - .map(|val| decode::(&val)); + pub fn new(backing: Arc, column: Option) -> ArchiveDB { + let latest_era = backing.get(column, &LATEST_ERA_KEY) + .expect("Low-level database error.") + .map(|val| decode::(&val).expect("decoding db value failed")); ArchiveDB { overlay: MemoryDB::new(), - backing: backing, - latest_era: latest_era, - column: col, + backing, + latest_era, + column, } } diff --git a/util/journaldb/src/earlymergedb.rs b/util/journaldb/src/earlymergedb.rs index bb8e49d41ed..e76cdcd313a 100644 --- a/util/journaldb/src/earlymergedb.rs +++ b/util/journaldb/src/earlymergedb.rs @@ -263,7 +263,7 @@ impl EarlyMergeDB { let mut refs = HashMap::new(); let mut latest_era = None; if let Some(val) = db.get(col, &LATEST_ERA_KEY).expect("Low-level database error.") { - let mut era = decode::(&val); + let mut era = decode::(&val).expect("decoding db value failed"); latest_era = Some(era); loop { let mut db_key = DatabaseKey { diff --git a/util/journaldb/src/overlaydb.rs b/util/journaldb/src/overlaydb.rs index fa7ff04596e..54d0bb12d76 100644 --- a/util/journaldb/src/overlaydb.rs +++ b/util/journaldb/src/overlaydb.rs @@ -137,7 +137,7 @@ impl OverlayDB { fn payload(&self, key: &H256) -> Option { self.backing.get(self.column, key) .expect("Low-level database error. 
Some issue with your hard disk?") - .map(|d| decode(&d)) + .map(|d| decode(&d).expect("decoding db value failed")) } /// Put the refs and value of the given key, possibly deleting it from the db. diff --git a/util/journaldb/src/overlayrecentdb.rs b/util/journaldb/src/overlayrecentdb.rs index fdc178350e6..2c9ce5cb1dd 100644 --- a/util/journaldb/src/overlayrecentdb.rs +++ b/util/journaldb/src/overlayrecentdb.rs @@ -186,7 +186,7 @@ impl OverlayRecentDB { let mut earliest_era = None; let mut cumulative_size = 0; if let Some(val) = db.get(col, &LATEST_ERA_KEY).expect("Low-level database error.") { - let mut era = decode::(&val); + let mut era = decode::(&val).expect("decoding db value failed"); latest_era = Some(era); loop { let mut db_key = DatabaseKey { @@ -195,7 +195,7 @@ impl OverlayRecentDB { }; while let Some(rlp_data) = db.get(col, &encode(&db_key)).expect("Low-level database error.") { trace!("read_overlay: era={}, index={}", era, db_key.index); - let value = decode::(&rlp_data); + let value = decode::(&rlp_data).expect(&format!("read_overlay: Error decoding DatabaseValue era={}, index{}", era, db_key.index)); count += value.inserts.len(); let mut inserted_keys = Vec::new(); for (k, v) in value.inserts { diff --git a/util/journaldb/src/refcounteddb.rs b/util/journaldb/src/refcounteddb.rs index bf366faf753..944d81d3733 100644 --- a/util/journaldb/src/refcounteddb.rs +++ b/util/journaldb/src/refcounteddb.rs @@ -62,17 +62,18 @@ pub struct RefCountedDB { impl RefCountedDB { /// Create a new instance given a `backing` database. - pub fn new(backing: Arc, col: Option) -> RefCountedDB { - let latest_era = backing.get(col, &LATEST_ERA_KEY).expect("Low-level database error.") - .map(|val| decode::(&val)); + pub fn new(backing: Arc, column: Option) -> RefCountedDB { + let latest_era = backing.get(column, &LATEST_ERA_KEY) + .expect("Low-level database error.") + .map(|v| decode::(&v).expect("decoding db value failed")); RefCountedDB { - forward: OverlayDB::new(backing.clone(), col), - backing: backing, + forward: OverlayDB::new(backing.clone(), column), + backing, inserts: vec![], removes: vec![], - latest_era: latest_era, - column: col, + latest_era, + column, } } } diff --git a/util/rlp/src/lib.rs b/util/rlp/src/lib.rs index a6754e22de9..b416b1c25b0 100644 --- a/util/rlp/src/lib.rs +++ b/util/rlp/src/lib.rs @@ -63,13 +63,13 @@ pub const EMPTY_LIST_RLP: [u8; 1] = [0xC0; 1]; /// /// fn main () { /// let data = vec![0x83, b'c', b'a', b't']; -/// let animal: String = rlp::decode(&data); +/// let animal: String = rlp::decode(&data).expect("could not decode"); /// assert_eq!(animal, "cat".to_owned()); /// } /// ``` -pub fn decode(bytes: &[u8]) -> T where T: Decodable { +pub fn decode(bytes: &[u8]) -> Result where T: Decodable { let rlp = Rlp::new(bytes); - rlp.as_val().expect("trusted rlp should be valid") + rlp.as_val() } pub fn decode_list(bytes: &[u8]) -> Vec where T: Decodable { diff --git a/util/rlp/tests/tests.rs b/util/rlp/tests/tests.rs index 6ff426a7739..041c267667d 100644 --- a/util/rlp/tests/tests.rs +++ b/util/rlp/tests/tests.rs @@ -209,8 +209,10 @@ struct VDTestPair(Vec, Vec) where T: Decodable + fmt::Debug + cmp::Eq; fn run_decode_tests(tests: Vec>) where T: Decodable + fmt::Debug + cmp::Eq { for t in &tests { - let res: T = rlp::decode(&t.1); - assert_eq!(res, t.0); + let res : Result = rlp::decode(&t.1); + assert!(res.is_ok()); + let res = res.unwrap(); + assert_eq!(&res, &t.0); } } diff --git a/util/rlp_derive/tests/rlp.rs b/util/rlp_derive/tests/rlp.rs index c873805247d..ba51309146e 
100644 --- a/util/rlp_derive/tests/rlp.rs +++ b/util/rlp_derive/tests/rlp.rs @@ -24,7 +24,7 @@ fn test_encode_foo() { let out = encode(&foo).into_vec(); assert_eq!(out, expected); - let decoded = decode(&expected); + let decoded = decode(&expected).expect("decode failure"); assert_eq!(foo, decoded); } @@ -38,7 +38,7 @@ fn test_encode_foo_wrapper() { let out = encode(&foo).into_vec(); assert_eq!(out, expected); - let decoded = decode(&expected); + let decoded = decode(&expected).expect("decode failure"); assert_eq!(foo, decoded); } diff --git a/whisper/src/message.rs b/whisper/src/message.rs index fbf2faf3fdf..d0de9af4b5a 100644 --- a/whisper/src/message.rs +++ b/whisper/src/message.rs @@ -446,7 +446,7 @@ mod tests { }; let encoded = ::rlp::encode(&envelope); - let decoded = ::rlp::decode(&encoded); + let decoded = ::rlp::decode(&encoded).expect("failure decoding Envelope"); assert_eq!(envelope, decoded) } @@ -462,7 +462,7 @@ mod tests { }; let encoded = ::rlp::encode(&envelope); - let decoded = ::rlp::decode(&encoded); + let decoded = ::rlp::decode(&encoded).expect("failure decoding Envelope"); assert_eq!(envelope, decoded) } From ac3de4c5fca0c0b469aa2198c42e92f04be18626 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Wed, 9 May 2018 08:47:21 +0200 Subject: [PATCH 05/11] Parity as a library (#8412) * Parity as a library * Fix concerns * Allow using a null on_client_restart_cb * Fix more concerns * Test the C library in test.sh * Reduce CMake version to 3.5 * Move the clib test before cargo test * Add println in test --- Cargo.lock | 8 +- Cargo.toml | 5 +- parity-clib-example/CMakeLists.txt | 19 ++ parity-clib-example/main.cpp | 28 +++ parity-clib/Cargo.toml | 17 ++ parity-clib/parity.h | 93 ++++++++++ parity-clib/src/lib.rs | 133 ++++++++++++++ parity/blockchain.rs | 7 - parity/cli/usage.rs | 1 + parity/configuration.rs | 41 +++-- parity/lib.rs | 249 ++++++++++++++++++++++++++ parity/main.rs | 275 +++++++---------------------- parity/run.rs | 138 +++++---------- parity/snapshot.rs | 3 - parity/url.rs | 3 +- test.sh | 16 +- 16 files changed, 702 insertions(+), 334 deletions(-) create mode 100644 parity-clib-example/CMakeLists.txt create mode 100644 parity-clib-example/main.cpp create mode 100644 parity-clib/Cargo.toml create mode 100644 parity-clib/parity.h create mode 100644 parity-clib/src/lib.rs create mode 100644 parity/lib.rs diff --git a/Cargo.lock b/Cargo.lock index ceb14ff277c..64e468e6703 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1954,7 +1954,6 @@ dependencies = [ "ethcore-private-tx 1.0.0", "ethcore-secretstore 1.0.0", "ethcore-service 0.1.0", - "ethcore-stratum 1.12.0", "ethcore-sync 1.12.0", "ethcore-transaction 0.1.0", "ethereum-types 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2008,6 +2007,13 @@ dependencies = [ "winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "parity-clib" +version = "1.12.0" +dependencies = [ + "parity 1.12.0", +] + [[package]] name = "parity-dapps" version = "1.12.0" diff --git a/Cargo.toml b/Cargo.toml index a611458e653..de1a78bf4fa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -40,7 +40,6 @@ ethcore-miner = { path = "miner" } ethcore-network = { path = "util/network" } ethcore-private-tx = { path = "ethcore/private-tx" } ethcore-service = { path = "ethcore/service" } -ethcore-stratum = { path = "ethcore/stratum" } ethcore-sync = { path = "ethcore/sync" } ethcore-transaction = { path = "ethcore/transaction" } ethereum-types = "0.3" @@ -108,6 +107,9 @@ slow-blocks = 
["ethcore/slow-blocks"] secretstore = ["ethcore-secretstore"] final = ["parity-version/final"] +[lib] +path = "parity/lib.rs" + [[bin]] path = "parity/main.rs" name = "parity" @@ -130,6 +132,7 @@ members = [ "ethstore/cli", "evmbin", "miner", + "parity-clib", "transaction-pool", "whisper", "whisper/cli", diff --git a/parity-clib-example/CMakeLists.txt b/parity-clib-example/CMakeLists.txt new file mode 100644 index 00000000000..143d014e322 --- /dev/null +++ b/parity-clib-example/CMakeLists.txt @@ -0,0 +1,19 @@ +cmake_minimum_required(VERSION 3.5) +include(ExternalProject) + +include_directories("${CMAKE_SOURCE_DIR}/../parity-clib") + +add_executable(parity-example main.cpp) + +ExternalProject_Add( + libparity + DOWNLOAD_COMMAND "" + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + COMMAND cargo build -p parity-clib # Note: use --release in a real project + BINARY_DIR "${CMAKE_SOURCE_DIR}/../target" + INSTALL_COMMAND "" + LOG_BUILD ON) + +add_dependencies(parity-example libparity) +target_link_libraries(parity-example "${CMAKE_SOURCE_DIR}/../target/debug/libparity.so") diff --git a/parity-clib-example/main.cpp b/parity-clib-example/main.cpp new file mode 100644 index 00000000000..1fadf1b5b56 --- /dev/null +++ b/parity-clib-example/main.cpp @@ -0,0 +1,28 @@ +#include +#include +#include + +void on_restart(void*, const char*, size_t) {} + +int main() { + ParityParams cfg = { 0 }; + cfg.on_client_restart_cb = on_restart; + + const char* args[] = {"--light"}; + size_t str_lens[] = {7}; + if (parity_config_from_cli(args, str_lens, 1, &cfg.configuration) != 0) { + return 1; + } + + void* parity; + if (parity_start(&cfg, &parity) != 0) { + return 1; + } + + sleep(5); + if (parity != NULL) { + parity_destroy(parity); + } + + return 0; +} diff --git a/parity-clib/Cargo.toml b/parity-clib/Cargo.toml new file mode 100644 index 00000000000..001f954c211 --- /dev/null +++ b/parity-clib/Cargo.toml @@ -0,0 +1,17 @@ +[package] +description = "C bindings for the Parity Ethereum client" +name = "parity-clib" +version = "1.12.0" +license = "GPL-3.0" +authors = ["Parity Technologies "] + +[lib] +name = "parity" +crate-type = ["cdylib", "staticlib"] + +[dependencies] +parity = { path = "../", default-features = false } + +[features] +default = [] +final = ["parity/final"] diff --git a/parity-clib/parity.h b/parity-clib/parity.h new file mode 100644 index 00000000000..b61da8e458b --- /dev/null +++ b/parity-clib/parity.h @@ -0,0 +1,93 @@ +// Copyright 2018 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +#ifndef _PARITY_H_INCLUDED_ +#define _PARITY_H_INCLUDED_ + +#include + +/// Parameters to pass to `parity_start`. +struct ParityParams { + /// Configuration object, as handled by the `parity_config_*` functions. + /// Note that calling `parity_start` will destroy the configuration object (even on failure). 
+ void *configuration; + + /// Callback function to call when the client receives an RPC request to change its chain spec. + /// + /// Will only be called if you enable the `--can-restart` flag. + /// + /// The first parameter of the callback is the value of `on_client_restart_cb_custom`. + /// The second and third parameters of the callback are the string pointer and length. + void (*on_client_restart_cb)(void* custom, const char* new_chain, size_t new_chain_len); + + /// Custom parameter passed to the `on_client_restart_cb` callback as first parameter. + void *on_client_restart_cb_custom; +}; + +#ifdef __cplusplus +extern "C" { +#endif + +/// Builds a new configuration object by parsing a list of CLI arguments. +/// +/// The first two parameters are string pointers and string lengths. They must have a length equal +/// to `len`. The strings don't need to be zero-terminated. +/// +/// On success, the produced object will be written to the `void*` pointed by `out`. +/// +/// Returns 0 on success, and non-zero on error. +/// +/// # Example +/// +/// ```no_run +/// void* cfg; +/// const char *args[] = {"--light", "--can-restart"}; +/// size_t str_lens[] = {7, 13}; +/// if (parity_config_from_cli(args, str_lens, 2, &cfg) != 0) { +/// return 1; +/// } +/// ``` +/// +int parity_config_from_cli(char const* const* args, size_t const* arg_lens, size_t len, void** out); + +/// Destroys a configuration object created earlier. +/// +/// **Important**: You probably don't need to call this function. Calling `parity_start` destroys +/// the configuration object as well (even on failure). +void parity_config_destroy(void* cfg); + +/// Starts the parity client in background threads. Returns a pointer to a struct that represents +/// the running client. Can also return NULL if the execution completes instantly. +/// +/// **Important**: The configuration object passed inside `cfg` is destroyed when you +/// call `parity_start` (even on failure). +/// +/// On success, the produced object will be written to the `void*` pointed by `out`. +/// +/// Returns 0 on success, and non-zero on error. +int parity_start(const ParityParams* params, void** out); + +/// Destroys the parity client created with `parity_start`. +/// +/// **Warning**: `parity_start` can return NULL if execution finished instantly, in which case you +/// must not call this function. +void parity_destroy(void* parity); + +#ifdef __cplusplus +} +#endif + +#endif // include guard diff --git a/parity-clib/src/lib.rs b/parity-clib/src/lib.rs new file mode 100644 index 00000000000..b08d6487d1a --- /dev/null +++ b/parity-clib/src/lib.rs @@ -0,0 +1,133 @@ +// Copyright 2018 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Note that all the structs and functions here are documented in `parity.h`, to avoid +//! duplicating documentation. 
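[Editorial note] The same embedding flow is available to Rust consumers without going through this FFI shim. A hedged sketch using the API this patch adds in `parity/lib.rs` and `parity/configuration.rs` below; the argument list is illustrative, and `"parity"` is included as argv[0] because the C shim inserts it itself:

extern crate parity;

fn main() {
    // Build a Configuration from CLI-style arguments. The binary's own
    // main() calls `e.exit()` on parse errors; we do the same here.
    let args = ["parity", "--light"];
    let conf = parity::Configuration::parse_cli(&args).unwrap_or_else(|e| e.exit());

    // `start` reports what the client did: finished instantly (possibly
    // with output to print) or kept running in background threads.
    match parity::start(conf, |_new_chain: String| {}, || {}) {
        Ok(parity::ExecutionAction::Instant(Some(out))) => println!("{}", out),
        Ok(parity::ExecutionAction::Instant(None)) => {}
        Ok(parity::ExecutionAction::Running(client)) => client.shutdown(),
        Err(msg) => eprintln!("{}", msg),
    }
}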
+ +extern crate parity; + +use std::os::raw::{c_char, c_void, c_int}; +use std::panic; +use std::ptr; +use std::slice; + +#[repr(C)] +pub struct ParityParams { + pub configuration: *mut c_void, + pub on_client_restart_cb: Option, + pub on_client_restart_cb_custom: *mut c_void, +} + +#[no_mangle] +pub extern fn parity_config_from_cli(args: *const *const c_char, args_lens: *const usize, len: usize, output: *mut *mut c_void) -> c_int { + unsafe { + panic::catch_unwind(|| { + *output = ptr::null_mut(); + + let args = { + let arg_ptrs = slice::from_raw_parts(args, len); + let arg_lens = slice::from_raw_parts(args_lens, len); + + let mut args = Vec::with_capacity(len + 1); + args.push("parity".to_owned()); + + for (&arg, &len) in arg_ptrs.iter().zip(arg_lens.iter()) { + let string = slice::from_raw_parts(arg as *const u8, len); + match String::from_utf8(string.to_owned()) { + Ok(a) => args.push(a), + Err(_) => return 1, + }; + } + + args + }; + + match parity::Configuration::parse_cli(&args) { + Ok(mut cfg) => { + // Always disable the auto-updater when used as a library. + cfg.args.arg_auto_update = "none".to_owned(); + + let cfg = Box::into_raw(Box::new(cfg)); + *output = cfg as *mut _; + 0 + }, + Err(_) => { + 1 + }, + } + }).unwrap_or(1) + } +} + +#[no_mangle] +pub extern fn parity_config_destroy(cfg: *mut c_void) { + unsafe { + let _ = panic::catch_unwind(|| { + let _cfg = Box::from_raw(cfg as *mut parity::Configuration); + }); + } +} + +#[no_mangle] +pub extern fn parity_start(cfg: *const ParityParams, output: *mut *mut c_void) -> c_int { + unsafe { + panic::catch_unwind(|| { + *output = ptr::null_mut(); + let cfg: &ParityParams = &*cfg; + + let config = Box::from_raw(cfg.configuration as *mut parity::Configuration); + + let on_client_restart_cb = { + struct Cb(Option, *mut c_void); + unsafe impl Send for Cb {} + unsafe impl Sync for Cb {} + impl Cb { + fn call(&self, new_chain: String) { + if let Some(ref cb) = self.0 { + cb(self.1, new_chain.as_bytes().as_ptr() as *const _, new_chain.len()) + } + } + } + let cb = Cb(cfg.on_client_restart_cb, cfg.on_client_restart_cb_custom); + move |new_chain: String| { cb.call(new_chain); } + }; + + let action = match parity::start(*config, on_client_restart_cb, || {}) { + Ok(action) => action, + Err(_) => return 1, + }; + + match action { + parity::ExecutionAction::Instant(Some(s)) => { println!("{}", s); 0 }, + parity::ExecutionAction::Instant(None) => 0, + parity::ExecutionAction::Running(client) => { + *output = Box::into_raw(Box::::new(client)) as *mut c_void; + 0 + } + } + }).unwrap_or(1) + } +} + +#[no_mangle] +pub extern fn parity_destroy(client: *mut c_void) { + unsafe { + let _ = panic::catch_unwind(|| { + let client = Box::from_raw(client as *mut parity::RunningClient); + client.shutdown(); + }); + } +} diff --git a/parity/blockchain.rs b/parity/blockchain.rs index f9c2f8ba378..027814f2451 100644 --- a/parity/blockchain.rs +++ b/parity/blockchain.rs @@ -37,7 +37,6 @@ use params::{SpecType, Pruning, Switch, tracing_switch_to_bool, fatdb_switch_to_ use helpers::{to_client_config, execute_upgrades}; use dir::Directories; use user_defaults::UserDefaults; -use fdlimit; use ethcore_private_tx; use db; @@ -178,8 +177,6 @@ fn execute_import_light(cmd: ImportBlockchain) -> Result<(), String> { // load user defaults let user_defaults = UserDefaults::load(&user_defaults_path)?; - fdlimit::raise_fd_limit(); - // select pruning algorithm let algorithm = cmd.pruning.to_algorithm(&user_defaults); @@ -327,8 +324,6 @@ fn execute_import(cmd: ImportBlockchain) 
-> Result<(), String> { // load user defaults let mut user_defaults = UserDefaults::load(&user_defaults_path)?; - fdlimit::raise_fd_limit(); - // select pruning algorithm let algorithm = cmd.pruning.to_algorithm(&user_defaults); @@ -518,8 +513,6 @@ fn start_client( // load user defaults let user_defaults = UserDefaults::load(&user_defaults_path)?; - fdlimit::raise_fd_limit(); - // select pruning algorithm let algorithm = pruning.to_algorithm(&user_defaults); diff --git a/parity/cli/usage.rs b/parity/cli/usage.rs index 2bdeaaed1a5..ce138fdff3d 100644 --- a/parity/cli/usage.rs +++ b/parity/cli/usage.rs @@ -198,6 +198,7 @@ macro_rules! usage { } } + /// Parsed command line arguments. #[derive(Debug, PartialEq)] pub struct Args { $( diff --git a/parity/configuration.rs b/parity/configuration.rs index 93cc9a4dd6a..3151621801c 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -92,23 +92,30 @@ pub struct Execute { pub cmd: Cmd, } +/// Configuration for the Parity client. #[derive(Debug, PartialEq)] pub struct Configuration { + /// Arguments to be interpreted. pub args: Args, } impl Configuration { - pub fn parse>(command: &[S]) -> Result { - let args = Args::parse(command)?; - + /// Parses a configuration from a list of command line arguments. + /// + /// # Example + /// + /// ``` + /// let _cfg = parity::Configuration::parse_cli(&["--light", "--chain", "koven"]).unwrap(); + /// ``` + pub fn parse_cli>(command: &[S]) -> Result { let config = Configuration { - args: args, + args: Args::parse(command)?, }; Ok(config) } - pub fn into_command(self) -> Result { + pub(crate) fn into_command(self) -> Result { let dirs = self.directories(); let pruning = self.args.arg_pruning.parse()?; let pruning_history = self.args.arg_pruning_history; @@ -1843,7 +1850,7 @@ mod tests { let filename = tempdir.path().join("peers"); File::create(&filename).unwrap().write_all(b" \n\t\n").unwrap(); let args = vec!["parity", "--reserved-peers", filename.to_str().unwrap()]; - let conf = Configuration::parse(&args).unwrap(); + let conf = Configuration::parse_cli(&args).unwrap(); assert!(conf.init_reserved_nodes().is_ok()); } @@ -1853,7 +1860,7 @@ mod tests { let filename = tempdir.path().join("peers_comments"); File::create(&filename).unwrap().write_all(b"# Sample comment\nenode://6f8a80d14311c39f35f516fa664deaaaa13e85b2f7493f37f6144d86991ec012937307647bd3b9a82abe2974e1407241d54947bbb39763a4cac9f77166ad92a0@172.0.0.1:30303\n").unwrap(); let args = vec!["parity", "--reserved-peers", filename.to_str().unwrap()]; - let conf = Configuration::parse(&args).unwrap(); + let conf = Configuration::parse_cli(&args).unwrap(); let reserved_nodes = conf.init_reserved_nodes(); assert!(reserved_nodes.is_ok()); assert_eq!(reserved_nodes.unwrap().len(), 1); @@ -1862,7 +1869,7 @@ mod tests { #[test] fn test_dev_preset() { let args = vec!["parity", "--config", "dev"]; - let conf = Configuration::parse(&args).unwrap(); + let conf = Configuration::parse_cli(&args).unwrap(); match conf.into_command().unwrap().cmd { Cmd::Run(c) => { assert_eq!(c.net_settings.chain, "dev"); @@ -1876,7 +1883,7 @@ mod tests { #[test] fn test_mining_preset() { let args = vec!["parity", "--config", "mining"]; - let conf = Configuration::parse(&args).unwrap(); + let conf = Configuration::parse_cli(&args).unwrap(); match conf.into_command().unwrap().cmd { Cmd::Run(c) => { assert_eq!(c.net_conf.min_peers, 50); @@ -1898,7 +1905,7 @@ mod tests { #[test] fn test_non_standard_ports_preset() { let args = vec!["parity", "--config", "non-standard-ports"]; - let 
conf = Configuration::parse(&args).unwrap(); + let conf = Configuration::parse_cli(&args).unwrap(); match conf.into_command().unwrap().cmd { Cmd::Run(c) => { assert_eq!(c.net_settings.network_port, 30305); @@ -1911,7 +1918,7 @@ mod tests { #[test] fn test_insecure_preset() { let args = vec!["parity", "--config", "insecure"]; - let conf = Configuration::parse(&args).unwrap(); + let conf = Configuration::parse_cli(&args).unwrap(); match conf.into_command().unwrap().cmd { Cmd::Run(c) => { assert_eq!(c.update_policy.require_consensus, false); @@ -1931,7 +1938,7 @@ mod tests { #[test] fn test_dev_insecure_preset() { let args = vec!["parity", "--config", "dev-insecure"]; - let conf = Configuration::parse(&args).unwrap(); + let conf = Configuration::parse_cli(&args).unwrap(); match conf.into_command().unwrap().cmd { Cmd::Run(c) => { assert_eq!(c.net_settings.chain, "dev"); @@ -1954,7 +1961,7 @@ mod tests { #[test] fn test_override_preset() { let args = vec!["parity", "--config", "mining", "--min-peers=99"]; - let conf = Configuration::parse(&args).unwrap(); + let conf = Configuration::parse_cli(&args).unwrap(); match conf.into_command().unwrap().cmd { Cmd::Run(c) => { assert_eq!(c.net_conf.min_peers, 99); @@ -2077,7 +2084,7 @@ mod tests { #[test] fn should_respect_only_max_peers_and_default() { let args = vec!["parity", "--max-peers=50"]; - let conf = Configuration::parse(&args).unwrap(); + let conf = Configuration::parse_cli(&args).unwrap(); match conf.into_command().unwrap().cmd { Cmd::Run(c) => { assert_eq!(c.net_conf.min_peers, 25); @@ -2090,7 +2097,7 @@ mod tests { #[test] fn should_respect_only_max_peers_less_than_default() { let args = vec!["parity", "--max-peers=5"]; - let conf = Configuration::parse(&args).unwrap(); + let conf = Configuration::parse_cli(&args).unwrap(); match conf.into_command().unwrap().cmd { Cmd::Run(c) => { assert_eq!(c.net_conf.min_peers, 5); @@ -2103,7 +2110,7 @@ mod tests { #[test] fn should_respect_only_min_peers_and_default() { let args = vec!["parity", "--min-peers=5"]; - let conf = Configuration::parse(&args).unwrap(); + let conf = Configuration::parse_cli(&args).unwrap(); match conf.into_command().unwrap().cmd { Cmd::Run(c) => { assert_eq!(c.net_conf.min_peers, 5); @@ -2116,7 +2123,7 @@ mod tests { #[test] fn should_respect_only_min_peers_and_greater_than_default() { let args = vec!["parity", "--min-peers=500"]; - let conf = Configuration::parse(&args).unwrap(); + let conf = Configuration::parse_cli(&args).unwrap(); match conf.into_command().unwrap().cmd { Cmd::Run(c) => { assert_eq!(c.net_conf.min_peers, 500); diff --git a/parity/lib.rs b/parity/lib.rs new file mode 100644 index 00000000000..4d9d1a2c95f --- /dev/null +++ b/parity/lib.rs @@ -0,0 +1,249 @@ +// Copyright 2015-2018 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Ethcore client application. 
+ +#![warn(missing_docs)] + +extern crate ansi_term; +extern crate docopt; +#[macro_use] +extern crate clap; +extern crate dir; +extern crate env_logger; +extern crate futures; +extern crate futures_cpupool; +extern crate atty; +extern crate jsonrpc_core; +extern crate num_cpus; +extern crate number_prefix; +extern crate parking_lot; +extern crate regex; +extern crate rlp; +extern crate rpassword; +extern crate rustc_hex; +extern crate semver; +extern crate serde; +extern crate serde_json; +#[macro_use] +extern crate serde_derive; +extern crate toml; + +extern crate ethcore; +extern crate ethcore_bytes as bytes; +extern crate ethcore_io as io; +extern crate ethcore_light as light; +extern crate ethcore_logger; +extern crate ethcore_miner as miner; +extern crate ethcore_network as network; +extern crate ethcore_private_tx; +extern crate ethcore_service; +extern crate ethcore_sync as sync; +extern crate ethcore_transaction as transaction; +extern crate ethereum_types; +extern crate ethkey; +extern crate kvdb; +extern crate node_health; +extern crate panic_hook; +extern crate parity_hash_fetch as hash_fetch; +extern crate parity_ipfs_api; +extern crate parity_local_store as local_store; +extern crate parity_reactor; +extern crate parity_rpc; +extern crate parity_updater as updater; +extern crate parity_version; +extern crate parity_whisper; +extern crate path; +extern crate rpc_cli; +extern crate node_filter; +extern crate keccak_hash as hash; +extern crate journaldb; +extern crate registrar; + +#[macro_use] +extern crate log as rlog; + +#[cfg(feature="secretstore")] +extern crate ethcore_secretstore; + +#[cfg(feature = "dapps")] +extern crate parity_dapps; + +#[cfg(test)] +#[macro_use] +extern crate pretty_assertions; + +#[cfg(windows)] extern crate winapi; + +#[cfg(test)] +extern crate tempdir; + +mod account; +mod blockchain; +mod cache; +mod cli; +mod configuration; +mod dapps; +mod export_hardcoded_sync; +mod ipfs; +mod deprecated; +mod helpers; +mod informant; +mod light_helpers; +mod modules; +mod params; +mod presale; +mod rpc; +mod rpc_apis; +mod run; +mod secretstore; +mod signer; +mod snapshot; +mod upgrade; +mod url; +mod user_defaults; +mod whisper; +mod db; + +use std::net::{TcpListener}; +use std::io::BufReader; +use std::fs::File; +use ansi_term::Style; +use hash::keccak_buffer; +use cli::Args; +use configuration::{Cmd, Execute}; +use deprecated::find_deprecated; +use ethcore_logger::{Config as LogConfig, setup_log}; + +pub use self::configuration::Configuration; +pub use self::run::RunningClient; + +fn print_hash_of(maybe_file: Option) -> Result { + if let Some(file) = maybe_file { + let mut f = BufReader::new(File::open(&file).map_err(|_| "Unable to open file".to_owned())?); + let hash = keccak_buffer(&mut f).map_err(|_| "Unable to read from file".to_owned())?; + Ok(format!("{:x}", hash)) + } else { + Err("Streaming from standard input not yet supported. Specify a file.".to_owned()) + } +} + +/// Action that Parity performed when running `start`. +pub enum ExecutionAction { + /// The execution didn't require starting a node, and thus has finished. + /// Contains the string to print on stdout, if any. + Instant(Option), + + /// The client has started running and must be shut down manually by calling `shutdown`. + /// + /// If you don't call `shutdown()`, execution will continue in the background. 
+ Running(RunningClient), +} + +fn execute(command: Execute, on_client_rq: Cr, on_updater_rq: Rr) -> Result + where Cr: Fn(String) + 'static + Send, + Rr: Fn() + 'static + Send +{ + // TODO: move this to `main()` and expose in the C API so that users can setup logging the way + // they want + let logger = setup_log(&command.logger).expect("Logger is initialized only once; qed"); + + match command.cmd { + Cmd::Run(run_cmd) => { + if run_cmd.ui_conf.enabled && !run_cmd.ui_conf.info_page_only { + warn!("{}", Style::new().bold().paint("Parity browser interface is deprecated. It's going to be removed in the next version, use standalone Parity UI instead.")); + warn!("{}", Style::new().bold().paint("Standalone Parity UI: https://github.com/Parity-JS/shell/releases")); + } + + if run_cmd.ui && run_cmd.dapps_conf.enabled { + // Check if Parity is already running + let addr = format!("{}:{}", run_cmd.ui_conf.interface, run_cmd.ui_conf.port); + if !TcpListener::bind(&addr as &str).is_ok() { + return open_ui(&run_cmd.ws_conf, &run_cmd.ui_conf, &run_cmd.logger_config).map(|_| ExecutionAction::Instant(None)); + } + } + + // start ui + if run_cmd.ui { + open_ui(&run_cmd.ws_conf, &run_cmd.ui_conf, &run_cmd.logger_config)?; + } + + if let Some(ref dapp) = run_cmd.dapp { + open_dapp(&run_cmd.dapps_conf, &run_cmd.http_conf, dapp)?; + } + + let outcome = run::execute(run_cmd, logger, on_client_rq, on_updater_rq)?; + Ok(ExecutionAction::Running(outcome)) + }, + Cmd::Version => Ok(ExecutionAction::Instant(Some(Args::print_version()))), + Cmd::Hash(maybe_file) => print_hash_of(maybe_file).map(|s| ExecutionAction::Instant(Some(s))), + Cmd::Account(account_cmd) => account::execute(account_cmd).map(|s| ExecutionAction::Instant(Some(s))), + Cmd::ImportPresaleWallet(presale_cmd) => presale::execute(presale_cmd).map(|s| ExecutionAction::Instant(Some(s))), + Cmd::Blockchain(blockchain_cmd) => blockchain::execute(blockchain_cmd).map(|_| ExecutionAction::Instant(None)), + Cmd::SignerToken(ws_conf, ui_conf, logger_config) => signer::execute(ws_conf, ui_conf, logger_config).map(|s| ExecutionAction::Instant(Some(s))), + Cmd::SignerSign { id, pwfile, port, authfile } => rpc_cli::signer_sign(id, pwfile, port, authfile).map(|s| ExecutionAction::Instant(Some(s))), + Cmd::SignerList { port, authfile } => rpc_cli::signer_list(port, authfile).map(|s| ExecutionAction::Instant(Some(s))), + Cmd::SignerReject { id, port, authfile } => rpc_cli::signer_reject(id, port, authfile).map(|s| ExecutionAction::Instant(Some(s))), + Cmd::Snapshot(snapshot_cmd) => snapshot::execute(snapshot_cmd).map(|s| ExecutionAction::Instant(Some(s))), + Cmd::ExportHardcodedSync(export_hs_cmd) => export_hardcoded_sync::execute(export_hs_cmd).map(|s| ExecutionAction::Instant(Some(s))), + } +} + +/// Starts the parity client. +/// +/// `on_client_rq` is the action to perform when the client receives an RPC request to be restarted +/// with a different chain. +/// +/// `on_updater_rq` is the action to perform when the updater has a new binary to execute. +/// +/// The first parameter is the command line arguments that you would pass when running the parity +/// binary. +/// +/// On error, returns what to print on stderr. 
+pub fn start(conf: Configuration, on_client_rq: Cr, on_updater_rq: Rr) -> Result + where Cr: Fn(String) + 'static + Send, + Rr: Fn() + 'static + Send +{ + let deprecated = find_deprecated(&conf.args); + for d in deprecated { + println!("{}", d); + } + + execute(conf.into_command()?, on_client_rq, on_updater_rq) +} + +fn open_ui(ws_conf: &rpc::WsConfiguration, ui_conf: &rpc::UiConfiguration, logger_config: &LogConfig) -> Result<(), String> { + if !ui_conf.enabled { + return Err("Cannot use UI command with UI turned off.".into()) + } + + let token = signer::generate_token_and_url(ws_conf, ui_conf, logger_config)?; + // Open a browser + url::open(&token.url).map_err(|e| format!("{}", e))?; + // Print a message + println!("{}", token.message); + Ok(()) +} + +fn open_dapp(dapps_conf: &dapps::Configuration, rpc_conf: &rpc::HttpConfiguration, dapp: &str) -> Result<(), String> { + if !dapps_conf.enabled { + return Err("Cannot use DAPP command with Dapps turned off.".into()) + } + + let url = format!("http://{}:{}/{}/", rpc_conf.interface, rpc_conf.port, dapp); + url::open(&url).map_err(|e| format!("{}", e))?; + Ok(()) +} diff --git a/parity/main.rs b/parity/main.rs index 6774a83864c..e489ad865e4 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -1,4 +1,4 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// Copyright 2015-2018 Parity Technologies (UK) Ltd. // This file is part of Parity. // Parity is free software: you can redistribute it and/or modify @@ -18,187 +18,28 @@ #![warn(missing_docs)] -extern crate ansi_term; +extern crate parity; + extern crate ctrlc; -extern crate docopt; -#[macro_use] -extern crate clap; extern crate dir; -extern crate env_logger; extern crate fdlimit; -extern crate futures; -extern crate futures_cpupool; -extern crate atty; -extern crate jsonrpc_core; -extern crate num_cpus; -extern crate number_prefix; -extern crate parking_lot; -extern crate regex; -extern crate rlp; -extern crate rpassword; -extern crate rustc_hex; -extern crate semver; -extern crate serde; -extern crate serde_json; #[macro_use] -extern crate serde_derive; -extern crate toml; - -extern crate ethcore; -extern crate ethcore_bytes as bytes; -extern crate ethcore_io as io; -extern crate ethcore_light as light; -extern crate ethcore_logger; -extern crate ethcore_miner as miner; -extern crate ethcore_network as network; -extern crate ethcore_private_tx; -extern crate ethcore_service; -extern crate ethcore_sync as sync; -extern crate ethcore_transaction as transaction; -extern crate ethereum_types; -extern crate ethkey; -extern crate kvdb; -extern crate node_health; +extern crate log; extern crate panic_hook; -extern crate parity_hash_fetch as hash_fetch; -extern crate parity_ipfs_api; -extern crate parity_local_store as local_store; -extern crate parity_reactor; -extern crate parity_rpc; -extern crate parity_updater as updater; -extern crate parity_version; -extern crate parity_whisper; -extern crate path; -extern crate rpc_cli; -extern crate node_filter; -extern crate keccak_hash as hash; -extern crate journaldb; -extern crate registrar; - -#[macro_use] -extern crate log as rlog; - -#[cfg(feature="stratum")] -extern crate ethcore_stratum; - -#[cfg(feature="secretstore")] -extern crate ethcore_secretstore; - -#[cfg(feature = "dapps")] -extern crate parity_dapps; - -#[cfg(test)] -#[macro_use] -extern crate pretty_assertions; +extern crate parking_lot; #[cfg(windows)] extern crate winapi; -#[cfg(test)] -extern crate tempdir; - -mod account; -mod blockchain; -mod cache; -mod cli; -mod 
configuration; -mod dapps; -mod export_hardcoded_sync; -mod ipfs; -mod deprecated; -mod helpers; -mod informant; -mod light_helpers; -mod modules; -mod params; -mod presale; -mod rpc; -mod rpc_apis; -mod run; -mod secretstore; -mod signer; -mod snapshot; -mod upgrade; -mod url; -mod user_defaults; -mod whisper; -mod db; - -#[cfg(feature="stratum")] -mod stratum; - use std::{process, env}; -use std::collections::HashMap; -use std::io::{self as stdio, BufReader, Read, Write}; +use std::io::{self as stdio, Read, Write}; use std::fs::{remove_file, metadata, File, create_dir_all}; use std::path::PathBuf; -use hash::keccak_buffer; -use cli::Args; -use configuration::{Cmd, Execute, Configuration}; -use deprecated::find_deprecated; -use ethcore_logger::setup_log; +use std::sync::Arc; +use ctrlc::CtrlC; use dir::default_hypervisor_path; - -fn print_hash_of(maybe_file: Option) -> Result { - if let Some(file) = maybe_file { - let mut f = BufReader::new(File::open(&file).map_err(|_| "Unable to open file".to_owned())?); - let hash = keccak_buffer(&mut f).map_err(|_| "Unable to read from file".to_owned())?; - Ok(format!("{:x}", hash)) - } else { - Err("Streaming from standard input not yet supported. Specify a file.".to_owned()) - } -} - -enum PostExecutionAction { - Print(String), - Restart(Option), - Quit, -} - -fn execute(command: Execute, can_restart: bool) -> Result { - let logger = setup_log(&command.logger).expect("Logger is initialized only once; qed"); - - match command.cmd { - Cmd::Run(run_cmd) => { - let (restart, spec_name) = run::execute(run_cmd, can_restart, logger)?; - Ok(if restart { PostExecutionAction::Restart(spec_name) } else { PostExecutionAction::Quit }) - }, - Cmd::Version => Ok(PostExecutionAction::Print(Args::print_version())), - Cmd::Hash(maybe_file) => print_hash_of(maybe_file).map(|s| PostExecutionAction::Print(s)), - Cmd::Account(account_cmd) => account::execute(account_cmd).map(|s| PostExecutionAction::Print(s)), - Cmd::ImportPresaleWallet(presale_cmd) => presale::execute(presale_cmd).map(|s| PostExecutionAction::Print(s)), - Cmd::Blockchain(blockchain_cmd) => blockchain::execute(blockchain_cmd).map(|_| PostExecutionAction::Quit), - Cmd::SignerToken(ws_conf, ui_conf, logger_config) => signer::execute(ws_conf, ui_conf, logger_config).map(|s| PostExecutionAction::Print(s)), - Cmd::SignerSign { id, pwfile, port, authfile } => rpc_cli::signer_sign(id, pwfile, port, authfile).map(|s| PostExecutionAction::Print(s)), - Cmd::SignerList { port, authfile } => rpc_cli::signer_list(port, authfile).map(|s| PostExecutionAction::Print(s)), - Cmd::SignerReject { id, port, authfile } => rpc_cli::signer_reject(id, port, authfile).map(|s| PostExecutionAction::Print(s)), - Cmd::Snapshot(snapshot_cmd) => snapshot::execute(snapshot_cmd).map(|s| PostExecutionAction::Print(s)), - Cmd::ExportHardcodedSync(export_hs_cmd) => export_hardcoded_sync::execute(export_hs_cmd).map(|s| PostExecutionAction::Print(s)), - } -} - -fn start(mut args: Vec) -> Result { - args.insert(0, "parity".to_owned()); - let conf = Configuration::parse(&args).unwrap_or_else(|e| e.exit()); - let can_restart = conf.args.flag_can_restart; - - let deprecated = find_deprecated(&conf.args); - for d in deprecated { - println!("{}", d); - } - - let cmd = conf.into_command()?; - execute(cmd, can_restart) -} - -#[cfg(not(feature="stratum"))] -fn stratum_main(_: &mut HashMap) {} - -#[cfg(feature="stratum")] -fn stratum_main(alt_mains: &mut HashMap) { - alt_mains.insert("stratum".to_owned(), stratum::main); -} - -fn sync_main(_: &mut 
HashMap) {} +use fdlimit::raise_fd_limit; +use parity::{start, ExecutionAction}; +use parking_lot::{Condvar, Mutex}; fn updates_path(name: &str) -> PathBuf { let mut dest = PathBuf::from(default_hypervisor_path()); @@ -275,48 +116,68 @@ const PLEASE_RESTART_EXIT_CODE: i32 = 69; // Returns the exit error code. fn main_direct(force_can_restart: bool) -> i32 { global_init(); - let mut alt_mains = HashMap::new(); - sync_main(&mut alt_mains); - stratum_main(&mut alt_mains); - let res = if let Some(f) = std::env::args().nth(1).and_then(|arg| alt_mains.get(&arg.to_string())) { - f(); - 0 + + let mut conf = { + let args = std::env::args().collect::>(); + parity::Configuration::parse_cli(&args).unwrap_or_else(|e| e.exit()) + }; + + if let Some(spec_override) = take_spec_name_override() { + conf.args.flag_testnet = false; + conf.args.arg_chain = spec_override; + } + + let can_restart = force_can_restart || conf.args.flag_can_restart; + + // increase max number of open files + raise_fd_limit(); + + let exit = Arc::new((Mutex::new((false, None)), Condvar::new())); + + let exec = if can_restart { + let e1 = exit.clone(); + let e2 = exit.clone(); + start(conf, + move |new_chain: String| { *e1.0.lock() = (true, Some(new_chain)); e1.1.notify_all(); }, + move || { *e2.0.lock() = (true, None); e2.1.notify_all(); }) } else { - let mut args = std::env::args().skip(1).collect::>(); - if force_can_restart && !args.iter().any(|arg| arg == "--can-restart") { - args.push("--can-restart".to_owned()); - } + trace!(target: "mode", "Not hypervised: not setting exit handlers."); + start(conf, move |_| {}, move || {}) + }; - if let Some(spec_override) = take_spec_name_override() { - args.retain(|f| f != "--testnet"); - args.retain(|f| !f.starts_with("--chain=")); - while let Some(pos) = args.iter().position(|a| a == "--chain") { - if args.len() > pos + 1 { - args.remove(pos + 1); + let res = match exec { + Ok(result) => match result { + ExecutionAction::Instant(Some(s)) => { println!("{}", s); 0 }, + ExecutionAction::Instant(None) => 0, + ExecutionAction::Running(client) => { + CtrlC::set_handler({ + let e = exit.clone(); + move || { e.1.notify_all(); } + }); + + // Wait for signal + let mut lock = exit.0.lock(); + let _ = exit.1.wait(&mut lock); + + client.shutdown(); + + match &*lock { + &(true, ref spec_name_override) => { + if let &Some(ref spec_name) = spec_name_override { + set_spec_name_override(spec_name.clone()); + } + PLEASE_RESTART_EXIT_CODE + }, + _ => 0, } - args.remove(pos); - } - args.push("--chain".to_owned()); - args.push(spec_override); - } - - match start(args) { - Ok(result) => match result { - PostExecutionAction::Print(s) => { println!("{}", s); 0 }, - PostExecutionAction::Restart(spec_name_override) => { - if let Some(spec_name) = spec_name_override { - set_spec_name_override(spec_name); - } - PLEASE_RESTART_EXIT_CODE - }, - PostExecutionAction::Quit => 0, - }, - Err(err) => { - writeln!(&mut stdio::stderr(), "{}", err).expect("StdErr available; qed"); - 1 }, - } + }, + Err(err) => { + writeln!(&mut stdio::stderr(), "{}", err).expect("StdErr available; qed"); + 1 + }, }; + global_cleanup(); res } diff --git a/parity/run.rs b/parity/run.rs index fdb32293b9c..73113055bbf 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -19,10 +19,8 @@ use std::fmt; use std::sync::{Arc, Weak}; use std::time::{Duration, Instant}; use std::thread; -use std::net::{TcpListener}; -use ansi_term::{Colour, Style}; -use ctrlc::CtrlC; +use ansi_term::Colour; use ethcore::account_provider::{AccountProvider, 
AccountProviderSettings}; use ethcore::client::{Client, Mode, DatabaseCompactionProfile, VMType, BlockChainClient, BlockInfo}; use ethcore::ethstore::ethkey; @@ -34,7 +32,6 @@ use ethcore_logger::{Config as LogConfig, RotatingLogger}; use ethcore_service::ClientService; use sync::{self, SyncConfig}; use miner::work_notify::WorkPoster; -use fdlimit::raise_fd_limit; use futures_cpupool::CpuPool; use hash_fetch::{self, fetch}; use informant::{Informant, LightNodeInformantData, FullNodeInformantData}; @@ -45,7 +42,6 @@ use node_filter::NodeFilter; use node_health; use parity_reactor::EventLoop; use parity_rpc::{NetworkSettings, informant, is_major_importing}; -use parking_lot::{Condvar, Mutex}; use updater::{UpdatePolicy, Updater}; use parity_version::version; use ethcore_private_tx::{ProviderConfig, EncryptorConfig, SecretStoreEncryptor}; @@ -65,7 +61,6 @@ use rpc; use rpc_apis; use secretstore; use signer; -use url; use db; // how often to take periodic snapshots. @@ -138,28 +133,6 @@ pub struct RunCmd { pub no_hardcoded_sync: bool, } -pub fn open_ui(ws_conf: &rpc::WsConfiguration, ui_conf: &rpc::UiConfiguration, logger_config: &LogConfig) -> Result<(), String> { - if !ui_conf.enabled { - return Err("Cannot use UI command with UI turned off.".into()) - } - - let token = signer::generate_token_and_url(ws_conf, ui_conf, logger_config)?; - // Open a browser - url::open(&token.url).map_err(|e| format!("{}", e))?; - // Print a message - println!("{}", token.message); - Ok(()) -} - -pub fn open_dapp(dapps_conf: &dapps::Configuration, rpc_conf: &rpc::HttpConfiguration, dapp: &str) -> Result<(), String> { - if !dapps_conf.enabled { - return Err("Cannot use DAPP command with Dapps turned off.".into()) - } - - let url = format!("http://{}:{}/{}/", rpc_conf.interface, rpc_conf.port, dapp); - url::open(&url).map_err(|e| format!("{}", e))?; - Ok(()) -} // node info fetcher for the local store. struct FullNodeInfo { miner: Option>, // TODO: only TXQ needed, just use that after decoupling. @@ -415,10 +388,12 @@ fn execute_light_impl(cmd: RunCmd, logger: Arc) -> Result(cmd: RunCmd, logger: Arc, on_client_rq: }, }; - // start ui - if cmd.ui { - open_ui(&cmd.ws_conf, &cmd.ui_conf, &cmd.logger_config)?; - } - - if let Some(dapp) = cmd.dapp { - open_dapp(&cmd.dapps_conf, &cmd.http_conf, &dapp)?; - } - client.set_exit_handler(on_client_rq); updater.set_exit_handler(on_updater_rq); - Ok(RunningClient::Full { - informant, - client, - keep_alive: Box::new((watcher, service, updater, ws_server, http_server, ipc_server, ui_server, secretstore_key_server, ipfs_server, event_loop)), + Ok(RunningClient { + inner: RunningClientInner::Full { + informant, + client, + keep_alive: Box::new((watcher, service, updater, ws_server, http_server, ipc_server, ui_server, secretstore_key_server, ipfs_server, event_loop)), + } }) } -enum RunningClient { +/// Parity client currently executing in background threads. +/// +/// Should be destroyed by calling `shutdown()`, otherwise execution will continue in the +/// background. +pub struct RunningClient { + inner: RunningClientInner +} + +enum RunningClientInner { Light { informant: Arc>, client: Arc, @@ -931,9 +907,10 @@ enum RunningClient { } impl RunningClient { - fn shutdown(self) { - match self { - RunningClient::Light { informant, client, keep_alive } => { + /// Shuts down the client. 
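[editor's note] The `shutdown` body that follows combines two idioms: dropping a boxed `keep_alive` tuple to tear the servers down, and a `wait_for_drop` loop on a `Weak` handle that blocks until the last strong reference to the client is gone. A toy rendition (the real run.rs helper also enforces a timeout; `&str` stands in for the client):

```rust
use std::sync::{Arc, Weak};
use std::{thread, time::Duration};

fn wait_for_drop<T>(weak: Weak<T>) {
    while weak.upgrade().is_some() {
        thread::sleep(Duration::from_millis(10));
    }
}

fn main() {
    let client = Arc::new("client state");
    let weak = Arc::downgrade(&client);

    let keep_alive = client.clone(); // servers, informant, event loop, ...
    drop(client);
    drop(keep_alive); // last strong reference gone: shutdown can complete

    wait_for_drop(weak);
    println!("fully shut down");
}
```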
+ pub fn shutdown(self) { + match self.inner { + RunningClientInner::Light { informant, client, keep_alive } => { // Create a weak reference to the client so that we can wait on shutdown // until it is dropped let weak_client = Arc::downgrade(&client); @@ -943,7 +920,7 @@ impl RunningClient { drop(client); wait_for_drop(weak_client); }, - RunningClient::Full { informant, client, keep_alive } => { + RunningClientInner::Full { informant, client, keep_alive } => { info!("Finishing work, please wait..."); // Create a weak reference to the client so that we can wait on shutdown // until it is dropped @@ -961,51 +938,24 @@ impl RunningClient { } } -pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> Result<(bool, Option), String> { - if cmd.ui_conf.enabled && !cmd.ui_conf.info_page_only { - warn!("{}", Style::new().bold().paint("Parity browser interface is deprecated. It's going to be removed in the next version, use standalone Parity UI instead.")); - warn!("{}", Style::new().bold().paint("Standalone Parity UI: https://github.com/Parity-JS/shell/releases")); - } - - if cmd.ui && cmd.dapps_conf.enabled { - // Check if Parity is already running - let addr = format!("{}:{}", cmd.ui_conf.interface, cmd.ui_conf.port); - if !TcpListener::bind(&addr as &str).is_ok() { - return open_ui(&cmd.ws_conf, &cmd.ui_conf, &cmd.logger_config).map(|_| (false, None)); - } - } - - // increase max number of open files - raise_fd_limit(); - - let exit = Arc::new((Mutex::new((false, None)), Condvar::new())); - - let running_client = if cmd.light { - execute_light_impl(cmd, logger)? - } else if can_restart { - let e1 = exit.clone(); - let e2 = exit.clone(); - execute_impl(cmd, logger, - move |new_chain: String| { *e1.0.lock() = (true, Some(new_chain)); e1.1.notify_all(); }, - move || { *e2.0.lock() = (true, None); e2.1.notify_all(); })? +/// Executes the given run command. +/// +/// `on_client_rq` is the action to perform when the client receives an RPC request to be restarted +/// with a different chain. +/// +/// `on_updater_rq` is the action to perform when the updater has a new binary to execute. +/// +/// On error, returns what to print on stderr. +pub fn execute(cmd: RunCmd, logger: Arc, + on_client_rq: Cr, on_updater_rq: Rr) -> Result + where Cr: Fn(String) + 'static + Send, + Rr: Fn() + 'static + Send +{ + if cmd.light { + execute_light_impl(cmd, logger) } else { - trace!(target: "mode", "Not hypervised: not setting exit handlers."); - execute_impl(cmd, logger, move |_| {}, move || {})? 
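[editor's note] The exit handshake deleted from run.rs here reappears in parity/main.rs as a shared `(Mutex<(bool, Option<String>)>, Condvar)` pair that the restart and update callbacks notify. A self-contained sketch of that pattern with parking_lot, close to the code in `main_direct` (the predicate loop guards against a notify racing ahead of the wait):

```rust
use std::sync::Arc;
use std::thread;
use parking_lot::{Condvar, Mutex};

fn main() {
    // (should_exit, new_chain_override), guarded together with the condvar
    let exit = Arc::new((Mutex::new((false, None::<String>)), Condvar::new()));

    // stand-in for the `on_client_rq` callback handed to `start`
    let e = exit.clone();
    thread::spawn(move || {
        *e.0.lock() = (true, Some("kovan".into()));
        e.1.notify_all();
    });

    // the main thread parks until a callback (or the Ctrl-C hook) notifies
    let mut lock = exit.0.lock();
    while !lock.0 {
        exit.1.wait(&mut lock);
    }
    if let Some(ref spec) = lock.1 {
        println!("restart requested with chain {}", spec);
    }
}
```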
- }; - - // Handle possible exits - CtrlC::set_handler({ - let e = exit.clone(); - move || { e.1.notify_all(); } - }); - - // Wait for signal - let mut l = exit.0.lock(); - let _ = exit.1.wait(&mut l); - - running_client.shutdown(); - - Ok(l.clone()) + execute_impl(cmd, logger, on_client_rq, on_updater_rq) + } } #[cfg(not(windows))] diff --git a/parity/snapshot.rs b/parity/snapshot.rs index ad93801c0b5..423864679a2 100644 --- a/parity/snapshot.rs +++ b/parity/snapshot.rs @@ -35,7 +35,6 @@ use params::{SpecType, Pruning, Switch, tracing_switch_to_bool, fatdb_switch_to_ use helpers::{to_client_config, execute_upgrades}; use dir::Directories; use user_defaults::UserDefaults; -use fdlimit; use ethcore_private_tx; use db; @@ -149,8 +148,6 @@ impl SnapshotCommand { // load user defaults let user_defaults = UserDefaults::load(&user_defaults_path)?; - fdlimit::raise_fd_limit(); - // select pruning algorithm let algorithm = self.pruning.to_algorithm(&user_defaults); diff --git a/parity/url.rs b/parity/url.rs index 41c4e545831..4f547c28f07 100644 --- a/parity/url.rs +++ b/parity/url.rs @@ -80,8 +80,9 @@ pub fn open(url: &str) -> Result<(), Error> { } #[cfg(target_os="android")] -pub fn open(_url: &str) { +pub fn open(_url: &str) -> Result<(), Error> { // TODO: While it is generally always bad to leave a function implemented, there is not much // more we can do here. This function will eventually be removed when we compile Parity // as a library and not as a full binary. + Ok(()) } diff --git a/test.sh b/test.sh index 84940a6aca4..6dcd258ea36 100755 --- a/test.sh +++ b/test.sh @@ -40,8 +40,18 @@ echo "________Validate chainspecs________" fi -# Running test's +# Running the C example +echo "________Running the C example________" +cd parity-clib-example && \ + mkdir -p build && \ + cd build && \ + cmake .. && \ + make && \ + ./parity-example && \ + cd .. && \ + rm -rf build && \ + cd .. + +# Running tests echo "________Running Parity Full Test Suite________" - cargo test -j 8 $OPTIONS --features "$FEATURES" --all $1 - From 7a00d97977f387e66793e6b31e0d739d3f5ff8c8 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Wed, 9 May 2018 14:48:55 +0800 Subject: [PATCH 06/11] Trace precompiled contracts when the transfer value is not zero (#8486) * Trace precompiled contracts when the transfer value is not zero * Add tests for precompiled CALL tracing * Use byzantium test machine for the new test * Add notes in comments on why we don't trace all precompileds * Use is_transferred instead of transferred --- ethcore/src/executive.rs | 86 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 84 insertions(+), 2 deletions(-) diff --git a/ethcore/src/executive.rs b/ethcore/src/executive.rs index cded6358e8f..e29da093c7d 100644 --- a/ethcore/src/executive.rs +++ b/ethcore/src/executive.rs @@ -428,8 +428,14 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { self.state.discard_checkpoint(); output.write(0, &builtin_out_buffer); - // trace only top level calls to builtins to avoid DDoS attacks - if self.depth == 0 { + // Trace only top level calls and calls with balance transfer to builtins. The reason why we don't + // trace all internal calls to builtin contracts is that memcpy (IDENTITY) is a heavily used + // function. 
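[editor's note] For reference, the rule the added lines implement, reduced to a standalone predicate. The enum is a minimal stand-in for ethcore's `ActionValue`; no such helper exists in the patch:

```rust
// Minimal stand-ins so the sketch compiles on its own; in ethcore these are
// vm::ActionValue and ethereum_types::U256.
#[derive(Clone, Copy)]
enum ActionValue { Transfer(u64), Apparent(u64) }

fn should_trace_builtin(depth: usize, value: ActionValue) -> bool {
    let is_transferred = match value {
        ActionValue::Transfer(v) => v != 0,  // balance actually moves
        ActionValue::Apparent(_) => false,   // e.g. DELEGATECALL: no transfer
    };
    depth == 0 || is_transferred
}

fn main() {
    assert!(should_trace_builtin(0, ActionValue::Apparent(5)));   // top level: always traced
    assert!(should_trace_builtin(3, ActionValue::Transfer(1)));   // nested, but value moves
    assert!(!should_trace_builtin(3, ActionValue::Transfer(0)));  // nested memcpy-style call: skipped
}
```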
+ let is_transferred = match params.value { + ActionValue::Transfer(value) => value != U256::zero(), + ActionValue::Apparent(_) => false, + }; + if self.depth == 0 || is_transferred { let mut trace_output = tracer.prepare_trace_output(); if let Some(out) = trace_output.as_mut() { *out = output.to_owned(); @@ -722,6 +728,12 @@ mod tests { machine } + fn make_byzantium_machine(max_depth: usize) -> EthereumMachine { + let mut machine = ::ethereum::new_byzantium_test_machine(); + machine.set_schedule_creation_rules(Box::new(move |s, _| s.max_depth = max_depth)); + machine + } + #[test] fn test_contract_address() { let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); @@ -813,6 +825,76 @@ mod tests { assert_eq!(substate.contracts_created.len(), 0); } + #[test] + fn test_call_to_precompiled_tracing() { + // code: + // + // 60 00 - push 00 out size + // 60 00 - push 00 out offset + // 60 00 - push 00 in size + // 60 00 - push 00 in offset + // 60 01 - push 01 value + // 60 03 - push 03 to + // 61 ffff - push fff gas + // f1 - CALL + + let code = "60006000600060006001600361fffff1".from_hex().unwrap(); + let sender = Address::from_str("4444444444444444444444444444444444444444").unwrap(); + let address = Address::from_str("5555555555555555555555555555555555555555").unwrap(); + + let mut params = ActionParams::default(); + params.address = address.clone(); + params.code_address = address.clone(); + params.sender = sender.clone(); + params.origin = sender.clone(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(code)); + params.value = ActionValue::Transfer(U256::from(100)); + params.call_type = CallType::Call; + let mut state = get_temp_state(); + state.add_balance(&sender, &U256::from(100), CleanupMode::NoEmpty).unwrap(); + let info = EnvInfo::default(); + let machine = make_byzantium_machine(5); + let mut substate = Substate::new(); + let mut tracer = ExecutiveTracer::default(); + let mut vm_tracer = ExecutiveVMTracer::toplevel(); + + let mut ex = Executive::new(&mut state, &info, &machine); + let output = BytesRef::Fixed(&mut[0u8;0]); + ex.call(params, &mut substate, output, &mut tracer, &mut vm_tracer).unwrap(); + + assert_eq!(tracer.drain(), vec![FlatTrace { + action: trace::Action::Call(trace::Call { + from: "4444444444444444444444444444444444444444".into(), + to: "5555555555555555555555555555555555555555".into(), + value: 100.into(), + gas: 100_000.into(), + input: vec![], + call_type: CallType::Call + }), + result: trace::Res::Call(trace::CallResult { + gas_used: 33021.into(), + output: vec![] + }), + subtraces: 1, + trace_address: Default::default() + }, FlatTrace { + action: trace::Action::Call(trace::Call { + from: "5555555555555555555555555555555555555555".into(), + to: "0000000000000000000000000000000000000003".into(), + value: 1.into(), + gas: 66560.into(), + input: vec![], + call_type: CallType::Call + }), result: trace::Res::Call(trace::CallResult { + gas_used: 600.into(), + output: vec![] + }), + subtraces: 0, + trace_address: vec![0].into_iter().collect(), + }]); + } + #[test] // Tracing is not suported in JIT fn test_call_to_create() { From 24838bbcd3aa3eccde8bb0c24909172c06fdf58b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Wed, 9 May 2018 08:49:34 +0200 Subject: [PATCH 07/11] Don't block sync when importing old blocks (#8530) * Alter IO queueing. * Don't require IoMessages to be Clone * Ancient blocks imported via IoChannel. * Get rid of private transactions io message. 
* Get rid of deadlock and fix disconnected handler. * Revert to old disconnect condition. * Fix tests. * Fix deadlock. --- Cargo.lock | 2 + ethcore/private-tx/src/lib.rs | 250 +++++++++--------- ethcore/private-tx/tests/private_contract.rs | 2 +- ethcore/service/Cargo.toml | 1 + ethcore/service/src/lib.rs | 3 + ethcore/service/src/service.rs | 32 +-- ethcore/src/client/ancient_import.rs | 50 +++- ethcore/src/client/client.rs | 254 +++++++++++-------- ethcore/src/client/io_message.rs | 30 ++- ethcore/src/client/mod.rs | 3 +- ethcore/src/client/test_client.rs | 32 +-- ethcore/src/client/traits.rs | 24 +- ethcore/src/views/block.rs | 5 +- ethcore/sync/Cargo.toml | 1 + ethcore/sync/src/api.rs | 4 + ethcore/sync/src/block_sync.rs | 2 +- ethcore/sync/src/chain.rs | 5 +- ethcore/sync/src/lib.rs | 2 + ethcore/sync/src/tests/helpers.rs | 6 +- ethcore/sync/src/tests/private.rs | 9 +- util/io/src/lib.rs | 4 +- util/io/src/service.rs | 50 ++-- util/io/src/worker.rs | 10 +- 23 files changed, 452 insertions(+), 329 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 64e468e6703..b02fcbd24e5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -818,6 +818,7 @@ dependencies = [ "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", "stop-guard 0.1.0", "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", + "trace-time 0.1.0", ] [[package]] @@ -866,6 +867,7 @@ dependencies = [ "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "trace-time 0.1.0", "triehash 0.1.0", ] diff --git a/ethcore/private-tx/src/lib.rs b/ethcore/private-tx/src/lib.rs index 723d4918298..7aca4c85dc4 100644 --- a/ethcore/private-tx/src/lib.rs +++ b/ethcore/private-tx/src/lib.rs @@ -149,8 +149,8 @@ impl Provider where { encryptor: Box, config: ProviderConfig, channel: IoChannel, - ) -> Result { - Ok(Provider { + ) -> Self { + Provider { encryptor, validator_accounts: config.validator_accounts.into_iter().collect(), signer_account: config.signer_account, @@ -162,7 +162,7 @@ impl Provider where { miner, accounts, channel, - }) + } } // TODO [ToDr] Don't use `ChainNotify` here! @@ -243,50 +243,6 @@ impl Provider where { Ok(original_transaction) } - /// Process received private transaction - pub fn import_private_transaction(&self, rlp: &[u8]) -> Result<(), Error> { - trace!("Private transaction received"); - let private_tx: PrivateTransaction = Rlp::new(rlp).as_val()?; - let contract = private_tx.contract; - let contract_validators = self.get_validators(BlockId::Latest, &contract)?; - - let validation_account = contract_validators - .iter() - .find(|address| self.validator_accounts.contains(address)); - - match validation_account { - None => { - // TODO [ToDr] This still seems a bit invalid, imho we should still import the transaction to the pool. - // Importing to pool verifies correctness and nonce; here we are just blindly forwarding. 
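[editor's note] The methods being removed here re-appear below behind an `Importer` trait implemented for `Arc<Provider>`, which lets the handler downgrade itself to a `Weak` before queueing a closure on the IO channel, avoiding a reference cycle with the IO service. The shape of that pattern, stripped to stand-ins:

```rust
use std::sync::{Arc, Weak};

trait Importer {
    fn import_private_transaction(&self, rlp: &[u8]);
}

struct Provider;

impl Importer for Arc<Provider> {
    fn import_private_transaction(&self, _rlp: &[u8]) {
        // Downgrade before moving into the queued closure: the IO service
        // must not keep the provider alive on its own.
        let weak: Weak<Provider> = Arc::downgrade(self);
        let queued = move || {
            if let Some(_provider) = weak.upgrade() {
                // the process_queue() equivalent would run here, on the client thread
            }
        };
        queued(); // in the patch this closure is sent via ClientIoMessage::execute
    }
}

fn main() {
    let provider = Arc::new(Provider);
    provider.import_private_transaction(&[]);
}
```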
- // - // Not for verification, broadcast further to peers - self.broadcast_private_transaction(rlp.into()); - return Ok(()); - }, - Some(&validation_account) => { - let hash = private_tx.hash(); - trace!("Private transaction taken for verification"); - let original_tx = self.extract_original_transaction(private_tx, &contract)?; - trace!("Validating transaction: {:?}", original_tx); - // Verify with the first account available - trace!("The following account will be used for verification: {:?}", validation_account); - let nonce_cache = Default::default(); - self.transactions_for_verification.lock().add_transaction( - original_tx, - contract, - validation_account, - hash, - self.pool_client(&nonce_cache), - )?; - // NOTE This will just fire `on_private_transaction_queued` but from a client thread. - // It seems that a lot of heavy work (verification) is done in this thread anyway - // it might actually make sense to decouple it from clientService and just use dedicated thread - // for both verification and execution. - self.channel.send(ClientIoMessage::NewPrivateTransaction).map_err(|_| ErrorKind::ClientIsMalformed.into()) - } - } - } - fn pool_client<'a>(&'a self, nonce_cache: &'a RwLock>) -> miner::pool_client::PoolClient<'a, Client> { let engine = self.client.engine(); let refuse_service_transactions = true; @@ -299,11 +255,6 @@ impl Provider where { ) } - /// Private transaction for validation added into queue - pub fn on_private_transaction_queued(&self) -> Result<(), Error> { - self.process_queue() - } - /// Retrieve and verify the first available private transaction for every sender /// /// TODO [ToDr] It seems that: @@ -347,73 +298,6 @@ impl Provider where { Ok(()) } - /// Add signed private transaction into the store - /// Creates corresponding public transaction if last required singature collected and sends it to the chain - pub fn import_signed_private_transaction(&self, rlp: &[u8]) -> Result<(), Error> { - let tx: SignedPrivateTransaction = Rlp::new(rlp).as_val()?; - trace!("Signature for private transaction received: {:?}", tx); - let private_hash = tx.private_transaction_hash(); - let desc = match self.transactions_for_signing.lock().get(&private_hash) { - None => { - // TODO [ToDr] Verification (we can't just blindly forward every transaction) - - // Not our transaction, broadcast further to peers - self.broadcast_signed_private_transaction(rlp.into()); - return Ok(()); - }, - Some(desc) => desc, - }; - - let last = self.last_required_signature(&desc, tx.signature())?; - - if last { - let mut signatures = desc.received_signatures.clone(); - signatures.push(tx.signature()); - let rsv: Vec = signatures.into_iter().map(|sign| sign.into_electrum().into()).collect(); - //Create public transaction - let public_tx = self.public_transaction( - desc.state.clone(), - &desc.original_transaction, - &rsv, - desc.original_transaction.nonce, - desc.original_transaction.gas_price - )?; - trace!("Last required signature received, public transaction created: {:?}", public_tx); - //Sign and add it to the queue - let chain_id = desc.original_transaction.chain_id(); - let hash = public_tx.hash(chain_id); - let signer_account = self.signer_account.ok_or_else(|| ErrorKind::SignerAccountNotSet)?; - let password = find_account_password(&self.passwords, &*self.accounts, &signer_account); - let signature = self.accounts.sign(signer_account, password, hash)?; - let signed = SignedTransaction::new(public_tx.with_signature(signature, chain_id))?; - match self.miner.import_own_transaction(&*self.client, 
signed.into()) { - Ok(_) => trace!("Public transaction added to queue"), - Err(err) => { - trace!("Failed to add transaction to queue, error: {:?}", err); - bail!(err); - } - } - //Remove from store for signing - match self.transactions_for_signing.lock().remove(&private_hash) { - Ok(_) => {} - Err(err) => { - trace!("Failed to remove transaction from signing store, error: {:?}", err); - bail!(err); - } - } - } else { - //Add signature to the store - match self.transactions_for_signing.lock().add_signature(&private_hash, tx.signature()) { - Ok(_) => trace!("Signature stored for private transaction"), - Err(err) => { - trace!("Failed to add signature to signing store, error: {:?}", err); - bail!(err); - } - } - } - Ok(()) - } - fn last_required_signature(&self, desc: &PrivateTransactionSigningDesc, sign: Signature) -> Result { if desc.received_signatures.contains(&sign) { return Ok(false); @@ -657,6 +541,134 @@ impl Provider where { } } +pub trait Importer { + /// Process received private transaction + fn import_private_transaction(&self, _rlp: &[u8]) -> Result<(), Error>; + + /// Add signed private transaction into the store + /// + /// Creates corresponding public transaction if last required signature collected and sends it to the chain + fn import_signed_private_transaction(&self, _rlp: &[u8]) -> Result<(), Error>; +} + +// TODO [ToDr] Offload more heavy stuff to the IoService thread. +// It seems that a lot of heavy work (verification) is done in this thread anyway +// it might actually make sense to decouple it from clientService and just use dedicated thread +// for both verification and execution. + +impl Importer for Arc { + fn import_private_transaction(&self, rlp: &[u8]) -> Result<(), Error> { + trace!("Private transaction received"); + let private_tx: PrivateTransaction = Rlp::new(rlp).as_val()?; + let contract = private_tx.contract; + let contract_validators = self.get_validators(BlockId::Latest, &contract)?; + + let validation_account = contract_validators + .iter() + .find(|address| self.validator_accounts.contains(address)); + + match validation_account { + None => { + // TODO [ToDr] This still seems a bit invalid, imho we should still import the transaction to the pool. + // Importing to pool verifies correctness and nonce; here we are just blindly forwarding. 
+ // + // Not for verification, broadcast further to peers + self.broadcast_private_transaction(rlp.into()); + return Ok(()); + }, + Some(&validation_account) => { + let hash = private_tx.hash(); + trace!("Private transaction taken for verification"); + let original_tx = self.extract_original_transaction(private_tx, &contract)?; + trace!("Validating transaction: {:?}", original_tx); + // Verify with the first account available + trace!("The following account will be used for verification: {:?}", validation_account); + let nonce_cache = Default::default(); + self.transactions_for_verification.lock().add_transaction( + original_tx, + contract, + validation_account, + hash, + self.pool_client(&nonce_cache), + )?; + let provider = Arc::downgrade(self); + self.channel.send(ClientIoMessage::execute(move |_| { + if let Some(provider) = provider.upgrade() { + if let Err(e) = provider.process_queue() { + debug!("Unable to process the queue: {}", e); + } + } + })).map_err(|_| ErrorKind::ClientIsMalformed.into()) + } + } + } + + fn import_signed_private_transaction(&self, rlp: &[u8]) -> Result<(), Error> { + let tx: SignedPrivateTransaction = Rlp::new(rlp).as_val()?; + trace!("Signature for private transaction received: {:?}", tx); + let private_hash = tx.private_transaction_hash(); + let desc = match self.transactions_for_signing.lock().get(&private_hash) { + None => { + // TODO [ToDr] Verification (we can't just blindly forward every transaction) + + // Not our transaction, broadcast further to peers + self.broadcast_signed_private_transaction(rlp.into()); + return Ok(()); + }, + Some(desc) => desc, + }; + + let last = self.last_required_signature(&desc, tx.signature())?; + + if last { + let mut signatures = desc.received_signatures.clone(); + signatures.push(tx.signature()); + let rsv: Vec = signatures.into_iter().map(|sign| sign.into_electrum().into()).collect(); + //Create public transaction + let public_tx = self.public_transaction( + desc.state.clone(), + &desc.original_transaction, + &rsv, + desc.original_transaction.nonce, + desc.original_transaction.gas_price + )?; + trace!("Last required signature received, public transaction created: {:?}", public_tx); + //Sign and add it to the queue + let chain_id = desc.original_transaction.chain_id(); + let hash = public_tx.hash(chain_id); + let signer_account = self.signer_account.ok_or_else(|| ErrorKind::SignerAccountNotSet)?; + let password = find_account_password(&self.passwords, &*self.accounts, &signer_account); + let signature = self.accounts.sign(signer_account, password, hash)?; + let signed = SignedTransaction::new(public_tx.with_signature(signature, chain_id))?; + match self.miner.import_own_transaction(&*self.client, signed.into()) { + Ok(_) => trace!("Public transaction added to queue"), + Err(err) => { + trace!("Failed to add transaction to queue, error: {:?}", err); + bail!(err); + } + } + //Remove from store for signing + match self.transactions_for_signing.lock().remove(&private_hash) { + Ok(_) => {} + Err(err) => { + trace!("Failed to remove transaction from signing store, error: {:?}", err); + bail!(err); + } + } + } else { + //Add signature to the store + match self.transactions_for_signing.lock().add_signature(&private_hash, tx.signature()) { + Ok(_) => trace!("Signature stored for private transaction"), + Err(err) => { + trace!("Failed to add signature to signing store, error: {:?}", err); + bail!(err); + } + } + } + Ok(()) + } +} + /// Try to unlock account using stored password, return found password if any fn 
find_account_password(passwords: &Vec, account_provider: &AccountProvider, account: &Address) -> Option { for password in passwords { diff --git a/ethcore/private-tx/tests/private_contract.rs b/ethcore/private-tx/tests/private_contract.rs index e53ad5e5f68..e7e608c2b61 100644 --- a/ethcore/private-tx/tests/private_contract.rs +++ b/ethcore/private-tx/tests/private_contract.rs @@ -74,7 +74,7 @@ fn private_contract() { Box::new(NoopEncryptor::default()), config, io, - ).unwrap()); + )); let (address, _) = contract_address(CreateContractAddress::FromSenderAndNonce, &key1.address(), &0.into(), &[]); diff --git a/ethcore/service/Cargo.toml b/ethcore/service/Cargo.toml index b612baf5660..3a10849b61f 100644 --- a/ethcore/service/Cargo.toml +++ b/ethcore/service/Cargo.toml @@ -13,6 +13,7 @@ ethcore-sync = { path = "../sync" } kvdb = { path = "../../util/kvdb" } log = "0.3" stop-guard = { path = "../../util/stop-guard" } +trace-time = { path = "../../util/trace-time" } [dev-dependencies] tempdir = "0.3" diff --git a/ethcore/service/src/lib.rs b/ethcore/service/src/lib.rs index 1604e84b10a..d85a377cde2 100644 --- a/ethcore/service/src/lib.rs +++ b/ethcore/service/src/lib.rs @@ -28,6 +28,9 @@ extern crate error_chain; #[macro_use] extern crate log; +#[macro_use] +extern crate trace_time; + #[cfg(test)] extern crate tempdir; diff --git a/ethcore/service/src/service.rs b/ethcore/service/src/service.rs index b60d4194c90..5f46799796a 100644 --- a/ethcore/service/src/service.rs +++ b/ethcore/service/src/service.rs @@ -33,7 +33,7 @@ use ethcore::snapshot::{RestorationStatus}; use ethcore::spec::Spec; use ethcore::account_provider::AccountProvider; -use ethcore_private_tx; +use ethcore_private_tx::{self, Importer}; use Error; pub struct PrivateTxService { @@ -112,14 +112,13 @@ impl ClientService { account_provider, encryptor, private_tx_conf, - io_service.channel())?, - ); + io_service.channel(), + )); let private_tx = Arc::new(PrivateTxService::new(provider)); let client_io = Arc::new(ClientIoHandler { client: client.clone(), snapshot: snapshot.clone(), - private_tx: private_tx.clone(), }); io_service.register_handler(client_io)?; @@ -175,7 +174,6 @@ impl ClientService { struct ClientIoHandler { client: Arc, snapshot: Arc, - private_tx: Arc, } const CLIENT_TICK_TIMER: TimerToken = 0; @@ -191,6 +189,7 @@ impl IoHandler for ClientIoHandler { } fn timeout(&self, _io: &IoContext, timer: TimerToken) { + trace_time!("service::read"); match timer { CLIENT_TICK_TIMER => { use ethcore::snapshot::SnapshotService; @@ -203,20 +202,24 @@ impl IoHandler for ClientIoHandler { } fn message(&self, _io: &IoContext, net_message: &ClientIoMessage) { + trace_time!("service::message"); use std::thread; match *net_message { - ClientIoMessage::BlockVerified => { self.client.import_verified_blocks(); } - ClientIoMessage::NewTransactions(ref transactions, peer_id) => { - self.client.import_queued_transactions(transactions, peer_id); + ClientIoMessage::BlockVerified => { + self.client.import_verified_blocks(); } ClientIoMessage::BeginRestoration(ref manifest) => { if let Err(e) = self.snapshot.init_restore(manifest.clone(), true) { warn!("Failed to initialize snapshot restoration: {}", e); } } - ClientIoMessage::FeedStateChunk(ref hash, ref chunk) => self.snapshot.feed_state_chunk(*hash, chunk), - ClientIoMessage::FeedBlockChunk(ref hash, ref chunk) => self.snapshot.feed_block_chunk(*hash, chunk), + ClientIoMessage::FeedStateChunk(ref hash, ref chunk) => { + self.snapshot.feed_state_chunk(*hash, chunk) + } + 
ClientIoMessage::FeedBlockChunk(ref hash, ref chunk) => { + self.snapshot.feed_block_chunk(*hash, chunk) + } ClientIoMessage::TakeSnapshot(num) => { let client = self.client.clone(); let snapshot = self.snapshot.clone(); @@ -231,12 +234,9 @@ impl IoHandler for ClientIoHandler { debug!(target: "snapshot", "Failed to initialize periodic snapshot thread: {:?}", e); } }, - ClientIoMessage::NewMessage(ref message) => if let Err(e) = self.client.engine().handle_message(message) { - trace!(target: "poa", "Invalid message received: {}", e); - }, - ClientIoMessage::NewPrivateTransaction => if let Err(e) = self.private_tx.provider.on_private_transaction_queued() { - warn!("Failed to handle private transaction {:?}", e); - }, + ClientIoMessage::Execute(ref exec) => { + (*exec.0)(&self.client); + } _ => {} // ignore other messages } } diff --git a/ethcore/src/client/ancient_import.rs b/ethcore/src/client/ancient_import.rs index 13699ea5a0e..c2523a13a56 100644 --- a/ethcore/src/client/ancient_import.rs +++ b/ethcore/src/client/ancient_import.rs @@ -32,16 +32,16 @@ const HEAVY_VERIFY_RATE: f32 = 0.02; /// Ancient block verifier: import an ancient sequence of blocks in order from a starting /// epoch. pub struct AncientVerifier { - cur_verifier: RwLock>>, + cur_verifier: RwLock>>>, engine: Arc, } impl AncientVerifier { - /// Create a new ancient block verifier with the given engine and initial verifier. - pub fn new(engine: Arc, start_verifier: Box>) -> Self { + /// Create a new ancient block verifier with the given engine. + pub fn new(engine: Arc) -> Self { AncientVerifier { - cur_verifier: RwLock::new(start_verifier), - engine: engine, + cur_verifier: RwLock::new(None), + engine, } } @@ -53,17 +53,49 @@ impl AncientVerifier { header: &Header, chain: &BlockChain, ) -> Result<(), ::error::Error> { - match rng.gen::() <= HEAVY_VERIFY_RATE { - true => self.cur_verifier.read().verify_heavy(header)?, - false => self.cur_verifier.read().verify_light(header)?, + // perform verification + let verified = if let Some(ref cur_verifier) = *self.cur_verifier.read() { + match rng.gen::() <= HEAVY_VERIFY_RATE { + true => cur_verifier.verify_heavy(header)?, + false => cur_verifier.verify_light(header)?, + } + true + } else { + false + }; + + // when there is no verifier initialize it. + // We use a bool flag to avoid double locking in the happy case + if !verified { + { + let mut cur_verifier = self.cur_verifier.write(); + if cur_verifier.is_none() { + *cur_verifier = Some(self.initial_verifier(header, chain)?); + } + } + // Call again to verify. + return self.verify(rng, header, chain); } // ancient import will only use transitions obtained from the snapshot. 
if let Some(transition) = chain.epoch_transition(header.number(), header.hash()) { let v = self.engine.epoch_verifier(&header, &transition.proof).known_confirmed()?; - *self.cur_verifier.write() = v; + *self.cur_verifier.write() = Some(v); } Ok(()) } + + fn initial_verifier(&self, header: &Header, chain: &BlockChain) + -> Result>, ::error::Error> + { + trace!(target: "client", "Initializing ancient block restoration."); + let current_epoch_data = chain.epoch_transitions() + .take_while(|&(_, ref t)| t.block_number < header.number()) + .last() + .map(|(_, t)| t.proof) + .expect("At least one epoch entry (genesis) always stored; qed"); + + self.engine.epoch_verifier(&header, ¤t_epoch_data).known_confirmed() + } } diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 8119ebd35f6..bffa4e38ba8 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -15,15 +15,16 @@ // along with Parity. If not, see . use std::collections::{HashSet, BTreeMap, BTreeSet, VecDeque}; +use std::fmt; use std::str::FromStr; -use std::sync::{Arc, Weak}; use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering}; +use std::sync::{Arc, Weak}; use std::time::{Instant, Duration}; -use itertools::Itertools; // util use hash::keccak; use bytes::Bytes; +use itertools::Itertools; use journaldb; use trie::{TrieSpec, TrieFactory, Trie}; use kvdb::{DBValue, KeyValueDB, DBTransaction}; @@ -45,7 +46,8 @@ use client::{ use client::{ BlockId, TransactionId, UncleId, TraceId, ClientConfig, BlockChainClient, TraceFilter, CallAnalytics, BlockImportError, Mode, - ChainNotify, ChainRoute, PruningInfo, ProvingBlockChainClient, EngineInfo, ChainMessageType + ChainNotify, ChainRoute, PruningInfo, ProvingBlockChainClient, EngineInfo, ChainMessageType, + IoClient, }; use encoded; use engines::{EthEngine, EpochTransition}; @@ -55,7 +57,7 @@ use evm::Schedule; use executive::{Executive, Executed, TransactOptions, contract_address}; use factory::{Factories, VmFactory}; use header::{BlockNumber, Header}; -use io::IoChannel; +use io::{IoChannel, IoError}; use log_entry::LocalizedLogEntry; use miner::{Miner, MinerService}; use ethcore_miner::pool::VerifiedTransaction; @@ -85,6 +87,7 @@ pub use verification::queue::QueueInfo as BlockQueueInfo; use_contract!(registry, "Registry", "res/contracts/registrar.json"); const MAX_TX_QUEUE_SIZE: usize = 4096; +const MAX_ANCIENT_BLOCKS_QUEUE_SIZE: usize = 4096; const MAX_QUEUE_SIZE_TO_SLEEP_ON: usize = 2; const MIN_HISTORY_SIZE: u64 = 8; @@ -154,10 +157,7 @@ struct Importer { pub miner: Arc, /// Ancient block verifier: import an ancient sequence of blocks in order from a starting epoch - pub ancient_verifier: Mutex>, - - /// Random number generator used by `AncientVerifier` - pub rng: Mutex, + pub ancient_verifier: AncientVerifier, /// Ethereum engine to be used during import pub engine: Arc, @@ -204,8 +204,13 @@ pub struct Client { /// List of actors to be notified on certain chain events notify: RwLock>>, - /// Count of pending transactions in the queue - queue_transactions: AtomicUsize, + /// Queued transactions from IO + queue_transactions: IoChannelQueue, + /// Ancient blocks import queue + queue_ancient_blocks: IoChannelQueue, + /// Consensus messages import queue + queue_consensus_message: IoChannelQueue, + last_hashes: RwLock>, factories: Factories, @@ -239,8 +244,7 @@ impl Importer { verifier: verification::new(config.verifier_type.clone()), block_queue, miner, - ancient_verifier: Mutex::new(None), - rng: Mutex::new(OsRng::new()?), + 
ancient_verifier: AncientVerifier::new(engine.clone()), engine, }) } @@ -416,55 +420,25 @@ impl Importer { Ok(locked_block) } + /// Import a block with transaction receipts. /// /// The block is guaranteed to be the next best blocks in the /// first block sequence. Does no sealing or transaction validation. - fn import_old_block(&self, header: &Header, block_bytes: Bytes, receipts_bytes: Bytes, db: &KeyValueDB, chain: &BlockChain) -> Result { - let receipts = ::rlp::decode_list(&receipts_bytes); + fn import_old_block(&self, header: &Header, block_bytes: &[u8], receipts_bytes: &[u8], db: &KeyValueDB, chain: &BlockChain) -> Result { + let receipts = ::rlp::decode_list(receipts_bytes); let hash = header.hash(); let _import_lock = self.import_lock.lock(); { trace_time!("import_old_block"); - let mut ancient_verifier = self.ancient_verifier.lock(); - - { - // closure for verifying a block. - let verify_with = |verifier: &AncientVerifier| -> Result<(), ::error::Error> { - // verify the block, passing the chain for updating the epoch - // verifier. - let mut rng = OsRng::new().map_err(UtilError::from)?; - verifier.verify(&mut rng, &header, &chain) - }; - - // initialize the ancient block verifier if we don't have one already. - match &mut *ancient_verifier { - &mut Some(ref verifier) => { - verify_with(verifier)? - } - x @ &mut None => { - // load most recent epoch. - trace!(target: "client", "Initializing ancient block restoration."); - let current_epoch_data = chain.epoch_transitions() - .take_while(|&(_, ref t)| t.block_number < header.number()) - .last() - .map(|(_, t)| t.proof) - .expect("At least one epoch entry (genesis) always stored; qed"); - - let current_verifier = self.engine.epoch_verifier(&header, ¤t_epoch_data) - .known_confirmed()?; - let current_verifier = AncientVerifier::new(self.engine.clone(), current_verifier); - - verify_with(¤t_verifier)?; - *x = Some(current_verifier); - } - } - } + // verify the block, passing the chain for updating the epoch verifier. 
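[editor's note] The rewritten `AncientVerifier` above keeps its epoch verifier in an `RwLock<Option<_>>`: shared lock on the hot path, re-check under the write lock before initialising. The same double-checked shape in isolation (toy payload; the real `verify` returns a `Result` and re-enters itself after initialising):

```rust
use parking_lot::RwLock;

struct Lazy {
    inner: RwLock<Option<u64>>,
}

impl Lazy {
    fn get_or_init<F: FnOnce() -> u64>(&self, init: F) -> u64 {
        if let Some(v) = *self.inner.read() {
            return v; // fast path: read lock only
        }
        let mut guard = self.inner.write();
        if guard.is_none() {
            // re-check under the write lock: another writer may have won
            *guard = Some(init());
        }
        (*guard).expect("just set; qed")
    }
}

fn main() {
    let l = Lazy { inner: RwLock::new(None) };
    assert_eq!(l.get_or_init(|| 7), 7);
    assert_eq!(l.get_or_init(|| 9), 7); // already initialized
}
```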
+ let mut rng = OsRng::new().map_err(UtilError::from)?; + self.ancient_verifier.verify(&mut rng, &header, &chain)?; // Commit results let mut batch = DBTransaction::new(); - chain.insert_unordered_block(&mut batch, &block_bytes, receipts, None, false, true); + chain.insert_unordered_block(&mut batch, block_bytes, receipts, None, false, true); // Final commit to the DB db.write_buffered(batch); chain.commit(); @@ -734,7 +708,9 @@ impl Client { report: RwLock::new(Default::default()), io_channel: Mutex::new(message_channel), notify: RwLock::new(Vec::new()), - queue_transactions: AtomicUsize::new(0), + queue_transactions: IoChannelQueue::new(MAX_TX_QUEUE_SIZE), + queue_ancient_blocks: IoChannelQueue::new(MAX_ANCIENT_BLOCKS_QUEUE_SIZE), + queue_consensus_message: IoChannelQueue::new(usize::max_value()), last_hashes: RwLock::new(VecDeque::new()), factories: factories, history: history, @@ -820,7 +796,7 @@ impl Client { } fn notify(&self, f: F) where F: Fn(&ChainNotify) { - for np in self.notify.read().iter() { + for np in &*self.notify.read() { if let Some(n) = np.upgrade() { f(&*n); } @@ -954,24 +930,6 @@ impl Client { } } - /// Import transactions from the IO queue - pub fn import_queued_transactions(&self, transactions: &[Bytes], peer_id: usize) -> usize { - trace_time!("import_queued_transactions"); - self.queue_transactions.fetch_sub(transactions.len(), AtomicOrdering::SeqCst); - - let txs: Vec = transactions - .iter() - .filter_map(|bytes| self.engine().decode_transaction(bytes).ok()) - .collect(); - - self.notify(|notify| { - notify.transactions_received(&txs, peer_id); - }); - - let results = self.importer.miner.import_external_transactions(self, txs); - results.len() - } - /// Get shared miner reference. #[cfg(test)] pub fn miner(&self) -> Arc { @@ -1392,22 +1350,6 @@ impl ImportBlock for Client { } Ok(self.importer.block_queue.import(unverified)?) 
} - - fn import_block_with_receipts(&self, block_bytes: Bytes, receipts_bytes: Bytes) -> Result { - let header: Header = ::rlp::Rlp::new(&block_bytes).val_at(0)?; - { - // check block order - if self.chain.read().is_known(&header.hash()) { - bail!(BlockImportErrorKind::Import(ImportErrorKind::AlreadyInChain)); - } - let status = self.block_status(BlockId::Hash(*header.parent_hash())); - if status == BlockStatus::Unknown || status == BlockStatus::Pending { - bail!(BlockImportErrorKind::Block(BlockError::UnknownParent(*header.parent_hash()))); - } - } - - self.importer.import_old_block(&header, block_bytes, receipts_bytes, &**self.db.read(), &*self.chain.read()).map_err(Into::into) - } } impl StateClient for Client { @@ -1958,35 +1900,10 @@ impl BlockChainClient for Client { (*self.build_last_hashes(&self.chain.read().best_block_hash())).clone() } - fn queue_transactions(&self, transactions: Vec, peer_id: usize) { - let queue_size = self.queue_transactions.load(AtomicOrdering::Relaxed); - trace!(target: "external_tx", "Queue size: {}", queue_size); - if queue_size > MAX_TX_QUEUE_SIZE { - debug!("Ignoring {} transactions: queue is full", transactions.len()); - } else { - let len = transactions.len(); - match self.io_channel.lock().send(ClientIoMessage::NewTransactions(transactions, peer_id)) { - Ok(_) => { - self.queue_transactions.fetch_add(len, AtomicOrdering::SeqCst); - } - Err(e) => { - debug!("Ignoring {} transactions: error queueing: {}", len, e); - } - } - } - } - fn ready_transactions(&self) -> Vec> { self.importer.miner.ready_transactions(self) } - fn queue_consensus_message(&self, message: Bytes) { - let channel = self.io_channel.lock().clone(); - if let Err(e) = channel.send(ClientIoMessage::NewMessage(message)) { - debug!("Ignoring the message, error queueing: {}", e); - } - } - fn signing_chain_id(&self) -> Option { self.engine.signing_chain_id(&self.latest_env_info()) } @@ -2034,6 +1951,72 @@ impl BlockChainClient for Client { } } +impl IoClient for Client { + fn queue_transactions(&self, transactions: Vec, peer_id: usize) { + let len = transactions.len(); + self.queue_transactions.queue(&mut self.io_channel.lock(), len, move |client| { + trace_time!("import_queued_transactions"); + + let txs: Vec = transactions + .iter() + .filter_map(|bytes| client.engine.decode_transaction(bytes).ok()) + .collect(); + + client.notify(|notify| { + notify.transactions_received(&txs, peer_id); + }); + + client.importer.miner.import_external_transactions(client, txs); + }).unwrap_or_else(|e| { + debug!(target: "client", "Ignoring {} transactions: {}", len, e); + }); + } + + fn queue_ancient_block(&self, block_bytes: Bytes, receipts_bytes: Bytes) -> Result { + let header: Header = ::rlp::Rlp::new(&block_bytes).val_at(0)?; + let hash = header.hash(); + + { + // check block order + if self.chain.read().is_known(&header.hash()) { + bail!(BlockImportErrorKind::Import(ImportErrorKind::AlreadyInChain)); + } + let status = self.block_status(BlockId::Hash(*header.parent_hash())); + if status == BlockStatus::Unknown || status == BlockStatus::Pending { + bail!(BlockImportErrorKind::Block(BlockError::UnknownParent(*header.parent_hash()))); + } + } + + match self.queue_ancient_blocks.queue(&mut self.io_channel.lock(), 1, move |client| { + client.importer.import_old_block( + &header, + &block_bytes, + &receipts_bytes, + &**client.db.read(), + &*client.chain.read() + ).map(|_| ()).unwrap_or_else(|e| { + error!(target: "client", "Error importing ancient block: {}", e); + }); + }) { + Ok(_) => Ok(hash), + Err(e) 
=> bail!(BlockImportErrorKind::Other(format!("{}", e))),
+		}
+	}
+
+	fn queue_consensus_message(&self, message: Bytes) {
+		match self.queue_consensus_message.queue(&mut self.io_channel.lock(), 1, move |client| {
+			if let Err(e) = client.engine().handle_message(&message) {
+				debug!(target: "poa", "Invalid message received: {}", e);
+			}
+		}) {
+			Ok(_) => (),
+			Err(e) => {
+				debug!(target: "poa", "Ignoring the message, error queueing: {}", e);
+			}
+		}
+	}
+}
+
 impl ReopenBlock for Client {
 	fn reopen_block(&self, block: ClosedBlock) -> OpenBlock {
 		let engine = &*self.engine;
@@ -2409,3 +2392,54 @@ mod tests {
 		});
 	}
 }
+
+#[derive(Debug)]
+enum QueueError {
+	Channel(IoError),
+	Full(usize),
+}
+
+impl fmt::Display for QueueError {
+	fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+		match *self {
+			QueueError::Channel(ref c) => fmt::Display::fmt(c, fmt),
+			QueueError::Full(limit) => write!(fmt, "The queue is full ({})", limit),
+		}
+	}
+}
+
+/// Queue some items to be processed by IO client.
+struct IoChannelQueue {
+	currently_queued: Arc<AtomicUsize>,
+	limit: usize,
+}
+
+impl IoChannelQueue {
+	pub fn new(limit: usize) -> Self {
+		IoChannelQueue {
+			currently_queued: Default::default(),
+			limit,
+		}
+	}
+
+	pub fn queue<F>(&self, channel: &mut IoChannel<ClientIoMessage>, count: usize, fun: F) -> Result<(), QueueError> where
+		F: Fn(&Client) + Send + Sync + 'static,
+	{
+		let queue_size = self.currently_queued.load(AtomicOrdering::Relaxed);
+		ensure!(queue_size < self.limit, QueueError::Full(self.limit));
+
+		let currently_queued = self.currently_queued.clone();
+		let result = channel.send(ClientIoMessage::execute(move |client| {
+			currently_queued.fetch_sub(count, AtomicOrdering::SeqCst);
+			fun(client);
+		}));
+
+		match result {
+			Ok(_) => {
+				self.currently_queued.fetch_add(count, AtomicOrdering::SeqCst);
+				Ok(())
+			},
+			Err(e) => Err(QueueError::Channel(e)),
+		}
+	}
+}
diff --git a/ethcore/src/client/io_message.rs b/ethcore/src/client/io_message.rs
index e19d3054fe0..817c7260205 100644
--- a/ethcore/src/client/io_message.rs
+++ b/ethcore/src/client/io_message.rs
@@ -14,19 +14,19 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
-use ethereum_types::H256;
+use std::fmt;
 use bytes::Bytes;
+use client::Client;
+use ethereum_types::H256;
 use snapshot::ManifestData;
 
 /// Message type for external and internal events
-#[derive(Clone, PartialEq, Eq, Debug)]
+#[derive(Debug)]
 pub enum ClientIoMessage {
 	/// Best Block Hash in chain has been changed
 	NewChainHead,
 	/// A block is ready
 	BlockVerified,
-	/// New transaction RLPs are ready to be imported
-	NewTransactions(Vec<Bytes>, usize),
 	/// Begin snapshot restoration
 	BeginRestoration(ManifestData),
 	/// Feed a state chunk to the snapshot service
@@ -35,9 +35,23 @@ pub enum ClientIoMessage {
 	FeedBlockChunk(H256, Bytes),
 	/// Take a snapshot for the block with given number.
 	TakeSnapshot(u64),
-	/// New consensus message received.
-	NewMessage(Bytes),
-	/// New private transaction arrived
-	NewPrivateTransaction,
+	/// Execute wrapped closure
+	Execute(Callback),
+}
+
+impl ClientIoMessage {
+	/// Create new `ClientIoMessage` that executes given procedure.
+	pub fn execute<F: Fn(&Client) + Send + Sync + 'static>(fun: F) -> Self {
+		ClientIoMessage::Execute(Callback(Box::new(fun)))
+	}
+}
+
+/// A function to invoke in the client thread.
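[editor's note] Call sites now send a boxed closure instead of growing the enum; the `Callback` newtype defined next makes that payload `Debug`-printable. A usage sketch (an `IoChannel<ClientIoMessage>` as elsewhere in this patch; `log` macros assumed in scope):

```rust
fn queue_tick(channel: &mut IoChannel<ClientIoMessage>) {
    let sent = channel.send(ClientIoMessage::execute(move |client| {
        // this closure runs on the client service thread
        client.import_verified_blocks();
    }));
    if let Err(e) = sent {
        debug!(target: "client", "Ignoring tick, error queueing: {}", e);
    }
}
```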
+pub struct Callback(pub Box); + +impl fmt::Debug for Callback { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "") + } } diff --git a/ethcore/src/client/mod.rs b/ethcore/src/client/mod.rs index 05e2018258f..4c410d30117 100644 --- a/ethcore/src/client/mod.rs +++ b/ethcore/src/client/mod.rs @@ -36,9 +36,8 @@ pub use self::traits::{ Nonce, Balance, ChainInfo, BlockInfo, ReopenBlock, PrepareOpenBlock, CallContract, TransactionInfo, RegistryInfo, ScheduleInfo, ImportSealedBlock, BroadcastProposalBlock, ImportBlock, StateOrBlock, StateClient, Call, EngineInfo, AccountData, BlockChain, BlockProducer, SealedBlockImporter }; -//pub use self::private_notify::PrivateNotify; pub use state::StateInfo; -pub use self::traits::{BlockChainClient, EngineClient, ProvingBlockChainClient}; +pub use self::traits::{BlockChainClient, EngineClient, ProvingBlockChainClient, IoClient}; pub use types::ids::*; pub use types::trace_filter::Filter as TraceFilter; diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index c2e06009b64..b229159667d 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -39,7 +39,7 @@ use client::{ PrepareOpenBlock, BlockChainClient, BlockChainInfo, BlockStatus, BlockId, TransactionId, UncleId, TraceId, TraceFilter, LastHashes, CallAnalytics, BlockImportError, ProvingBlockChainClient, ScheduleInfo, ImportSealedBlock, BroadcastProposalBlock, ImportBlock, StateOrBlock, - Call, StateClient, EngineInfo, AccountData, BlockChain, BlockProducer, SealedBlockImporter + Call, StateClient, EngineInfo, AccountData, BlockChain, BlockProducer, SealedBlockImporter, IoClient }; use db::{NUM_COLUMNS, COL_STATE}; use header::{Header as BlockHeader, BlockNumber}; @@ -556,10 +556,6 @@ impl ImportBlock for TestBlockChainClient { } Ok(h) } - - fn import_block_with_receipts(&self, b: Bytes, _r: Bytes) -> Result { - self.import_block(b) - } } impl Call for TestBlockChainClient { @@ -809,16 +805,6 @@ impl BlockChainClient for TestBlockChainClient { self.traces.read().clone() } - fn queue_transactions(&self, transactions: Vec, _peer_id: usize) { - // import right here - let txs = transactions.into_iter().filter_map(|bytes| Rlp::new(&bytes).as_val().ok()).collect(); - self.miner.import_external_transactions(self, txs); - } - - fn queue_consensus_message(&self, message: Bytes) { - self.spec.engine.handle_message(&message).unwrap(); - } - fn ready_transactions(&self) -> Vec> { self.miner.ready_transactions(self) } @@ -863,6 +849,22 @@ impl BlockChainClient for TestBlockChainClient { fn eip86_transition(&self) -> u64 { u64::max_value() } } +impl IoClient for TestBlockChainClient { + fn queue_transactions(&self, transactions: Vec, _peer_id: usize) { + // import right here + let txs = transactions.into_iter().filter_map(|bytes| Rlp::new(&bytes).as_val().ok()).collect(); + self.miner.import_external_transactions(self, txs); + } + + fn queue_ancient_block(&self, b: Bytes, _r: Bytes) -> Result { + self.import_block(b) + } + + fn queue_consensus_message(&self, message: Bytes) { + self.spec.engine.handle_message(&message).unwrap(); + } +} + impl ProvingBlockChainClient for TestBlockChainClient { fn prove_storage(&self, _: H256, _: H256, _: BlockId) -> Option<(Vec, H256)> { None diff --git a/ethcore/src/client/traits.rs b/ethcore/src/client/traits.rs index 7d4d5846c69..358e24fa905 100644 --- a/ethcore/src/client/traits.rs +++ b/ethcore/src/client/traits.rs @@ -168,9 +168,6 @@ pub trait RegistryInfo { pub trait ImportBlock { /// Import a 
 	fn import_block(&self, bytes: Bytes) -> Result<H256, BlockImportError>;
-
-	/// Import a block with transaction receipts. Does no sealing and transaction validation.
-	fn import_block_with_receipts(&self, block_bytes: Bytes, receipts_bytes: Bytes) -> Result<H256, BlockImportError>;
 }
 
 /// Provides `call_contract` method
@@ -201,8 +198,21 @@ pub trait EngineInfo {
 	fn engine(&self) -> &EthEngine;
 }
 
+/// IO operations that should off-load heavy work to another thread.
+pub trait IoClient: Sync + Send {
+	/// Queue transactions for importing.
+	fn queue_transactions(&self, transactions: Vec<Bytes>, peer_id: usize);
+
+	/// Queue block import with transaction receipts. Does no sealing and transaction validation.
+	fn queue_ancient_block(&self, block_bytes: Bytes, receipts_bytes: Bytes) -> Result<H256, BlockImportError>;
+
+	/// Queue consensus engine message.
+	fn queue_consensus_message(&self, message: Bytes);
+}
+
 /// Blockchain database client. Owns and manages a blockchain and a block queue.
-pub trait BlockChainClient : Sync + Send + AccountData + BlockChain + CallContract + RegistryInfo + ImportBlock {
+pub trait BlockChainClient : Sync + Send + AccountData + BlockChain + CallContract + RegistryInfo + ImportBlock
++ IoClient {
 	/// Look up the block number for the given block ID.
 	fn block_number(&self, id: BlockId) -> Option<BlockNumber>;
@@ -310,12 +320,6 @@ pub trait BlockChainClient : Sync + Send + AccountData + BlockChain + CallContra
 	/// Get last hashes starting from best block.
 	fn last_hashes(&self) -> LastHashes;
 
-	/// Queue transactions for importing.
-	fn queue_transactions(&self, transactions: Vec<Bytes>, peer_id: usize);
-
-	/// Queue consensus engine message.
-	fn queue_consensus_message(&self, message: Bytes);
-
 	/// List all transactions that are allowed into the next block.
 	fn ready_transactions(&self) -> Vec<Arc<VerifiedTransaction>>;
diff --git a/ethcore/src/views/block.rs b/ethcore/src/views/block.rs
index f610504d85f..3bed1818f24 100644
--- a/ethcore/src/views/block.rs
+++ b/ethcore/src/views/block.rs
@@ -29,7 +29,6 @@ pub struct BlockView<'a> {
 	rlp: ViewRlp<'a>
 }
 
-
 impl<'a> BlockView<'a> {
 	/// Creates new view onto block from rlp.
 	/// Use the `view!` macro to create this view in order to capture debugging info.
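The `IoClient` trait introduced in `traits.rs` above is the seam between network threads and heavy import work: callers enqueue, a worker imports. A minimal sketch of that queueing pattern, assuming a toy client; the `Work` enum, the background thread, the `QueueingClient` name, and the `println!` bodies are illustrative stand-ins for the real block-queue plumbing, and the `Result` return type of `queue_ancient_block` is elided for brevity:

```rust
// Sketch only: the off-loading pattern behind `IoClient`, not the real Client.
use std::sync::Mutex;
use std::sync::mpsc::{channel, Sender};
use std::thread;

type Bytes = Vec<u8>;

pub trait IoClient: Sync + Send {
	fn queue_transactions(&self, transactions: Vec<Bytes>, peer_id: usize);
	fn queue_ancient_block(&self, block_bytes: Bytes, receipts_bytes: Bytes);
	fn queue_consensus_message(&self, message: Bytes);
}

// Work items handed off to a background thread (hypothetical enum).
enum Work {
	Transactions(Vec<Bytes>, usize),
	AncientBlock(Bytes, Bytes),
	ConsensusMessage(Bytes),
}

pub struct QueueingClient {
	// `Sender` is `Send` but not `Sync`, so it is wrapped in a `Mutex`
	// to satisfy the `Sync + Send` bound on the trait.
	sender: Mutex<Sender<Work>>,
}

impl QueueingClient {
	pub fn new() -> QueueingClient {
		let (tx, rx) = channel();
		// A single worker drains the queue, so network threads never
		// block on heavy import work.
		thread::spawn(move || {
			for work in rx {
				match work {
					Work::Transactions(txs, peer) =>
						println!("importing {} transactions from peer {}", txs.len(), peer),
					Work::AncientBlock(block, receipts) =>
						println!("importing ancient block ({} + {} bytes)", block.len(), receipts.len()),
					Work::ConsensusMessage(msg) =>
						println!("dispatching {}-byte consensus message", msg.len()),
				}
			}
		});
		QueueingClient { sender: Mutex::new(tx) }
	}
}

impl IoClient for QueueingClient {
	fn queue_transactions(&self, transactions: Vec<Bytes>, peer_id: usize) {
		let _ = self.sender.lock().unwrap().send(Work::Transactions(transactions, peer_id));
	}
	fn queue_ancient_block(&self, block_bytes: Bytes, receipts_bytes: Bytes) {
		let _ = self.sender.lock().unwrap().send(Work::AncientBlock(block_bytes, receipts_bytes));
	}
	fn queue_consensus_message(&self, message: Bytes) {
		let _ = self.sender.lock().unwrap().send(Work::ConsensusMessage(message));
	}
}
```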
@@ -39,9 +38,9 @@ impl<'a> BlockView<'a> { /// ``` /// #[macro_use] /// extern crate ethcore; - /// + /// /// use ethcore::views::{BlockView}; - /// + /// /// fn main() { /// let bytes : &[u8] = &[]; /// let block_view = view!(BlockView, bytes); diff --git a/ethcore/sync/Cargo.toml b/ethcore/sync/Cargo.toml index d2d060e686f..ba03075d0e3 100644 --- a/ethcore/sync/Cargo.toml +++ b/ethcore/sync/Cargo.toml @@ -30,6 +30,7 @@ heapsize = "0.4" semver = "0.9" smallvec = { version = "0.4", features = ["heapsizeof"] } parking_lot = "0.5" +trace-time = { path = "../../util/trace-time" } ipnetwork = "0.12.6" [dev-dependencies] diff --git a/ethcore/sync/src/api.rs b/ethcore/sync/src/api.rs index 7690eeb864b..4fd0cbb54dd 100644 --- a/ethcore/sync/src/api.rs +++ b/ethcore/sync/src/api.rs @@ -379,10 +379,12 @@ impl NetworkProtocolHandler for SyncProtocolHandler { } fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { + trace_time!("sync::read"); ChainSync::dispatch_packet(&self.sync, &mut NetSyncIo::new(io, &*self.chain, &*self.snapshot_service, &self.overlay), *peer, packet_id, data); } fn connected(&self, io: &NetworkContext, peer: &PeerId) { + trace_time!("sync::connected"); // If warp protocol is supported only allow warp handshake let warp_protocol = io.protocol_version(WARP_SYNC_PROTOCOL_ID, *peer).unwrap_or(0) != 0; let warp_context = io.subprotocol_name() == WARP_SYNC_PROTOCOL_ID; @@ -392,12 +394,14 @@ impl NetworkProtocolHandler for SyncProtocolHandler { } fn disconnected(&self, io: &NetworkContext, peer: &PeerId) { + trace_time!("sync::disconnected"); if io.subprotocol_name() != WARP_SYNC_PROTOCOL_ID { self.sync.write().on_peer_aborting(&mut NetSyncIo::new(io, &*self.chain, &*self.snapshot_service, &self.overlay), *peer); } } fn timeout(&self, io: &NetworkContext, _timer: TimerToken) { + trace_time!("sync::timeout"); let mut io = NetSyncIo::new(io, &*self.chain, &*self.snapshot_service, &self.overlay); self.sync.write().maintain_peers(&mut io); self.sync.write().maintain_sync(&mut io); diff --git a/ethcore/sync/src/block_sync.rs b/ethcore/sync/src/block_sync.rs index 4a5acae526b..7411fa30ccf 100644 --- a/ethcore/sync/src/block_sync.rs +++ b/ethcore/sync/src/block_sync.rs @@ -496,7 +496,7 @@ impl BlockDownloader { } let result = if let Some(receipts) = receipts { - io.chain().import_block_with_receipts(block, receipts) + io.chain().queue_ancient_block(block, receipts) } else { io.chain().import_block(block) }; diff --git a/ethcore/sync/src/chain.rs b/ethcore/sync/src/chain.rs index 25d9a09f6bc..1a6af101159 100644 --- a/ethcore/sync/src/chain.rs +++ b/ethcore/sync/src/chain.rs @@ -1789,10 +1789,13 @@ impl ChainSync { } pub fn on_packet(&mut self, io: &mut SyncIo, peer: PeerId, packet_id: u8, data: &[u8]) { + debug!(target: "sync", "{} -> Dispatching packet: {}", peer, packet_id); + if packet_id != STATUS_PACKET && !self.peers.contains_key(&peer) { debug!(target:"sync", "Unexpected packet {} from unregistered peer: {}:{}", packet_id, peer, io.peer_info(peer)); return; } + let rlp = Rlp::new(data); let result = match packet_id { STATUS_PACKET => self.on_peer_status(io, peer, &rlp), @@ -1831,7 +1834,7 @@ impl ChainSync { PeerAsking::SnapshotData => elapsed > SNAPSHOT_DATA_TIMEOUT, }; if timeout { - trace!(target:"sync", "Timeout {}", peer_id); + debug!(target:"sync", "Timeout {}", peer_id); io.disconnect_peer(*peer_id); aborting.push(*peer_id); } diff --git a/ethcore/sync/src/lib.rs b/ethcore/sync/src/lib.rs index a3e24bdb823..3eb2e8332b4 100644 --- 
a/ethcore/sync/src/lib.rs +++ b/ethcore/sync/src/lib.rs @@ -54,6 +54,8 @@ extern crate macros; extern crate log; #[macro_use] extern crate heapsize; +#[macro_use] +extern crate trace_time; mod chain; mod blocks; diff --git a/ethcore/sync/src/tests/helpers.rs b/ethcore/sync/src/tests/helpers.rs index dc52fdd8b85..3a4697cc093 100644 --- a/ethcore/sync/src/tests/helpers.rs +++ b/ethcore/sync/src/tests/helpers.rs @@ -520,11 +520,9 @@ impl TestIoHandler { impl IoHandler for TestIoHandler { fn message(&self, _io: &IoContext, net_message: &ClientIoMessage) { match *net_message { - ClientIoMessage::NewMessage(ref message) => if let Err(e) = self.client.engine().handle_message(message) { - panic!("Invalid message received: {}", e); - }, - ClientIoMessage::NewPrivateTransaction => { + ClientIoMessage::Execute(ref exec) => { *self.private_tx_queued.lock() += 1; + (*exec.0)(&self.client); }, _ => {} // ignore other messages } diff --git a/ethcore/sync/src/tests/private.rs b/ethcore/sync/src/tests/private.rs index a9e8718e5e7..b54240bfb87 100644 --- a/ethcore/sync/src/tests/private.rs +++ b/ethcore/sync/src/tests/private.rs @@ -24,7 +24,7 @@ use ethcore::CreateContractAddress; use transaction::{Transaction, Action}; use ethcore::executive::{contract_address}; use ethcore::test_helpers::{push_block_with_transactions}; -use ethcore_private_tx::{Provider, ProviderConfig, NoopEncryptor}; +use ethcore_private_tx::{Provider, ProviderConfig, NoopEncryptor, Importer}; use ethcore::account_provider::AccountProvider; use ethkey::{KeyPair}; use tests::helpers::{TestNet, TestIoHandler}; @@ -84,7 +84,7 @@ fn send_private_transaction() { Box::new(NoopEncryptor::default()), signer_config, IoChannel::to_handler(Arc::downgrade(&io_handler0)), - ).unwrap()); + )); pm0.add_notify(net.peers[0].clone()); let pm1 = Arc::new(Provider::new( @@ -94,7 +94,7 @@ fn send_private_transaction() { Box::new(NoopEncryptor::default()), validator_config, IoChannel::to_handler(Arc::downgrade(&io_handler1)), - ).unwrap()); + )); pm1.add_notify(net.peers[1].clone()); // Create and deploy contract @@ -133,7 +133,6 @@ fn send_private_transaction() { //process received private transaction message let private_transaction = received_private_transactions[0].clone(); assert!(pm1.import_private_transaction(&private_transaction).is_ok()); - assert!(pm1.on_private_transaction_queued().is_ok()); //send signed response net.sync(); @@ -147,4 +146,4 @@ fn send_private_transaction() { assert!(pm0.import_signed_private_transaction(&signed_private_transaction).is_ok()); let local_transactions = net.peer(0).miner.local_transactions(); assert_eq!(local_transactions.len(), 1); -} \ No newline at end of file +} diff --git a/util/io/src/lib.rs b/util/io/src/lib.rs index 20b908ac91d..9232b2a909a 100644 --- a/util/io/src/lib.rs +++ b/util/io/src/lib.rs @@ -106,7 +106,7 @@ impl From<::std::io::Error> for IoError { } } -impl From>> for IoError where Message: Send + Clone { +impl From>> for IoError where Message: Send { fn from(_err: NotifyError>) -> IoError { IoError::Mio(::std::io::Error::new(::std::io::ErrorKind::ConnectionAborted, "Network IO notification error")) } @@ -115,7 +115,7 @@ impl From>> for IoError where M /// Generic IO handler. /// All the handler function are called from within IO event loop. 
/// `Message` type is used as notification data -pub trait IoHandler: Send + Sync where Message: Send + Sync + Clone + 'static { +pub trait IoHandler: Send + Sync where Message: Send + Sync + 'static { /// Initialize the handler fn initialize(&self, _io: &IoContext) {} /// Timer function called after a timeout created with `HandlerIo::timeout`. diff --git a/util/io/src/service.rs b/util/io/src/service.rs index 19f2d4b3bb5..0de674ae121 100644 --- a/util/io/src/service.rs +++ b/util/io/src/service.rs @@ -41,7 +41,7 @@ const MAX_HANDLERS: usize = 8; /// Messages used to communicate with the event loop from other threads. #[derive(Clone)] -pub enum IoMessage where Message: Send + Clone + Sized { +pub enum IoMessage where Message: Send + Sized { /// Shutdown the event loop Shutdown, /// Register a new protocol handler. @@ -74,16 +74,16 @@ pub enum IoMessage where Message: Send + Clone + Sized { token: StreamToken, }, /// Broadcast a message across all protocol handlers. - UserMessage(Message) + UserMessage(Arc) } /// IO access point. This is passed to all IO handlers and provides an interface to the IO subsystem. -pub struct IoContext where Message: Send + Clone + Sync + 'static { +pub struct IoContext where Message: Send + Sync + 'static { channel: IoChannel, handler: HandlerId, } -impl IoContext where Message: Send + Clone + Sync + 'static { +impl IoContext where Message: Send + Sync + 'static { /// Create a new IO access point. Takes references to all the data that can be updated within the IO handler. pub fn new(channel: IoChannel, handler: HandlerId) -> IoContext { IoContext { @@ -187,7 +187,7 @@ pub struct IoManager where Message: Send + Sync { work_ready: Arc, } -impl IoManager where Message: Send + Sync + Clone + 'static { +impl IoManager where Message: Send + Sync + 'static { /// Creates a new instance and registers it with the event loop. pub fn start( event_loop: &mut EventLoop>, @@ -219,7 +219,7 @@ impl IoManager where Message: Send + Sync + Clone + 'static { } } -impl Handler for IoManager where Message: Send + Clone + Sync + 'static { +impl Handler for IoManager where Message: Send + Sync + 'static { type Timeout = Token; type Message = IoMessage; @@ -317,7 +317,12 @@ impl Handler for IoManager where Message: Send + Clone + Sync for id in 0 .. MAX_HANDLERS { if let Some(h) = self.handlers.read().get(id) { let handler = h.clone(); - self.worker_channel.push(Work { work_type: WorkType::Message(data.clone()), token: 0, handler: handler, handler_id: id }); + self.worker_channel.push(Work { + work_type: WorkType::Message(data.clone()), + token: 0, + handler: handler, + handler_id: id + }); } } self.work_ready.notify_all(); @@ -326,21 +331,30 @@ impl Handler for IoManager where Message: Send + Clone + Sync } } -#[derive(Clone)] -enum Handlers where Message: Send + Clone { +enum Handlers where Message: Send { SharedCollection(Weak>, HandlerId>>>), Single(Weak>), } +impl Clone for Handlers { + fn clone(&self) -> Self { + use self::Handlers::*; + + match *self { + SharedCollection(ref w) => SharedCollection(w.clone()), + Single(ref w) => Single(w.clone()), + } + } +} + /// Allows sending messages into the event loop. All the IO handlers will get the message /// in the `message` callback. 
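The hand-written `Clone` for `Handlers` above is not busywork: `#[derive(Clone)]` would put a `Message: Clone` bound on the generated impl, and removing exactly that bound is the point of this change, since cloning a `Weak` handle never clones the `Message` inside. A minimal, self-contained sketch of the difference, using a single-variant enum and a hypothetical `NotClone` message type:

```rust
// Sketch: `derive(Clone)` conservatively requires `M: Clone`, even though
// cloning a `Weak<M>` only bumps a reference count. The manual impl drops
// that bound, which is what lets `Message: Clone` disappear elsewhere.
use std::sync::Weak;

struct NotClone; // a message type that does not implement `Clone`

// With `#[derive(Clone)]` instead of the manual impl below,
// `Handlers<NotClone>` would not be cloneable.
enum Handlers<M> {
	Single(Weak<M>),
}

impl<M> Clone for Handlers<M> {
	fn clone(&self) -> Self {
		match *self {
			Handlers::Single(ref w) => Handlers::Single(w.clone()),
		}
	}
}

fn main() {
	let handlers: Handlers<NotClone> = Handlers::Single(Weak::new());
	let _cloned = handlers.clone(); // compiles without `NotClone: Clone`
}
```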
-pub struct IoChannel<Message> where Message: Send + Clone {
+pub struct IoChannel<Message> where Message: Send {
 	channel: Option<Sender<IoMessage<Message>>>,
 	handlers: Handlers<Message>,
-
 }
 
-impl<Message> Clone for IoChannel<Message> where Message: Send + Clone + Sync + 'static {
+impl<Message> Clone for IoChannel<Message> where Message: Send + Sync + 'static {
 	fn clone(&self) -> IoChannel<Message> {
 		IoChannel {
 			channel: self.channel.clone(),
@@ -349,11 +363,11 @@ impl<Message> Clone for IoChannel<Message> where Message: Send + Clone + Sync + 'static {
 	}
 }
 
-impl<Message> IoChannel<Message> where Message: Send + Clone + Sync + 'static {
+impl<Message> IoChannel<Message> where Message: Send + Sync + 'static {
 	/// Send a message through the channel
 	pub fn send(&self, message: Message) -> Result<(), IoError> {
 		match self.channel {
-			Some(ref channel) => channel.send(IoMessage::UserMessage(message))?,
+			Some(ref channel) => channel.send(IoMessage::UserMessage(Arc::new(message)))?,
 			None => self.send_sync(message)?
 		}
 		Ok(())
@@ -413,13 +427,13 @@ impl<Message> IoChannel<Message> where Message: Send + Clone + Sync + 'static {
 
 /// General IO Service. Starts an event loop and dispatches IO requests.
 /// 'Message' is a notification message type
-pub struct IoService<Message> where Message: Send + Sync + Clone + 'static {
+pub struct IoService<Message> where Message: Send + Sync + 'static {
 	thread: Mutex<Option<JoinHandle<()>>>,
 	host_channel: Mutex<Sender<IoMessage<Message>>>,
 	handlers: Arc<RwLock<Slab<Arc<IoHandler<Message>>, HandlerId>>>,
 }
 
-impl<Message> IoService<Message> where Message: Send + Sync + Clone + 'static {
+impl<Message> IoService<Message> where Message: Send + Sync + 'static {
 	/// Starts IO event loop
 	pub fn start() -> Result<IoService<Message>, IoError> {
 		let mut config = EventLoopBuilder::new();
@@ -462,7 +476,7 @@ impl<Message> IoService<Message> where Message: Send + Sync + Clone + 'static {
 	/// Send a message over the network. Normally `HostIo::send` should be used. This can be used from non-io threads.
 	pub fn send_message(&self, message: Message) -> Result<(), IoError> {
-		self.host_channel.lock().send(IoMessage::UserMessage(message))?;
+		self.host_channel.lock().send(IoMessage::UserMessage(Arc::new(message)))?;
 		Ok(())
 	}
 
@@ -472,7 +486,7 @@ impl<Message> IoService<Message> where Message: Send + Sync + Clone + 'static {
 	}
 }
 
-impl<Message> Drop for IoService<Message> where Message: Send + Sync + Clone {
+impl<Message> Drop for IoService<Message> where Message: Send + Sync {
 	fn drop(&mut self) {
 		self.stop()
 	}
diff --git a/util/io/src/worker.rs b/util/io/src/worker.rs
index 79570d3612a..0f0d448ecb4 100644
--- a/util/io/src/worker.rs
+++ b/util/io/src/worker.rs
@@ -38,7 +38,7 @@ pub enum WorkType<Message> {
 	Writable,
 	Hup,
 	Timeout,
-	Message(Message)
+	Message(Arc<Message>)
 }
 
 pub struct Work<Message> {
@@ -65,7 +65,7 @@ impl Worker {
 		wait: Arc<Condvar>,
 		wait_mutex: Arc<Mutex<()>>,
 	) -> Worker
-		where Message: Send + Sync + Clone + 'static {
+		where Message: Send + Sync + 'static {
 		let deleting = Arc::new(AtomicBool::new(false));
 		let mut worker = Worker {
 			thread: None,
@@ -86,7 +86,7 @@ impl Worker {
 		channel: IoChannel<Message>, wait: Arc<Condvar>,
 		wait_mutex: Arc<Mutex<()>>, deleting: Arc<AtomicBool>)
-		where Message: Send + Sync + Clone + 'static {
+		where Message: Send + Sync + 'static {
 		loop {
 			{
 				let lock = wait_mutex.lock().expect("Poisoned work_loop mutex");
@@ -105,7 +105,7 @@ impl Worker {
 		}
 	}
 
-	fn do_work<Message>(work: Work<Message>, channel: IoChannel<Message>) where Message: Send + Sync + Clone + 'static {
+	fn do_work<Message>(work: Work<Message>, channel: IoChannel<Message>) where Message: Send + Sync + 'static {
 		match work.work_type {
 			WorkType::Readable => {
 				work.handler.stream_readable(&IoContext::new(channel, work.handler_id), work.token);
@@ -120,7 +120,7 @@ impl Worker {
 				work.handler.timeout(&IoContext::new(channel, work.handler_id), work.token);
 			}
 			WorkType::Message(message) => {
-				work.handler.message(&IoContext::new(channel,
work.handler_id), &*message); } } } From f20f9f376ef6a713400e2ba18353268bd35129fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Wed, 9 May 2018 08:54:37 +0200 Subject: [PATCH 08/11] Make trace-time publishable. (#8568) --- transaction-pool/Cargo.toml | 2 +- util/trace-time/Cargo.toml | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/transaction-pool/Cargo.toml b/transaction-pool/Cargo.toml index 342c376f63c..8965c8cee01 100644 --- a/transaction-pool/Cargo.toml +++ b/transaction-pool/Cargo.toml @@ -9,7 +9,7 @@ authors = ["Parity Technologies "] error-chain = "0.11" log = "0.3" smallvec = "0.4" -trace-time = { path = "../util/trace-time" } +trace-time = { path = "../util/trace-time", version = "0.1" } [dev-dependencies] ethereum-types = "0.3" diff --git a/util/trace-time/Cargo.toml b/util/trace-time/Cargo.toml index 00597ebfc49..288a2c4e47c 100644 --- a/util/trace-time/Cargo.toml +++ b/util/trace-time/Cargo.toml @@ -1,7 +1,9 @@ [package] name = "trace-time" +description = "Easily trace time to execute a scope." version = "0.1.0" authors = ["Parity Technologies "] +license = "GPL-3.0" [dependencies] log = "0.3" From b84682168d46716f455c53ee35166ad7e8e6f7ce Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Wed, 9 May 2018 14:55:01 +0800 Subject: [PATCH 09/11] Remove State::replace_backend (#8569) --- ethcore/src/state/mod.rs | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/ethcore/src/state/mod.rs b/ethcore/src/state/mod.rs index 20d564588c5..5b969bccb93 100644 --- a/ethcore/src/state/mod.rs +++ b/ethcore/src/state/mod.rs @@ -402,19 +402,6 @@ impl State { self.factories.vm.clone() } - /// Swap the current backend for another. - // TODO: [rob] find a less hacky way to avoid duplication of `Client::state_at`. - pub fn replace_backend(self, backend: T) -> State { - State { - db: backend, - root: self.root, - cache: self.cache, - checkpoints: self.checkpoints, - account_start_nonce: self.account_start_nonce, - factories: self.factories, - } - } - /// Create a recoverable checkpoint of this state. pub fn checkpoint(&mut self) { self.checkpoints.get_mut().push(HashMap::new()); From 8b0ba97cf2cec8c79b368e907d975484acaef504 Mon Sep 17 00:00:00 2001 From: Nicolas Gotchac Date: Wed, 9 May 2018 12:05:34 +0200 Subject: [PATCH 10/11] Refactoring `ethcore-sync` - Fixing warp-sync barrier (#8543) * Start dividing sync chain : first supplier method * WIP - updated chain sync supplier * Finish refactoring the Chain Sync Supplier * Create Chain Sync Requester * Add Propagator for Chain Sync * Add the Chain Sync Handler * Move tests from mod -> handler * Move tests to propagator * Refactor SyncRequester arguments * Refactoring peer fork header handler * Fix wrong highest block number in snapshot sync * Small refactor... 
* Address PR grumbles * Retry failed CI job * Fix tests * PR Grumbles --- ethcore/sync/src/chain.rs | 3112 -------------------------- ethcore/sync/src/chain/handler.rs | 828 +++++++ ethcore/sync/src/chain/mod.rs | 1379 ++++++++++++ ethcore/sync/src/chain/propagator.rs | 636 ++++++ ethcore/sync/src/chain/requester.rs | 154 ++ ethcore/sync/src/chain/supplier.rs | 446 ++++ ethcore/sync/src/tests/snapshot.rs | 12 +- 7 files changed, 3453 insertions(+), 3114 deletions(-) delete mode 100644 ethcore/sync/src/chain.rs create mode 100644 ethcore/sync/src/chain/handler.rs create mode 100644 ethcore/sync/src/chain/mod.rs create mode 100644 ethcore/sync/src/chain/propagator.rs create mode 100644 ethcore/sync/src/chain/requester.rs create mode 100644 ethcore/sync/src/chain/supplier.rs diff --git a/ethcore/sync/src/chain.rs b/ethcore/sync/src/chain.rs deleted file mode 100644 index 1a6af101159..00000000000 --- a/ethcore/sync/src/chain.rs +++ /dev/null @@ -1,3112 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -/// `BlockChain` synchronization strategy. -/// Syncs to peers and keeps up to date. -/// This implementation uses ethereum protocol v63 -/// -/// Syncing strategy summary. -/// Split the chain into ranges of N blocks each. Download ranges sequentially. Split each range into subchains of M blocks. Download subchains in parallel. -/// State. -/// Sync state consists of the following data: -/// - s: State enum which can be one of the following values: `ChainHead`, `Blocks`, `Idle` -/// - H: A set of downloaded block headers -/// - B: A set of downloaded block bodies -/// - S: Set of block subchain start block hashes to download. -/// - l: Last imported / common block hash -/// - P: A set of connected peers. For each peer we maintain its last known total difficulty and starting block hash being requested if any. -/// General behaviour. -/// We start with all sets empty, l is set to the best block in the block chain, s is set to `ChainHead`. -/// If at any moment a bad block is reported by the block queue, we set s to `ChainHead`, reset l to the best block in the block chain and clear H, B and S. -/// If at any moment P becomes empty, we set s to `ChainHead`, and clear H, B and S. -/// -/// Workflow for `ChainHead` state. -/// In this state we try to get subchain headers with a single `GetBlockHeaders` request. -/// On `NewPeer` / On `Restart`: -/// If peer's total difficulty is higher and there are less than 5 peers downloading, request N/M headers with interval M+1 starting from l -/// On `BlockHeaders(R)`: -/// If R is empty: -/// If l is equal to genesis block hash or l is more than 1000 blocks behind our best hash: -/// Remove current peer from P. set l to the best block in the block chain. Select peer with maximum total difficulty from P and restart. -/// Else -/// Set l to l’s parent and restart. 
-/// Else if we already have all the headers in the block chain or the block queue: -/// Set s to `Idle`, -/// Else -/// Set S to R, set s to `Blocks`. -/// -/// All other messages are ignored. -/// -/// Workflow for `Blocks` state. -/// In this state we download block headers and bodies from multiple peers. -/// On `NewPeer` / On `Restart`: -/// For all idle peers: -/// Find a set of 256 or less block hashes in H which are not in B and not being downloaded by other peers. If the set is not empty: -/// Request block bodies for the hashes in the set. -/// Else -/// Find an element in S which is not being downloaded by other peers. If found: Request M headers starting from the element. -/// -/// On `BlockHeaders(R)`: -/// If R is empty remove current peer from P and restart. -/// Validate received headers: -/// For each header find a parent in H or R or the blockchain. Restart if there is a block with unknown parent. -/// Find at least one header from the received list in S. Restart if there is none. -/// Go to `CollectBlocks`. -/// -/// On `BlockBodies(R)`: -/// If R is empty remove current peer from P and restart. -/// Add bodies with a matching header in H to B. -/// Go to `CollectBlocks`. -/// -/// `CollectBlocks`: -/// Find a chain of blocks C in H starting from h where h’s parent equals to l. The chain ends with the first block which does not have a body in B. -/// Add all blocks from the chain to the block queue. Remove them from H and B. Set l to the hash of the last block from C. -/// Update and merge subchain heads in S. For each h in S find a chain of blocks in B starting from h. Remove h from S. if the chain does not include an element from S add the end of the chain to S. -/// If H is empty and S contains a single element set s to `ChainHead`. -/// Restart. -/// -/// All other messages are ignored. -/// Workflow for Idle state. -/// On `NewBlock`: -/// Import the block. If the block is unknown set s to `ChainHead` and restart. -/// On `NewHashes`: -/// Set s to `ChainHead` and restart. -/// -/// All other messages are ignored. -/// - -use std::sync::Arc; -use std::collections::{HashSet, HashMap}; -use std::cmp; -use std::time::{Duration, Instant}; -use hash::keccak; -use heapsize::HeapSizeOf; -use ethereum_types::{H256, U256}; -use plain_hasher::H256FastMap; -use parking_lot::RwLock; -use bytes::Bytes; -use rlp::{Rlp, RlpStream, DecoderError, Encodable}; -use network::{self, PeerId, PacketId}; -use ethcore::header::{BlockNumber, Header as BlockHeader}; -use ethcore::client::{BlockChainClient, BlockStatus, BlockId, BlockChainInfo, BlockImportError, BlockImportErrorKind, BlockQueueInfo}; -use ethcore::error::*; -use ethcore::snapshot::{ManifestData, RestorationStatus}; -use transaction::SignedTransaction; -use sync_io::SyncIo; -use super::{WarpSync, SyncConfig}; -use block_sync::{BlockDownloader, BlockRequest, BlockDownloaderImportError as DownloaderImportError, DownloadAction}; -use rand::Rng; -use snapshot::{Snapshot, ChunkType}; -use api::{EthProtocolInfo as PeerInfoDigest, WARP_SYNC_PROTOCOL_ID}; -use private_tx::PrivateTxHandler; -use transactions_stats::{TransactionsStats, Stats as TransactionStats}; -use transaction::UnverifiedTransaction; - -known_heap_size!(0, PeerInfo); - -type PacketDecodeError = DecoderError; - -/// 63 version of Ethereum protocol. -pub const ETH_PROTOCOL_VERSION_63: u8 = 63; -/// 62 version of Ethereum protocol. -pub const ETH_PROTOCOL_VERSION_62: u8 = 62; -/// 1 version of Parity protocol. 
-pub const PAR_PROTOCOL_VERSION_1: u8 = 1;
-/// 2 version of Parity protocol (consensus messages added).
-pub const PAR_PROTOCOL_VERSION_2: u8 = 2;
-/// 3 version of Parity protocol (private transactions messages added).
-pub const PAR_PROTOCOL_VERSION_3: u8 = 3;
-
-const MAX_BODIES_TO_SEND: usize = 256;
-const MAX_HEADERS_TO_SEND: usize = 512;
-const MAX_NODE_DATA_TO_SEND: usize = 1024;
-const MAX_RECEIPTS_TO_SEND: usize = 1024;
-const MAX_RECEIPTS_HEADERS_TO_SEND: usize = 256;
-const MIN_PEERS_PROPAGATION: usize = 4;
-const MAX_PEERS_PROPAGATION: usize = 128;
-const MAX_PEER_LAG_PROPAGATION: BlockNumber = 20;
-const MAX_NEW_HASHES: usize = 64;
-const MAX_NEW_BLOCK_AGE: BlockNumber = 20;
-// Maximal packet size with transactions (cannot be greater than 16MB - protocol limitation).
-const MAX_TRANSACTION_PACKET_SIZE: usize = 8 * 1024 * 1024;
-// Maximal number of transactions sent in a single packet.
-const MAX_TRANSACTIONS_TO_PROPAGATE: usize = 64;
-// Min number of blocks to be behind for a snapshot sync
-const SNAPSHOT_RESTORE_THRESHOLD: BlockNumber = 30000;
-const SNAPSHOT_MIN_PEERS: usize = 3;
-
-const STATUS_PACKET: u8 = 0x00;
-const NEW_BLOCK_HASHES_PACKET: u8 = 0x01;
-const TRANSACTIONS_PACKET: u8 = 0x02;
-const GET_BLOCK_HEADERS_PACKET: u8 = 0x03;
-const BLOCK_HEADERS_PACKET: u8 = 0x04;
-const GET_BLOCK_BODIES_PACKET: u8 = 0x05;
-const BLOCK_BODIES_PACKET: u8 = 0x06;
-const NEW_BLOCK_PACKET: u8 = 0x07;
-
-const GET_NODE_DATA_PACKET: u8 = 0x0d;
-const NODE_DATA_PACKET: u8 = 0x0e;
-const GET_RECEIPTS_PACKET: u8 = 0x0f;
-const RECEIPTS_PACKET: u8 = 0x10;
-
-pub const ETH_PACKET_COUNT: u8 = 0x11;
-
-const GET_SNAPSHOT_MANIFEST_PACKET: u8 = 0x11;
-const SNAPSHOT_MANIFEST_PACKET: u8 = 0x12;
-const GET_SNAPSHOT_DATA_PACKET: u8 = 0x13;
-const SNAPSHOT_DATA_PACKET: u8 = 0x14;
-const CONSENSUS_DATA_PACKET: u8 = 0x15;
-const PRIVATE_TRANSACTION_PACKET: u8 = 0x16;
-const SIGNED_PRIVATE_TRANSACTION_PACKET: u8 = 0x17;
-
-pub const SNAPSHOT_SYNC_PACKET_COUNT: u8 = 0x18;
-
-const MAX_SNAPSHOT_CHUNKS_DOWNLOAD_AHEAD: usize = 3;
-
-const WAIT_PEERS_TIMEOUT: Duration = Duration::from_secs(5);
-const STATUS_TIMEOUT: Duration = Duration::from_secs(5);
-const HEADERS_TIMEOUT: Duration = Duration::from_secs(15);
-const BODIES_TIMEOUT: Duration = Duration::from_secs(20);
-const RECEIPTS_TIMEOUT: Duration = Duration::from_secs(10);
-const FORK_HEADER_TIMEOUT: Duration = Duration::from_secs(3);
-const SNAPSHOT_MANIFEST_TIMEOUT: Duration = Duration::from_secs(5);
-const SNAPSHOT_DATA_TIMEOUT: Duration = Duration::from_secs(120);
-
-#[derive(Copy, Clone, Eq, PartialEq, Debug)]
-/// Sync state
-pub enum SyncState {
-	/// Collecting enough peers to start syncing.
-	WaitingPeers,
-	/// Waiting for snapshot manifest download
-	SnapshotManifest,
-	/// Downloading snapshot data
-	SnapshotData,
-	/// Waiting for snapshot restoration progress.
-	SnapshotWaiting,
-	/// Downloading new blocks
-	Blocks,
-	/// Initial chain sync complete. Waiting for new packets
-	Idle,
-	/// Block downloading paused. Waiting for block queue to process blocks and free some space
-	Waiting,
-	/// Downloading blocks learned from `NewHashes` packet
-	NewBlocks,
-}
-
-/// Syncing status and statistics
-#[derive(Clone, Copy)]
-pub struct SyncStatus {
-	/// State
-	pub state: SyncState,
-	/// Syncing protocol version. That's the maximum protocol version we connect to.
-	pub protocol_version: u8,
-	/// The underlying p2p network version.
-	pub network_id: u64,
-	/// `BlockChain` height for the moment the sync started.
-	pub start_block_number: BlockNumber,
-	/// Last fully downloaded and imported block number (if any).
-	pub last_imported_block_number: Option<BlockNumber>,
-	/// Highest block number in the download queue (if any).
-	pub highest_block_number: Option<BlockNumber>,
-	/// Total number of blocks for the sync process.
-	pub blocks_total: BlockNumber,
-	/// Number of blocks downloaded so far.
-	pub blocks_received: BlockNumber,
-	/// Total number of connected peers
-	pub num_peers: usize,
-	/// Total number of active peers.
-	pub num_active_peers: usize,
-	/// Heap memory used in bytes.
-	pub mem_used: usize,
-	/// Snapshot chunks
-	pub num_snapshot_chunks: usize,
-	/// Snapshot chunks downloaded
-	pub snapshot_chunks_done: usize,
-	/// Last fully downloaded and imported ancient block number (if any).
-	pub last_imported_old_block_number: Option<BlockNumber>,
-}
-
-impl SyncStatus {
-	/// Indicates if snapshot download is in progress
-	pub fn is_snapshot_syncing(&self) -> bool {
-		self.state == SyncState::SnapshotManifest
-			|| self.state == SyncState::SnapshotData
-			|| self.state == SyncState::SnapshotWaiting
-	}
-
-	/// Returns max number of peers to display in informants
-	pub fn current_max_peers(&self, min_peers: u32, max_peers: u32) -> u32 {
-		if self.num_peers as u32 > min_peers {
-			max_peers
-		} else {
-			min_peers
-		}
-	}
-
-	/// Is it doing a major sync?
-	pub fn is_syncing(&self, queue_info: BlockQueueInfo) -> bool {
-		let is_syncing_state = match self.state { SyncState::Idle | SyncState::NewBlocks => false, _ => true };
-		let is_verifying = queue_info.unverified_queue_size + queue_info.verified_queue_size > 3;
-		is_verifying || is_syncing_state
-	}
-}
-
-#[derive(PartialEq, Eq, Debug, Clone)]
-/// Peer data type requested
-enum PeerAsking {
-	Nothing,
-	ForkHeader,
-	BlockHeaders,
-	BlockBodies,
-	BlockReceipts,
-	SnapshotManifest,
-	SnapshotData,
-}
-
-#[derive(PartialEq, Eq, Debug, Clone, Copy)]
-/// Block downloader channel.
-enum BlockSet {
-	/// New blocks better than our best blocks
-	NewBlocks,
-	/// Missing old blocks
-	OldBlocks,
-}
-#[derive(Clone, Eq, PartialEq)]
-enum ForkConfirmation {
-	/// Fork block confirmation pending.
-	Unconfirmed,
-	/// Peers chain is too short to confirm the fork.
-	TooShort,
-	/// Fork is confirmed.
-	Confirmed,
-}
-
-#[derive(Clone)]
-/// Syncing peer information
-struct PeerInfo {
-	/// eth protocol version
-	protocol_version: u8,
-	/// Peer chain genesis hash
-	genesis: H256,
-	/// Peer network id
-	network_id: u64,
-	/// Peer best block hash
-	latest_hash: H256,
-	/// Peer total difficulty if known
-	difficulty: Option<U256>,
-	/// Type of data currently being requested from peer.
-	asking: PeerAsking,
-	/// A set of block numbers being requested
-	asking_blocks: Vec<BlockNumber>,
-	/// Holds requested header hash if currently requesting block header by hash
-	asking_hash: Option<H256>,
-	/// Holds requested snapshot chunk hash if any.
-	asking_snapshot_data: Option<H256>,
-	/// Request timestamp
-	ask_time: Instant,
-	/// Holds a set of transactions recently sent to this peer to avoid spamming.
- last_sent_transactions: HashSet, - /// Pending request is expired and result should be ignored - expired: bool, - /// Peer fork confirmation status - confirmation: ForkConfirmation, - /// Best snapshot hash - snapshot_hash: Option, - /// Best snapshot block number - snapshot_number: Option, - /// Block set requested - block_set: Option, -} - -impl PeerInfo { - fn can_sync(&self) -> bool { - self.confirmation == ForkConfirmation::Confirmed && !self.expired - } - - fn is_allowed(&self) -> bool { - self.confirmation != ForkConfirmation::Unconfirmed && !self.expired - } - - fn reset_asking(&mut self) { - self.asking_blocks.clear(); - self.asking_hash = None; - // mark any pending requests as expired - if self.asking != PeerAsking::Nothing && self.is_allowed() { - self.expired = true; - } - } -} - -#[cfg(not(test))] -mod random { - use rand; - pub fn new() -> rand::ThreadRng { rand::thread_rng() } -} -#[cfg(test)] -mod random { - use rand::{self, SeedableRng}; - pub fn new() -> rand::XorShiftRng { rand::XorShiftRng::from_seed([0, 1, 2, 3]) } -} - -/// Blockchain sync handler. -/// See module documentation for more details. -pub struct ChainSync { - /// Sync state - state: SyncState, - /// Last block number for the start of sync - starting_block: BlockNumber, - /// Highest block number seen - highest_block: Option, - /// All connected peers - peers: HashMap, - /// Peers active for current sync round - active_peers: HashSet, - /// Block download process for new blocks - new_blocks: BlockDownloader, - /// Block download process for ancient blocks - old_blocks: Option, - /// Last propagated block number - last_sent_block_number: BlockNumber, - /// Network ID - network_id: u64, - /// Optional fork block to check - fork_block: Option<(BlockNumber, H256)>, - /// Snapshot downloader. - snapshot: Snapshot, - /// Connected peers pending Status message. - /// Value is request timestamp. - handshaking_peers: HashMap, - /// Sync start timestamp. Measured when first peer is connected - sync_start_time: Option, - /// Transactions propagation statistics - transactions_stats: TransactionsStats, - /// Enable ancient block downloading - download_old_blocks: bool, - /// Shared private tx service. - private_tx_handler: Arc, - /// Enable warp sync. - warp_sync: WarpSync, -} - -type RlpResponseResult = Result, PacketDecodeError>; - -impl ChainSync { - /// Create a new instance of syncing strategy. 
-	pub fn new(config: SyncConfig, chain: &BlockChainClient, private_tx_handler: Arc<PrivateTxHandler>) -> ChainSync {
-		let chain_info = chain.chain_info();
-		let best_block = chain.chain_info().best_block_number;
-		let state = match config.warp_sync {
-			WarpSync::Enabled => SyncState::WaitingPeers,
-			WarpSync::OnlyAndAfter(block) if block > best_block => SyncState::WaitingPeers,
-			_ => SyncState::Idle,
-		};
-
-		let mut sync = ChainSync {
-			state,
-			starting_block: best_block,
-			highest_block: None,
-			peers: HashMap::new(),
-			handshaking_peers: HashMap::new(),
-			active_peers: HashSet::new(),
-			new_blocks: BlockDownloader::new(false, &chain_info.best_block_hash, chain_info.best_block_number),
-			old_blocks: None,
-			last_sent_block_number: 0,
-			network_id: config.network_id,
-			fork_block: config.fork_block,
-			download_old_blocks: config.download_old_blocks,
-			snapshot: Snapshot::new(),
-			sync_start_time: None,
-			transactions_stats: TransactionsStats::default(),
-			private_tx_handler,
-			warp_sync: config.warp_sync,
-		};
-		sync.update_targets(chain);
-		sync
-	}
-
-	/// Returns synchronization status
-	pub fn status(&self) -> SyncStatus {
-		let last_imported_number = self.new_blocks.last_imported_block_number();
-		SyncStatus {
-			state: self.state.clone(),
-			protocol_version: ETH_PROTOCOL_VERSION_63,
-			network_id: self.network_id,
-			start_block_number: self.starting_block,
-			last_imported_block_number: Some(last_imported_number),
-			last_imported_old_block_number: self.old_blocks.as_ref().map(|d| d.last_imported_block_number()),
-			highest_block_number: self.highest_block.map(|n| cmp::max(n, last_imported_number)),
-			blocks_received: if last_imported_number > self.starting_block { last_imported_number - self.starting_block } else { 0 },
-			blocks_total: match self.highest_block { Some(x) if x > self.starting_block => x - self.starting_block, _ => 0 },
-			num_peers: self.peers.values().filter(|p| p.is_allowed()).count(),
-			num_active_peers: self.peers.values().filter(|p| p.is_allowed() && p.asking != PeerAsking::Nothing).count(),
-			num_snapshot_chunks: self.snapshot.total_chunks(),
-			snapshot_chunks_done: self.snapshot.done_chunks(),
-			mem_used:
-				self.new_blocks.heap_size()
-				+ self.old_blocks.as_ref().map_or(0, |d| d.heap_size())
-				+ self.peers.heap_size_of_children(),
-		}
-	}
-
-	/// Returns information on peers connections
-	pub fn peer_info(&self, peer_id: &PeerId) -> Option<PeerInfoDigest> {
-		self.peers.get(peer_id).map(|peer_data| {
-			PeerInfoDigest {
-				version: peer_data.protocol_version as u32,
-				difficulty: peer_data.difficulty,
-				head: peer_data.latest_hash,
-			}
-		})
-	}
-
-	/// Returns transactions propagation statistics
-	pub fn transactions_stats(&self) -> &H256FastMap<TransactionStats> {
-		self.transactions_stats.stats()
-	}
-
-	/// Updates the set of transactions recently received from a peer
-	pub fn transactions_received(&mut self, txs: &[UnverifiedTransaction], peer_id: PeerId) {
-		if let Some(peer_info) = self.peers.get_mut(&peer_id) {
-			peer_info.last_sent_transactions.extend(txs.iter().map(|tx| tx.hash()));
-		}
-	}
-
-	/// Abort all sync activity
-	pub fn abort(&mut self, io: &mut SyncIo) {
-		self.reset_and_continue(io);
-		self.peers.clear();
-	}
-
-	/// Reset sync.
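The `state` selection at the top of `new` above encodes the warp-sync barrier this PR's title refers to: with `WarpSync::OnlyAndAfter(block)`, a node keeps waiting for snapshot peers only while its own best block is still below the barrier. A self-contained sketch of just that decision, with simplified enums and hypothetical block numbers:

```rust
// Sketch of the initial-state decision in `ChainSync::new`; enums trimmed
// to the variants involved, block numbers are made up for illustration.
#[derive(Clone, Copy, PartialEq, Debug)]
enum WarpSync {
	Disabled,
	Enabled,
	OnlyAndAfter(u64),
}

#[derive(PartialEq, Debug)]
enum SyncState {
	WaitingPeers,
	Idle,
}

fn initial_state(warp_sync: WarpSync, best_block: u64) -> SyncState {
	match warp_sync {
		WarpSync::Enabled => SyncState::WaitingPeers,
		// The barrier: only wait for a snapshot while we are still below it.
		WarpSync::OnlyAndAfter(block) if block > best_block => SyncState::WaitingPeers,
		_ => SyncState::Idle,
	}
}

fn main() {
	// A fresh node below a barrier at 5_000_000 waits for snapshot peers...
	assert_eq!(initial_state(WarpSync::OnlyAndAfter(5_000_000), 0), SyncState::WaitingPeers);
	// ...while a node already past the barrier starts a normal block sync.
	assert_eq!(initial_state(WarpSync::OnlyAndAfter(5_000_000), 6_000_000), SyncState::Idle);
}
```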
Clear all downloaded data but keep the queue - fn reset(&mut self, io: &mut SyncIo) { - self.new_blocks.reset(); - let chain_info = io.chain().chain_info(); - for (_, ref mut p) in &mut self.peers { - if p.block_set != Some(BlockSet::OldBlocks) { - p.reset_asking(); - if p.difficulty.is_none() { - // assume peer has up to date difficulty - p.difficulty = Some(chain_info.pending_total_difficulty); - } - } - } - self.state = SyncState::Idle; - // Reactivate peers only if some progress has been made - // since the last sync round of if starting fresh. - self.active_peers = self.peers.keys().cloned().collect(); - } - - /// Restart sync - pub fn reset_and_continue(&mut self, io: &mut SyncIo) { - trace!(target: "sync", "Restarting"); - if self.state == SyncState::SnapshotData { - debug!(target:"sync", "Aborting snapshot restore"); - io.snapshot_service().abort_restore(); - } - self.snapshot.clear(); - self.reset(io); - self.continue_sync(io); - } - - /// Remove peer from active peer set. Peer will be reactivated on the next sync - /// round. - fn deactivate_peer(&mut self, _io: &mut SyncIo, peer_id: PeerId) { - trace!(target: "sync", "Deactivating peer {}", peer_id); - self.active_peers.remove(&peer_id); - } - - fn maybe_start_snapshot_sync(&mut self, io: &mut SyncIo) { - if !self.warp_sync.is_enabled() || io.snapshot_service().supported_versions().is_none() { - trace!(target: "sync", "Skipping warp sync. Disabled or not supported."); - return; - } - if self.state != SyncState::WaitingPeers && self.state != SyncState::Blocks && self.state != SyncState::Waiting { - trace!(target: "sync", "Skipping warp sync. State: {:?}", self.state); - return; - } - // Make sure the snapshot block is not too far away from best block and network best block and - // that it is higher than fork detection block - let our_best_block = io.chain().chain_info().best_block_number; - let fork_block = self.fork_block.as_ref().map(|&(n, _)| n).unwrap_or(0); - - let (best_hash, max_peers, snapshot_peers) = { - let expected_warp_block = match self.warp_sync { - WarpSync::OnlyAndAfter(block) => block, - _ => 0, - }; - //collect snapshot infos from peers - let snapshots = self.peers.iter() - .filter(|&(_, p)| p.is_allowed() && p.snapshot_number.map_or(false, |sn| - our_best_block < sn && (sn - our_best_block) > SNAPSHOT_RESTORE_THRESHOLD && - sn > fork_block && - sn > expected_warp_block && - self.highest_block.map_or(true, |highest| highest >= sn && (highest - sn) <= SNAPSHOT_RESTORE_THRESHOLD) - )) - .filter_map(|(p, peer)| peer.snapshot_hash.map(|hash| (p, hash.clone()))) - .filter(|&(_, ref hash)| !self.snapshot.is_known_bad(hash)); - - let mut snapshot_peers = HashMap::new(); - let mut max_peers: usize = 0; - let mut best_hash = None; - for (p, hash) in snapshots { - let peers = snapshot_peers.entry(hash).or_insert_with(Vec::new); - peers.push(*p); - if peers.len() > max_peers { - max_peers = peers.len(); - best_hash = Some(hash); - } - } - (best_hash, max_peers, snapshot_peers) - }; - - let timeout = (self.state == SyncState::WaitingPeers) && self.sync_start_time.map_or(false, |t| t.elapsed() > WAIT_PEERS_TIMEOUT); - - if let (Some(hash), Some(peers)) = (best_hash, best_hash.map_or(None, |h| snapshot_peers.get(&h))) { - if max_peers >= SNAPSHOT_MIN_PEERS { - trace!(target: "sync", "Starting confirmed snapshot sync {:?} with {:?}", hash, peers); - self.start_snapshot_sync(io, peers); - } else if timeout { - trace!(target: "sync", "Starting unconfirmed snapshot sync {:?} with {:?}", hash, peers); - 
self.start_snapshot_sync(io, peers); - } - } else if timeout && !self.warp_sync.is_warp_only() { - trace!(target: "sync", "No snapshots found, starting full sync"); - self.state = SyncState::Idle; - self.continue_sync(io); - } - } - - fn start_snapshot_sync(&mut self, io: &mut SyncIo, peers: &[PeerId]) { - if !self.snapshot.have_manifest() { - for p in peers { - if self.peers.get(p).map_or(false, |p| p.asking == PeerAsking::Nothing) { - self.request_snapshot_manifest(io, *p); - } - } - self.state = SyncState::SnapshotManifest; - trace!(target: "sync", "New snapshot sync with {:?}", peers); - } else { - self.state = SyncState::SnapshotData; - trace!(target: "sync", "Resumed snapshot sync with {:?}", peers); - } - } - - /// Restart sync disregarding the block queue status. May end up re-downloading up to QUEUE_SIZE blocks - pub fn restart(&mut self, io: &mut SyncIo) { - self.update_targets(io.chain()); - self.reset_and_continue(io); - } - - /// Update sync after the blockchain has been changed externally. - pub fn update_targets(&mut self, chain: &BlockChainClient) { - // Do not assume that the block queue/chain still has our last_imported_block - let chain = chain.chain_info(); - self.new_blocks = BlockDownloader::new(false, &chain.best_block_hash, chain.best_block_number); - self.old_blocks = None; - if self.download_old_blocks { - if let (Some(ancient_block_hash), Some(ancient_block_number)) = (chain.ancient_block_hash, chain.ancient_block_number) { - - trace!(target: "sync", "Downloading old blocks from {:?} (#{}) till {:?} (#{:?})", ancient_block_hash, ancient_block_number, chain.first_block_hash, chain.first_block_number); - let mut downloader = BlockDownloader::with_unlimited_reorg(true, &ancient_block_hash, ancient_block_number); - if let Some(hash) = chain.first_block_hash { - trace!(target: "sync", "Downloader target set to {:?}", hash); - downloader.set_target(&hash); - } - self.old_blocks = Some(downloader); - } - } - } - - /// Called by peer to report status - fn on_peer_status(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), PacketDecodeError> { - self.handshaking_peers.remove(&peer_id); - let protocol_version: u8 = r.val_at(0)?; - let warp_protocol = io.protocol_version(&WARP_SYNC_PROTOCOL_ID, peer_id) != 0; - let peer = PeerInfo { - protocol_version: protocol_version, - network_id: r.val_at(1)?, - difficulty: Some(r.val_at(2)?), - latest_hash: r.val_at(3)?, - genesis: r.val_at(4)?, - asking: PeerAsking::Nothing, - asking_blocks: Vec::new(), - asking_hash: None, - ask_time: Instant::now(), - last_sent_transactions: HashSet::new(), - expired: false, - confirmation: if self.fork_block.is_none() { ForkConfirmation::Confirmed } else { ForkConfirmation::Unconfirmed }, - asking_snapshot_data: None, - snapshot_hash: if warp_protocol { Some(r.val_at(5)?) } else { None }, - snapshot_number: if warp_protocol { Some(r.val_at(6)?) 
} else { None },
-			block_set: None,
-		};
-
-		trace!(target: "sync", "New peer {} (protocol: {}, network: {:?}, difficulty: {:?}, latest:{}, genesis:{}, snapshot:{:?})",
-			peer_id, peer.protocol_version, peer.network_id, peer.difficulty, peer.latest_hash, peer.genesis, peer.snapshot_number);
-		if io.is_expired() {
-			trace!(target: "sync", "Status packet from expired session {}:{}", peer_id, io.peer_info(peer_id));
-			return Ok(());
-		}
-
-		if self.peers.contains_key(&peer_id) {
-			debug!(target: "sync", "Unexpected status packet from {}:{}", peer_id, io.peer_info(peer_id));
-			return Ok(());
-		}
-		let chain_info = io.chain().chain_info();
-		if peer.genesis != chain_info.genesis_hash {
-			io.disable_peer(peer_id);
-			trace!(target: "sync", "Peer {} genesis hash mismatch (ours: {}, theirs: {})", peer_id, chain_info.genesis_hash, peer.genesis);
-			return Ok(());
-		}
-		if peer.network_id != self.network_id {
-			io.disable_peer(peer_id);
-			trace!(target: "sync", "Peer {} network id mismatch (ours: {}, theirs: {})", peer_id, self.network_id, peer.network_id);
-			return Ok(());
-		}
-		if (warp_protocol && peer.protocol_version != PAR_PROTOCOL_VERSION_1 && peer.protocol_version != PAR_PROTOCOL_VERSION_2 && peer.protocol_version != PAR_PROTOCOL_VERSION_3)
-			|| (!warp_protocol && peer.protocol_version != ETH_PROTOCOL_VERSION_63 && peer.protocol_version != ETH_PROTOCOL_VERSION_62) {
-			io.disable_peer(peer_id);
-			trace!(target: "sync", "Peer {} unsupported eth protocol ({})", peer_id, peer.protocol_version);
-			return Ok(());
-		}
-
-		if self.sync_start_time.is_none() {
-			self.sync_start_time = Some(Instant::now());
-		}
-
-		self.peers.insert(peer_id.clone(), peer);
-		// Don't activate peer immediately when searching for common block.
-		// Let the current sync round complete first.
- self.active_peers.insert(peer_id.clone()); - debug!(target: "sync", "Connected {}:{}", peer_id, io.peer_info(peer_id)); - if let Some((fork_block, _)) = self.fork_block { - self.request_fork_header_by_number(io, peer_id, fork_block); - } else { - self.sync_peer(io, peer_id, false); - } - Ok(()) - } - - /// Called by peer once it has new block headers during sync - fn on_peer_block_headers(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), PacketDecodeError> { - let confirmed = match self.peers.get_mut(&peer_id) { - Some(ref mut peer) if peer.asking == PeerAsking::ForkHeader => { - peer.asking = PeerAsking::Nothing; - let item_count = r.item_count()?; - let (fork_number, fork_hash) = self.fork_block.expect("ForkHeader request is sent only fork block is Some; qed").clone(); - if item_count == 0 || item_count != 1 { - trace!(target: "sync", "{}: Chain is too short to confirm the block", peer_id); - peer.confirmation = ForkConfirmation::TooShort; - } else { - let header = r.at(0)?.as_raw(); - if keccak(&header) == fork_hash { - trace!(target: "sync", "{}: Confirmed peer", peer_id); - peer.confirmation = ForkConfirmation::Confirmed; - if !io.chain_overlay().read().contains_key(&fork_number) { - io.chain_overlay().write().insert(fork_number, header.to_vec()); - } - } else { - trace!(target: "sync", "{}: Fork mismatch", peer_id); - io.disable_peer(peer_id); - return Ok(()); - } - } - true - }, - _ => false, - }; - if confirmed { - self.sync_peer(io, peer_id, false); - return Ok(()); - } - - self.clear_peer_download(peer_id); - let expected_hash = self.peers.get(&peer_id).and_then(|p| p.asking_hash); - let allowed = self.peers.get(&peer_id).map(|p| p.is_allowed()).unwrap_or(false); - let block_set = self.peers.get(&peer_id).and_then(|p| p.block_set).unwrap_or(BlockSet::NewBlocks); - if !self.reset_peer_asking(peer_id, PeerAsking::BlockHeaders) || expected_hash.is_none() || !allowed { - trace!(target: "sync", "{}: Ignored unexpected headers, expected_hash = {:?}", peer_id, expected_hash); - self.continue_sync(io); - return Ok(()); - } - let item_count = r.item_count()?; - trace!(target: "sync", "{} -> BlockHeaders ({} entries), state = {:?}, set = {:?}", peer_id, item_count, self.state, block_set); - if (self.state == SyncState::Idle || self.state == SyncState::WaitingPeers) && self.old_blocks.is_none() { - trace!(target: "sync", "Ignored unexpected block headers"); - self.continue_sync(io); - return Ok(()); - } - if self.state == SyncState::Waiting { - trace!(target: "sync", "Ignored block headers while waiting"); - self.continue_sync(io); - return Ok(()); - } - - let result = { - let downloader = match block_set { - BlockSet::NewBlocks => &mut self.new_blocks, - BlockSet::OldBlocks => { - match self.old_blocks { - None => { - trace!(target: "sync", "Ignored block headers while block download is inactive"); - self.continue_sync(io); - return Ok(()); - }, - Some(ref mut blocks) => blocks, - } - } - }; - downloader.import_headers(io, r, expected_hash) - }; - - match result { - Err(DownloaderImportError::Useless) => { - self.deactivate_peer(io, peer_id); - }, - Err(DownloaderImportError::Invalid) => { - io.disable_peer(peer_id); - self.deactivate_peer(io, peer_id); - self.continue_sync(io); - return Ok(()); - }, - Ok(DownloadAction::Reset) => { - // mark all outstanding requests as expired - trace!("Resetting downloads for {:?}", block_set); - for (_, ref mut p) in self.peers.iter_mut().filter(|&(_, ref p)| p.block_set == Some(block_set)) { - p.reset_asking(); - } - - } - 
Ok(DownloadAction::None) => {}, - } - - self.collect_blocks(io, block_set); - // give a task to the same peer first if received valuable headers. - self.sync_peer(io, peer_id, false); - // give tasks to other peers - self.continue_sync(io); - Ok(()) - } - - /// Called by peer once it has new block bodies - fn on_peer_block_bodies(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), PacketDecodeError> { - self.clear_peer_download(peer_id); - let block_set = self.peers.get(&peer_id).and_then(|p| p.block_set).unwrap_or(BlockSet::NewBlocks); - if !self.reset_peer_asking(peer_id, PeerAsking::BlockBodies) { - trace!(target: "sync", "{}: Ignored unexpected bodies", peer_id); - self.continue_sync(io); - return Ok(()); - } - let item_count = r.item_count()?; - trace!(target: "sync", "{} -> BlockBodies ({} entries), set = {:?}", peer_id, item_count, block_set); - if item_count == 0 { - self.deactivate_peer(io, peer_id); - } - else if self.state == SyncState::Waiting { - trace!(target: "sync", "Ignored block bodies while waiting"); - } - else - { - let result = { - let downloader = match block_set { - BlockSet::NewBlocks => &mut self.new_blocks, - BlockSet::OldBlocks => match self.old_blocks { - None => { - trace!(target: "sync", "Ignored block headers while block download is inactive"); - self.continue_sync(io); - return Ok(()); - }, - Some(ref mut blocks) => blocks, - } - }; - downloader.import_bodies(io, r) - }; - - match result { - Err(DownloaderImportError::Invalid) => { - io.disable_peer(peer_id); - self.deactivate_peer(io, peer_id); - self.continue_sync(io); - return Ok(()); - }, - Err(DownloaderImportError::Useless) => { - self.deactivate_peer(io, peer_id); - }, - Ok(()) => (), - } - - self.collect_blocks(io, block_set); - self.sync_peer(io, peer_id, false); - } - self.continue_sync(io); - Ok(()) - } - - /// Called by peer once it has new block receipts - fn on_peer_block_receipts(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), PacketDecodeError> { - self.clear_peer_download(peer_id); - let block_set = self.peers.get(&peer_id).and_then(|p| p.block_set).unwrap_or(BlockSet::NewBlocks); - if !self.reset_peer_asking(peer_id, PeerAsking::BlockReceipts) { - trace!(target: "sync", "{}: Ignored unexpected receipts", peer_id); - self.continue_sync(io); - return Ok(()); - } - let item_count = r.item_count()?; - trace!(target: "sync", "{} -> BlockReceipts ({} entries)", peer_id, item_count); - if item_count == 0 { - self.deactivate_peer(io, peer_id); - } - else if self.state == SyncState::Waiting { - trace!(target: "sync", "Ignored block receipts while waiting"); - } - else - { - let result = { - let downloader = match block_set { - BlockSet::NewBlocks => &mut self.new_blocks, - BlockSet::OldBlocks => match self.old_blocks { - None => { - trace!(target: "sync", "Ignored block headers while block download is inactive"); - self.continue_sync(io); - return Ok(()); - }, - Some(ref mut blocks) => blocks, - } - }; - downloader.import_receipts(io, r) - }; - - match result { - Err(DownloaderImportError::Invalid) => { - io.disable_peer(peer_id); - self.deactivate_peer(io, peer_id); - self.continue_sync(io); - return Ok(()); - }, - Err(DownloaderImportError::Useless) => { - self.deactivate_peer(io, peer_id); - }, - Ok(()) => (), - } - - self.collect_blocks(io, block_set); - self.sync_peer(io, peer_id, false); - } - self.continue_sync(io); - Ok(()) - } - - /// Called by peer once it has new block bodies - fn on_peer_new_block(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) 
-> Result<(), PacketDecodeError> { - if !self.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { - trace!(target: "sync", "Ignoring new block from unconfirmed peer {}", peer_id); - return Ok(()); - } - let difficulty: U256 = r.val_at(1)?; - if let Some(ref mut peer) = self.peers.get_mut(&peer_id) { - if peer.difficulty.map_or(true, |pd| difficulty > pd) { - peer.difficulty = Some(difficulty); - } - } - let block_rlp = r.at(0)?; - let header_rlp = block_rlp.at(0)?; - let h = keccak(&header_rlp.as_raw()); - trace!(target: "sync", "{} -> NewBlock ({})", peer_id, h); - let header: BlockHeader = header_rlp.as_val()?; - if header.number() > self.highest_block.unwrap_or(0) { - self.highest_block = Some(header.number()); - } - let mut unknown = false; - { - if let Some(ref mut peer) = self.peers.get_mut(&peer_id) { - peer.latest_hash = header.hash(); - } - } - let last_imported_number = self.new_blocks.last_imported_block_number(); - if last_imported_number > header.number() && last_imported_number - header.number() > MAX_NEW_BLOCK_AGE { - trace!(target: "sync", "Ignored ancient new block {:?}", h); - io.disable_peer(peer_id); - return Ok(()); - } - match io.chain().import_block(block_rlp.as_raw().to_vec()) { - Err(BlockImportError(BlockImportErrorKind::Import(ImportErrorKind::AlreadyInChain), _)) => { - trace!(target: "sync", "New block already in chain {:?}", h); - }, - Err(BlockImportError(BlockImportErrorKind::Import(ImportErrorKind::AlreadyQueued), _)) => { - trace!(target: "sync", "New block already queued {:?}", h); - }, - Ok(_) => { - // abort current download of the same block - self.complete_sync(io); - self.new_blocks.mark_as_known(&header.hash(), header.number()); - trace!(target: "sync", "New block queued {:?} ({})", h, header.number()); - }, - Err(BlockImportError(BlockImportErrorKind::Block(BlockError::UnknownParent(p)), _)) => { - unknown = true; - trace!(target: "sync", "New block with unknown parent ({:?}) {:?}", p, h); - }, - Err(e) => { - debug!(target: "sync", "Bad new block {:?} : {:?}", h, e); - io.disable_peer(peer_id); - } - }; - if unknown { - if self.state != SyncState::Idle { - trace!(target: "sync", "NewBlock ignored while seeking"); - } else { - trace!(target: "sync", "New unknown block {:?}", h); - //TODO: handle too many unknown blocks - self.sync_peer(io, peer_id, true); - } - } - self.continue_sync(io); - Ok(()) - } - - /// Handles `NewHashes` packet. Initiates headers download for any unknown hashes. 
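`on_peer_new_block` above boils down to a small decision table over the import result: already-known blocks are ignored, an unknown parent triggers a sync round with the announcing peer (but only when idle), and anything invalid disqualifies the peer. A sketch of that table, with ethcore's error types replaced by a hypothetical simplified enum:

```rust
// Sketch of the decision table in `on_peer_new_block`; `ImportOutcome` and
// `PeerAction` are illustrative names, not ethcore types.
#[derive(Debug)]
enum ImportOutcome {
	Imported,
	AlreadyInChain,
	AlreadyQueued,
	UnknownParent,
	Bad,
}

#[derive(Debug, PartialEq)]
enum PeerAction {
	/// Block accepted or already known: nothing further to do.
	None,
	/// Parent unknown: start a sync round with this peer to find it.
	SyncPeer,
	/// Malformed or invalid block: stop talking to this peer.
	DisablePeer,
}

fn handle_new_block(outcome: ImportOutcome, idle: bool) -> PeerAction {
	match outcome {
		ImportOutcome::Imported
		| ImportOutcome::AlreadyInChain
		| ImportOutcome::AlreadyQueued => PeerAction::None,
		// Only chase unknown parents when no sync round is in progress.
		ImportOutcome::UnknownParent if idle => PeerAction::SyncPeer,
		ImportOutcome::UnknownParent => PeerAction::None,
		ImportOutcome::Bad => PeerAction::DisablePeer,
	}
}

fn main() {
	assert_eq!(handle_new_block(ImportOutcome::UnknownParent, true), PeerAction::SyncPeer);
	assert_eq!(handle_new_block(ImportOutcome::Bad, true), PeerAction::DisablePeer);
}
```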
- fn on_peer_new_hashes(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), PacketDecodeError> { - if !self.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { - trace!(target: "sync", "Ignoring new hashes from unconfirmed peer {}", peer_id); - return Ok(()); - } - let hashes: Vec<_> = r.iter().take(MAX_NEW_HASHES).map(|item| (item.val_at::(0), item.val_at::(1))).collect(); - if let Some(ref mut peer) = self.peers.get_mut(&peer_id) { - // Peer has new blocks with unknown difficulty - peer.difficulty = None; - if let Some(&(Ok(ref h), _)) = hashes.last() { - peer.latest_hash = h.clone(); - } - } - if self.state != SyncState::Idle { - trace!(target: "sync", "Ignoring new hashes since we're already downloading."); - let max = r.iter().take(MAX_NEW_HASHES).map(|item| item.val_at::(1).unwrap_or(0)).fold(0u64, cmp::max); - if max > self.highest_block.unwrap_or(0) { - self.highest_block = Some(max); - } - self.continue_sync(io); - return Ok(()); - } - trace!(target: "sync", "{} -> NewHashes ({} entries)", peer_id, r.item_count()?); - let mut max_height: BlockNumber = 0; - let mut new_hashes = Vec::new(); - let last_imported_number = self.new_blocks.last_imported_block_number(); - for (rh, rn) in hashes { - let hash = rh?; - let number = rn?; - if number > self.highest_block.unwrap_or(0) { - self.highest_block = Some(number); - } - if self.new_blocks.is_downloading(&hash) { - continue; - } - if last_imported_number > number && last_imported_number - number > MAX_NEW_BLOCK_AGE { - trace!(target: "sync", "Ignored ancient new block hash {:?}", hash); - io.disable_peer(peer_id); - continue; - } - match io.chain().block_status(BlockId::Hash(hash.clone())) { - BlockStatus::InChain => { - trace!(target: "sync", "New block hash already in chain {:?}", hash); - }, - BlockStatus::Queued => { - trace!(target: "sync", "New hash block already queued {:?}", hash); - }, - BlockStatus::Unknown | BlockStatus::Pending => { - new_hashes.push(hash.clone()); - if number > max_height { - trace!(target: "sync", "New unknown block hash {:?}", hash); - if let Some(ref mut peer) = self.peers.get_mut(&peer_id) { - peer.latest_hash = hash.clone(); - } - max_height = number; - } - }, - BlockStatus::Bad => { - debug!(target: "sync", "Bad new block hash {:?}", hash); - io.disable_peer(peer_id); - return Ok(()); - } - } - }; - if max_height != 0 { - trace!(target: "sync", "Downloading blocks for new hashes"); - self.new_blocks.reset_to(new_hashes); - self.state = SyncState::NewBlocks; - self.sync_peer(io, peer_id, true); - } - self.continue_sync(io); - Ok(()) - } - - /// Called when snapshot manifest is downloaded from a peer. 
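Both `on_peer_new_block` and `on_peer_new_hashes` above apply the same age filter before reacting to an announcement: anything more than `MAX_NEW_BLOCK_AGE` blocks behind the last imported block is ignored and the announcing peer is disabled. A self-contained restatement of that check:

```rust
// Sketch of the ancient-announcement filter used in the two handlers above.
const MAX_NEW_BLOCK_AGE: u64 = 20;

fn is_ancient(last_imported_number: u64, announced_number: u64) -> bool {
	// Guard against underflow first, then compare the age window.
	last_imported_number > announced_number
		&& last_imported_number - announced_number > MAX_NEW_BLOCK_AGE
}

fn main() {
	assert!(!is_ancient(100, 90)); // within the age window: accept
	assert!(is_ancient(100, 70));  // too old: ignore and penalise the peer
}
```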
- fn on_snapshot_manifest(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), PacketDecodeError> { - if !self.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { - trace!(target: "sync", "Ignoring snapshot manifest from unconfirmed peer {}", peer_id); - return Ok(()); - } - self.clear_peer_download(peer_id); - if !self.reset_peer_asking(peer_id, PeerAsking::SnapshotManifest) || self.state != SyncState::SnapshotManifest { - trace!(target: "sync", "{}: Ignored unexpected/expired manifest", peer_id); - self.continue_sync(io); - return Ok(()); - } - - let manifest_rlp = r.at(0)?; - let manifest = match ManifestData::from_rlp(manifest_rlp.as_raw()) { - Err(e) => { - trace!(target: "sync", "{}: Ignored bad manifest: {:?}", peer_id, e); - io.disable_peer(peer_id); - self.continue_sync(io); - return Ok(()); - } - Ok(manifest) => manifest, - }; - - let is_supported_version = io.snapshot_service().supported_versions() - .map_or(false, |(l, h)| manifest.version >= l && manifest.version <= h); - - if !is_supported_version { - trace!(target: "sync", "{}: Snapshot manifest version not supported: {}", peer_id, manifest.version); - io.disable_peer(peer_id); - self.continue_sync(io); - return Ok(()); - } - self.snapshot.reset_to(&manifest, &keccak(manifest_rlp.as_raw())); - io.snapshot_service().begin_restore(manifest); - self.state = SyncState::SnapshotData; - - // give a task to the same peer first. - self.sync_peer(io, peer_id, false); - // give tasks to other peers - self.continue_sync(io); - Ok(()) - } - - /// Called when snapshot data is downloaded from a peer. - fn on_snapshot_data(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), PacketDecodeError> { - if !self.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { - trace!(target: "sync", "Ignoring snapshot data from unconfirmed peer {}", peer_id); - return Ok(()); - } - self.clear_peer_download(peer_id); - if !self.reset_peer_asking(peer_id, PeerAsking::SnapshotData) || (self.state != SyncState::SnapshotData && self.state != SyncState::SnapshotWaiting) { - trace!(target: "sync", "{}: Ignored unexpected snapshot data", peer_id); - self.continue_sync(io); - return Ok(()); - } - - // check service status - let status = io.snapshot_service().status(); - match status { - RestorationStatus::Inactive | RestorationStatus::Failed => { - trace!(target: "sync", "{}: Snapshot restoration aborted", peer_id); - self.state = SyncState::WaitingPeers; - - // only note bad if restoration failed. - if let (Some(hash), RestorationStatus::Failed) = (self.snapshot.snapshot_hash(), status) { - trace!(target: "sync", "Noting snapshot hash {} as bad", hash); - self.snapshot.note_bad(hash); - } - - self.snapshot.clear(); - self.continue_sync(io); - return Ok(()); - }, - RestorationStatus::Ongoing { .. 
} => { - trace!(target: "sync", "{}: Snapshot restoration is ongoing", peer_id); - }, - } - - let snapshot_data: Bytes = r.val_at(0)?; - match self.snapshot.validate_chunk(&snapshot_data) { - Ok(ChunkType::Block(hash)) => { - trace!(target: "sync", "{}: Processing block chunk", peer_id); - io.snapshot_service().restore_block_chunk(hash, snapshot_data); - } - Ok(ChunkType::State(hash)) => { - trace!(target: "sync", "{}: Processing state chunk", peer_id); - io.snapshot_service().restore_state_chunk(hash, snapshot_data); - } - Err(()) => { - trace!(target: "sync", "{}: Got bad snapshot chunk", peer_id); - io.disconnect_peer(peer_id); - self.continue_sync(io); - return Ok(()); - } - } - - if self.snapshot.is_complete() { - // wait for snapshot restoration process to complete - self.state = SyncState::SnapshotWaiting; - } - // give a task to the same peer first. - self.sync_peer(io, peer_id, false); - // give tasks to other peers - self.continue_sync(io); - Ok(()) - } - - /// Called by peer when it is disconnecting - pub fn on_peer_aborting(&mut self, io: &mut SyncIo, peer: PeerId) { - trace!(target: "sync", "== Disconnecting {}: {}", peer, io.peer_info(peer)); - self.handshaking_peers.remove(&peer); - if self.peers.contains_key(&peer) { - debug!(target: "sync", "Disconnected {}", peer); - self.clear_peer_download(peer); - self.peers.remove(&peer); - self.active_peers.remove(&peer); - self.continue_sync(io); - } - } - - /// Called when a new peer is connected - pub fn on_peer_connected(&mut self, io: &mut SyncIo, peer: PeerId) { - trace!(target: "sync", "== Connected {}: {}", peer, io.peer_info(peer)); - if let Err(e) = self.send_status(io, peer) { - debug!(target:"sync", "Error sending status request: {:?}", e); - io.disconnect_peer(peer); - } else { - self.handshaking_peers.insert(peer, Instant::now()); - } - } - - /// Resume downloading - fn continue_sync(&mut self, io: &mut SyncIo) { - let mut peers: Vec<(PeerId, U256, u8)> = self.peers.iter().filter_map(|(k, p)| - if p.can_sync() { Some((*k, p.difficulty.unwrap_or_else(U256::zero), p.protocol_version)) } else { None }).collect(); - random::new().shuffle(&mut peers); //TODO: sort by rating - // prefer peers with higher protocol version - peers.sort_by(|&(_, _, ref v1), &(_, _, ref v2)| v1.cmp(v2)); - trace!(target: "sync", "Syncing with peers: {} active, {} confirmed, {} total", self.active_peers.len(), peers.len(), self.peers.len()); - for (p, _, _) in peers { - if self.active_peers.contains(&p) { - self.sync_peer(io, p, false); - } - } - - if - self.state != SyncState::WaitingPeers && - self.state != SyncState::SnapshotWaiting && - self.state != SyncState::Waiting && - self.state != SyncState::Idle && - !self.peers.values().any(|p| p.asking != PeerAsking::Nothing && p.block_set != Some(BlockSet::OldBlocks) && p.can_sync()) - { - self.complete_sync(io); - } - } - - /// Called after all blocks have been downloaded - fn complete_sync(&mut self, io: &mut SyncIo) { - trace!(target: "sync", "Sync complete"); - self.reset(io); - self.state = SyncState::Idle; - } - - /// Enter waiting state - fn pause_sync(&mut self) { - trace!(target: "sync", "Block queue full, pausing sync"); - self.state = SyncState::Waiting; - } - - /// Find something to do for a peer. Called for a new peer or when a peer is done with its task. 
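The completion test at the end of continue_sync above can be restated compactly: a round is complete only when the state is an active one and no confirmed peer is still mid-request for anything other than background (old-block) download. A std-only restatement with stand-in types and field names:

    enum State { Idle, Waiting, WaitingPeers, SnapshotWaiting, Blocks, NewBlocks }

    struct Peer { asking_something: bool, downloading_old_blocks: bool, confirmed: bool }

    fn round_is_complete(state: &State, peers: &[Peer]) -> bool {
        !matches!(state, State::Idle | State::Waiting | State::WaitingPeers | State::SnapshotWaiting)
            && !peers.iter().any(|p| p.asking_something && !p.downloading_old_blocks && p.confirmed)
    }

    fn main() {
        let peers = [Peer { asking_something: true, downloading_old_blocks: true, confirmed: true }];
        assert!(round_is_complete(&State::Blocks, &peers)); // only background work left
    }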
- fn sync_peer(&mut self, io: &mut SyncIo, peer_id: PeerId, force: bool) { - if !self.active_peers.contains(&peer_id) { - trace!(target: "sync", "Skipping deactivated peer {}", peer_id); - return; - } - let (peer_latest, peer_difficulty, peer_snapshot_number, peer_snapshot_hash) = { - if let Some(peer) = self.peers.get_mut(&peer_id) { - if peer.asking != PeerAsking::Nothing || !peer.can_sync() { - trace!(target: "sync", "Skipping busy peer {}", peer_id); - return; - } - if self.state == SyncState::Waiting { - trace!(target: "sync", "Waiting for the block queue"); - return; - } - if self.state == SyncState::SnapshotWaiting { - trace!(target: "sync", "Waiting for the snapshot restoration"); - return; - } - (peer.latest_hash.clone(), peer.difficulty.clone(), peer.snapshot_number.as_ref().cloned().unwrap_or(0), peer.snapshot_hash.as_ref().cloned()) - } else { - return; - } - }; - let chain_info = io.chain().chain_info(); - let syncing_difficulty = chain_info.pending_total_difficulty; - let num_active_peers = self.peers.values().filter(|p| p.asking != PeerAsking::Nothing).count(); - - let higher_difficulty = peer_difficulty.map_or(true, |pd| pd > syncing_difficulty); - if force || higher_difficulty || self.old_blocks.is_some() { - match self.state { - SyncState::WaitingPeers => { - trace!( - target: "sync", - "Checking snapshot sync: {} vs {} (peer: {})", - peer_snapshot_number, - chain_info.best_block_number, - peer_id - ); - self.maybe_start_snapshot_sync(io); - }, - SyncState::Idle | SyncState::Blocks | SyncState::NewBlocks => { - if io.chain().queue_info().is_full() { - self.pause_sync(); - return; - } - - let have_latest = io.chain().block_status(BlockId::Hash(peer_latest)) != BlockStatus::Unknown; - trace!(target: "sync", "Considering peer {}, force={}, td={:?}, our td={}, latest={}, have_latest={}, state={:?}", peer_id, force, peer_difficulty, syncing_difficulty, peer_latest, have_latest, self.state); - if !have_latest && (higher_difficulty || force || self.state == SyncState::NewBlocks) { - // check if got new blocks to download - trace!(target: "sync", "Syncing with peer {}, force={}, td={:?}, our td={}, state={:?}", peer_id, force, peer_difficulty, syncing_difficulty, self.state); - if let Some(request) = self.new_blocks.request_blocks(io, num_active_peers) { - self.request_blocks(io, peer_id, request, BlockSet::NewBlocks); - if self.state == SyncState::Idle { - self.state = SyncState::Blocks; - } - return; - } - } - - if let Some(request) = self.old_blocks.as_mut().and_then(|d| d.request_blocks(io, num_active_peers)) { - self.request_blocks(io, peer_id, request, BlockSet::OldBlocks); - return; - } - }, - SyncState::SnapshotData => { - if let RestorationStatus::Ongoing { state_chunks_done, block_chunks_done, .. 
} = io.snapshot_service().status() { - if self.snapshot.done_chunks() - (state_chunks_done + block_chunks_done) as usize > MAX_SNAPSHOT_CHUNKS_DOWNLOAD_AHEAD { - trace!(target: "sync", "Snapshot queue full, pausing sync"); - self.state = SyncState::SnapshotWaiting; - return; - } - } - if peer_snapshot_hash.is_some() && peer_snapshot_hash == self.snapshot.snapshot_hash() { - self.request_snapshot_data(io, peer_id); - } - }, - SyncState::SnapshotManifest | //already downloading from other peer - SyncState::Waiting | SyncState::SnapshotWaiting => () - } - } else { - trace!(target: "sync", "Skipping peer {}, force={}, td={:?}, our td={}, state={:?}", peer_id, force, peer_difficulty, syncing_difficulty, self.state); - } - } - - /// Perofrm block download request` - fn request_blocks(&mut self, io: &mut SyncIo, peer_id: PeerId, request: BlockRequest, block_set: BlockSet) { - match request { - BlockRequest::Headers { start, count, skip } => { - self.request_headers_by_hash(io, peer_id, &start, count, skip, false, block_set); - }, - BlockRequest::Bodies { hashes } => { - self.request_bodies(io, peer_id, hashes, block_set); - }, - BlockRequest::Receipts { hashes } => { - self.request_receipts(io, peer_id, hashes, block_set); - }, - } - } - - /// Find some headers or blocks to download for a peer. - fn request_snapshot_data(&mut self, io: &mut SyncIo, peer_id: PeerId) { - self.clear_peer_download(peer_id); - // find chunk data to download - if let Some(hash) = self.snapshot.needed_chunk() { - if let Some(ref mut peer) = self.peers.get_mut(&peer_id) { - peer.asking_snapshot_data = Some(hash.clone()); - } - self.request_snapshot_chunk(io, peer_id, &hash); - } - } - - /// Clear all blocks/headers marked as being downloaded by a peer. - fn clear_peer_download(&mut self, peer_id: PeerId) { - if let Some(ref mut peer) = self.peers.get_mut(&peer_id) { - match peer.asking { - PeerAsking::BlockHeaders => { - if let Some(ref hash) = peer.asking_hash { - self.new_blocks.clear_header_download(hash); - if let Some(ref mut old) = self.old_blocks { - old.clear_header_download(hash); - } - } - }, - PeerAsking::BlockBodies => { - self.new_blocks.clear_body_download(&peer.asking_blocks); - if let Some(ref mut old) = self.old_blocks { - old.clear_body_download(&peer.asking_blocks); - } - }, - PeerAsking::BlockReceipts => { - self.new_blocks.clear_receipt_download(&peer.asking_blocks); - if let Some(ref mut old) = self.old_blocks { - old.clear_receipt_download(&peer.asking_blocks); - } - }, - PeerAsking::SnapshotData => { - if let Some(hash) = peer.asking_snapshot_data { - self.snapshot.clear_chunk_download(&hash); - } - }, - _ => (), - } - } - } - - /// Checks if there are blocks fully downloaded that can be imported into the blockchain and does the import. 
- fn collect_blocks(&mut self, io: &mut SyncIo, block_set: BlockSet) { - match block_set { - BlockSet::NewBlocks => { - if self.new_blocks.collect_blocks(io, self.state == SyncState::NewBlocks) == Err(DownloaderImportError::Invalid) { - self.restart(io); - } - }, - BlockSet::OldBlocks => { - if self.old_blocks.as_mut().map_or(false, |downloader| { downloader.collect_blocks(io, false) == Err(DownloaderImportError::Invalid) }) { - self.restart(io); - } else if self.old_blocks.as_ref().map_or(false, |downloader| { downloader.is_complete() }) { - trace!(target: "sync", "Background block download is complete"); - self.old_blocks = None; - } - } - } - } - - /// Request headers from a peer by block hash - fn request_headers_by_hash(&mut self, sync: &mut SyncIo, peer_id: PeerId, h: &H256, count: u64, skip: u64, reverse: bool, set: BlockSet) { - trace!(target: "sync", "{} <- GetBlockHeaders: {} entries starting from {}, set = {:?}", peer_id, count, h, set); - let mut rlp = RlpStream::new_list(4); - rlp.append(h); - rlp.append(&count); - rlp.append(&skip); - rlp.append(&if reverse {1u32} else {0u32}); - self.send_request(sync, peer_id, PeerAsking::BlockHeaders, GET_BLOCK_HEADERS_PACKET, rlp.out()); - let peer = self.peers.get_mut(&peer_id).expect("peer_id may originate either from on_packet, where it is already validated or from enumerating self.peers. qed"); - peer.asking_hash = Some(h.clone()); - peer.block_set = Some(set); - } - - /// Request headers from a peer by block number - fn request_fork_header_by_number(&mut self, sync: &mut SyncIo, peer_id: PeerId, n: BlockNumber) { - trace!(target: "sync", "{} <- GetForkHeader: at {}", peer_id, n); - let mut rlp = RlpStream::new_list(4); - rlp.append(&n); - rlp.append(&1u32); - rlp.append(&0u32); - rlp.append(&0u32); - self.send_request(sync, peer_id, PeerAsking::ForkHeader, GET_BLOCK_HEADERS_PACKET, rlp.out()); - } - - /// Request snapshot manifest from a peer. - fn request_snapshot_manifest(&mut self, sync: &mut SyncIo, peer_id: PeerId) { - trace!(target: "sync", "{} <- GetSnapshotManifest", peer_id); - let rlp = RlpStream::new_list(0); - self.send_request(sync, peer_id, PeerAsking::SnapshotManifest, GET_SNAPSHOT_MANIFEST_PACKET, rlp.out()); - } - - /// Request snapshot chunk from a peer. - fn request_snapshot_chunk(&mut self, sync: &mut SyncIo, peer_id: PeerId, chunk: &H256) { - trace!(target: "sync", "{} <- GetSnapshotData {:?}", peer_id, chunk); - let mut rlp = RlpStream::new_list(1); - rlp.append(chunk); - self.send_request(sync, peer_id, PeerAsking::SnapshotData, GET_SNAPSHOT_DATA_PACKET, rlp.out()); - } - - /// Request block bodies from a peer - fn request_bodies(&mut self, sync: &mut SyncIo, peer_id: PeerId, hashes: Vec, set: BlockSet) { - let mut rlp = RlpStream::new_list(hashes.len()); - trace!(target: "sync", "{} <- GetBlockBodies: {} entries starting from {:?}, set = {:?}", peer_id, hashes.len(), hashes.first(), set); - for h in &hashes { - rlp.append(&h.clone()); - } - self.send_request(sync, peer_id, PeerAsking::BlockBodies, GET_BLOCK_BODIES_PACKET, rlp.out()); - let peer = self.peers.get_mut(&peer_id).expect("peer_id may originate either from on_packet, where it is already validated or from enumerating self.peers. 
qed"); - peer.asking_blocks = hashes; - peer.block_set = Some(set); - } - - /// Request block receipts from a peer - fn request_receipts(&mut self, sync: &mut SyncIo, peer_id: PeerId, hashes: Vec, set: BlockSet) { - let mut rlp = RlpStream::new_list(hashes.len()); - trace!(target: "sync", "{} <- GetBlockReceipts: {} entries starting from {:?}, set = {:?}", peer_id, hashes.len(), hashes.first(), set); - for h in &hashes { - rlp.append(&h.clone()); - } - self.send_request(sync, peer_id, PeerAsking::BlockReceipts, GET_RECEIPTS_PACKET, rlp.out()); - let peer = self.peers.get_mut(&peer_id).expect("peer_id may originate either from on_packet, where it is already validated or from enumerating self.peers. qed"); - peer.asking_blocks = hashes; - peer.block_set = Some(set); - } - - /// Reset peer status after request is complete. - fn reset_peer_asking(&mut self, peer_id: PeerId, asking: PeerAsking) -> bool { - if let Some(ref mut peer) = self.peers.get_mut(&peer_id) { - peer.expired = false; - peer.block_set = None; - if peer.asking != asking { - trace!(target:"sync", "Asking {:?} while expected {:?}", peer.asking, asking); - peer.asking = PeerAsking::Nothing; - return false; - } else { - peer.asking = PeerAsking::Nothing; - return true; - } - } - false - } - - /// Generic request sender - fn send_request(&mut self, sync: &mut SyncIo, peer_id: PeerId, asking: PeerAsking, packet_id: PacketId, packet: Bytes) { - if let Some(ref mut peer) = self.peers.get_mut(&peer_id) { - if peer.asking != PeerAsking::Nothing { - warn!(target:"sync", "Asking {:?} while requesting {:?}", peer.asking, asking); - } - peer.asking = asking; - peer.ask_time = Instant::now(); - let result = if packet_id >= ETH_PACKET_COUNT { - sync.send_protocol(WARP_SYNC_PROTOCOL_ID, peer_id, packet_id, packet) - } else { - sync.send(peer_id, packet_id, packet) - }; - if let Err(e) = result { - debug!(target:"sync", "Error sending request: {:?}", e); - sync.disconnect_peer(peer_id); - } - } - } - - /// Generic packet sender - fn send_packet(&mut self, sync: &mut SyncIo, peer_id: PeerId, packet_id: PacketId, packet: Bytes) { - if let Err(e) = sync.send(peer_id, packet_id, packet) { - debug!(target:"sync", "Error sending packet: {:?}", e); - sync.disconnect_peer(peer_id); - } - } - - /// Called when peer sends us new transactions - fn on_peer_transactions(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), PacketDecodeError> { - // Accept transactions only when fully synced - if !io.is_chain_queue_empty() || (self.state != SyncState::Idle && self.state != SyncState::NewBlocks) { - trace!(target: "sync", "{} Ignoring transactions while syncing", peer_id); - return Ok(()); - } - if !self.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { - trace!(target: "sync", "{} Ignoring transactions from unconfirmed/unknown peer", peer_id); - return Ok(()); - } - - let item_count = r.item_count()?; - trace!(target: "sync", "{:02} -> Transactions ({} entries)", peer_id, item_count); - let mut transactions = Vec::with_capacity(item_count); - for i in 0 .. 
item_count { - let rlp = r.at(i)?; - let tx = rlp.as_raw().to_vec(); - transactions.push(tx); - } - io.chain().queue_transactions(transactions, peer_id); - Ok(()) - } - - /// Send Status message - fn send_status(&mut self, io: &mut SyncIo, peer: PeerId) -> Result<(), network::Error> { - let warp_protocol_version = io.protocol_version(&WARP_SYNC_PROTOCOL_ID, peer); - let warp_protocol = warp_protocol_version != 0; - let protocol = if warp_protocol { warp_protocol_version } else { ETH_PROTOCOL_VERSION_63 }; - trace!(target: "sync", "Sending status to {}, protocol version {}", peer, protocol); - let mut packet = RlpStream::new_list(if warp_protocol { 7 } else { 5 }); - let chain = io.chain().chain_info(); - packet.append(&(protocol as u32)); - packet.append(&self.network_id); - packet.append(&chain.total_difficulty); - packet.append(&chain.best_block_hash); - packet.append(&chain.genesis_hash); - if warp_protocol { - let manifest = match self.old_blocks.is_some() { - true => None, - false => io.snapshot_service().manifest(), - }; - let block_number = manifest.as_ref().map_or(0, |m| m.block_number); - let manifest_hash = manifest.map_or(H256::new(), |m| keccak(m.into_rlp())); - packet.append(&manifest_hash); - packet.append(&block_number); - } - io.respond(STATUS_PACKET, packet.out()) - } - - /// Respond to GetBlockHeaders request - fn return_block_headers(io: &SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult { - // Packet layout: - // [ block: { P , B_32 }, maxHeaders: P, skip: P, reverse: P in { 0 , 1 } ] - let max_headers: usize = r.val_at(1)?; - let skip: usize = r.val_at(2)?; - let reverse: bool = r.val_at(3)?; - let last = io.chain().chain_info().best_block_number; - let number = if r.at(0)?.size() == 32 { - // id is a hash - let hash: H256 = r.val_at(0)?; - trace!(target: "sync", "{} -> GetBlockHeaders (hash: {}, max: {}, skip: {}, reverse:{})", peer_id, hash, max_headers, skip, reverse); - match io.chain().block_header(BlockId::Hash(hash)) { - Some(hdr) => { - let number = hdr.number().into(); - debug_assert_eq!(hdr.hash(), hash); - - if max_headers == 1 || io.chain().block_hash(BlockId::Number(number)) != Some(hash) { - // Non canonical header or single header requested - // TODO: handle single-step reverse hashchains of non-canon hashes - trace!(target:"sync", "Returning single header: {:?}", hash); - let mut rlp = RlpStream::new_list(1); - rlp.append_raw(&hdr.into_inner(), 1); - return Ok(Some((BLOCK_HEADERS_PACKET, rlp))); - } - number - } - None => return Ok(Some((BLOCK_HEADERS_PACKET, RlpStream::new_list(0)))) //no such header, return nothing - } - } else { - trace!(target: "sync", "{} -> GetBlockHeaders (number: {}, max: {}, skip: {}, reverse:{})", peer_id, r.val_at::(0)?, max_headers, skip, reverse); - r.val_at(0)? - }; - - let mut number = if reverse { - cmp::min(last, number) - } else { - cmp::max(0, number) - }; - let max_count = cmp::min(MAX_HEADERS_TO_SEND, max_headers); - let mut count = 0; - let mut data = Bytes::new(); - let inc = (skip + 1) as BlockNumber; - let overlay = io.chain_overlay().read(); - - while number <= last && count < max_count { - if let Some(hdr) = overlay.get(&number) { - trace!(target: "sync", "{}: Returning cached fork header", peer_id); - data.extend_from_slice(hdr); - count += 1; - } else if let Some(hdr) = io.chain().block_header(BlockId::Number(number)) { - data.append(&mut hdr.into_inner()); - count += 1; - } else { - // No required block. 
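    // Sketch (std-only) of the stepping arithmetic in the loop above: each
    // iteration advances by skip + 1, downward when `reverse` is set. With
    // start = 50, skip = 5, max = 3 this yields 50, 56, 62 forward and
    // 50, 44, 38 in reverse; these are exactly the sequences asserted by
    // the return_block_headers test near the end of this file.
    fn header_walk(start: u64, last: u64, skip: u64, max: usize, reverse: bool) -> Vec<u64> {
        let inc = skip + 1;
        let mut number = if reverse { start.min(last) } else { start };
        let mut out = Vec::new();
        while number <= last && out.len() < max {
            out.push(number);
            if reverse {
                if number <= inc { break; }
                number -= inc;
            } else {
                number += inc;
            }
        }
        out
    }

    fn main() {
        assert_eq!(header_walk(50, 100, 5, 3, false), vec![50, 56, 62]);
        assert_eq!(header_walk(50, 100, 5, 3, true), vec![50, 44, 38]);
    }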
- break; - } - if reverse { - if number <= inc || number == 0 { - break; - } - number -= inc; - } - else { - number += inc; - } - } - let mut rlp = RlpStream::new_list(count as usize); - rlp.append_raw(&data, count as usize); - trace!(target: "sync", "{} -> GetBlockHeaders: returned {} entries", peer_id, count); - Ok(Some((BLOCK_HEADERS_PACKET, rlp))) - } - - /// Respond to GetBlockBodies request - fn return_block_bodies(io: &SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult { - let mut count = r.item_count().unwrap_or(0); - if count == 0 { - debug!(target: "sync", "Empty GetBlockBodies request, ignoring."); - return Ok(None); - } - count = cmp::min(count, MAX_BODIES_TO_SEND); - let mut added = 0usize; - let mut data = Bytes::new(); - for i in 0..count { - if let Some(body) = io.chain().block_body(BlockId::Hash(r.val_at::(i)?)) { - data.append(&mut body.into_inner()); - added += 1; - } - } - let mut rlp = RlpStream::new_list(added); - rlp.append_raw(&data, added); - trace!(target: "sync", "{} -> GetBlockBodies: returned {} entries", peer_id, added); - Ok(Some((BLOCK_BODIES_PACKET, rlp))) - } - - /// Respond to GetNodeData request - fn return_node_data(io: &SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult { - let mut count = r.item_count().unwrap_or(0); - trace!(target: "sync", "{} -> GetNodeData: {} entries", peer_id, count); - if count == 0 { - debug!(target: "sync", "Empty GetNodeData request, ignoring."); - return Ok(None); - } - count = cmp::min(count, MAX_NODE_DATA_TO_SEND); - let mut added = 0usize; - let mut data = Vec::new(); - for i in 0..count { - if let Some(node) = io.chain().state_data(&r.val_at::(i)?) { - data.push(node); - added += 1; - } - } - trace!(target: "sync", "{} -> GetNodeData: return {} entries", peer_id, added); - let mut rlp = RlpStream::new_list(added); - for d in data { - rlp.append(&d); - } - Ok(Some((NODE_DATA_PACKET, rlp))) - } - - fn return_receipts(io: &SyncIo, rlp: &Rlp, peer_id: PeerId) -> RlpResponseResult { - let mut count = rlp.item_count().unwrap_or(0); - trace!(target: "sync", "{} -> GetReceipts: {} entries", peer_id, count); - if count == 0 { - debug!(target: "sync", "Empty GetReceipts request, ignoring."); - return Ok(None); - } - count = cmp::min(count, MAX_RECEIPTS_HEADERS_TO_SEND); - let mut added_headers = 0usize; - let mut added_receipts = 0usize; - let mut data = Bytes::new(); - for i in 0..count { - if let Some(mut receipts_bytes) = io.chain().block_receipts(&rlp.val_at::(i)?) 
{ - data.append(&mut receipts_bytes); - added_receipts += receipts_bytes.len(); - added_headers += 1; - if added_receipts > MAX_RECEIPTS_TO_SEND { break; } - } - } - let mut rlp_result = RlpStream::new_list(added_headers); - rlp_result.append_raw(&data, added_headers); - Ok(Some((RECEIPTS_PACKET, rlp_result))) - } - - /// Respond to GetSnapshotManifest request - fn return_snapshot_manifest(io: &SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult { - let count = r.item_count().unwrap_or(0); - trace!(target: "sync", "{} -> GetSnapshotManifest", peer_id); - if count != 0 { - debug!(target: "sync", "Invalid GetSnapshotManifest request, ignoring."); - return Ok(None); - } - let rlp = match io.snapshot_service().manifest() { - Some(manifest) => { - trace!(target: "sync", "{} <- SnapshotManifest", peer_id); - let mut rlp = RlpStream::new_list(1); - rlp.append_raw(&manifest.into_rlp(), 1); - rlp - }, - None => { - trace!(target: "sync", "{}: No manifest to return", peer_id); - RlpStream::new_list(0) - } - }; - Ok(Some((SNAPSHOT_MANIFEST_PACKET, rlp))) - } - - /// Respond to GetSnapshotData request - fn return_snapshot_data(io: &SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult { - let hash: H256 = r.val_at(0)?; - trace!(target: "sync", "{} -> GetSnapshotData {:?}", peer_id, hash); - let rlp = match io.snapshot_service().chunk(hash) { - Some(data) => { - let mut rlp = RlpStream::new_list(1); - trace!(target: "sync", "{} <- SnapshotData", peer_id); - rlp.append(&data); - rlp - }, - None => { - RlpStream::new_list(0) - } - }; - Ok(Some((SNAPSHOT_DATA_PACKET, rlp))) - } - - fn return_rlp(io: &mut SyncIo, rlp: &Rlp, peer: PeerId, rlp_func: FRlp, error_func: FError) -> Result<(), PacketDecodeError> - where FRlp : Fn(&SyncIo, &Rlp, PeerId) -> RlpResponseResult, - FError : FnOnce(network::Error) -> String - { - let response = rlp_func(io, rlp, peer); - match response { - Err(e) => Err(e), - Ok(Some((packet_id, rlp_stream))) => { - io.respond(packet_id, rlp_stream.out()).unwrap_or_else( - |e| debug!(target: "sync", "{:?}", error_func(e))); - Ok(()) - } - _ => Ok(()) - } - } - - /// Dispatch incoming requests and responses - pub fn dispatch_packet(sync: &RwLock, io: &mut SyncIo, peer: PeerId, packet_id: u8, data: &[u8]) { - let rlp = Rlp::new(data); - let result = match packet_id { - GET_BLOCK_BODIES_PACKET => ChainSync::return_rlp(io, &rlp, peer, - ChainSync::return_block_bodies, - |e| format!("Error sending block bodies: {:?}", e)), - - GET_BLOCK_HEADERS_PACKET => ChainSync::return_rlp(io, &rlp, peer, - ChainSync::return_block_headers, - |e| format!("Error sending block headers: {:?}", e)), - - GET_RECEIPTS_PACKET => ChainSync::return_rlp(io, &rlp, peer, - ChainSync::return_receipts, - |e| format!("Error sending receipts: {:?}", e)), - - GET_NODE_DATA_PACKET => ChainSync::return_rlp(io, &rlp, peer, - ChainSync::return_node_data, - |e| format!("Error sending nodes: {:?}", e)), - - GET_SNAPSHOT_MANIFEST_PACKET => ChainSync::return_rlp(io, &rlp, peer, - ChainSync::return_snapshot_manifest, - |e| format!("Error sending snapshot manifest: {:?}", e)), - - GET_SNAPSHOT_DATA_PACKET => ChainSync::return_rlp(io, &rlp, peer, - ChainSync::return_snapshot_data, - |e| format!("Error sending snapshot data: {:?}", e)), - CONSENSUS_DATA_PACKET => ChainSync::on_consensus_packet(io, peer, &rlp), - _ => { - sync.write().on_packet(io, peer, packet_id, data); - Ok(()) - } - }; - result.unwrap_or_else(|e| { - debug!(target:"sync", "{} -> Malformed packet {} : {}", peer, packet_id, e); - }) - } - - pub fn on_packet(&mut 
self, io: &mut SyncIo, peer: PeerId, packet_id: u8, data: &[u8]) {
-        debug!(target: "sync", "{} -> Dispatching packet: {}", peer, packet_id);
-
-        if packet_id != STATUS_PACKET && !self.peers.contains_key(&peer) {
-            debug!(target:"sync", "Unexpected packet {} from unregistered peer: {}:{}", packet_id, peer, io.peer_info(peer));
-            return;
-        }
-
-        let rlp = Rlp::new(data);
-        let result = match packet_id {
-            STATUS_PACKET => self.on_peer_status(io, peer, &rlp),
-            TRANSACTIONS_PACKET => self.on_peer_transactions(io, peer, &rlp),
-            BLOCK_HEADERS_PACKET => self.on_peer_block_headers(io, peer, &rlp),
-            BLOCK_BODIES_PACKET => self.on_peer_block_bodies(io, peer, &rlp),
-            RECEIPTS_PACKET => self.on_peer_block_receipts(io, peer, &rlp),
-            NEW_BLOCK_PACKET => self.on_peer_new_block(io, peer, &rlp),
-            NEW_BLOCK_HASHES_PACKET => self.on_peer_new_hashes(io, peer, &rlp),
-            SNAPSHOT_MANIFEST_PACKET => self.on_snapshot_manifest(io, peer, &rlp),
-            SNAPSHOT_DATA_PACKET => self.on_snapshot_data(io, peer, &rlp),
-            PRIVATE_TRANSACTION_PACKET => self.on_private_transaction(io, peer, &rlp),
-            SIGNED_PRIVATE_TRANSACTION_PACKET => self.on_signed_private_transaction(io, peer, &rlp),
-            _ => {
-                debug!(target: "sync", "{}: Unknown packet {}", peer, packet_id);
-                Ok(())
-            }
-        };
-        result.unwrap_or_else(|e| {
-            debug!(target:"sync", "{} -> Malformed packet {} : {}", peer, packet_id, e);
-        })
-    }
-
-    pub fn maintain_peers(&mut self, io: &mut SyncIo) {
-        let tick = Instant::now();
-        let mut aborting = Vec::new();
-        for (peer_id, peer) in &self.peers {
-            let elapsed = tick - peer.ask_time;
-            let timeout = match peer.asking {
-                PeerAsking::BlockHeaders => elapsed > HEADERS_TIMEOUT,
-                PeerAsking::BlockBodies => elapsed > BODIES_TIMEOUT,
-                PeerAsking::BlockReceipts => elapsed > RECEIPTS_TIMEOUT,
-                PeerAsking::Nothing => false,
-                PeerAsking::ForkHeader => elapsed > FORK_HEADER_TIMEOUT,
-                PeerAsking::SnapshotManifest => elapsed > SNAPSHOT_MANIFEST_TIMEOUT,
-                PeerAsking::SnapshotData => elapsed > SNAPSHOT_DATA_TIMEOUT,
-            };
-            if timeout {
-                debug!(target:"sync", "Timeout {}", peer_id);
-                io.disconnect_peer(*peer_id);
-                aborting.push(*peer_id);
-            }
-        }
-        for p in aborting {
-            self.on_peer_aborting(io, p);
-        }
-
-        // Check for handshake timeouts
-        for (peer, &ask_time) in &self.handshaking_peers {
-            let elapsed = (tick - ask_time) / 1_000_000_000;
-            if elapsed > STATUS_TIMEOUT {
-                trace!(target:"sync", "Status timeout {}", peer);
-                io.disconnect_peer(*peer);
-            }
-        }
-    }
-
-    fn check_resume(&mut self, io: &mut SyncIo) {
-        if self.state == SyncState::Waiting && !io.chain().queue_info().is_full() {
-            self.state = SyncState::Blocks;
-            self.continue_sync(io);
-        } else if self.state == SyncState::SnapshotWaiting {
-            match io.snapshot_service().status() {
-                RestorationStatus::Inactive => {
-                    trace!(target:"sync", "Snapshot restoration is complete");
-                    self.restart(io);
-                    self.continue_sync(io);
-                },
-                RestorationStatus::Ongoing { state_chunks_done, block_chunks_done, ..
} => {
-                    if !self.snapshot.is_complete() && self.snapshot.done_chunks() - (state_chunks_done + block_chunks_done) as usize <= MAX_SNAPSHOT_CHUNKS_DOWNLOAD_AHEAD {
-                        trace!(target:"sync", "Resuming snapshot sync");
-                        self.state = SyncState::SnapshotData;
-                        self.continue_sync(io);
-                    }
-                },
-                RestorationStatus::Failed => {
-                    trace!(target: "sync", "Snapshot restoration aborted");
-                    self.state = SyncState::WaitingPeers;
-                    self.snapshot.clear();
-                    self.continue_sync(io);
-                },
-            }
-        }
-    }
-
-    /// creates rlp to send for the tree defined by 'from' and 'to' hashes
-    fn create_new_hashes_rlp(chain: &BlockChainClient, from: &H256, to: &H256) -> Option<Bytes> {
-        match chain.tree_route(from, to) {
-            Some(route) => {
-                let uncles = chain.find_uncles(from).unwrap_or_else(Vec::new);
-                match route.blocks.len() {
-                    0 => None,
-                    _ => {
-                        let mut blocks = route.blocks;
-                        blocks.extend(uncles);
-                        let mut rlp_stream = RlpStream::new_list(blocks.len());
-                        for block_hash in blocks {
-                            let mut hash_rlp = RlpStream::new_list(2);
-                            let number = chain.block_header(BlockId::Hash(block_hash.clone()))
-                                .expect("chain.tree_route and chain.find_uncles only return hashes of blocks that are in the blockchain. qed.").number();
-                            hash_rlp.append(&block_hash);
-                            hash_rlp.append(&number);
-                            rlp_stream.append_raw(hash_rlp.as_raw(), 1);
-                        }
-                        Some(rlp_stream.out())
-                    }
-                }
-            },
-            None => None
-        }
-    }
-
-    /// creates rlp from block bytes and total difficulty
-    fn create_block_rlp(bytes: &Bytes, total_difficulty: U256) -> Bytes {
-        let mut rlp_stream = RlpStream::new_list(2);
-        rlp_stream.append_raw(bytes, 1);
-        rlp_stream.append(&total_difficulty);
-        rlp_stream.out()
-    }
-
-    /// creates latest block rlp for the given client
-    fn create_latest_block_rlp(chain: &BlockChainClient) -> Bytes {
-        ChainSync::create_block_rlp(
-            &chain.block(BlockId::Hash(chain.chain_info().best_block_hash))
-                .expect("Best block always exists").into_inner(),
-            chain.chain_info().total_difficulty
-        )
-    }
-
-    /// creates given hash block rlp for the given client
-    fn create_new_block_rlp(chain: &BlockChainClient, hash: &H256) -> Bytes {
-        ChainSync::create_block_rlp(
-            &chain.block(BlockId::Hash(hash.clone())).expect("Block has just been sealed; qed").into_inner(),
-            chain.block_total_difficulty(BlockId::Hash(hash.clone())).expect("Block has just been sealed; qed.")
-        )
-    }
-
-    /// returns peer ids that have different blocks than our chain
-    fn get_lagging_peers(&mut self, chain_info: &BlockChainInfo) -> Vec<PeerId> {
-        let latest_hash = chain_info.best_block_hash;
-        self
-            .peers
-            .iter_mut()
-            .filter_map(|(&id, ref mut peer_info)| {
-                trace!(target: "sync", "Checking peer our best {} their best {}", latest_hash, peer_info.latest_hash);
-                if peer_info.latest_hash != latest_hash {
-                    Some(id)
-                } else {
-                    None
-                }
-            })
-            .collect::<Vec<_>>()
-    }
-
-    fn select_random_peers(peers: &[PeerId]) -> Vec<PeerId> {
-        // take sqrt(x) peers
-        let mut peers = peers.to_vec();
-        let mut count = (peers.len() as f64).powf(0.5).round() as usize;
-        count = cmp::min(count, MAX_PEERS_PROPAGATION);
-        count = cmp::max(count, MIN_PEERS_PROPAGATION);
-        random::new().shuffle(&mut peers);
-        peers.truncate(count);
-        peers
-    }
-
-    fn get_consensus_peers(&self) -> Vec<PeerId> {
-        self.peers.iter().filter_map(|(id, p)| if p.protocol_version >= PAR_PROTOCOL_VERSION_2 { Some(*id) } else { None }).collect()
-    }
-
-    fn get_private_transaction_peers(&self) -> Vec<PeerId> {
-        self.peers.iter().filter_map(|(id, p)| if p.protocol_version >= PAR_PROTOCOL_VERSION_3 { Some(*id) } else { None }).collect()
-    }
-
-    /// propagates latest
block to a set of peers - fn propagate_blocks(&mut self, chain_info: &BlockChainInfo, io: &mut SyncIo, blocks: &[H256], peers: &[PeerId]) -> usize { - trace!(target: "sync", "Sending NewBlocks to {:?}", peers); - let mut sent = 0; - for peer_id in peers { - if blocks.is_empty() { - let rlp = ChainSync::create_latest_block_rlp(io.chain()); - self.send_packet(io, *peer_id, NEW_BLOCK_PACKET, rlp); - } else { - for h in blocks { - let rlp = ChainSync::create_new_block_rlp(io.chain(), h); - self.send_packet(io, *peer_id, NEW_BLOCK_PACKET, rlp); - } - } - if let Some(ref mut peer) = self.peers.get_mut(peer_id) { - peer.latest_hash = chain_info.best_block_hash.clone(); - } - sent += 1; - } - sent - } - - /// propagates new known hashes to all peers - fn propagate_new_hashes(&mut self, chain_info: &BlockChainInfo, io: &mut SyncIo, peers: &[PeerId]) -> usize { - trace!(target: "sync", "Sending NewHashes to {:?}", peers); - let mut sent = 0; - let last_parent = *io.chain().best_block_header().parent_hash(); - for peer_id in peers { - sent += match ChainSync::create_new_hashes_rlp(io.chain(), &last_parent, &chain_info.best_block_hash) { - Some(rlp) => { - { - if let Some(ref mut peer) = self.peers.get_mut(peer_id) { - peer.latest_hash = chain_info.best_block_hash.clone(); - } - } - self.send_packet(io, *peer_id, NEW_BLOCK_HASHES_PACKET, rlp); - 1 - }, - None => 0 - } - } - sent - } - - /// propagates new transactions to all peers - pub fn propagate_new_transactions(&mut self, io: &mut SyncIo) -> usize { - // Early out if nobody to send to. - if self.peers.is_empty() { - return 0; - } - - let transactions = io.chain().ready_transactions(); - if transactions.is_empty() { - return 0; - } - - let (transactions, service_transactions): (Vec<_>, Vec<_>) = transactions.iter() - .map(|tx| tx.signed()) - .partition(|tx| !tx.gas_price.is_zero()); - - // usual transactions could be propagated to all peers - let mut affected_peers = HashSet::new(); - if !transactions.is_empty() { - let peers = self.select_peers_for_transactions(|_| true); - affected_peers = self.propagate_transactions_to_peers(io, peers, transactions); - } - - // most of times service_transactions will be empty - // => there's no need to merge packets - if !service_transactions.is_empty() { - let service_transactions_peers = self.select_peers_for_transactions(|peer_id| accepts_service_transaction(&io.peer_info(*peer_id))); - let service_transactions_affected_peers = self.propagate_transactions_to_peers(io, service_transactions_peers, service_transactions); - affected_peers.extend(&service_transactions_affected_peers); - } - - affected_peers.len() - } - - fn select_peers_for_transactions(&self, filter: F) -> Vec - where F: Fn(&PeerId) -> bool { - // sqrt(x)/x scaled to max u32 - let fraction = ((self.peers.len() as f64).powf(-0.5) * (u32::max_value() as f64).round()) as u32; - let small = self.peers.len() < MIN_PEERS_PROPAGATION; - - let mut random = random::new(); - self.peers.keys() - .cloned() - .filter(filter) - .filter(|_| small || random.next_u32() < fraction) - .take(MAX_PEERS_PROPAGATION) - .collect() - } - - fn propagate_transactions_to_peers(&mut self, io: &mut SyncIo, peers: Vec, transactions: Vec<&SignedTransaction>) -> HashSet { - let all_transactions_hashes = transactions.iter() - .map(|tx| tx.hash()) - .collect::>(); - let all_transactions_rlp = { - let mut packet = RlpStream::new_list(transactions.len()); - for tx in &transactions { packet.append(&**tx); } - packet.out() - }; - - // Clear old transactions from stats - 
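    // Sketch (std-only) of the probability used by
    // select_peers_for_transactions above: each peer is kept when a random
    // u32 falls below sqrt(n)/n scaled to the u32 range, so on average
    // sqrt(n) of n peers receive any given transactions packet.
    fn propagation_fraction(n_peers: usize) -> u32 {
        ((n_peers as f64).powf(-0.5) * u32::max_value() as f64).round() as u32
    }

    fn main() {
        // With 16 peers each is kept with probability ~1/4, i.e. ~4 peers.
        assert_eq!(propagation_fraction(16), (u32::max_value() as f64 / 4.0).round() as u32);
    }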
self.transactions_stats.retain(&all_transactions_hashes); - - // sqrt(x)/x scaled to max u32 - let block_number = io.chain().chain_info().best_block_number; - - let lucky_peers = { - peers.into_iter() - .filter_map(|peer_id| { - let stats = &mut self.transactions_stats; - let peer_info = self.peers.get_mut(&peer_id) - .expect("peer_id is form peers; peers is result of select_peers_for_transactions; select_peers_for_transactions selects peers from self.peers; qed"); - - // Send all transactions - if peer_info.last_sent_transactions.is_empty() { - // update stats - for hash in &all_transactions_hashes { - let id = io.peer_session_info(peer_id).and_then(|info| info.id); - stats.propagated(hash, id, block_number); - } - peer_info.last_sent_transactions = all_transactions_hashes.clone(); - return Some((peer_id, all_transactions_hashes.len(), all_transactions_rlp.clone())); - } - - // Get hashes of all transactions to send to this peer - let to_send = all_transactions_hashes.difference(&peer_info.last_sent_transactions) - .take(MAX_TRANSACTIONS_TO_PROPAGATE) - .cloned() - .collect::>(); - if to_send.is_empty() { - return None; - } - - // Construct RLP - let (packet, to_send) = { - let mut to_send = to_send; - let mut packet = RlpStream::new(); - packet.begin_unbounded_list(); - let mut pushed = 0; - for tx in &transactions { - let hash = tx.hash(); - if to_send.contains(&hash) { - let mut transaction = RlpStream::new(); - tx.rlp_append(&mut transaction); - let appended = packet.append_raw_checked(&transaction.drain(), 1, MAX_TRANSACTION_PACKET_SIZE); - if !appended { - // Maximal packet size reached just proceed with sending - debug!("Transaction packet size limit reached. Sending incomplete set of {}/{} transactions.", pushed, to_send.len()); - to_send = to_send.into_iter().take(pushed).collect(); - break; - } - pushed += 1; - } - } - packet.complete_unbounded_list(); - (packet, to_send) - }; - - // Update stats - let id = io.peer_session_info(peer_id).and_then(|info| info.id); - for hash in &to_send { - // update stats - stats.propagated(hash, id, block_number); - } - - peer_info.last_sent_transactions = all_transactions_hashes - .intersection(&peer_info.last_sent_transactions) - .chain(&to_send) - .cloned() - .collect(); - Some((peer_id, to_send.len(), packet.out())) - }) - .collect::>() - }; - - // Send RLPs - let mut peers = HashSet::new(); - if lucky_peers.len() > 0 { - let mut max_sent = 0; - let lucky_peers_len = lucky_peers.len(); - for (peer_id, sent, rlp) in lucky_peers { - peers.insert(peer_id); - self.send_packet(io, peer_id, TRANSACTIONS_PACKET, rlp); - trace!(target: "sync", "{:02} <- Transactions ({} entries)", peer_id, sent); - max_sent = cmp::max(max_sent, sent); - } - debug!(target: "sync", "Sent up to {} transactions to {} peers.", max_sent, lucky_peers_len); - } - - peers - } - - fn propagate_latest_blocks(&mut self, io: &mut SyncIo, sealed: &[H256]) { - let chain_info = io.chain().chain_info(); - if (((chain_info.best_block_number as i64) - (self.last_sent_block_number as i64)).abs() as BlockNumber) < MAX_PEER_LAG_PROPAGATION { - let mut peers = self.get_lagging_peers(&chain_info); - if sealed.is_empty() { - let hashes = self.propagate_new_hashes(&chain_info, io, &peers); - peers = ChainSync::select_random_peers(&peers); - let blocks = self.propagate_blocks(&chain_info, io, sealed, &peers); - if blocks != 0 || hashes != 0 { - trace!(target: "sync", "Sent latest {} blocks and {} hashes to peers.", blocks, hashes); - } - } else { - self.propagate_blocks(&chain_info, io, 
sealed, &peers); - self.propagate_new_hashes(&chain_info, io, &peers); - trace!(target: "sync", "Sent sealed block to all peers"); - }; - } - self.last_sent_block_number = chain_info.best_block_number; - } - - /// Distribute valid proposed blocks to subset of current peers. - fn propagate_proposed_blocks(&mut self, io: &mut SyncIo, proposed: &[Bytes]) { - let peers = self.get_consensus_peers(); - trace!(target: "sync", "Sending proposed blocks to {:?}", peers); - for block in proposed { - let rlp = ChainSync::create_block_rlp( - block, - io.chain().chain_info().total_difficulty - ); - for peer_id in &peers { - self.send_packet(io, *peer_id, NEW_BLOCK_PACKET, rlp.clone()); - } - } - } - - /// Maintain other peers. Send out any new blocks and transactions - pub fn maintain_sync(&mut self, io: &mut SyncIo) { - self.maybe_start_snapshot_sync(io); - self.check_resume(io); - } - - /// called when block is imported to chain - propagates the blocks and updates transactions sent to peers - pub fn chain_new_blocks(&mut self, io: &mut SyncIo, _imported: &[H256], invalid: &[H256], enacted: &[H256], _retracted: &[H256], sealed: &[H256], proposed: &[Bytes]) { - let queue_info = io.chain().queue_info(); - let is_syncing = self.status().is_syncing(queue_info); - - if !is_syncing || !sealed.is_empty() || !proposed.is_empty() { - trace!(target: "sync", "Propagating blocks, state={:?}", self.state); - self.propagate_latest_blocks(io, sealed); - self.propagate_proposed_blocks(io, proposed); - } - if !invalid.is_empty() { - trace!(target: "sync", "Bad blocks in the queue, restarting"); - self.restart(io); - } - - if !is_syncing && !enacted.is_empty() && !self.peers.is_empty() { - // Select random peer to re-broadcast transactions to. - let peer = random::new().gen_range(0, self.peers.len()); - trace!(target: "sync", "Re-broadcasting transactions to a random peer."); - self.peers.values_mut().nth(peer).map(|peer_info| - peer_info.last_sent_transactions.clear() - ); - } - } - - /// Called when peer sends us new consensus packet - fn on_consensus_packet(io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), PacketDecodeError> { - trace!(target: "sync", "Received consensus packet from {:?}", peer_id); - io.chain().queue_consensus_message(r.as_raw().to_vec()); - Ok(()) - } - - /// Broadcast consensus message to peers. - pub fn propagate_consensus_packet(&mut self, io: &mut SyncIo, packet: Bytes) { - let lucky_peers = ChainSync::select_random_peers(&self.get_consensus_peers()); - trace!(target: "sync", "Sending consensus packet to {:?}", lucky_peers); - for peer_id in lucky_peers { - self.send_packet(io, peer_id, CONSENSUS_DATA_PACKET, packet.clone()); - } - } - - /// Called when peer sends us new private transaction packet - fn on_private_transaction(&self, _io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), PacketDecodeError> { - if !self.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { - trace!(target: "sync", "{} Ignoring packet from unconfirmed/unknown peer", peer_id); - return Ok(()); - } - - trace!(target: "sync", "Received private transaction packet from {:?}", peer_id); - - if let Err(e) = self.private_tx_handler.import_private_transaction(r.as_raw()) { - trace!(target: "sync", "Ignoring the message, error queueing: {}", e); - } - Ok(()) - } - - /// Broadcast private transaction message to peers. 
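The consensus and private-transaction broadcasts above both fan out through select_random_peers, which takes a square-root-sized random subset of the candidate peers, clamped between MIN_PEERS_PROPAGATION and MAX_PEERS_PROPAGATION. A std-only sketch of the count calculation; the two bounds are assumed values for illustration:

    const MIN_PEERS_PROPAGATION: usize = 4;   // assumed values,
    const MAX_PEERS_PROPAGATION: usize = 128; // for illustration only

    fn propagation_peer_count(total_peers: usize) -> usize {
        let count = (total_peers as f64).powf(0.5).round() as usize;
        count.max(MIN_PEERS_PROPAGATION).min(MAX_PEERS_PROPAGATION)
    }

    fn main() {
        assert_eq!(propagation_peer_count(100), 10); // sqrt(100)
        assert_eq!(propagation_peer_count(2), 4);    // clamped up to the minimum
    }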
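The accepts_service_transaction helper a little further down gates zero-gas-price (service) transactions on the peer's client ID string: only Parity clients reporting version 1.6 or newer qualify. A std-only restatement of that parse, with assertions showing the expected behaviour:

    /// Restatement of accepts_service_transaction (defined below): parse
    /// "Parity/vMAJOR.MINOR..." and require at least version 1.6.
    fn parity_at_least_1_6(client_id: &str) -> bool {
        const PREFIX: &str = "Parity/v";
        if !client_id.starts_with(PREFIX) {
            return false;
        }
        let ver: Vec<u32> = client_id[PREFIX.len()..]
            .split('.')
            .take(2)
            .filter_map(|s| s.parse().ok())
            .collect();
        ver.len() == 2 && (ver[0] > 1 || (ver[0] == 1 && ver[1] >= 6))
    }

    fn main() {
        assert!(parity_at_least_1_6("Parity/v1.6.0"));
        assert!(parity_at_least_1_6("Parity/v2.0.1"));
        assert!(!parity_at_least_1_6("Parity/v1.5.9"));
        assert!(!parity_at_least_1_6("Geth/v1.8.2")); // not a Parity client
    }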
- pub fn propagate_private_transaction(&mut self, io: &mut SyncIo, packet: Bytes) { - let lucky_peers = ChainSync::select_random_peers(&self.get_private_transaction_peers()); - trace!(target: "sync", "Sending private transaction packet to {:?}", lucky_peers); - for peer_id in lucky_peers { - self.send_packet(io, peer_id, PRIVATE_TRANSACTION_PACKET, packet.clone()); - } - } - - /// Called when peer sends us signed private transaction packet - fn on_signed_private_transaction(&self, _io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), PacketDecodeError> { - if !self.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { - trace!(target: "sync", "{} Ignoring packet from unconfirmed/unknown peer", peer_id); - return Ok(()); - } - - trace!(target: "sync", "Received signed private transaction packet from {:?}", peer_id); - if let Err(e) = self.private_tx_handler.import_signed_private_transaction(r.as_raw()) { - trace!(target: "sync", "Ignoring the message, error queueing: {}", e); - } - Ok(()) - } - - /// Broadcast signed private transaction message to peers. - pub fn propagate_signed_private_transaction(&mut self, io: &mut SyncIo, packet: Bytes) { - let lucky_peers = ChainSync::select_random_peers(&self.get_private_transaction_peers()); - trace!(target: "sync", "Sending signed private transaction packet to {:?}", lucky_peers); - for peer_id in lucky_peers { - self.send_packet(io, peer_id, SIGNED_PRIVATE_TRANSACTION_PACKET, packet.clone()); - } - } - -} - -/// Checks if peer is able to process service transactions -fn accepts_service_transaction(client_id: &str) -> bool { - // Parity versions starting from this will accept service-transactions - const SERVICE_TRANSACTIONS_VERSION: (u32, u32) = (1u32, 6u32); - // Parity client string prefix - const PARITY_CLIENT_ID_PREFIX: &'static str = "Parity/v"; - - if !client_id.starts_with(PARITY_CLIENT_ID_PREFIX) { - return false; - } - let ver: Vec = client_id[PARITY_CLIENT_ID_PREFIX.len()..].split('.') - .take(2) - .filter_map(|s| s.parse().ok()) - .collect(); - ver.len() == 2 && (ver[0] > SERVICE_TRANSACTIONS_VERSION.0 || (ver[0] == SERVICE_TRANSACTIONS_VERSION.0 && ver[1] >= SERVICE_TRANSACTIONS_VERSION.1)) -} - -#[cfg(test)] -mod tests { - use std::collections::{HashSet, VecDeque}; - use ethkey; - use network::PeerId; - use tests::helpers::{TestIo}; - use tests::snapshot::TestSnapshotService; - use ethereum_types::{H256, U256, Address}; - use parking_lot::RwLock; - use bytes::Bytes; - use rlp::{Rlp, RlpStream}; - use super::*; - use ::SyncConfig; - use super::{PeerInfo, PeerAsking}; - use ethcore::header::*; - use ethcore::client::{BlockChainClient, EachBlockWith, TestBlockChainClient, ChainInfo, BlockInfo}; - use ethcore::miner::MinerService; - use private_tx::NoopPrivateTxHandler; - - fn get_dummy_block(order: u32, parent_hash: H256) -> Bytes { - let mut header = Header::new(); - header.set_gas_limit(0.into()); - header.set_difficulty((order * 100).into()); - header.set_timestamp((order * 10) as u64); - header.set_number(order as u64); - header.set_parent_hash(parent_hash); - header.set_state_root(H256::zero()); - - let mut rlp = RlpStream::new_list(3); - rlp.append(&header); - rlp.append_raw(&::rlp::EMPTY_LIST_RLP, 1); - rlp.append_raw(&::rlp::EMPTY_LIST_RLP, 1); - rlp.out() - } - - fn get_dummy_blocks(order: u32, parent_hash: H256) -> Bytes { - let mut rlp = RlpStream::new_list(1); - rlp.append_raw(&get_dummy_block(order, parent_hash), 1); - let difficulty: U256 = (100 * order).into(); - rlp.append(&difficulty); - rlp.out() - } - - fn 
get_dummy_hashes() -> Bytes { - let mut rlp = RlpStream::new_list(5); - for _ in 0..5 { - let mut hash_d_rlp = RlpStream::new_list(2); - let hash: H256 = H256::from(0u64); - let diff: U256 = U256::from(1u64); - hash_d_rlp.append(&hash); - hash_d_rlp.append(&diff); - - rlp.append_raw(&hash_d_rlp.out(), 1); - } - - rlp.out() - } - - fn queue_info(unverified: usize, verified: usize) -> BlockQueueInfo { - BlockQueueInfo { - unverified_queue_size: unverified, - verified_queue_size: verified, - verifying_queue_size: 0, - max_queue_size: 1000, - max_mem_use: 1000, - mem_used: 500 - } - } - - fn sync_status(state: SyncState) -> SyncStatus { - SyncStatus { - state: state, - protocol_version: 0, - network_id: 0, - start_block_number: 0, - last_imported_block_number: None, - highest_block_number: None, - blocks_total: 0, - blocks_received: 0, - num_peers: 0, - num_active_peers: 0, - mem_used: 0, - num_snapshot_chunks: 0, - snapshot_chunks_done: 0, - last_imported_old_block_number: None, - } - } - - #[test] - fn is_still_verifying() { - assert!(!sync_status(SyncState::Idle).is_syncing(queue_info(2, 1))); - assert!(sync_status(SyncState::Idle).is_syncing(queue_info(2, 2))); - } - - #[test] - fn is_synced_state() { - assert!(sync_status(SyncState::Blocks).is_syncing(queue_info(0, 0))); - assert!(!sync_status(SyncState::Idle).is_syncing(queue_info(0, 0))); - } - - #[test] - fn return_receipts_empty() { - let mut client = TestBlockChainClient::new(); - let queue = RwLock::new(VecDeque::new()); - let ss = TestSnapshotService::new(); - let io = TestIo::new(&mut client, &ss, &queue, None); - - let result = ChainSync::return_receipts(&io, &Rlp::new(&[0xc0]), 0); - - assert!(result.is_ok()); - } - - #[test] - fn return_receipts() { - let mut client = TestBlockChainClient::new(); - let queue = RwLock::new(VecDeque::new()); - let sync = dummy_sync_with_peer(H256::new(), &client); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); - - let mut receipt_list = RlpStream::new_list(4); - receipt_list.append(&H256::from("0000000000000000000000000000000000000000000000005555555555555555")); - receipt_list.append(&H256::from("ff00000000000000000000000000000000000000000000000000000000000000")); - receipt_list.append(&H256::from("fff0000000000000000000000000000000000000000000000000000000000000")); - receipt_list.append(&H256::from("aff0000000000000000000000000000000000000000000000000000000000000")); - - let receipts_request = receipt_list.out(); - // it returns rlp ONLY for hashes started with "f" - let result = ChainSync::return_receipts(&io, &Rlp::new(&receipts_request.clone()), 0); - - assert!(result.is_ok()); - let rlp_result = result.unwrap(); - assert!(rlp_result.is_some()); - - // the length of two rlp-encoded receipts - assert_eq!(603, rlp_result.unwrap().1.out().len()); - - io.sender = Some(2usize); - ChainSync::dispatch_packet(&RwLock::new(sync), &mut io, 0usize, super::GET_RECEIPTS_PACKET, &receipts_request); - assert_eq!(1, io.packets.len()); - } - - #[test] - fn return_block_headers() { - use ethcore::views::HeaderView; - fn make_hash_req(h: &H256, count: usize, skip: usize, reverse: bool) -> Bytes { - let mut rlp = RlpStream::new_list(4); - rlp.append(h); - rlp.append(&count); - rlp.append(&skip); - rlp.append(&if reverse {1u32} else {0u32}); - rlp.out() - } - - fn make_num_req(n: usize, count: usize, skip: usize, reverse: bool) -> Bytes { - let mut rlp = RlpStream::new_list(4); - rlp.append(&n); - rlp.append(&count); - rlp.append(&skip); - rlp.append(&if reverse 
{1u32} else {0u32}); - rlp.out() - } - fn to_header_vec(rlp: ::chain::RlpResponseResult) -> Vec { - Rlp::new(&rlp.unwrap().unwrap().1.out()).iter().map(|r| r.as_raw().to_vec()).collect() - } - - let mut client = TestBlockChainClient::new(); - client.add_blocks(100, EachBlockWith::Nothing); - let blocks: Vec<_> = (0 .. 100) - .map(|i| (&client as &BlockChainClient).block(BlockId::Number(i as BlockNumber)).map(|b| b.into_inner()).unwrap()).collect(); - let headers: Vec<_> = blocks.iter().map(|b| Rlp::new(b).at(0).unwrap().as_raw().to_vec()).collect(); - let hashes: Vec<_> = headers.iter().map(|h| view!(HeaderView, h).hash()).collect(); - - let queue = RwLock::new(VecDeque::new()); - let ss = TestSnapshotService::new(); - let io = TestIo::new(&mut client, &ss, &queue, None); - - let unknown: H256 = H256::new(); - let result = ChainSync::return_block_headers(&io, &Rlp::new(&make_hash_req(&unknown, 1, 0, false)), 0); - assert!(to_header_vec(result).is_empty()); - let result = ChainSync::return_block_headers(&io, &Rlp::new(&make_hash_req(&unknown, 1, 0, true)), 0); - assert!(to_header_vec(result).is_empty()); - - let result = ChainSync::return_block_headers(&io, &Rlp::new(&make_hash_req(&hashes[2], 1, 0, true)), 0); - assert_eq!(to_header_vec(result), vec![headers[2].clone()]); - - let result = ChainSync::return_block_headers(&io, &Rlp::new(&make_hash_req(&hashes[2], 1, 0, false)), 0); - assert_eq!(to_header_vec(result), vec![headers[2].clone()]); - - let result = ChainSync::return_block_headers(&io, &Rlp::new(&make_hash_req(&hashes[50], 3, 5, false)), 0); - assert_eq!(to_header_vec(result), vec![headers[50].clone(), headers[56].clone(), headers[62].clone()]); - - let result = ChainSync::return_block_headers(&io, &Rlp::new(&make_hash_req(&hashes[50], 3, 5, true)), 0); - assert_eq!(to_header_vec(result), vec![headers[50].clone(), headers[44].clone(), headers[38].clone()]); - - let result = ChainSync::return_block_headers(&io, &Rlp::new(&make_num_req(2, 1, 0, true)), 0); - assert_eq!(to_header_vec(result), vec![headers[2].clone()]); - - let result = ChainSync::return_block_headers(&io, &Rlp::new(&make_num_req(2, 1, 0, false)), 0); - assert_eq!(to_header_vec(result), vec![headers[2].clone()]); - - let result = ChainSync::return_block_headers(&io, &Rlp::new(&make_num_req(50, 3, 5, false)), 0); - assert_eq!(to_header_vec(result), vec![headers[50].clone(), headers[56].clone(), headers[62].clone()]); - - let result = ChainSync::return_block_headers(&io, &Rlp::new(&make_num_req(50, 3, 5, true)), 0); - assert_eq!(to_header_vec(result), vec![headers[50].clone(), headers[44].clone(), headers[38].clone()]); - } - - #[test] - fn return_nodes() { - let mut client = TestBlockChainClient::new(); - let queue = RwLock::new(VecDeque::new()); - let sync = dummy_sync_with_peer(H256::new(), &client); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); - - let mut node_list = RlpStream::new_list(3); - node_list.append(&H256::from("0000000000000000000000000000000000000000000000005555555555555555")); - node_list.append(&H256::from("ffffffffffffffffffffffffffffffffffffffffffffaaaaaaaaaaaaaaaaaaaa")); - node_list.append(&H256::from("aff0000000000000000000000000000000000000000000000000000000000000")); - - let node_request = node_list.out(); - // it returns rlp ONLY for hashes started with "f" - let result = ChainSync::return_node_data(&io, &Rlp::new(&node_request.clone()), 0); - - assert!(result.is_ok()); - let rlp_result = result.unwrap(); - assert!(rlp_result.is_some()); - - 
// the length of one rlp-encoded hashe - let rlp = rlp_result.unwrap().1.out(); - let rlp = Rlp::new(&rlp); - assert_eq!(Ok(1), rlp.item_count()); - - io.sender = Some(2usize); - - ChainSync::dispatch_packet(&RwLock::new(sync), &mut io, 0usize, super::GET_NODE_DATA_PACKET, &node_request); - assert_eq!(1, io.packets.len()); - } - - fn dummy_sync_with_peer(peer_latest_hash: H256, client: &BlockChainClient) -> ChainSync { - let mut sync = ChainSync::new(SyncConfig::default(), client, Arc::new(NoopPrivateTxHandler)); - insert_dummy_peer(&mut sync, 0, peer_latest_hash); - sync - } - - fn insert_dummy_peer(sync: &mut ChainSync, peer_id: PeerId, peer_latest_hash: H256) { - sync.peers.insert(peer_id, - PeerInfo { - protocol_version: 0, - genesis: H256::zero(), - network_id: 0, - latest_hash: peer_latest_hash, - difficulty: None, - asking: PeerAsking::Nothing, - asking_blocks: Vec::new(), - asking_hash: None, - ask_time: Instant::now(), - last_sent_transactions: HashSet::new(), - expired: false, - confirmation: super::ForkConfirmation::Confirmed, - snapshot_number: None, - snapshot_hash: None, - asking_snapshot_data: None, - block_set: None, - }); - - } - - #[test] - fn finds_lagging_peers() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(100, EachBlockWith::Uncle); - let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(10), &client); - let chain_info = client.chain_info(); - - let lagging_peers = sync.get_lagging_peers(&chain_info); - - assert_eq!(1, lagging_peers.len()); - } - - #[test] - fn calculates_tree_for_lagging_peer() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(15, EachBlockWith::Uncle); - - let start = client.block_hash_delta_minus(4); - let end = client.block_hash_delta_minus(2); - - // wrong way end -> start, should be None - let rlp = ChainSync::create_new_hashes_rlp(&client, &end, &start); - assert!(rlp.is_none()); - - let rlp = ChainSync::create_new_hashes_rlp(&client, &start, &end).unwrap(); - // size of three rlp encoded hash-difficulty - assert_eq!(107, rlp.len()); - } - - #[test] - fn sends_new_hashes_to_lagging_peer() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(100, EachBlockWith::Uncle); - let queue = RwLock::new(VecDeque::new()); - let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - let chain_info = client.chain_info(); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); - - let peers = sync.get_lagging_peers(&chain_info); - let peer_count = sync.propagate_new_hashes(&chain_info, &mut io, &peers); - - // 1 message should be send - assert_eq!(1, io.packets.len()); - // 1 peer should be updated - assert_eq!(1, peer_count); - // NEW_BLOCK_HASHES_PACKET - assert_eq!(0x01, io.packets[0].packet_id); - } - - #[test] - fn sends_latest_block_to_lagging_peer() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(100, EachBlockWith::Uncle); - let queue = RwLock::new(VecDeque::new()); - let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - let chain_info = client.chain_info(); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); - let peers = sync.get_lagging_peers(&chain_info); - let peer_count = sync.propagate_blocks(&chain_info, &mut io, &[], &peers); - - // 1 message should be send - assert_eq!(1, io.packets.len()); - // 1 peer should be updated - assert_eq!(1, peer_count); - // NEW_BLOCK_PACKET - assert_eq!(0x07, 
io.packets[0].packet_id); - } - - #[test] - fn sends_sealed_block() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(100, EachBlockWith::Uncle); - let queue = RwLock::new(VecDeque::new()); - let hash = client.block_hash(BlockId::Number(99)).unwrap(); - let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - let chain_info = client.chain_info(); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); - let peers = sync.get_lagging_peers(&chain_info); - let peer_count = sync.propagate_blocks(&chain_info, &mut io, &[hash.clone()], &peers); - - // 1 message should be send - assert_eq!(1, io.packets.len()); - // 1 peer should be updated - assert_eq!(1, peer_count); - // NEW_BLOCK_PACKET - assert_eq!(0x07, io.packets[0].packet_id); - } - - #[test] - fn sends_proposed_block() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(2, EachBlockWith::Uncle); - let queue = RwLock::new(VecDeque::new()); - let block = client.block(BlockId::Latest).unwrap().into_inner(); - let mut sync = ChainSync::new(SyncConfig::default(), &client, Arc::new(NoopPrivateTxHandler)); - sync.peers.insert(0, - PeerInfo { - // Messaging protocol - protocol_version: 2, - genesis: H256::zero(), - network_id: 0, - latest_hash: client.block_hash_delta_minus(1), - difficulty: None, - asking: PeerAsking::Nothing, - asking_blocks: Vec::new(), - asking_hash: None, - ask_time: Instant::now(), - last_sent_transactions: HashSet::new(), - expired: false, - confirmation: super::ForkConfirmation::Confirmed, - snapshot_number: None, - snapshot_hash: None, - asking_snapshot_data: None, - block_set: None, - }); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); - sync.propagate_proposed_blocks(&mut io, &[block]); - - // 1 message should be sent - assert_eq!(1, io.packets.len()); - // NEW_BLOCK_PACKET - assert_eq!(0x07, io.packets[0].packet_id); - } - - #[test] - fn propagates_transactions() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(100, EachBlockWith::Uncle); - client.insert_transaction_to_queue(); - let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client); - let queue = RwLock::new(VecDeque::new()); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); - let peer_count = sync.propagate_new_transactions(&mut io); - // Try to propagate same transactions for the second time - let peer_count2 = sync.propagate_new_transactions(&mut io); - // Even after new block transactions should not be propagated twice - sync.chain_new_blocks(&mut io, &[], &[], &[], &[], &[], &[]); - // Try to propagate same transactions for the third time - let peer_count3 = sync.propagate_new_transactions(&mut io); - - // 1 message should be send - assert_eq!(1, io.packets.len()); - // 1 peer should be updated but only once - assert_eq!(1, peer_count); - assert_eq!(0, peer_count2); - assert_eq!(0, peer_count3); - // TRANSACTIONS_PACKET - assert_eq!(0x02, io.packets[0].packet_id); - } - - #[test] - fn does_not_propagate_new_transactions_after_new_block() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(100, EachBlockWith::Uncle); - client.insert_transaction_to_queue(); - let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client); - let queue = RwLock::new(VecDeque::new()); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); - let peer_count = 
sync.propagate_new_transactions(&mut io); - io.chain.insert_transaction_to_queue(); - // New block import should not trigger propagation. - // (we only propagate on timeout) - sync.chain_new_blocks(&mut io, &[], &[], &[], &[], &[], &[]); - - // 2 message should be send - assert_eq!(1, io.packets.len()); - // 1 peer should receive the message - assert_eq!(1, peer_count); - // TRANSACTIONS_PACKET - assert_eq!(0x02, io.packets[0].packet_id); - } - - #[test] - fn does_not_fail_for_no_peers() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(100, EachBlockWith::Uncle); - client.insert_transaction_to_queue(); - // Sync with no peers - let mut sync = ChainSync::new(SyncConfig::default(), &client, Arc::new(NoopPrivateTxHandler)); - let queue = RwLock::new(VecDeque::new()); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); - let peer_count = sync.propagate_new_transactions(&mut io); - sync.chain_new_blocks(&mut io, &[], &[], &[], &[], &[], &[]); - // Try to propagate same transactions for the second time - let peer_count2 = sync.propagate_new_transactions(&mut io); - - assert_eq!(0, io.packets.len()); - assert_eq!(0, peer_count); - assert_eq!(0, peer_count2); - } - - #[test] - fn propagates_transactions_without_alternating() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(100, EachBlockWith::Uncle); - client.insert_transaction_to_queue(); - let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client); - let queue = RwLock::new(VecDeque::new()); - let ss = TestSnapshotService::new(); - // should sent some - { - let mut io = TestIo::new(&mut client, &ss, &queue, None); - let peer_count = sync.propagate_new_transactions(&mut io); - assert_eq!(1, io.packets.len()); - assert_eq!(1, peer_count); - } - // Insert some more - client.insert_transaction_to_queue(); - let (peer_count2, peer_count3) = { - let mut io = TestIo::new(&mut client, &ss, &queue, None); - // Propagate new transactions - let peer_count2 = sync.propagate_new_transactions(&mut io); - // And now the peer should have all transactions - let peer_count3 = sync.propagate_new_transactions(&mut io); - (peer_count2, peer_count3) - }; - - // 2 message should be send (in total) - assert_eq!(2, queue.read().len()); - // 1 peer should be updated but only once after inserting new transaction - assert_eq!(1, peer_count2); - assert_eq!(0, peer_count3); - // TRANSACTIONS_PACKET - assert_eq!(0x02, queue.read()[0].packet_id); - assert_eq!(0x02, queue.read()[1].packet_id); - } - - #[test] - fn should_maintain_transations_propagation_stats() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(100, EachBlockWith::Uncle); - client.insert_transaction_to_queue(); - let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client); - let queue = RwLock::new(VecDeque::new()); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); - sync.propagate_new_transactions(&mut io); - - let stats = sync.transactions_stats(); - assert_eq!(stats.len(), 1, "Should maintain stats for single transaction.") - } - - #[test] - fn should_propagate_service_transaction_to_selected_peers_only() { - let mut client = TestBlockChainClient::new(); - client.insert_transaction_with_gas_price_to_queue(U256::zero()); - let block_hash = client.block_hash_delta_minus(1); - let mut sync = ChainSync::new(SyncConfig::default(), &client, Arc::new(NoopPrivateTxHandler)); - let queue = RwLock::new(VecDeque::new()); - let 
ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); - - // when peer#1 is Geth - insert_dummy_peer(&mut sync, 1, block_hash); - io.peers_info.insert(1, "Geth".to_owned()); - // and peer#2 is Parity, accepting service transactions - insert_dummy_peer(&mut sync, 2, block_hash); - io.peers_info.insert(2, "Parity/v1.6".to_owned()); - // and peer#3 is Parity, discarding service transactions - insert_dummy_peer(&mut sync, 3, block_hash); - io.peers_info.insert(3, "Parity/v1.5".to_owned()); - // and peer#4 is Parity, accepting service transactions - insert_dummy_peer(&mut sync, 4, block_hash); - io.peers_info.insert(4, "Parity/v1.7.3-ABCDEFGH".to_owned()); - - // and new service transaction is propagated to peers - sync.propagate_new_transactions(&mut io); - - // peer#2 && peer#4 are receiving service transaction - assert!(io.packets.iter().any(|p| p.packet_id == 0x02 && p.recipient == 2)); // TRANSACTIONS_PACKET - assert!(io.packets.iter().any(|p| p.packet_id == 0x02 && p.recipient == 4)); // TRANSACTIONS_PACKET - assert_eq!(io.packets.len(), 2); - } - - #[test] - fn should_propagate_service_transaction_is_sent_as_separate_message() { - let mut client = TestBlockChainClient::new(); - let tx1_hash = client.insert_transaction_to_queue(); - let tx2_hash = client.insert_transaction_with_gas_price_to_queue(U256::zero()); - let block_hash = client.block_hash_delta_minus(1); - let mut sync = ChainSync::new(SyncConfig::default(), &client, Arc::new(NoopPrivateTxHandler)); - let queue = RwLock::new(VecDeque::new()); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); - - // when peer#1 is Parity, accepting service transactions - insert_dummy_peer(&mut sync, 1, block_hash); - io.peers_info.insert(1, "Parity/v1.6".to_owned()); - - // and service + non-service transactions are propagated to peers - sync.propagate_new_transactions(&mut io); - - // two separate packets for peer are queued: - // 1) with non-service-transaction - // 2) with service transaction - let sent_transactions: Vec = io.packets.iter() - .filter_map(|p| { - if p.packet_id != 0x02 || p.recipient != 1 { // TRANSACTIONS_PACKET - return None; - } - - let rlp = Rlp::new(&*p.data); - let item_count = rlp.item_count().unwrap_or(0); - if item_count != 1 { - return None; - } - - rlp.at(0).ok().and_then(|r| r.as_val().ok()) - }) - .collect(); - assert_eq!(sent_transactions.len(), 2); - assert!(sent_transactions.iter().any(|tx| tx.hash() == tx1_hash)); - assert!(sent_transactions.iter().any(|tx| tx.hash() == tx2_hash)); - } - - #[test] - fn handles_peer_new_block_malformed() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(10, EachBlockWith::Uncle); - - let block_data = get_dummy_block(11, client.chain_info().best_block_hash); - - let queue = RwLock::new(VecDeque::new()); - let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - //sync.have_common_block = true; - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); - - let block = Rlp::new(&block_data); - - let result = sync.on_peer_new_block(&mut io, 0, &block); - - assert!(result.is_err()); - } - - #[test] - fn handles_peer_new_block() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(10, EachBlockWith::Uncle); - - let block_data = get_dummy_blocks(11, client.chain_info().best_block_hash); - - let queue = RwLock::new(VecDeque::new()); - let mut sync = 
dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); - - let block = Rlp::new(&block_data); - - let result = sync.on_peer_new_block(&mut io, 0, &block); - - assert!(result.is_ok()); - } - - #[test] - fn handles_peer_new_block_empty() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(10, EachBlockWith::Uncle); - let queue = RwLock::new(VecDeque::new()); - let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); - - let empty_data = vec![]; - let block = Rlp::new(&empty_data); - - let result = sync.on_peer_new_block(&mut io, 0, &block); - - assert!(result.is_err()); - } - - #[test] - fn handles_peer_new_hashes() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(10, EachBlockWith::Uncle); - let queue = RwLock::new(VecDeque::new()); - let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); - - let hashes_data = get_dummy_hashes(); - let hashes_rlp = Rlp::new(&hashes_data); - - let result = sync.on_peer_new_hashes(&mut io, 0, &hashes_rlp); - - assert!(result.is_ok()); - } - - #[test] - fn handles_peer_new_hashes_empty() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(10, EachBlockWith::Uncle); - let queue = RwLock::new(VecDeque::new()); - let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); - - let empty_hashes_data = vec![]; - let hashes_rlp = Rlp::new(&empty_hashes_data); - - let result = sync.on_peer_new_hashes(&mut io, 0, &hashes_rlp); - - assert!(result.is_ok()); - } - - // idea is that what we produce when propagading latest hashes should be accepted in - // on_peer_new_hashes in our code as well - #[test] - fn hashes_rlp_mutually_acceptable() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(100, EachBlockWith::Uncle); - let queue = RwLock::new(VecDeque::new()); - let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - let chain_info = client.chain_info(); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); - - let peers = sync.get_lagging_peers(&chain_info); - sync.propagate_new_hashes(&chain_info, &mut io, &peers); - - let data = &io.packets[0].data.clone(); - let result = sync.on_peer_new_hashes(&mut io, 0, &Rlp::new(data)); - assert!(result.is_ok()); - } - - // idea is that what we produce when propagading latest block should be accepted in - // on_peer_new_block in our code as well - #[test] - fn block_rlp_mutually_acceptable() { - let mut client = TestBlockChainClient::new(); - client.add_blocks(100, EachBlockWith::Uncle); - let queue = RwLock::new(VecDeque::new()); - let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - let chain_info = client.chain_info(); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); - - let peers = sync.get_lagging_peers(&chain_info); - sync.propagate_blocks(&chain_info, &mut io, &[], &peers); - - let data = &io.packets[0].data.clone(); - let result = sync.on_peer_new_block(&mut io, 0, &Rlp::new(data)); - assert!(result.is_ok()); - } - - #[test] - fn 
should_add_transactions_to_queue() { - fn sender(tx: &UnverifiedTransaction) -> Address { - ethkey::public_to_address(&tx.recover_public().unwrap()) - } - - // given - let mut client = TestBlockChainClient::new(); - client.add_blocks(98, EachBlockWith::Uncle); - client.add_blocks(1, EachBlockWith::UncleAndTransaction); - client.add_blocks(1, EachBlockWith::Transaction); - let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - - let good_blocks = vec![client.block_hash_delta_minus(2)]; - let retracted_blocks = vec![client.block_hash_delta_minus(1)]; - - // Add some balance to clients and reset nonces - for h in &[good_blocks[0], retracted_blocks[0]] { - let block = client.block(BlockId::Hash(*h)).unwrap(); - let sender = sender(&block.transactions()[0]);; - client.set_balance(sender, U256::from(10_000_000_000_000_000_000u64)); - client.set_nonce(sender, U256::from(0)); - } - - - // when - { - let queue = RwLock::new(VecDeque::new()); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); - io.chain.miner.chain_new_blocks(io.chain, &[], &[], &[], &good_blocks, false); - sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks, &[], &[]); - assert_eq!(io.chain.miner.ready_transactions(io.chain).len(), 1); - } - // We need to update nonce status (because we say that the block has been imported) - for h in &[good_blocks[0]] { - let block = client.block(BlockId::Hash(*h)).unwrap(); - client.set_nonce(sender(&block.transactions()[0]), U256::from(1)); - } - { - let queue = RwLock::new(VecDeque::new()); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&client, &ss, &queue, None); - io.chain.miner.chain_new_blocks(io.chain, &[], &[], &good_blocks, &retracted_blocks, false); - sync.chain_new_blocks(&mut io, &[], &[], &good_blocks, &retracted_blocks, &[], &[]); - } - - // then - assert_eq!(client.miner.ready_transactions(&client).len(), 1); - } - - #[test] - fn should_not_add_transactions_to_queue_if_not_synced() { - // given - let mut client = TestBlockChainClient::new(); - client.add_blocks(98, EachBlockWith::Uncle); - client.add_blocks(1, EachBlockWith::UncleAndTransaction); - client.add_blocks(1, EachBlockWith::Transaction); - let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); - - let good_blocks = vec![client.block_hash_delta_minus(2)]; - let retracted_blocks = vec![client.block_hash_delta_minus(1)]; - - let queue = RwLock::new(VecDeque::new()); - let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &queue, None); - - // when - sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks, &[], &[]); - assert_eq!(io.chain.miner.queue_status().status.transaction_count, 0); - sync.chain_new_blocks(&mut io, &[], &[], &good_blocks, &retracted_blocks, &[], &[]); - - // then - let status = io.chain.miner.queue_status(); - assert_eq!(status.status.transaction_count, 0); - } -} diff --git a/ethcore/sync/src/chain/handler.rs b/ethcore/sync/src/chain/handler.rs new file mode 100644 index 00000000000..966b7ce20ad --- /dev/null +++ b/ethcore/sync/src/chain/handler.rs @@ -0,0 +1,828 @@ +// Copyright 2015-2018 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use api::WARP_SYNC_PROTOCOL_ID; +use block_sync::{BlockDownloaderImportError as DownloaderImportError, DownloadAction}; +use bytes::Bytes; +use ethcore::client::{BlockStatus, BlockId, BlockImportError, BlockImportErrorKind}; +use ethcore::error::*; +use ethcore::header::{BlockNumber, Header as BlockHeader}; +use ethcore::snapshot::{ManifestData, RestorationStatus}; +use ethereum_types::{H256, U256}; +use hash::keccak; +use network::PeerId; +use rlp::Rlp; +use snapshot::ChunkType; +use std::cmp; +use std::collections::HashSet; +use std::time::Instant; +use sync_io::SyncIo; + +use super::{ + BlockSet, + ChainSync, + ForkConfirmation, + PacketDecodeError, + PeerAsking, + PeerInfo, + SyncRequester, + SyncState, + ETH_PROTOCOL_VERSION_62, + ETH_PROTOCOL_VERSION_63, + MAX_NEW_BLOCK_AGE, + MAX_NEW_HASHES, + PAR_PROTOCOL_VERSION_1, + PAR_PROTOCOL_VERSION_2, + PAR_PROTOCOL_VERSION_3, + BLOCK_BODIES_PACKET, + BLOCK_HEADERS_PACKET, + NEW_BLOCK_HASHES_PACKET, + NEW_BLOCK_PACKET, + PRIVATE_TRANSACTION_PACKET, + RECEIPTS_PACKET, + SIGNED_PRIVATE_TRANSACTION_PACKET, + SNAPSHOT_DATA_PACKET, + SNAPSHOT_MANIFEST_PACKET, + STATUS_PACKET, + TRANSACTIONS_PACKET, +}; + +/// The Chain Sync Handler: handles responses from peers +pub struct SyncHandler; + +impl SyncHandler { + /// Handle incoming packet from peer + pub fn on_packet(sync: &mut ChainSync, io: &mut SyncIo, peer: PeerId, packet_id: u8, data: &[u8]) { + if packet_id != STATUS_PACKET && !sync.peers.contains_key(&peer) { + debug!(target:"sync", "Unexpected packet {} from unregistered peer: {}:{}", packet_id, peer, io.peer_info(peer)); + return; + } + let rlp = Rlp::new(data); + let result = match packet_id { + STATUS_PACKET => SyncHandler::on_peer_status(sync, io, peer, &rlp), + TRANSACTIONS_PACKET => SyncHandler::on_peer_transactions(sync, io, peer, &rlp), + BLOCK_HEADERS_PACKET => SyncHandler::on_peer_block_headers(sync, io, peer, &rlp), + BLOCK_BODIES_PACKET => SyncHandler::on_peer_block_bodies(sync, io, peer, &rlp), + RECEIPTS_PACKET => SyncHandler::on_peer_block_receipts(sync, io, peer, &rlp), + NEW_BLOCK_PACKET => SyncHandler::on_peer_new_block(sync, io, peer, &rlp), + NEW_BLOCK_HASHES_PACKET => SyncHandler::on_peer_new_hashes(sync, io, peer, &rlp), + SNAPSHOT_MANIFEST_PACKET => SyncHandler::on_snapshot_manifest(sync, io, peer, &rlp), + SNAPSHOT_DATA_PACKET => SyncHandler::on_snapshot_data(sync, io, peer, &rlp), + PRIVATE_TRANSACTION_PACKET => SyncHandler::on_private_transaction(sync, io, peer, &rlp), + SIGNED_PRIVATE_TRANSACTION_PACKET => SyncHandler::on_signed_private_transaction(sync, io, peer, &rlp), + _ => { + debug!(target: "sync", "{}: Unknown packet {}", peer, packet_id); + Ok(()) + } + }; + result.unwrap_or_else(|e| { + debug!(target:"sync", "{} -> Malformed packet {} : {}", peer, packet_id, e); + }) + } + + /// Called when peer sends us new consensus packet + pub fn on_consensus_packet(io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), PacketDecodeError> { + trace!(target: "sync", "Received consensus packet from {:?}", peer_id); + io.chain().queue_consensus_message(r.as_raw().to_vec()); + Ok(()) + } + + /// Called by peer when it is disconnecting + pub fn on_peer_aborting(sync: &mut 
ChainSync, io: &mut SyncIo, peer: PeerId) {
+ trace!(target: "sync", "== Disconnecting {}: {}", peer, io.peer_info(peer));
+ sync.handshaking_peers.remove(&peer);
+ if sync.peers.contains_key(&peer) {
+ debug!(target: "sync", "Disconnected {}", peer);
+ sync.clear_peer_download(peer);
+ sync.peers.remove(&peer);
+ sync.active_peers.remove(&peer);
+ sync.continue_sync(io);
+ }
+ }
+
+ /// Called when a new peer is connected
+ pub fn on_peer_connected(sync: &mut ChainSync, io: &mut SyncIo, peer: PeerId) {
+ trace!(target: "sync", "== Connected {}: {}", peer, io.peer_info(peer));
+ if let Err(e) = sync.send_status(io, peer) {
+ debug!(target:"sync", "Error sending status request: {:?}", e);
+ io.disconnect_peer(peer);
+ } else {
+ sync.handshaking_peers.insert(peer, Instant::now());
+ }
+ }
+
+ /// Called by peer once it has a new block
+ pub fn on_peer_new_block(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), PacketDecodeError> {
+ if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) {
+ trace!(target: "sync", "Ignoring new block from unconfirmed peer {}", peer_id);
+ return Ok(());
+ }
+ let difficulty: U256 = r.val_at(1)?;
+ if let Some(ref mut peer) = sync.peers.get_mut(&peer_id) {
+ if peer.difficulty.map_or(true, |pd| difficulty > pd) {
+ peer.difficulty = Some(difficulty);
+ }
+ }
+ let block_rlp = r.at(0)?;
+ let header_rlp = block_rlp.at(0)?;
+ let h = keccak(&header_rlp.as_raw());
+ trace!(target: "sync", "{} -> NewBlock ({})", peer_id, h);
+ let header: BlockHeader = header_rlp.as_val()?;
+ if header.number() > sync.highest_block.unwrap_or(0) {
+ sync.highest_block = Some(header.number());
+ }
+ let mut unknown = false;
+ {
+ if let Some(ref mut peer) = sync.peers.get_mut(&peer_id) {
+ peer.latest_hash = header.hash();
+ }
+ }
+ let last_imported_number = sync.new_blocks.last_imported_block_number();
+ if last_imported_number > header.number() && last_imported_number - header.number() > MAX_NEW_BLOCK_AGE {
+ trace!(target: "sync", "Ignored ancient new block {:?}", h);
+ io.disable_peer(peer_id);
+ return Ok(());
+ }
+ match io.chain().import_block(block_rlp.as_raw().to_vec()) {
+ Err(BlockImportError(BlockImportErrorKind::Import(ImportErrorKind::AlreadyInChain), _)) => {
+ trace!(target: "sync", "New block already in chain {:?}", h);
+ },
+ Err(BlockImportError(BlockImportErrorKind::Import(ImportErrorKind::AlreadyQueued), _)) => {
+ trace!(target: "sync", "New block already queued {:?}", h);
+ },
+ Ok(_) => {
+ // abort current download of the same block
+ sync.complete_sync(io);
+ sync.new_blocks.mark_as_known(&header.hash(), header.number());
+ trace!(target: "sync", "New block queued {:?} ({})", h, header.number());
+ },
+ Err(BlockImportError(BlockImportErrorKind::Block(BlockError::UnknownParent(p)), _)) => {
+ unknown = true;
+ trace!(target: "sync", "New block with unknown parent ({:?}) {:?}", p, h);
+ },
+ Err(e) => {
+ debug!(target: "sync", "Bad new block {:?} : {:?}", h, e);
+ io.disable_peer(peer_id);
+ }
+ };
+ if unknown {
+ if sync.state != SyncState::Idle {
+ trace!(target: "sync", "NewBlock ignored while seeking");
+ } else {
+ trace!(target: "sync", "New unknown block {:?}", h);
+ //TODO: handle too many unknown blocks
+ sync.sync_peer(io, peer_id, true);
+ }
+ }
+ sync.continue_sync(io);
+ Ok(())
+ }
+
+ /// Handles `NewHashes` packet. Initiates headers download for any unknown hashes.
+ pub fn on_peer_new_hashes(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), PacketDecodeError> { + if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { + trace!(target: "sync", "Ignoring new hashes from unconfirmed peer {}", peer_id); + return Ok(()); + } + let hashes: Vec<_> = r.iter().take(MAX_NEW_HASHES).map(|item| (item.val_at::(0), item.val_at::(1))).collect(); + if let Some(ref mut peer) = sync.peers.get_mut(&peer_id) { + // Peer has new blocks with unknown difficulty + peer.difficulty = None; + if let Some(&(Ok(ref h), _)) = hashes.last() { + peer.latest_hash = h.clone(); + } + } + if sync.state != SyncState::Idle { + trace!(target: "sync", "Ignoring new hashes since we're already downloading."); + let max = r.iter().take(MAX_NEW_HASHES).map(|item| item.val_at::(1).unwrap_or(0)).fold(0u64, cmp::max); + if max > sync.highest_block.unwrap_or(0) { + sync.highest_block = Some(max); + } + sync.continue_sync(io); + return Ok(()); + } + trace!(target: "sync", "{} -> NewHashes ({} entries)", peer_id, r.item_count()?); + let mut max_height: BlockNumber = 0; + let mut new_hashes = Vec::new(); + let last_imported_number = sync.new_blocks.last_imported_block_number(); + for (rh, rn) in hashes { + let hash = rh?; + let number = rn?; + if number > sync.highest_block.unwrap_or(0) { + sync.highest_block = Some(number); + } + if sync.new_blocks.is_downloading(&hash) { + continue; + } + if last_imported_number > number && last_imported_number - number > MAX_NEW_BLOCK_AGE { + trace!(target: "sync", "Ignored ancient new block hash {:?}", hash); + io.disable_peer(peer_id); + continue; + } + match io.chain().block_status(BlockId::Hash(hash.clone())) { + BlockStatus::InChain => { + trace!(target: "sync", "New block hash already in chain {:?}", hash); + }, + BlockStatus::Queued => { + trace!(target: "sync", "New hash block already queued {:?}", hash); + }, + BlockStatus::Unknown | BlockStatus::Pending => { + new_hashes.push(hash.clone()); + if number > max_height { + trace!(target: "sync", "New unknown block hash {:?}", hash); + if let Some(ref mut peer) = sync.peers.get_mut(&peer_id) { + peer.latest_hash = hash.clone(); + } + max_height = number; + } + }, + BlockStatus::Bad => { + debug!(target: "sync", "Bad new block hash {:?}", hash); + io.disable_peer(peer_id); + return Ok(()); + } + } + }; + if max_height != 0 { + trace!(target: "sync", "Downloading blocks for new hashes"); + sync.new_blocks.reset_to(new_hashes); + sync.state = SyncState::NewBlocks; + sync.sync_peer(io, peer_id, true); + } + sync.continue_sync(io); + Ok(()) + } + + /// Called by peer once it has new block bodies + fn on_peer_block_bodies(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), PacketDecodeError> { + sync.clear_peer_download(peer_id); + let block_set = sync.peers.get(&peer_id).and_then(|p| p.block_set).unwrap_or(BlockSet::NewBlocks); + if !sync.reset_peer_asking(peer_id, PeerAsking::BlockBodies) { + trace!(target: "sync", "{}: Ignored unexpected bodies", peer_id); + sync.continue_sync(io); + return Ok(()); + } + let item_count = r.item_count()?; + trace!(target: "sync", "{} -> BlockBodies ({} entries), set = {:?}", peer_id, item_count, block_set); + if item_count == 0 { + sync.deactivate_peer(io, peer_id); + } + else if sync.state == SyncState::Waiting { + trace!(target: "sync", "Ignored block bodies while waiting"); + } + else + { + let result = { + let downloader = match block_set { + BlockSet::NewBlocks => &mut sync.new_blocks, + BlockSet::OldBlocks => 
match sync.old_blocks { + None => { + trace!(target: "sync", "Ignored block headers while block download is inactive"); + sync.continue_sync(io); + return Ok(()); + }, + Some(ref mut blocks) => blocks, + } + }; + downloader.import_bodies(io, r) + }; + + match result { + Err(DownloaderImportError::Invalid) => { + io.disable_peer(peer_id); + sync.deactivate_peer(io, peer_id); + sync.continue_sync(io); + return Ok(()); + }, + Err(DownloaderImportError::Useless) => { + sync.deactivate_peer(io, peer_id); + }, + Ok(()) => (), + } + + sync.collect_blocks(io, block_set); + sync.sync_peer(io, peer_id, false); + } + sync.continue_sync(io); + Ok(()) + } + + fn on_peer_confirmed(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId) { + sync.sync_peer(io, peer_id, false); + } + + fn on_peer_fork_header(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), PacketDecodeError> { + { + let peer = sync.peers.get_mut(&peer_id).expect("Is only called when peer is present in peers"); + peer.asking = PeerAsking::Nothing; + let item_count = r.item_count()?; + let (fork_number, fork_hash) = sync.fork_block.expect("ForkHeader request is sent only fork block is Some; qed").clone(); + + if item_count == 0 || item_count != 1 { + trace!(target: "sync", "{}: Chain is too short to confirm the block", peer_id); + io.disable_peer(peer_id); + return Ok(()); + } + + let header = r.at(0)?.as_raw(); + if keccak(&header) != fork_hash { + trace!(target: "sync", "{}: Fork mismatch", peer_id); + io.disable_peer(peer_id); + return Ok(()); + } + + trace!(target: "sync", "{}: Confirmed peer", peer_id); + peer.confirmation = ForkConfirmation::Confirmed; + if !io.chain_overlay().read().contains_key(&fork_number) { + io.chain_overlay().write().insert(fork_number, header.to_vec()); + } + } + SyncHandler::on_peer_confirmed(sync, io, peer_id); + return Ok(()); + } + + /// Called by peer once it has new block headers during sync + fn on_peer_block_headers(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), PacketDecodeError> { + let is_fork_header_request = match sync.peers.get(&peer_id) { + Some(peer) if peer.asking == PeerAsking::ForkHeader => true, + _ => false, + }; + + if is_fork_header_request { + return SyncHandler::on_peer_fork_header(sync, io, peer_id, r); + } + + sync.clear_peer_download(peer_id); + let expected_hash = sync.peers.get(&peer_id).and_then(|p| p.asking_hash); + let allowed = sync.peers.get(&peer_id).map(|p| p.is_allowed()).unwrap_or(false); + let block_set = sync.peers.get(&peer_id).and_then(|p| p.block_set).unwrap_or(BlockSet::NewBlocks); + if !sync.reset_peer_asking(peer_id, PeerAsking::BlockHeaders) || expected_hash.is_none() || !allowed { + trace!(target: "sync", "{}: Ignored unexpected headers, expected_hash = {:?}", peer_id, expected_hash); + sync.continue_sync(io); + return Ok(()); + } + let item_count = r.item_count()?; + trace!(target: "sync", "{} -> BlockHeaders ({} entries), state = {:?}, set = {:?}", peer_id, item_count, sync.state, block_set); + if (sync.state == SyncState::Idle || sync.state == SyncState::WaitingPeers) && sync.old_blocks.is_none() { + trace!(target: "sync", "Ignored unexpected block headers"); + sync.continue_sync(io); + return Ok(()); + } + if sync.state == SyncState::Waiting { + trace!(target: "sync", "Ignored block headers while waiting"); + sync.continue_sync(io); + return Ok(()); + } + + let result = { + let downloader = match block_set { + BlockSet::NewBlocks => &mut sync.new_blocks, + BlockSet::OldBlocks => { + match 
sync.old_blocks { + None => { + trace!(target: "sync", "Ignored block headers while block download is inactive"); + sync.continue_sync(io); + return Ok(()); + }, + Some(ref mut blocks) => blocks, + } + } + }; + downloader.import_headers(io, r, expected_hash) + }; + + match result { + Err(DownloaderImportError::Useless) => { + sync.deactivate_peer(io, peer_id); + }, + Err(DownloaderImportError::Invalid) => { + io.disable_peer(peer_id); + sync.deactivate_peer(io, peer_id); + sync.continue_sync(io); + return Ok(()); + }, + Ok(DownloadAction::Reset) => { + // mark all outstanding requests as expired + trace!("Resetting downloads for {:?}", block_set); + for (_, ref mut p) in sync.peers.iter_mut().filter(|&(_, ref p)| p.block_set == Some(block_set)) { + p.reset_asking(); + } + + } + Ok(DownloadAction::None) => {}, + } + + sync.collect_blocks(io, block_set); + // give a task to the same peer first if received valuable headers. + sync.sync_peer(io, peer_id, false); + // give tasks to other peers + sync.continue_sync(io); + Ok(()) + } + + /// Called by peer once it has new block receipts + fn on_peer_block_receipts(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), PacketDecodeError> { + sync.clear_peer_download(peer_id); + let block_set = sync.peers.get(&peer_id).and_then(|p| p.block_set).unwrap_or(BlockSet::NewBlocks); + if !sync.reset_peer_asking(peer_id, PeerAsking::BlockReceipts) { + trace!(target: "sync", "{}: Ignored unexpected receipts", peer_id); + sync.continue_sync(io); + return Ok(()); + } + let item_count = r.item_count()?; + trace!(target: "sync", "{} -> BlockReceipts ({} entries)", peer_id, item_count); + if item_count == 0 { + sync.deactivate_peer(io, peer_id); + } + else if sync.state == SyncState::Waiting { + trace!(target: "sync", "Ignored block receipts while waiting"); + } + else + { + let result = { + let downloader = match block_set { + BlockSet::NewBlocks => &mut sync.new_blocks, + BlockSet::OldBlocks => match sync.old_blocks { + None => { + trace!(target: "sync", "Ignored block headers while block download is inactive"); + sync.continue_sync(io); + return Ok(()); + }, + Some(ref mut blocks) => blocks, + } + }; + downloader.import_receipts(io, r) + }; + + match result { + Err(DownloaderImportError::Invalid) => { + io.disable_peer(peer_id); + sync.deactivate_peer(io, peer_id); + sync.continue_sync(io); + return Ok(()); + }, + Err(DownloaderImportError::Useless) => { + sync.deactivate_peer(io, peer_id); + }, + Ok(()) => (), + } + + sync.collect_blocks(io, block_set); + sync.sync_peer(io, peer_id, false); + } + sync.continue_sync(io); + Ok(()) + } + + /// Called when snapshot manifest is downloaded from a peer. 
+ fn on_snapshot_manifest(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), PacketDecodeError> { + if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { + trace!(target: "sync", "Ignoring snapshot manifest from unconfirmed peer {}", peer_id); + return Ok(()); + } + sync.clear_peer_download(peer_id); + if !sync.reset_peer_asking(peer_id, PeerAsking::SnapshotManifest) || sync.state != SyncState::SnapshotManifest { + trace!(target: "sync", "{}: Ignored unexpected/expired manifest", peer_id); + sync.continue_sync(io); + return Ok(()); + } + + let manifest_rlp = r.at(0)?; + let manifest = match ManifestData::from_rlp(manifest_rlp.as_raw()) { + Err(e) => { + trace!(target: "sync", "{}: Ignored bad manifest: {:?}", peer_id, e); + io.disable_peer(peer_id); + sync.continue_sync(io); + return Ok(()); + } + Ok(manifest) => manifest, + }; + + let is_supported_version = io.snapshot_service().supported_versions() + .map_or(false, |(l, h)| manifest.version >= l && manifest.version <= h); + + if !is_supported_version { + trace!(target: "sync", "{}: Snapshot manifest version not supported: {}", peer_id, manifest.version); + io.disable_peer(peer_id); + sync.continue_sync(io); + return Ok(()); + } + sync.snapshot.reset_to(&manifest, &keccak(manifest_rlp.as_raw())); + io.snapshot_service().begin_restore(manifest); + sync.state = SyncState::SnapshotData; + + // give a task to the same peer first. + sync.sync_peer(io, peer_id, false); + // give tasks to other peers + sync.continue_sync(io); + Ok(()) + } + + /// Called when snapshot data is downloaded from a peer. + fn on_snapshot_data(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), PacketDecodeError> { + if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { + trace!(target: "sync", "Ignoring snapshot data from unconfirmed peer {}", peer_id); + return Ok(()); + } + sync.clear_peer_download(peer_id); + if !sync.reset_peer_asking(peer_id, PeerAsking::SnapshotData) || (sync.state != SyncState::SnapshotData && sync.state != SyncState::SnapshotWaiting) { + trace!(target: "sync", "{}: Ignored unexpected snapshot data", peer_id); + sync.continue_sync(io); + return Ok(()); + } + + // check service status + let status = io.snapshot_service().status(); + match status { + RestorationStatus::Inactive | RestorationStatus::Failed => { + trace!(target: "sync", "{}: Snapshot restoration aborted", peer_id); + sync.state = SyncState::WaitingPeers; + + // only note bad if restoration failed. + if let (Some(hash), RestorationStatus::Failed) = (sync.snapshot.snapshot_hash(), status) { + trace!(target: "sync", "Noting snapshot hash {} as bad", hash); + sync.snapshot.note_bad(hash); + } + + sync.snapshot.clear(); + sync.continue_sync(io); + return Ok(()); + }, + RestorationStatus::Ongoing { .. 
} => { + trace!(target: "sync", "{}: Snapshot restoration is ongoing", peer_id); + }, + } + + let snapshot_data: Bytes = r.val_at(0)?; + match sync.snapshot.validate_chunk(&snapshot_data) { + Ok(ChunkType::Block(hash)) => { + trace!(target: "sync", "{}: Processing block chunk", peer_id); + io.snapshot_service().restore_block_chunk(hash, snapshot_data); + } + Ok(ChunkType::State(hash)) => { + trace!(target: "sync", "{}: Processing state chunk", peer_id); + io.snapshot_service().restore_state_chunk(hash, snapshot_data); + } + Err(()) => { + trace!(target: "sync", "{}: Got bad snapshot chunk", peer_id); + io.disconnect_peer(peer_id); + sync.continue_sync(io); + return Ok(()); + } + } + + if sync.snapshot.is_complete() { + // wait for snapshot restoration process to complete + sync.state = SyncState::SnapshotWaiting; + } + // give a task to the same peer first. + sync.sync_peer(io, peer_id, false); + // give tasks to other peers + sync.continue_sync(io); + Ok(()) + } + + /// Called by peer to report status + fn on_peer_status(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), PacketDecodeError> { + sync.handshaking_peers.remove(&peer_id); + let protocol_version: u8 = r.val_at(0)?; + let warp_protocol = io.protocol_version(&WARP_SYNC_PROTOCOL_ID, peer_id) != 0; + let peer = PeerInfo { + protocol_version: protocol_version, + network_id: r.val_at(1)?, + difficulty: Some(r.val_at(2)?), + latest_hash: r.val_at(3)?, + genesis: r.val_at(4)?, + asking: PeerAsking::Nothing, + asking_blocks: Vec::new(), + asking_hash: None, + ask_time: Instant::now(), + last_sent_transactions: HashSet::new(), + expired: false, + confirmation: if sync.fork_block.is_none() { ForkConfirmation::Confirmed } else { ForkConfirmation::Unconfirmed }, + asking_snapshot_data: None, + snapshot_hash: if warp_protocol { Some(r.val_at(5)?) } else { None }, + snapshot_number: if warp_protocol { Some(r.val_at(6)?) 
} else { None },
+ block_set: None,
+ };
+
+ trace!(target: "sync", "New peer {} (protocol: {}, network: {:?}, difficulty: {:?}, latest:{}, genesis:{}, snapshot:{:?})",
+ peer_id, peer.protocol_version, peer.network_id, peer.difficulty, peer.latest_hash, peer.genesis, peer.snapshot_number);
+ if io.is_expired() {
+ trace!(target: "sync", "Status packet from expired session {}:{}", peer_id, io.peer_info(peer_id));
+ return Ok(());
+ }
+
+ if sync.peers.contains_key(&peer_id) {
+ debug!(target: "sync", "Unexpected status packet from {}:{}", peer_id, io.peer_info(peer_id));
+ return Ok(());
+ }
+ let chain_info = io.chain().chain_info();
+ if peer.genesis != chain_info.genesis_hash {
+ io.disable_peer(peer_id);
+ trace!(target: "sync", "Peer {} genesis hash mismatch (ours: {}, theirs: {})", peer_id, chain_info.genesis_hash, peer.genesis);
+ return Ok(());
+ }
+ if peer.network_id != sync.network_id {
+ io.disable_peer(peer_id);
+ trace!(target: "sync", "Peer {} network id mismatch (ours: {}, theirs: {})", peer_id, sync.network_id, peer.network_id);
+ return Ok(());
+ }
+ if (warp_protocol && peer.protocol_version != PAR_PROTOCOL_VERSION_1 && peer.protocol_version != PAR_PROTOCOL_VERSION_2 && peer.protocol_version != PAR_PROTOCOL_VERSION_3)
+ || (!warp_protocol && peer.protocol_version != ETH_PROTOCOL_VERSION_63 && peer.protocol_version != ETH_PROTOCOL_VERSION_62) {
+ io.disable_peer(peer_id);
+ trace!(target: "sync", "Peer {} unsupported eth protocol ({})", peer_id, peer.protocol_version);
+ return Ok(());
+ }
+
+ if sync.sync_start_time.is_none() {
+ sync.sync_start_time = Some(Instant::now());
+ }
+
+ sync.peers.insert(peer_id.clone(), peer);
+ // Don't activate peer immediately when searching for common block.
+ // Let the current sync round complete first.
+ sync.active_peers.insert(peer_id.clone());
+ debug!(target: "sync", "Connected {}:{}", peer_id, io.peer_info(peer_id));
+ if let Some((fork_block, _)) = sync.fork_block {
+ SyncRequester::request_fork_header(sync, io, peer_id, fork_block);
+ } else {
+ SyncHandler::on_peer_confirmed(sync, io, peer_id);
+ }
+ Ok(())
+ }
+
+ /// Called when peer sends us new transactions
+ fn on_peer_transactions(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), PacketDecodeError> {
+ // Accept transactions only when fully synced
+ if !io.is_chain_queue_empty() || (sync.state != SyncState::Idle && sync.state != SyncState::NewBlocks) {
+ trace!(target: "sync", "{} Ignoring transactions while syncing", peer_id);
+ return Ok(());
+ }
+ if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) {
+ trace!(target: "sync", "{} Ignoring transactions from unconfirmed/unknown peer", peer_id);
+ return Ok(());
+ }
+
+ let item_count = r.item_count()?;
+ trace!(target: "sync", "{:02} -> Transactions ({} entries)", peer_id, item_count);
+ let mut transactions = Vec::with_capacity(item_count);
+ for i in 0 ..
item_count { + let rlp = r.at(i)?; + let tx = rlp.as_raw().to_vec(); + transactions.push(tx); + } + io.chain().queue_transactions(transactions, peer_id); + Ok(()) + } + + /// Called when peer sends us signed private transaction packet + fn on_signed_private_transaction(sync: &ChainSync, _io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), PacketDecodeError> { + if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { + trace!(target: "sync", "{} Ignoring packet from unconfirmed/unknown peer", peer_id); + return Ok(()); + } + + trace!(target: "sync", "Received signed private transaction packet from {:?}", peer_id); + if let Err(e) = sync.private_tx_handler.import_signed_private_transaction(r.as_raw()) { + trace!(target: "sync", "Ignoring the message, error queueing: {}", e); + } + Ok(()) + } + + /// Called when peer sends us new private transaction packet + fn on_private_transaction(sync: &ChainSync, _io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), PacketDecodeError> { + if !sync.peers.get(&peer_id).map_or(false, |p| p.can_sync()) { + trace!(target: "sync", "{} Ignoring packet from unconfirmed/unknown peer", peer_id); + return Ok(()); + } + + trace!(target: "sync", "Received private transaction packet from {:?}", peer_id); + + if let Err(e) = sync.private_tx_handler.import_private_transaction(r.as_raw()) { + trace!(target: "sync", "Ignoring the message, error queueing: {}", e); + } + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use ethcore::client::{ChainInfo, EachBlockWith, TestBlockChainClient}; + use parking_lot::RwLock; + use rlp::{Rlp}; + use std::collections::{VecDeque}; + use tests::helpers::{TestIo}; + use tests::snapshot::TestSnapshotService; + + use super::*; + use super::super::tests::{ + dummy_sync_with_peer, + get_dummy_block, + get_dummy_blocks, + get_dummy_hashes, + }; + + #[test] + fn handles_peer_new_hashes() { + let mut client = TestBlockChainClient::new(); + client.add_blocks(10, EachBlockWith::Uncle); + let queue = RwLock::new(VecDeque::new()); + let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &queue, None); + + let hashes_data = get_dummy_hashes(); + let hashes_rlp = Rlp::new(&hashes_data); + + let result = SyncHandler::on_peer_new_hashes(&mut sync, &mut io, 0, &hashes_rlp); + + assert!(result.is_ok()); + } + + #[test] + fn handles_peer_new_block_malformed() { + let mut client = TestBlockChainClient::new(); + client.add_blocks(10, EachBlockWith::Uncle); + + let block_data = get_dummy_block(11, client.chain_info().best_block_hash); + + let queue = RwLock::new(VecDeque::new()); + let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); + //sync.have_common_block = true; + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &queue, None); + + let block = Rlp::new(&block_data); + + let result = SyncHandler::on_peer_new_block(&mut sync, &mut io, 0, &block); + + assert!(result.is_err()); + } + + #[test] + fn handles_peer_new_block() { + let mut client = TestBlockChainClient::new(); + client.add_blocks(10, EachBlockWith::Uncle); + + let block_data = get_dummy_blocks(11, client.chain_info().best_block_hash); + + let queue = RwLock::new(VecDeque::new()); + let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &queue, None); + + let block = Rlp::new(&block_data); + + let result = 
SyncHandler::on_peer_new_block(&mut sync, &mut io, 0, &block); + + assert!(result.is_ok()); + } + + #[test] + fn handles_peer_new_block_empty() { + let mut client = TestBlockChainClient::new(); + client.add_blocks(10, EachBlockWith::Uncle); + let queue = RwLock::new(VecDeque::new()); + let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &queue, None); + + let empty_data = vec![]; + let block = Rlp::new(&empty_data); + + let result = SyncHandler::on_peer_new_block(&mut sync, &mut io, 0, &block); + + assert!(result.is_err()); + } + + #[test] + fn handles_peer_new_hashes_empty() { + let mut client = TestBlockChainClient::new(); + client.add_blocks(10, EachBlockWith::Uncle); + let queue = RwLock::new(VecDeque::new()); + let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &queue, None); + + let empty_hashes_data = vec![]; + let hashes_rlp = Rlp::new(&empty_hashes_data); + + let result = SyncHandler::on_peer_new_hashes(&mut sync, &mut io, 0, &hashes_rlp); + + assert!(result.is_ok()); + } +} diff --git a/ethcore/sync/src/chain/mod.rs b/ethcore/sync/src/chain/mod.rs new file mode 100644 index 00000000000..abab1da9413 --- /dev/null +++ b/ethcore/sync/src/chain/mod.rs @@ -0,0 +1,1379 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! `BlockChain` synchronization strategy. +//! Syncs to peers and keeps up to date. +//! This implementation uses ethereum protocol v63 +//! +//! Syncing strategy summary. +//! Split the chain into ranges of N blocks each. Download ranges sequentially. Split each range into subchains of M blocks. Download subchains in parallel. +//! State. +//! Sync state consists of the following data: +//! - s: State enum which can be one of the following values: `ChainHead`, `Blocks`, `Idle` +//! - H: A set of downloaded block headers +//! - B: A set of downloaded block bodies +//! - S: Set of block subchain start block hashes to download. +//! - l: Last imported / common block hash +//! - P: A set of connected peers. For each peer we maintain its last known total difficulty and starting block hash being requested if any. +//! General behaviour. +//! We start with all sets empty, l is set to the best block in the block chain, s is set to `ChainHead`. +//! If at any moment a bad block is reported by the block queue, we set s to `ChainHead`, reset l to the best block in the block chain and clear H, B and S. +//! If at any moment P becomes empty, we set s to `ChainHead`, and clear H, B and S. +//! +//! Workflow for `ChainHead` state. +//! In this state we try to get subchain headers with a single `GetBlockHeaders` request. +//! On `NewPeer` / On `Restart`: +//! 
If peer's total difficulty is higher and there are fewer than 5 peers downloading, request N/M headers with interval M+1 starting from l
+//! On `BlockHeaders(R)`:
+//! If R is empty:
+//! If l is equal to genesis block hash or l is more than 1000 blocks behind our best hash:
+//! Remove current peer from P. Set l to the best block in the block chain. Select peer with maximum total difficulty from P and restart.
+//! Else
+//! Set l to l’s parent and restart.
+//! Else if we already have all the headers in the block chain or the block queue:
+//! Set s to `Idle`.
+//! Else
+//! Set S to R, set s to `Blocks`.
+//!
+//! All other messages are ignored.
+//!
+//! Workflow for `Blocks` state.
+//! In this state we download block headers and bodies from multiple peers.
+//! On `NewPeer` / On `Restart`:
+//! For all idle peers:
+//! Find a set of 256 or fewer block hashes in H which are not in B and not being downloaded by other peers. If the set is not empty:
+//! Request block bodies for the hashes in the set.
+//! Else
+//! Find an element in S which is not being downloaded by other peers. If found: Request M headers starting from the element.
+//!
+//! On `BlockHeaders(R)`:
+//! If R is empty remove current peer from P and restart.
+//! Validate received headers:
+//! For each header find a parent in H or R or the blockchain. Restart if there is a block with unknown parent.
+//! Find at least one header from the received list in S. Restart if there is none.
+//! Go to `CollectBlocks`.
+//!
+//! On `BlockBodies(R)`:
+//! If R is empty remove current peer from P and restart.
+//! Add bodies with a matching header in H to B.
+//! Go to `CollectBlocks`.
+//!
+//! `CollectBlocks`:
+//! Find a chain of blocks C in H starting from h where h’s parent equals l. The chain ends with the first block which does not have a body in B.
+//! Add all blocks from the chain to the block queue. Remove them from H and B. Set l to the hash of the last block from C.
+//! Update and merge subchain heads in S. For each h in S find a chain of blocks in B starting from h. Remove h from S. If the chain does not include an element from S add the end of the chain to S.
+//! If H is empty and S contains a single element set s to `ChainHead`.
+//! Restart.
+//!
+//! All other messages are ignored.
+//!
+//! Workflow for `Idle` state.
+//! On `NewBlock`:
+//! Import the block. If the block is unknown set s to `ChainHead` and restart.
+//! On `NewHashes`:
+//! Set s to `ChainHead` and restart.
+//!
+//! All other messages are ignored.
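+//!
+//! Taken together, the rules above form a small state machine over the abstract
+//! state s. The sketch below is illustrative only: the `State` and `Event` names
+//! are invented for this example, and the concrete `SyncState` enum defined in
+//! this module is richer (it adds warp/snapshot and queue-wait states).
+//!
+//! ```ignore
+//! #[derive(Clone, Copy, PartialEq, Debug)]
+//! enum State { ChainHead, Blocks, Idle }
+//!
+//! enum Event {
+//!     SubchainHeadsReceived, // useful subchain heads (R) arrived while in `ChainHead`
+//!     HeadersAllKnown,       // every header in R is already in the chain or the queue
+//!     RoundComplete,         // H is empty and S is down to a single element
+//!     UnknownNewBlock,       // `NewBlock`/`NewHashes` for an unknown block while `Idle`
+//!     BadBlockOrNoPeers,     // the block queue reported a bad block, or P became empty
+//! }
+//!
+//! fn step(s: State, e: Event) -> State {
+//!     match (s, e) {
+//!         (_, Event::BadBlockOrNoPeers) => State::ChainHead,
+//!         (State::ChainHead, Event::HeadersAllKnown) => State::Idle,
+//!         (State::ChainHead, Event::SubchainHeadsReceived) => State::Blocks,
+//!         (State::Blocks, Event::RoundComplete) => State::ChainHead,
+//!         (State::Idle, Event::UnknownNewBlock) => State::ChainHead,
+//!         (s, _) => s,
+//!     }
+//! }
+//! ```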
+
+mod handler;
+mod propagator;
+mod requester;
+mod supplier;
+
+use std::sync::Arc;
+use std::collections::{HashSet, HashMap};
+use std::cmp;
+use std::time::{Duration, Instant};
+use hash::keccak;
+use heapsize::HeapSizeOf;
+use ethereum_types::{H256, U256};
+use plain_hasher::H256FastMap;
+use parking_lot::RwLock;
+use bytes::Bytes;
+use rlp::{Rlp, RlpStream, DecoderError};
+use network::{self, PeerId, PacketId};
+use ethcore::header::{BlockNumber};
+use ethcore::client::{BlockChainClient, BlockStatus, BlockId, BlockChainInfo, BlockQueueInfo};
+use ethcore::snapshot::{RestorationStatus};
+use sync_io::SyncIo;
+use super::{WarpSync, SyncConfig};
+use block_sync::{BlockDownloader, BlockDownloaderImportError as DownloaderImportError};
+use rand::Rng;
+use snapshot::{Snapshot};
+use api::{EthProtocolInfo as PeerInfoDigest, WARP_SYNC_PROTOCOL_ID};
+use private_tx::PrivateTxHandler;
+use transactions_stats::{TransactionsStats, Stats as TransactionStats};
+use transaction::UnverifiedTransaction;
+
+use self::handler::SyncHandler;
+use self::propagator::SyncPropagator;
+use self::requester::SyncRequester;
+use self::supplier::SyncSupplier;
+
+known_heap_size!(0, PeerInfo);
+
+pub type PacketDecodeError = DecoderError;
+
+/// Version 63 of the Ethereum protocol.
+pub const ETH_PROTOCOL_VERSION_63: u8 = 63;
+/// Version 62 of the Ethereum protocol.
+pub const ETH_PROTOCOL_VERSION_62: u8 = 62;
+/// Version 1 of the Parity protocol.
+pub const PAR_PROTOCOL_VERSION_1: u8 = 1;
+/// Version 2 of the Parity protocol (consensus messages added).
+pub const PAR_PROTOCOL_VERSION_2: u8 = 2;
+/// Version 3 of the Parity protocol (private transactions messages added).
+pub const PAR_PROTOCOL_VERSION_3: u8 = 3;
+
+pub const MAX_BODIES_TO_SEND: usize = 256;
+pub const MAX_HEADERS_TO_SEND: usize = 512;
+pub const MAX_NODE_DATA_TO_SEND: usize = 1024;
+pub const MAX_RECEIPTS_TO_SEND: usize = 1024;
+pub const MAX_RECEIPTS_HEADERS_TO_SEND: usize = 256;
+const MIN_PEERS_PROPAGATION: usize = 4;
+const MAX_PEERS_PROPAGATION: usize = 128;
+const MAX_PEER_LAG_PROPAGATION: BlockNumber = 20;
+const MAX_NEW_HASHES: usize = 64;
+const MAX_NEW_BLOCK_AGE: BlockNumber = 20;
+// Maximal packet size with transactions (cannot be greater than 16MB - protocol limitation).
+const MAX_TRANSACTION_PACKET_SIZE: usize = 8 * 1024 * 1024;
+// Maximal number of transactions sent in a single packet.
+const MAX_TRANSACTIONS_TO_PROPAGATE: usize = 64; +// Min number of blocks to be behind for a snapshot sync +const SNAPSHOT_RESTORE_THRESHOLD: BlockNumber = 30000; +const SNAPSHOT_MIN_PEERS: usize = 3; + +const STATUS_PACKET: u8 = 0x00; +const NEW_BLOCK_HASHES_PACKET: u8 = 0x01; +const TRANSACTIONS_PACKET: u8 = 0x02; +pub const GET_BLOCK_HEADERS_PACKET: u8 = 0x03; +pub const BLOCK_HEADERS_PACKET: u8 = 0x04; +pub const GET_BLOCK_BODIES_PACKET: u8 = 0x05; +const BLOCK_BODIES_PACKET: u8 = 0x06; +const NEW_BLOCK_PACKET: u8 = 0x07; + +pub const GET_NODE_DATA_PACKET: u8 = 0x0d; +pub const NODE_DATA_PACKET: u8 = 0x0e; +pub const GET_RECEIPTS_PACKET: u8 = 0x0f; +pub const RECEIPTS_PACKET: u8 = 0x10; + +pub const ETH_PACKET_COUNT: u8 = 0x11; + +pub const GET_SNAPSHOT_MANIFEST_PACKET: u8 = 0x11; +pub const SNAPSHOT_MANIFEST_PACKET: u8 = 0x12; +pub const GET_SNAPSHOT_DATA_PACKET: u8 = 0x13; +pub const SNAPSHOT_DATA_PACKET: u8 = 0x14; +pub const CONSENSUS_DATA_PACKET: u8 = 0x15; +const PRIVATE_TRANSACTION_PACKET: u8 = 0x16; +const SIGNED_PRIVATE_TRANSACTION_PACKET: u8 = 0x17; + +pub const SNAPSHOT_SYNC_PACKET_COUNT: u8 = 0x18; + +const MAX_SNAPSHOT_CHUNKS_DOWNLOAD_AHEAD: usize = 3; + +const WAIT_PEERS_TIMEOUT: Duration = Duration::from_secs(5); +const STATUS_TIMEOUT: Duration = Duration::from_secs(5); +const HEADERS_TIMEOUT: Duration = Duration::from_secs(15); +const BODIES_TIMEOUT: Duration = Duration::from_secs(20); +const RECEIPTS_TIMEOUT: Duration = Duration::from_secs(10); +const FORK_HEADER_TIMEOUT: Duration = Duration::from_secs(3); +const SNAPSHOT_MANIFEST_TIMEOUT: Duration = Duration::from_secs(5); +const SNAPSHOT_DATA_TIMEOUT: Duration = Duration::from_secs(120); + +#[derive(Copy, Clone, Eq, PartialEq, Debug)] +/// Sync state +pub enum SyncState { + /// Collecting enough peers to start syncing. + WaitingPeers, + /// Waiting for snapshot manifest download + SnapshotManifest, + /// Downloading snapshot data + SnapshotData, + /// Waiting for snapshot restoration progress. + SnapshotWaiting, + /// Downloading new blocks + Blocks, + /// Initial chain sync complete. Waiting for new packets + Idle, + /// Block downloading paused. Waiting for block queue to process blocks and free some space + Waiting, + /// Downloading blocks learned from `NewHashes` packet + NewBlocks, +} + +/// Syncing status and statistics +#[derive(Clone, Copy)] +pub struct SyncStatus { + /// State + pub state: SyncState, + /// Syncing protocol version. That's the maximum protocol version we connect to. + pub protocol_version: u8, + /// The underlying p2p network version. + pub network_id: u64, + /// `BlockChain` height for the moment the sync started. + pub start_block_number: BlockNumber, + /// Last fully downloaded and imported block number (if any). + pub last_imported_block_number: Option, + /// Highest block number in the download queue (if any). + pub highest_block_number: Option, + /// Total number of blocks for the sync process. + pub blocks_total: BlockNumber, + /// Number of blocks downloaded so far. + pub blocks_received: BlockNumber, + /// Total number of connected peers + pub num_peers: usize, + /// Total number of active peers. + pub num_active_peers: usize, + /// Heap memory used in bytes. + pub mem_used: usize, + /// Snapshot chunks + pub num_snapshot_chunks: usize, + /// Snapshot chunks downloaded + pub snapshot_chunks_done: usize, + /// Last fully downloaded and imported ancient block number (if any). 
+
+/// Syncing status and statistics
+#[derive(Clone, Copy)]
+pub struct SyncStatus {
+	/// State
+	pub state: SyncState,
+	/// Syncing protocol version. That's the maximum protocol version we connect to.
+	pub protocol_version: u8,
+	/// The underlying p2p network version.
+	pub network_id: u64,
+	/// `BlockChain` height for the moment the sync started.
+	pub start_block_number: BlockNumber,
+	/// Last fully downloaded and imported block number (if any).
+	pub last_imported_block_number: Option<BlockNumber>,
+	/// Highest block number in the download queue (if any).
+	pub highest_block_number: Option<BlockNumber>,
+	/// Total number of blocks for the sync process.
+	pub blocks_total: BlockNumber,
+	/// Number of blocks downloaded so far.
+	pub blocks_received: BlockNumber,
+	/// Total number of connected peers
+	pub num_peers: usize,
+	/// Total number of active peers.
+	pub num_active_peers: usize,
+	/// Heap memory used in bytes.
+	pub mem_used: usize,
+	/// Snapshot chunks
+	pub num_snapshot_chunks: usize,
+	/// Snapshot chunks downloaded
+	pub snapshot_chunks_done: usize,
+	/// Last fully downloaded and imported ancient block number (if any).
+	pub last_imported_old_block_number: Option<BlockNumber>,
+}
+
+impl SyncStatus {
+	/// Indicates if snapshot download is in progress
+	pub fn is_snapshot_syncing(&self) -> bool {
+		self.state == SyncState::SnapshotManifest
+			|| self.state == SyncState::SnapshotData
+			|| self.state == SyncState::SnapshotWaiting
+	}
+
+	/// Returns the maximum number of peers to display in informants
+	pub fn current_max_peers(&self, min_peers: u32, max_peers: u32) -> u32 {
+		if self.num_peers as u32 > min_peers {
+			max_peers
+		} else {
+			min_peers
+		}
+	}
+
+	/// Is it doing a major sync?
+	pub fn is_syncing(&self, queue_info: BlockQueueInfo) -> bool {
+		let is_syncing_state = match self.state { SyncState::Idle | SyncState::NewBlocks => false, _ => true };
+		let is_verifying = queue_info.unverified_queue_size + queue_info.verified_queue_size > 3;
+		is_verifying || is_syncing_state
+	}
+}
+
+#[derive(PartialEq, Eq, Debug, Clone)]
+/// Peer data type requested
+pub enum PeerAsking {
+	Nothing,
+	ForkHeader,
+	BlockHeaders,
+	BlockBodies,
+	BlockReceipts,
+	SnapshotManifest,
+	SnapshotData,
+}
+
+#[derive(PartialEq, Eq, Debug, Clone, Copy)]
+/// Block downloader channel.
+pub enum BlockSet {
+	/// New blocks better than our best blocks
+	NewBlocks,
+	/// Missing old blocks
+	OldBlocks,
+}
+
+#[derive(Clone, Eq, PartialEq)]
+pub enum ForkConfirmation {
+	/// Fork block confirmation pending.
+	Unconfirmed,
+	/// Fork is confirmed.
+	Confirmed,
+}
+
+#[derive(Clone)]
+/// Syncing peer information
+pub struct PeerInfo {
+	/// eth protocol version
+	protocol_version: u8,
+	/// Peer chain genesis hash
+	genesis: H256,
+	/// Peer network id
+	network_id: u64,
+	/// Peer best block hash
+	latest_hash: H256,
+	/// Peer total difficulty if known
+	difficulty: Option<U256>,
+	/// Type of data currently being requested from peer.
+	asking: PeerAsking,
+	/// A set of block numbers being requested
+	asking_blocks: Vec<BlockNumber>,
+	/// Holds requested header hash if currently requesting block header by hash
+	asking_hash: Option<H256>,
+	/// Holds requested snapshot chunk hash if any.
+	asking_snapshot_data: Option<H256>,
+	/// Request timestamp
+	ask_time: Instant,
+	/// Holds a set of transactions recently sent to this peer to avoid spamming.
+	last_sent_transactions: HashSet<H256>,
+	/// Pending request is expired and result should be ignored
+	expired: bool,
+	/// Peer fork confirmation status
+	confirmation: ForkConfirmation,
+	/// Best snapshot hash
+	snapshot_hash: Option<H256>,
+	/// Best snapshot block number
+	snapshot_number: Option<BlockNumber>,
+	/// Block set requested
+	block_set: Option<BlockSet>,
+}
+
+impl PeerInfo {
+	fn can_sync(&self) -> bool {
+		self.confirmation == ForkConfirmation::Confirmed && !self.expired
+	}
+
+	fn is_allowed(&self) -> bool {
+		self.confirmation != ForkConfirmation::Unconfirmed && !self.expired
+	}
+
+	fn reset_asking(&mut self) {
+		self.asking_blocks.clear();
+		self.asking_hash = None;
+		// mark any pending requests as expired
+		if self.asking != PeerAsking::Nothing && self.is_allowed() {
+			self.expired = true;
+		}
+	}
+}
+
+#[cfg(not(test))]
+pub mod random {
+	use rand;
+	pub fn new() -> rand::ThreadRng { rand::thread_rng() }
+}
+#[cfg(test)]
+pub mod random {
+	use rand::{self, SeedableRng};
+	pub fn new() -> rand::XorShiftRng { rand::XorShiftRng::from_seed([0, 1, 2, 3]) }
+}
+
+pub type RlpResponseResult = Result<Option<(PacketId, RlpStream)>, PacketDecodeError>;
+pub type Peers = HashMap<PeerId, PeerInfo>;
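+
+// Note that in test builds `random::new()` above returns a seeded `XorShiftRng`,
+// so any randomized peer selection is fully deterministic under test, e.g.:
+//
+//     let mut peers = vec![1, 2, 3, 4];
+//     random::new().shuffle(&mut peers); // same order on every test run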
+
+/// Blockchain sync handler.
+/// See module documentation for more details.
+pub struct ChainSync {
+	/// Sync state
+	state: SyncState,
+	/// Last block number for the start of sync
+	starting_block: BlockNumber,
+	/// Highest block number seen
+	highest_block: Option<BlockNumber>,
+	/// All connected peers
+	peers: Peers,
+	/// Peers active for current sync round
+	active_peers: HashSet<PeerId>,
+	/// Block download process for new blocks
+	new_blocks: BlockDownloader,
+	/// Block download process for ancient blocks
+	old_blocks: Option<BlockDownloader>,
+	/// Last propagated block number
+	last_sent_block_number: BlockNumber,
+	/// Network ID
+	network_id: u64,
+	/// Optional fork block to check
+	fork_block: Option<(BlockNumber, H256)>,
+	/// Snapshot downloader.
+	snapshot: Snapshot,
+	/// Connected peers pending Status message.
+	/// Value is request timestamp.
+	handshaking_peers: HashMap<PeerId, Instant>,
+	/// Sync start timestamp. Measured when first peer is connected
+	sync_start_time: Option<Instant>,
+	/// Transactions propagation statistics
+	transactions_stats: TransactionsStats,
+	/// Enable ancient block downloading
+	download_old_blocks: bool,
+	/// Shared private tx service.
+	private_tx_handler: Arc<PrivateTxHandler>,
+	/// Enable warp sync.
+	warp_sync: WarpSync,
+}
+
+impl ChainSync {
+	/// Create a new instance of the syncing strategy.
+	pub fn new(config: SyncConfig, chain: &BlockChainClient, private_tx_handler: Arc<PrivateTxHandler>) -> ChainSync {
+		let chain_info = chain.chain_info();
+		let best_block = chain.chain_info().best_block_number;
+		let state = ChainSync::get_init_state(config.warp_sync, chain);
+
+		let mut sync = ChainSync {
+			state,
+			starting_block: best_block,
+			highest_block: None,
+			peers: HashMap::new(),
+			handshaking_peers: HashMap::new(),
+			active_peers: HashSet::new(),
+			new_blocks: BlockDownloader::new(false, &chain_info.best_block_hash, chain_info.best_block_number),
+			old_blocks: None,
+			last_sent_block_number: 0,
+			network_id: config.network_id,
+			fork_block: config.fork_block,
+			download_old_blocks: config.download_old_blocks,
+			snapshot: Snapshot::new(),
+			sync_start_time: None,
+			transactions_stats: TransactionsStats::default(),
+			private_tx_handler,
+			warp_sync: config.warp_sync,
+		};
+		sync.update_targets(chain);
+		sync
+	}
+
+	fn get_init_state(warp_sync: WarpSync, chain: &BlockChainClient) -> SyncState {
+		let best_block = chain.chain_info().best_block_number;
+		match warp_sync {
+			WarpSync::Enabled => SyncState::WaitingPeers,
+			WarpSync::OnlyAndAfter(block) if block > best_block => SyncState::WaitingPeers,
+			_ => SyncState::Idle,
+		}
+	}
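+
+	// A minimal construction sketch, mirroring the test helpers further below
+	// (`TestBlockChainClient` and `NoopPrivateTxHandler` come from the test
+	// modules and are illustrative only); assuming the default config leaves
+	// warp sync disabled, the initial state is `Idle`:
+	//
+	//     let client = TestBlockChainClient::new();
+	//     let sync = ChainSync::new(SyncConfig::default(), &client, Arc::new(NoopPrivateTxHandler));
+	//     assert_eq!(sync.status().state, SyncState::Idle);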
+
+	/// Returns synchronization status
+	pub fn status(&self) -> SyncStatus {
+		let last_imported_number = self.new_blocks.last_imported_block_number();
+		SyncStatus {
+			state: self.state.clone(),
+			protocol_version: ETH_PROTOCOL_VERSION_63,
+			network_id: self.network_id,
+			start_block_number: self.starting_block,
+			last_imported_block_number: Some(last_imported_number),
+			last_imported_old_block_number: self.old_blocks.as_ref().map(|d| d.last_imported_block_number()),
+			highest_block_number: self.highest_block.map(|n| cmp::max(n, last_imported_number)),
+			blocks_received: if last_imported_number > self.starting_block { last_imported_number - self.starting_block } else { 0 },
+			blocks_total: match self.highest_block { Some(x) if x > self.starting_block => x - self.starting_block, _ => 0 },
+			num_peers: self.peers.values().filter(|p| p.is_allowed()).count(),
+			num_active_peers: self.peers.values().filter(|p| p.is_allowed() && p.asking != PeerAsking::Nothing).count(),
+			num_snapshot_chunks: self.snapshot.total_chunks(),
+			snapshot_chunks_done: self.snapshot.done_chunks(),
+			mem_used:
+				self.new_blocks.heap_size()
+				+ self.old_blocks.as_ref().map_or(0, |d| d.heap_size())
+				+ self.peers.heap_size_of_children(),
+		}
+	}
+
+	/// Returns information on peers connections
+	pub fn peer_info(&self, peer_id: &PeerId) -> Option<PeerInfoDigest> {
+		self.peers.get(peer_id).map(|peer_data| {
+			PeerInfoDigest {
+				version: peer_data.protocol_version as u32,
+				difficulty: peer_data.difficulty,
+				head: peer_data.latest_hash,
+			}
+		})
+	}
+
+	/// Returns transactions propagation statistics
+	pub fn transactions_stats(&self) -> &H256FastMap<TransactionStats> {
+		self.transactions_stats.stats()
+	}
+
+	/// Notes that the given transactions were received from a peer
+	pub fn transactions_received(&mut self, txs: &[UnverifiedTransaction], peer_id: PeerId) {
+		if let Some(peer_info) = self.peers.get_mut(&peer_id) {
+			peer_info.last_sent_transactions.extend(txs.iter().map(|tx| tx.hash()));
+		}
+	}
+
+	/// Abort all sync activity
+	pub fn abort(&mut self, io: &mut SyncIo) {
+		self.reset_and_continue(io);
+		self.peers.clear();
+	}
+
+	/// Reset sync. Clear all downloaded data but keep the queue
+	fn reset(&mut self, io: &mut SyncIo) {
+		self.new_blocks.reset();
+		let chain_info = io.chain().chain_info();
+		for (_, ref mut p) in &mut self.peers {
+			if p.block_set != Some(BlockSet::OldBlocks) {
+				p.reset_asking();
+				if p.difficulty.is_none() {
+					// assume peer has up to date difficulty
+					p.difficulty = Some(chain_info.pending_total_difficulty);
+				}
+			}
+		}
+		self.state = ChainSync::get_init_state(self.warp_sync, io.chain());
+		// Reactivate peers only if some progress has been made
+		// since the last sync round, or if starting fresh.
+		self.active_peers = self.peers.keys().cloned().collect();
+	}
+
+	/// Restart sync
+	pub fn reset_and_continue(&mut self, io: &mut SyncIo) {
+		trace!(target: "sync", "Restarting");
+		if self.state == SyncState::SnapshotData {
+			debug!(target:"sync", "Aborting snapshot restore");
+			io.snapshot_service().abort_restore();
+		}
+		self.snapshot.clear();
+		self.reset(io);
+		self.continue_sync(io);
+	}
+
+	/// Remove peer from active peer set. Peer will be reactivated on the next sync
+	/// round.
+	fn deactivate_peer(&mut self, _io: &mut SyncIo, peer_id: PeerId) {
+		trace!(target: "sync", "Deactivating peer {}", peer_id);
+		self.active_peers.remove(&peer_id);
+	}
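+
+	// Worked example for the peer snapshot filter in `maybe_start_snapshot_sync`
+	// below (illustrative numbers only): with SNAPSHOT_RESTORE_THRESHOLD = 30_000,
+	// our_best_block = 100_000 and highest_block = 1_000_000, a peer snapshot at
+	// block `sn` qualifies only if sn > 130_000 (far enough ahead of us), sn is
+	// past the fork and warp-barrier blocks, and sn >= 970_000 (recent enough
+	// relative to the highest block we have heard of).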
+	fn maybe_start_snapshot_sync(&mut self, io: &mut SyncIo) {
+		if !self.warp_sync.is_enabled() || io.snapshot_service().supported_versions().is_none() {
+			trace!(target: "sync", "Skipping warp sync. Disabled or not supported.");
+			return;
+		}
+		if self.state != SyncState::WaitingPeers && self.state != SyncState::Blocks && self.state != SyncState::Waiting {
+			trace!(target: "sync", "Skipping warp sync. State: {:?}", self.state);
+			return;
+		}
+		// Make sure the snapshot block is not too far away from the best block and the network best block,
+		// and that it is higher than the fork detection block
+		let our_best_block = io.chain().chain_info().best_block_number;
+		let fork_block = self.fork_block.map_or(0, |(n, _)| n);
+
+		let (best_hash, max_peers, snapshot_peers) = {
+			let expected_warp_block = match self.warp_sync {
+				WarpSync::OnlyAndAfter(block) => block,
+				_ => 0,
+			};
+			// Collect snapshot info from peers
+			let snapshots = self.peers.iter()
+				.filter(|&(_, p)| p.is_allowed() && p.snapshot_number.map_or(false, |sn|
+					// Snapshot must be far enough ahead that it's useful to sync with it
+					our_best_block < sn && (sn - our_best_block) > SNAPSHOT_RESTORE_THRESHOLD &&
+					// Snapshot must have been taken after the fork
+					sn > fork_block &&
+					// Snapshot must be greater than the warp barrier if any
+					sn > expected_warp_block &&
+					// If we know a highest block, snapshot must be recent enough
+					self.highest_block.map_or(true, |highest| {
+						highest < sn || (highest - sn) <= SNAPSHOT_RESTORE_THRESHOLD
+					})
+				))
+				.filter_map(|(p, peer)| peer.snapshot_hash.map(|hash| (p, hash.clone())))
+				.filter(|&(_, ref hash)| !self.snapshot.is_known_bad(hash));
+
+			let mut snapshot_peers = HashMap::new();
+			let mut max_peers: usize = 0;
+			let mut best_hash = None;
+			for (p, hash) in snapshots {
+				let peers = snapshot_peers.entry(hash).or_insert_with(Vec::new);
+				peers.push(*p);
+				if peers.len() > max_peers {
+					max_peers = peers.len();
+					best_hash = Some(hash);
+				}
+			}
+			(best_hash, max_peers, snapshot_peers)
+		};
+
+		let timeout = (self.state == SyncState::WaitingPeers) && self.sync_start_time.map_or(false, |t| t.elapsed() > WAIT_PEERS_TIMEOUT);
+
+		if let (Some(hash), Some(peers)) = (best_hash, best_hash.map_or(None, |h| snapshot_peers.get(&h))) {
+			if max_peers >= SNAPSHOT_MIN_PEERS {
+				trace!(target: "sync", "Starting confirmed snapshot sync {:?} with {:?}", hash, peers);
+				self.start_snapshot_sync(io, peers);
+			} else if timeout {
+				trace!(target: "sync", "Starting unconfirmed snapshot sync {:?} with {:?}", hash, peers);
+				self.start_snapshot_sync(io, peers);
+			}
+		} else if timeout && !self.warp_sync.is_warp_only() {
+			trace!(target: "sync", "No snapshots found, starting full sync");
+			self.state = SyncState::Idle;
+			self.continue_sync(io);
+		}
+	}
+
+	fn start_snapshot_sync(&mut self, io: &mut SyncIo, peers: &[PeerId]) {
+		if !self.snapshot.have_manifest() {
+			for p in peers {
+				if self.peers.get(p).map_or(false, |p| p.asking == PeerAsking::Nothing) {
+					SyncRequester::request_snapshot_manifest(self, io, *p);
+				}
+			}
+			self.state = SyncState::SnapshotManifest;
+			trace!(target: "sync", "New snapshot sync with {:?}", peers);
+		} else {
+			self.state = SyncState::SnapshotData;
+			trace!(target: "sync", "Resumed snapshot sync with {:?}", peers);
+		}
+	}
+
+	/// Restart sync disregarding the block queue status. May end up re-downloading up to QUEUE_SIZE blocks
+	pub fn restart(&mut self, io: &mut SyncIo) {
+		self.update_targets(io.chain());
+		self.reset_and_continue(io);
+	}
+
+	/// Update sync after the blockchain has been changed externally.
+ pub fn update_targets(&mut self, chain: &BlockChainClient) { + // Do not assume that the block queue/chain still has our last_imported_block + let chain = chain.chain_info(); + self.new_blocks = BlockDownloader::new(false, &chain.best_block_hash, chain.best_block_number); + self.old_blocks = None; + if self.download_old_blocks { + if let (Some(ancient_block_hash), Some(ancient_block_number)) = (chain.ancient_block_hash, chain.ancient_block_number) { + + trace!(target: "sync", "Downloading old blocks from {:?} (#{}) till {:?} (#{:?})", ancient_block_hash, ancient_block_number, chain.first_block_hash, chain.first_block_number); + let mut downloader = BlockDownloader::with_unlimited_reorg(true, &ancient_block_hash, ancient_block_number); + if let Some(hash) = chain.first_block_hash { + trace!(target: "sync", "Downloader target set to {:?}", hash); + downloader.set_target(&hash); + } + self.old_blocks = Some(downloader); + } + } + } + + /// Resume downloading + fn continue_sync(&mut self, io: &mut SyncIo) { + // Collect active peers that can sync + let confirmed_peers: Vec<(PeerId, u8)> = self.peers.iter().filter_map(|(peer_id, peer)| + if peer.can_sync() { + Some((*peer_id, peer.protocol_version)) + } else { + None + } + ).collect(); + let mut peers: Vec<(PeerId, u8)> = confirmed_peers.iter().filter(|&&(peer_id, _)| + self.active_peers.contains(&peer_id) + ).map(|v| *v).collect(); + + random::new().shuffle(&mut peers); //TODO: sort by rating + // prefer peers with higher protocol version + peers.sort_by(|&(_, ref v1), &(_, ref v2)| v1.cmp(v2)); + trace!( + target: "sync", + "Syncing with peers: {} active, {} confirmed, {} total", + self.active_peers.len(), confirmed_peers.len(), self.peers.len() + ); + for (peer_id, _) in peers { + self.sync_peer(io, peer_id, false); + } + + if + (self.state == SyncState::Blocks || self.state == SyncState::NewBlocks) && + !self.peers.values().any(|p| p.asking != PeerAsking::Nothing && p.block_set != Some(BlockSet::OldBlocks) && p.can_sync()) + { + self.complete_sync(io); + } + } + + /// Called after all blocks have been downloaded + fn complete_sync(&mut self, io: &mut SyncIo) { + trace!(target: "sync", "Sync complete"); + self.reset(io); + } + + /// Enter waiting state + fn pause_sync(&mut self) { + trace!(target: "sync", "Block queue full, pausing sync"); + self.state = SyncState::Waiting; + } + + /// Find something to do for a peer. Called for a new peer or when a peer is done with its task. 
+ fn sync_peer(&mut self, io: &mut SyncIo, peer_id: PeerId, force: bool) { + if !self.active_peers.contains(&peer_id) { + trace!(target: "sync", "Skipping deactivated peer {}", peer_id); + return; + } + let (peer_latest, peer_difficulty, peer_snapshot_number, peer_snapshot_hash) = { + if let Some(peer) = self.peers.get_mut(&peer_id) { + if peer.asking != PeerAsking::Nothing || !peer.can_sync() { + trace!(target: "sync", "Skipping busy peer {}", peer_id); + return; + } + if self.state == SyncState::Waiting { + trace!(target: "sync", "Waiting for the block queue"); + return; + } + if self.state == SyncState::SnapshotWaiting { + trace!(target: "sync", "Waiting for the snapshot restoration"); + return; + } + (peer.latest_hash.clone(), peer.difficulty.clone(), peer.snapshot_number.as_ref().cloned().unwrap_or(0), peer.snapshot_hash.as_ref().cloned()) + } else { + return; + } + }; + let chain_info = io.chain().chain_info(); + let syncing_difficulty = chain_info.pending_total_difficulty; + let num_active_peers = self.peers.values().filter(|p| p.asking != PeerAsking::Nothing).count(); + + let higher_difficulty = peer_difficulty.map_or(true, |pd| pd > syncing_difficulty); + if force || higher_difficulty || self.old_blocks.is_some() { + match self.state { + SyncState::WaitingPeers => { + trace!( + target: "sync", + "Checking snapshot sync: {} vs {} (peer: {})", + peer_snapshot_number, + chain_info.best_block_number, + peer_id + ); + self.maybe_start_snapshot_sync(io); + }, + SyncState::Idle | SyncState::Blocks | SyncState::NewBlocks => { + if io.chain().queue_info().is_full() { + self.pause_sync(); + return; + } + + let have_latest = io.chain().block_status(BlockId::Hash(peer_latest)) != BlockStatus::Unknown; + trace!(target: "sync", "Considering peer {}, force={}, td={:?}, our td={}, latest={}, have_latest={}, state={:?}", peer_id, force, peer_difficulty, syncing_difficulty, peer_latest, have_latest, self.state); + if !have_latest && (higher_difficulty || force || self.state == SyncState::NewBlocks) { + // check if got new blocks to download + trace!(target: "sync", "Syncing with peer {}, force={}, td={:?}, our td={}, state={:?}", peer_id, force, peer_difficulty, syncing_difficulty, self.state); + if let Some(request) = self.new_blocks.request_blocks(io, num_active_peers) { + SyncRequester::request_blocks(self, io, peer_id, request, BlockSet::NewBlocks); + if self.state == SyncState::Idle { + self.state = SyncState::Blocks; + } + return; + } + } + + if let Some(request) = self.old_blocks.as_mut().and_then(|d| d.request_blocks(io, num_active_peers)) { + SyncRequester::request_blocks(self, io, peer_id, request, BlockSet::OldBlocks); + return; + } + }, + SyncState::SnapshotData => { + if let RestorationStatus::Ongoing { state_chunks_done, block_chunks_done, .. 
} = io.snapshot_service().status() { + if self.snapshot.done_chunks() - (state_chunks_done + block_chunks_done) as usize > MAX_SNAPSHOT_CHUNKS_DOWNLOAD_AHEAD { + trace!(target: "sync", "Snapshot queue full, pausing sync"); + self.state = SyncState::SnapshotWaiting; + return; + } + } + if peer_snapshot_hash.is_some() && peer_snapshot_hash == self.snapshot.snapshot_hash() { + self.clear_peer_download(peer_id); + SyncRequester::request_snapshot_data(self, io, peer_id); + } + }, + SyncState::SnapshotManifest | //already downloading from other peer + SyncState::Waiting | SyncState::SnapshotWaiting => () + } + } else { + trace!(target: "sync", "Skipping peer {}, force={}, td={:?}, our td={}, state={:?}", peer_id, force, peer_difficulty, syncing_difficulty, self.state); + } + } + + /// Clear all blocks/headers marked as being downloaded by a peer. + fn clear_peer_download(&mut self, peer_id: PeerId) { + if let Some(ref peer) = self.peers.get(&peer_id) { + match peer.asking { + PeerAsking::BlockHeaders => { + if let Some(ref hash) = peer.asking_hash { + self.new_blocks.clear_header_download(hash); + if let Some(ref mut old) = self.old_blocks { + old.clear_header_download(hash); + } + } + }, + PeerAsking::BlockBodies => { + self.new_blocks.clear_body_download(&peer.asking_blocks); + if let Some(ref mut old) = self.old_blocks { + old.clear_body_download(&peer.asking_blocks); + } + }, + PeerAsking::BlockReceipts => { + self.new_blocks.clear_receipt_download(&peer.asking_blocks); + if let Some(ref mut old) = self.old_blocks { + old.clear_receipt_download(&peer.asking_blocks); + } + }, + PeerAsking::SnapshotData => { + if let Some(hash) = peer.asking_snapshot_data { + self.snapshot.clear_chunk_download(&hash); + } + }, + _ => (), + } + } + } + + /// Checks if there are blocks fully downloaded that can be imported into the blockchain and does the import. + fn collect_blocks(&mut self, io: &mut SyncIo, block_set: BlockSet) { + match block_set { + BlockSet::NewBlocks => { + if self.new_blocks.collect_blocks(io, self.state == SyncState::NewBlocks) == Err(DownloaderImportError::Invalid) { + self.restart(io); + } + }, + BlockSet::OldBlocks => { + if self.old_blocks.as_mut().map_or(false, |downloader| { downloader.collect_blocks(io, false) == Err(DownloaderImportError::Invalid) }) { + self.restart(io); + } else if self.old_blocks.as_ref().map_or(false, |downloader| { downloader.is_complete() }) { + trace!(target: "sync", "Background block download is complete"); + self.old_blocks = None; + } + } + } + } + + /// Reset peer status after request is complete. 
+ fn reset_peer_asking(&mut self, peer_id: PeerId, asking: PeerAsking) -> bool { + if let Some(ref mut peer) = self.peers.get_mut(&peer_id) { + peer.expired = false; + peer.block_set = None; + if peer.asking != asking { + trace!(target:"sync", "Asking {:?} while expected {:?}", peer.asking, asking); + peer.asking = PeerAsking::Nothing; + return false; + } else { + peer.asking = PeerAsking::Nothing; + return true; + } + } + false + } + + /// Send Status message + fn send_status(&mut self, io: &mut SyncIo, peer: PeerId) -> Result<(), network::Error> { + let warp_protocol_version = io.protocol_version(&WARP_SYNC_PROTOCOL_ID, peer); + let warp_protocol = warp_protocol_version != 0; + let protocol = if warp_protocol { warp_protocol_version } else { ETH_PROTOCOL_VERSION_63 }; + trace!(target: "sync", "Sending status to {}, protocol version {}", peer, protocol); + let mut packet = RlpStream::new_list(if warp_protocol { 7 } else { 5 }); + let chain = io.chain().chain_info(); + packet.append(&(protocol as u32)); + packet.append(&self.network_id); + packet.append(&chain.total_difficulty); + packet.append(&chain.best_block_hash); + packet.append(&chain.genesis_hash); + if warp_protocol { + let manifest = match self.old_blocks.is_some() { + true => None, + false => io.snapshot_service().manifest(), + }; + let block_number = manifest.as_ref().map_or(0, |m| m.block_number); + let manifest_hash = manifest.map_or(H256::new(), |m| keccak(m.into_rlp())); + packet.append(&manifest_hash); + packet.append(&block_number); + } + io.respond(STATUS_PACKET, packet.out()) + } + + pub fn maintain_peers(&mut self, io: &mut SyncIo) { + let tick = Instant::now(); + let mut aborting = Vec::new(); + for (peer_id, peer) in &self.peers { + let elapsed = tick - peer.ask_time; + let timeout = match peer.asking { + PeerAsking::BlockHeaders => elapsed > HEADERS_TIMEOUT, + PeerAsking::BlockBodies => elapsed > BODIES_TIMEOUT, + PeerAsking::BlockReceipts => elapsed > RECEIPTS_TIMEOUT, + PeerAsking::Nothing => false, + PeerAsking::ForkHeader => elapsed > FORK_HEADER_TIMEOUT, + PeerAsking::SnapshotManifest => elapsed > SNAPSHOT_MANIFEST_TIMEOUT, + PeerAsking::SnapshotData => elapsed > SNAPSHOT_DATA_TIMEOUT, + }; + if timeout { + debug!(target:"sync", "Timeout {}", peer_id); + io.disconnect_peer(*peer_id); + aborting.push(*peer_id); + } + } + for p in aborting { + SyncHandler::on_peer_aborting(self, io, p); + } + + // Check for handshake timeouts + for (peer, &ask_time) in &self.handshaking_peers { + let elapsed = (tick - ask_time) / 1_000_000_000; + if elapsed > STATUS_TIMEOUT { + trace!(target:"sync", "Status timeout {}", peer); + io.disconnect_peer(*peer); + } + } + } + + fn check_resume(&mut self, io: &mut SyncIo) { + if self.state == SyncState::Waiting && !io.chain().queue_info().is_full() { + self.state = SyncState::Blocks; + self.continue_sync(io); + } else if self.state == SyncState::SnapshotWaiting { + match io.snapshot_service().status() { + RestorationStatus::Inactive => { + trace!(target:"sync", "Snapshot restoration is complete"); + self.restart(io); + }, + RestorationStatus::Ongoing { state_chunks_done, block_chunks_done, .. 
} => {
+					if !self.snapshot.is_complete() && self.snapshot.done_chunks() - (state_chunks_done + block_chunks_done) as usize <= MAX_SNAPSHOT_CHUNKS_DOWNLOAD_AHEAD {
+						trace!(target:"sync", "Resuming snapshot sync");
+						self.state = SyncState::SnapshotData;
+						self.continue_sync(io);
+					}
+				},
+				RestorationStatus::Failed => {
+					trace!(target: "sync", "Snapshot restoration aborted");
+					self.state = SyncState::WaitingPeers;
+					self.snapshot.clear();
+					self.continue_sync(io);
+				},
+			}
+		}
+	}
+
+	/// Creates RLP to send for the tree defined by 'from' and 'to' hashes
+	fn create_new_hashes_rlp(chain: &BlockChainClient, from: &H256, to: &H256) -> Option<Bytes> {
+		match chain.tree_route(from, to) {
+			Some(route) => {
+				let uncles = chain.find_uncles(from).unwrap_or_else(Vec::new);
+				match route.blocks.len() {
+					0 => None,
+					_ => {
+						let mut blocks = route.blocks;
+						blocks.extend(uncles);
+						let mut rlp_stream = RlpStream::new_list(blocks.len());
+						for block_hash in blocks {
+							let mut hash_rlp = RlpStream::new_list(2);
+							let number = chain.block_header(BlockId::Hash(block_hash.clone()))
+								.expect("chain.tree_route and chain.find_uncles only return hashes of blocks that are in the blockchain. qed.").number();
+							hash_rlp.append(&block_hash);
+							hash_rlp.append(&number);
+							rlp_stream.append_raw(hash_rlp.as_raw(), 1);
+						}
+						Some(rlp_stream.out())
+					}
+				}
+			},
+			None => None
+		}
+	}
+
+	/// Creates RLP from block bytes and total difficulty
+	fn create_block_rlp(bytes: &Bytes, total_difficulty: U256) -> Bytes {
+		let mut rlp_stream = RlpStream::new_list(2);
+		rlp_stream.append_raw(bytes, 1);
+		rlp_stream.append(&total_difficulty);
+		rlp_stream.out()
+	}
+
+	/// Creates the latest block RLP for the given client
+	fn create_latest_block_rlp(chain: &BlockChainClient) -> Bytes {
+		ChainSync::create_block_rlp(
+			&chain.block(BlockId::Hash(chain.chain_info().best_block_hash))
+				.expect("Best block always exists").into_inner(),
+			chain.chain_info().total_difficulty
+		)
+	}
+
+	/// Creates the block RLP for the given hash and client
+	fn create_new_block_rlp(chain: &BlockChainClient, hash: &H256) -> Bytes {
+		ChainSync::create_block_rlp(
+			&chain.block(BlockId::Hash(hash.clone())).expect("Block has just been sealed; qed").into_inner(),
+			chain.block_total_difficulty(BlockId::Hash(hash.clone())).expect("Block has just been sealed; qed.")
+		)
+	}
+
+	/// Returns peer ids that have different blocks than our chain
+	fn get_lagging_peers(&mut self, chain_info: &BlockChainInfo) -> Vec<PeerId> {
+		let latest_hash = chain_info.best_block_hash;
+		self
+			.peers
+			.iter_mut()
+			.filter_map(|(&id, ref mut peer_info)| {
+				trace!(target: "sync", "Checking peer our best {} their best {}", latest_hash, peer_info.latest_hash);
+				if peer_info.latest_hash != latest_hash {
+					Some(id)
+				} else {
+					None
+				}
+			})
+			.collect::<Vec<_>>()
+	}
+
+	fn select_random_peers(peers: &[PeerId]) -> Vec<PeerId> {
+		// take sqrt(x) peers
+		let mut peers = peers.to_vec();
+		let mut count = (peers.len() as f64).powf(0.5).round() as usize;
+		count = cmp::min(count, MAX_PEERS_PROPAGATION);
+		count = cmp::max(count, MIN_PEERS_PROPAGATION);
+		random::new().shuffle(&mut peers);
+		peers.truncate(count);
+		peers
+	}
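+
+	// For example, with 25 lagging peers `select_random_peers` keeps
+	// round(sqrt(25)) = 5 randomly chosen ones; the count is always clamped to
+	// the [MIN_PEERS_PROPAGATION, MAX_PEERS_PROPAGATION] range, i.e. [4, 128].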
+
+	fn get_consensus_peers(&self) -> Vec<PeerId> {
+		self.peers.iter().filter_map(|(id, p)| if p.protocol_version >= PAR_PROTOCOL_VERSION_2 { Some(*id) } else { None }).collect()
+	}
+
+	fn get_private_transaction_peers(&self) -> Vec<PeerId> {
+		self.peers.iter().filter_map(|(id, p)| if p.protocol_version >= PAR_PROTOCOL_VERSION_3 { Some(*id) } else { None }).collect()
+	}
+
+	/// Maintain other peers. Send out any new blocks and transactions
+	pub fn maintain_sync(&mut self, io: &mut SyncIo) {
+		self.maybe_start_snapshot_sync(io);
+		self.check_resume(io);
+	}
+
+	/// Called when a block is imported to the chain - propagates the blocks and updates transactions sent to peers
+	pub fn chain_new_blocks(&mut self, io: &mut SyncIo, _imported: &[H256], invalid: &[H256], enacted: &[H256], _retracted: &[H256], sealed: &[H256], proposed: &[Bytes]) {
+		let queue_info = io.chain().queue_info();
+		let is_syncing = self.status().is_syncing(queue_info);
+
+		if !is_syncing || !sealed.is_empty() || !proposed.is_empty() {
+			trace!(target: "sync", "Propagating blocks, state={:?}", self.state);
+			SyncPropagator::propagate_latest_blocks(self, io, sealed);
+			SyncPropagator::propagate_proposed_blocks(self, io, proposed);
+		}
+		if !invalid.is_empty() {
+			trace!(target: "sync", "Bad blocks in the queue, restarting");
+			self.restart(io);
+		}
+
+		if !is_syncing && !enacted.is_empty() && !self.peers.is_empty() {
+			// Select random peer to re-broadcast transactions to.
+			let peer = random::new().gen_range(0, self.peers.len());
+			trace!(target: "sync", "Re-broadcasting transactions to a random peer.");
+			self.peers.values_mut().nth(peer).map(|peer_info|
+				peer_info.last_sent_transactions.clear()
+			);
+		}
+	}
+
+	/// Dispatch incoming requests and responses
+	pub fn dispatch_packet(sync: &RwLock<ChainSync>, io: &mut SyncIo, peer: PeerId, packet_id: u8, data: &[u8]) {
+		SyncSupplier::dispatch_packet(sync, io, peer, packet_id, data)
+	}
+
+	pub fn on_packet(&mut self, io: &mut SyncIo, peer: PeerId, packet_id: u8, data: &[u8]) {
+		debug!(target: "sync", "{} -> Dispatching packet: {}", peer, packet_id);
+		SyncHandler::on_packet(self, io, peer, packet_id, data);
+	}
+
+	/// Called when a peer sends us a new consensus packet
+	pub fn on_consensus_packet(io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), PacketDecodeError> {
+		SyncHandler::on_consensus_packet(io, peer_id, r)
+	}
+
+	/// Called by a peer when it is disconnecting
+	pub fn on_peer_aborting(&mut self, io: &mut SyncIo, peer: PeerId) {
+		SyncHandler::on_peer_aborting(self, io, peer);
+	}
+
+	/// Called when a new peer is connected
+	pub fn on_peer_connected(&mut self, io: &mut SyncIo, peer: PeerId) {
+		SyncHandler::on_peer_connected(self, io, peer);
+	}
+
+	/// Propagates new transactions to all peers
+	pub fn propagate_new_transactions(&mut self, io: &mut SyncIo) -> usize {
+		SyncPropagator::propagate_new_transactions(self, io)
+	}
+
+	/// Broadcast consensus message to peers.
+	pub fn propagate_consensus_packet(&mut self, io: &mut SyncIo, packet: Bytes) {
+		SyncPropagator::propagate_consensus_packet(self, io, packet);
+	}
+
+	/// Broadcast private transaction message to peers.
+	pub fn propagate_private_transaction(&mut self, io: &mut SyncIo, packet: Bytes) {
+		SyncPropagator::propagate_private_transaction(self, io, packet);
+	}
+
+	/// Broadcast signed private transaction message to peers.
+ pub fn propagate_signed_private_transaction(&mut self, io: &mut SyncIo, packet: Bytes) { + SyncPropagator::propagate_signed_private_transaction(self, io, packet); + } +} + +#[cfg(test)] +pub mod tests { + use std::collections::{HashSet, VecDeque}; + use ethkey; + use network::PeerId; + use tests::helpers::{TestIo}; + use tests::snapshot::TestSnapshotService; + use ethereum_types::{H256, U256, Address}; + use parking_lot::RwLock; + use bytes::Bytes; + use rlp::{Rlp, RlpStream}; + use super::*; + use ::SyncConfig; + use super::{PeerInfo, PeerAsking}; + use ethcore::header::*; + use ethcore::client::{BlockChainClient, EachBlockWith, TestBlockChainClient, ChainInfo, BlockInfo}; + use ethcore::miner::MinerService; + use private_tx::NoopPrivateTxHandler; + + pub fn get_dummy_block(order: u32, parent_hash: H256) -> Bytes { + let mut header = Header::new(); + header.set_gas_limit(0.into()); + header.set_difficulty((order * 100).into()); + header.set_timestamp((order * 10) as u64); + header.set_number(order as u64); + header.set_parent_hash(parent_hash); + header.set_state_root(H256::zero()); + + let mut rlp = RlpStream::new_list(3); + rlp.append(&header); + rlp.append_raw(&::rlp::EMPTY_LIST_RLP, 1); + rlp.append_raw(&::rlp::EMPTY_LIST_RLP, 1); + rlp.out() + } + + pub fn get_dummy_blocks(order: u32, parent_hash: H256) -> Bytes { + let mut rlp = RlpStream::new_list(1); + rlp.append_raw(&get_dummy_block(order, parent_hash), 1); + let difficulty: U256 = (100 * order).into(); + rlp.append(&difficulty); + rlp.out() + } + + pub fn get_dummy_hashes() -> Bytes { + let mut rlp = RlpStream::new_list(5); + for _ in 0..5 { + let mut hash_d_rlp = RlpStream::new_list(2); + let hash: H256 = H256::from(0u64); + let diff: U256 = U256::from(1u64); + hash_d_rlp.append(&hash); + hash_d_rlp.append(&diff); + + rlp.append_raw(&hash_d_rlp.out(), 1); + } + + rlp.out() + } + + fn queue_info(unverified: usize, verified: usize) -> BlockQueueInfo { + BlockQueueInfo { + unverified_queue_size: unverified, + verified_queue_size: verified, + verifying_queue_size: 0, + max_queue_size: 1000, + max_mem_use: 1000, + mem_used: 500 + } + } + + fn sync_status(state: SyncState) -> SyncStatus { + SyncStatus { + state: state, + protocol_version: 0, + network_id: 0, + start_block_number: 0, + last_imported_block_number: None, + highest_block_number: None, + blocks_total: 0, + blocks_received: 0, + num_peers: 0, + num_active_peers: 0, + mem_used: 0, + num_snapshot_chunks: 0, + snapshot_chunks_done: 0, + last_imported_old_block_number: None, + } + } + + #[test] + fn is_still_verifying() { + assert!(!sync_status(SyncState::Idle).is_syncing(queue_info(2, 1))); + assert!(sync_status(SyncState::Idle).is_syncing(queue_info(2, 2))); + } + + #[test] + fn is_synced_state() { + assert!(sync_status(SyncState::Blocks).is_syncing(queue_info(0, 0))); + assert!(!sync_status(SyncState::Idle).is_syncing(queue_info(0, 0))); + } + + pub fn dummy_sync_with_peer(peer_latest_hash: H256, client: &BlockChainClient) -> ChainSync { + let mut sync = ChainSync::new(SyncConfig::default(), client, Arc::new(NoopPrivateTxHandler)); + insert_dummy_peer(&mut sync, 0, peer_latest_hash); + sync + } + + pub fn insert_dummy_peer(sync: &mut ChainSync, peer_id: PeerId, peer_latest_hash: H256) { + sync.peers.insert(peer_id, + PeerInfo { + protocol_version: 0, + genesis: H256::zero(), + network_id: 0, + latest_hash: peer_latest_hash, + difficulty: None, + asking: PeerAsking::Nothing, + asking_blocks: Vec::new(), + asking_hash: None, + ask_time: Instant::now(), + 
last_sent_transactions: HashSet::new(),
+				expired: false,
+				confirmation: super::ForkConfirmation::Confirmed,
+				snapshot_number: None,
+				snapshot_hash: None,
+				asking_snapshot_data: None,
+				block_set: None,
+			});
+	}
+
+	#[test]
+	fn finds_lagging_peers() {
+		let mut client = TestBlockChainClient::new();
+		client.add_blocks(100, EachBlockWith::Uncle);
+		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(10), &client);
+		let chain_info = client.chain_info();
+
+		let lagging_peers = sync.get_lagging_peers(&chain_info);
+
+		assert_eq!(1, lagging_peers.len());
+	}
+
+	#[test]
+	fn calculates_tree_for_lagging_peer() {
+		let mut client = TestBlockChainClient::new();
+		client.add_blocks(15, EachBlockWith::Uncle);
+
+		let start = client.block_hash_delta_minus(4);
+		let end = client.block_hash_delta_minus(2);
+
+		// wrong way end -> start, should be None
+		let rlp = ChainSync::create_new_hashes_rlp(&client, &end, &start);
+		assert!(rlp.is_none());
+
+		let rlp = ChainSync::create_new_hashes_rlp(&client, &start, &end).unwrap();
+		// size of three RLP-encoded hash-and-number entries
+		assert_eq!(107, rlp.len());
+	}
+
+	// Idea is that what we produce when propagating the latest hashes should be accepted in
+	// on_peer_new_hashes in our code as well
+	#[test]
+	fn hashes_rlp_mutually_acceptable() {
+		let mut client = TestBlockChainClient::new();
+		client.add_blocks(100, EachBlockWith::Uncle);
+		let queue = RwLock::new(VecDeque::new());
+		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
+		let chain_info = client.chain_info();
+		let ss = TestSnapshotService::new();
+		let mut io = TestIo::new(&mut client, &ss, &queue, None);
+
+		let peers = sync.get_lagging_peers(&chain_info);
+		SyncPropagator::propagate_new_hashes(&mut sync, &chain_info, &mut io, &peers);
+
+		let data = &io.packets[0].data.clone();
+		let result = SyncHandler::on_peer_new_hashes(&mut sync, &mut io, 0, &Rlp::new(data));
+		assert!(result.is_ok());
+	}
+
+	// Idea is that what we produce when propagating the latest block should be accepted in
+	// on_peer_new_block in our code as well
+	#[test]
+	fn block_rlp_mutually_acceptable() {
+		let mut client = TestBlockChainClient::new();
+		client.add_blocks(100, EachBlockWith::Uncle);
+		let queue = RwLock::new(VecDeque::new());
+		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
+		let chain_info = client.chain_info();
+		let ss = TestSnapshotService::new();
+		let mut io = TestIo::new(&mut client, &ss, &queue, None);
+
+		let peers = sync.get_lagging_peers(&chain_info);
+		SyncPropagator::propagate_blocks(&mut sync, &chain_info, &mut io, &[], &peers);
+
+		let data = &io.packets[0].data.clone();
+		let result = SyncHandler::on_peer_new_block(&mut sync, &mut io, 0, &Rlp::new(data));
+		assert!(result.is_ok());
+	}
+
+	#[test]
+	fn should_add_transactions_to_queue() {
+		fn sender(tx: &UnverifiedTransaction) -> Address {
+			ethkey::public_to_address(&tx.recover_public().unwrap())
+		}
+
+		// given
+		let mut client = TestBlockChainClient::new();
+		client.add_blocks(98, EachBlockWith::Uncle);
+		client.add_blocks(1, EachBlockWith::UncleAndTransaction);
+		client.add_blocks(1, EachBlockWith::Transaction);
+		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
+
+		let good_blocks = vec![client.block_hash_delta_minus(2)];
+		let retracted_blocks = vec![client.block_hash_delta_minus(1)];
+
+		// Add some balance to clients and reset nonces
+		for h in &[good_blocks[0], retracted_blocks[0]] {
+			let block = client.block(BlockId::Hash(*h)).unwrap();
+			let sender = sender(&block.transactions()[0]);
+			client.set_balance(sender, U256::from(10_000_000_000_000_000_000u64));
+			client.set_nonce(sender, U256::from(0));
+		}
+
+		// when
+		{
+			let queue = RwLock::new(VecDeque::new());
+			let ss = TestSnapshotService::new();
+			let mut io = TestIo::new(&mut client, &ss, &queue, None);
+			io.chain.miner.chain_new_blocks(io.chain, &[], &[], &[], &good_blocks, false);
+			sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks, &[], &[]);
+			assert_eq!(io.chain.miner.ready_transactions(io.chain).len(), 1);
+		}
+		// We need to update nonce status (because we say that the block has been imported)
+		for h in &[good_blocks[0]] {
+			let block = client.block(BlockId::Hash(*h)).unwrap();
+			client.set_nonce(sender(&block.transactions()[0]), U256::from(1));
+		}
+		{
+			let queue = RwLock::new(VecDeque::new());
+			let ss = TestSnapshotService::new();
+			let mut io = TestIo::new(&client, &ss, &queue, None);
+			io.chain.miner.chain_new_blocks(io.chain, &[], &[], &good_blocks, &retracted_blocks, false);
+			sync.chain_new_blocks(&mut io, &[], &[], &good_blocks, &retracted_blocks, &[], &[]);
+		}
+
+		// then
+		assert_eq!(client.miner.ready_transactions(&client).len(), 1);
+	}
+
+	#[test]
+	fn should_not_add_transactions_to_queue_if_not_synced() {
+		// given
+		let mut client = TestBlockChainClient::new();
+		client.add_blocks(98, EachBlockWith::Uncle);
+		client.add_blocks(1, EachBlockWith::UncleAndTransaction);
+		client.add_blocks(1, EachBlockWith::Transaction);
+		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
+
+		let good_blocks = vec![client.block_hash_delta_minus(2)];
+		let retracted_blocks = vec![client.block_hash_delta_minus(1)];
+
+		let queue = RwLock::new(VecDeque::new());
+		let ss = TestSnapshotService::new();
+		let mut io = TestIo::new(&mut client, &ss, &queue, None);
+
+		// when
+		sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks, &[], &[]);
+		assert_eq!(io.chain.miner.queue_status().status.transaction_count, 0);
+		sync.chain_new_blocks(&mut io, &[], &[], &good_blocks, &retracted_blocks, &[], &[]);
+
+		// then
+		let status = io.chain.miner.queue_status();
+		assert_eq!(status.status.transaction_count, 0);
+	}
+}
diff --git a/ethcore/sync/src/chain/propagator.rs b/ethcore/sync/src/chain/propagator.rs
new file mode 100644
index 00000000000..4ae0518a537
--- /dev/null
+++ b/ethcore/sync/src/chain/propagator.rs
@@ -0,0 +1,636 @@
+// Copyright 2015-2018 Parity Technologies (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
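+
+//! The propagator half of the sync module: it pushes data out to peers without
+//! being asked. As a rough map (distilled from the functions below, not a spec):
+//! new blocks go to lagging peers as full `NEW_BLOCK_PACKET`s or as
+//! `NEW_BLOCK_HASHES_PACKET` announcements, pending transactions go to a random
+//! subset of peers as `TRANSACTIONS_PACKET`s (deduplicated per peer via
+//! `last_sent_transactions`), and consensus / private transaction packets go to
+//! peers speaking a recent enough Parity protocol version.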
+
+use bytes::Bytes;
+use ethereum_types::H256;
+use ethcore::client::BlockChainInfo;
+use ethcore::header::BlockNumber;
+use network::{PeerId, PacketId};
+use rand::Rng;
+use rlp::{Encodable, RlpStream};
+use sync_io::SyncIo;
+use std::cmp;
+use std::collections::HashSet;
+use transaction::SignedTransaction;
+
+use super::{
+	random,
+	ChainSync,
+	MAX_PEER_LAG_PROPAGATION,
+	MAX_PEERS_PROPAGATION,
+	MAX_TRANSACTION_PACKET_SIZE,
+	MAX_TRANSACTIONS_TO_PROPAGATE,
+	MIN_PEERS_PROPAGATION,
+	CONSENSUS_DATA_PACKET,
+	NEW_BLOCK_HASHES_PACKET,
+	NEW_BLOCK_PACKET,
+	PRIVATE_TRANSACTION_PACKET,
+	SIGNED_PRIVATE_TRANSACTION_PACKET,
+	TRANSACTIONS_PACKET,
+};
+
+/// Checks if a peer is able to process service transactions
+fn accepts_service_transaction(client_id: &str) -> bool {
+	// Parity versions starting from this one will accept service transactions
+	const SERVICE_TRANSACTIONS_VERSION: (u32, u32) = (1u32, 6u32);
+	// Parity client string prefix
+	const PARITY_CLIENT_ID_PREFIX: &'static str = "Parity/v";
+
+	if !client_id.starts_with(PARITY_CLIENT_ID_PREFIX) {
+		return false;
+	}
+	let ver: Vec<u32> = client_id[PARITY_CLIENT_ID_PREFIX.len()..].split('.')
+		.take(2)
+		.filter_map(|s| s.parse().ok())
+		.collect();
+	ver.len() == 2 && (ver[0] > SERVICE_TRANSACTIONS_VERSION.0 || (ver[0] == SERVICE_TRANSACTIONS_VERSION.0 && ver[1] >= SERVICE_TRANSACTIONS_VERSION.1))
+}
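+
+// Illustrative inputs and outputs for the check above (the `Parity/v1.6` and
+// `Parity/v1.7.3-ABCDEFGH` strings also appear in the tests at the bottom of
+// this file):
+//
+//     accepts_service_transaction("Parity/v1.6")            == true
+//     accepts_service_transaction("Parity/v1.7.3-ABCDEFGH") == true
+//     accepts_service_transaction("Parity/v1.5")            == false
+//     accepts_service_transaction("Geth")                   == false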
+
+/// The Chain Sync Propagator: propagates data to peers
+pub struct SyncPropagator;
+
+impl SyncPropagator {
+	/// Propagates the latest block to a set of peers
+	pub fn propagate_blocks(sync: &mut ChainSync, chain_info: &BlockChainInfo, io: &mut SyncIo, blocks: &[H256], peers: &[PeerId]) -> usize {
+		trace!(target: "sync", "Sending NewBlocks to {:?}", peers);
+		let mut sent = 0;
+		for peer_id in peers {
+			if blocks.is_empty() {
+				let rlp = ChainSync::create_latest_block_rlp(io.chain());
+				SyncPropagator::send_packet(io, *peer_id, NEW_BLOCK_PACKET, rlp);
+			} else {
+				for h in blocks {
+					let rlp = ChainSync::create_new_block_rlp(io.chain(), h);
+					SyncPropagator::send_packet(io, *peer_id, NEW_BLOCK_PACKET, rlp);
+				}
+			}
+			if let Some(ref mut peer) = sync.peers.get_mut(peer_id) {
+				peer.latest_hash = chain_info.best_block_hash.clone();
+			}
+			sent += 1;
+		}
+		sent
+	}
+
+	/// Propagates new known hashes to all peers
+	pub fn propagate_new_hashes(sync: &mut ChainSync, chain_info: &BlockChainInfo, io: &mut SyncIo, peers: &[PeerId]) -> usize {
+		trace!(target: "sync", "Sending NewHashes to {:?}", peers);
+		let mut sent = 0;
+		let last_parent = *io.chain().best_block_header().parent_hash();
+		for peer_id in peers {
+			sent += match ChainSync::create_new_hashes_rlp(io.chain(), &last_parent, &chain_info.best_block_hash) {
+				Some(rlp) => {
+					{
+						if let Some(ref mut peer) = sync.peers.get_mut(peer_id) {
+							peer.latest_hash = chain_info.best_block_hash.clone();
+						}
+					}
+					SyncPropagator::send_packet(io, *peer_id, NEW_BLOCK_HASHES_PACKET, rlp);
+					1
+				},
+				None => 0
+			}
+		}
+		sent
+	}
+
+	/// Propagates new transactions to all peers
+	pub fn propagate_new_transactions(sync: &mut ChainSync, io: &mut SyncIo) -> usize {
+		// Early out if there is nobody to send to.
+		if sync.peers.is_empty() {
+			return 0;
+		}
+
+		let transactions = io.chain().ready_transactions();
+		if transactions.is_empty() {
+			return 0;
+		}
+
+		let (transactions, service_transactions): (Vec<_>, Vec<_>) = transactions.iter()
+			.map(|tx| tx.signed())
+			.partition(|tx| !tx.gas_price.is_zero());
+
+		// Usual transactions can be propagated to all peers
+		let mut affected_peers = HashSet::new();
+		if !transactions.is_empty() {
+			let peers = SyncPropagator::select_peers_for_transactions(sync, |_| true);
+			affected_peers = SyncPropagator::propagate_transactions_to_peers(sync, io, peers, transactions);
+		}
+
+		// Most of the time service_transactions will be empty
+		// => there's no need to merge packets
+		if !service_transactions.is_empty() {
+			let service_transactions_peers = SyncPropagator::select_peers_for_transactions(sync, |peer_id| accepts_service_transaction(&io.peer_info(*peer_id)));
+			let service_transactions_affected_peers = SyncPropagator::propagate_transactions_to_peers(sync, io, service_transactions_peers, service_transactions);
+			affected_peers.extend(&service_transactions_affected_peers);
+		}
+
+		affected_peers.len()
+	}
+
+	fn propagate_transactions_to_peers(sync: &mut ChainSync, io: &mut SyncIo, peers: Vec<PeerId>, transactions: Vec<&SignedTransaction>) -> HashSet<PeerId> {
+		let all_transactions_hashes = transactions.iter()
+			.map(|tx| tx.hash())
+			.collect::<HashSet<_>>();
+		let all_transactions_rlp = {
+			let mut packet = RlpStream::new_list(transactions.len());
+			for tx in &transactions { packet.append(&**tx); }
+			packet.out()
+		};
+
+		// Clear old transactions from stats
+		sync.transactions_stats.retain(&all_transactions_hashes);
+
+		let block_number = io.chain().chain_info().best_block_number;
+
+		let lucky_peers = {
+			peers.into_iter()
+				.filter_map(|peer_id| {
+					let stats = &mut sync.transactions_stats;
+					let peer_info = sync.peers.get_mut(&peer_id)
+						.expect("peer_id is from peers; peers is result of select_peers_for_transactions; select_peers_for_transactions selects peers from self.peers; qed");
+
+					// Send all transactions
+					if peer_info.last_sent_transactions.is_empty() {
+						// update stats
+						for hash in &all_transactions_hashes {
+							let id = io.peer_session_info(peer_id).and_then(|info| info.id);
+							stats.propagated(hash, id, block_number);
+						}
+						peer_info.last_sent_transactions = all_transactions_hashes.clone();
+						return Some((peer_id, all_transactions_hashes.len(), all_transactions_rlp.clone()));
+					}
+
+					// Get hashes of all transactions to send to this peer
+					let to_send = all_transactions_hashes.difference(&peer_info.last_sent_transactions)
+						.take(MAX_TRANSACTIONS_TO_PROPAGATE)
+						.cloned()
+						.collect::<HashSet<_>>();
+					if to_send.is_empty() {
+						return None;
+					}
+
+					// Construct RLP
+					let (packet, to_send) = {
+						let mut to_send = to_send;
+						let mut packet = RlpStream::new();
+						packet.begin_unbounded_list();
+						let mut pushed = 0;
+						for tx in &transactions {
+							let hash = tx.hash();
+							if to_send.contains(&hash) {
+								let mut transaction = RlpStream::new();
+								tx.rlp_append(&mut transaction);
+								let appended = packet.append_raw_checked(&transaction.drain(), 1, MAX_TRANSACTION_PACKET_SIZE);
+								if !appended {
+									// Maximal packet size reached; just proceed with sending
+									debug!("Transaction packet size limit reached. 
Sending incomplete set of {}/{} transactions.", pushed, to_send.len()); + to_send = to_send.into_iter().take(pushed).collect(); + break; + } + pushed += 1; + } + } + packet.complete_unbounded_list(); + (packet, to_send) + }; + + // Update stats + let id = io.peer_session_info(peer_id).and_then(|info| info.id); + for hash in &to_send { + // update stats + stats.propagated(hash, id, block_number); + } + + peer_info.last_sent_transactions = all_transactions_hashes + .intersection(&peer_info.last_sent_transactions) + .chain(&to_send) + .cloned() + .collect(); + Some((peer_id, to_send.len(), packet.out())) + }) + .collect::>() + }; + + // Send RLPs + let mut peers = HashSet::new(); + if lucky_peers.len() > 0 { + let mut max_sent = 0; + let lucky_peers_len = lucky_peers.len(); + for (peer_id, sent, rlp) in lucky_peers { + peers.insert(peer_id); + SyncPropagator::send_packet(io, peer_id, TRANSACTIONS_PACKET, rlp); + trace!(target: "sync", "{:02} <- Transactions ({} entries)", peer_id, sent); + max_sent = cmp::max(max_sent, sent); + } + debug!(target: "sync", "Sent up to {} transactions to {} peers.", max_sent, lucky_peers_len); + } + + peers + } + + pub fn propagate_latest_blocks(sync: &mut ChainSync, io: &mut SyncIo, sealed: &[H256]) { + let chain_info = io.chain().chain_info(); + if (((chain_info.best_block_number as i64) - (sync.last_sent_block_number as i64)).abs() as BlockNumber) < MAX_PEER_LAG_PROPAGATION { + let mut peers = sync.get_lagging_peers(&chain_info); + if sealed.is_empty() { + let hashes = SyncPropagator::propagate_new_hashes(sync, &chain_info, io, &peers); + peers = ChainSync::select_random_peers(&peers); + let blocks = SyncPropagator::propagate_blocks(sync, &chain_info, io, sealed, &peers); + if blocks != 0 || hashes != 0 { + trace!(target: "sync", "Sent latest {} blocks and {} hashes to peers.", blocks, hashes); + } + } else { + SyncPropagator::propagate_blocks(sync, &chain_info, io, sealed, &peers); + SyncPropagator::propagate_new_hashes(sync, &chain_info, io, &peers); + trace!(target: "sync", "Sent sealed block to all peers"); + }; + } + sync.last_sent_block_number = chain_info.best_block_number; + } + + /// Distribute valid proposed blocks to subset of current peers. + pub fn propagate_proposed_blocks(sync: &mut ChainSync, io: &mut SyncIo, proposed: &[Bytes]) { + let peers = sync.get_consensus_peers(); + trace!(target: "sync", "Sending proposed blocks to {:?}", peers); + for block in proposed { + let rlp = ChainSync::create_block_rlp( + block, + io.chain().chain_info().total_difficulty + ); + for peer_id in &peers { + SyncPropagator::send_packet(io, *peer_id, NEW_BLOCK_PACKET, rlp.clone()); + } + } + } + + /// Broadcast consensus message to peers. + pub fn propagate_consensus_packet(sync: &mut ChainSync, io: &mut SyncIo, packet: Bytes) { + let lucky_peers = ChainSync::select_random_peers(&sync.get_consensus_peers()); + trace!(target: "sync", "Sending consensus packet to {:?}", lucky_peers); + for peer_id in lucky_peers { + SyncPropagator::send_packet(io, peer_id, CONSENSUS_DATA_PACKET, packet.clone()); + } + } + + /// Broadcast private transaction message to peers. 
+	pub fn propagate_private_transaction(sync: &mut ChainSync, io: &mut SyncIo, packet: Bytes) {
+		let lucky_peers = ChainSync::select_random_peers(&sync.get_private_transaction_peers());
+		trace!(target: "sync", "Sending private transaction packet to {:?}", lucky_peers);
+		for peer_id in lucky_peers {
+			SyncPropagator::send_packet(io, peer_id, PRIVATE_TRANSACTION_PACKET, packet.clone());
+		}
+	}
+
+	/// Broadcast signed private transaction message to peers.
+	pub fn propagate_signed_private_transaction(sync: &mut ChainSync, io: &mut SyncIo, packet: Bytes) {
+		let lucky_peers = ChainSync::select_random_peers(&sync.get_private_transaction_peers());
+		trace!(target: "sync", "Sending signed private transaction packet to {:?}", lucky_peers);
+		for peer_id in lucky_peers {
+			SyncPropagator::send_packet(io, peer_id, SIGNED_PRIVATE_TRANSACTION_PACKET, packet.clone());
+		}
+	}
+
+	fn select_peers_for_transactions<F>(sync: &ChainSync, filter: F) -> Vec<PeerId>
+		where F: Fn(&PeerId) -> bool {
+		// sqrt(x)/x scaled to max u32
+		let fraction = ((sync.peers.len() as f64).powf(-0.5) * (u32::max_value() as f64).round()) as u32;
+		let small = sync.peers.len() < MIN_PEERS_PROPAGATION;
+
+		let mut random = random::new();
+		sync.peers.keys()
+			.cloned()
+			.filter(filter)
+			.filter(|_| small || random.next_u32() < fraction)
+			.take(MAX_PEERS_PROPAGATION)
+			.collect()
+	}
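+
+	// Worked example for the fraction above: with 16 peers it evaluates to
+	// u32::max_value() * 16^(-1/2) = u32::max_value() / 4, so each peer passes
+	// the `next_u32() < fraction` filter with probability 1/4 and about
+	// sqrt(16) = 4 peers are selected on average; with fewer than
+	// MIN_PEERS_PROPAGATION peers, everyone is selected.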
+
+	/// Generic packet sender
+	fn send_packet(sync: &mut SyncIo, peer_id: PeerId, packet_id: PacketId, packet: Bytes) {
+		if let Err(e) = sync.send(peer_id, packet_id, packet) {
+			debug!(target:"sync", "Error sending packet: {:?}", e);
+			sync.disconnect_peer(peer_id);
+		}
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use ethcore::client::{BlockInfo, ChainInfo, EachBlockWith, TestBlockChainClient};
+	use parking_lot::RwLock;
+	use private_tx::NoopPrivateTxHandler;
+	use rlp::{Rlp};
+	use std::collections::{VecDeque};
+	use tests::helpers::{TestIo};
+	use tests::snapshot::TestSnapshotService;
+
+	use super::{*, super::{*, tests::*}};
+
+	#[test]
+	fn sends_new_hashes_to_lagging_peer() {
+		let mut client = TestBlockChainClient::new();
+		client.add_blocks(100, EachBlockWith::Uncle);
+		let queue = RwLock::new(VecDeque::new());
+		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
+		let chain_info = client.chain_info();
+		let ss = TestSnapshotService::new();
+		let mut io = TestIo::new(&mut client, &ss, &queue, None);
+
+		let peers = sync.get_lagging_peers(&chain_info);
+		let peer_count = SyncPropagator::propagate_new_hashes(&mut sync, &chain_info, &mut io, &peers);
+
+		// 1 message should be sent
+		assert_eq!(1, io.packets.len());
+		// 1 peer should be updated
+		assert_eq!(1, peer_count);
+		// NEW_BLOCK_HASHES_PACKET
+		assert_eq!(0x01, io.packets[0].packet_id);
+	}
+
+	#[test]
+	fn sends_latest_block_to_lagging_peer() {
+		let mut client = TestBlockChainClient::new();
+		client.add_blocks(100, EachBlockWith::Uncle);
+		let queue = RwLock::new(VecDeque::new());
+		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
+		let chain_info = client.chain_info();
+		let ss = TestSnapshotService::new();
+		let mut io = TestIo::new(&mut client, &ss, &queue, None);
+		let peers = sync.get_lagging_peers(&chain_info);
+		let peer_count = SyncPropagator::propagate_blocks(&mut sync, &chain_info, &mut io, &[], &peers);
+
+		// 1 message should be sent
+		assert_eq!(1, io.packets.len());
+		// 1 peer should be updated
+		assert_eq!(1, peer_count);
+		// NEW_BLOCK_PACKET
+		assert_eq!(0x07, io.packets[0].packet_id);
+	}
+
+	#[test]
+	fn sends_sealed_block() {
+		let mut client = TestBlockChainClient::new();
+		client.add_blocks(100, EachBlockWith::Uncle);
+		let queue = RwLock::new(VecDeque::new());
+		let hash = client.block_hash(BlockId::Number(99)).unwrap();
+		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
+		let chain_info = client.chain_info();
+		let ss = TestSnapshotService::new();
+		let mut io = TestIo::new(&mut client, &ss, &queue, None);
+		let peers = sync.get_lagging_peers(&chain_info);
+		let peer_count = SyncPropagator::propagate_blocks(&mut sync, &chain_info, &mut io, &[hash.clone()], &peers);
+
+		// 1 message should be sent
+		assert_eq!(1, io.packets.len());
+		// 1 peer should be updated
+		assert_eq!(1, peer_count);
+		// NEW_BLOCK_PACKET
+		assert_eq!(0x07, io.packets[0].packet_id);
+	}
+
+	#[test]
+	fn sends_proposed_block() {
+		let mut client = TestBlockChainClient::new();
+		client.add_blocks(2, EachBlockWith::Uncle);
+		let queue = RwLock::new(VecDeque::new());
+		let block = client.block(BlockId::Latest).unwrap().into_inner();
+		let mut sync = ChainSync::new(SyncConfig::default(), &client, Arc::new(NoopPrivateTxHandler));
+		sync.peers.insert(0,
+			PeerInfo {
+				// Messaging protocol
+				protocol_version: 2,
+				genesis: H256::zero(),
+				network_id: 0,
+				latest_hash: client.block_hash_delta_minus(1),
+				difficulty: None,
+				asking: PeerAsking::Nothing,
+				asking_blocks: Vec::new(),
+				asking_hash: None,
+				ask_time: Instant::now(),
+				last_sent_transactions: HashSet::new(),
+				expired: false,
+				confirmation: ForkConfirmation::Confirmed,
+				snapshot_number: None,
+				snapshot_hash: None,
+				asking_snapshot_data: None,
+				block_set: None,
+			});
+		let ss = TestSnapshotService::new();
+		let mut io = TestIo::new(&mut client, &ss, &queue, None);
+		SyncPropagator::propagate_proposed_blocks(&mut sync, &mut io, &[block]);
+
+		// 1 message should be sent
+		assert_eq!(1, io.packets.len());
+		// NEW_BLOCK_PACKET
+		assert_eq!(0x07, io.packets[0].packet_id);
+	}
+
+	#[test]
+	fn propagates_transactions() {
+		let mut client = TestBlockChainClient::new();
+		client.add_blocks(100, EachBlockWith::Uncle);
+		client.insert_transaction_to_queue();
+		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client);
+		let queue = RwLock::new(VecDeque::new());
+		let ss = TestSnapshotService::new();
+		let mut io = TestIo::new(&mut client, &ss, &queue, None);
+		let peer_count = SyncPropagator::propagate_new_transactions(&mut sync, &mut io);
+		// Try to propagate same transactions for the second time
+		let peer_count2 = SyncPropagator::propagate_new_transactions(&mut sync, &mut io);
+		// Even after a new block, transactions should not be propagated twice
+		sync.chain_new_blocks(&mut io, &[], &[], &[], &[], &[], &[]);
+		// Try to propagate same transactions for the third time
+		let peer_count3 = SyncPropagator::propagate_new_transactions(&mut sync, &mut io);
+
+		// 1 message should be sent
+		assert_eq!(1, io.packets.len());
+		// 1 peer should be updated but only once
+		assert_eq!(1, peer_count);
+		assert_eq!(0, peer_count2);
+		assert_eq!(0, peer_count3);
+		// TRANSACTIONS_PACKET
+		assert_eq!(0x02, io.packets[0].packet_id);
+	}
+
+	#[test]
+	fn does_not_propagate_new_transactions_after_new_block() {
+		let mut client = TestBlockChainClient::new();
+		client.add_blocks(100, EachBlockWith::Uncle);
+		client.insert_transaction_to_queue();
+		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client);
+		let queue = RwLock::new(VecDeque::new());
+		let ss = TestSnapshotService::new();
+		let mut io = TestIo::new(&mut client, &ss, &queue, None);
+		let mut io = TestIo::new(&mut client, &ss, &queue, None);
+		let peer_count = SyncPropagator::propagate_new_transactions(&mut sync, &mut io);
+		io.chain.insert_transaction_to_queue();
+		// New block import should not trigger propagation.
+		// (we only propagate on timeout)
+		sync.chain_new_blocks(&mut io, &[], &[], &[], &[], &[], &[]);
+
+		// 1 message should be sent
+		assert_eq!(1, io.packets.len());
+		// 1 peer should receive the message
+		assert_eq!(1, peer_count);
+		// TRANSACTIONS_PACKET
+		assert_eq!(0x02, io.packets[0].packet_id);
+	}
+
+	#[test]
+	fn does_not_fail_for_no_peers() {
+		let mut client = TestBlockChainClient::new();
+		client.add_blocks(100, EachBlockWith::Uncle);
+		client.insert_transaction_to_queue();
+		// Sync with no peers
+		let mut sync = ChainSync::new(SyncConfig::default(), &client, Arc::new(NoopPrivateTxHandler));
+		let queue = RwLock::new(VecDeque::new());
+		let ss = TestSnapshotService::new();
+		let mut io = TestIo::new(&mut client, &ss, &queue, None);
+		let peer_count = SyncPropagator::propagate_new_transactions(&mut sync, &mut io);
+		sync.chain_new_blocks(&mut io, &[], &[], &[], &[], &[], &[]);
+		// Try to propagate same transactions for the second time
+		let peer_count2 = SyncPropagator::propagate_new_transactions(&mut sync, &mut io);
+
+		assert_eq!(0, io.packets.len());
+		assert_eq!(0, peer_count);
+		assert_eq!(0, peer_count2);
+	}
+
+	#[test]
+	fn propagates_transactions_without_alternating() {
+		let mut client = TestBlockChainClient::new();
+		client.add_blocks(100, EachBlockWith::Uncle);
+		client.insert_transaction_to_queue();
+		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client);
+		let queue = RwLock::new(VecDeque::new());
+		let ss = TestSnapshotService::new();
+		// should send some
+		{
+			let mut io = TestIo::new(&mut client, &ss, &queue, None);
+			let peer_count = SyncPropagator::propagate_new_transactions(&mut sync, &mut io);
+			assert_eq!(1, io.packets.len());
+			assert_eq!(1, peer_count);
+		}
+		// Insert some more
+		client.insert_transaction_to_queue();
+		let (peer_count2, peer_count3) = {
+			let mut io = TestIo::new(&mut client, &ss, &queue, None);
+			// Propagate new transactions
+			let peer_count2 = SyncPropagator::propagate_new_transactions(&mut sync, &mut io);
+			// And now the peer should have all transactions
+			let peer_count3 = SyncPropagator::propagate_new_transactions(&mut sync, &mut io);
+			(peer_count2, peer_count3)
+		};
+
+		// 2 messages should be sent (in total)
+		assert_eq!(2, queue.read().len());
+		// 1 peer should be updated but only once after inserting new transaction
+		assert_eq!(1, peer_count2);
+		assert_eq!(0, peer_count3);
+		// TRANSACTIONS_PACKET
+		assert_eq!(0x02, queue.read()[0].packet_id);
+		assert_eq!(0x02, queue.read()[1].packet_id);
+	}
+
+	#[test]
+	fn should_maintain_transactions_propagation_stats() {
+		let mut client = TestBlockChainClient::new();
+		client.add_blocks(100, EachBlockWith::Uncle);
+		client.insert_transaction_to_queue();
+		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client);
+		let queue = RwLock::new(VecDeque::new());
+		let ss = TestSnapshotService::new();
+		let mut io = TestIo::new(&mut client, &ss, &queue, None);
+		SyncPropagator::propagate_new_transactions(&mut sync, &mut io);
+
+		let stats = sync.transactions_stats();
+		assert_eq!(stats.len(), 1, "Should maintain stats for single transaction.")
+	}
+
+	#[test]
+	fn should_propagate_service_transaction_to_selected_peers_only() {
+		let mut client = TestBlockChainClient::new();
+		client.insert_transaction_with_gas_price_to_queue(U256::zero());
+		let block_hash = client.block_hash_delta_minus(1);
+		let mut sync = ChainSync::new(SyncConfig::default(), &client, Arc::new(NoopPrivateTxHandler));
+		let queue = RwLock::new(VecDeque::new());
+		let ss = TestSnapshotService::new();
+		let mut io = TestIo::new(&mut client, &ss, &queue, None);
+
+		// when peer#1 is Geth
+		insert_dummy_peer(&mut sync, 1, block_hash);
+		io.peers_info.insert(1, "Geth".to_owned());
+		// and peer#2 is Parity, accepting service transactions
+		insert_dummy_peer(&mut sync, 2, block_hash);
+		io.peers_info.insert(2, "Parity/v1.6".to_owned());
+		// and peer#3 is Parity, discarding service transactions
+		insert_dummy_peer(&mut sync, 3, block_hash);
+		io.peers_info.insert(3, "Parity/v1.5".to_owned());
+		// and peer#4 is Parity, accepting service transactions
+		insert_dummy_peer(&mut sync, 4, block_hash);
+		io.peers_info.insert(4, "Parity/v1.7.3-ABCDEFGH".to_owned());
+
+		// and new service transaction is propagated to peers
+		SyncPropagator::propagate_new_transactions(&mut sync, &mut io);
+
+		// peer#2 && peer#4 receive the service transaction
+		assert!(io.packets.iter().any(|p| p.packet_id == 0x02 && p.recipient == 2)); // TRANSACTIONS_PACKET
+		assert!(io.packets.iter().any(|p| p.packet_id == 0x02 && p.recipient == 4)); // TRANSACTIONS_PACKET
+		assert_eq!(io.packets.len(), 2);
+	}
+
+	#[test]
+	fn should_propagate_service_transaction_is_sent_as_separate_message() {
+		let mut client = TestBlockChainClient::new();
+		let tx1_hash = client.insert_transaction_to_queue();
+		let tx2_hash = client.insert_transaction_with_gas_price_to_queue(U256::zero());
+		let block_hash = client.block_hash_delta_minus(1);
+		let mut sync = ChainSync::new(SyncConfig::default(), &client, Arc::new(NoopPrivateTxHandler));
+		let queue = RwLock::new(VecDeque::new());
+		let ss = TestSnapshotService::new();
+		let mut io = TestIo::new(&mut client, &ss, &queue, None);
+
+		// when peer#1 is Parity, accepting service transactions
+		insert_dummy_peer(&mut sync, 1, block_hash);
+		io.peers_info.insert(1, "Parity/v1.6".to_owned());
+
+		// and service + non-service transactions are propagated to peers
+		SyncPropagator::propagate_new_transactions(&mut sync, &mut io);
+
+		// two separate packets for peer are queued:
+		// 1) with non-service-transaction
+		// 2) with service transaction
+		let sent_transactions: Vec<UnverifiedTransaction> = io.packets.iter()
+			.filter_map(|p| {
+				if p.packet_id != 0x02 || p.recipient != 1 { // TRANSACTIONS_PACKET
+					return None;
+				}
+
+				let rlp = Rlp::new(&*p.data);
+				let item_count = rlp.item_count().unwrap_or(0);
+				if item_count != 1 {
+					return None;
+				}
+
+				rlp.at(0).ok().and_then(|r| r.as_val().ok())
+			})
+			.collect();
+		assert_eq!(sent_transactions.len(), 2);
+		assert!(sent_transactions.iter().any(|tx| tx.hash() == tx1_hash));
+		assert!(sent_transactions.iter().any(|tx| tx.hash() == tx2_hash));
+	}
+}
diff --git a/ethcore/sync/src/chain/requester.rs b/ethcore/sync/src/chain/requester.rs
new file mode 100644
index 00000000000..e6acf6bc53a
--- /dev/null
+++ b/ethcore/sync/src/chain/requester.rs
@@ -0,0 +1,154 @@
+// Copyright 2015-2018 Parity Technologies (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+use api::WARP_SYNC_PROTOCOL_ID;
+use block_sync::BlockRequest;
+use bytes::Bytes;
+use ethcore::header::BlockNumber;
+use ethereum_types::H256;
+use network::{PeerId, PacketId};
+use rlp::RlpStream;
+use std::time::Instant;
+use sync_io::SyncIo;
+
+use super::{
+	BlockSet,
+	ChainSync,
+	PeerAsking,
+	ETH_PACKET_COUNT,
+	GET_BLOCK_BODIES_PACKET,
+	GET_BLOCK_HEADERS_PACKET,
+	GET_RECEIPTS_PACKET,
+	GET_SNAPSHOT_DATA_PACKET,
+	GET_SNAPSHOT_MANIFEST_PACKET,
+};
+
+/// The Chain Sync Requester: requesting data from other peers
+pub struct SyncRequester;
+
+impl SyncRequester {
+	/// Perform block download request
+	pub fn request_blocks(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, request: BlockRequest, block_set: BlockSet) {
+		match request {
+			BlockRequest::Headers { start, count, skip } => {
+				SyncRequester::request_headers_by_hash(sync, io, peer_id, &start, count, skip, false, block_set);
+			},
+			BlockRequest::Bodies { hashes } => {
+				SyncRequester::request_bodies(sync, io, peer_id, hashes, block_set);
+			},
+			BlockRequest::Receipts { hashes } => {
+				SyncRequester::request_receipts(sync, io, peer_id, hashes, block_set);
+			},
+		}
+	}
+
+	/// Request block bodies from a peer
+	fn request_bodies(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, hashes: Vec<H256>, set: BlockSet) {
+		let mut rlp = RlpStream::new_list(hashes.len());
+		trace!(target: "sync", "{} <- GetBlockBodies: {} entries starting from {:?}, set = {:?}", peer_id, hashes.len(), hashes.first(), set);
+		for h in &hashes {
+			rlp.append(&h.clone());
+		}
+		SyncRequester::send_request(sync, io, peer_id, PeerAsking::BlockBodies, GET_BLOCK_BODIES_PACKET, rlp.out());
+		let peer = sync.peers.get_mut(&peer_id).expect("peer_id may originate either from on_packet, where it is already validated or from enumerating self.peers. qed");
+		peer.asking_blocks = hashes;
+		peer.block_set = Some(set);
+	}
+
+	/// Request headers from a peer by block number
+	pub fn request_fork_header(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, n: BlockNumber) {
+		trace!(target: "sync", "{} <- GetForkHeader: at {}", peer_id, n);
+		let mut rlp = RlpStream::new_list(4);
+		rlp.append(&n);
+		rlp.append(&1u32);
+		rlp.append(&0u32);
+		rlp.append(&0u32);
+		SyncRequester::send_request(sync, io, peer_id, PeerAsking::ForkHeader, GET_BLOCK_HEADERS_PACKET, rlp.out());
+	}
+
+	/// Request snapshot data from a peer, if any chunk is still needed.
+	pub fn request_snapshot_data(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId) {
+		// find chunk data to download
+		if let Some(hash) = sync.snapshot.needed_chunk() {
+			if let Some(ref mut peer) = sync.peers.get_mut(&peer_id) {
+				peer.asking_snapshot_data = Some(hash.clone());
+			}
+			SyncRequester::request_snapshot_chunk(sync, io, peer_id, &hash);
+		}
+	}
+
+	/// Request snapshot manifest from a peer.
+	pub fn request_snapshot_manifest(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId) {
+		trace!(target: "sync", "{} <- GetSnapshotManifest", peer_id);
+		let rlp = RlpStream::new_list(0);
+		SyncRequester::send_request(sync, io, peer_id, PeerAsking::SnapshotManifest, GET_SNAPSHOT_MANIFEST_PACKET, rlp.out());
+	}
+
+	/// Request headers from a peer by block hash
+	fn request_headers_by_hash(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, h: &H256, count: u64, skip: u64, reverse: bool, set: BlockSet) {
+		trace!(target: "sync", "{} <- GetBlockHeaders: {} entries starting from {}, set = {:?}", peer_id, count, h, set);
+		let mut rlp = RlpStream::new_list(4);
+		rlp.append(h);
+		rlp.append(&count);
+		rlp.append(&skip);
+		rlp.append(&if reverse {1u32} else {0u32});
+		SyncRequester::send_request(sync, io, peer_id, PeerAsking::BlockHeaders, GET_BLOCK_HEADERS_PACKET, rlp.out());
+		let peer = sync.peers.get_mut(&peer_id).expect("peer_id may originate either from on_packet, where it is already validated or from enumerating self.peers. qed");
+		peer.asking_hash = Some(h.clone());
+		peer.block_set = Some(set);
+	}
+
+	/// Request block receipts from a peer
+	fn request_receipts(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, hashes: Vec<H256>, set: BlockSet) {
+		let mut rlp = RlpStream::new_list(hashes.len());
+		trace!(target: "sync", "{} <- GetBlockReceipts: {} entries starting from {:?}, set = {:?}", peer_id, hashes.len(), hashes.first(), set);
+		for h in &hashes {
+			rlp.append(&h.clone());
+		}
+		SyncRequester::send_request(sync, io, peer_id, PeerAsking::BlockReceipts, GET_RECEIPTS_PACKET, rlp.out());
+		let peer = sync.peers.get_mut(&peer_id).expect("peer_id may originate either from on_packet, where it is already validated or from enumerating self.peers. qed");
+		peer.asking_blocks = hashes;
+		peer.block_set = Some(set);
+	}
+
+	/// Request snapshot chunk from a peer.
+	fn request_snapshot_chunk(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, chunk: &H256) {
+		trace!(target: "sync", "{} <- GetSnapshotData {:?}", peer_id, chunk);
+		let mut rlp = RlpStream::new_list(1);
+		rlp.append(chunk);
+		SyncRequester::send_request(sync, io, peer_id, PeerAsking::SnapshotData, GET_SNAPSHOT_DATA_PACKET, rlp.out());
+	}
+
+	/// Generic request sender
+	fn send_request(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, asking: PeerAsking, packet_id: PacketId, packet: Bytes) {
+		if let Some(ref mut peer) = sync.peers.get_mut(&peer_id) {
+			if peer.asking != PeerAsking::Nothing {
+				warn!(target:"sync", "Asking {:?} while requesting {:?}", peer.asking, asking);
+			}
+			peer.asking = asking;
+			peer.ask_time = Instant::now();
+			let result = if packet_id >= ETH_PACKET_COUNT {
+				io.send_protocol(WARP_SYNC_PROTOCOL_ID, peer_id, packet_id, packet)
+			} else {
+				io.send(peer_id, packet_id, packet)
+			};
+			if let Err(e) = result {
+				debug!(target:"sync", "Error sending request: {:?}", e);
+				io.disconnect_peer(peer_id);
+			}
+		}
+	}
+}
diff --git a/ethcore/sync/src/chain/supplier.rs b/ethcore/sync/src/chain/supplier.rs
new file mode 100644
index 00000000000..0bfb8569823
--- /dev/null
+++ b/ethcore/sync/src/chain/supplier.rs
@@ -0,0 +1,446 @@
+// Copyright 2015-2018 Parity Technologies (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+use bytes::Bytes;
+use ethcore::client::BlockId;
+use ethcore::header::BlockNumber;
+use ethereum_types::H256;
+use network::{self, PeerId};
+use parking_lot::RwLock;
+use rlp::{Rlp, RlpStream};
+use std::cmp;
+use sync_io::SyncIo;
+
+use super::{
+	ChainSync,
+	RlpResponseResult,
+	PacketDecodeError,
+	BLOCK_BODIES_PACKET,
+	BLOCK_HEADERS_PACKET,
+	CONSENSUS_DATA_PACKET,
+	GET_BLOCK_BODIES_PACKET,
+	GET_BLOCK_HEADERS_PACKET,
+	GET_NODE_DATA_PACKET,
+	GET_RECEIPTS_PACKET,
+	GET_SNAPSHOT_DATA_PACKET,
+	GET_SNAPSHOT_MANIFEST_PACKET,
+	MAX_BODIES_TO_SEND,
+	MAX_HEADERS_TO_SEND,
+	MAX_NODE_DATA_TO_SEND,
+	MAX_RECEIPTS_HEADERS_TO_SEND,
+	MAX_RECEIPTS_TO_SEND,
+	NODE_DATA_PACKET,
+	RECEIPTS_PACKET,
+	SNAPSHOT_DATA_PACKET,
+	SNAPSHOT_MANIFEST_PACKET,
+};
+
+/// The Chain Sync Supplier: answers requests from peers with available data
+pub struct SyncSupplier;
+
+impl SyncSupplier {
+	/// Dispatch incoming requests and responses
+	pub fn dispatch_packet(sync: &RwLock<ChainSync>, io: &mut SyncIo, peer: PeerId, packet_id: u8, data: &[u8]) {
+		let rlp = Rlp::new(data);
+		let result = match packet_id {
+			GET_BLOCK_BODIES_PACKET => SyncSupplier::return_rlp(io, &rlp, peer,
+				SyncSupplier::return_block_bodies,
+				|e| format!("Error sending block bodies: {:?}", e)),
+
+			GET_BLOCK_HEADERS_PACKET => SyncSupplier::return_rlp(io, &rlp, peer,
+				SyncSupplier::return_block_headers,
+				|e| format!("Error sending block headers: {:?}", e)),
+
+			GET_RECEIPTS_PACKET => SyncSupplier::return_rlp(io, &rlp, peer,
+				SyncSupplier::return_receipts,
+				|e| format!("Error sending receipts: {:?}", e)),
+
+			GET_NODE_DATA_PACKET => SyncSupplier::return_rlp(io, &rlp, peer,
+				SyncSupplier::return_node_data,
+				|e| format!("Error sending nodes: {:?}", e)),
+
+			GET_SNAPSHOT_MANIFEST_PACKET => SyncSupplier::return_rlp(io, &rlp, peer,
+				SyncSupplier::return_snapshot_manifest,
+				|e| format!("Error sending snapshot manifest: {:?}", e)),
+
+			GET_SNAPSHOT_DATA_PACKET => SyncSupplier::return_rlp(io, &rlp, peer,
+				SyncSupplier::return_snapshot_data,
+				|e| format!("Error sending snapshot data: {:?}", e)),
+			CONSENSUS_DATA_PACKET => ChainSync::on_consensus_packet(io, peer, &rlp),
+			_ => {
+				sync.write().on_packet(io, peer, packet_id, data);
+				Ok(())
+			}
+		};
+		result.unwrap_or_else(|e| {
+			debug!(target:"sync", "{} -> Malformed packet {} : {}", peer, packet_id, e);
+		})
+	}
+
+	/// Respond to GetBlockHeaders request
+	fn return_block_headers(io: &SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult {
+		// Packet layout:
+		// [ block: { P , B_32 }, maxHeaders: P, skip: P, reverse: P in { 0 , 1 } ]
+		let max_headers: usize = r.val_at(1)?;
+		let skip: usize = r.val_at(2)?;
+		let reverse: bool = r.val_at(3)?;
+		let last = io.chain().chain_info().best_block_number;
+		let number = if r.at(0)?.size() == 32 {
+			// id is a hash
+			let hash: H256 = r.val_at(0)?;
+			trace!(target: "sync", "{} -> GetBlockHeaders (hash: {}, max: {}, skip: {}, reverse:{})", peer_id, hash, max_headers, skip, reverse);
+			match io.chain().block_header(BlockId::Hash(hash)) {
+				Some(hdr) => {
+					let number = hdr.number().into();
+					debug_assert_eq!(hdr.hash(), hash);
+
+					if max_headers == 1 || io.chain().block_hash(BlockId::Number(number)) != Some(hash) {
+						// Non canonical header or single header requested
+						// TODO: handle single-step reverse hashchains of non-canon hashes
+						trace!(target:"sync", "Returning single header: {:?}", hash);
+						let mut rlp = RlpStream::new_list(1);
+						rlp.append_raw(&hdr.into_inner(), 1);
+						return Ok(Some((BLOCK_HEADERS_PACKET, rlp)));
+					}
+					number
+				}
+				None => return Ok(Some((BLOCK_HEADERS_PACKET, RlpStream::new_list(0)))) //no such header, return nothing
+			}
+		} else {
+			trace!(target: "sync", "{} -> GetBlockHeaders (number: {}, max: {}, skip: {}, reverse:{})", peer_id, r.val_at::<BlockNumber>(0)?, max_headers, skip, reverse);
+			r.val_at(0)?
+		};
+
+		let mut number = if reverse {
+			cmp::min(last, number)
+		} else {
+			cmp::max(0, number)
+		};
+		let max_count = cmp::min(MAX_HEADERS_TO_SEND, max_headers);
+		let mut count = 0;
+		let mut data = Bytes::new();
+		let inc = (skip + 1) as BlockNumber;
+		let overlay = io.chain_overlay().read();
+
+		while number <= last && count < max_count {
+			if let Some(hdr) = overlay.get(&number) {
+				trace!(target: "sync", "{}: Returning cached fork header", peer_id);
+				data.extend_from_slice(hdr);
+				count += 1;
+			} else if let Some(hdr) = io.chain().block_header(BlockId::Number(number)) {
+				data.append(&mut hdr.into_inner());
+				count += 1;
+			} else {
+				// No required block.
+				break;
+			}
+			if reverse {
+				if number <= inc || number == 0 {
+					break;
+				}
+				number -= inc;
+			}
+			else {
+				number += inc;
+			}
+		}
+		let mut rlp = RlpStream::new_list(count as usize);
+		rlp.append_raw(&data, count as usize);
+		trace!(target: "sync", "{} -> GetBlockHeaders: returned {} entries", peer_id, count);
+		Ok(Some((BLOCK_HEADERS_PACKET, rlp)))
+	}
+
+	/// Respond to GetBlockBodies request
+	fn return_block_bodies(io: &SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult {
+		let mut count = r.item_count().unwrap_or(0);
+		if count == 0 {
+			debug!(target: "sync", "Empty GetBlockBodies request, ignoring.");
+			return Ok(None);
+		}
+		count = cmp::min(count, MAX_BODIES_TO_SEND);
+		let mut added = 0usize;
+		let mut data = Bytes::new();
+		for i in 0..count {
+			if let Some(body) = io.chain().block_body(BlockId::Hash(r.val_at::<H256>(i)?)) {
+				data.append(&mut body.into_inner());
+				added += 1;
+			}
+		}
+		let mut rlp = RlpStream::new_list(added);
+		rlp.append_raw(&data, added);
+		trace!(target: "sync", "{} -> GetBlockBodies: returned {} entries", peer_id, added);
+		Ok(Some((BLOCK_BODIES_PACKET, rlp)))
+	}
+
+	/// Respond to GetNodeData request
+	fn return_node_data(io: &SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult {
+		let mut count = r.item_count().unwrap_or(0);
+		trace!(target: "sync", "{} -> GetNodeData: {} entries", peer_id, count);
+		if count == 0 {
+			debug!(target: "sync", "Empty GetNodeData request, ignoring.");
+			return Ok(None);
+		}
+		count = cmp::min(count, MAX_NODE_DATA_TO_SEND);
+		let mut added = 0usize;
+		let mut data = Vec::new();
+		for i in 0..count {
+			if let Some(node) = io.chain().state_data(&r.val_at::<H256>(i)?) {
+				data.push(node);
+				added += 1;
+			}
+		}
+		trace!(target: "sync", "{} -> GetNodeData: return {} entries", peer_id, added);
+		let mut rlp = RlpStream::new_list(added);
+		for d in data {
+			rlp.append(&d);
+		}
+		Ok(Some((NODE_DATA_PACKET, rlp)))
+	}
+
+	fn return_receipts(io: &SyncIo, rlp: &Rlp, peer_id: PeerId) -> RlpResponseResult {
+		let mut count = rlp.item_count().unwrap_or(0);
+		trace!(target: "sync", "{} -> GetReceipts: {} entries", peer_id, count);
+		if count == 0 {
+			debug!(target: "sync", "Empty GetReceipts request, ignoring.");
+			return Ok(None);
+		}
+		count = cmp::min(count, MAX_RECEIPTS_HEADERS_TO_SEND);
+		let mut added_headers = 0usize;
+		let mut added_receipts = 0usize;
+		let mut data = Bytes::new();
+		for i in 0..count {
+			if let Some(mut receipts_bytes) = io.chain().block_receipts(&rlp.val_at::<H256>(i)?) {
+				// measure before appending: `append` drains the source vector
+				added_receipts += receipts_bytes.len();
+				data.append(&mut receipts_bytes);
+				added_headers += 1;
+				if added_receipts > MAX_RECEIPTS_TO_SEND { break; }
+			}
+		}
+		let mut rlp_result = RlpStream::new_list(added_headers);
+		rlp_result.append_raw(&data, added_headers);
+		Ok(Some((RECEIPTS_PACKET, rlp_result)))
+	}
+
+	/// Respond to GetSnapshotManifest request
+	fn return_snapshot_manifest(io: &SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult {
+		let count = r.item_count().unwrap_or(0);
+		trace!(target: "sync", "{} -> GetSnapshotManifest", peer_id);
+		if count != 0 {
+			debug!(target: "sync", "Invalid GetSnapshotManifest request, ignoring.");
+			return Ok(None);
+		}
+		let rlp = match io.snapshot_service().manifest() {
+			Some(manifest) => {
+				trace!(target: "sync", "{} <- SnapshotManifest", peer_id);
+				let mut rlp = RlpStream::new_list(1);
+				rlp.append_raw(&manifest.into_rlp(), 1);
+				rlp
+			},
+			None => {
+				trace!(target: "sync", "{}: No manifest to return", peer_id);
+				RlpStream::new_list(0)
+			}
+		};
+		Ok(Some((SNAPSHOT_MANIFEST_PACKET, rlp)))
+	}
+
+	/// Respond to GetSnapshotData request
+	fn return_snapshot_data(io: &SyncIo, r: &Rlp, peer_id: PeerId) -> RlpResponseResult {
+		let hash: H256 = r.val_at(0)?;
+		trace!(target: "sync", "{} -> GetSnapshotData {:?}", peer_id, hash);
+		let rlp = match io.snapshot_service().chunk(hash) {
+			Some(data) => {
+				let mut rlp = RlpStream::new_list(1);
+				trace!(target: "sync", "{} <- SnapshotData", peer_id);
+				rlp.append(&data);
+				rlp
+			},
+			None => {
+				RlpStream::new_list(0)
+			}
+		};
+		Ok(Some((SNAPSHOT_DATA_PACKET, rlp)))
+	}
+
+	fn return_rlp<FRlp, FError>(io: &mut SyncIo, rlp: &Rlp, peer: PeerId, rlp_func: FRlp, error_func: FError) -> Result<(), PacketDecodeError>
+		where FRlp : Fn(&SyncIo, &Rlp, PeerId) -> RlpResponseResult,
+			FError : FnOnce(network::Error) -> String
+	{
+		let response = rlp_func(io, rlp, peer);
+		match response {
+			Err(e) => Err(e),
+			Ok(Some((packet_id, rlp_stream))) => {
+				io.respond(packet_id, rlp_stream.out()).unwrap_or_else(
+					|e| debug!(target: "sync", "{:?}", error_func(e)));
+				Ok(())
+			}
+			_ => Ok(())
+		}
+	}
+}
+
+#[cfg(test)]
+mod test {
+	use std::collections::{VecDeque};
+	use tests::helpers::{TestIo};
+	use tests::snapshot::TestSnapshotService;
+	use ethereum_types::{H256};
+	use parking_lot::RwLock;
+	use bytes::Bytes;
+	use rlp::{Rlp, RlpStream};
+	use super::{*, super::tests::*};
+	use ethcore::client::{BlockChainClient, EachBlockWith, TestBlockChainClient};
+
+	#[test]
+	fn return_block_headers() {
+		use ethcore::views::HeaderView;
+		fn make_hash_req(h: &H256, count: usize, skip: usize, reverse: bool) -> Bytes {
+			let mut rlp = RlpStream::new_list(4);
+			rlp.append(h);
+			rlp.append(&count);
+			rlp.append(&skip);
+			rlp.append(&if reverse {1u32} else {0u32});
+			rlp.out()
+		}
+
+		fn make_num_req(n: usize, count: usize, skip: usize, reverse: bool) -> Bytes {
+			let mut rlp = RlpStream::new_list(4);
+			rlp.append(&n);
+			rlp.append(&count);
+			rlp.append(&skip);
+			rlp.append(&if reverse {1u32} else {0u32});
+			rlp.out()
+		}
+		fn to_header_vec(rlp: ::chain::RlpResponseResult) -> Vec<Bytes> {
+			Rlp::new(&rlp.unwrap().unwrap().1.out()).iter().map(|r| r.as_raw().to_vec()).collect()
+		}
+
+		let mut client = TestBlockChainClient::new();
+		client.add_blocks(100, EachBlockWith::Nothing);
+		let blocks: Vec<_> = (0 .. 100)
+			.map(|i| (&client as &BlockChainClient).block(BlockId::Number(i as BlockNumber)).map(|b| b.into_inner()).unwrap()).collect();
+		let headers: Vec<_> = blocks.iter().map(|b| Rlp::new(b).at(0).unwrap().as_raw().to_vec()).collect();
+		let hashes: Vec<_> = headers.iter().map(|h| view!(HeaderView, h).hash()).collect();
+
+		let queue = RwLock::new(VecDeque::new());
+		let ss = TestSnapshotService::new();
+		let io = TestIo::new(&mut client, &ss, &queue, None);
+
+		let unknown: H256 = H256::new();
+		let result = SyncSupplier::return_block_headers(&io, &Rlp::new(&make_hash_req(&unknown, 1, 0, false)), 0);
+		assert!(to_header_vec(result).is_empty());
+		let result = SyncSupplier::return_block_headers(&io, &Rlp::new(&make_hash_req(&unknown, 1, 0, true)), 0);
+		assert!(to_header_vec(result).is_empty());
+
+		let result = SyncSupplier::return_block_headers(&io, &Rlp::new(&make_hash_req(&hashes[2], 1, 0, true)), 0);
+		assert_eq!(to_header_vec(result), vec![headers[2].clone()]);
+
+		let result = SyncSupplier::return_block_headers(&io, &Rlp::new(&make_hash_req(&hashes[2], 1, 0, false)), 0);
+		assert_eq!(to_header_vec(result), vec![headers[2].clone()]);
+
+		let result = SyncSupplier::return_block_headers(&io, &Rlp::new(&make_hash_req(&hashes[50], 3, 5, false)), 0);
+		assert_eq!(to_header_vec(result), vec![headers[50].clone(), headers[56].clone(), headers[62].clone()]);
+
+		let result = SyncSupplier::return_block_headers(&io, &Rlp::new(&make_hash_req(&hashes[50], 3, 5, true)), 0);
+		assert_eq!(to_header_vec(result), vec![headers[50].clone(), headers[44].clone(), headers[38].clone()]);
+
+		let result = SyncSupplier::return_block_headers(&io, &Rlp::new(&make_num_req(2, 1, 0, true)), 0);
+		assert_eq!(to_header_vec(result), vec![headers[2].clone()]);
+
+		let result = SyncSupplier::return_block_headers(&io, &Rlp::new(&make_num_req(2, 1, 0, false)), 0);
+		assert_eq!(to_header_vec(result), vec![headers[2].clone()]);
+
+		let result = SyncSupplier::return_block_headers(&io, &Rlp::new(&make_num_req(50, 3, 5, false)), 0);
+		assert_eq!(to_header_vec(result), vec![headers[50].clone(), headers[56].clone(), headers[62].clone()]);
+
+		let result = SyncSupplier::return_block_headers(&io, &Rlp::new(&make_num_req(50, 3, 5, true)), 0);
+		assert_eq!(to_header_vec(result), vec![headers[50].clone(), headers[44].clone(), headers[38].clone()]);
+	}
+
+	#[test]
+	fn return_nodes() {
+		let mut client = TestBlockChainClient::new();
+		let queue = RwLock::new(VecDeque::new());
+		let sync = dummy_sync_with_peer(H256::new(), &client);
+		let ss = TestSnapshotService::new();
+		let mut io = TestIo::new(&mut client, &ss, &queue, None);
+
+		let mut node_list = RlpStream::new_list(3);
+		node_list.append(&H256::from("0000000000000000000000000000000000000000000000005555555555555555"));
+		node_list.append(&H256::from("ffffffffffffffffffffffffffffffffffffffffffffaaaaaaaaaaaaaaaaaaaa"));
node_list.append(&H256::from("aff0000000000000000000000000000000000000000000000000000000000000")); + + let node_request = node_list.out(); + // it returns rlp ONLY for hashes started with "f" + let result = SyncSupplier::return_node_data(&io, &Rlp::new(&node_request.clone()), 0); + + assert!(result.is_ok()); + let rlp_result = result.unwrap(); + assert!(rlp_result.is_some()); + + // the length of one rlp-encoded hashe + let rlp = rlp_result.unwrap().1.out(); + let rlp = Rlp::new(&rlp); + assert_eq!(Ok(1), rlp.item_count()); + + io.sender = Some(2usize); + + ChainSync::dispatch_packet(&RwLock::new(sync), &mut io, 0usize, GET_NODE_DATA_PACKET, &node_request); + assert_eq!(1, io.packets.len()); + } + + #[test] + fn return_receipts_empty() { + let mut client = TestBlockChainClient::new(); + let queue = RwLock::new(VecDeque::new()); + let ss = TestSnapshotService::new(); + let io = TestIo::new(&mut client, &ss, &queue, None); + + let result = SyncSupplier::return_receipts(&io, &Rlp::new(&[0xc0]), 0); + + assert!(result.is_ok()); + } + + #[test] + fn return_receipts() { + let mut client = TestBlockChainClient::new(); + let queue = RwLock::new(VecDeque::new()); + let sync = dummy_sync_with_peer(H256::new(), &client); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &queue, None); + + let mut receipt_list = RlpStream::new_list(4); + receipt_list.append(&H256::from("0000000000000000000000000000000000000000000000005555555555555555")); + receipt_list.append(&H256::from("ff00000000000000000000000000000000000000000000000000000000000000")); + receipt_list.append(&H256::from("fff0000000000000000000000000000000000000000000000000000000000000")); + receipt_list.append(&H256::from("aff0000000000000000000000000000000000000000000000000000000000000")); + + let receipts_request = receipt_list.out(); + // it returns rlp ONLY for hashes started with "f" + let result = SyncSupplier::return_receipts(&io, &Rlp::new(&receipts_request.clone()), 0); + + assert!(result.is_ok()); + let rlp_result = result.unwrap(); + assert!(rlp_result.is_some()); + + // the length of two rlp-encoded receipts + assert_eq!(603, rlp_result.unwrap().1.out().len()); + + io.sender = Some(2usize); + ChainSync::dispatch_packet(&RwLock::new(sync), &mut io, 0usize, GET_RECEIPTS_PACKET, &receipts_request); + assert_eq!(1, io.packets.len()); + } +} diff --git a/ethcore/sync/src/tests/snapshot.rs b/ethcore/sync/src/tests/snapshot.rs index 2f6441f4f28..804ebe9c535 100644 --- a/ethcore/sync/src/tests/snapshot.rs +++ b/ethcore/sync/src/tests/snapshot.rs @@ -22,7 +22,7 @@ use parking_lot::Mutex; use bytes::Bytes; use ethcore::snapshot::{SnapshotService, ManifestData, RestorationStatus}; use ethcore::header::BlockNumber; -use ethcore::client::{EachBlockWith}; +use ethcore::client::EachBlockWith; use super::helpers::*; use {SyncConfig, WarpSync}; @@ -99,7 +99,15 @@ impl SnapshotService for TestSnapshotService { } fn begin_restore(&self, manifest: ManifestData) { - *self.restoration_manifest.lock() = Some(manifest); + let mut restoration_manifest = self.restoration_manifest.lock(); + + if let Some(ref c_manifest) = *restoration_manifest { + if c_manifest.state_root == manifest.state_root { + return; + } + } + + *restoration_manifest = Some(manifest); self.state_restoration_chunks.lock().clear(); self.block_restoration_chunks.lock().clear(); } From 842b75c0e63f76e1b77d75529c7f6c3f4969d8d7 Mon Sep 17 00:00:00 2001 From: David Date: Wed, 9 May 2018 12:05:56 +0200 Subject: [PATCH 11/11] Decoding headers can fail (#8570) * 
* rlp::decode returns Result
* Fix journaldb to handle rlp::decode Result
* Fix ethcore to work with rlp::decode returning Result
* Light client handles rlp::decode returning Result
* Fix tests in rlp_derive
* Fix tests
* Cleanup
* cleanup
* Allow panic rather than breaking out of iterator
* Let decoding failures when reading from disk blow up
* syntax
* Fix the trivial grumbles
* Fix failing tests
* Make Account::from_rlp return Result
* Syntax, sigh
* Temp-fix for decoding failures
* Header::decode returns Result

Handle new return type throughout the code base.

* Do not continue reading from the DB when a value could not be read
* Fix tests
* Handle header decoding in light_sync
* Handling header decoding errors
* Let the DecodeError bubble up unchanged
* Remove redundant error conversion
---
 ethcore/light/src/client/header_chain.rs   | 10 ++++---
 ethcore/light/src/client/mod.rs            | 12 ++++++--
 ethcore/src/client/client.rs               | 14 ++++++----
 ethcore/src/client/test_client.rs          |  5 ++--
 ethcore/src/encoded.rs                     |  6 ++--
 ethcore/src/engines/authority_round/mod.rs |  2 +-
 ethcore/src/error.rs                       | 10 +++++--
 ethcore/src/miner/miner.rs                 | 11 ++++++--
 ethcore/src/snapshot/mod.rs                |  2 +-
 ethcore/src/verification/verification.rs   |  5 ++--
 ethcore/sync/src/light_sync/response.rs    | 32 ++++++++++++----------
 ethcore/sync/src/light_sync/tests/mod.rs   |  2 +-
 rpc/src/v1/helpers/errors.rs               | 13 +++++++++
 rpc/src/v1/impls/eth.rs                    | 13 +++++----
 rpc/src/v1/impls/light/eth.rs              |  2 +-
 rpc/src/v1/impls/light/parity.rs           |  2 +-
 rpc/src/v1/impls/parity.rs                 |  4 +--
 rpc/src/v1/impls/traces.rs                 |  6 ++--
 18 files changed, 98 insertions(+), 53 deletions(-)

diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs
index 02a18a60dfe..b85091e53bf 100644
--- a/ethcore/light/src/client/header_chain.rs
+++ b/ethcore/light/src/client/header_chain.rs
@@ -305,7 +305,7 @@ impl HeaderChain {
 			batch.put(col, cht_key(cht_num as u64).as_bytes(), &::rlp::encode(cht_root));
 		}
 
-		let decoded_header = hardcoded_sync.header.decode();
+		let decoded_header = hardcoded_sync.header.decode()?;
 		let decoded_header_num = decoded_header.number();
 
 		// write the block in the DB.
@@ -585,7 +585,7 @@ impl HeaderChain {
 			bail!(ErrorKind::Database(msg.into()));
 		};
 
-		let decoded = header.decode();
+		let decoded = header.decode().expect("decoding db value failed");
 
 		let entry: Entry = {
 			let bytes = self.db.get(self.col, era_key(h_num).as_bytes())?
@@ -815,7 +815,9 @@ impl HeaderChain {
 
 		for hdr in self.ancestry_iter(BlockId::Hash(parent_hash)) {
 			if let Some(transition) = live_proofs.get(&hdr.hash()).cloned() {
-				return Some((hdr.decode(), transition.proof))
+				return hdr.decode().map(|decoded_hdr| {
+					(decoded_hdr, transition.proof)
+				}).ok();
 			}
 		}
 
@@ -1224,7 +1226,7 @@ mod tests {
 		let hardcoded_sync = chain.read_hardcoded_sync().expect("failed reading hardcoded sync").expect("failed unwrapping hardcoded sync");
 		assert_eq!(hardcoded_sync.chts.len(), 3);
 		assert_eq!(hardcoded_sync.total_difficulty, total_difficulty);
-		let decoded: Header = hardcoded_sync.header.decode();
+		let decoded: Header = hardcoded_sync.header.decode().expect("decoding failed");
 		assert_eq!(decoded.number(), h_num);
 	}
 }
diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs
index cf603d853f0..82b424cc833 100644
--- a/ethcore/light/src/client/mod.rs
+++ b/ethcore/light/src/client/mod.rs
@@ -318,7 +318,7 @@ impl Client {
 
 		let epoch_proof = self.engine.is_epoch_end(
 			&verified_header,
-			&|h| self.chain.block_header(BlockId::Hash(h)).map(|hdr| hdr.decode()),
+			&|h| self.chain.block_header(BlockId::Hash(h)).and_then(|hdr| hdr.decode().ok()),
 			&|h| self.chain.pending_transition(h),
 		);
 
@@ -426,7 +426,15 @@ impl Client {
 		};
 
 		// Verify Block Family
-		let verify_family_result = self.engine.verify_block_family(&verified_header, &parent_header.decode());
+
+		let verify_family_result = {
+			parent_header.decode()
+				.map_err(|dec_err| dec_err.into())
+				.and_then(|decoded| {
+					self.engine.verify_block_family(&verified_header, &decoded)
+				})
+
+		};
 		if let Err(e) = verify_family_result {
 			warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", verified_header.number(), verified_header.hash(), e);
diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs
index bffa4e38ba8..76d78e3df63 100644
--- a/ethcore/src/client/client.rs
+++ b/ethcore/src/client/client.rs
@@ -1219,8 +1219,7 @@ impl Client {
 				=> Some(self.chain.read().best_block_header()),
 			BlockId::Number(number) if number == self.chain.read().best_block_number()
 				=> Some(self.chain.read().best_block_header()),
-			_
-				=> self.block_header(id).map(|h| h.decode()),
+			_ => self.block_header(id).and_then(|h| h.decode().ok())
 		}
 	}
 }
@@ -1915,7 +1914,11 @@ impl BlockChainClient for Client {
 
 	fn uncle_extra_info(&self, id: UncleId) -> Option<BTreeMap<String, String>> {
 		self.uncle(id)
-			.map(|header| self.engine.extra_info(&header.decode()))
+			.and_then(|h| {
+				h.decode().map(|dh| {
+					self.engine.extra_info(&dh)
+				}).ok()
+			})
 	}
 
 	fn pruning_info(&self) -> PruningInfo {
@@ -2033,7 +2036,8 @@ impl ReopenBlock for Client {
 			for h in uncles {
 				if !block.uncles().iter().any(|header| header.hash() == h) {
 					let uncle = chain.block_header_data(&h).expect("find_uncle_hashes only returns hashes for existing headers; qed");
-					block.push_uncle(uncle.decode()).expect("pushing up to maximum_uncle_count;
+					let uncle = uncle.decode().expect("decoding failure");
+					block.push_uncle(uncle).expect("pushing up to maximum_uncle_count;
 						push_uncle is not ok only if more than maximum_uncle_count is pushed;
 						so all push_uncle are Ok;
 						qed");
@@ -2074,7 +2078,7 @@ impl PrepareOpenBlock for Client {
 				.into_iter()
 				.take(engine.maximum_uncle_count(open_block.header().number()))
 				.foreach(|h| {
-					open_block.push_uncle(h.decode()).expect("pushing maximum_uncle_count;
+					open_block.push_uncle(h.decode().expect("decoding failure")).expect("pushing maximum_uncle_count;
 						open_block was just created;
 						push_uncle is not ok only if more than
 						maximum_uncle_count is pushed;
 						so all push_uncle are Ok;
diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs
index b229159667d..6a3166f7c04 100644
--- a/ethcore/src/client/test_client.rs
+++ b/ethcore/src/client/test_client.rs
@@ -289,7 +289,7 @@ impl TestBlockChainClient {
 	/// Make a bad block by setting invalid extra data.
 	pub fn corrupt_block(&self, n: BlockNumber) {
 		let hash = self.block_hash(BlockId::Number(n)).unwrap();
-		let mut header: BlockHeader = self.block_header(BlockId::Number(n)).unwrap().decode();
+		let mut header: BlockHeader = self.block_header(BlockId::Number(n)).unwrap().decode().expect("decoding failed");
 		header.set_extra_data(b"This extra data is way too long to be considered valid".to_vec());
 		let mut rlp = RlpStream::new_list(3);
 		rlp.append(&header);
@@ -301,7 +301,7 @@ impl TestBlockChainClient {
 	/// Make a bad block by setting invalid parent hash.
 	pub fn corrupt_block_parent(&self, n: BlockNumber) {
 		let hash = self.block_hash(BlockId::Number(n)).unwrap();
-		let mut header: BlockHeader = self.block_header(BlockId::Number(n)).unwrap().decode();
+		let mut header: BlockHeader = self.block_header(BlockId::Number(n)).unwrap().decode().expect("decoding failed");
 		header.set_parent_hash(H256::from(42));
 		let mut rlp = RlpStream::new_list(3);
 		rlp.append(&header);
@@ -479,6 +479,7 @@ impl BlockInfo for TestBlockChainClient {
 		self.block_header(BlockId::Hash(self.chain_info().best_block_hash))
 			.expect("Best block always has header.")
 			.decode()
+			.expect("decoding failed")
 	}
 
 	fn block(&self, id: BlockId) -> Option<encoded::Block> {
diff --git a/ethcore/src/encoded.rs b/ethcore/src/encoded.rs
index 01df386cc2e..c436607f8c3 100644
--- a/ethcore/src/encoded.rs
+++ b/ethcore/src/encoded.rs
@@ -28,7 +28,7 @@ use ethereum_types::{H256, Bloom, U256, Address};
 use hash::keccak;
 use header::{BlockNumber, Header as FullHeader};
 use heapsize::HeapSizeOf;
-use rlp::{Rlp, RlpStream};
+use rlp::{self, Rlp, RlpStream};
 use transaction::UnverifiedTransaction;
 use views::{self, BlockView, HeaderView, BodyView};
 
@@ -47,7 +47,9 @@ impl Header {
 	pub fn new(encoded: Vec<u8>) -> Self { Header(encoded) }
 
 	/// Upgrade this encoded view to a fully owned `Header` object.
-	pub fn decode(&self) -> FullHeader { ::rlp::decode(&self.0).expect("decoding failure") }
+	pub fn decode(&self) -> Result<FullHeader, rlp::DecoderError> {
+		rlp::decode(&self.0)
+	}
 
 	/// Get a borrowed header view onto the data.
 	#[inline]
diff --git a/ethcore/src/engines/authority_round/mod.rs b/ethcore/src/engines/authority_round/mod.rs
index c2aee7c6efc..ed9a9a4f251 100644
--- a/ethcore/src/engines/authority_round/mod.rs
+++ b/ethcore/src/engines/authority_round/mod.rs
@@ -996,7 +996,7 @@ impl Engine<EthereumMachine> for AuthorityRound {
 
 			let parent = client.block_header(::client::BlockId::Hash(*block.header().parent_hash()))
 				.expect("hash is from parent; parent header must exist; qed")
-				.decode();
+				.decode()?;
 
 			let parent_step = header_step(&parent, self.empty_steps_transition)?;
 			let current_step = self.step.load();
diff --git a/ethcore/src/error.rs b/ethcore/src/error.rs
index 561701e7620..bec749297cb 100644
--- a/ethcore/src/error.rs
+++ b/ethcore/src/error.rs
@@ -290,6 +290,12 @@ error_chain! {
{ description("Unknown engine name") display("Unknown engine name ({})", name) } + + #[doc = "RLP decoding errors"] + Decoder(err: ::rlp::DecoderError) { + description("decoding value failed") + display("decoding value failed with error: {}", err) + } } } @@ -310,11 +316,11 @@ impl From for Error { fn from(err: AccountsError) -> Error { ErrorKind::AccountProvider(err).into() } -} +} impl From<::rlp::DecoderError> for Error { fn from(err: ::rlp::DecoderError) -> Error { - UtilError::from(err).into() + ErrorKind::Decoder(err).into() } } diff --git a/ethcore/src/miner/miner.rs b/ethcore/src/miner/miner.rs index 76a011343fb..3168ff1a846 100644 --- a/ethcore/src/miner/miner.rs +++ b/ethcore/src/miner/miner.rs @@ -528,8 +528,8 @@ impl Miner { } /// Attempts to perform internal sealing (one that does not require work) and handles the result depending on the type of Seal. - fn seal_and_import_block_internally(&self, chain: &C, block: ClosedBlock) -> bool where - C: BlockChain + SealedBlockImporter, + fn seal_and_import_block_internally(&self, chain: &C, block: ClosedBlock) -> bool + where C: BlockChain + SealedBlockImporter, { { let sealing = self.sealing.lock(); @@ -544,7 +544,12 @@ impl Miner { trace!(target: "miner", "seal_block_internally: attempting internal seal."); let parent_header = match chain.block_header(BlockId::Hash(*block.header().parent_hash())) { - Some(hdr) => hdr.decode(), + Some(h) => { + match h.decode() { + Ok(decoded_hdr) => decoded_hdr, + Err(_) => return false + } + } None => return false, }; diff --git a/ethcore/src/snapshot/mod.rs b/ethcore/src/snapshot/mod.rs index 94236e9e95d..8871ced26fa 100644 --- a/ethcore/src/snapshot/mod.rs +++ b/ethcore/src/snapshot/mod.rs @@ -487,7 +487,7 @@ pub fn verify_old_block(rng: &mut OsRng, header: &Header, engine: &EthEngine, ch if always || rng.gen::() <= POW_VERIFY_RATE { engine.verify_block_unordered(header)?; match chain.block_header_data(header.parent_hash()) { - Some(parent) => engine.verify_block_family(header, &parent.decode()), + Some(parent) => engine.verify_block_family(header, &parent.decode()?), None => Ok(()), } } else { diff --git a/ethcore/src/verification/verification.rs b/ethcore/src/verification/verification.rs index 92f3e77f902..03a6d6f8d41 100644 --- a/ethcore/src/verification/verification.rs +++ b/ethcore/src/verification/verification.rs @@ -224,7 +224,7 @@ fn verify_uncles(header: &Header, bytes: &[u8], bc: &BlockProvider, engine: &Eth return Err(From::from(BlockError::UncleParentNotInChain(uncle_parent.hash()))); } - let uncle_parent = uncle_parent.decode(); + let uncle_parent = uncle_parent.decode()?; verify_parent(&uncle, &uncle_parent, engine)?; engine.verify_block_family(&uncle, &uncle_parent)?; verified.insert(uncle.hash()); @@ -500,10 +500,9 @@ mod tests { // no existing tests need access to test, so having this not function // is fine. let client = ::client::TestBlockChainClient::default(); - let parent = bc.block_header_data(header.parent_hash()) .ok_or(BlockError::UnknownParent(header.parent_hash().clone()))? - .decode(); + .decode()?; let full_params = FullFamilyParams { block_bytes: bytes, diff --git a/ethcore/sync/src/light_sync/response.rs b/ethcore/sync/src/light_sync/response.rs index 4dfb383d466..74665118b7f 100644 --- a/ethcore/sync/src/light_sync/response.rs +++ b/ethcore/sync/src/light_sync/response.rs @@ -16,13 +16,11 @@ //! Helpers for decoding and verifying responses for headers. 
-use std::fmt;
-
-use ethcore::encoded;
-use ethcore::header::Header;
+use ethcore::{self, encoded, header::Header};
+use ethereum_types::H256;
 use light::request::{HashOrNumber, CompleteHeadersRequest as HeadersRequest};
 use rlp::DecoderError;
-use ethereum_types::H256;
+use std::fmt;
 
 /// Errors found when decoding headers and verifying with basic constraints.
 #[derive(Debug, PartialEq)]
@@ -74,19 +72,23 @@ pub trait Constraint {
 
 /// Do basic verification of provided headers against a request.
 pub fn verify(headers: &[encoded::Header], request: &HeadersRequest) -> Result<Vec<Header>, BasicError> {
-	let headers: Vec<_> = headers.iter().map(|h| h.decode()).collect();
+	let headers: Result<Vec<_>, _> = headers.iter().map(|h| h.decode() ).collect();
+	match headers {
+		Ok(headers) => {
+			let reverse = request.reverse;
+
+			Max(request.max as usize).verify(&headers, reverse)?;
+			match request.start {
+				HashOrNumber::Number(ref num) => StartsAtNumber(*num).verify(&headers, reverse)?,
+				HashOrNumber::Hash(ref hash) => StartsAtHash(*hash).verify(&headers, reverse)?,
+			}
 
-	let reverse = request.reverse;
+			SkipsBetween(request.skip).verify(&headers, reverse)?;
 
-	Max(request.max as usize).verify(&headers, reverse)?;
-	match request.start {
-		HashOrNumber::Number(ref num) => StartsAtNumber(*num).verify(&headers, reverse)?,
-		HashOrNumber::Hash(ref hash) => StartsAtHash(*hash).verify(&headers, reverse)?,
+			Ok(headers)
+		},
+		Err(e) => Err(e.into())
 	}
-
-	SkipsBetween(request.skip).verify(&headers, reverse)?;
-
-	Ok(headers)
 }
 
 struct StartsAtNumber(u64);
diff --git a/ethcore/sync/src/light_sync/tests/mod.rs b/ethcore/sync/src/light_sync/tests/mod.rs
index 9fd270838bf..3fee1c71707 100644
--- a/ethcore/sync/src/light_sync/tests/mod.rs
+++ b/ethcore/sync/src/light_sync/tests/mod.rs
@@ -45,7 +45,7 @@ fn fork_post_cht() {
 	for id in (0..CHAIN_LENGTH).map(|x| x + 1).map(BlockId::Number) {
 		let (light_peer, full_peer) = (net.peer(0), net.peer(1));
 		let light_chain = light_peer.light_chain();
-		let header = full_peer.chain().block_header(id).unwrap().decode();
+		let header = full_peer.chain().block_header(id).unwrap().decode().expect("decoding failure");
 		let _ = light_chain.import_header(header);
 		light_chain.flush_queue();
 		light_chain.import_verified();
diff --git a/rpc/src/v1/helpers/errors.rs b/rpc/src/v1/helpers/errors.rs
index 4f3289a116b..c85beef7d5a 100644
--- a/rpc/src/v1/helpers/errors.rs
+++ b/rpc/src/v1/helpers/errors.rs
@@ -360,6 +360,19 @@ pub fn transaction<T: Into<EthcoreError>>(error: T) -> Error {
 	}
 }
 
+pub fn decode<T: Into<EthcoreError>>(error: T) -> Error {
+	let error = error.into();
+	match *error.kind() {
+		ErrorKind::Decoder(ref dec_err) => rlp(dec_err.clone()),
+		_ => Error {
+			code: ErrorCode::InternalError,
+			message: "decoding error".into(),
+			data: None,
+		}
+
+	}
+}
+
 pub fn rlp(error: DecoderError) -> Error {
 	Error {
 		code: ErrorCode::InvalidParams,
diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs
index a7ff4916dee..389805c1765 100644
--- a/rpc/src/v1/impls/eth.rs
+++ b/rpc/src/v1/impls/eth.rs
@@ -343,7 +343,10 @@ impl EthClient<C, SN, S, M, EM> where
-			Some(hdr) => hdr.decode(),
+			Some(hdr) => match hdr.decode() {
+				Ok(h) => h,
+				Err(e) => return Err(errors::decode(e))
+			},
 			None => { return Ok(None); }
 		};
@@ -851,9 +854,9 @@
 		};
 
 		let state = try_bf!(self.client.state_at(id).ok_or(errors::state_pruned()));
-		let header = try_bf!(self.client.block_header(id).ok_or(errors::state_pruned()));
+		let header = try_bf!(self.client.block_header(id).ok_or(errors::state_pruned()).and_then(|h| h.decode().map_err(errors::decode)));
 
-			(state, header.decode())
+			(state, header)
 		};
 
 		let result = self.client.call(&signed, Default::default(), &mut state, &header);
@@ -890,9 +893,9 @@
 		};
 
 		let state = try_bf!(self.client.state_at(id).ok_or(errors::state_pruned()));
-		let header = try_bf!(self.client.block_header(id).ok_or(errors::state_pruned()));
+		let header = try_bf!(self.client.block_header(id).ok_or(errors::state_pruned()).and_then(|h| h.decode().map_err(errors::decode)));
 
-			(state, header.decode())
+			(state, header)
 		};
 
 		Box::new(future::done(self.client.estimate_gas(&signed, &state, &header)
diff --git a/rpc/src/v1/impls/light/eth.rs b/rpc/src/v1/impls/light/eth.rs
index eeef12da6e6..35f7792b52c 100644
--- a/rpc/src/v1/impls/light/eth.rs
+++ b/rpc/src/v1/impls/light/eth.rs
@@ -371,7 +371,7 @@ impl Eth for EthClient {
 	}
 
 	fn send_raw_transaction(&self, raw: Bytes) -> Result<RpcH256> {
-		let best_header = self.client.best_block_header().decode();
+		let best_header = self.client.best_block_header().decode().map_err(errors::decode)?;
 
 		Rlp::new(&raw.into_vec()).as_val()
 			.map_err(errors::rlp)
diff --git a/rpc/src/v1/impls/light/parity.rs b/rpc/src/v1/impls/light/parity.rs
index 3d31d9e6765..982c7ff3633 100644
--- a/rpc/src/v1/impls/light/parity.rs
+++ b/rpc/src/v1/impls/light/parity.rs
@@ -395,7 +395,7 @@ impl Parity for ParityClient {
 		let engine = self.light_dispatch.client.engine().clone();
 		let from_encoded = move |encoded: encoded::Header| {
-			let header = encoded.decode();
+			let header = encoded.decode().expect("decoding error"); // REVIEW: not sure what to do here; what is a decent return value for the error case here?
 			let extra_info = engine.extra_info(&header);
 			RichHeader {
 				inner: Header {
diff --git a/rpc/src/v1/impls/parity.rs b/rpc/src/v1/impls/parity.rs
index db66bddc7e9..08d5147202c 100644
--- a/rpc/src/v1/impls/parity.rs
+++ b/rpc/src/v1/impls/parity.rs
@@ -487,9 +487,9 @@ impl Parity for ParityClient where
 		};
 
 		let state = self.client.state_at(id).ok_or(errors::state_pruned())?;
-		let header = self.client.block_header(id).ok_or(errors::state_pruned())?;
+		let header = self.client.block_header(id).ok_or(errors::state_pruned())?.decode().map_err(errors::decode)?;
 
-			(state, header.decode())
+			(state, header)
 		};
 
 		self.client.call_many(&requests, &mut state, &header)
diff --git a/rpc/src/v1/impls/traces.rs b/rpc/src/v1/impls/traces.rs
index bf4dc83beb1..0130b3b9c13 100644
--- a/rpc/src/v1/impls/traces.rs
+++ b/rpc/src/v1/impls/traces.rs
@@ -104,7 +104,7 @@ impl Traces for TracesClient where
 		let mut state = self.client.state_at(id).ok_or(errors::state_pruned())?;
 		let header = self.client.block_header(id).ok_or(errors::state_pruned())?;
 
-		self.client.call(&signed, to_call_analytics(flags), &mut state, &header.decode())
+		self.client.call(&signed, to_call_analytics(flags), &mut state, &header.decode().map_err(errors::decode)?)
 			.map(TraceResults::from)
 			.map_err(errors::call)
 	}
@@ -131,7 +131,7 @@ impl Traces for TracesClient where
 		let mut state = self.client.state_at(id).ok_or(errors::state_pruned())?;
 		let header = self.client.block_header(id).ok_or(errors::state_pruned())?;
 
-		self.client.call_many(&requests, &mut state, &header.decode())
+		self.client.call_many(&requests, &mut state, &header.decode().map_err(errors::decode)?)
 			.map(|results| results.into_iter().map(TraceResults::from).collect())
 			.map_err(errors::call)
 	}
@@ -153,7 +153,7 @@ impl Traces for TracesClient where
 		let mut state = self.client.state_at(id).ok_or(errors::state_pruned())?;
 		let header = self.client.block_header(id).ok_or(errors::state_pruned())?;
 
-		self.client.call(&signed, to_call_analytics(flags), &mut state, &header.decode())
+		self.client.call(&signed, to_call_analytics(flags), &mut state, &header.decode().map_err(errors::decode)?)
			.map(TraceResults::from)
			.map_err(errors::call)
	}
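
Editor's note: the recurring pattern in PATCH 11 is that `encoded::Header::decode()` now returns a `Result` instead of panicking inside `rlp::decode`, and callers pick one of three styles depending on context. The sketch below is not part of the patch series; it restates those conventions with simplified stand-in types (`EncodedHeader`, `Header`, and `DecoderError` here are illustrative, not the ethcore definitions), so it can be compiled and played with on its own.

```rust
// Illustrative sketch only; simplified stand-ins for ethcore's types.
#[derive(Debug)]
struct DecoderError(String);

#[derive(Debug)]
struct Header { number: u64 }

struct EncodedHeader(Vec<u8>);

impl EncodedHeader {
    // Mirrors the new fallible signature: decoding can fail instead of panicking.
    fn decode(&self) -> Result<Header, DecoderError> {
        if self.0.len() >= 8 {
            let mut n = [0u8; 8];
            n.copy_from_slice(&self.0[..8]);
            Ok(Header { number: u64::from_be_bytes(n) })
        } else {
            Err(DecoderError("header too short".into()))
        }
    }
}

// 1) Propagate with `?` when the enclosing function already returns a
//    compatible Result (as in authority_round and verification.rs).
fn parent_number(parent: &EncodedHeader) -> Result<u64, DecoderError> {
    let decoded = parent.decode()?;
    Ok(decoded.number)
}

// 2) Collapse to Option with `.ok()` when a lookup may simply yield nothing
//    (as in client.rs's best_block_header fallback and uncle_extra_info).
fn best_header(raw: Option<EncodedHeader>) -> Option<Header> {
    raw.and_then(|h| h.decode().ok())
}

// 3) `.expect(...)` only at boundaries where a failure means corrupt local
//    data, so blowing up is the intended behaviour (as when reading the DB).
fn trusted_db_read(raw: EncodedHeader) -> Header {
    raw.decode().expect("decoding db value failed")
}

fn main() {
    let good = EncodedHeader(42u64.to_be_bytes().to_vec());
    assert_eq!(parent_number(&good).unwrap(), 42);
    assert!(best_header(Some(EncodedHeader(vec![0, 1]))).is_none());
    println!("decoded number: {}", trusted_db_read(good).number);
}
```

The same three-way split explains the RPC-side changes: handlers that return JSON-RPC errors map the failure through `errors::decode`, which plays the role of the `?` branch above.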
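A second note, on PATCH 10's `SyncPropagator::select_peers_for_transactions`: the terse `sqrt(x)/x scaled to max u32` comment means each peer passes the random filter with probability `1/sqrt(n)`, so on average `sqrt(n)` of `n` peers receive a broadcast (bounded below by MIN_PEERS_PROPAGATION and above by MAX_PEERS_PROPAGATION). A minimal self-contained illustration of that arithmetic follows; the peer counts are arbitrary examples, not the sync crate's defaults.

```rust
fn main() {
    // A peer is kept when next_u32() < fraction, i.e. with
    // probability fraction / 2^32 = 1 / sqrt(n).
    for &n in [4usize, 16, 64, 256].iter() {
        let fraction = ((n as f64).powf(-0.5) * (u32::max_value() as f64)) as u32;
        let keep_probability = fraction as f64 / u32::max_value() as f64;
        // Expected number of selected peers is n * p = sqrt(n).
        let expected_peers = keep_probability * n as f64;
        println!("n = {:3}: p = {:.3}, expected ~ {:.1}", n, keep_probability, expected_peers);
    }
}
```

Growing the recipient set as the square root of the peer count keeps total network traffic sublinear while still reaching enough peers for gossip to converge.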