diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile deleted file mode 100644 index 79cbe35bb2822..0000000000000 --- a/.devcontainer/Dockerfile +++ /dev/null @@ -1,5 +0,0 @@ -FROM paritytech/substrate-playground-template-base:sha-0793587 - -# Here the whole repo is already accessible at . (thanks to the inherited image) - -RUN cargo build --release diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json deleted file mode 100644 index 90168da7b8122..0000000000000 --- a/.devcontainer/devcontainer.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "name": "Substrate Node template", - "dockerFile": "Dockerfile", - "settings": { - "terminal.integrated.shell.linux": "/bin/bash", - "lldb.executable": "/usr/bin/lldb" - }, - "extensions": [ - "rust-lang.rust", - "bungcip.better-toml", - "vadimcn.vscode-lldb" - ], - "forwardPorts": [ - 3000, - 9944 - ], - "image": "paritytech/substrate-playground-template-node-template:sha-c9fda53" -} diff --git a/Cargo.lock b/Cargo.lock index e28096dd10b6b..5f1db767a7f44 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1280,11 +1280,13 @@ dependencies = [ [[package]] name = "frame-executive" version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c843800f05a7ad4653bc0db53a15e3d9bdd1cf14103e15c29e8aca200dbb1188" dependencies = [ "frame-support", "frame-system", + "hex-literal", + "pallet-balances", + "pallet-indices", + "pallet-transaction-payment", "parity-scale-codec", "serde", "sp-core", @@ -1292,6 +1294,7 @@ dependencies = [ "sp-runtime", "sp-std", "sp-tracing", + "sp-version", ] [[package]] @@ -3239,7 +3242,7 @@ dependencies = [ "frame-system", "jsonrpc-core", "node-template-runtime", - "pallet-assets 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "pallet-assets", "pallet-authority-discovery", "pallet-balances", "pallet-grandpa", @@ -3295,7 +3298,7 @@ dependencies = [ "frame-system-benchmarking", "frame-system-rpc-runtime-api", "hex-literal", 
- "pallet-assets 2.0.0", + "pallet-assets", "pallet-authorship", "pallet-babe", "pallet-balances", @@ -3488,18 +3491,6 @@ dependencies = [ "sp-std", ] -[[package]] -name = "pallet-assets" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee8096df10f954c2c55886ea0ba39361a0dfc0994479684b2f0eb87116deb825" -dependencies = [ - "frame-support", - "frame-system", - "parity-scale-codec", - "sp-runtime", -] - [[package]] name = "pallet-authority-discovery" version = "2.0.0" @@ -3614,6 +3605,23 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-indices" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d234bf46076a835b473a987f089299ffa3efd961a92b5be9384cc280fcc8c8f" +dependencies = [ + "frame-support", + "frame-system", + "parity-scale-codec", + "serde", + "sp-core", + "sp-io", + "sp-keyring", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-offences" version = "2.0.0" @@ -3728,8 +3736,6 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccddd55b713f541dff6ccf063cc7ddbc4fc41e92a9fdad8ec9562a0e3b465016" dependencies = [ "frame-benchmarking", "frame-support", @@ -3737,6 +3743,7 @@ dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", "serde", + "sp-core", "sp-inherents", "sp-io", "sp-runtime", @@ -3801,7 +3808,7 @@ version = "0.1.0" dependencies = [ "frame-support", "frame-system", - "pallet-assets 2.0.0", + "pallet-assets", "parity-scale-codec", "sp-core", "sp-io", @@ -4904,10 +4911,10 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bee59dc560f30e72ee95c224e3e75299b53b619e659a38af9db2639803c08ee" dependencies = [ + "log", "parity-scale-codec", + "rand 0.7.3", "sc-client-api", "sp-api", "sp-block-builder", @@ 
-5082,8 +5089,6 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "075c3826f181c49723215636f1b840b22c00e9be2acbaea21a86121b4dddb709" dependencies = [ "derive_more", "fork-tree", @@ -5356,8 +5361,6 @@ dependencies = [ [[package]] name = "sc-network" version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9e58ccd69ea8dd0c1e1d98e5e7ed2969aaf14d45dcf98416c679a968e752850" dependencies = [ "async-std", "async-trait", @@ -5556,8 +5559,6 @@ dependencies = [ [[package]] name = "sc-service" version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e04b2096d7dac26c52656cd2c85bc208d2ca3316ea2185fd775763d558a980da" dependencies = [ "derive_more", "directories", @@ -6875,8 +6876,6 @@ dependencies = [ [[package]] name = "substrate-frame-rpc-system" version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44e6202803178f25f71a3218a69341289d38c1369cc63e78dfe51577599163f7" dependencies = [ "frame-system-rpc-runtime-api", "futures 0.3.5", @@ -6887,12 +6886,14 @@ dependencies = [ "parity-scale-codec", "sc-client-api", "sc-rpc-api", + "sc-transaction-pool", "serde", "sp-api", "sp-block-builder", "sp-blockchain", "sp-core", "sp-runtime", + "sp-tracing", "sp-transaction-pool", ] diff --git a/Cargo.toml b/Cargo.toml index d77ff631396d5..23b94c592f58e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,3 +7,10 @@ members = [ 'pallets/template', 'runtime', ] + +[patch.crates-io] +sc-block-builder = { default-features = false, version = '0.8.0', path = './client/block-builder' } +sc-service = { default-features = false, version = '0.8.0', path = './client/service' } +sc-consensus-babe = { default-features = false, version = '0.8.0', path = './client/consensus/babe' } +sc-network = { default-features = false, version = '0.8.0', path = 
'./client/network' } +pallet-timestamp = { path = './pallets/timestamp', default-features = false, version = '2.0.0' } diff --git a/client/block-builder/Cargo.toml b/client/block-builder/Cargo.toml new file mode 100644 index 0000000000000..7b9b354523f30 --- /dev/null +++ b/client/block-builder/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "sc-block-builder" +version = "0.8.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Substrate block builder" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + + +[dependencies] +log = "0.4.8" +sp-state-machine = "0.8.0" +sp-runtime = "2.0.0" +sp-api = "2.0.0" +sp-consensus = "0.8.0" +sp-blockchain = "2.0.0" +sp-core = "2.0.0" +sp-block-builder = "2.0.0" +sp-inherents = "2.0.0" +sc-client-api = "2.0.0" +codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } +rand = "0.7.3" + +[dev-dependencies] +#substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } +sp-trie = "2.0.0" diff --git a/client/block-builder/README.md b/client/block-builder/README.md new file mode 100644 index 0000000000000..c691f6692abff --- /dev/null +++ b/client/block-builder/README.md @@ -0,0 +1,9 @@ +Substrate block builder + +This crate provides the [`BlockBuilder`] utility and the corresponding runtime api +[`BlockBuilder`](sp_block_builder::BlockBuilder).Error + +The block builder utility is used in the node as an abstraction over the runtime api to +initialize a block, to push extrinsics and to finalize a block. 
+ +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs new file mode 100644 index 0000000000000..d0c994f103032 --- /dev/null +++ b/client/block-builder/src/lib.rs @@ -0,0 +1,353 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Substrate block builder +//! +//! This crate provides the [`BlockBuilder`] utility and the corresponding runtime api +//! [`BlockBuilder`](sp_block_builder::BlockBuilder).Error +//! +//! The block builder utility is used in the node as an abstraction over the runtime api to +//! initialize a block, to push extrinsics and to finalize a block. 
+ +#![warn(missing_docs)] + +use sp_runtime::print; +use rand::{Rng, SeedableRng}; +use rand::seq::SliceRandom; +use rand::rngs::StdRng; +use log::{info}; +use codec::Encode; + +use sp_runtime::{ + generic::BlockId, + traits::{BlakeTwo256, Header as HeaderT, Hash, Block as BlockT, HashFor, DigestFor, NumberFor, One}, +}; +use sp_blockchain::{ApplyExtrinsicFailed, Error, Backend}; +use sp_core::ExecutionContext; +use sp_api::{ + Core, ApiExt, ApiErrorFor, ApiRef, ProvideRuntimeApi, StorageChanges, StorageProof, + TransactionOutcome, +}; +use sp_consensus::RecordProof; + +pub use sp_block_builder::BlockBuilder as BlockBuilderApi; + +use sc_client_api::backend; + +/// A block that was build by [`BlockBuilder`] plus some additional data. +/// +/// This additional data includes the `storage_changes`, these changes can be applied to the +/// backend to get the state of the block. Furthermore an optional `proof` is included which +/// can be used to proof that the build block contains the expected data. The `proof` will +/// only be set when proof recording was activated. +pub struct BuiltBlock>> { + /// The actual block that was build. + pub block: Block, + /// The changes that need to be applied to the backend to get the state of the build block. + pub storage_changes: StorageChanges, + /// An optional proof that was recorded while building the block. + pub proof: Option, +} + +impl>> BuiltBlock { + /// Convert into the inner values. + pub fn into_inner(self) -> (Block, StorageChanges, Option) { + (self.block, self.storage_changes, self.proof) + } +} + +/// Block builder provider +pub trait BlockBuilderProvider + where + Block: BlockT, + B: backend::Backend, + Self: Sized, + RA: ProvideRuntimeApi, +{ + /// Create a new block, built on top of `parent`. + /// + /// When proof recording is enabled, all accessed trie nodes are saved. 
+ /// These recorded trie nodes can be used by a third party to proof the + /// output of this block builder without having access to the full storage. + fn new_block_at>( + &self, + parent: &BlockId, + inherent_digests: DigestFor, + record_proof: R, + ) -> sp_blockchain::Result>; + + /// Create a new block, built on the head of the chain. + fn new_block( + &self, + inherent_digests: DigestFor, + ) -> sp_blockchain::Result>; +} + +/// Utility for building new (valid) blocks from a stream of extrinsics. +pub struct BlockBuilder<'a, Block: BlockT, A: ProvideRuntimeApi, B> { + extrinsics: Vec, + api: ApiRef<'a, A::Api>, + block_id: BlockId, + parent_hash: Block::Hash, + backend: &'a B, +} + +impl<'a, Block, A, B> BlockBuilder<'a, Block, A, B> +where + Block: BlockT, + A: ProvideRuntimeApi + 'a, + A::Api: BlockBuilderApi + + ApiExt>, + B: backend::Backend, +{ + /// Create a new instance of builder based on the given `parent_hash` and `parent_number`. + /// + /// While proof recording is enabled, all accessed trie nodes are saved. + /// These recorded trie nodes can be used by a third party to prove the + /// output of this block builder without having access to the full storage. + pub fn new( + api: &'a A, + parent_hash: Block::Hash, + parent_number: NumberFor, + record_proof: RecordProof, + inherent_digests: DigestFor, + backend: &'a B, + ) -> Result> { + let header = <::Header as HeaderT>::new( + parent_number + One::one(), + Default::default(), + Default::default(), + parent_hash, + inherent_digests, + ); + + let mut api = api.runtime_api(); + + if record_proof.yes() { + api.record_proof(); + } + + let block_id = BlockId::Hash(parent_hash); + + info!("Api call to initialize the block"); + api.initialize_block_with_context( + &block_id, ExecutionContext::BlockConstruction, &header, + )?; + + Ok(Self { + parent_hash, + extrinsics: Vec::new(), + api, + block_id, + backend, + }) + } + + /// Push onto the block's list of extrinsics. 
+ /// + /// This will ensure the extrinsic can be validly executed (by executing it). + pub fn push(&mut self, xt: ::Extrinsic) -> Result<(), ApiErrorFor> { + let block_id = &self.block_id; + let extrinsics = &mut self.extrinsics; + + //FIXME add test execution with state rejection + info!("Pushing transactions without execution"); + extrinsics.push(xt); + Ok(()) + + // info!("Going to call api tx execution"); + // self.api.execute_in_transaction(|api| { + // match api.apply_extrinsic_with_context( + // block_id, + // ExecutionContext::BlockConstruction, + // xt.clone(), + // ) { + // Ok(Ok(_)) => { + // extrinsics.push(xt); + // TransactionOutcome::Commit(Ok(())) + // } + // Ok(Err(tx_validity)) => { + // TransactionOutcome::Rollback( + // Err(ApplyExtrinsicFailed::Validity(tx_validity).into()), + // ) + // }, + // Err(e) => TransactionOutcome::Rollback(Err(e)), + // } + // }) + } + + /// Consume the builder to build a valid `Block` containing all pushed extrinsics. + /// + /// Returns the build `Block`, the changes to the storage and an optional `StorageProof` + /// supplied by `self.api`, combined as [`BuiltBlock`]. + /// The storage proof will be `Some(_)` when proof recording was enabled. 
+ pub fn build(mut self) -> Result< + BuiltBlock>, + ApiErrorFor + > { + let block_id = &self.block_id; + + let mut extrinsics = self.extrinsics.clone(); + let parent_hash = self.parent_hash; + let extrinsics_hash = BlakeTwo256::hash(&extrinsics.encode()); + + //FIXME + match self.backend.blockchain().body(BlockId::Hash(parent_hash)).unwrap() { + Some(mut previous_block_extrinsics) => { + if previous_block_extrinsics.is_empty() { + info!("No extrinsics found for previous block"); + extrinsics.into_iter().for_each(|xt| { + self.api.execute_in_transaction(|api| { + match api.apply_extrinsic_with_context( + block_id, + ExecutionContext::BlockConstruction, + xt.clone(), + ) { + Ok(Ok(_)) => { + TransactionOutcome::Commit(()) + } + Ok(Err(tx_validity)) => { + TransactionOutcome::Rollback(()) + }, + Err(e) => TransactionOutcome::Rollback(()), + } + }) + }); + } else { + info!("transaction count {}", previous_block_extrinsics.len()); + + let mut rng: StdRng = SeedableRng::from_seed(extrinsics_hash.to_fixed_bytes()); + previous_block_extrinsics.shuffle(&mut rng); + + // self.backend.revert(1.into(), false); + info!("transaction execution after reversion"); + previous_block_extrinsics.into_iter().for_each(|xt| { + self.api.execute_in_transaction(|api| { + match api.apply_extrinsic_with_context( + block_id, + ExecutionContext::BlockConstruction, + xt.clone(), + ) { + Ok(Ok(_)) => { + TransactionOutcome::Commit(()) + } + Ok(Err(tx_validity)) => { + TransactionOutcome::Rollback(()) + }, + Err(e) => TransactionOutcome::Rollback(()), + } + }) + }); + } + + }, + None => { + info!("No extrinsics found for previous block"); + }, + } + + info!("Finalizing block"); + let header = self.api.finalize_block_with_context( + &self.block_id, ExecutionContext::BlockConstruction + )?; + + debug_assert_eq!( + header.extrinsics_root().clone(), + HashFor::::ordered_trie_root( + self.extrinsics.iter().map(Encode::encode).collect(), + ), + ); + + let proof = self.api.extract_proof(); + + let state 
= self.backend.state_at(self.block_id)?; + let changes_trie_state = backend::changes_tries_state_at_block( + &self.block_id, + self.backend.changes_trie_storage(), + )?; + + let storage_changes = self.api.into_storage_changes( + &state, + changes_trie_state.as_ref(), + parent_hash, + )?; + + Ok(BuiltBlock { + block: ::new(header, self.extrinsics), + storage_changes, + proof, + }) + } + + /// Create the inherents for the block. + /// + /// Returns the inherents created by the runtime or an error if something failed. + pub fn create_inherents( + &mut self, + inherent_data: sp_inherents::InherentData, + ) -> Result, ApiErrorFor> { + let block_id = self.block_id; + self.api.execute_in_transaction(move |api| { + // `create_inherents` should not change any state, to ensure this we always rollback + // the transaction. + TransactionOutcome::Rollback(api.inherent_extrinsics_with_context( + &block_id, + ExecutionContext::BlockConstruction, + inherent_data + )) + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use sp_blockchain::HeaderBackend; + use sp_core::Blake2Hasher; + use sp_state_machine::Backend; + use substrate_test_runtime_client::{DefaultTestClientBuilderExt, TestClientBuilderExt}; + + #[test] + fn block_building_storage_proof_does_not_include_runtime_by_default() { + let builder = substrate_test_runtime_client::TestClientBuilder::new(); + let backend = builder.backend(); + let client = builder.build(); + + let block = BlockBuilder::new( + &client, + client.info().best_hash, + client.info().best_number, + RecordProof::Yes, + Default::default(), + &*backend, + ).unwrap().build().unwrap(); + + let proof = block.proof.expect("Proof is build on request"); + + let backend = sp_state_machine::create_proof_check_backend::( + block.storage_changes.transaction_storage_root, + proof, + ).unwrap(); + + assert!( + backend.storage(&sp_core::storage::well_known_keys::CODE) + .unwrap_err() + .contains("Database missing expected key"), + ); + } +} diff --git 
a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml new file mode 100644 index 0000000000000..da520745ac9c2 --- /dev/null +++ b/client/consensus/babe/Cargo.toml @@ -0,0 +1,68 @@ +[package] +name = "sc-consensus-babe" +version = "0.8.0" +authors = ["Parity Technologies "] +description = "BABE consensus algorithm for substrate" +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +documentation = "https://docs.rs/sc-consensus-babe" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } +sp-consensus-babe = "0.8.0" +sp-core = "2.0.0" +sp-application-crypto = "2.0.0" +num-bigint = "0.2.3" +num-rational = "0.2.2" +num-traits = "0.2.8" +serde = { version = "1.0.104", features = ["derive"] } +sp-version = "2.0.0" +sp-io = "2.0.0" +sp-inherents = "2.0.0" +sp-timestamp = "2.0.0" +sc-telemetry = "2.0.0" +sc-keystore = "2.0.0" +sc-client-api = "2.0.0" +sc-consensus-epochs = "0.8.0" +sp-api = "2.0.0" +sp-block-builder = "2.0.0" +sp-blockchain = "2.0.0" +sp-consensus = "0.8.0" +sp-consensus-vrf = "0.8.0" +sc-consensus-uncles = "0.8.0" +sc-consensus-slots = "0.8.0" +sp-runtime = "2.0.0" +sp-utils = "2.0.0" +fork-tree = "2.0.0" +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0"} +futures = "0.3.4" +futures-timer = "3.0.1" +parking_lot = "0.10.0" +log = "0.4.8" +schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated"] } +rand = "0.7.2" +merlin = "2.0" +pdqselect = "0.1.0" +derive_more = "0.99.2" +retain_mut = "0.1.1" + +[dev-dependencies] +sp-keyring = "2.0.0" +sp-tracing = "2.0.0" +sc-executor = "0.8.0" +sc-network = "0.8.0" +sc-network-test = "0.8.0" +sc-service = { version = "0.8.0", default-features = false, path = "../../service" } 
+#substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } +sc-block-builder = { version = "0.8.0", path = "../../block-builder" } +rand_chacha = "0.2.2" +tempfile = "3.1.0" + +[features] +test-helpers = [] diff --git a/client/consensus/babe/README.md b/client/consensus/babe/README.md new file mode 100644 index 0000000000000..faba3948ed715 --- /dev/null +++ b/client/consensus/babe/README.md @@ -0,0 +1,48 @@ +# BABE (Blind Assignment for Blockchain Extension) + +BABE is a slot-based block production mechanism which uses a VRF PRNG to +randomly perform the slot allocation. On every slot, all the authorities +generate a new random number with the VRF function and if it is lower than a +given threshold (which is proportional to their weight/stake) they have a +right to produce a block. The proof of the VRF function execution will be +used by other peer to validate the legitimacy of the slot claim. + +The engine is also responsible for collecting entropy on-chain which will be +used to seed the given VRF PRNG. An epoch is a contiguous number of slots +under which we will be using the same authority set. During an epoch all VRF +outputs produced as a result of block production will be collected on an +on-chain randomness pool. Epoch changes are announced one epoch in advance, +i.e. when ending epoch N, we announce the parameters (randomness, +authorities, etc.) for epoch N+2. + +Since the slot assignment is randomized, it is possible that a slot is +assigned to multiple validators in which case we will have a temporary fork, +or that a slot is assigned to no validator in which case no block is +produced. Which means that block times are not deterministic. + +The protocol has a parameter `c` [0, 1] for which `1 - c` is the probability +of a slot being empty. The choice of this parameter affects the security of +the protocol relating to maximum tolerable network delays. 
+ +In addition to the VRF-based slot assignment described above, which we will +call primary slots, the engine also supports a deterministic secondary slot +assignment. Primary slots take precedence over secondary slots, when +authoring the node starts by trying to claim a primary slot and falls back +to a secondary slot claim attempt. The secondary slot assignment is done +by picking the authority at index: + +`blake2_256(epoch_randomness ++ slot_number) % authorities_len`. + +The secondary slots supports either a `SecondaryPlain` or `SecondaryVRF` +variant. Comparing with `SecondaryPlain` variant, the `SecondaryVRF` variant +generates an additional VRF output. The output is not included in beacon +randomness, but can be consumed by parachains. + +The fork choice rule is weight-based, where weight equals the number of +primary blocks in the chain. We will pick the heaviest chain (more primary +blocks) and will go with the longest one in case of a tie. + +An in-depth description and analysis of the protocol can be found here: + + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/consensus/babe/rpc/Cargo.toml b/client/consensus/babe/rpc/Cargo.toml new file mode 100644 index 0000000000000..9e2b64142330e --- /dev/null +++ b/client/consensus/babe/rpc/Cargo.toml @@ -0,0 +1,39 @@ +[package] +name = "sc-consensus-babe-rpc" +version = "0.8.0" +authors = ["Parity Technologies "] +description = "RPC extensions for the BABE consensus algorithm" +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +sc-consensus-babe = { version = "0.8.0", path = "../" } +sc-rpc-api = { version = "0.8.0", path = "../../../rpc-api" } +jsonrpc-core = "15.0.0" +jsonrpc-core-client = "15.0.0" +jsonrpc-derive = 
"15.0.0" +sp-consensus-babe = { version = "0.8.0", path = "../../../../primitives/consensus/babe" } +serde = { version = "1.0.104", features=["derive"] } +sp-blockchain = { version = "2.0.0", path = "../../../../primitives/blockchain" } +sp-runtime = { version = "2.0.0", path = "../../../../primitives/runtime" } +sc-consensus-epochs = { version = "0.8.0", path = "../../epochs" } +futures = { version = "0.3.4", features = ["compat"] } +derive_more = "0.99.2" +sp-api = { version = "2.0.0", path = "../../../../primitives/api" } +sp-consensus = { version = "0.8.0", path = "../../../../primitives/consensus/common" } +sp-core = { version = "2.0.0", path = "../../../../primitives/core" } +sp-application-crypto = { version = "2.0.0", path = "../../../../primitives/application-crypto" } +sc-keystore = { version = "2.0.0", path = "../../../keystore" } + +[dev-dependencies] +sc-consensus = { version = "0.8.0", path = "../../../consensus/common" } +serde_json = "1.0.50" +sp-keyring = { version = "2.0.0", path = "../../../../primitives/keyring" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../../../test-utils/runtime/client" } +tempfile = "3.1.0" diff --git a/client/consensus/babe/rpc/README.md b/client/consensus/babe/rpc/README.md new file mode 100644 index 0000000000000..e76dd3dc67f81 --- /dev/null +++ b/client/consensus/babe/rpc/README.md @@ -0,0 +1,3 @@ +RPC api for babe. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/consensus/babe/rpc/src/lib.rs b/client/consensus/babe/rpc/src/lib.rs new file mode 100644 index 0000000000000..652f4f00baac2 --- /dev/null +++ b/client/consensus/babe/rpc/src/lib.rs @@ -0,0 +1,308 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! RPC api for babe. + +use sc_consensus_babe::{Epoch, authorship, Config}; +use futures::{FutureExt as _, TryFutureExt as _}; +use jsonrpc_core::{ + Error as RpcError, + futures::future as rpc_future, +}; +use jsonrpc_derive::rpc; +use sc_consensus_epochs::{descendent_query, Epoch as EpochT, SharedEpochChanges}; +use sp_consensus_babe::{ + AuthorityId, + BabeApi as BabeRuntimeApi, + digests::PreDigest, +}; +use serde::{Deserialize, Serialize}; +use sp_core::{ + crypto::Public, + traits::BareCryptoStore, +}; +use sp_application_crypto::AppKey; +use sc_keystore::KeyStorePtr; +use sc_rpc_api::DenyUnsafe; +use sp_api::{ProvideRuntimeApi, BlockId}; +use sp_runtime::traits::{Block as BlockT, Header as _}; +use sp_consensus::{SelectChain, Error as ConsensusError}; +use sp_blockchain::{HeaderBackend, HeaderMetadata, Error as BlockChainError}; +use std::{collections::HashMap, sync::Arc}; + +type FutureResult = Box + Send>; + +/// Provides rpc methods for interacting with Babe. +#[rpc] +pub trait BabeApi { + /// Returns data about which slots (primary or secondary) can be claimed in the current epoch + /// with the keys in the keystore. 
+ #[rpc(name = "babe_epochAuthorship")] + fn epoch_authorship(&self) -> FutureResult>; +} + +/// Implements the BabeRpc trait for interacting with Babe. +pub struct BabeRpcHandler { + /// shared reference to the client. + client: Arc, + /// shared reference to EpochChanges + shared_epoch_changes: SharedEpochChanges, + /// shared reference to the Keystore + keystore: KeyStorePtr, + /// config (actually holds the slot duration) + babe_config: Config, + /// The SelectChain strategy + select_chain: SC, + /// Whether to deny unsafe calls + deny_unsafe: DenyUnsafe, +} + +impl BabeRpcHandler { + /// Creates a new instance of the BabeRpc handler. + pub fn new( + client: Arc, + shared_epoch_changes: SharedEpochChanges, + keystore: KeyStorePtr, + babe_config: Config, + select_chain: SC, + deny_unsafe: DenyUnsafe, + ) -> Self { + Self { + client, + shared_epoch_changes, + keystore, + babe_config, + select_chain, + deny_unsafe, + } + } +} + +impl BabeApi for BabeRpcHandler + where + B: BlockT, + C: ProvideRuntimeApi + HeaderBackend + HeaderMetadata + 'static, + C::Api: BabeRuntimeApi, + SC: SelectChain + Clone + 'static, +{ + fn epoch_authorship(&self) -> FutureResult> { + if let Err(err) = self.deny_unsafe.check_if_safe() { + return Box::new(rpc_future::err(err.into())); + } + + let ( + babe_config, + keystore, + shared_epoch, + client, + select_chain, + ) = ( + self.babe_config.clone(), + self.keystore.clone(), + self.shared_epoch_changes.clone(), + self.client.clone(), + self.select_chain.clone(), + ); + let future = async move { + let header = select_chain.best_chain().map_err(Error::Consensus)?; + let epoch_start = client.runtime_api() + .current_epoch_start(&BlockId::Hash(header.hash())) + .map_err(|err| { + Error::StringError(format!("{:?}", err)) + })?; + let epoch = epoch_data(&shared_epoch, &client, &babe_config, epoch_start, &select_chain)?; + let (epoch_start, epoch_end) = (epoch.start_slot(), epoch.end_slot()); + + let mut claims: HashMap = HashMap::new(); + + let 
keys = { + let ks = keystore.read(); + epoch.authorities.iter() + .enumerate() + .filter_map(|(i, a)| { + if ks.has_keys(&[(a.0.to_raw_vec(), AuthorityId::ID)]) { + Some((a.0.clone(), i)) + } else { + None + } + }) + .collect::>() + }; + + for slot_number in epoch_start..epoch_end { + if let Some((claim, key)) = + authorship::claim_slot_using_keys(slot_number, &epoch, &keystore, &keys) + { + match claim { + PreDigest::Primary { .. } => { + claims.entry(key).or_default().primary.push(slot_number); + } + PreDigest::SecondaryPlain { .. } => { + claims.entry(key).or_default().secondary.push(slot_number); + } + PreDigest::SecondaryVRF { .. } => { + claims.entry(key).or_default().secondary_vrf.push(slot_number); + }, + }; + } + } + + Ok(claims) + }.boxed(); + + Box::new(future.compat()) + } +} + +/// Holds information about the `slot_number`'s that can be claimed by a given key. +#[derive(Default, Debug, Deserialize, Serialize)] +pub struct EpochAuthorship { + /// the array of primary slots that can be claimed + primary: Vec, + /// the array of secondary slots that can be claimed + secondary: Vec, + /// The array of secondary VRF slots that can be claimed. + secondary_vrf: Vec, +} + +/// Errors encountered by the RPC +#[derive(Debug, derive_more::Display, derive_more::From)] +pub enum Error { + /// Consensus error + Consensus(ConsensusError), + /// Errors that can be formatted as a String + StringError(String) +} + +impl From for jsonrpc_core::Error { + fn from(error: Error) -> Self { + jsonrpc_core::Error { + message: format!("{}", error), + code: jsonrpc_core::ErrorCode::ServerError(1234), + data: None, + } + } +} + +/// fetches the epoch data for a given slot_number. 
+fn epoch_data( + epoch_changes: &SharedEpochChanges, + client: &Arc, + babe_config: &Config, + slot_number: u64, + select_chain: &SC, +) -> Result + where + B: BlockT, + C: HeaderBackend + HeaderMetadata + 'static, + SC: SelectChain, +{ + let parent = select_chain.best_chain()?; + epoch_changes.lock().epoch_data_for_child_of( + descendent_query(&**client), + &parent.hash(), + parent.number().clone(), + slot_number, + |slot| Epoch::genesis(&babe_config, slot), + ) + .map_err(|e| Error::Consensus(ConsensusError::ChainLookup(format!("{:?}", e))))? + .ok_or(Error::Consensus(ConsensusError::InvalidAuthoritiesSet)) +} + +#[cfg(test)] +mod tests { + use super::*; + use substrate_test_runtime_client::{ + runtime::Block, + Backend, + DefaultTestClientBuilderExt, + TestClient, + TestClientBuilderExt, + TestClientBuilder, + }; + use sp_application_crypto::AppPair; + use sp_keyring::Ed25519Keyring; + use sc_keystore::Store; + + use std::sync::Arc; + use sc_consensus_babe::{Config, block_import, AuthorityPair}; + use jsonrpc_core::IoHandler; + + /// creates keystore backed by a temp file + fn create_temp_keystore(authority: Ed25519Keyring) -> (KeyStorePtr, tempfile::TempDir) { + let keystore_path = tempfile::tempdir().expect("Creates keystore path"); + let keystore = Store::open(keystore_path.path(), None).expect("Creates keystore"); + keystore.write().insert_ephemeral_from_seed::

(&authority.to_seed()) + .expect("Creates authority key"); + + (keystore, keystore_path) + } + + fn test_babe_rpc_handler( + deny_unsafe: DenyUnsafe + ) -> BabeRpcHandler> { + let builder = TestClientBuilder::new(); + let (client, longest_chain) = builder.build_with_longest_chain(); + let client = Arc::new(client); + let config = Config::get_or_compute(&*client).expect("config available"); + let (_, link) = block_import( + config.clone(), + client.clone(), + client.clone(), + ).expect("can initialize block-import"); + + let epoch_changes = link.epoch_changes().clone(); + let keystore = create_temp_keystore::(Ed25519Keyring::Alice).0; + + BabeRpcHandler::new( + client.clone(), + epoch_changes, + keystore, + config, + longest_chain, + deny_unsafe, + ) + } + + #[test] + fn epoch_authorship_works() { + let handler = test_babe_rpc_handler(DenyUnsafe::No); + let mut io = IoHandler::new(); + + io.extend_with(BabeApi::to_delegate(handler)); + let request = r#"{"jsonrpc":"2.0","method":"babe_epochAuthorship","params": [],"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":{"5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY":{"primary":[0],"secondary":[1,2,4],"secondary_vrf":[]}},"id":1}"#; + + assert_eq!(Some(response.into()), io.handle_request_sync(request)); + } + + #[test] + fn epoch_authorship_is_unsafe() { + let handler = test_babe_rpc_handler(DenyUnsafe::Yes); + let mut io = IoHandler::new(); + + io.extend_with(BabeApi::to_delegate(handler)); + let request = r#"{"jsonrpc":"2.0","method":"babe_epochAuthorship","params": [],"id":1}"#; + + let response = io.handle_request_sync(request).unwrap(); + let mut response: serde_json::Value = serde_json::from_str(&response).unwrap(); + let error: RpcError = serde_json::from_value(response["error"].take()).unwrap(); + + assert_eq!(error, RpcError::method_not_found()) + } +} diff --git a/client/consensus/babe/src/authorship.rs b/client/consensus/babe/src/authorship.rs new file mode 100644 index 0000000000000..682e04e380d7c --- 
/dev/null +++ b/client/consensus/babe/src/authorship.rs @@ -0,0 +1,325 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! BABE authority selection and slot claiming. + +use sp_application_crypto::AppKey; +use sp_consensus_babe::{ + BABE_VRF_PREFIX, + AuthorityId, BabeAuthorityWeight, + SlotNumber, + make_transcript, + make_transcript_data, +}; +use sp_consensus_babe::digests::{ + PreDigest, PrimaryPreDigest, SecondaryPlainPreDigest, SecondaryVRFPreDigest, +}; +use sp_consensus_vrf::schnorrkel::{VRFOutput, VRFProof}; +use sp_core::{U256, blake2_256, crypto::Public, traits::BareCryptoStore}; +use codec::Encode; +use schnorrkel::{ + keys::PublicKey, + vrf::VRFInOut, +}; +use sc_keystore::KeyStorePtr; +use super::Epoch; + +/// Calculates the primary selection threshold for a given authority, taking +/// into account `c` (`1 - c` represents the probability of a slot being empty). 
+pub(super) fn calculate_primary_threshold( + c: (u64, u64), + authorities: &[(AuthorityId, BabeAuthorityWeight)], + authority_index: usize, +) -> u128 { + use num_bigint::BigUint; + use num_rational::BigRational; + use num_traits::{cast::ToPrimitive, identities::One}; + + let c = c.0 as f64 / c.1 as f64; + + let theta = + authorities[authority_index].1 as f64 / + authorities.iter().map(|(_, weight)| weight).sum::() as f64; + + assert!(theta > 0.0, "authority with weight 0."); + + // NOTE: in the equation `p = 1 - (1 - c)^theta` the value of `p` is always + // capped by `c`. For all pratical purposes `c` should always be set to a + // value < 0.5, as such in the computations below we should never be near + // edge cases like `0.999999`. + + let p = BigRational::from_float(1f64 - (1f64 - c).powf(theta)).expect( + "returns None when the given value is not finite; \ + c is a configuration parameter defined in (0, 1]; \ + theta must be > 0 if the given authority's weight is > 0; \ + theta represents the validator's relative weight defined in (0, 1]; \ + powf will always return values in (0, 1] given both the \ + base and exponent are in that domain; \ + qed.", + ); + + let numer = p.numer().to_biguint().expect( + "returns None when the given value is negative; \ + p is defined as `1 - n` where n is defined in (0, 1]; \ + p must be a value in [0, 1); \ + qed." + ); + + let denom = p.denom().to_biguint().expect( + "returns None when the given value is negative; \ + p is defined as `1 - n` where n is defined in (0, 1]; \ + p must be a value in [0, 1); \ + qed." 
+ ); + + ((BigUint::one() << 128) * numer / denom).to_u128().expect( + "returns None if the underlying value cannot be represented with 128 bits; \ + we start with 2^128 which is one more than can be represented with 128 bits; \ + we multiple by p which is defined in [0, 1); \ + the result must be lower than 2^128 by at least one and thus representable with 128 bits; \ + qed.", + ) +} + +/// Returns true if the given VRF output is lower than the given threshold, +/// false otherwise. +pub(super) fn check_primary_threshold(inout: &VRFInOut, threshold: u128) -> bool { + u128::from_le_bytes(inout.make_bytes::<[u8; 16]>(BABE_VRF_PREFIX)) < threshold +} + +/// Get the expected secondary author for the given slot and with given +/// authorities. This should always assign the slot to some authority unless the +/// authorities list is empty. +pub(super) fn secondary_slot_author( + slot_number: u64, + authorities: &[(AuthorityId, BabeAuthorityWeight)], + randomness: [u8; 32], +) -> Option<&AuthorityId> { + if authorities.is_empty() { + return None; + } + + let rand = U256::from((randomness, slot_number).using_encoded(blake2_256)); + + let authorities_len = U256::from(authorities.len()); + let idx = rand % authorities_len; + + let expected_author = authorities.get(idx.as_u32() as usize) + .expect("authorities not empty; index constrained to list length; \ + this is a valid index; qed"); + + Some(&expected_author.0) +} + +/// Claim a secondary slot if it is our turn to propose, returning the +/// pre-digest to use when authoring the block, or `None` if it is not our turn +/// to propose. +fn claim_secondary_slot( + slot_number: SlotNumber, + epoch: &Epoch, + keys: &[(AuthorityId, usize)], + keystore: &KeyStorePtr, + author_secondary_vrf: bool, +) -> Option<(PreDigest, AuthorityId)> { + let Epoch { authorities, randomness, epoch_index, .. 
} = epoch; + + if authorities.is_empty() { + return None; + } + + let expected_author = super::authorship::secondary_slot_author( + slot_number, + authorities, + *randomness, + )?; + + for (authority_id, authority_index) in keys { + if authority_id == expected_author { + let pre_digest = if author_secondary_vrf { + let transcript_data = super::authorship::make_transcript_data( + randomness, + slot_number, + *epoch_index, + ); + let result = keystore.read().sr25519_vrf_sign( + AuthorityId::ID, + authority_id.as_ref(), + transcript_data, + ); + if let Ok(signature) = result { + Some(PreDigest::SecondaryVRF(SecondaryVRFPreDigest { + slot_number, + vrf_output: VRFOutput(signature.output), + vrf_proof: VRFProof(signature.proof), + authority_index: *authority_index as u32, + })) + } else { + None + } + } else if keystore.read().has_keys(&[(authority_id.to_raw_vec(), AuthorityId::ID)]) { + Some(PreDigest::SecondaryPlain(SecondaryPlainPreDigest { + slot_number, + authority_index: *authority_index as u32, + })) + } else { + None + }; + + if let Some(pre_digest) = pre_digest { + return Some((pre_digest, authority_id.clone())); + } + } + } + + None +} + +/// Tries to claim the given slot number. This method starts by trying to claim +/// a primary VRF based slot. If we are not able to claim it, then if we have +/// secondary slots enabled for the given epoch, we will fallback to trying to +/// claim a secondary slot. +pub fn claim_slot( + slot_number: SlotNumber, + epoch: &Epoch, + keystore: &KeyStorePtr, +) -> Option<(PreDigest, AuthorityId)> { + let authorities = epoch.authorities.iter() + .enumerate() + .map(|(index, a)| (a.0.clone(), index)) + .collect::>(); + claim_slot_using_keys(slot_number, epoch, keystore, &authorities) +} + +/// Like `claim_slot`, but allows passing an explicit set of key pairs. Useful if we intend +/// to make repeated calls for different slots using the same key pairs. 
+pub fn claim_slot_using_keys( + slot_number: SlotNumber, + epoch: &Epoch, + keystore: &KeyStorePtr, + keys: &[(AuthorityId, usize)], +) -> Option<(PreDigest, AuthorityId)> { + claim_primary_slot(slot_number, epoch, epoch.config.c, keystore, &keys) + .or_else(|| { + if epoch.config.allowed_slots.is_secondary_plain_slots_allowed() || + epoch.config.allowed_slots.is_secondary_vrf_slots_allowed() + { + claim_secondary_slot( + slot_number, + &epoch, + keys, + keystore, + epoch.config.allowed_slots.is_secondary_vrf_slots_allowed(), + ) + } else { + None + } + }) +} + +/// Claim a primary slot if it is our turn. Returns `None` if it is not our turn. +/// This hashes the slot number, epoch, genesis hash, and chain randomness into +/// the VRF. If the VRF produces a value less than `threshold`, it is our turn, +/// so it returns `Some(_)`. Otherwise, it returns `None`. +fn claim_primary_slot( + slot_number: SlotNumber, + epoch: &Epoch, + c: (u64, u64), + keystore: &KeyStorePtr, + keys: &[(AuthorityId, usize)], +) -> Option<(PreDigest, AuthorityId)> { + let Epoch { authorities, randomness, epoch_index, .. } = epoch; + + for (authority_id, authority_index) in keys { + let transcript = super::authorship::make_transcript( + randomness, + slot_number, + *epoch_index + ); + let transcript_data = super::authorship::make_transcript_data( + randomness, + slot_number, + *epoch_index + ); + // Compute the threshold we will use. + // + // We already checked that authorities contains `key.public()`, so it can't + // be empty. Therefore, this division in `calculate_threshold` is safe. 
+ let threshold = super::authorship::calculate_primary_threshold(c, authorities, *authority_index); + + let result = keystore.read().sr25519_vrf_sign( + AuthorityId::ID, + authority_id.as_ref(), + transcript_data, + ); + if let Ok(signature) = result { + let public = PublicKey::from_bytes(&authority_id.to_raw_vec()).ok()?; + let inout = match signature.output.attach_input_hash(&public, transcript) { + Ok(inout) => inout, + Err(_) => continue, + }; + if super::authorship::check_primary_threshold(&inout, threshold) { + let pre_digest = PreDigest::Primary(PrimaryPreDigest { + slot_number, + vrf_output: VRFOutput(signature.output), + vrf_proof: VRFProof(signature.proof), + authority_index: *authority_index as u32, + }); + + return Some((pre_digest, authority_id.clone())); + } + } + } + + None +} + +#[cfg(test)] +mod tests { + use super::*; + use sp_core::{sr25519::Pair, crypto::Pair as _}; + use sp_consensus_babe::{AuthorityId, BabeEpochConfiguration, AllowedSlots}; + + #[test] + fn claim_secondary_plain_slot_works() { + let keystore = sc_keystore::Store::new_in_memory(); + let valid_public_key = dbg!(keystore.write().sr25519_generate_new( + AuthorityId::ID, + Some(sp_core::crypto::DEV_PHRASE), + ).unwrap()); + + let authorities = vec![ + (AuthorityId::from(Pair::generate().0.public()), 5), + (AuthorityId::from(Pair::generate().0.public()), 7), + ]; + + let mut epoch = Epoch { + epoch_index: 10, + start_slot: 0, + duration: 20, + authorities: authorities.clone(), + randomness: Default::default(), + config: BabeEpochConfiguration { + c: (3, 10), + allowed_slots: AllowedSlots::PrimaryAndSecondaryPlainSlots, + }, + }; + + assert!(claim_slot(10, &epoch, &keystore).is_none()); + + epoch.authorities.push((valid_public_key.clone().into(), 10)); + assert_eq!(claim_slot(10, &epoch, &keystore).unwrap().1, valid_public_key.into()); + } +} diff --git a/client/consensus/babe/src/aux_schema.rs b/client/consensus/babe/src/aux_schema.rs new file mode 100644 index 
0000000000000..74078b4ee7b8a --- /dev/null +++ b/client/consensus/babe/src/aux_schema.rs @@ -0,0 +1,213 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Schema for BABE epoch changes in the aux-db. + +use std::sync::Arc; +use parking_lot::Mutex; +use log::info; +use codec::{Decode, Encode}; + +use sc_client_api::backend::AuxStore; +use sp_blockchain::{Result as ClientResult, Error as ClientError}; +use sp_runtime::traits::Block as BlockT; +use sp_consensus_babe::{BabeBlockWeight, BabeGenesisConfiguration}; +use sc_consensus_epochs::{EpochChangesFor, SharedEpochChanges, migration::EpochChangesForV0}; +use crate::{Epoch, migration::EpochV0}; + +const BABE_EPOCH_CHANGES_VERSION: &[u8] = b"babe_epoch_changes_version"; +const BABE_EPOCH_CHANGES_KEY: &[u8] = b"babe_epoch_changes"; +const BABE_EPOCH_CHANGES_CURRENT_VERSION: u32 = 2; + +fn block_weight_key(block_hash: H) -> Vec { + (b"block_weight", block_hash).encode() +} + +fn load_decode(backend: &B, key: &[u8]) -> ClientResult> + where + B: AuxStore, + T: Decode, +{ + let corrupt = |e: codec::Error| { + ClientError::Backend(format!("BABE DB is corrupted. Decode error: {}", e.what())) + }; + match backend.get_aux(key)? 
{ + None => Ok(None), + Some(t) => T::decode(&mut &t[..]).map(Some).map_err(corrupt) + } +} + +/// Load or initialize persistent epoch change data from backend. +pub fn load_epoch_changes( + backend: &B, + config: &BabeGenesisConfiguration, +) -> ClientResult> { + let version = load_decode::<_, u32>(backend, BABE_EPOCH_CHANGES_VERSION)?; + + let maybe_epoch_changes = match version { + None => load_decode::<_, EpochChangesForV0>( + backend, + BABE_EPOCH_CHANGES_KEY, + )?.map(|v0| v0.migrate().map(|_, _, epoch| epoch.migrate(config))), + Some(1) => load_decode::<_, EpochChangesFor>( + backend, + BABE_EPOCH_CHANGES_KEY, + )?.map(|v1| v1.map(|_, _, epoch| epoch.migrate(config))), + Some(BABE_EPOCH_CHANGES_CURRENT_VERSION) => load_decode::<_, EpochChangesFor>( + backend, + BABE_EPOCH_CHANGES_KEY, + )?, + Some(other) => { + return Err(ClientError::Backend( + format!("Unsupported BABE DB version: {:?}", other) + )) + }, + }; + + let epoch_changes = Arc::new(Mutex::new(maybe_epoch_changes.unwrap_or_else(|| { + info!(target: "babe", + "👶 Creating empty BABE epoch changes on what appears to be first startup." + ); + EpochChangesFor::::default() + }))); + + // rebalance the tree after deserialization. this isn't strictly necessary + // since the tree is now rebalanced on every update operation. but since the + // tree wasn't rebalanced initially it's useful to temporarily leave it here + // to avoid having to wait until an import for rebalancing. + epoch_changes.lock().rebalance(); + + Ok(epoch_changes) +} + +/// Update the epoch changes on disk after a change. 
+pub(crate) fn write_epoch_changes( + epoch_changes: &EpochChangesFor, + write_aux: F, +) -> R where + F: FnOnce(&[(&'static [u8], &[u8])]) -> R, +{ + BABE_EPOCH_CHANGES_CURRENT_VERSION.using_encoded(|version| { + let encoded_epoch_changes = epoch_changes.encode(); + write_aux( + &[(BABE_EPOCH_CHANGES_KEY, encoded_epoch_changes.as_slice()), + (BABE_EPOCH_CHANGES_VERSION, version)], + ) + }) +} + +/// Write the cumulative chain-weight of a block ot aux storage. +pub(crate) fn write_block_weight( + block_hash: H, + block_weight: BabeBlockWeight, + write_aux: F, +) -> R where + F: FnOnce(&[(Vec, &[u8])]) -> R, +{ + let key = block_weight_key(block_hash); + block_weight.using_encoded(|s| + write_aux( + &[(key, s)], + ) + ) +} + +/// Load the cumulative chain-weight associated with a block. +pub(crate) fn load_block_weight( + backend: &B, + block_hash: H, +) -> ClientResult> { + load_decode(backend, block_weight_key(block_hash).as_slice()) +} + +#[cfg(test)] +mod test { + use super::*; + use crate::migration::EpochV0; + use fork_tree::ForkTree; + use substrate_test_runtime_client; + use sp_core::H256; + use sp_runtime::traits::NumberFor; + use sp_consensus_babe::{AllowedSlots, BabeGenesisConfiguration}; + use sc_consensus_epochs::{PersistedEpoch, PersistedEpochHeader, EpochHeader}; + use sp_consensus::Error as ConsensusError; + use sc_network_test::Block as TestBlock; + + #[test] + fn load_decode_from_v0_epoch_changes() { + let epoch = EpochV0 { + start_slot: 0, + authorities: vec![], + randomness: [0; 32], + epoch_index: 1, + duration: 100, + }; + let client = substrate_test_runtime_client::new(); + let mut v0_tree = ForkTree::, _>::new(); + v0_tree.import::<_, ConsensusError>( + Default::default(), + Default::default(), + PersistedEpoch::Regular(epoch), + &|_, _| Ok(false), // Test is single item only so this can be set to false. 
+ ).unwrap(); + + client.insert_aux( + &[(BABE_EPOCH_CHANGES_KEY, + &EpochChangesForV0::::from_raw(v0_tree).encode()[..])], + &[], + ).unwrap(); + + assert_eq!( + load_decode::<_, u32>(&client, BABE_EPOCH_CHANGES_VERSION).unwrap(), + None, + ); + + let epoch_changes = load_epoch_changes::( + &client, &BabeGenesisConfiguration { + slot_duration: 10, + epoch_length: 4, + c: (3, 10), + genesis_authorities: Vec::new(), + randomness: Default::default(), + allowed_slots: AllowedSlots::PrimaryAndSecondaryPlainSlots, + }, + ).unwrap(); + + assert!( + epoch_changes.lock() + .tree() + .iter() + .map(|(_, _, epoch)| epoch.clone()) + .collect::>() == + vec![PersistedEpochHeader::Regular(EpochHeader { + start_slot: 0, + end_slot: 100, + })], + ); // PersistedEpochHeader does not implement Debug, so we use assert! directly. + + write_epoch_changes::( + &epoch_changes.lock(), + |values| { + client.insert_aux(values, &[]).unwrap(); + }, + ); + + assert_eq!( + load_decode::<_, u32>(&client, BABE_EPOCH_CHANGES_VERSION).unwrap(), + Some(2), + ); + } +} diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs new file mode 100644 index 0000000000000..59c43c7d3e6f9 --- /dev/null +++ b/client/consensus/babe/src/lib.rs @@ -0,0 +1,1529 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! 
# BABE (Blind Assignment for Blockchain Extension) +//! +//! BABE is a slot-based block production mechanism which uses a VRF PRNG to +//! randomly perform the slot allocation. On every slot, all the authorities +//! generate a new random number with the VRF function and if it is lower than a +//! given threshold (which is proportional to their weight/stake) they have a +//! right to produce a block. The proof of the VRF function execution will be +//! used by other peer to validate the legitimacy of the slot claim. +//! +//! The engine is also responsible for collecting entropy on-chain which will be +//! used to seed the given VRF PRNG. An epoch is a contiguous number of slots +//! under which we will be using the same authority set. During an epoch all VRF +//! outputs produced as a result of block production will be collected on an +//! on-chain randomness pool. Epoch changes are announced one epoch in advance, +//! i.e. when ending epoch N, we announce the parameters (randomness, +//! authorities, etc.) for epoch N+2. +//! +//! Since the slot assignment is randomized, it is possible that a slot is +//! assigned to multiple validators in which case we will have a temporary fork, +//! or that a slot is assigned to no validator in which case no block is +//! produced. Which means that block times are not deterministic. +//! +//! The protocol has a parameter `c` [0, 1] for which `1 - c` is the probability +//! of a slot being empty. The choice of this parameter affects the security of +//! the protocol relating to maximum tolerable network delays. +//! +//! In addition to the VRF-based slot assignment described above, which we will +//! call primary slots, the engine also supports a deterministic secondary slot +//! assignment. Primary slots take precedence over secondary slots, when +//! authoring the node starts by trying to claim a primary slot and falls back +//! to a secondary slot claim attempt. The secondary slot assignment is done +//! 
by picking the authority at index: +//! +//! `blake2_256(epoch_randomness ++ slot_number) % authorities_len`. +//! +//! The secondary slots supports either a `SecondaryPlain` or `SecondaryVRF` +//! variant. Comparing with `SecondaryPlain` variant, the `SecondaryVRF` variant +//! generates an additional VRF output. The output is not included in beacon +//! randomness, but can be consumed by parachains. +//! +//! The fork choice rule is weight-based, where weight equals the number of +//! primary blocks in the chain. We will pick the heaviest chain (more primary +//! blocks) and will go with the longest one in case of a tie. +//! +//! An in-depth description and analysis of the protocol can be found here: +//! + +#![forbid(unsafe_code)] +#![warn(missing_docs)] +pub use sp_consensus_babe::{ + BabeApi, ConsensusLog, BABE_ENGINE_ID, SlotNumber, + BabeEpochConfiguration, BabeGenesisConfiguration, + AuthorityId, AuthorityPair, AuthoritySignature, + BabeAuthorityWeight, VRF_OUTPUT_LENGTH, + digests::{ + CompatibleDigestItem, NextEpochDescriptor, NextConfigDescriptor, PreDigest, + PrimaryPreDigest, SecondaryPlainPreDigest, + }, +}; +pub use sp_consensus::SyncOracle; +use std::{ + collections::HashMap, sync::Arc, u64, pin::Pin, time::{Instant, Duration}, + any::Any, borrow::Cow, convert::TryInto, +}; +use sp_consensus::{ImportResult, CanAuthorWith}; +use sp_consensus::import_queue::{ + BoxJustificationImport, BoxFinalityProofImport, +}; +use sp_core::{crypto::Public, traits::BareCryptoStore}; +use sp_application_crypto::AppKey; +use sp_runtime::{ + generic::{BlockId, OpaqueDigestItemId}, Justification, + traits::{Block as BlockT, Header, DigestItemFor, Zero}, +}; +use sp_api::{ProvideRuntimeApi, NumberFor}; +use sc_keystore::KeyStorePtr; +use parking_lot::Mutex; +use sp_inherents::{InherentDataProviders, InherentData}; +use sc_telemetry::{telemetry, CONSENSUS_TRACE, CONSENSUS_DEBUG}; +use sp_consensus::{ + self, BlockImport, Environment, Proposer, BlockCheckParams, + 
ForkChoiceStrategy, BlockImportParams, BlockOrigin, Error as ConsensusError, + SelectChain, SlotData, +}; +use sp_consensus_babe::inherents::BabeInherentData; +use sp_timestamp::{TimestampInherentData, InherentType as TimestampInherent}; +use sp_consensus::import_queue::{Verifier, BasicQueue, DefaultImportQueue, CacheKeyId}; +use sc_client_api::{ + backend::AuxStore, + BlockchainEvents, ProvideUncles, +}; +use sp_block_builder::BlockBuilder as BlockBuilderApi; +use futures::channel::mpsc::{channel, Sender, Receiver}; +use retain_mut::RetainMut; + +use futures::prelude::*; +use log::{debug, info, log, trace, warn}; +use prometheus_endpoint::Registry; +use sc_consensus_slots::{ + SlotWorker, SlotInfo, SlotCompatible, StorageChanges, CheckedHeader, check_equivocation, +}; +use sc_consensus_epochs::{ + descendent_query, SharedEpochChanges, EpochChangesFor, Epoch as EpochT, ViableEpochDescriptor, +}; +use sp_blockchain::{ + Result as ClientResult, Error as ClientError, + HeaderBackend, ProvideCache, HeaderMetadata +}; +use schnorrkel::SignatureError; +use codec::{Encode, Decode}; +use sp_api::ApiExt; + +mod verification; +mod migration; + +pub mod aux_schema; +pub mod authorship; +#[cfg(test)] +mod tests; + +/// BABE epoch information +#[derive(Decode, Encode, PartialEq, Eq, Clone, Debug)] +pub struct Epoch { + /// The epoch index. + pub epoch_index: u64, + /// The starting slot of the epoch. + pub start_slot: SlotNumber, + /// The duration of this epoch. + pub duration: SlotNumber, + /// The authorities and their weights. + pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, + /// Randomness for this epoch. + pub randomness: [u8; VRF_OUTPUT_LENGTH], + /// Configuration of the epoch. 
+ pub config: BabeEpochConfiguration, +} + +impl EpochT for Epoch { + type NextEpochDescriptor = (NextEpochDescriptor, BabeEpochConfiguration); + type SlotNumber = SlotNumber; + + fn increment( + &self, + (descriptor, config): (NextEpochDescriptor, BabeEpochConfiguration) + ) -> Epoch { + Epoch { + epoch_index: self.epoch_index + 1, + start_slot: self.start_slot + self.duration, + duration: self.duration, + authorities: descriptor.authorities, + randomness: descriptor.randomness, + config, + } + } + + fn start_slot(&self) -> SlotNumber { + self.start_slot + } + + fn end_slot(&self) -> SlotNumber { + self.start_slot + self.duration + } +} + +impl Epoch { + /// Create the genesis epoch (epoch #0). This is defined to start at the slot of + /// the first block, so that has to be provided. + pub fn genesis( + genesis_config: &BabeGenesisConfiguration, + slot_number: SlotNumber + ) -> Epoch { + Epoch { + epoch_index: 0, + start_slot: slot_number, + duration: genesis_config.epoch_length, + authorities: genesis_config.genesis_authorities.clone(), + randomness: genesis_config.randomness, + config: BabeEpochConfiguration { + c: genesis_config.c, + allowed_slots: genesis_config.allowed_slots, + }, + } + } +} + +#[derive(derive_more::Display, Debug)] +enum Error { + #[display(fmt = "Multiple BABE pre-runtime digests, rejecting!")] + MultiplePreRuntimeDigests, + #[display(fmt = "No BABE pre-runtime digest found")] + NoPreRuntimeDigest, + #[display(fmt = "Multiple BABE epoch change digests, rejecting!")] + MultipleEpochChangeDigests, + #[display(fmt = "Multiple BABE config change digests, rejecting!")] + MultipleConfigChangeDigests, + #[display(fmt = "Could not extract timestamp and slot: {:?}", _0)] + Extraction(sp_consensus::Error), + #[display(fmt = "Could not fetch epoch at {:?}", _0)] + FetchEpoch(B::Hash), + #[display(fmt = "Header {:?} rejected: too far in the future", _0)] + TooFarInFuture(B::Hash), + #[display(fmt = "Parent ({}) of {} unavailable. 
Cannot import", _0, _1)] + ParentUnavailable(B::Hash, B::Hash), + #[display(fmt = "Slot number must increase: parent slot: {}, this slot: {}", _0, _1)] + SlotNumberMustIncrease(u64, u64), + #[display(fmt = "Header {:?} has a bad seal", _0)] + HeaderBadSeal(B::Hash), + #[display(fmt = "Header {:?} is unsealed", _0)] + HeaderUnsealed(B::Hash), + #[display(fmt = "Slot author not found")] + SlotAuthorNotFound, + #[display(fmt = "Secondary slot assignments are disabled for the current epoch.")] + SecondarySlotAssignmentsDisabled, + #[display(fmt = "Bad signature on {:?}", _0)] + BadSignature(B::Hash), + #[display(fmt = "Invalid author: Expected secondary author: {:?}, got: {:?}.", _0, _1)] + InvalidAuthor(AuthorityId, AuthorityId), + #[display(fmt = "No secondary author expected.")] + NoSecondaryAuthorExpected, + #[display(fmt = "VRF verification of block by author {:?} failed: threshold {} exceeded", _0, _1)] + VRFVerificationOfBlockFailed(AuthorityId, u128), + #[display(fmt = "VRF verification failed: {:?}", _0)] + VRFVerificationFailed(SignatureError), + #[display(fmt = "Could not fetch parent header: {:?}", _0)] + FetchParentHeader(sp_blockchain::Error), + #[display(fmt = "Expected epoch change to happen at {:?}, s{}", _0, _1)] + ExpectedEpochChange(B::Hash, u64), + #[display(fmt = "Unexpected config change")] + UnexpectedConfigChange, + #[display(fmt = "Unexpected epoch change")] + UnexpectedEpochChange, + #[display(fmt = "Parent block of {} has no associated weight", _0)] + ParentBlockNoAssociatedWeight(B::Hash), + #[display(fmt = "Checking inherents failed: {}", _0)] + CheckInherents(String), + Client(sp_blockchain::Error), + Runtime(sp_inherents::Error), + ForkTree(Box>), +} + +impl std::convert::From> for String { + fn from(error: Error) -> String { + error.to_string() + } +} + +fn babe_err(error: Error) -> Error { + debug!(target: "babe", "{}", error); + error +} + +/// Intermediate value passed to block importer. 
+pub struct BabeIntermediate { + /// The epoch descriptor. + pub epoch_descriptor: ViableEpochDescriptor, Epoch>, +} + +/// Intermediate key for Babe engine. +pub static INTERMEDIATE_KEY: &[u8] = b"babe1"; + +/// A slot duration. Create with `get_or_compute`. +// FIXME: Once Rust has higher-kinded types, the duplication between this +// and `super::babe::Config` can be eliminated. +// https://github.com/paritytech/substrate/issues/2434 +#[derive(Clone)] +pub struct Config(sc_consensus_slots::SlotDuration); + +impl Config { + /// Either fetch the slot duration from disk or compute it from the genesis + /// state. + pub fn get_or_compute(client: &C) -> ClientResult where + C: AuxStore + ProvideRuntimeApi, C::Api: BabeApi, + { + trace!(target: "babe", "Getting slot duration"); + match sc_consensus_slots::SlotDuration::get_or_compute(client, |a, b| { + let has_api_v1 = a.has_api_with::, _>( + &b, |v| v == 1, + )?; + let has_api_v2 = a.has_api_with::, _>( + &b, |v| v == 2, + )?; + + if has_api_v1 { + #[allow(deprecated)] { + Ok(a.configuration_before_version_2(b)?.into()) + } + } else if has_api_v2 { + a.configuration(b) + } else { + Err(sp_blockchain::Error::VersionInvalid( + "Unsupported or invalid BabeApi version".to_string() + )) + } + }).map(Self) { + Ok(s) => Ok(s), + Err(s) => { + warn!(target: "babe", "Failed to get slot duration"); + Err(s) + } + } + } +} + +impl std::ops::Deref for Config { + type Target = BabeGenesisConfiguration; + + fn deref(&self) -> &BabeGenesisConfiguration { + &*self.0 + } +} + +/// Parameters for BABE. +pub struct BabeParams { + /// The keystore that manages the keys of the node. + pub keystore: KeyStorePtr, + + /// The client to use + pub client: Arc, + + /// The SelectChain Strategy + pub select_chain: SC, + + /// The environment we are producing blocks for. + pub env: E, + + /// The underlying block-import object to supply our produced blocks to. 
+ /// This must be a `BabeBlockImport` or a wrapper of it, otherwise + /// critical consensus logic will be omitted. + pub block_import: I, + + /// A sync oracle + pub sync_oracle: SO, + + /// Providers for inherent data. + pub inherent_data_providers: InherentDataProviders, + + /// Force authoring of blocks even if we are offline + pub force_authoring: bool, + + /// The source of timestamps for relative slots + pub babe_link: BabeLink, + + /// Checks if the current native implementation can author with a runtime at a given block. + pub can_author_with: CAW, +} + +/// Start the babe worker. +pub fn start_babe(BabeParams { + keystore, + client, + select_chain, + env, + block_import, + sync_oracle, + inherent_data_providers, + force_authoring, + babe_link, + can_author_with, +}: BabeParams) -> Result< + BabeWorker, + sp_consensus::Error, +> where + B: BlockT, + C: ProvideRuntimeApi + ProvideCache + ProvideUncles + BlockchainEvents + + HeaderBackend + HeaderMetadata + Send + Sync + 'static, + C::Api: BabeApi, + SC: SelectChain + 'static, + E: Environment + Send + Sync + 'static, + E::Proposer: Proposer>, + I: BlockImport> + Send + + Sync + 'static, + Error: std::error::Error + Send + From + From + 'static, + SO: SyncOracle + Send + Sync + Clone + 'static, + CAW: CanAuthorWith + Send + 'static, +{ + let config = babe_link.config; + let slot_notification_sinks = Arc::new(Mutex::new(Vec::new())); + + let worker = BabeSlotWorker { + client: client.clone(), + block_import: Arc::new(Mutex::new(block_import)), + env, + sync_oracle: sync_oracle.clone(), + force_authoring, + keystore, + epoch_changes: babe_link.epoch_changes.clone(), + slot_notification_sinks: slot_notification_sinks.clone(), + config: config.clone(), + }; + + register_babe_inherent_data_provider(&inherent_data_providers, config.slot_duration())?; + sc_consensus_uncles::register_uncles_inherent_data_provider( + client, + select_chain.clone(), + &inherent_data_providers, + )?; + + info!(target: "babe", "👶 
Starting BABE Authorship worker"); + let inner = sc_consensus_slots::start_slot_worker( + config.0, + select_chain, + worker, + sync_oracle, + inherent_data_providers, + babe_link.time_source, + can_author_with, + ); + Ok(BabeWorker { + inner: Box::pin(inner), + slot_notification_sinks, + }) +} + +/// Worker for Babe which implements `Future`. This must be polled. +#[must_use] +pub struct BabeWorker { + inner: Pin + Send + 'static>>, + slot_notification_sinks: Arc, Epoch>)>>>>, +} + +impl BabeWorker { + /// Return an event stream of notifications for when new slot happens, and the corresponding + /// epoch descriptor. + pub fn slot_notification_stream( + &self + ) -> Receiver<(u64, ViableEpochDescriptor, Epoch>)> { + const CHANNEL_BUFFER_SIZE: usize = 1024; + + let (sink, stream) = channel(CHANNEL_BUFFER_SIZE); + self.slot_notification_sinks.lock().push(sink); + stream + } +} + +impl futures::Future for BabeWorker { + type Output = (); + + fn poll( + mut self: Pin<&mut Self>, + cx: &mut futures::task::Context + ) -> futures::task::Poll { + self.inner.as_mut().poll(cx) + } +} + +/// Slot notification sinks. 
+type SlotNotificationSinks = Arc::Hash, NumberFor, Epoch>)>>>>; + +struct BabeSlotWorker { + client: Arc, + block_import: Arc>, + env: E, + sync_oracle: SO, + force_authoring: bool, + keystore: KeyStorePtr, + epoch_changes: SharedEpochChanges, + slot_notification_sinks: SlotNotificationSinks, + config: Config, +} + +impl sc_consensus_slots::SimpleSlotWorker for BabeSlotWorker where + B: BlockT, + C: ProvideRuntimeApi + + ProvideCache + + HeaderBackend + + HeaderMetadata, + C::Api: BabeApi, + E: Environment, + E::Proposer: Proposer>, + I: BlockImport> + Send + Sync + 'static, + SO: SyncOracle + Send + Clone, + Error: std::error::Error + Send + From + From + 'static, +{ + type EpochData = ViableEpochDescriptor, Epoch>; + type Claim = (PreDigest, AuthorityId); + type SyncOracle = SO; + type CreateProposer = Pin> + Send + 'static + >>; + type Proposer = E::Proposer; + type BlockImport = I; + + fn logging_target(&self) -> &'static str { + "babe" + } + + fn block_import(&self) -> Arc> { + self.block_import.clone() + } + + fn epoch_data( + &self, + parent: &B::Header, + slot_number: u64, + ) -> Result { + self.epoch_changes.lock().epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent.hash(), + parent.number().clone(), + slot_number, + ) + .map_err(|e| ConsensusError::ChainLookup(format!("{:?}", e)))? 
+ .ok_or(sp_consensus::Error::InvalidAuthoritiesSet) + } + + fn authorities_len(&self, epoch_descriptor: &Self::EpochData) -> Option { + self.epoch_changes.lock() + .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.config, slot)) + .map(|epoch| epoch.as_ref().authorities.len()) + } + + fn claim_slot( + &self, + _parent_header: &B::Header, + slot_number: SlotNumber, + epoch_descriptor: &ViableEpochDescriptor, Epoch>, + ) -> Option { + debug!(target: "babe", "Attempting to claim slot {}", slot_number); + let s = authorship::claim_slot( + slot_number, + self.epoch_changes.lock().viable_epoch( + &epoch_descriptor, + |slot| Epoch::genesis(&self.config, slot) + )?.as_ref(), + &self.keystore, + ); + + if s.is_some() { + debug!(target: "babe", "Claimed slot {}", slot_number); + } + + s + } + + fn notify_slot( + &self, + _parent_header: &B::Header, + slot_number: SlotNumber, + epoch_descriptor: &ViableEpochDescriptor, Epoch>, + ) { + self.slot_notification_sinks.lock() + .retain_mut(|sink| { + match sink.try_send((slot_number, epoch_descriptor.clone())) { + Ok(()) => true, + Err(e) => { + if e.is_full() { + warn!(target: "babe", "Trying to notify a slot but the channel is full"); + true + } else { + false + } + }, + } + }); + } + + fn pre_digest_data( + &self, + _slot_number: u64, + claim: &Self::Claim, + ) -> Vec> { + vec![ + as CompatibleDigestItem>::babe_pre_digest(claim.0.clone()), + ] + } + + fn block_import_params(&self) -> Box, + StorageChanges, + Self::Claim, + Self::EpochData, + ) -> Result< + sp_consensus::BlockImportParams, + sp_consensus::Error> + Send + 'static> + { + let keystore = self.keystore.clone(); + Box::new(move |header, header_hash, body, storage_changes, (_, public), epoch_descriptor| { + // sign the pre-sealed hash of the block and then + // add it to a digest item. 
+ let public_type_pair = public.clone().into(); + let public = public.to_raw_vec(); + let signature = keystore.read() + .sign_with( + ::ID, + &public_type_pair, + header_hash.as_ref() + ) + .map_err(|e| sp_consensus::Error::CannotSign( + public.clone(), e.to_string(), + ))?; + let signature: AuthoritySignature = signature.clone().try_into() + .map_err(|_| sp_consensus::Error::InvalidSignature( + signature, public + ))?; + let digest_item = as CompatibleDigestItem>::babe_seal(signature.into()); + + info!("Block import params using Babe"); + let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); + info!("Pushing digest to block"); + import_block.post_digests.push(digest_item); + import_block.body = Some(body); + import_block.storage_changes = Some(storage_changes); + import_block.intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, + ); + + Ok(import_block) + }) + } + + fn force_authoring(&self) -> bool { + self.force_authoring + } + + fn sync_oracle(&mut self) -> &mut Self::SyncOracle { + &mut self.sync_oracle + } + + fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer { + Box::pin(self.env.init(block).map_err(|e| { + sp_consensus::Error::ClientImport(format!("{:?}", e)) + })) + } + + fn proposing_remaining_duration( + &self, + head: &B::Header, + slot_info: &SlotInfo, + ) -> Option { + let slot_remaining = self.slot_remaining_duration(slot_info); + + let parent_slot = match find_pre_digest::(head) { + Err(_) => return Some(slot_remaining), + Ok(d) => d.slot_number(), + }; + + if let Some(slot_lenience) = + sc_consensus_slots::slot_lenience_exponential(parent_slot, slot_info) + { + debug!(target: "babe", + "No block for {} slots. 
Applying exponential lenience of {}s", + slot_info.number.saturating_sub(parent_slot + 1), + slot_lenience.as_secs(), + ); + + Some(slot_remaining + slot_lenience) + } else { + Some(slot_remaining) + } + } +} + +impl SlotWorker for BabeSlotWorker where + B: BlockT, + C: ProvideRuntimeApi + + ProvideCache + + HeaderBackend + + HeaderMetadata + Send + Sync, + C::Api: BabeApi, + E: Environment + Send + Sync, + E::Proposer: Proposer>, + I: BlockImport> + Send + Sync + 'static, + SO: SyncOracle + Send + Sync + Clone, + Error: std::error::Error + Send + From + From + 'static, +{ + type OnSlot = Pin> + Send>>; + + fn on_slot(&mut self, chain_head: B::Header, slot_info: SlotInfo) -> Self::OnSlot { + >::on_slot(self, chain_head, slot_info) + } +} + +/// Extract the BABE pre digest from the given header. Pre-runtime digests are +/// mandatory, the function will return `Err` if none is found. +fn find_pre_digest(header: &B::Header) -> Result> +{ + // genesis block doesn't contain a pre digest so let's generate a + // dummy one to not break any invariants in the rest of the code + if header.number().is_zero() { + return Ok(PreDigest::SecondaryPlain(SecondaryPlainPreDigest { + slot_number: 0, + authority_index: 0, + })); + } + + let mut pre_digest: Option<_> = None; + for log in header.digest().logs() { + trace!(target: "babe", "Checking log {:?}, looking for pre runtime digest", log); + match (log.as_babe_pre_digest(), pre_digest.is_some()) { + (Some(_), true) => return Err(babe_err(Error::MultiplePreRuntimeDigests)), + (None, _) => trace!(target: "babe", "Ignoring digest not meant for us"), + (s, false) => pre_digest = s, + } + } + pre_digest.ok_or_else(|| babe_err(Error::NoPreRuntimeDigest)) +} + +/// Extract the BABE epoch change digest from the given header, if it exists. 
+fn find_next_epoch_digest(header: &B::Header) + -> Result, Error> + where DigestItemFor: CompatibleDigestItem, +{ + let mut epoch_digest: Option<_> = None; + for log in header.digest().logs() { + trace!(target: "babe", "Checking log {:?}, looking for epoch change digest.", log); + let log = log.try_to::(OpaqueDigestItemId::Consensus(&BABE_ENGINE_ID)); + match (log, epoch_digest.is_some()) { + (Some(ConsensusLog::NextEpochData(_)), true) => return Err(babe_err(Error::MultipleEpochChangeDigests)), + (Some(ConsensusLog::NextEpochData(epoch)), false) => epoch_digest = Some(epoch), + _ => trace!(target: "babe", "Ignoring digest not meant for us"), + } + } + + Ok(epoch_digest) +} + +/// Extract the BABE config change digest from the given header, if it exists. +fn find_next_config_digest(header: &B::Header) + -> Result, Error> + where DigestItemFor: CompatibleDigestItem, +{ + let mut config_digest: Option<_> = None; + for log in header.digest().logs() { + trace!(target: "babe", "Checking log {:?}, looking for epoch change digest.", log); + let log = log.try_to::(OpaqueDigestItemId::Consensus(&BABE_ENGINE_ID)); + match (log, config_digest.is_some()) { + (Some(ConsensusLog::NextConfigData(_)), true) => return Err(babe_err(Error::MultipleConfigChangeDigests)), + (Some(ConsensusLog::NextConfigData(config)), false) => config_digest = Some(config), + _ => trace!(target: "babe", "Ignoring digest not meant for us"), + } + } + + Ok(config_digest) +} + +#[derive(Default, Clone)] +struct TimeSource(Arc, Vec<(Instant, u64)>)>>); + +impl SlotCompatible for TimeSource { + fn extract_timestamp_and_slot( + &self, + data: &InherentData, + ) -> Result<(TimestampInherent, u64, std::time::Duration), sp_consensus::Error> { + trace!(target: "babe", "extract timestamp"); + data.timestamp_inherent_data() + .and_then(|t| data.babe_inherent_data().map(|a| (t, a))) + .map_err(Into::into) + .map_err(sp_consensus::Error::InherentData) + .map(|(x, y)| (x, y, 
self.0.lock().0.take().unwrap_or_default())) + } +} + +/// State that must be shared between the import queue and the authoring logic. +#[derive(Clone)] +pub struct BabeLink { + time_source: TimeSource, + epoch_changes: SharedEpochChanges, + config: Config, +} + +impl BabeLink { + /// Get the epoch changes of this link. + pub fn epoch_changes(&self) -> &SharedEpochChanges { + &self.epoch_changes + } + + /// Get the config of this link. + pub fn config(&self) -> &Config { + &self.config + } +} + +/// A verifier for Babe blocks. +pub struct BabeVerifier { + client: Arc, + select_chain: SelectChain, + inherent_data_providers: sp_inherents::InherentDataProviders, + config: Config, + epoch_changes: SharedEpochChanges, + time_source: TimeSource, + can_author_with: CAW, +} + +impl BabeVerifier +where + Block: BlockT, + Client: AuxStore + HeaderBackend + HeaderMetadata + ProvideRuntimeApi, + Client::Api: BlockBuilderApi + + BabeApi, + SelectChain: sp_consensus::SelectChain, + CAW: CanAuthorWith, +{ + fn check_inherents( + &self, + block: Block, + block_id: BlockId, + inherent_data: InherentData, + ) -> Result<(), Error> { + if let Err(e) = self.can_author_with.can_author_with(&block_id) { + debug!( + target: "babe", + "Skipping `check_inherents` as authoring version is not compatible: {}", + e, + ); + + return Ok(()) + } + + let inherent_res = self.client.runtime_api().check_inherents( + &block_id, + block, + inherent_data, + ).map_err(Error::Client)?; + + if !inherent_res.ok() { + inherent_res + .into_errors() + .try_for_each(|(i, e)| { + Err(Error::CheckInherents(self.inherent_data_providers.error_to_string(&i, &e))) + }) + } else { + Ok(()) + } + } + + fn check_and_report_equivocation( + &self, + slot_now: SlotNumber, + slot: SlotNumber, + header: &Block::Header, + author: &AuthorityId, + origin: &BlockOrigin, + ) -> Result<(), Error> { + // don't report any equivocations during initial sync + // as they are most likely stale. 
+ if *origin == BlockOrigin::NetworkInitialSync { + return Ok(()); + } + + // check if authorship of this header is an equivocation and return a proof if so. + let equivocation_proof = + match check_equivocation(&*self.client, slot_now, slot, header, author) + .map_err(Error::Client)? + { + Some(proof) => proof, + None => return Ok(()), + }; + + info!( + "Slot author {:?} is equivocating at slot {} with headers {:?} and {:?}", + author, + slot, + equivocation_proof.first_header.hash(), + equivocation_proof.second_header.hash(), + ); + + // get the best block on which we will build and send the equivocation report. + let best_id = self + .select_chain + .best_chain() + .map(|h| BlockId::Hash(h.hash())) + .map_err(|e| Error::Client(e.into()))?; + + // generate a key ownership proof. we start by trying to generate the + // key ownership proof at the parent of the equivocating header, this + // will make sure that proof generation is successful since it happens + // during the on-going session (i.e. session keys are available in the + // state to be able to generate the proof). this might fail if the + // equivocation happens on the first block of the session, in which case + // its parent would be on the previous session. if generation on the + // parent header fails we try with best block as well. + let generate_key_owner_proof = |block_id: &BlockId| { + self.client + .runtime_api() + .generate_key_ownership_proof(block_id, slot, equivocation_proof.offender.clone()) + .map_err(Error::Client) + }; + + let parent_id = BlockId::Hash(*header.parent_hash()); + let key_owner_proof = match generate_key_owner_proof(&parent_id)? { + Some(proof) => proof, + None => match generate_key_owner_proof(&best_id)? { + Some(proof) => proof, + None => { + debug!(target: "babe", "Equivocation offender is not part of the authority set."); + return Ok(()); + } + }, + }; + + // submit equivocation report at best block. 
+ self.client + .runtime_api() + .submit_report_equivocation_unsigned_extrinsic( + &best_id, + equivocation_proof, + key_owner_proof, + ) + .map_err(Error::Client)?; + + info!(target: "babe", "Submitted equivocation report for author {:?}", author); + + Ok(()) + } +} + +impl Verifier + for BabeVerifier +where + Block: BlockT, + Client: HeaderMetadata + HeaderBackend + ProvideRuntimeApi + + Send + Sync + AuxStore + ProvideCache, + Client::Api: BlockBuilderApi + BabeApi, + SelectChain: sp_consensus::SelectChain, + CAW: CanAuthorWith + Send + Sync, +{ + fn verify( + &mut self, + origin: BlockOrigin, + header: Block::Header, + justification: Option, + mut body: Option>, + ) -> Result<(BlockImportParams, Option)>>), String> { + trace!( + target: "babe", + "Verifying origin: {:?} header: {:?} justification: {:?} body: {:?}", + origin, + header, + justification, + body, + ); + + debug!(target: "babe", "We have {:?} logs in this header", header.digest().logs().len()); + let mut inherent_data = self + .inherent_data_providers + .create_inherent_data() + .map_err(Error::::Runtime)?; + + let (_, slot_now, _) = self.time_source.extract_timestamp_and_slot(&inherent_data) + .map_err(Error::::Extraction)?; + + let hash = header.hash(); + let parent_hash = *header.parent_hash(); + + let parent_header_metadata = self.client.header_metadata(parent_hash) + .map_err(Error::::FetchParentHeader)?; + + let pre_digest = find_pre_digest::(&header)?; + let epoch_changes = self.epoch_changes.lock(); + let epoch_descriptor = epoch_changes.epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent_hash, + parent_header_metadata.number, + pre_digest.slot_number(), + ) + .map_err(|e| Error::::ForkTree(Box::new(e)))? 
+ .ok_or_else(|| Error::::FetchEpoch(parent_hash))?; + let viable_epoch = epoch_changes.viable_epoch( + &epoch_descriptor, + |slot| Epoch::genesis(&self.config, slot) + ).ok_or_else(|| Error::::FetchEpoch(parent_hash))?; + + // We add one to the current slot to allow for some small drift. + // FIXME #1019 in the future, alter this queue to allow deferring of headers + let v_params = verification::VerificationParams { + header: header.clone(), + pre_digest: Some(pre_digest), + slot_now: slot_now + 1, + epoch: viable_epoch.as_ref(), + }; + + match verification::check_header::(v_params)? { + CheckedHeader::Checked(pre_header, verified_info) => { + let babe_pre_digest = verified_info.pre_digest.as_babe_pre_digest() + .expect("check_header always returns a pre-digest digest item; qed"); + let slot_number = babe_pre_digest.slot_number(); + + // the header is valid but let's check if there was something else already + // proposed at the same slot by the given author. if there was, we will + // report the equivocation to the runtime. + if let Err(err) = self.check_and_report_equivocation( + slot_now, + slot_number, + &header, + &verified_info.author, + &origin, + ) { + warn!(target: "babe", "Error checking/reporting BABE equivocation: {:?}", err); + } + + // if the body is passed through, we need to use the runtime + // to check that the internally-set timestamp in the inherents + // actually matches the slot set in the seal. 
+ if let Some(inner_body) = body.take() { + inherent_data.babe_replace_inherent_data(slot_number); + let block = Block::new(pre_header.clone(), inner_body); + + self.check_inherents( + block.clone(), + BlockId::Hash(parent_hash), + inherent_data, + )?; + + let (_, inner_body) = block.deconstruct(); + body = Some(inner_body); + } + + trace!(target: "babe", "Checked {:?}; importing.", pre_header); + telemetry!( + CONSENSUS_TRACE; + "babe.checked_and_importing"; + "pre_header" => ?pre_header); + + let mut import_block = BlockImportParams::new(origin, pre_header); + import_block.post_digests.push(verified_info.seal); + import_block.body = body; + import_block.justification = justification; + import_block.intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, + ); + import_block.post_hash = Some(hash); + + Ok((import_block, Default::default())) + } + CheckedHeader::Deferred(a, b) => { + debug!(target: "babe", "Checking {:?} failed; {:?}, {:?}.", hash, a, b); + telemetry!(CONSENSUS_DEBUG; "babe.header_too_far_in_future"; + "hash" => ?hash, "a" => ?a, "b" => ?b + ); + Err(Error::::TooFarInFuture(hash).into()) + } + } + } +} + +/// Register the babe inherent data provider, if not registered already. +pub fn register_babe_inherent_data_provider( + inherent_data_providers: &InherentDataProviders, + slot_duration: u64, +) -> Result<(), sp_consensus::Error> { + debug!(target: "babe", "Registering"); + if !inherent_data_providers.has_provider(&sp_consensus_babe::inherents::INHERENT_IDENTIFIER) { + inherent_data_providers + .register_provider(sp_consensus_babe::inherents::InherentDataProvider::new(slot_duration)) + .map_err(Into::into) + .map_err(sp_consensus::Error::InherentData) + } else { + Ok(()) + } +} + +/// A block-import handler for BABE. +/// +/// This scans each imported block for epoch change signals. 
The signals are +/// tracked in a tree (of all forks), and the import logic validates all epoch +/// change transitions, i.e. whether a given epoch change is expected or whether +/// it is missing. +/// +/// The epoch change tree should be pruned as blocks are finalized. +pub struct BabeBlockImport { + inner: I, + client: Arc, + epoch_changes: SharedEpochChanges, + config: Config, +} + +impl Clone for BabeBlockImport { + fn clone(&self) -> Self { + BabeBlockImport { + inner: self.inner.clone(), + client: self.client.clone(), + epoch_changes: self.epoch_changes.clone(), + config: self.config.clone(), + } + } +} + +impl BabeBlockImport { + fn new( + client: Arc, + epoch_changes: SharedEpochChanges, + block_import: I, + config: Config, + ) -> Self { + BabeBlockImport { + client, + inner: block_import, + epoch_changes, + config, + } + } +} + +impl BlockImport for BabeBlockImport where + Block: BlockT, + Inner: BlockImport> + Send + Sync, + Inner::Error: Into, + Client: HeaderBackend + HeaderMetadata + + AuxStore + ProvideRuntimeApi + ProvideCache + Send + Sync, + Client::Api: BabeApi + ApiExt, +{ + type Error = ConsensusError; + type Transaction = sp_api::TransactionFor; + + fn import_block( + &mut self, + mut block: BlockImportParams, + new_cache: HashMap>, + ) -> Result { + info!("Importing block using Babe"); + let hash = block.post_hash(); + let number = *block.header.number(); + + // early exit if block already in chain, otherwise the check for + // epoch changes will error when trying to re-import an epoch change + match self.client.status(BlockId::Hash(hash)) { + Ok(sp_blockchain::BlockStatus::InChain) => return Ok(ImportResult::AlreadyInChain), + Ok(sp_blockchain::BlockStatus::Unknown) => {}, + Err(e) => return Err(ConsensusError::ClientImport(e.to_string())), + } + + info!("Processing block import using Babe"); + let pre_digest = find_pre_digest::(&block.header) + .expect("valid babe headers must contain a predigest; \ + header has been already verified; 
qed"); + let slot_number = pre_digest.slot_number(); + + let parent_hash = *block.header.parent_hash(); + let parent_header = self.client.header(BlockId::Hash(parent_hash)) + .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? + .ok_or_else(|| ConsensusError::ChainLookup(babe_err( + Error::::ParentUnavailable(parent_hash, hash) + ).into()))?; + + + info!("Parent slot pre digest"); + let parent_slot = find_pre_digest::(&parent_header) + .map(|d| d.slot_number()) + .expect("parent is non-genesis; valid BABE headers contain a pre-digest; \ + header has already been verified; qed"); + + // make sure that slot number is strictly increasing + if slot_number <= parent_slot { + return Err( + ConsensusError::ClientImport(babe_err( + Error::::SlotNumberMustIncrease(parent_slot, slot_number) + ).into()) + ); + } + + let mut epoch_changes = self.epoch_changes.lock(); + + // check if there's any epoch change expected to happen at this slot. + // `epoch` is the epoch to verify the block under, and `first_in_epoch` is true + // if this is the first block in its chain for that epoch. + // + // also provides the total weight of the chain, including the imported block. + let (epoch_descriptor, first_in_epoch, parent_weight) = { + let parent_weight = if *parent_header.number() == Zero::zero() { + 0 + } else { + aux_schema::load_block_weight(&*self.client, parent_hash) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))? + .ok_or_else(|| ConsensusError::ClientImport( + babe_err(Error::::ParentBlockNoAssociatedWeight(hash)).into() + ))? + }; + + let intermediate = block.take_intermediate::>( + INTERMEDIATE_KEY + )?; + + let epoch_descriptor = intermediate.epoch_descriptor; + let first_in_epoch = parent_slot < epoch_descriptor.start_slot(); + (epoch_descriptor, first_in_epoch, parent_weight) + }; + + let total_weight = parent_weight + pre_digest.added_weight(); + + // search for this all the time so we can reject unexpected announcements. 
+ let next_epoch_digest = find_next_epoch_digest::(&block.header) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + let next_config_digest = find_next_config_digest::(&block.header) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + + match (first_in_epoch, next_epoch_digest.is_some(), next_config_digest.is_some()) { + (true, true, _) => {}, + (false, false, false) => {}, + (false, false, true) => { + return Err( + ConsensusError::ClientImport( + babe_err(Error::::UnexpectedConfigChange).into(), + ) + ) + }, + (true, false, _) => { + return Err( + ConsensusError::ClientImport( + babe_err(Error::::ExpectedEpochChange(hash, slot_number)).into(), + ) + ) + }, + (false, true, _) => { + return Err( + ConsensusError::ClientImport( + babe_err(Error::::UnexpectedEpochChange).into(), + ) + ) + }, + } + + // if there's a pending epoch we'll save the previous epoch changes here + // this way we can revert it if there's any error + let mut old_epoch_changes = None; + + let info = self.client.info(); + + if let Some(next_epoch_descriptor) = next_epoch_digest { + old_epoch_changes = Some(epoch_changes.clone()); + + let viable_epoch = epoch_changes.viable_epoch( + &epoch_descriptor, + |slot| Epoch::genesis(&self.config, slot) + ).ok_or_else(|| { + ConsensusError::ClientImport(Error::::FetchEpoch(parent_hash).into()) + })?; + + let epoch_config = next_config_digest.map(Into::into).unwrap_or_else( + || viable_epoch.as_ref().config.clone() + ); + + // restrict info logging during initial sync to avoid spam + let log_level = if block.origin == BlockOrigin::NetworkInitialSync { + log::Level::Debug + } else { + log::Level::Info + }; + + log!(target: "babe", + log_level, + "👶 New epoch {} launching at block {} (block slot {} >= start slot {}).", + viable_epoch.as_ref().epoch_index, + hash, + slot_number, + viable_epoch.as_ref().start_slot, + ); + + let next_epoch = viable_epoch.increment((next_epoch_descriptor, epoch_config)); + + log!(target: "babe", + 
log_level, + "👶 Next epoch starts at slot {}", + next_epoch.as_ref().start_slot, + ); + + // prune the tree of epochs not part of the finalized chain or + // that are not live anymore, and then track the given epoch change + // in the tree. + // NOTE: it is important that these operations are done in this + // order, otherwise if pruning after import the `is_descendent_of` + // used by pruning may not know about the block that is being + // imported. + let prune_and_import = || { + prune_finalized( + self.client.clone(), + &mut epoch_changes, + )?; + + epoch_changes.import( + descendent_query(&*self.client), + hash, + number, + *block.header.parent_hash(), + next_epoch, + ).map_err(|e| ConsensusError::ClientImport(format!("{:?}", e)))?; + + Ok(()) + }; + + if let Err(e) = prune_and_import() { + debug!(target: "babe", "Failed to launch next epoch: {:?}", e); + *epoch_changes = old_epoch_changes.expect("set `Some` above and not taken; qed"); + return Err(e); + } + + crate::aux_schema::write_epoch_changes::( + &*epoch_changes, + |insert| block.auxiliary.extend( + insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) + ) + ); + } + + aux_schema::write_block_weight( + hash, + total_weight, + |values| block.auxiliary.extend( + values.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) + ), + ); + + // The fork choice rule is that we pick the heaviest chain (i.e. + // more primary blocks), if there's a tie we go with the longest + // chain. + block.fork_choice = { + info!("Fork choice"); + let (last_best, last_best_number) = (info.best_hash, info.best_number); + + let last_best_weight = if &last_best == block.header.parent_hash() { + // the parent=genesis case is already covered for loading parent weight, + // so we don't need to cover again here. + parent_weight + } else { + aux_schema::load_block_weight(&*self.client, last_best) + .map_err(|e| ConsensusError::ChainLookup(format!("{:?}", e)))? 
+ .ok_or_else( + || ConsensusError::ChainLookup("No block weight for parent header.".to_string()) + )? + }; + + Some(ForkChoiceStrategy::Custom(if total_weight > last_best_weight { + true + } else if total_weight == last_best_weight { + number > last_best_number + } else { + false + })) + }; + + let import_result = self.inner.import_block(block, new_cache); + + // revert to the original epoch changes in case there's an error + // importing the block + if import_result.is_err() { + if let Some(old_epoch_changes) = old_epoch_changes { + *epoch_changes = old_epoch_changes; + } + } + + import_result.map_err(Into::into) + } + + fn check_block( + &mut self, + block: BlockCheckParams, + ) -> Result { + self.inner.check_block(block).map_err(Into::into) + } +} + +/// Gets the best finalized block and its slot, and prunes the given epoch tree. +fn prune_finalized( + client: Arc, + epoch_changes: &mut EpochChangesFor, +) -> Result<(), ConsensusError> where + Block: BlockT, + Client: HeaderBackend + HeaderMetadata, +{ + let info = client.info(); + + let finalized_slot = { + let finalized_header = client.header(BlockId::Hash(info.finalized_hash)) + .map_err(|e| ConsensusError::ClientImport(format!("{:?}", e)))? + .expect("best finalized hash was given by client; \ + finalized headers must exist in db; qed"); + + find_pre_digest::(&finalized_header) + .expect("finalized header must be valid; \ + valid blocks have a pre-digest; qed") + .slot_number() + }; + + epoch_changes.prune_finalized( + descendent_query(&*client), + &info.finalized_hash, + info.finalized_number, + finalized_slot, + ).map_err(|e| ConsensusError::ClientImport(format!("{:?}", e)))?; + + Ok(()) +} + +/// Produce a BABE block-import object to be used later on in the construction of +/// an import-queue. +/// +/// Also returns a link object used to correctly instantiate the import queue +/// and background worker. 
+pub fn block_import( + config: Config, + wrapped_block_import: I, + client: Arc, +) -> ClientResult<(BabeBlockImport, BabeLink)> where + Client: AuxStore + HeaderBackend + HeaderMetadata, +{ + + info!("Block import using Babe"); + let epoch_changes = aux_schema::load_epoch_changes::(&*client, &config)?; + let link = BabeLink { + epoch_changes: epoch_changes.clone(), + time_source: Default::default(), + config: config.clone(), + }; + + // NOTE: this isn't entirely necessary, but since we didn't use to prune the + // epoch tree it is useful as a migration, so that nodes prune long trees on + // startup rather than waiting until importing the next epoch change block. + prune_finalized( + client.clone(), + &mut epoch_changes.lock(), + )?; + + let import = BabeBlockImport::new( + client, + epoch_changes, + wrapped_block_import, + config, + ); + + Ok((import, link)) +} + +/// Start an import queue for the BABE consensus algorithm. +/// +/// This method returns the import queue, some data that needs to be passed to the block authoring +/// logic (`BabeLink`), and a future that must be run to +/// completion and is responsible for listening to finality notifications and +/// pruning the epoch changes tree. +/// +/// The block import object provided must be the `BabeBlockImport` or a wrapper +/// of it, otherwise crucial import logic will be omitted. 
+pub fn import_queue( + babe_link: BabeLink, + block_import: Inner, + justification_import: Option>, + finality_proof_import: Option>, + client: Arc, + select_chain: SelectChain, + inherent_data_providers: InherentDataProviders, + spawner: &impl sp_core::traits::SpawnNamed, + registry: Option<&Registry>, + can_author_with: CAW, +) -> ClientResult> where + Inner: BlockImport> + + Send + Sync + 'static, + Client: ProvideRuntimeApi + ProvideCache + Send + Sync + AuxStore + 'static, + Client: HeaderBackend + HeaderMetadata, + Client::Api: BlockBuilderApi + BabeApi + ApiExt, + SelectChain: sp_consensus::SelectChain + 'static, + CAW: CanAuthorWith + Send + Sync + 'static, +{ + register_babe_inherent_data_provider(&inherent_data_providers, babe_link.config.slot_duration)?; + + let verifier = BabeVerifier { + client, + select_chain, + inherent_data_providers, + config: babe_link.config, + epoch_changes: babe_link.epoch_changes, + time_source: babe_link.time_source, + can_author_with, + }; + + Ok(BasicQueue::new( + verifier, + Box::new(block_import), + justification_import, + finality_proof_import, + spawner, + registry, + )) +} + +/// BABE test helpers. Utility methods for manually authoring blocks. +#[cfg(feature = "test-helpers")] +pub mod test_helpers { + use super::*; + + /// Try to claim the given slot and return a `BabePreDigest` if + /// successful. 
+ pub fn claim_slot( + slot_number: u64, + parent: &B::Header, + client: &C, + keystore: &KeyStorePtr, + link: &BabeLink, + ) -> Option where + B: BlockT, + C: ProvideRuntimeApi + + ProvideCache + + HeaderBackend + + HeaderMetadata, + C::Api: BabeApi, + { + let epoch_changes = link.epoch_changes.lock(); + let epoch = epoch_changes.epoch_data_for_child_of( + descendent_query(client), + &parent.hash(), + parent.number().clone(), + slot_number, + |slot| Epoch::genesis(&link.config, slot), + ).unwrap().unwrap(); + + authorship::claim_slot( + slot_number, + &epoch, + keystore, + ).map(|(digest, _)| digest) + } +} diff --git a/client/consensus/babe/src/migration.rs b/client/consensus/babe/src/migration.rs new file mode 100644 index 0000000000000..2a5a8749cc3c1 --- /dev/null +++ b/client/consensus/babe/src/migration.rs @@ -0,0 +1,64 @@ +use codec::{Encode, Decode}; +use sc_consensus_epochs::Epoch as EpochT; +use crate::{ + Epoch, SlotNumber, AuthorityId, BabeAuthorityWeight, BabeGenesisConfiguration, + BabeEpochConfiguration, VRF_OUTPUT_LENGTH, NextEpochDescriptor, +}; + +/// BABE epoch information, version 0. +#[derive(Decode, Encode, PartialEq, Eq, Clone, Debug)] +pub struct EpochV0 { + /// The epoch index. + pub epoch_index: u64, + /// The starting slot of the epoch. + pub start_slot: SlotNumber, + /// The duration of this epoch. + pub duration: SlotNumber, + /// The authorities and their weights. + pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, + /// Randomness for this epoch. 
+ pub randomness: [u8; VRF_OUTPUT_LENGTH], +} + +impl EpochT for EpochV0 { + type NextEpochDescriptor = NextEpochDescriptor; + type SlotNumber = SlotNumber; + + fn increment( + &self, + descriptor: NextEpochDescriptor + ) -> EpochV0 { + EpochV0 { + epoch_index: self.epoch_index + 1, + start_slot: self.start_slot + self.duration, + duration: self.duration, + authorities: descriptor.authorities, + randomness: descriptor.randomness, + } + } + + fn start_slot(&self) -> SlotNumber { + self.start_slot + } + + fn end_slot(&self) -> SlotNumber { + self.start_slot + self.duration + } +} + +impl EpochV0 { + /// Migrate the sturct to current epoch version. + pub fn migrate(self, config: &BabeGenesisConfiguration) -> Epoch { + Epoch { + epoch_index: self.epoch_index, + start_slot: self.start_slot, + duration: self.duration, + authorities: self.authorities, + randomness: self.randomness, + config: BabeEpochConfiguration { + c: config.c, + allowed_slots: config.allowed_slots, + }, + } + } +} diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs new file mode 100644 index 0000000000000..87876be8ae456 --- /dev/null +++ b/client/consensus/babe/src/tests.rs @@ -0,0 +1,853 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! 
BABE testsuite + +// FIXME #2532: need to allow deprecated until refactor is done +// https://github.com/paritytech/substrate/issues/2532 +#![allow(deprecated)] +use super::*; +use authorship::claim_slot; +use sp_core::{crypto::Pair, vrf::make_transcript as transcript_from_data}; +use sp_consensus_babe::{ + AuthorityPair, + SlotNumber, + AllowedSlots, + make_transcript, + make_transcript_data, +}; +use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; +use sp_consensus::{ + NoNetwork as DummyOracle, Proposal, RecordProof, AlwaysCanAuthor, + import_queue::{BoxBlockImport, BoxJustificationImport, BoxFinalityProofImport}, +}; +use sc_network_test::*; +use sc_network_test::{Block as TestBlock, PeersClient}; +use sc_network::config::{BoxFinalityProofRequestBuilder, ProtocolConfig}; +use sp_runtime::{generic::DigestItem, traits::{Block as BlockT, DigestFor}}; +use sc_client_api::{BlockchainEvents, backend::TransactionFor}; +use log::debug; +use std::{time::Duration, cell::RefCell, task::Poll}; +use rand::RngCore; +use rand_chacha::{ + rand_core::SeedableRng, + ChaChaRng, +}; + +type Item = DigestItem; + +type Error = sp_blockchain::Error; + +type TestClient = substrate_test_runtime_client::client::Client< + substrate_test_runtime_client::Backend, + substrate_test_runtime_client::Executor, + TestBlock, + substrate_test_runtime_client::runtime::RuntimeApi, +>; + +#[derive(Copy, Clone, PartialEq)] +enum Stage { + PreSeal, + PostSeal, +} + +type Mutator = Arc; + +#[derive(Clone)] +struct DummyFactory { + client: Arc, + epoch_changes: SharedEpochChanges, + config: Config, + mutator: Mutator, +} + +struct DummyProposer { + factory: DummyFactory, + parent_hash: Hash, + parent_number: u64, + parent_slot: SlotNumber, +} + +impl Environment for DummyFactory { + type CreateProposer = future::Ready>; + type Proposer = DummyProposer; + type Error = Error; + + fn init(&mut self, parent_header: &::Header) + -> Self::CreateProposer + { + + let parent_slot = 
crate::find_pre_digest::(parent_header) + .expect("parent header has a pre-digest") + .slot_number(); + + future::ready(Ok(DummyProposer { + factory: self.clone(), + parent_hash: parent_header.hash(), + parent_number: *parent_header.number(), + parent_slot, + })) + } +} + +impl DummyProposer { + fn propose_with(&mut self, pre_digests: DigestFor) + -> future::Ready< + Result< + Proposal< + TestBlock, + sc_client_api::TransactionFor + >, + Error + > + > + { + let block_builder = self.factory.client.new_block_at( + &BlockId::Hash(self.parent_hash), + pre_digests, + false, + ).unwrap(); + + let mut block = match block_builder.build().map_err(|e| e.into()) { + Ok(b) => b.block, + Err(e) => return future::ready(Err(e)), + }; + + let this_slot = crate::find_pre_digest::(block.header()) + .expect("baked block has valid pre-digest") + .slot_number(); + + // figure out if we should add a consensus digest, since the test runtime + // doesn't. + let epoch_changes = self.factory.epoch_changes.lock(); + let epoch = epoch_changes.epoch_data_for_child_of( + descendent_query(&*self.factory.client), + &self.parent_hash, + self.parent_number, + this_slot, + |slot| Epoch::genesis(&self.factory.config, slot), + ) + .expect("client has data to find epoch") + .expect("can compute epoch for baked block"); + + let first_in_epoch = self.parent_slot < epoch.start_slot; + if first_in_epoch { + // push a `Consensus` digest signalling next change. + // we just reuse the same randomness and authorities as the prior + // epoch. this will break when we add light client support, since + // that will re-check the randomness logic off-chain. + let digest_data = ConsensusLog::NextEpochData(NextEpochDescriptor { + authorities: epoch.authorities.clone(), + randomness: epoch.randomness.clone(), + }).encode(); + let digest = DigestItem::Consensus(BABE_ENGINE_ID, digest_data); + block.header.digest_mut().push(digest) + } + + // mutate the block header according to the mutator. 
+ (self.factory.mutator)(&mut block.header, Stage::PreSeal); + + future::ready(Ok(Proposal { block, proof: None, storage_changes: Default::default() })) + } +} + +impl Proposer for DummyProposer { + type Error = Error; + type Transaction = sc_client_api::TransactionFor; + type Proposal = future::Ready, Error>>; + + fn propose( + mut self, + _: InherentData, + pre_digests: DigestFor, + _: Duration, + _: RecordProof, + ) -> Self::Proposal { + self.propose_with(pre_digests) + } +} + +thread_local! { + static MUTATOR: RefCell = RefCell::new(Arc::new(|_, _|())); +} + +#[derive(Clone)] +struct PanickingBlockImport(B); + +impl> BlockImport for PanickingBlockImport { + type Error = B::Error; + type Transaction = B::Transaction; + + fn import_block( + &mut self, + block: BlockImportParams, + new_cache: HashMap>, + ) -> Result { + Ok(self.0.import_block(block, new_cache).expect("importing block failed")) + } + + fn check_block( + &mut self, + block: BlockCheckParams, + ) -> Result { + Ok(self.0.check_block(block).expect("checking block failed")) + } +} + +pub struct BabeTestNet { + peers: Vec>>, +} + +type TestHeader = ::Header; +type TestExtrinsic = ::Extrinsic; + +type TestSelectChain = substrate_test_runtime_client::LongestChain< + substrate_test_runtime_client::Backend, + TestBlock, +>; + +pub struct TestVerifier { + inner: BabeVerifier, + mutator: Mutator, +} + +impl Verifier for TestVerifier { + /// Verify the given data and return the BlockImportParams and an optional + /// new set of validators to import. If not, err with an Error-Message + /// presented to the User in the logs. + fn verify( + &mut self, + origin: BlockOrigin, + mut header: TestHeader, + justification: Option, + body: Option>, + ) -> Result<(BlockImportParams, Option)>>), String> { + // apply post-sealing mutations (i.e. stripping seal, if desired). 
+ (self.mutator)(&mut header, Stage::PostSeal); + self.inner.verify(origin, header, justification, body) + } +} + +pub struct PeerData { + link: BabeLink, + inherent_data_providers: InherentDataProviders, + block_import: Mutex< + Option>> + >, +} + +impl TestNetFactory for BabeTestNet { + type Verifier = TestVerifier; + type PeerData = Option; + + /// Create new test network with peers and given config. + fn from_config(_config: &ProtocolConfig) -> Self { + debug!(target: "babe", "Creating test network from config"); + BabeTestNet { + peers: Vec::new(), + } + } + + fn make_block_import(&self, client: PeersClient) + -> ( + BlockImportAdapter, + Option>, + Option>, + Option>, + Option, + ) + { + let client = client.as_full().expect("only full clients are tested"); + let inherent_data_providers = InherentDataProviders::new(); + + let config = Config::get_or_compute(&*client).expect("config available"); + let (block_import, link) = crate::block_import( + config, + client.clone(), + client.clone(), + ).expect("can initialize block-import"); + + let block_import = PanickingBlockImport(block_import); + + let data_block_import = Mutex::new( + Some(Box::new(block_import.clone()) as BoxBlockImport<_, _>) + ); + ( + BlockImportAdapter::new_full(block_import), + None, + None, + None, + Some(PeerData { link, inherent_data_providers, block_import: data_block_import }), + ) + } + + fn make_verifier( + &self, + client: PeersClient, + _cfg: &ProtocolConfig, + maybe_link: &Option, + ) + -> Self::Verifier + { + use substrate_test_runtime_client::DefaultTestClientBuilderExt; + + let client = client.as_full().expect("only full clients are used in test"); + trace!(target: "babe", "Creating a verifier"); + + // ensure block import and verifier are linked correctly. 
+ let data = maybe_link.as_ref().expect("babe link always provided to verifier instantiation"); + + let (_, longest_chain) = TestClientBuilder::new().build_with_longest_chain(); + + TestVerifier { + inner: BabeVerifier { + client: client.clone(), + select_chain: longest_chain, + inherent_data_providers: data.inherent_data_providers.clone(), + config: data.link.config.clone(), + epoch_changes: data.link.epoch_changes.clone(), + time_source: data.link.time_source.clone(), + can_author_with: AlwaysCanAuthor, + }, + mutator: MUTATOR.with(|m| m.borrow().clone()), + } + } + + fn peer(&mut self, i: usize) -> &mut Peer { + trace!(target: "babe", "Retrieving a peer"); + &mut self.peers[i] + } + + fn peers(&self) -> &Vec> { + trace!(target: "babe", "Retrieving peers"); + &self.peers + } + + fn mut_peers>)>( + &mut self, + closure: F, + ) { + closure(&mut self.peers); + } +} + +#[test] +#[should_panic] +fn rejects_empty_block() { + sp_tracing::try_init_simple(); + let mut net = BabeTestNet::new(3); + let block_builder = |builder: BlockBuilder<_, _, _>| { + builder.build().unwrap().block + }; + net.mut_peers(|peer| { + peer[0].generate_blocks(1, BlockOrigin::NetworkInitialSync, block_builder); + }) +} + +fn run_one_test( + mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + 'static, +) { + sp_tracing::try_init_simple(); + let mutator = Arc::new(mutator) as Mutator; + + MUTATOR.with(|m| *m.borrow_mut() = mutator.clone()); + let net = BabeTestNet::new(3); + + let peers = &[ + (0, "//Alice"), + (1, "//Bob"), + (2, "//Charlie"), + ]; + + let net = Arc::new(Mutex::new(net)); + let mut import_notifications = Vec::new(); + let mut babe_futures = Vec::new(); + let mut keystore_paths = Vec::new(); + + for (peer_id, seed) in peers { + let mut net = net.lock(); + let peer = net.peer(*peer_id); + let client = peer.client().as_full().expect("Only full clients are used in tests").clone(); + let select_chain = peer.select_chain().expect("Full client has select_chain"); + + let 
keystore_path = tempfile::tempdir().expect("Creates keystore path"); + let keystore = sc_keystore::Store::open(keystore_path.path(), None).expect("Creates keystore"); + keystore.write().insert_ephemeral_from_seed::(seed).expect("Generates authority key"); + keystore_paths.push(keystore_path); + + let mut got_own = false; + let mut got_other = false; + + let data = peer.data.as_ref().expect("babe link set up during initialization"); + + let environ = DummyFactory { + client: client.clone(), + config: data.link.config.clone(), + epoch_changes: data.link.epoch_changes.clone(), + mutator: mutator.clone(), + }; + + import_notifications.push( + // run each future until we get one of our own blocks with number higher than 5 + // that was produced locally. + client.import_notification_stream() + .take_while(move |n| future::ready(n.header.number() < &5 || { + if n.origin == BlockOrigin::Own { + got_own = true; + } else { + got_other = true; + } + + // continue until we have at least one block of our own + // and one of another peer. 
+ !(got_own && got_other) + })) + .for_each(|_| future::ready(()) ) + ); + + + babe_futures.push(start_babe(BabeParams { + block_import: data.block_import.lock().take().expect("import set up during init"), + select_chain, + client, + env: environ, + sync_oracle: DummyOracle, + inherent_data_providers: data.inherent_data_providers.clone(), + force_authoring: false, + babe_link: data.link.clone(), + keystore, + can_author_with: sp_consensus::AlwaysCanAuthor, + }).expect("Starts babe")); + } + + futures::executor::block_on(future::select( + futures::future::poll_fn(move |cx| { + let mut net = net.lock(); + net.poll(cx); + for p in net.peers() { + for (h, e) in p.failed_verifications() { + panic!("Verification failed for {:?}: {}", h, e); + } + } + + Poll::<()>::Pending + }), + future::select(future::join_all(import_notifications), future::join_all(babe_futures)) + )); +} + +#[test] +fn authoring_blocks() { + run_one_test(|_, _| ()) +} + +#[test] +#[should_panic] +fn rejects_missing_inherent_digest() { + run_one_test(|header: &mut TestHeader, stage| { + let v = std::mem::take(&mut header.digest_mut().logs); + header.digest_mut().logs = v.into_iter() + .filter(|v| stage == Stage::PostSeal || v.as_babe_pre_digest().is_none()) + .collect() + }) +} + +#[test] +#[should_panic] +fn rejects_missing_seals() { + run_one_test(|header: &mut TestHeader, stage| { + let v = std::mem::take(&mut header.digest_mut().logs); + header.digest_mut().logs = v.into_iter() + .filter(|v| stage == Stage::PreSeal || v.as_babe_seal().is_none()) + .collect() + }) +} + +#[test] +#[should_panic] +fn rejects_missing_consensus_digests() { + run_one_test(|header: &mut TestHeader, stage| { + let v = std::mem::take(&mut header.digest_mut().logs); + header.digest_mut().logs = v.into_iter() + .filter(|v| stage == Stage::PostSeal || v.as_next_epoch_descriptor().is_none()) + .collect() + }); +} + +#[test] +fn wrong_consensus_engine_id_rejected() { + sp_tracing::try_init_simple(); + let sig = 
AuthorityPair::generate().0.sign(b""); + let bad_seal: Item = DigestItem::Seal([0; 4], sig.to_vec()); + assert!(bad_seal.as_babe_pre_digest().is_none()); + assert!(bad_seal.as_babe_seal().is_none()) +} + +#[test] +fn malformed_pre_digest_rejected() { + sp_tracing::try_init_simple(); + let bad_seal: Item = DigestItem::Seal(BABE_ENGINE_ID, [0; 64].to_vec()); + assert!(bad_seal.as_babe_pre_digest().is_none()); +} + +#[test] +fn sig_is_not_pre_digest() { + sp_tracing::try_init_simple(); + let sig = AuthorityPair::generate().0.sign(b""); + let bad_seal: Item = DigestItem::Seal(BABE_ENGINE_ID, sig.to_vec()); + assert!(bad_seal.as_babe_pre_digest().is_none()); + assert!(bad_seal.as_babe_seal().is_some()) +} + +#[test] +fn can_author_block() { + sp_tracing::try_init_simple(); + let keystore_path = tempfile::tempdir().expect("Creates keystore path"); + let keystore = sc_keystore::Store::open(keystore_path.path(), None).expect("Creates keystore"); + let pair = keystore.write().insert_ephemeral_from_seed::("//Alice") + .expect("Generates authority pair"); + + let mut i = 0; + let epoch = Epoch { + start_slot: 0, + authorities: vec![(pair.public(), 1)], + randomness: [0; 32], + epoch_index: 1, + duration: 100, + config: BabeEpochConfiguration { + c: (3, 10), + allowed_slots: AllowedSlots::PrimaryAndSecondaryPlainSlots, + }, + }; + + let mut config = crate::BabeGenesisConfiguration { + slot_duration: 1000, + epoch_length: 100, + c: (3, 10), + genesis_authorities: Vec::new(), + randomness: [0; 32], + allowed_slots: AllowedSlots::PrimaryAndSecondaryPlainSlots, + }; + + // with secondary slots enabled it should never be empty + match claim_slot(i, &epoch, &keystore) { + None => i += 1, + Some(s) => debug!(target: "babe", "Authored block {:?}", s.0), + } + + // otherwise with only vrf-based primary slots we might need to try a couple + // of times. 
+ config.allowed_slots = AllowedSlots::PrimarySlots; + loop { + match claim_slot(i, &epoch, &keystore) { + None => i += 1, + Some(s) => { + debug!(target: "babe", "Authored block {:?}", s.0); + break; + } + } + } +} + +// Propose and import a new BABE block on top of the given parent. +fn propose_and_import_block( + parent: &TestHeader, + slot_number: Option, + proposer_factory: &mut DummyFactory, + block_import: &mut BoxBlockImport, +) -> sp_core::H256 { + let mut proposer = futures::executor::block_on(proposer_factory.init(parent)).unwrap(); + + let slot_number = slot_number.unwrap_or_else(|| { + let parent_pre_digest = find_pre_digest::(parent).unwrap(); + parent_pre_digest.slot_number() + 1 + }); + + let pre_digest = sp_runtime::generic::Digest { + logs: vec![ + Item::babe_pre_digest( + PreDigest::SecondaryPlain(SecondaryPlainPreDigest { + authority_index: 0, + slot_number, + }), + ), + ], + }; + + let parent_hash = parent.hash(); + + let mut block = futures::executor::block_on(proposer.propose_with(pre_digest)).unwrap().block; + + let epoch_descriptor = proposer_factory.epoch_changes.lock().epoch_descriptor_for_child_of( + descendent_query(&*proposer_factory.client), + &parent_hash, + *parent.number(), + slot_number, + ).unwrap().unwrap(); + + let seal = { + // sign the pre-sealed hash of the block and then + // add it to a digest item. 
+ let pair = AuthorityPair::from_seed(&[1; 32]); + let pre_hash = block.header.hash(); + let signature = pair.sign(pre_hash.as_ref()); + Item::babe_seal(signature) + }; + + let post_hash = { + block.header.digest_mut().push(seal.clone()); + let h = block.header.hash(); + block.header.digest_mut().pop(); + h + }; + + let mut import = BlockImportParams::new(BlockOrigin::Own, block.header); + import.post_digests.push(seal); + import.body = Some(block.extrinsics); + import.intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, + ); + import.fork_choice = Some(ForkChoiceStrategy::LongestChain); + let import_result = block_import.import_block(import, Default::default()).unwrap(); + + match import_result { + ImportResult::Imported(_) => {}, + _ => panic!("expected block to be imported"), + } + + post_hash +} + +#[test] +fn importing_block_one_sets_genesis_epoch() { + let mut net = BabeTestNet::new(1); + + let peer = net.peer(0); + let data = peer.data.as_ref().expect("babe link set up during initialization"); + let client = peer.client().as_full().expect("Only full clients are used in tests").clone(); + + let mut proposer_factory = DummyFactory { + client: client.clone(), + config: data.link.config.clone(), + epoch_changes: data.link.epoch_changes.clone(), + mutator: Arc::new(|_, _| ()), + }; + + let mut block_import = data.block_import.lock().take().expect("import set up during init"); + + let genesis_header = client.header(&BlockId::Number(0)).unwrap().unwrap(); + + let block_hash = propose_and_import_block( + &genesis_header, + Some(999), + &mut proposer_factory, + &mut block_import, + ); + + let genesis_epoch = Epoch::genesis(&data.link.config, 999); + + let epoch_changes = data.link.epoch_changes.lock(); + let epoch_for_second_block = epoch_changes.epoch_data_for_child_of( + descendent_query(&*client), + &block_hash, + 1, + 1000, + |slot| Epoch::genesis(&data.link.config, slot), + ).unwrap().unwrap(); + + 
assert_eq!(epoch_for_second_block, genesis_epoch); +} + +#[test] +fn importing_epoch_change_block_prunes_tree() { + use sc_client_api::Finalizer; + + let mut net = BabeTestNet::new(1); + + let peer = net.peer(0); + let data = peer.data.as_ref().expect("babe link set up during initialization"); + + let client = peer.client().as_full().expect("Only full clients are used in tests").clone(); + let mut block_import = data.block_import.lock().take().expect("import set up during init"); + let epoch_changes = data.link.epoch_changes.clone(); + + let mut proposer_factory = DummyFactory { + client: client.clone(), + config: data.link.config.clone(), + epoch_changes: data.link.epoch_changes.clone(), + mutator: Arc::new(|_, _| ()), + }; + + // This is just boilerplate code for proposing and importing n valid BABE + // blocks that are built on top of the given parent. The proposer takes care + // of producing epoch change digests according to the epoch duration (which + // is set to 6 slots in the test runtime). + let mut propose_and_import_blocks = |parent_id, n| { + let mut hashes = Vec::new(); + let mut parent_header = client.header(&parent_id).unwrap().unwrap(); + + for _ in 0..n { + let block_hash = propose_and_import_block( + &parent_header, + None, + &mut proposer_factory, + &mut block_import, + ); + hashes.push(block_hash); + parent_header = client.header(&BlockId::Hash(block_hash)).unwrap().unwrap(); + } + + hashes + }; + + // This is the block tree that we're going to use in this test. Each node + // represents an epoch change block, the epoch duration is 6 slots. + // + // *---- F (#7) + // / *------ G (#19) - H (#25) + // / / + // A (#1) - B (#7) - C (#13) - D (#19) - E (#25) + // \ + // *------ I (#25) + + // Create and import the canon chain and keep track of fork blocks (A, C, D) + // from the diagram above. 
+ let canon_hashes = propose_and_import_blocks(BlockId::Number(0), 30); + + // Create the forks + let fork_1 = propose_and_import_blocks(BlockId::Hash(canon_hashes[0]), 10); + let fork_2 = propose_and_import_blocks(BlockId::Hash(canon_hashes[12]), 15); + let fork_3 = propose_and_import_blocks(BlockId::Hash(canon_hashes[18]), 10); + + // We should be tracking a total of 9 epochs in the fork tree + assert_eq!( + epoch_changes.lock().tree().iter().count(), + 9, + ); + + // And only one root + assert_eq!( + epoch_changes.lock().tree().roots().count(), + 1, + ); + + // We finalize block #13 from the canon chain, so on the next epoch + // change the tree should be pruned, to not contain F (#7). + client.finalize_block(BlockId::Hash(canon_hashes[12]), None, false).unwrap(); + propose_and_import_blocks(BlockId::Hash(client.chain_info().best_hash), 7); + + // at this point no hashes from the first fork must exist on the tree + assert!( + !epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_1.contains(h)), + ); + + // but the epoch changes from the other forks must still exist + assert!( + epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_2.contains(h)) + ); + + assert!( + epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_3.contains(h)), + ); + + // finalizing block #25 from the canon chain should prune out the second fork + client.finalize_block(BlockId::Hash(canon_hashes[24]), None, false).unwrap(); + propose_and_import_blocks(BlockId::Hash(client.chain_info().best_hash), 8); + + // at this point no hashes from the second fork must exist on the tree + assert!( + !epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_2.contains(h)), + ); + + // while epoch changes from the last fork should still exist + assert!( + epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_3.contains(h)), + ); +} + +#[test] +#[should_panic] +fn verify_slots_are_strictly_increasing() { + let mut net = BabeTestNet::new(1); + 
+ let peer = net.peer(0); + let data = peer.data.as_ref().expect("babe link set up during initialization"); + + let client = peer.client().as_full().expect("Only full clients are used in tests").clone(); + let mut block_import = data.block_import.lock().take().expect("import set up during init"); + + let mut proposer_factory = DummyFactory { + client: client.clone(), + config: data.link.config.clone(), + epoch_changes: data.link.epoch_changes.clone(), + mutator: Arc::new(|_, _| ()), + }; + + let genesis_header = client.header(&BlockId::Number(0)).unwrap().unwrap(); + + // we should have no issue importing this block + let b1 = propose_and_import_block( + &genesis_header, + Some(999), + &mut proposer_factory, + &mut block_import, + ); + + let b1 = client.header(&BlockId::Hash(b1)).unwrap().unwrap(); + + // we should fail to import this block since the slot number didn't increase. + // we will panic due to the `PanickingBlockImport` defined above. + propose_and_import_block( + &b1, + Some(999), + &mut proposer_factory, + &mut block_import, + ); +} + +#[test] +fn babe_transcript_generation_match() { + sp_tracing::try_init_simple(); + let keystore_path = tempfile::tempdir().expect("Creates keystore path"); + let keystore = sc_keystore::Store::open(keystore_path.path(), None).expect("Creates keystore"); + let pair = keystore.write().insert_ephemeral_from_seed::("//Alice") + .expect("Generates authority pair"); + + let epoch = Epoch { + start_slot: 0, + authorities: vec![(pair.public(), 1)], + randomness: [0; 32], + epoch_index: 1, + duration: 100, + config: BabeEpochConfiguration { + c: (3, 10), + allowed_slots: AllowedSlots::PrimaryAndSecondaryPlainSlots, + }, + }; + + let orig_transcript = make_transcript(&epoch.randomness.clone(), 1, epoch.epoch_index); + let new_transcript = make_transcript_data(&epoch.randomness, 1, epoch.epoch_index); + + let test = |t: merlin::Transcript| -> [u8; 16] { + let mut b = [0u8; 16]; + t.build_rng() + .finalize(&mut 
ChaChaRng::from_seed([0u8;32])) + .fill_bytes(&mut b); + b + }; + debug_assert!(test(orig_transcript) == test(transcript_from_data(new_transcript))); +} diff --git a/client/consensus/babe/src/verification.rs b/client/consensus/babe/src/verification.rs new file mode 100644 index 0000000000000..e8235a3ba667d --- /dev/null +++ b/client/consensus/babe/src/verification.rs @@ -0,0 +1,260 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Verification for BABE headers. +use sp_runtime::{traits::Header, traits::DigestItemFor}; +use sp_core::{Pair, Public}; +use sp_consensus_babe::{make_transcript, AuthoritySignature, SlotNumber, AuthorityPair, AuthorityId}; +use sp_consensus_babe::digests::{ + PreDigest, PrimaryPreDigest, SecondaryPlainPreDigest, SecondaryVRFPreDigest, + CompatibleDigestItem +}; +use sc_consensus_slots::CheckedHeader; +use log::{info, debug, trace}; +use super::{find_pre_digest, babe_err, Epoch, BlockT, Error}; +use super::authorship::{calculate_primary_threshold, check_primary_threshold, secondary_slot_author}; + +/// BABE verification parameters +pub(super) struct VerificationParams<'a, B: 'a + BlockT> { + /// The header being verified. + pub(super) header: B::Header, + /// The pre-digest of the header being verified. 
this is optional - if prior + /// verification code had to read it, it can be included here to avoid duplicate + /// work. + pub(super) pre_digest: Option, + /// The slot number of the current time. + pub(super) slot_now: SlotNumber, + /// Epoch descriptor of the epoch this block _should_ be under, if it's valid. + pub(super) epoch: &'a Epoch, +} + +/// Check a header has been signed by the right key. If the slot is too far in +/// the future, an error will be returned. If successful, returns the pre-header +/// and the digest item containing the seal. +/// +/// The seal must be the last digest. Otherwise, the whole header is considered +/// unsigned. This is required for security and must not be changed. +/// +/// This digest item will always return `Some` when used with `as_babe_pre_digest`. +/// +/// The given header can either be from a primary or secondary slot assignment, +/// with each having different validation logic. +pub(super) fn check_header( + params: VerificationParams, +) -> Result>, Error> where + DigestItemFor: CompatibleDigestItem, +{ + info!("checking header"); + let VerificationParams { + mut header, + pre_digest, + slot_now, + epoch, + } = params; + + let authorities = &epoch.authorities; + let pre_digest = pre_digest.map(Ok).unwrap_or_else(|| find_pre_digest::(&header))?; + + trace!(target: "babe", "Checking header"); + let seal = match header.digest_mut().pop() { + Some(x) => x, + None => return Err(babe_err(Error::HeaderUnsealed(header.hash()))), + }; + + let sig = seal.as_babe_seal().ok_or_else(|| { + babe_err(Error::HeaderBadSeal(header.hash())) + })?; + + // the pre-hash of the header doesn't include the seal + // and that's what we sign + let pre_hash = header.hash(); + + if pre_digest.slot_number() > slot_now { + header.digest_mut().push(seal); + return Ok(CheckedHeader::Deferred(header, pre_digest.slot_number())); + } + + let author = match authorities.get(pre_digest.authority_index() as usize) { + Some(author) => author.0.clone(), + 
None => return Err(babe_err(Error::SlotAuthorNotFound)), + }; + + match &pre_digest { + PreDigest::Primary(primary) => { + debug!(target: "babe", "Verifying Primary block"); + + check_primary_header::( + pre_hash, + primary, + sig, + &epoch, + epoch.config.c, + )?; + }, + PreDigest::SecondaryPlain(secondary) if epoch.config.allowed_slots.is_secondary_plain_slots_allowed() => { + debug!(target: "babe", "Verifying Secondary plain block"); + check_secondary_plain_header::( + pre_hash, + secondary, + sig, + &epoch, + )?; + }, + PreDigest::SecondaryVRF(secondary) if epoch.config.allowed_slots.is_secondary_vrf_slots_allowed() => { + debug!(target: "babe", "Verifying Secondary VRF block"); + check_secondary_vrf_header::( + pre_hash, + secondary, + sig, + &epoch, + )?; + }, + _ => { + return Err(babe_err(Error::SecondarySlotAssignmentsDisabled)); + } + } + + let info = VerifiedHeaderInfo { + pre_digest: CompatibleDigestItem::babe_pre_digest(pre_digest), + seal, + author, + }; + Ok(CheckedHeader::Checked(header, info)) +} + +pub(super) struct VerifiedHeaderInfo { + pub(super) pre_digest: DigestItemFor, + pub(super) seal: DigestItemFor, + pub(super) author: AuthorityId, +} + +/// Check a primary slot proposal header. We validate that the given header is +/// properly signed by the expected authority, and that the contained VRF proof +/// is valid. Additionally, the weight of this block must increase compared to +/// its parent since it is a primary block. 
+fn check_primary_header( + pre_hash: B::Hash, + pre_digest: &PrimaryPreDigest, + signature: AuthoritySignature, + epoch: &Epoch, + c: (u64, u64), +) -> Result<(), Error> { + info!("checking primary header"); + let author = &epoch.authorities[pre_digest.authority_index as usize].0; + + if AuthorityPair::verify(&signature, pre_hash, &author) { + let (inout, _) = { + let transcript = make_transcript( + &epoch.randomness, + pre_digest.slot_number, + epoch.epoch_index, + ); + + schnorrkel::PublicKey::from_bytes(author.as_slice()).and_then(|p| { + p.vrf_verify(transcript, &pre_digest.vrf_output, &pre_digest.vrf_proof) + }).map_err(|s| { + babe_err(Error::VRFVerificationFailed(s)) + })? + }; + + let threshold = calculate_primary_threshold( + c, + &epoch.authorities, + pre_digest.authority_index as usize, + ); + + if !check_primary_threshold(&inout, threshold) { + return Err(babe_err(Error::VRFVerificationOfBlockFailed(author.clone(), threshold))); + } + + Ok(()) + } else { + Err(babe_err(Error::BadSignature(pre_hash))) + } +} + +/// Check a secondary slot proposal header. We validate that the given header is +/// properly signed by the expected authority, which we have a deterministic way +/// of computing. Additionally, the weight of this block must stay the same +/// compared to its parent since it is a secondary block. +fn check_secondary_plain_header( + pre_hash: B::Hash, + pre_digest: &SecondaryPlainPreDigest, + signature: AuthoritySignature, + epoch: &Epoch, +) -> Result<(), Error> { + info!("checking secondary plain header"); + // check the signature is valid under the expected authority and + // chain state. 
+ let expected_author = secondary_slot_author( + pre_digest.slot_number, + &epoch.authorities, + epoch.randomness, + ).ok_or_else(|| Error::NoSecondaryAuthorExpected)?; + + let author = &epoch.authorities[pre_digest.authority_index as usize].0; + + if expected_author != author { + return Err(Error::InvalidAuthor(expected_author.clone(), author.clone())); + } + + if AuthorityPair::verify(&signature, pre_hash.as_ref(), author) { + Ok(()) + } else { + Err(Error::BadSignature(pre_hash)) + } +} + +/// Check a secondary VRF slot proposal header. +fn check_secondary_vrf_header( + pre_hash: B::Hash, + pre_digest: &SecondaryVRFPreDigest, + signature: AuthoritySignature, + epoch: &Epoch, +) -> Result<(), Error> { + info!("checking secondary vrf header"); + // check the signature is valid under the expected authority and + // chain state. + let expected_author = secondary_slot_author( + pre_digest.slot_number, + &epoch.authorities, + epoch.randomness, + ).ok_or_else(|| Error::NoSecondaryAuthorExpected)?; + + let author = &epoch.authorities[pre_digest.authority_index as usize].0; + + if expected_author != author { + return Err(Error::InvalidAuthor(expected_author.clone(), author.clone())); + } + + if AuthorityPair::verify(&signature, pre_hash.as_ref(), author) { + let transcript = make_transcript( + &epoch.randomness, + pre_digest.slot_number, + epoch.epoch_index, + ); + + schnorrkel::PublicKey::from_bytes(author.as_slice()).and_then(|p| { + p.vrf_verify(transcript, &pre_digest.vrf_output, &pre_digest.vrf_proof) + }).map_err(|s| { + babe_err(Error::VRFVerificationFailed(s)) + })?; + + Ok(()) + } else { + Err(Error::BadSignature(pre_hash)) + } +} diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml new file mode 100644 index 0000000000000..c4997d6586053 --- /dev/null +++ b/client/network/Cargo.toml @@ -0,0 +1,84 @@ +[package] +description = "Substrate network protocol" +name = "sc-network" +version = "0.8.0" +license = "GPL-3.0-or-later WITH 
Classpath-exception-2.0" +authors = ["Parity Technologies "] +edition = "2018" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +documentation = "https://docs.rs/sc-network" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[build-dependencies] +prost-build = "0.6.1" + +[dependencies] +async-trait = "0.1" +async-std = { version = "1.6.2", features = ["unstable"] } +bitflags = "1.2.0" +bs58 = "0.3.1" +bytes = "0.5.0" +codec = { package = "parity-scale-codec", version = "1.3.4", features = ["derive"] } +derive_more = "0.99.2" +either = "1.5.3" +erased-serde = "0.3.9" +fnv = "1.0.6" +fork-tree = "2.0.0" +futures = "0.3.4" +futures-timer = "3.0.2" +futures_codec = "0.4.0" +hex = "0.4.0" +ip_network = "0.3.4" +linked-hash-map = "0.5.2" +linked_hash_set = "0.1.3" +log = "0.4.8" +lru = "0.4.0" +nohash-hasher = "0.2.0" +parking_lot = "0.10.0" +pin-project = "0.4.6" +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0" } +prost = "0.6.1" +rand = "0.7.2" +sc-block-builder = { version = "0.8.0", path = "../block-builder" } +sc-client-api = "2.0.0" +sc-peerset = "2.0.0" +serde = { version = "1.0.101", features = ["derive"] } +serde_json = "1.0.41" +slog = { version = "2.5.2", features = ["nested-values"] } +slog_derive = "0.2.0" +smallvec = "0.6.10" +sp-arithmetic = "2.0.0" +sp-blockchain = "2.0.0" +sp-consensus = "0.8.0" +sp-core = "2.0.0" +sp-runtime = "2.0.0" +sp-utils = "2.0.0" +thiserror = "1" +unsigned-varint = { version = "0.4.0", features = ["futures", "futures-codec"] } +void = "1.0.2" +wasm-timer = "0.2" +zeroize = "1.0.0" + +[dependencies.libp2p] +version = "0.28.1" +default-features = false +features = ["identify", "kad", "mdns-async-std", "mplex", "noise", "ping", "request-response", "tcp-async-std", "websocket", "yamux"] + +[dev-dependencies] +assert_matches = "1.3" +libp2p = { version = "0.28.1", default-features = false } +quickcheck = 
"0.9.0" +rand = "0.7.2" +sp-keyring = "2.0.0" +sp-test-primitives = "2.0.0" +sp-tracing = "2.0.0" +#substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" } +#substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } +tempfile = "3.1.0" + +[features] +default = [] diff --git a/client/network/README.md b/client/network/README.md new file mode 100644 index 0000000000000..e0bd691043bee --- /dev/null +++ b/client/network/README.md @@ -0,0 +1,226 @@ +Substrate-specific P2P networking. + +**Important**: This crate is unstable and the API and usage may change. + +# Node identities and addresses + +In a decentralized network, each node possesses a network private key and a network public key. +In Substrate, the keys are based on the ed25519 curve. + +From a node's public key, we can derive its *identity*. In Substrate and libp2p, a node's +identity is represented with the [`PeerId`] struct. All network communications between nodes on +the network use encryption derived from both sides's keys, which means that **identities cannot +be faked**. + +A node's identity uniquely identifies a machine on the network. If you start two or more +clients using the same network key, large interferences will happen. + +# Substrate's network protocol + +Substrate's networking protocol is based upon libp2p. It is at the moment not possible and not +planned to permit using something else than the libp2p network stack and the rust-libp2p +library. However the libp2p framework is very flexible and the rust-libp2p library could be +extended to support a wider range of protocols than what is offered by libp2p. + +## Discovery mechanisms + +In order for our node to join a peer-to-peer network, it has to know a list of nodes that are +part of said network. This includes nodes identities and their address (how to reach them). +Building such a list is called the **discovery** mechanism. 
There are three mechanisms that +Substrate uses: + +- Bootstrap nodes. These are hard-coded node identities and addresses passed alongside with +the network configuration. +- mDNS. We perform a UDP broadcast on the local network. Nodes that listen may respond with +their identity. More info [here](https://github.com/libp2p/specs/blob/master/discovery/mdns.md). +mDNS can be disabled in the network configuration. +- Kademlia random walk. Once connected, we perform random Kademlia `FIND_NODE` requests on the +configured Kademlia DHTs (one per configured chain protocol) in order for nodes to propagate to +us their view of the network. More information about Kademlia can be found [on +Wikipedia](https://en.wikipedia.org/wiki/Kademlia). + +## Connection establishment + +When node Alice knows node Bob's identity and address, it can establish a connection with Bob. +All connections must always use encryption and multiplexing. While some node addresses (eg. +addresses using `/quic`) already imply which encryption and/or multiplexing to use, for others +the **multistream-select** protocol is used in order to negotiate an encryption layer and/or a +multiplexing layer. + +The connection establishment mechanism is called the **transport**. + +As of the writing of this documentation, the following base-layer protocols are supported by +Substrate: + +- TCP/IP for addresses of the form `/ip4/1.2.3.4/tcp/5`. Once the TCP connection is open, an +encryption and a multiplexing layer are negotiated on top. +- WebSockets for addresses of the form `/ip4/1.2.3.4/tcp/5/ws`. A TCP/IP connection is open and +the WebSockets protocol is negotiated on top. Communications then happen inside WebSockets data +frames. Encryption and multiplexing are additionally negotiated again inside this channel. +- DNS for addresses of the form `/dns/example.com/tcp/5` or `/dns/example.com/tcp/5/ws`. A +node's address can contain a domain name. +- (All of the above using IPv6 instead of IPv4.) 
+ +On top of the base-layer protocol, the [Noise](https://noiseprotocol.org/) protocol is +negotiated and applied. The exact handshake protocol is experimental and is subject to change. + +The following multiplexing protocols are supported: + +- [Mplex](https://github.com/libp2p/specs/tree/master/mplex). Support for mplex will likely +be deprecated in the future. +- [Yamux](https://github.com/hashicorp/yamux/blob/master/spec.md). + +## Substreams + +Once a connection has been established and uses multiplexing, substreams can be opened. When +a substream is open, the **multistream-select** protocol is used to negotiate which protocol +to use on that given substream. + +Protocols that are specific to a certain chain have a `` in their name. This +"protocol ID" is defined in the chain specifications. For example, the protocol ID of Polkadot +is "dot". In the protocol names below, `` must be replaced with the corresponding +protocol ID. + +> **Note**: It is possible for the same connection to be used for multiple chains. For example, +> one can use both the `/dot/sync/2` and `/sub/sync/2` protocols on the same +> connection, provided that the remote supports them. + +Substrate uses the following standard libp2p protocols: + +- **`/ipfs/ping/1.0.0`**. We periodically open an ephemeral substream in order to ping the +remote and check whether the connection is still alive. Failure for the remote to reply leads +to a disconnection. +- **[`/ipfs/id/1.0.0`](https://github.com/libp2p/specs/tree/master/identify)**. We +periodically open an ephemeral substream in order to ask information from the remote. +- **[`//kad`](https://github.com/libp2p/specs/pull/108)**. We periodically open +ephemeral substreams for Kademlia random walk queries. Each Kademlia query is done in a +separate substream. 
+ +Additionally, Substrate uses the following non-libp2p-standard protocols: + +- **`/substrate//`** (where `` must be replaced with the +protocol ID of the targeted chain, and `` is a number between 2 and 6). For each +connection we optionally keep an additional substream for all Substrate-based communications alive. +This protocol is considered legacy, and is progressively being replaced with alternatives. +This is designated as "The legacy Substrate substream" in this documentation. See below for +more details. +- **`//sync/2`** is a request-response protocol (see below) that lets one perform +requests for information about blocks. Each request is the encoding of a `BlockRequest` and +each response is the encoding of a `BlockResponse`, as defined in the `api.v1.proto` file in +this source tree. +- **`//light/2`** is a request-response protocol (see below) that lets one perform +light-client-related requests for information about the state. Each request is the encoding of +a `light::Request` and each response is the encoding of a `light::Response`, as defined in the +`light.v1.proto` file in this source tree. +- **`//transactions/1`** is a notifications protocol (see below) where +transactions are pushed to other nodes. The handshake is empty on both sides. The message +format is a SCALE-encoded list of transactions, where each transaction is an opaque list of +bytes. +- **`//block-announces/1`** is a notifications protocol (see below) where +block announces are pushed to other nodes. The handshake is empty on both sides. The message +format is a SCALE-encoded tuple containing a block header followed with an opaque list of +bytes containing some data associated with this block announcement, e.g. a candidate message. +- Notifications protocols that are registered using the `register_notifications_protocol` +method. For example: `/paritytech/grandpa/1`. See below for more information. 
+ +## The legacy Substrate substream + +Substrate uses a component named the **peerset manager (PSM)**. Through the discovery +mechanism, the PSM is aware of the nodes that are part of the network and decides which nodes +we should perform Substrate-based communications with. For these nodes, we open a connection +if necessary and open a unique substream for Substrate-based communications. If the PSM decides +that we should disconnect a node, then that substream is closed. + +For more information about the PSM, see the *sc-peerset* crate. + +Note that at the moment there is no mechanism in place to solve the issues that arise where the +two sides of a connection open the unique substream simultaneously. In order to not run into +issues, only the dialer of a connection is allowed to open the unique substream. When the +substream is closed, the entire connection is closed as well. This is a bug that will be +resolved by deprecating the protocol entirely. + +Within the unique Substrate substream, messages encoded using +[*parity-scale-codec*](https://github.com/paritytech/parity-scale-codec) are exchanged. +The detail of these messages is not totally in place, but they can be found in the +`message.rs` file. + +Once the substream is open, the first step is an exchange of a *status* message from both +sides, containing information such as the chain root hash, head of chain, and so on. + +Communications within this substream include: + +- Syncing. Blocks are announced and requested from other nodes. +- Light-client requests. When a light client requires information, a random node we have a +substream open with is chosen, and the information is requested from it. +- Gossiping. Used for example by grandpa. + +## Request-response protocols + +A so-called request-response protocol is defined as follows: + +- When a substream is opened, the opening side sends a message whose content is +protocol-specific.
The message must be prefixed with an +[LEB128-encoded number](https://en.wikipedia.org/wiki/LEB128) indicating its length. After the +message has been sent, the writing side is closed. +- The remote sends back the response prefixed with a LEB128-encoded length, and closes its +side as well. + +Each request is performed in a new separate substream. + +## Notifications protocols + +A so-called notifications protocol is defined as follows: + +- When a substream is opened, the opening side sends a handshake message whose content is +protocol-specific. The handshake message must be prefixed with an +[LEB128-encoded number](https://en.wikipedia.org/wiki/LEB128) indicating its length. The +handshake message can be of length 0, in which case the sender has to send a single `0`. +- The receiver then either immediately closes the substream, or answers with its own +LEB128-prefixed protocol-specific handshake response. The message can be of length 0, in which +case a single `0` has to be sent back. +- Once the handshake has completed, the notifications protocol is unidirectional. Only the +node which initiated the substream can push notifications. If the remote wants to send +notifications as well, it has to open its own unidirectional substream. +- Each notification must be prefixed with an LEB128-encoded length. The encoding of the +messages is specific to each protocol. +- Either party can signal that it doesn't want a notifications substream anymore by closing +its writing side. The other party should respond by closing its own writing side soon after. + +The API of `sc-network` allows one to register user-defined notification protocols. +`sc-network` automatically tries to open a substream towards each node for which the legacy +Substrate substream is open. The handshake is then performed automatically. + +For example, the `sc-finality-grandpa` crate registers the `/paritytech/grandpa/1` +notifications protocol.
+ +At the moment, for backwards-compatibility, notification protocols are tied to the legacy +Substrate substream. Additionally, the handshake message is hardcoded to be a single 8-bits +integer representing the role of the node: + +- 1 for a full node. +- 2 for a light node. +- 4 for an authority. + +In the future, though, these restrictions will be removed. + +# Usage + +Using the `sc-network` crate is done through the [`NetworkWorker`] struct. Create this +struct by passing a [`config::Params`], then poll it as if it was a `Future`. You can extract an +`Arc` from the `NetworkWorker`, which can be shared amongst multiple places +in order to give orders to the networking. + +See the [`config`] module for more information about how to configure the networking. + +After the `NetworkWorker` has been created, the important things to do are: + +- Calling `NetworkWorker::poll` in order to advance the network. This can be done by +dispatching a background task with the [`NetworkWorker`]. +- Calling `on_block_import` whenever a block is added to the client. +- Calling `on_block_finalized` whenever a block is finalized. +- Calling `trigger_repropagate` when a transaction is added to the pool. + +More precise usage details are still being worked on and will likely change in the future. 
+ + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/network/build.rs b/client/network/build.rs new file mode 100644 index 0000000000000..8ed460f163eb4 --- /dev/null +++ b/client/network/build.rs @@ -0,0 +1,9 @@ +const PROTOS: &[&str] = &[ + "src/schema/api.v1.proto", + "src/schema/finality.v1.proto", + "src/schema/light.v1.proto" +]; + +fn main() { + prost_build::compile_protos(PROTOS, &["src/schema"]).unwrap(); +} diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs new file mode 100644 index 0000000000000..6b3cfac38ae99 --- /dev/null +++ b/client/network/src/behaviour.rs @@ -0,0 +1,548 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . 
+ +use crate::{ + config::{ProtocolId, Role}, block_requests, light_client_handler, finality_requests, + peer_info, request_responses, discovery::{DiscoveryBehaviour, DiscoveryConfig, DiscoveryOut}, + protocol::{message::{self, Roles}, CustomMessageOutcome, NotificationsSink, Protocol}, + ObservedRole, DhtEvent, ExHashT, +}; + +use bytes::Bytes; +use codec::Encode as _; +use libp2p::NetworkBehaviour; +use libp2p::core::{Multiaddr, PeerId, PublicKey}; +use libp2p::identify::IdentifyInfo; +use libp2p::kad::record; +use libp2p::swarm::{NetworkBehaviourAction, NetworkBehaviourEventProcess, PollParameters}; +use log::debug; +use sp_consensus::{BlockOrigin, import_queue::{IncomingBlock, Origin}}; +use sp_runtime::{traits::{Block as BlockT, NumberFor}, ConsensusEngineId, Justification}; +use std::{ + borrow::Cow, + collections::{HashSet, VecDeque}, + iter, + task::{Context, Poll}, + time::Duration, +}; + +pub use crate::request_responses::{ + ResponseFailure, InboundFailure, RequestFailure, OutboundFailure, RequestId, SendRequestError +}; + +/// General behaviour of the network. Combines all protocols together. +#[derive(NetworkBehaviour)] +#[behaviour(out_event = "BehaviourOut", poll_method = "poll")] +pub struct Behaviour { + /// All the substrate-specific protocols. + substrate: Protocol, + /// Periodically pings and identifies the nodes we are connected to, and store information in a + /// cache. + peer_info: peer_info::PeerInfoBehaviour, + /// Discovers nodes of the network. + discovery: DiscoveryBehaviour, + /// Generic request-reponse protocols. + request_responses: request_responses::RequestResponsesBehaviour, + /// Block request handling. + block_requests: block_requests::BlockRequests, + /// Finality proof request handling. + finality_proof_requests: finality_requests::FinalityProofRequests, + /// Light client request handling. + light_client_handler: light_client_handler::LightClientHandler, + + /// Queue of events to produce for the outside. 
+ #[behaviour(ignore)] + events: VecDeque>, + + /// Role of our local node, as originally passed from the configuration. + #[behaviour(ignore)] + role: Role, +} + +/// Event generated by `Behaviour`. +pub enum BehaviourOut { + BlockImport(BlockOrigin, Vec>), + JustificationImport(Origin, B::Hash, NumberFor, Justification), + FinalityProofImport(Origin, B::Hash, NumberFor, Vec), + + /// Started a random iterative Kademlia discovery query. + RandomKademliaStarted(ProtocolId), + + /// We have received a request from a peer and answered it. + /// + /// This event is generated for statistics purposes. + InboundRequest { + /// Peer which sent us a request. + peer: PeerId, + /// Protocol name of the request. + protocol: Cow<'static, str>, + /// If `Ok`, contains the time elapsed between when we received the request and when we + /// sent back the response. If `Err`, the error that happened. + result: Result, + }, + + /// A request initiated using [`Behaviour::send_request`] has succeeded or failed. + RequestFinished { + /// Request that has succeeded. + request_id: RequestId, + /// Response sent by the remote or reason for failure. + result: Result, RequestFailure>, + }, + + /// Started a new request with the given node. + /// + /// This event is for statistics purposes only. The request and response handling are entirely + /// internal to the behaviour. + OpaqueRequestStarted { + peer: PeerId, + /// Protocol name of the request. + protocol: String, + }, + /// Finished, successfully or not, a previously-started request. + /// + /// This event is for statistics purposes only. The request and response handling are entirely + /// internal to the behaviour. + OpaqueRequestFinished { + /// Who we were requesting. + peer: PeerId, + /// Protocol name of the request. + protocol: String, + /// How long before the response came or the request got cancelled. + request_duration: Duration, + }, + + /// Opened a substream with the given node with the given notifications protocol. 
+ /// + /// The protocol is always one of the notification protocols that have been registered. + NotificationStreamOpened { + /// Node we opened the substream with. + remote: PeerId, + /// The concerned protocol. Each protocol uses a different substream. + engine_id: ConsensusEngineId, + /// Object that permits sending notifications to the peer. + notifications_sink: NotificationsSink, + /// Role of the remote. + role: ObservedRole, + }, + + /// The [`NotificationsSink`] object used to send notifications with the given peer must be + /// replaced with a new one. + /// + /// This event is typically emitted when a transport-level connection is closed and we fall + /// back to a secondary connection. + NotificationStreamReplaced { + /// Id of the peer we are connected to. + remote: PeerId, + /// The concerned protocol. Each protocol uses a different substream. + engine_id: ConsensusEngineId, + /// Replacement for the previous [`NotificationsSink`]. + notifications_sink: NotificationsSink, + }, + + /// Closed a substream with the given node. Always matches a corresponding previous + /// `NotificationStreamOpened` message. + NotificationStreamClosed { + /// Node we closed the substream with. + remote: PeerId, + /// The concerned protocol. Each protocol uses a different substream. + engine_id: ConsensusEngineId, + }, + + /// Received one or more messages from the given node using the given protocol. + NotificationsReceived { + /// Node we received the message from. + remote: PeerId, + /// Concerned protocol and associated message. + messages: Vec<(ConsensusEngineId, Bytes)>, + }, + + /// Events generated by a DHT as a response to get_value or put_value requests as well as the + /// request duration. + Dht(DhtEvent, Duration), +} + +impl Behaviour { + /// Builds a new `Behaviour`. 
+ pub fn new( + substrate: Protocol, + role: Role, + user_agent: String, + local_public_key: PublicKey, + block_requests: block_requests::BlockRequests, + finality_proof_requests: finality_requests::FinalityProofRequests, + light_client_handler: light_client_handler::LightClientHandler, + disco_config: DiscoveryConfig, + request_response_protocols: Vec, + ) -> Result { + Ok(Behaviour { + substrate, + peer_info: peer_info::PeerInfoBehaviour::new(user_agent, local_public_key), + discovery: disco_config.finish(), + request_responses: + request_responses::RequestResponsesBehaviour::new(request_response_protocols.into_iter())?, + block_requests, + finality_proof_requests, + light_client_handler, + events: VecDeque::new(), + role, + }) + } + + /// Returns the list of nodes that we know exist in the network. + pub fn known_peers(&mut self) -> HashSet { + self.discovery.known_peers() + } + + /// Adds a hard-coded address for the given peer, that never expires. + pub fn add_known_address(&mut self, peer_id: PeerId, addr: Multiaddr) { + self.discovery.add_known_address(peer_id, addr) + } + + /// Returns the number of nodes in each Kademlia kbucket for each Kademlia instance. + /// + /// Identifies Kademlia instances by their [`ProtocolId`] and kbuckets by the base 2 logarithm + /// of their lower bound. + pub fn num_entries_per_kbucket(&mut self) -> impl ExactSizeIterator)> { + self.discovery.num_entries_per_kbucket() + } + + /// Returns the number of records in the Kademlia record stores. + pub fn num_kademlia_records(&mut self) -> impl ExactSizeIterator { + self.discovery.num_kademlia_records() + } + + /// Returns the total size in bytes of all the records in the Kademlia record stores. + pub fn kademlia_records_total_size(&mut self) -> impl ExactSizeIterator { + self.discovery.kademlia_records_total_size() + } + + /// Borrows `self` and returns a struct giving access to the information about a node. + /// + /// Returns `None` if we don't know anything about this node. 
Always returns `Some` for nodes + /// we're connected to, meaning that if `None` is returned then we're not connected to that + /// node. + pub fn node(&self, peer_id: &PeerId) -> Option { + self.peer_info.node(peer_id) + } + + /// Initiates sending a request. + /// + /// An error is returned if we are not connected to the target peer of if the protocol doesn't + /// match one that has been registered. + pub fn send_request(&mut self, target: &PeerId, protocol: &str, request: Vec) + -> Result + { + self.request_responses.send_request(target, protocol, request) + } + + /// Registers a new notifications protocol. + /// + /// Please call `event_stream` before registering a protocol, otherwise you may miss events + /// about the protocol that you have registered. + /// + /// You are very strongly encouraged to call this method very early on. Any connection open + /// will retain the protocols that were registered then, and not any new one. + pub fn register_notifications_protocol( + &mut self, + engine_id: ConsensusEngineId, + protocol_name: impl Into>, + ) { + // This is the message that we will send to the remote as part of the initial handshake. + // At the moment, we force this to be an encoded `Roles`. + let handshake_message = Roles::from(&self.role).encode(); + + let list = self.substrate.register_notifications_protocol(engine_id, protocol_name, handshake_message); + for (remote, roles, notifications_sink) in list { + let role = reported_roles_to_observed_role(&self.role, remote, roles); + self.events.push_back(BehaviourOut::NotificationStreamOpened { + remote: remote.clone(), + engine_id, + role, + notifications_sink: notifications_sink.clone(), + }); + } + } + + /// Returns a shared reference to the user protocol. + pub fn user_protocol(&self) -> &Protocol { + &self.substrate + } + + /// Returns a mutable reference to the user protocol. 
+ pub fn user_protocol_mut(&mut self) -> &mut Protocol { + &mut self.substrate + } + + /// Start querying a record from the DHT. Will later produce either a `ValueFound` or a `ValueNotFound` event. + pub fn get_value(&mut self, key: &record::Key) { + self.discovery.get_value(key); + } + + /// Starts putting a record into DHT. Will later produce either a `ValuePut` or a `ValuePutFailed` event. + pub fn put_value(&mut self, key: record::Key, value: Vec) { + self.discovery.put_value(key, value); + } + + /// Issue a light client request. + pub fn light_client_request(&mut self, r: light_client_handler::Request) -> Result<(), light_client_handler::Error> { + self.light_client_handler.request(r) + } +} + +fn reported_roles_to_observed_role(local_role: &Role, remote: &PeerId, roles: Roles) -> ObservedRole { + if roles.is_authority() { + match local_role { + Role::Authority { sentry_nodes } + if sentry_nodes.iter().any(|s| s.peer_id == *remote) => ObservedRole::OurSentry, + Role::Sentry { validators } + if validators.iter().any(|s| s.peer_id == *remote) => ObservedRole::OurGuardedAuthority, + _ => ObservedRole::Authority + } + } else if roles.is_full() { + ObservedRole::Full + } else { + ObservedRole::Light + } +} + +impl NetworkBehaviourEventProcess for +Behaviour { + fn inject_event(&mut self, event: void::Void) { + void::unreachable(event) + } +} + +impl NetworkBehaviourEventProcess> for +Behaviour { + fn inject_event(&mut self, event: CustomMessageOutcome) { + match event { + CustomMessageOutcome::BlockImport(origin, blocks) => + self.events.push_back(BehaviourOut::BlockImport(origin, blocks)), + CustomMessageOutcome::JustificationImport(origin, hash, nb, justification) => + self.events.push_back(BehaviourOut::JustificationImport(origin, hash, nb, justification)), + CustomMessageOutcome::FinalityProofImport(origin, hash, nb, proof) => + self.events.push_back(BehaviourOut::FinalityProofImport(origin, hash, nb, proof)), + CustomMessageOutcome::BlockRequest { target, 
request } => { + match self.block_requests.send_request(&target, request) { + block_requests::SendRequestOutcome::Ok => { + self.events.push_back(BehaviourOut::OpaqueRequestStarted { + peer: target, + protocol: self.block_requests.protocol_name().to_owned(), + }); + }, + block_requests::SendRequestOutcome::Replaced { request_duration, .. } => { + self.events.push_back(BehaviourOut::OpaqueRequestFinished { + peer: target.clone(), + protocol: self.block_requests.protocol_name().to_owned(), + request_duration, + }); + self.events.push_back(BehaviourOut::OpaqueRequestStarted { + peer: target, + protocol: self.block_requests.protocol_name().to_owned(), + }); + } + block_requests::SendRequestOutcome::NotConnected | + block_requests::SendRequestOutcome::EncodeError(_) => {}, + } + }, + CustomMessageOutcome::FinalityProofRequest { target, block_hash, request } => { + self.finality_proof_requests.send_request(&target, block_hash, request); + }, + CustomMessageOutcome::NotificationStreamOpened { remote, protocols, roles, notifications_sink } => { + let role = reported_roles_to_observed_role(&self.role, &remote, roles); + for engine_id in protocols { + self.events.push_back(BehaviourOut::NotificationStreamOpened { + remote: remote.clone(), + engine_id, + role: role.clone(), + notifications_sink: notifications_sink.clone(), + }); + } + }, + CustomMessageOutcome::NotificationStreamReplaced { remote, protocols, notifications_sink } => + for engine_id in protocols { + self.events.push_back(BehaviourOut::NotificationStreamReplaced { + remote: remote.clone(), + engine_id, + notifications_sink: notifications_sink.clone(), + }); + }, + CustomMessageOutcome::NotificationStreamClosed { remote, protocols } => + for engine_id in protocols { + self.events.push_back(BehaviourOut::NotificationStreamClosed { + remote: remote.clone(), + engine_id, + }); + }, + CustomMessageOutcome::NotificationsReceived { remote, messages } => { + self.events.push_back(BehaviourOut::NotificationsReceived { 
remote, messages }); + }, + CustomMessageOutcome::PeerNewBest(peer_id, number) => { + self.light_client_handler.update_best_block(&peer_id, number); + } + CustomMessageOutcome::None => {} + } + } +} + +impl NetworkBehaviourEventProcess for Behaviour { + fn inject_event(&mut self, event: request_responses::Event) { + match event { + request_responses::Event::InboundRequest { peer, protocol, result } => { + self.events.push_back(BehaviourOut::InboundRequest { + peer, + protocol, + result, + }); + } + + request_responses::Event::RequestFinished { request_id, result } => { + self.events.push_back(BehaviourOut::RequestFinished { + request_id, + result, + }); + }, + } + } +} + +impl NetworkBehaviourEventProcess> for Behaviour { + fn inject_event(&mut self, event: block_requests::Event) { + match event { + block_requests::Event::AnsweredRequest { peer, total_handling_time } => { + self.events.push_back(BehaviourOut::InboundRequest { + peer, + protocol: self.block_requests.protocol_name().to_owned().into(), + result: Ok(total_handling_time), + }); + }, + block_requests::Event::Response { peer, original_request: _, response, request_duration } => { + self.events.push_back(BehaviourOut::OpaqueRequestFinished { + peer: peer.clone(), + protocol: self.block_requests.protocol_name().to_owned(), + request_duration, + }); + let ev = self.substrate.on_block_response(peer, response); + self.inject_event(ev); + } + block_requests::Event::RequestCancelled { peer, request_duration, .. } | + block_requests::Event::RequestTimeout { peer, request_duration, .. } => { + // There doesn't exist any mechanism to report cancellations or timeouts yet, so + // we process them by disconnecting the node. 
+ self.events.push_back(BehaviourOut::OpaqueRequestFinished { + peer: peer.clone(), + protocol: self.block_requests.protocol_name().to_owned(), + request_duration, + }); + self.substrate.on_block_request_failed(&peer); + } + } + } +} + +impl NetworkBehaviourEventProcess> for Behaviour { + fn inject_event(&mut self, event: finality_requests::Event) { + match event { + finality_requests::Event::Response { peer, block_hash, proof } => { + let response = message::FinalityProofResponse { + id: 0, + block: block_hash, + proof: if !proof.is_empty() { + Some(proof) + } else { + None + }, + }; + let ev = self.substrate.on_finality_proof_response(peer, response); + self.inject_event(ev); + } + } + } +} + +impl NetworkBehaviourEventProcess + for Behaviour { + fn inject_event(&mut self, event: peer_info::PeerInfoEvent) { + let peer_info::PeerInfoEvent::Identified { + peer_id, + info: IdentifyInfo { + protocol_version, + agent_version, + mut listen_addrs, + protocols, + .. + }, + } = event; + + if listen_addrs.len() > 30 { + debug!( + target: "sub-libp2p", + "Node {:?} has reported more than 30 addresses; it is identified by {:?} and {:?}", + peer_id, protocol_version, agent_version + ); + listen_addrs.truncate(30); + } + + for addr in listen_addrs { + self.discovery.add_self_reported_address(&peer_id, protocols.iter(), addr); + } + self.substrate.add_discovered_nodes(iter::once(peer_id)); + } +} + +impl NetworkBehaviourEventProcess + for Behaviour { + fn inject_event(&mut self, out: DiscoveryOut) { + match out { + DiscoveryOut::UnroutablePeer(_peer_id) => { + // Obtaining and reporting listen addresses for unroutable peers back + // to Kademlia is handled by the `Identify` protocol, part of the + // `PeerInfoBehaviour`. See the `NetworkBehaviourEventProcess` + // implementation for `PeerInfoEvent`. 
+ } + DiscoveryOut::Discovered(peer_id) => { + self.substrate.add_discovered_nodes(iter::once(peer_id)); + } + DiscoveryOut::ValueFound(results, duration) => { + self.events.push_back(BehaviourOut::Dht(DhtEvent::ValueFound(results), duration)); + } + DiscoveryOut::ValueNotFound(key, duration) => { + self.events.push_back(BehaviourOut::Dht(DhtEvent::ValueNotFound(key), duration)); + } + DiscoveryOut::ValuePut(key, duration) => { + self.events.push_back(BehaviourOut::Dht(DhtEvent::ValuePut(key), duration)); + } + DiscoveryOut::ValuePutFailed(key, duration) => { + self.events.push_back(BehaviourOut::Dht(DhtEvent::ValuePutFailed(key), duration)); + } + DiscoveryOut::RandomKademliaStarted(protocols) => { + for protocol in protocols { + self.events.push_back(BehaviourOut::RandomKademliaStarted(protocol)); + } + } + } + } +} + +impl Behaviour { + fn poll(&mut self, _: &mut Context, _: &mut impl PollParameters) -> Poll>> { + if let Some(event) = self.events.pop_front() { + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)) + } + + Poll::Pending + } +} diff --git a/client/network/src/block_requests.rs b/client/network/src/block_requests.rs new file mode 100644 index 0000000000000..7ee8f18f3a26f --- /dev/null +++ b/client/network/src/block_requests.rs @@ -0,0 +1,866 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. +// +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . 
+ +//! `NetworkBehaviour` implementation which handles incoming block requests. +//! +//! Every request is coming in on a separate connection substream which gets +//! closed after we have sent the response back. Incoming requests are encoded +//! as protocol buffers (cf. `api.v1.proto`). + +#![allow(unused)] + +use bytes::Bytes; +use codec::{Encode, Decode}; +use crate::{ + chain::Client, + config::ProtocolId, + protocol::{message::{self, BlockAttributes}}, + schema, +}; +use futures::{future::BoxFuture, prelude::*, stream::FuturesUnordered}; +use futures_timer::Delay; +use libp2p::{ + core::{ + ConnectedPoint, + Multiaddr, + PeerId, + connection::ConnectionId, + upgrade::{InboundUpgrade, OutboundUpgrade, ReadOneError, UpgradeInfo, Negotiated}, + upgrade::{DeniedUpgrade, read_one, write_one} + }, + swarm::{ + NegotiatedSubstream, + NetworkBehaviour, + NetworkBehaviourAction, + NotifyHandler, + OneShotHandler, + OneShotHandlerConfig, + PollParameters, + SubstreamProtocol + } +}; +use prost::Message; +use sp_runtime::{generic::BlockId, traits::{Block, Header, One, Zero}}; +use std::{ + cmp::min, + collections::{HashMap, VecDeque}, + io, + iter, + marker::PhantomData, + pin::Pin, + sync::Arc, + time::Duration, + task::{Context, Poll} +}; +use void::{Void, unreachable}; +use wasm_timer::Instant; + +// Type alias for convenience. +pub type Error = Box; + +/// Event generated by the block requests behaviour. +#[derive(Debug)] +pub enum Event { + /// A request came and we have successfully answered it. + AnsweredRequest { + /// Peer which has emitted the request. + peer: PeerId, + /// Time elapsed between when we received the request and when we sent back the response. + total_handling_time: Duration, + }, + + /// A response to a block request has arrived. + Response { + peer: PeerId, + /// The original request passed to `send_request`. 
+ original_request: message::BlockRequest, + response: message::BlockResponse, + /// Time elapsed between the start of the request and the response. + request_duration: Duration, + }, + + /// A request has been cancelled because the peer has disconnected. + /// Disconnects can also happen as a result of violating the network protocol. + /// + /// > **Note**: This event is NOT emitted if a request is overridden by calling `send_request`. + /// > For that, you must check the value returned by `send_request`. + RequestCancelled { + peer: PeerId, + /// The original request passed to `send_request`. + original_request: message::BlockRequest, + /// Time elapsed between the start of the request and the cancellation. + request_duration: Duration, + }, + + /// A request has timed out. + RequestTimeout { + peer: PeerId, + /// The original request passed to `send_request`. + original_request: message::BlockRequest, + /// Time elapsed between the start of the request and the timeout. + request_duration: Duration, + } +} + +/// Configuration options for `BlockRequests`. +#[derive(Debug, Clone)] +pub struct Config { + max_block_data_response: u32, + max_block_body_bytes: usize, + max_request_len: usize, + max_response_len: usize, + inactivity_timeout: Duration, + request_timeout: Duration, + protocol: String, +} + +impl Config { + /// Create a fresh configuration with the following options: + /// + /// - max. block data in response = 128 + /// - max. request size = 1 MiB + /// - max. response size = 16 MiB + /// - inactivity timeout = 15s + /// - request timeout = 40s + pub fn new(id: &ProtocolId) -> Self { + let mut c = Config { + max_block_data_response: 128, + max_block_body_bytes: 8 * 1024 * 1024, + max_request_len: 1024 * 1024, + max_response_len: 16 * 1024 * 1024, + inactivity_timeout: Duration::from_secs(15), + request_timeout: Duration::from_secs(40), + protocol: String::new(), + }; + c.set_protocol(id); + c + } + + /// Limit the max. number of block data in a response. 
+ pub fn set_max_block_data_response(&mut self, v: u32) -> &mut Self { + self.max_block_data_response = v; + self + } + + /// Limit the max. length of incoming block request bytes. + pub fn set_max_request_len(&mut self, v: usize) -> &mut Self { + self.max_request_len = v; + self + } + + /// Limit the max. size of responses to our block requests. + pub fn set_max_response_len(&mut self, v: usize) -> &mut Self { + self.max_response_len = v; + self + } + + /// Limit the max. duration the substream may remain inactive before closing it. + pub fn set_inactivity_timeout(&mut self, v: Duration) -> &mut Self { + self.inactivity_timeout = v; + self + } + + /// Set the maximum total bytes of block bodies that are send in the response. + /// Note that at least one block is always sent regardless of the limit. + /// This should be lower than the value specified in `set_max_response_len` + /// accounting for headers, justifications and encoding overhead. + pub fn set_max_block_body_bytes(&mut self, v: usize) -> &mut Self { + self.max_block_body_bytes = v; + self + } + + /// Set protocol to use for upgrade negotiation. + pub fn set_protocol(&mut self, id: &ProtocolId) -> &mut Self { + let mut s = String::new(); + s.push_str("/"); + s.push_str(id.as_ref()); + s.push_str("/sync/2"); + self.protocol = s; + self + } +} + +/// The block request handling behaviour. +pub struct BlockRequests { + /// This behaviour's configuration. + config: Config, + /// Blockchain client. + chain: Arc>, + /// List of all active connections and the requests we've sent. + peers: HashMap>>, + /// Futures sending back the block request response. Returns the `PeerId` we sent back to, and + /// the total time the handling of this request took. + outgoing: FuturesUnordered>, + /// Events to return as soon as possible from `poll`. + pending_events: VecDeque, Event>>, +} + +/// Local tracking of a libp2p connection. 
+#[derive(Debug)] +struct Connection { + id: ConnectionId, + ongoing_request: Option>, +} + +#[derive(Debug)] +struct OngoingRequest { + /// `Instant` when the request has been emitted. Used for diagnostic purposes. + emitted: Instant, + request: message::BlockRequest, + timeout: Delay, +} + +/// Outcome of calling `send_request`. +#[derive(Debug)] +#[must_use] +pub enum SendRequestOutcome { + /// Request has been emitted. + Ok, + /// The request has been emitted and has replaced an existing request. + Replaced { + /// The previously-emitted request. + previous: message::BlockRequest, + /// Time that had elapsed since `previous` has been emitted. + request_duration: Duration, + }, + /// Didn't start a request because we have no connection to this node. + /// If `send_request` returns that, it is as if the function had never been called. + NotConnected, + /// Error while serializing the request. + EncodeError(prost::EncodeError), +} + +impl BlockRequests +where + B: Block, +{ + pub fn new(cfg: Config, chain: Arc>) -> Self { + BlockRequests { + config: cfg, + chain, + peers: HashMap::new(), + outgoing: FuturesUnordered::new(), + pending_events: VecDeque::new(), + } + } + + /// Returns the libp2p protocol name used on the wire (e.g. `/foo/sync/2`). + pub fn protocol_name(&self) -> &str { + &self.config.protocol + } + + /// Issue a new block request. + /// + /// Cancels any existing request targeting the same `PeerId`. + /// + /// If the response doesn't arrive in time, or if the remote answers improperly, the target + /// will be disconnected. + pub fn send_request(&mut self, target: &PeerId, req: message::BlockRequest) -> SendRequestOutcome { + // Determine which connection to send the request to. + let connection = if let Some(peer) = self.peers.get_mut(target) { + // We don't want to have multiple requests for any given node, so in priority try to + // find a connection with an existing request, to override it. 
+ if let Some(entry) = peer.iter_mut().find(|c| c.ongoing_request.is_some()) { + entry + } else if let Some(entry) = peer.get_mut(0) { + entry + } else { + log::error!( + target: "sync", + "State inconsistency: empty list of peer connections" + ); + return SendRequestOutcome::NotConnected; + } + } else { + return SendRequestOutcome::NotConnected; + }; + + let protobuf_rq = build_protobuf_block_request( + req.fields, + req.from.clone(), + req.to.clone(), + req.direction, + req.max, + ); + + let mut buf = Vec::with_capacity(protobuf_rq.encoded_len()); + if let Err(err) = protobuf_rq.encode(&mut buf) { + log::warn!( + target: "sync", + "Failed to encode block request {:?}: {:?}", + protobuf_rq, + err + ); + return SendRequestOutcome::EncodeError(err); + } + + let previous_request = connection.ongoing_request.take(); + connection.ongoing_request = Some(OngoingRequest { + emitted: Instant::now(), + request: req.clone(), + timeout: Delay::new(self.config.request_timeout), + }); + + log::trace!(target: "sync", "Enqueueing block request to {:?}: {:?}", target, protobuf_rq); + self.pending_events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: target.clone(), + handler: NotifyHandler::One(connection.id), + event: OutboundProtocol { + request: buf, + original_request: req, + max_response_size: self.config.max_response_len, + protocol: self.config.protocol.as_bytes().to_vec().into(), + }, + }); + + if let Some(previous_request) = previous_request { + log::debug!( + target: "sync", + "Replacing existing block request on connection {:?}", + connection.id + ); + SendRequestOutcome::Replaced { + previous: previous_request.request, + request_duration: previous_request.emitted.elapsed(), + } + } else { + SendRequestOutcome::Ok + } + } + + /// Callback, invoked when a new block request has been received from remote. 
+ fn on_block_request + ( &mut self + , peer: &PeerId + , request: &schema::v1::BlockRequest + ) -> Result + { + log::trace!( + target: "sync", + "Block request from peer {}: from block {:?} to block {:?}, max blocks {:?}", + peer, + request.from_block, + request.to_block, + request.max_blocks); + + let from_block_id = + match request.from_block { + Some(schema::v1::block_request::FromBlock::Hash(ref h)) => { + let h = Decode::decode(&mut h.as_ref())?; + BlockId::::Hash(h) + } + Some(schema::v1::block_request::FromBlock::Number(ref n)) => { + let n = Decode::decode(&mut n.as_ref())?; + BlockId::::Number(n) + } + None => { + let msg = "missing `BlockRequest::from_block` field"; + return Err(io::Error::new(io::ErrorKind::Other, msg).into()) + } + }; + + let max_blocks = + if request.max_blocks == 0 { + self.config.max_block_data_response + } else { + min(request.max_blocks, self.config.max_block_data_response) + }; + + let direction = + if request.direction == schema::v1::Direction::Ascending as i32 { + schema::v1::Direction::Ascending + } else if request.direction == schema::v1::Direction::Descending as i32 { + schema::v1::Direction::Descending + } else { + let msg = format!("invalid `BlockRequest::direction` value: {}", request.direction); + return Err(io::Error::new(io::ErrorKind::Other, msg).into()) + }; + + let attributes = BlockAttributes::from_be_u32(request.fields)?; + let get_header = attributes.contains(BlockAttributes::HEADER); + let get_body = attributes.contains(BlockAttributes::BODY); + let get_justification = attributes.contains(BlockAttributes::JUSTIFICATION); + + let mut blocks = Vec::new(); + let mut block_id = from_block_id; + let mut total_size = 0; + while let Some(header) = self.chain.header(block_id).unwrap_or(None) { + if blocks.len() >= max_blocks as usize + || (blocks.len() >= 1 && total_size > self.config.max_block_body_bytes) + { + break + } + + let number = *header.number(); + let hash = header.hash(); + let parent_hash = 
*header.parent_hash(); + let justification = if get_justification { + self.chain.justification(&BlockId::Hash(hash))? + } else { + None + }; + let is_empty_justification = justification.as_ref().map(|j| j.is_empty()).unwrap_or(false); + + let body = if get_body { + match self.chain.block_body(&BlockId::Hash(hash))? { + Some(mut extrinsics) => extrinsics.iter_mut() + .map(|extrinsic| extrinsic.encode()) + .collect(), + None => { + log::trace!(target: "sync", "Missing data for block request."); + break; + } + } + } else { + Vec::new() + }; + + let block_data = schema::v1::BlockData { + hash: hash.encode(), + header: if get_header { + header.encode() + } else { + Vec::new() + }, + body, + receipt: Vec::new(), + message_queue: Vec::new(), + justification: justification.unwrap_or_default(), + is_empty_justification, + }; + + total_size += block_data.body.len(); + blocks.push(block_data); + + match direction { + schema::v1::Direction::Ascending => { + block_id = BlockId::Number(number + One::one()) + } + schema::v1::Direction::Descending => { + if number.is_zero() { + break + } + block_id = BlockId::Hash(parent_hash) + } + } + } + + Ok(schema::v1::BlockResponse { blocks }) + } +} + +impl NetworkBehaviour for BlockRequests +where + B: Block +{ + type ProtocolsHandler = OneShotHandler, OutboundProtocol, NodeEvent>; + type OutEvent = Event; + + fn new_handler(&mut self) -> Self::ProtocolsHandler { + let p = InboundProtocol { + max_request_len: self.config.max_request_len, + protocol: self.config.protocol.as_bytes().to_owned().into(), + marker: PhantomData, + }; + let mut cfg = OneShotHandlerConfig::default(); + cfg.keep_alive_timeout = self.config.inactivity_timeout; + cfg.outbound_substream_timeout = self.config.request_timeout; + OneShotHandler::new(SubstreamProtocol::new(p, ()), cfg) + } + + fn addresses_of_peer(&mut self, _: &PeerId) -> Vec { + Vec::new() + } + + fn inject_connected(&mut self, _peer: &PeerId) { + } + + fn inject_disconnected(&mut self, _peer: &PeerId) { 
+ } + + fn inject_connection_established(&mut self, peer_id: &PeerId, id: &ConnectionId, _: &ConnectedPoint) { + self.peers.entry(peer_id.clone()) + .or_default() + .push(Connection { + id: *id, + ongoing_request: None, + }); + } + + fn inject_connection_closed(&mut self, peer_id: &PeerId, id: &ConnectionId, _: &ConnectedPoint) { + let mut needs_remove = false; + if let Some(entry) = self.peers.get_mut(peer_id) { + if let Some(pos) = entry.iter().position(|i| i.id == *id) { + let ongoing_request = entry.remove(pos).ongoing_request; + if let Some(ongoing_request) = ongoing_request { + log::debug!( + target: "sync", + "Connection {:?} with {} closed with ongoing sync request: {:?}", + id, + peer_id, + ongoing_request + ); + let ev = Event::RequestCancelled { + peer: peer_id.clone(), + original_request: ongoing_request.request.clone(), + request_duration: ongoing_request.emitted.elapsed(), + }; + self.pending_events.push_back(NetworkBehaviourAction::GenerateEvent(ev)); + } + if entry.is_empty() { + needs_remove = true; + } + } else { + log::error!( + target: "sync", + "State inconsistency: connection id not found in list" + ); + } + } else { + log::error!( + target: "sync", + "State inconsistency: peer_id not found in list of connections" + ); + } + if needs_remove { + self.peers.remove(peer_id); + } + } + + fn inject_event( + &mut self, + peer: PeerId, + connection_id: ConnectionId, + node_event: NodeEvent + ) { + match node_event { + NodeEvent::Request(request, mut stream, handling_start) => { + match self.on_block_request(&peer, &request) { + Ok(res) => { + log::trace!( + target: "sync", + "Enqueueing block response for peer {} with {} blocks", + peer, res.blocks.len() + ); + let mut data = Vec::with_capacity(res.encoded_len()); + if let Err(e) = res.encode(&mut data) { + log::debug!( + target: "sync", + "Error encoding block response for peer {}: {}", + peer, e + ) + } else { + self.outgoing.push(async move { + if let Err(e) = write_one(&mut stream, data).await { 
+ log::debug!( + target: "sync", + "Error writing block response: {}", + e + ); + } + (peer, handling_start.elapsed()) + }.boxed()); + } + } + Err(e) => log::debug!( + target: "sync", + "Error handling block request from peer {}: {}", peer, e + ) + } + } + NodeEvent::Response(original_request, response) => { + log::trace!( + target: "sync", + "Received block response from peer {} with {} blocks", + peer, response.blocks.len() + ); + let request_duration = if let Some(connections) = self.peers.get_mut(&peer) { + if let Some(connection) = connections.iter_mut().find(|c| c.id == connection_id) { + if let Some(ongoing_request) = &mut connection.ongoing_request { + if ongoing_request.request == original_request { + let request_duration = ongoing_request.emitted.elapsed(); + connection.ongoing_request = None; + request_duration + } else { + // We're no longer interested in that request. + log::debug!( + target: "sync", + "Received response from {} to obsolete block request {:?}", + peer, + original_request + ); + return; + } + } else { + // We remove from `self.peers` requests we're no longer interested in, + // so this can legitimately happen. + log::trace!( + target: "sync", + "Response discarded because it concerns an obsolete request" + ); + return; + } + } else { + log::error!( + target: "sync", + "State inconsistency: response on non-existing connection {:?}", + connection_id + ); + return; + } + } else { + log::error!( + target: "sync", + "State inconsistency: response on non-connected peer {}", + peer + ); + return; + }; + + let blocks = response.blocks.into_iter().map(|block_data| { + Ok(message::BlockData:: { + hash: Decode::decode(&mut block_data.hash.as_ref())?, + header: if !block_data.header.is_empty() { + Some(Decode::decode(&mut block_data.header.as_ref())?) 
+ } else { + None + }, + body: if original_request.fields.contains(message::BlockAttributes::BODY) { + Some(block_data.body.iter().map(|body| { + Decode::decode(&mut body.as_ref()) + }).collect::, _>>()?) + } else { + None + }, + receipt: if !block_data.message_queue.is_empty() { + Some(block_data.receipt) + } else { + None + }, + message_queue: if !block_data.message_queue.is_empty() { + Some(block_data.message_queue) + } else { + None + }, + justification: if !block_data.justification.is_empty() { + Some(block_data.justification) + } else if block_data.is_empty_justification { + Some(Vec::new()) + } else { + None + }, + }) + }).collect::, codec::Error>>(); + + match blocks { + Ok(blocks) => { + let id = original_request.id; + let ev = Event::Response { + peer, + original_request, + response: message::BlockResponse:: { id, blocks }, + request_duration, + }; + self.pending_events.push_back(NetworkBehaviourAction::GenerateEvent(ev)); + } + Err(err) => { + log::debug!( + target: "sync", + "Failed to decode block response from peer {}: {}", peer, err + ); + } + } + } + } + } + + fn poll(&mut self, cx: &mut Context, _: &mut impl PollParameters) + -> Poll, Event>> + { + if let Some(ev) = self.pending_events.pop_front() { + return Poll::Ready(ev); + } + + // Check the request timeouts. 
+ for (peer, connections) in &mut self.peers { + for connection in connections { + let ongoing_request = match &mut connection.ongoing_request { + Some(rq) => rq, + None => continue, + }; + + if let Poll::Ready(_) = Pin::new(&mut ongoing_request.timeout).poll(cx) { + let original_request = ongoing_request.request.clone(); + let request_duration = ongoing_request.emitted.elapsed(); + connection.ongoing_request = None; + log::debug!( + target: "sync", + "Request timeout for {}: {:?}", + peer, original_request + ); + let ev = Event::RequestTimeout { + peer: peer.clone(), + original_request, + request_duration, + }; + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); + } + } + } + + if let Poll::Ready(Some((peer, total_handling_time))) = self.outgoing.poll_next_unpin(cx) { + let ev = Event::AnsweredRequest { + peer, + total_handling_time, + }; + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); + } + + Poll::Pending + } +} + +/// Output type of inbound and outbound substream upgrades. +#[derive(Debug)] +pub enum NodeEvent { + /// Incoming request from remote, substream to use for the response, and when we started + /// handling this request. + Request(schema::v1::BlockRequest, T, Instant), + /// Incoming response from remote. + Response(message::BlockRequest, schema::v1::BlockResponse), +} + +/// Substream upgrade protocol. +/// +/// We attempt to parse an incoming protobuf encoded request (cf. `Request`) +/// which will be handled by the `BlockRequests` behaviour, i.e. the request +/// will become visible via `inject_node_event` which then dispatches to the +/// relevant callback to process the message and prepare a response. +#[derive(Debug, Clone)] +pub struct InboundProtocol { + /// The max. request length in bytes. + max_request_len: usize, + /// The protocol to use during upgrade negotiation. + protocol: Bytes, + /// Type of the block. 
+ marker: PhantomData, +} + +impl UpgradeInfo for InboundProtocol { + type Info = Bytes; + type InfoIter = iter::Once; + + fn protocol_info(&self) -> Self::InfoIter { + iter::once(self.protocol.clone()) + } +} + +impl InboundUpgrade for InboundProtocol +where + B: Block, + T: AsyncRead + AsyncWrite + Unpin + Send + 'static +{ + type Output = NodeEvent; + type Error = ReadOneError; + type Future = BoxFuture<'static, Result>; + + fn upgrade_inbound(self, mut s: T, _: Self::Info) -> Self::Future { + // This `Instant` will be passed around until the processing of this request is done. + let handling_start = Instant::now(); + + let future = async move { + let len = self.max_request_len; + let vec = read_one(&mut s, len).await?; + match schema::v1::BlockRequest::decode(&vec[..]) { + Ok(r) => Ok(NodeEvent::Request(r, s, handling_start)), + Err(e) => Err(ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e))) + } + }; + future.boxed() + } +} + +/// Substream upgrade protocol. +/// +/// Sends a request to remote and awaits the response. +#[derive(Debug, Clone)] +pub struct OutboundProtocol { + /// The serialized protobuf request. + request: Vec, + /// The original request. Passed back through the API when the response comes back. + original_request: message::BlockRequest, + /// The max. response length in bytes. + max_response_size: usize, + /// The protocol to use for upgrade negotiation. 
+ protocol: Bytes, +} + +impl UpgradeInfo for OutboundProtocol { + type Info = Bytes; + type InfoIter = iter::Once; + + fn protocol_info(&self) -> Self::InfoIter { + iter::once(self.protocol.clone()) + } +} + +impl OutboundUpgrade for OutboundProtocol +where + B: Block, + T: AsyncRead + AsyncWrite + Unpin + Send + 'static +{ + type Output = NodeEvent; + type Error = ReadOneError; + type Future = BoxFuture<'static, Result>; + + fn upgrade_outbound(self, mut s: T, _: Self::Info) -> Self::Future { + async move { + write_one(&mut s, &self.request).await?; + let vec = read_one(&mut s, self.max_response_size).await?; + + schema::v1::BlockResponse::decode(&vec[..]) + .map(|r| NodeEvent::Response(self.original_request, r)) + .map_err(|e| { + ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e)) + }) + }.boxed() + } +} + +/// Build protobuf block request message. +pub(crate) fn build_protobuf_block_request( + attributes: BlockAttributes, + from_block: message::FromBlock, + to_block: Option, + direction: message::Direction, + max_blocks: Option, +) -> schema::v1::BlockRequest { + schema::v1::BlockRequest { + fields: attributes.to_be_u32(), + from_block: match from_block { + message::FromBlock::Hash(h) => + Some(schema::v1::block_request::FromBlock::Hash(h.encode())), + message::FromBlock::Number(n) => + Some(schema::v1::block_request::FromBlock::Number(n.encode())), + }, + to_block: to_block.map(|h| h.encode()).unwrap_or_default(), + direction: match direction { + message::Direction::Ascending => schema::v1::Direction::Ascending as i32, + message::Direction::Descending => schema::v1::Direction::Descending as i32, + }, + max_blocks: max_blocks.unwrap_or(0), + } +} diff --git a/client/network/src/chain.rs b/client/network/src/chain.rs new file mode 100644 index 0000000000000..20fbe0284397d --- /dev/null +++ b/client/network/src/chain.rs @@ -0,0 +1,46 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Blockchain access trait + +use sp_blockchain::{Error, HeaderBackend, HeaderMetadata}; +use sc_client_api::{BlockBackend, ProofProvider}; +use sp_runtime::traits::{Block as BlockT, BlockIdTo}; + +/// Local client abstraction for the network. +pub trait Client: HeaderBackend + ProofProvider + BlockIdTo + + BlockBackend + HeaderMetadata + Send + Sync +{} + +impl Client for T + where + T: HeaderBackend + ProofProvider + BlockIdTo + + BlockBackend + HeaderMetadata + Send + Sync +{} + +/// Finality proof provider. +pub trait FinalityProofProvider: Send + Sync { + /// Prove finality of the block. + fn prove_finality(&self, for_block: Block::Hash, request: &[u8]) -> Result>, Error>; +} + +impl FinalityProofProvider for () { + fn prove_finality(&self, _for_block: Block::Hash, _request: &[u8]) -> Result>, Error> { + Ok(None) + } +} diff --git a/client/network/src/config.rs b/client/network/src/config.rs new file mode 100644 index 0000000000000..7445ea0534bb7 --- /dev/null +++ b/client/network/src/config.rs @@ -0,0 +1,746 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Configuration of the networking layer. +//! +//! The [`Params`] struct is the struct that must be passed in order to initialize the networking. +//! See the documentation of [`Params`]. + +pub use crate::chain::{Client, FinalityProofProvider}; +pub use crate::on_demand_layer::{AlwaysBadChecker, OnDemand}; +pub use crate::request_responses::{IncomingRequest, ProtocolConfig as RequestResponseConfig}; +pub use libp2p::{identity, core::PublicKey, wasm_ext::ExtTransport, build_multiaddr}; + +// Note: this re-export shouldn't be part of the public API of the crate and will be removed in +// the future. 
+#[doc(hidden)] +pub use crate::protocol::ProtocolConfig; + +use crate::ExHashT; + +use core::{fmt, iter}; +use futures::future; +use libp2p::{ + identity::{ed25519, Keypair}, + multiaddr, wasm_ext, Multiaddr, PeerId, +}; +use prometheus_endpoint::Registry; +use sp_consensus::{block_validation::BlockAnnounceValidator, import_queue::ImportQueue}; +use sp_runtime::{traits::Block as BlockT, ConsensusEngineId}; +use std::{borrow::Cow, convert::TryFrom, future::Future, pin::Pin, str::FromStr}; +use std::{ + collections::HashMap, + error::Error, + fs, + io::{self, Write}, + net::Ipv4Addr, + path::{Path, PathBuf}, + str, + sync::Arc, +}; +use zeroize::Zeroize; + +/// Network initialization parameters. +pub struct Params { + /// Assigned role for our node (full, light, ...). + pub role: Role, + + /// How to spawn background tasks. If you pass `None`, then a threads pool will be used by + /// default. + pub executor: Option + Send>>) + Send>>, + + /// Network layer configuration. + pub network_config: NetworkConfiguration, + + /// Client that contains the blockchain. + pub chain: Arc>, + + /// Finality proof provider. + /// + /// This object, if `Some`, is used when a node on the network requests a proof of finality + /// from us. + pub finality_proof_provider: Option>>, + + /// How to build requests for proofs of finality. + /// + /// This object, if `Some`, is used when we need a proof of finality from another node. + pub finality_proof_request_builder: Option>, + + /// The `OnDemand` object acts as a "receiver" for block data requests from the client. + /// If `Some`, the network worker will process these requests and answer them. + /// Normally used only for light clients. + pub on_demand: Option>>, + + /// Pool of transactions. + /// + /// The network worker will fetch transactions from this object in order to propagate them on + /// the network. + pub transaction_pool: Arc>, + + /// Name of the protocol to use on the wire. Should be different for each chain. 
+ pub protocol_id: ProtocolId, + + /// Import queue to use. + /// + /// The import queue is the component that verifies that blocks received from other nodes are + /// valid. + pub import_queue: Box>, + + /// Type to check incoming block announcements. + pub block_announce_validator: Box + Send>, + + /// Registry for recording prometheus metrics to. + pub metrics_registry: Option, +} + +/// Role of the local node. +#[derive(Debug, Clone)] +pub enum Role { + /// Regular full node. + Full, + /// Regular light node. + Light, + /// Sentry node that guards an authority. Will be reported as "authority" on the wire protocol. + Sentry { + /// Address and identity of the validator nodes that we're guarding. + /// + /// The nodes will be granted some priviledged status. + validators: Vec, + }, + /// Actual authority. + Authority { + /// List of public addresses and identities of our sentry nodes. + sentry_nodes: Vec, + } +} + +impl Role { + /// True for `Role::Authority` + pub fn is_authority(&self) -> bool { + matches!(self, Role::Authority { .. }) + } + + /// True for `Role::Authority` and `Role::Sentry` since they're both + /// announced as having the authority role to the network. + pub fn is_network_authority(&self) -> bool { + matches!(self, Role::Authority { .. } | Role::Sentry { .. }) + } +} + +impl fmt::Display for Role { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Role::Full => write!(f, "FULL"), + Role::Light => write!(f, "LIGHT"), + Role::Sentry { .. } => write!(f, "SENTRY"), + Role::Authority { .. } => write!(f, "AUTHORITY"), + } + } +} + +/// Finality proof request builder. +pub trait FinalityProofRequestBuilder: Send { + /// Build data blob, associated with the request. + fn build_request_data(&mut self, hash: &B::Hash) -> Vec; +} + +/// Implementation of `FinalityProofRequestBuilder` that builds a dummy empty request. 
+#[derive(Debug, Default)] +pub struct DummyFinalityProofRequestBuilder; + +impl FinalityProofRequestBuilder for DummyFinalityProofRequestBuilder { + fn build_request_data(&mut self, _: &B::Hash) -> Vec { + Vec::new() + } +} + +/// Shared finality proof request builder struct used by the queue. +pub type BoxFinalityProofRequestBuilder = Box + Send + Sync>; + +/// Result of the transaction import. +#[derive(Clone, Copy, Debug)] +pub enum TransactionImport { + /// Transaction is good but already known by the transaction pool. + KnownGood, + /// Transaction is good and not yet known. + NewGood, + /// Transaction is invalid. + Bad, + /// Transaction import was not performed. + None, +} + +/// Fuure resolving to transaction import result. +pub type TransactionImportFuture = Pin + Send>>; + +/// Transaction pool interface +pub trait TransactionPool: Send + Sync { + /// Get transactions from the pool that are ready to be propagated. + fn transactions(&self) -> Vec<(H, B::Extrinsic)>; + /// Get hash of transaction. + fn hash_of(&self, transaction: &B::Extrinsic) -> H; + /// Import a transaction into the pool. + /// + /// This will return future. + fn import( + &self, + transaction: B::Extrinsic, + ) -> TransactionImportFuture; + /// Notify the pool about transactions broadcast. + fn on_broadcasted(&self, propagations: HashMap>); + /// Get transaction by hash. + fn transaction(&self, hash: &H) -> Option; +} + +/// Dummy implementation of the [`TransactionPool`] trait for a transaction pool that is always +/// empty and discards all incoming transactions. +/// +/// Requires the "hash" type to implement the `Default` trait. +/// +/// Useful for testing purposes. 
+pub struct EmptyTransactionPool; + +impl TransactionPool for EmptyTransactionPool { + fn transactions(&self) -> Vec<(H, B::Extrinsic)> { + Vec::new() + } + + fn hash_of(&self, _transaction: &B::Extrinsic) -> H { + Default::default() + } + + fn import( + &self, + _transaction: B::Extrinsic + ) -> TransactionImportFuture { + Box::pin(future::ready(TransactionImport::KnownGood)) + } + + fn on_broadcasted(&self, _: HashMap>) {} + + fn transaction(&self, _h: &H) -> Option { None } +} + +/// Name of a protocol, transmitted on the wire. Should be unique for each chain. Always UTF-8. +#[derive(Clone, PartialEq, Eq, Hash)] +pub struct ProtocolId(smallvec::SmallVec<[u8; 6]>); + +impl<'a> From<&'a str> for ProtocolId { + fn from(bytes: &'a str) -> ProtocolId { + ProtocolId(bytes.as_bytes().into()) + } +} + +impl AsRef for ProtocolId { + fn as_ref(&self) -> &str { + str::from_utf8(&self.0[..]) + .expect("the only way to build a ProtocolId is through a UTF-8 String; qed") + } +} + +impl fmt::Debug for ProtocolId { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(self.as_ref(), f) + } +} + +/// Parses a string address and splits it into Multiaddress and PeerId, if +/// valid. +/// +/// # Example +/// +/// ``` +/// # use sc_network::{Multiaddr, PeerId, config::parse_str_addr}; +/// let (peer_id, addr) = parse_str_addr( +/// "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV" +/// ).unwrap(); +/// assert_eq!(peer_id, "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".parse::().unwrap()); +/// assert_eq!(addr, "/ip4/198.51.100.19/tcp/30333".parse::().unwrap()); +/// ``` +/// +pub fn parse_str_addr(addr_str: &str) -> Result<(PeerId, Multiaddr), ParseErr> { + let addr: Multiaddr = addr_str.parse()?; + parse_addr(addr) +} + +/// Splits a Multiaddress into a Multiaddress and PeerId. 
+pub fn parse_addr(mut addr: Multiaddr)-> Result<(PeerId, Multiaddr), ParseErr> { + let who = match addr.pop() { + Some(multiaddr::Protocol::P2p(key)) => PeerId::from_multihash(key) + .map_err(|_| ParseErr::InvalidPeerId)?, + _ => return Err(ParseErr::PeerIdMissing), + }; + + Ok((who, addr)) +} + +/// Address of a node, including its identity. +/// +/// This struct represents a decoded version of a multiaddress that ends with `/p2p/`. +/// +/// # Example +/// +/// ``` +/// # use sc_network::{Multiaddr, PeerId, config::MultiaddrWithPeerId}; +/// let addr: MultiaddrWithPeerId = +/// "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".parse().unwrap(); +/// assert_eq!(addr.peer_id.to_base58(), "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"); +/// assert_eq!(addr.multiaddr.to_string(), "/ip4/198.51.100.19/tcp/30333"); +/// ``` +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +#[serde(try_from = "String", into = "String")] +pub struct MultiaddrWithPeerId { + /// Address of the node. + pub multiaddr: Multiaddr, + /// Its identity. + pub peer_id: PeerId, +} + +impl MultiaddrWithPeerId { + /// Concatenates the multiaddress and peer ID into one multiaddress containing both. 
+ pub fn concat(&self) -> Multiaddr { + let proto = multiaddr::Protocol::P2p(From::from(self.peer_id.clone())); + self.multiaddr.clone().with(proto) + } +} + +impl fmt::Display for MultiaddrWithPeerId { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(&self.concat(), f) + } +} + +impl FromStr for MultiaddrWithPeerId { + type Err = ParseErr; + + fn from_str(s: &str) -> Result { + let (peer_id, multiaddr) = parse_str_addr(s)?; + Ok(MultiaddrWithPeerId { + peer_id, + multiaddr, + }) + } +} + +impl From for String { + fn from(ma: MultiaddrWithPeerId) -> String { + format!("{}", ma) + } +} + +impl TryFrom for MultiaddrWithPeerId { + type Error = ParseErr; + fn try_from(string: String) -> Result { + string.parse() + } +} + +/// Error that can be generated by `parse_str_addr`. +#[derive(Debug)] +pub enum ParseErr { + /// Error while parsing the multiaddress. + MultiaddrParse(multiaddr::Error), + /// Multihash of the peer ID is invalid. + InvalidPeerId, + /// The peer ID is missing from the address. + PeerIdMissing, +} + +impl fmt::Display for ParseErr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ParseErr::MultiaddrParse(err) => write!(f, "{}", err), + ParseErr::InvalidPeerId => write!(f, "Peer id at the end of the address is invalid"), + ParseErr::PeerIdMissing => write!(f, "Peer id is missing from the address"), + } + } +} + +impl std::error::Error for ParseErr { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + ParseErr::MultiaddrParse(err) => Some(err), + ParseErr::InvalidPeerId => None, + ParseErr::PeerIdMissing => None, + } + } +} + +impl From for ParseErr { + fn from(err: multiaddr::Error) -> ParseErr { + ParseErr::MultiaddrParse(err) + } +} + +/// Network service configuration. +#[derive(Clone, Debug)] +pub struct NetworkConfiguration { + /// Directory path to store network-specific configuration. None means nothing will be saved. 
+ pub net_config_path: Option, + /// Multiaddresses to listen for incoming connections. + pub listen_addresses: Vec, + /// Multiaddresses to advertise. Detected automatically if empty. + pub public_addresses: Vec, + /// List of initial node addresses + pub boot_nodes: Vec, + /// The node key configuration, which determines the node's network identity keypair. + pub node_key: NodeKeyConfig, + /// List of notifications protocols that the node supports. Must also include a + /// `ConsensusEngineId` for backwards-compatibility. + pub notifications_protocols: Vec<(ConsensusEngineId, Cow<'static, str>)>, + /// List of request-response protocols that the node supports. + pub request_response_protocols: Vec, + /// Maximum allowed number of incoming connections. + pub in_peers: u32, + /// Number of outgoing connections we're trying to maintain. + pub out_peers: u32, + /// List of reserved node addresses. + pub reserved_nodes: Vec, + /// The non-reserved peer mode. + pub non_reserved_mode: NonReservedPeerMode, + /// Client identifier. Sent over the wire for debugging purposes. + pub client_version: String, + /// Name of the node. Sent over the wire for debugging purposes. + pub node_name: String, + /// Configuration for the transport layer. + pub transport: TransportConfig, + /// Maximum number of peers to ask the same blocks in parallel. + pub max_parallel_downloads: u32, + /// Should we insert non-global addresses into the DHT? 
+ pub allow_non_globals_in_dht: bool, +} + +impl NetworkConfiguration { + /// Create new default configuration + pub fn new, SV: Into>( + node_name: SN, + client_version: SV, + node_key: NodeKeyConfig, + net_config_path: Option, + ) -> Self { + NetworkConfiguration { + net_config_path, + listen_addresses: Vec::new(), + public_addresses: Vec::new(), + boot_nodes: Vec::new(), + node_key, + notifications_protocols: Vec::new(), + request_response_protocols: Vec::new(), + in_peers: 25, + out_peers: 75, + reserved_nodes: Vec::new(), + non_reserved_mode: NonReservedPeerMode::Accept, + client_version: client_version.into(), + node_name: node_name.into(), + transport: TransportConfig::Normal { + enable_mdns: false, + allow_private_ipv4: true, + wasm_external_transport: None, + use_yamux_flow_control: false, + }, + max_parallel_downloads: 5, + allow_non_globals_in_dht: false, + } + } + + /// Create new default configuration for localhost-only connection with random port (useful for testing) + pub fn new_local() -> NetworkConfiguration { + let mut config = NetworkConfiguration::new( + "test-node", + "test-client", + Default::default(), + None, + ); + + config.listen_addresses = vec![ + iter::once(multiaddr::Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1))) + .chain(iter::once(multiaddr::Protocol::Tcp(0))) + .collect() + ]; + + config.allow_non_globals_in_dht = true; + config + } + + /// Create new default configuration for localhost-only connection with random port (useful for testing) + pub fn new_memory() -> NetworkConfiguration { + let mut config = NetworkConfiguration::new( + "test-node", + "test-client", + Default::default(), + None, + ); + + config.listen_addresses = vec![ + iter::once(multiaddr::Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1))) + .chain(iter::once(multiaddr::Protocol::Tcp(0))) + .collect() + ]; + + config.allow_non_globals_in_dht = true; + config + } +} + +/// Configuration for the transport layer. 
+#[derive(Clone, Debug)] +pub enum TransportConfig { + /// Normal transport mode. + Normal { + /// If true, the network will use mDNS to discover other libp2p nodes on the local network + /// and connect to them if they support the same chain. + enable_mdns: bool, + + /// If true, allow connecting to private IPv4 addresses (as defined in + /// [RFC1918](https://tools.ietf.org/html/rfc1918)). Irrelevant for addresses that have + /// been passed in [`NetworkConfiguration::reserved_nodes`] or + /// [`NetworkConfiguration::boot_nodes`]. + allow_private_ipv4: bool, + + /// Optional external implementation of a libp2p transport. Used in WASM contexts where we + /// need some binding between the networking provided by the operating system or environment + /// and libp2p. + /// + /// This parameter exists whatever the target platform is, but it is expected to be set to + /// `Some` only when compiling for WASM. + wasm_external_transport: Option, + /// Use flow control for yamux streams if set to true. + use_yamux_flow_control: bool, + }, + + /// Only allow connections within the same process. + /// Only addresses of the form `/memory/...` will be supported. + MemoryOnly, +} + +/// The policy for connections to non-reserved peers. +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum NonReservedPeerMode { + /// Accept them. This is the default. + Accept, + /// Deny them. + Deny, +} + +impl NonReservedPeerMode { + /// Attempt to parse the peer mode from a string. + pub fn parse(s: &str) -> Option { + match s { + "accept" => Some(NonReservedPeerMode::Accept), + "deny" => Some(NonReservedPeerMode::Deny), + _ => None, + } + } +} + +/// The configuration of a node's secret key, describing the type of key +/// and how it is obtained. A node's identity keypair is the result of +/// the evaluation of the node key configuration. +#[derive(Clone, Debug)] +pub enum NodeKeyConfig { + /// A Ed25519 secret key configuration. 
+ Ed25519(Secret) +} + +impl Default for NodeKeyConfig { + fn default() -> NodeKeyConfig { + NodeKeyConfig::Ed25519(Secret::New) + } +} + +/// The options for obtaining a Ed25519 secret key. +pub type Ed25519Secret = Secret; + +/// The configuration options for obtaining a secret key `K`. +#[derive(Clone)] +pub enum Secret { + /// Use the given secret key `K`. + Input(K), + /// Read the secret key from a file. If the file does not exist, + /// it is created with a newly generated secret key `K`. The format + /// of the file is determined by `K`: + /// + /// * `ed25519::SecretKey`: An unencoded 32 bytes Ed25519 secret key. + File(PathBuf), + /// Always generate a new secret key `K`. + New +} + +impl fmt::Debug for Secret { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Secret::Input(_) => f.debug_tuple("Secret::Input").finish(), + Secret::File(path) => f.debug_tuple("Secret::File").field(path).finish(), + Secret::New => f.debug_tuple("Secret::New").finish(), + } + } +} + +impl NodeKeyConfig { + /// Evaluate a `NodeKeyConfig` to obtain an identity `Keypair`: + /// + /// * If the secret is configured as input, the corresponding keypair is returned. + /// + /// * If the secret is configured as a file, it is read from that file, if it exists. + /// Otherwise a new secret is generated and stored. In either case, the + /// keypair obtained from the secret is returned. + /// + /// * If the secret is configured to be new, it is generated and the corresponding + /// keypair is returned. 
+ pub fn into_keypair(self) -> io::Result { + use NodeKeyConfig::*; + match self { + Ed25519(Secret::New) => + Ok(Keypair::generate_ed25519()), + + Ed25519(Secret::Input(k)) => + Ok(Keypair::Ed25519(k.into())), + + Ed25519(Secret::File(f)) => + get_secret( + f, + |mut b| { + match String::from_utf8(b.to_vec()) + .ok() + .and_then(|s|{ + if s.len() == 64 { + hex::decode(&s).ok() + } else { + None + }} + ) + { + Some(s) => ed25519::SecretKey::from_bytes(s), + _ => ed25519::SecretKey::from_bytes(&mut b), + } + }, + ed25519::SecretKey::generate, + |b| b.as_ref().to_vec() + ) + .map(ed25519::Keypair::from) + .map(Keypair::Ed25519), + } + } +} + +/// Load a secret key from a file, if it exists, or generate a +/// new secret key and write it to that file. In either case, +/// the secret key is returned. +fn get_secret(file: P, parse: F, generate: G, serialize: W) -> io::Result +where + P: AsRef, + F: for<'r> FnOnce(&'r mut [u8]) -> Result, + G: FnOnce() -> K, + E: Error + Send + Sync + 'static, + W: Fn(&K) -> Vec, +{ + std::fs::read(&file) + .and_then(|mut sk_bytes| + parse(&mut sk_bytes) + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))) + .or_else(|e| { + if e.kind() == io::ErrorKind::NotFound { + file.as_ref().parent().map_or(Ok(()), fs::create_dir_all)?; + let sk = generate(); + let mut sk_vec = serialize(&sk); + write_secret_file(file, &sk_vec)?; + sk_vec.zeroize(); + Ok(sk) + } else { + Err(e) + } + }) +} + +/// Write secret bytes to a file. +fn write_secret_file

(path: P, sk_bytes: &[u8]) -> io::Result<()> +where + P: AsRef +{ + let mut file = open_secret_file(&path)?; + file.write_all(sk_bytes) +} + +/// Opens a file containing a secret key in write mode. +#[cfg(unix)] +fn open_secret_file

(path: P) -> io::Result +where + P: AsRef +{ + use std::os::unix::fs::OpenOptionsExt; + fs::OpenOptions::new() + .write(true) + .create_new(true) + .mode(0o600) + .open(path) +} + +/// Opens a file containing a secret key in write mode. +#[cfg(not(unix))] +fn open_secret_file

(path: P) -> Result +where + P: AsRef +{ + fs::OpenOptions::new() + .write(true) + .create_new(true) + .open(path) +} + +#[cfg(test)] +mod tests { + use super::*; + use tempfile::TempDir; + + fn tempdir_with_prefix(prefix: &str) -> TempDir { + tempfile::Builder::new().prefix(prefix).tempdir().unwrap() + } + + fn secret_bytes(kp: &Keypair) -> Vec { + match kp { + Keypair::Ed25519(p) => p.secret().as_ref().iter().cloned().collect(), + Keypair::Secp256k1(p) => p.secret().to_bytes().to_vec(), + _ => panic!("Unexpected keypair.") + } + } + + #[test] + fn test_secret_file() { + let tmp = tempdir_with_prefix("x"); + std::fs::remove_dir(tmp.path()).unwrap(); // should be recreated + let file = tmp.path().join("x").to_path_buf(); + let kp1 = NodeKeyConfig::Ed25519(Secret::File(file.clone())).into_keypair().unwrap(); + let kp2 = NodeKeyConfig::Ed25519(Secret::File(file.clone())).into_keypair().unwrap(); + assert!(file.is_file() && secret_bytes(&kp1) == secret_bytes(&kp2)) + } + + #[test] + fn test_secret_input() { + let sk = ed25519::SecretKey::generate(); + let kp1 = NodeKeyConfig::Ed25519(Secret::Input(sk.clone())).into_keypair().unwrap(); + let kp2 = NodeKeyConfig::Ed25519(Secret::Input(sk)).into_keypair().unwrap(); + assert!(secret_bytes(&kp1) == secret_bytes(&kp2)); + } + + #[test] + fn test_secret_new() { + let kp1 = NodeKeyConfig::Ed25519(Secret::New).into_keypair().unwrap(); + let kp2 = NodeKeyConfig::Ed25519(Secret::New).into_keypair().unwrap(); + assert!(secret_bytes(&kp1) != secret_bytes(&kp2)); + } +} diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs new file mode 100644 index 0000000000000..6ef97708c1336 --- /dev/null +++ b/client/network/src/discovery.rs @@ -0,0 +1,975 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Discovery mechanisms of Substrate. +//! +//! The `DiscoveryBehaviour` struct implements the `NetworkBehaviour` trait of libp2p and is +//! responsible for discovering other nodes that are part of the network. +//! +//! Substrate uses the following mechanisms in order to discover nodes that are part of the network: +//! +//! - Bootstrap nodes. These are hard-coded node identities and addresses passed in the constructor +//! of the `DiscoveryBehaviour`. You can also call `add_known_address` later to add an entry. +//! +//! - mDNS. Discovers nodes on the local network by broadcasting UDP packets. +//! +//! - Kademlia random walk. Once connected, we perform random Kademlia `FIND_NODE` requests on the +//! configured Kademlia DHTs in order for nodes to propagate to us their view of the network. This +//! is performed automatically by the `DiscoveryBehaviour`. +//! +//! Additionally, the `DiscoveryBehaviour` is also capable of storing and loading value in the +//! configured DHTs. +//! +//! ## Usage +//! +//! The `DiscoveryBehaviour` generates events of type `DiscoveryOut`, most notably +//! `DiscoveryOut::Discovered` that is generated whenever we discover a node. +//! Only the identity of the node is returned. The node's addresses are stored within the +//! `DiscoveryBehaviour` and can be queried through the `NetworkBehaviour` trait. +//! +//! 
**Important**: In order for the discovery mechanism to work properly, there needs to be an +//! active mechanism that asks nodes for the addresses they are listening on. Whenever we learn +//! of a node's address, you must call `add_self_reported_address`. +//! + +use crate::config::ProtocolId; +use crate::utils::LruHashSet; +use futures::prelude::*; +use futures_timer::Delay; +use ip_network::IpNetwork; +use libp2p::core::{connection::{ConnectionId, ListenerId}, ConnectedPoint, Multiaddr, PeerId, PublicKey}; +use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters, ProtocolsHandler}; +use libp2p::swarm::protocols_handler::multi::MultiHandler; +use libp2p::kad::{Kademlia, KademliaBucketInserts, KademliaConfig, KademliaEvent, QueryResult, Quorum, Record}; +use libp2p::kad::GetClosestPeersError; +use libp2p::kad::handler::KademliaHandler; +use libp2p::kad::QueryId; +use libp2p::kad::record::{self, store::{MemoryStore, RecordStore}}; +#[cfg(not(target_os = "unknown"))] +use libp2p::swarm::toggle::Toggle; +#[cfg(not(target_os = "unknown"))] +use libp2p::mdns::{Mdns, MdnsEvent}; +use libp2p::multiaddr::Protocol; +use log::{debug, info, trace, warn}; +use std::{cmp, collections::{HashMap, HashSet, VecDeque}, io, num::NonZeroUsize, time::Duration}; +use std::task::{Context, Poll}; +use sp_core::hexdisplay::HexDisplay; + +/// Maximum number of known external addresses that we will cache. +/// This only affects whether we will log whenever we (re-)discover +/// a given address. +const MAX_KNOWN_EXTERNAL_ADDRESSES: usize = 32; + +/// `DiscoveryBehaviour` configuration. +/// +/// Note: In order to discover nodes or load and store values via Kademlia one has to add at least +/// one protocol via [`DiscoveryConfig::add_protocol`]. 
+pub struct DiscoveryConfig { + local_peer_id: PeerId, + user_defined: Vec<(PeerId, Multiaddr)>, + allow_private_ipv4: bool, + allow_non_globals_in_dht: bool, + discovery_only_if_under_num: u64, + enable_mdns: bool, + kademlias: HashMap> +} + +impl DiscoveryConfig { + /// Create a default configuration with the given public key. + pub fn new(local_public_key: PublicKey) -> Self { + DiscoveryConfig { + local_peer_id: local_public_key.into_peer_id(), + user_defined: Vec::new(), + allow_private_ipv4: true, + allow_non_globals_in_dht: false, + discovery_only_if_under_num: std::u64::MAX, + enable_mdns: false, + kademlias: HashMap::new() + } + } + + /// Set the number of active connections at which we pause discovery. + pub fn discovery_limit(&mut self, limit: u64) -> &mut Self { + self.discovery_only_if_under_num = limit; + self + } + + /// Set custom nodes which never expire, e.g. bootstrap or reserved nodes. + pub fn with_user_defined(&mut self, user_defined: I) -> &mut Self + where + I: IntoIterator + { + for (peer_id, addr) in user_defined { + for kad in self.kademlias.values_mut() { + kad.add_address(&peer_id, addr.clone()); + } + self.user_defined.push((peer_id, addr)) + } + self + } + + /// Should private IPv4 addresses be reported? + pub fn allow_private_ipv4(&mut self, value: bool) -> &mut Self { + self.allow_private_ipv4 = value; + self + } + + /// Should non-global addresses be inserted to the DHT? + pub fn allow_non_globals_in_dht(&mut self, value: bool) -> &mut Self { + self.allow_non_globals_in_dht = value; + self + } + + /// Should MDNS discovery be supported? + pub fn with_mdns(&mut self, value: bool) -> &mut Self { + if value && cfg!(target_os = "unknown") { + log::warn!(target: "sub-libp2p", "mDNS is not available on this platform") + } + self.enable_mdns = value; + self + } + + /// Add discovery via Kademlia for the given protocol. 
+ pub fn add_protocol(&mut self, id: ProtocolId) -> &mut Self { + let name = protocol_name_from_protocol_id(&id); + self.add_kademlia(id, name); + self + } + + fn add_kademlia(&mut self, id: ProtocolId, proto_name: Vec) { + if self.kademlias.contains_key(&id) { + warn!(target: "sub-libp2p", "Discovery already registered for protocol {:?}", id); + return + } + + let mut config = KademliaConfig::default(); + config.set_protocol_name(proto_name); + // By default Kademlia attempts to insert all peers into its routing table once a dialing + // attempt succeeds. In order to control which peer is added, disable the auto-insertion and + // instead add peers manually. + config.set_kbucket_inserts(KademliaBucketInserts::Manual); + + let store = MemoryStore::new(self.local_peer_id.clone()); + let mut kad = Kademlia::with_config(self.local_peer_id.clone(), store, config); + + for (peer_id, addr) in &self.user_defined { + kad.add_address(peer_id, addr.clone()); + } + + self.kademlias.insert(id, kad); + } + + /// Create a `DiscoveryBehaviour` from this config. 
+ pub fn finish(self) -> DiscoveryBehaviour { + DiscoveryBehaviour { + user_defined: self.user_defined, + kademlias: self.kademlias, + next_kad_random_query: Delay::new(Duration::new(0, 0)), + duration_to_next_kad: Duration::from_secs(1), + pending_events: VecDeque::new(), + local_peer_id: self.local_peer_id, + num_connections: 0, + allow_private_ipv4: self.allow_private_ipv4, + discovery_only_if_under_num: self.discovery_only_if_under_num, + #[cfg(not(target_os = "unknown"))] + mdns: if self.enable_mdns { + match Mdns::new() { + Ok(mdns) => Some(mdns).into(), + Err(err) => { + warn!(target: "sub-libp2p", "Failed to initialize mDNS: {:?}", err); + None.into() + } + } + } else { + None.into() + }, + allow_non_globals_in_dht: self.allow_non_globals_in_dht, + known_external_addresses: LruHashSet::new( + NonZeroUsize::new(MAX_KNOWN_EXTERNAL_ADDRESSES) + .expect("value is a constant; constant is non-zero; qed.") + ), + } + } +} + +/// Implementation of `NetworkBehaviour` that discovers the nodes on the network. +pub struct DiscoveryBehaviour { + /// User-defined list of nodes and their addresses. Typically includes bootstrap nodes and + /// reserved nodes. + user_defined: Vec<(PeerId, Multiaddr)>, + /// Kademlia requests and answers. + kademlias: HashMap>, + /// Discovers nodes on the local network. + #[cfg(not(target_os = "unknown"))] + mdns: Toggle, + /// Stream that fires when we need to perform the next random Kademlia query. + next_kad_random_query: Delay, + /// After `next_kad_random_query` triggers, the next one triggers after this duration. + duration_to_next_kad: Duration, + /// Events to return in priority when polled. + pending_events: VecDeque, + /// Identity of our local node. + local_peer_id: PeerId, + /// Number of nodes we're currently connected to. + num_connections: u64, + /// If false, `addresses_of_peer` won't return any private IPv4 address, except for the ones + /// stored in `user_defined`. 
+ allow_private_ipv4: bool, + /// Number of active connections over which we interrupt the discovery process. + discovery_only_if_under_num: u64, + /// Should non-global addresses be added to the DHT? + allow_non_globals_in_dht: bool, + /// A cache of discovered external addresses. Only used for logging purposes. + known_external_addresses: LruHashSet, +} + +impl DiscoveryBehaviour { + /// Returns the list of nodes that we know exist in the network. + pub fn known_peers(&mut self) -> HashSet { + let mut peers = HashSet::new(); + for k in self.kademlias.values_mut() { + for b in k.kbuckets() { + for e in b.iter() { + if !peers.contains(e.node.key.preimage()) { + peers.insert(e.node.key.preimage().clone()); + } + } + } + } + peers + } + + /// Adds a hard-coded address for the given peer, that never expires. + /// + /// This adds an entry to the parameter that was passed to `new`. + /// + /// If we didn't know this address before, also generates a `Discovered` event. + pub fn add_known_address(&mut self, peer_id: PeerId, addr: Multiaddr) { + if self.user_defined.iter().all(|(p, a)| *p != peer_id && *a != addr) { + for k in self.kademlias.values_mut() { + k.add_address(&peer_id, addr.clone()); + } + self.pending_events.push_back(DiscoveryOut::Discovered(peer_id.clone())); + self.user_defined.push((peer_id, addr)); + } + } + + /// Add a self-reported address of a remote peer to the k-buckets of the supported + /// DHTs (`supported_protocols`). + /// + /// **Note**: It is important that you call this method. The discovery mechanism will not + /// automatically add connecting peers to the Kademlia k-buckets. 
+ pub fn add_self_reported_address( + &mut self, + peer_id: &PeerId, + supported_protocols: impl Iterator>, + addr: Multiaddr + ) { + if !self.allow_non_globals_in_dht && !self.can_add_to_dht(&addr) { + log::trace!(target: "sub-libp2p", "Ignoring self-reported non-global address {} from {}.", addr, peer_id); + return + } + + let mut added = false; + for protocol in supported_protocols { + for kademlia in self.kademlias.values_mut() { + if protocol.as_ref() == kademlia.protocol_name() { + log::trace!( + target: "sub-libp2p", + "Adding self-reported address {} from {} to Kademlia DHT {}.", + addr, peer_id, String::from_utf8_lossy(kademlia.protocol_name()), + ); + kademlia.add_address(peer_id, addr.clone()); + added = true; + } + } + } + + if !added { + log::trace!( + target: "sub-libp2p", + "Ignoring self-reported address {} from {} as remote node is not part of any \ + Kademlia DHTs supported by the local node.", addr, peer_id, + ); + } + } + + /// Start fetching a record from the DHT. + /// + /// A corresponding `ValueFound` or `ValueNotFound` event will later be generated. + pub fn get_value(&mut self, key: &record::Key) { + for k in self.kademlias.values_mut() { + k.get_record(key, Quorum::One); + } + } + + /// Start putting a record into the DHT. Other nodes can later fetch that value with + /// `get_value`. + /// + /// A corresponding `ValuePut` or `ValuePutFailed` event will later be generated. + pub fn put_value(&mut self, key: record::Key, value: Vec) { + for k in self.kademlias.values_mut() { + if let Err(e) = k.put_record(Record::new(key.clone(), value.clone()), Quorum::All) { + warn!(target: "sub-libp2p", "Libp2p => Failed to put record: {:?}", e); + self.pending_events.push_back(DiscoveryOut::ValuePutFailed(key.clone(), Duration::from_secs(0))); + } + } + } + + /// Returns the number of nodes in each Kademlia kbucket for each Kademlia instance. 
+ /// + /// Identifies Kademlia instances by their [`ProtocolId`] and kbuckets by the base 2 logarithm + /// of their lower bound. + pub fn num_entries_per_kbucket(&mut self) -> impl ExactSizeIterator)> { + self.kademlias.iter_mut() + .map(|(id, kad)| { + let buckets = kad.kbuckets() + .map(|bucket| (bucket.range().0.ilog2().unwrap_or(0), bucket.iter().count())) + .collect(); + (id, buckets) + }) + } + + /// Returns the number of records in the Kademlia record stores. + pub fn num_kademlia_records(&mut self) -> impl ExactSizeIterator { + // Note that this code is ok only because we use a `MemoryStore`. + self.kademlias.iter_mut().map(|(id, kad)| { + let num = kad.store_mut().records().count(); + (id, num) + }) + } + + /// Returns the total size in bytes of all the records in the Kademlia record stores. + pub fn kademlia_records_total_size(&mut self) -> impl ExactSizeIterator { + // Note that this code is ok only because we use a `MemoryStore`. If the records were + // for example stored on disk, this would load every single one of them every single time. + self.kademlias.iter_mut().map(|(id, kad)| { + let size = kad.store_mut().records().fold(0, |tot, rec| tot + rec.value.len()); + (id, size) + }) + } + + /// Can the given `Multiaddr` be put into the DHT? + /// + /// This test is successful only for global IP addresses and DNS names. + // + // NB: Currently all DNS names are allowed and no check for TLD suffixes is done + // because the set of valid domains is highly dynamic and would require frequent + // updates, for example by utilising publicsuffix.org or IANA. + pub fn can_add_to_dht(&self, addr: &Multiaddr) -> bool { + let ip = match addr.iter().next() { + Some(Protocol::Ip4(ip)) => IpNetwork::from(ip), + Some(Protocol::Ip6(ip)) => IpNetwork::from(ip), + Some(Protocol::Dns(_)) | Some(Protocol::Dns4(_)) | Some(Protocol::Dns6(_)) + => return true, + _ => return false + }; + ip.is_global() + } +} + +/// Event generated by the `DiscoveryBehaviour`. 
+#[derive(Debug)] +pub enum DiscoveryOut { + /// A connection to a peer has been established but the peer has not been + /// added to the routing table because [`KademliaBucketInserts::Manual`] is + /// configured. If the peer is to be included in the routing table, it must + /// be explicitly added via + /// [`DiscoveryBehaviour::add_self_reported_address`]. + Discovered(PeerId), + + /// A peer connected to this node for whom no listen address is known. + /// + /// In order for the peer to be added to the Kademlia routing table, a known + /// listen address must be added via + /// [`DiscoveryBehaviour::add_self_reported_address`], e.g. obtained through + /// the `identify` protocol. + UnroutablePeer(PeerId), + + /// The DHT yielded results for the record request. + /// + /// Returning the result grouped in (key, value) pairs as well as the request duration. + ValueFound(Vec<(record::Key, Vec)>, Duration), + + /// The record requested was not found in the DHT. + /// + /// Returning the corresponding key as well as the request duration. + ValueNotFound(record::Key, Duration), + + /// The record with a given key was successfully inserted into the DHT. + /// + /// Returning the corresponding key as well as the request duration. + ValuePut(record::Key, Duration), + + /// Inserting a value into the DHT failed. + /// + /// Returning the corresponding key as well as the request duration. + ValuePutFailed(record::Key, Duration), + + /// Started a random Kademlia query for each DHT identified by the given `ProtocolId`s. 
+ RandomKademliaStarted(Vec), +} + +impl NetworkBehaviour for DiscoveryBehaviour { + type ProtocolsHandler = MultiHandler>; + type OutEvent = DiscoveryOut; + + fn new_handler(&mut self) -> Self::ProtocolsHandler { + let iter = self.kademlias.iter_mut() + .map(|(p, k)| (p.clone(), NetworkBehaviour::new_handler(k))); + + MultiHandler::try_from_iter(iter) + .expect("There can be at most one handler per `ProtocolId` and \ + protocol names contain the `ProtocolId` so no two protocol \ + names in `self.kademlias` can be equal which is the only error \ + `try_from_iter` can return, therefore this call is guaranteed \ + to succeed; qed") + } + + fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { + let mut list = self.user_defined.iter() + .filter_map(|(p, a)| if p == peer_id { Some(a.clone()) } else { None }) + .collect::>(); + + { + let mut list_to_filter = Vec::new(); + for k in self.kademlias.values_mut() { + list_to_filter.extend(k.addresses_of_peer(peer_id)) + } + + #[cfg(not(target_os = "unknown"))] + list_to_filter.extend(self.mdns.addresses_of_peer(peer_id)); + + if !self.allow_private_ipv4 { + list_to_filter.retain(|addr| { + if let Some(Protocol::Ip4(addr)) = addr.iter().next() { + if addr.is_private() { + return false; + } + } + + true + }); + } + + list.extend(list_to_filter); + } + + trace!(target: "sub-libp2p", "Addresses of {:?}: {:?}", peer_id, list); + + list + } + + fn inject_connection_established(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + self.num_connections += 1; + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_connection_established(k, peer_id, conn, endpoint) + } + } + + fn inject_connected(&mut self, peer_id: &PeerId) { + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_connected(k, peer_id) + } + } + + fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + self.num_connections -= 1; + for k in 
self.kademlias.values_mut() { + NetworkBehaviour::inject_connection_closed(k, peer_id, conn, endpoint) + } + } + + fn inject_disconnected(&mut self, peer_id: &PeerId) { + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_disconnected(k, peer_id) + } + } + + fn inject_addr_reach_failure( + &mut self, + peer_id: Option<&PeerId>, + addr: &Multiaddr, + error: &dyn std::error::Error + ) { + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_addr_reach_failure(k, peer_id, addr, error) + } + } + + fn inject_event( + &mut self, + peer_id: PeerId, + connection: ConnectionId, + (pid, event): ::OutEvent, + ) { + if let Some(kad) = self.kademlias.get_mut(&pid) { + return kad.inject_event(peer_id, connection, event) + } + log::error!(target: "sub-libp2p", + "inject_node_event: no kademlia instance registered for protocol {:?}", + pid) + } + + fn inject_new_external_addr(&mut self, addr: &Multiaddr) { + let new_addr = addr.clone() + .with(Protocol::P2p(self.local_peer_id.clone().into())); + + // NOTE: we might re-discover the same address multiple times + // in which case we just want to refrain from logging. 
+ if self.known_external_addresses.insert(new_addr.clone()) { + info!(target: "sub-libp2p", + "🔍 Discovered new external address for our node: {}", + new_addr, + ); + } + + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_new_external_addr(k, addr) + } + } + + fn inject_expired_listen_addr(&mut self, addr: &Multiaddr) { + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_expired_listen_addr(k, addr) + } + } + + fn inject_dial_failure(&mut self, peer_id: &PeerId) { + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_dial_failure(k, peer_id) + } + } + + fn inject_new_listen_addr(&mut self, addr: &Multiaddr) { + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_new_listen_addr(k, addr) + } + } + + fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn std::error::Error + 'static)) { + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_listener_error(k, id, err) + } + } + + fn inject_listener_closed(&mut self, id: ListenerId, reason: Result<(), &io::Error>) { + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_listener_closed(k, id, reason) + } + } + + fn poll( + &mut self, + cx: &mut Context, + params: &mut impl PollParameters, + ) -> Poll< + NetworkBehaviourAction< + ::InEvent, + Self::OutEvent, + >, + > { + // Immediately process the content of `discovered`. + if let Some(ev) = self.pending_events.pop_front() { + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); + } + + // Poll the stream that fires when we need to start a random Kademlia query. 
+ while let Poll::Ready(_) = self.next_kad_random_query.poll_unpin(cx) { + let actually_started = if self.num_connections < self.discovery_only_if_under_num { + let random_peer_id = PeerId::random(); + debug!(target: "sub-libp2p", + "Libp2p <= Starting random Kademlia request for {:?}", + random_peer_id); + for k in self.kademlias.values_mut() { + k.get_closest_peers(random_peer_id.clone()); + } + true + } else { + debug!( + target: "sub-libp2p", + "Kademlia paused due to high number of connections ({})", + self.num_connections + ); + false + }; + + // Schedule the next random query with exponentially increasing delay, + // capped at 60 seconds. + self.next_kad_random_query = Delay::new(self.duration_to_next_kad); + self.duration_to_next_kad = cmp::min(self.duration_to_next_kad * 2, + Duration::from_secs(60)); + + if actually_started { + let ev = DiscoveryOut::RandomKademliaStarted(self.kademlias.keys().cloned().collect()); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); + } + } + + // Poll Kademlias. + for (pid, kademlia) in &mut self.kademlias { + while let Poll::Ready(ev) = kademlia.poll(cx, params) { + match ev { + NetworkBehaviourAction::GenerateEvent(ev) => match ev { + KademliaEvent::RoutingUpdated { peer, .. } => { + let ev = DiscoveryOut::Discovered(peer); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); + } + KademliaEvent::UnroutablePeer { peer, .. } => { + let ev = DiscoveryOut::UnroutablePeer(peer); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); + } + KademliaEvent::RoutablePeer { peer, .. } => { + let ev = DiscoveryOut::Discovered(peer); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); + } + KademliaEvent::PendingRoutablePeer { .. } => { + // We are not interested in this event at the moment. + } + KademliaEvent::QueryResult { result: QueryResult::GetClosestPeers(res), .. 
} => { + match res { + Err(GetClosestPeersError::Timeout { key, peers }) => { + debug!(target: "sub-libp2p", + "Libp2p => Query for {:?} timed out with {} results", + HexDisplay::from(&key), peers.len()); + }, + Ok(ok) => { + trace!(target: "sub-libp2p", + "Libp2p => Query for {:?} yielded {:?} results", + HexDisplay::from(&ok.key), ok.peers.len()); + if ok.peers.is_empty() && self.num_connections != 0 { + debug!(target: "sub-libp2p", "Libp2p => Random Kademlia query has yielded empty \ + results"); + } + } + } + } + KademliaEvent::QueryResult { result: QueryResult::GetRecord(res), stats, .. } => { + let ev = match res { + Ok(ok) => { + let results = ok.records + .into_iter() + .map(|r| (r.record.key, r.record.value)) + .collect(); + + DiscoveryOut::ValueFound(results, stats.duration().unwrap_or_else(Default::default)) + } + Err(e @ libp2p::kad::GetRecordError::NotFound { .. }) => { + trace!(target: "sub-libp2p", + "Libp2p => Failed to get record: {:?}", e); + DiscoveryOut::ValueNotFound(e.into_key(), stats.duration().unwrap_or_else(Default::default)) + } + Err(e) => { + warn!(target: "sub-libp2p", + "Libp2p => Failed to get record: {:?}", e); + DiscoveryOut::ValueNotFound(e.into_key(), stats.duration().unwrap_or_else(Default::default)) + } + }; + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); + } + KademliaEvent::QueryResult { result: QueryResult::PutRecord(res), stats, .. } => { + let ev = match res { + Ok(ok) => DiscoveryOut::ValuePut(ok.key, stats.duration().unwrap_or_else(Default::default)), + Err(e) => { + warn!(target: "sub-libp2p", + "Libp2p => Failed to put record: {:?}", e); + DiscoveryOut::ValuePutFailed(e.into_key(), stats.duration().unwrap_or_else(Default::default)) + } + }; + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); + } + KademliaEvent::QueryResult { result: QueryResult::RepublishRecord(res), .. 
} => { + match res { + Ok(ok) => debug!(target: "sub-libp2p", + "Libp2p => Record republished: {:?}", + ok.key), + Err(e) => warn!(target: "sub-libp2p", + "Libp2p => Republishing of record {:?} failed with: {:?}", + e.key(), e) + } + } + // We never start any other type of query. + e => { + warn!(target: "sub-libp2p", "Libp2p => Unhandled Kademlia event: {:?}", e) + } + } + NetworkBehaviourAction::DialAddress { address } => + return Poll::Ready(NetworkBehaviourAction::DialAddress { address }), + NetworkBehaviourAction::DialPeer { peer_id, condition } => + return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }), + NetworkBehaviourAction::NotifyHandler { peer_id, handler, event } => + return Poll::Ready(NetworkBehaviourAction::NotifyHandler { + peer_id, + handler, + event: (pid.clone(), event) + }), + NetworkBehaviourAction::ReportObservedAddr { address } => + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }), + } + } + } + + // Poll mDNS. + #[cfg(not(target_os = "unknown"))] + while let Poll::Ready(ev) = self.mdns.poll(cx, params) { + match ev { + NetworkBehaviourAction::GenerateEvent(event) => { + match event { + MdnsEvent::Discovered(list) => { + if self.num_connections >= self.discovery_only_if_under_num { + continue; + } + + self.pending_events.extend(list.map(|(peer_id, _)| DiscoveryOut::Discovered(peer_id))); + if let Some(ev) = self.pending_events.pop_front() { + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); + } + }, + MdnsEvent::Expired(_) => {} + } + }, + NetworkBehaviourAction::DialAddress { address } => + return Poll::Ready(NetworkBehaviourAction::DialAddress { address }), + NetworkBehaviourAction::DialPeer { peer_id, condition } => + return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }), + NetworkBehaviourAction::NotifyHandler { event, .. 
} => + match event {}, // `event` is an enum with no variant + NetworkBehaviourAction::ReportObservedAddr { address } => + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }), + } + } + + Poll::Pending + } +} + +// NB: If this protocol name derivation is changed, check if +// `DiscoveryBehaviour::new_handler` is still correct. +fn protocol_name_from_protocol_id(id: &ProtocolId) -> Vec { + let mut v = vec![b'/']; + v.extend_from_slice(id.as_ref().as_bytes()); + v.extend_from_slice(b"/kad"); + v +} + +#[cfg(test)] +mod tests { + use crate::config::ProtocolId; + use futures::prelude::*; + use libp2p::identity::Keypair; + use libp2p::{Multiaddr, PeerId}; + use libp2p::core::upgrade; + use libp2p::core::transport::{Transport, MemoryTransport}; + use libp2p::noise; + use libp2p::swarm::Swarm; + use libp2p::yamux; + use std::{collections::HashSet, task::Poll}; + use super::{DiscoveryConfig, DiscoveryOut, protocol_name_from_protocol_id}; + + #[test] + fn discovery_working() { + let mut first_swarm_peer_id_and_addr = None; + let protocol_id = ProtocolId::from("dot"); + + // Build swarms whose behaviour is `DiscoveryBehaviour`, each aware of + // the first swarm via `with_user_defined`. 
+ let mut swarms = (0..25).map(|i| { + let keypair = Keypair::generate_ed25519(); + + let noise_keys = noise::Keypair::::new() + .into_authentic(&keypair) + .unwrap(); + + let transport = MemoryTransport + .upgrade(upgrade::Version::V1) + .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) + .multiplex(yamux::Config::default()); + + let behaviour = { + let mut config = DiscoveryConfig::new(keypair.public()); + config.with_user_defined(first_swarm_peer_id_and_addr.clone()) + .allow_private_ipv4(true) + .allow_non_globals_in_dht(true) + .discovery_limit(50) + .add_protocol(protocol_id.clone()); + + config.finish() + }; + + let mut swarm = Swarm::new(transport, behaviour, keypair.public().into_peer_id()); + let listen_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); + + if i == 0 { + first_swarm_peer_id_and_addr = Some((keypair.public().into_peer_id(), listen_addr.clone())) + } + + Swarm::listen_on(&mut swarm, listen_addr.clone()).unwrap(); + (swarm, listen_addr) + }).collect::>(); + + // Build a `Vec>` with the list of nodes remaining to be discovered. + let mut to_discover = (0..swarms.len()).map(|n| { + (0..swarms.len()) + // Skip the first swarm as all other swarms already know it. + .skip(1) + .filter(|p| *p != n) + .map(|p| Swarm::local_peer_id(&swarms[p].0).clone()) + .collect::>() + }).collect::>(); + + let fut = futures::future::poll_fn(move |cx| { + 'polling: loop { + for swarm_n in 0..swarms.len() { + match swarms[swarm_n].0.poll_next_unpin(cx) { + Poll::Ready(Some(e)) => { + match e { + DiscoveryOut::UnroutablePeer(other) | DiscoveryOut::Discovered(other) => { + // Call `add_self_reported_address` to simulate identify happening. 
+ let addr = swarms.iter().find_map(|(s, a)| + if s.local_peer_id == other { + Some(a.clone()) + } else { + None + }) + .unwrap(); + swarms[swarm_n].0.add_self_reported_address( + &other, + [protocol_name_from_protocol_id(&protocol_id)].iter(), + addr, + ); + + to_discover[swarm_n].remove(&other); + }, + DiscoveryOut::RandomKademliaStarted(_) => {}, + e => {panic!("Unexpected event: {:?}", e)}, + } + continue 'polling + } + _ => {} + } + } + break + } + + if to_discover.iter().all(|l| l.is_empty()) { + Poll::Ready(()) + } else { + Poll::Pending + } + }); + + futures::executor::block_on(fut); + } + + #[test] + fn discovery_ignores_peers_with_unknown_protocols() { + let supported_protocol_id = ProtocolId::from("a"); + let unsupported_protocol_id = ProtocolId::from("b"); + + let mut discovery = { + let keypair = Keypair::generate_ed25519(); + let mut config = DiscoveryConfig::new(keypair.public()); + config.allow_private_ipv4(true) + .allow_non_globals_in_dht(true) + .discovery_limit(50) + .add_protocol(supported_protocol_id.clone()); + config.finish() + }; + + let remote_peer_id = PeerId::random(); + let remote_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); + + // Add remote peer with unsupported protocol. + discovery.add_self_reported_address( + &remote_peer_id, + [protocol_name_from_protocol_id(&unsupported_protocol_id)].iter(), + remote_addr.clone(), + ); + + for kademlia in discovery.kademlias.values_mut() { + assert!( + kademlia.kbucket(remote_peer_id.clone()) + .expect("Remote peer id not to be equal to local peer id.") + .is_empty(), + "Expect peer with unsupported protocol not to be added." + ); + } + + // Add remote peer with supported protocol. 
+ discovery.add_self_reported_address( + &remote_peer_id, + [protocol_name_from_protocol_id(&supported_protocol_id)].iter(), + remote_addr.clone(), + ); + + for kademlia in discovery.kademlias.values_mut() { + assert_eq!( + 1, + kademlia.kbucket(remote_peer_id.clone()) + .expect("Remote peer id not to be equal to local peer id.") + .num_entries(), + "Expect peer with supported protocol to be added." + ); + } + } + + #[test] + fn discovery_adds_peer_to_kademlia_of_same_protocol_only() { + let protocol_a = ProtocolId::from("a"); + let protocol_b = ProtocolId::from("b"); + + let mut discovery = { + let keypair = Keypair::generate_ed25519(); + let mut config = DiscoveryConfig::new(keypair.public()); + config.allow_private_ipv4(true) + .allow_non_globals_in_dht(true) + .discovery_limit(50) + .add_protocol(protocol_a.clone()) + .add_protocol(protocol_b.clone()); + config.finish() + }; + + let remote_peer_id = PeerId::random(); + let remote_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); + + // Add remote peer with `protocol_a` only. 
+ discovery.add_self_reported_address( + &remote_peer_id, + [protocol_name_from_protocol_id(&protocol_a)].iter(), + remote_addr.clone(), + ); + + assert_eq!( + 1, + discovery.kademlias.get_mut(&protocol_a) + .expect("Kademlia instance to exist.") + .kbucket(remote_peer_id.clone()) + .expect("Remote peer id not to be equal to local peer id.") + .num_entries(), + "Expected remote peer to be added to `protocol_a` Kademlia instance.", + + ); + + assert!( + discovery.kademlias.get_mut(&protocol_b) + .expect("Kademlia instance to exist.") + .kbucket(remote_peer_id.clone()) + .expect("Remote peer id not to be equal to local peer id.") + .is_empty(), + "Expected remote peer not to be added to `protocol_b` Kademlia instance.", + ); + } +} diff --git a/client/network/src/error.rs b/client/network/src/error.rs new file mode 100644 index 0000000000000..7d7603ce92aab --- /dev/null +++ b/client/network/src/error.rs @@ -0,0 +1,90 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Substrate network possible errors. + +use crate::config::TransportConfig; +use libp2p::{PeerId, Multiaddr}; + +use std::{borrow::Cow, fmt}; + +/// Result type alias for the network. +pub type Result = std::result::Result; + +/// Error type for the network. 
+#[derive(derive_more::Display, derive_more::From)] +pub enum Error { + /// Io error + Io(std::io::Error), + /// Client error + Client(Box), + /// The same bootnode (based on address) is registered with two different peer ids. + #[display( + fmt = "The same bootnode (`{}`) is registered with two different peer ids: `{}` and `{}`", + address, + first_id, + second_id, + )] + DuplicateBootnode { + /// The address of the bootnode. + address: Multiaddr, + /// The first peer id that was found for the bootnode. + first_id: PeerId, + /// The second peer id that was found for the bootnode. + second_id: PeerId, + }, + /// Prometheus metrics error. + Prometheus(prometheus_endpoint::PrometheusError), + /// The network addresses are invalid because they don't match the transport. + #[display( + fmt = "The following addresses are invalid because they don't match the transport: {:?}", + addresses, + )] + AddressesForAnotherTransport { + /// Transport used. + transport: TransportConfig, + /// The invalid addresses. + addresses: Vec, + }, + /// The same request-response protocol has been registered multiple times. + #[display(fmt = "Request-response protocol registered multiple times: {}", protocol)] + DuplicateRequestResponseProtocol { + /// Name of the protocol registered multiple times. + protocol: Cow<'static, str>, + }, +} + +// Make `Debug` use the `Display` implementation. +impl fmt::Debug for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, f) + } +} + +impl std::error::Error for Error { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + Error::Io(ref err) => Some(err), + Error::Client(ref err) => Some(err), + Error::DuplicateBootnode { .. } => None, + Error::Prometheus(ref err) => Some(err), + Error::AddressesForAnotherTransport { .. } => None, + Error::DuplicateRequestResponseProtocol { .. 
} => None, + } + } +} diff --git a/client/network/src/finality_requests.rs b/client/network/src/finality_requests.rs new file mode 100644 index 0000000000000..55f56b9a0cc25 --- /dev/null +++ b/client/network/src/finality_requests.rs @@ -0,0 +1,403 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. +// +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! `NetworkBehaviour` implementation which handles incoming finality proof requests. +//! +//! Every request is coming in on a separate connection substream which gets +//! closed after we have sent the response back. Incoming requests are encoded +//! as protocol buffers (cf. `finality.v1.proto`). 
+ +#![allow(unused)] + +use bytes::Bytes; +use codec::{Encode, Decode}; +use crate::{ + chain::FinalityProofProvider, + config::ProtocolId, + protocol::message, + schema, +}; +use futures::{future::BoxFuture, prelude::*, stream::FuturesUnordered}; +use libp2p::{ + core::{ + ConnectedPoint, + Multiaddr, + PeerId, + connection::ConnectionId, + upgrade::{InboundUpgrade, OutboundUpgrade, ReadOneError, UpgradeInfo, Negotiated}, + upgrade::{DeniedUpgrade, read_one, write_one} + }, + swarm::{ + NegotiatedSubstream, + NetworkBehaviour, + NetworkBehaviourAction, + NotifyHandler, + OneShotHandler, + OneShotHandlerConfig, + PollParameters, + SubstreamProtocol + } +}; +use prost::Message; +use sp_runtime::{generic::BlockId, traits::{Block, Header, One, Zero}}; +use std::{ + cmp::min, + collections::VecDeque, + io, + iter, + marker::PhantomData, + sync::Arc, + time::Duration, + task::{Context, Poll} +}; +use void::{Void, unreachable}; + +// Type alias for convenience. +pub type Error = Box; + +/// Event generated by the finality proof requests behaviour. +#[derive(Debug)] +pub enum Event { + /// A response to a finality proof request has arrived. + Response { + peer: PeerId, + /// Block hash originally passed to `send_request`. + block_hash: B::Hash, + /// Finality proof returned by the remote. + proof: Vec, + }, +} + +/// Configuration options for `FinalityProofRequests`. +#[derive(Debug, Clone)] +pub struct Config { + max_request_len: usize, + max_response_len: usize, + inactivity_timeout: Duration, + protocol: Bytes, +} + +impl Config { + /// Create a fresh configuration with the following options: + /// + /// - max. request size = 1 MiB + /// - max. response size = 1 MiB + /// - inactivity timeout = 15s + pub fn new(id: &ProtocolId) -> Self { + let mut c = Config { + max_request_len: 1024 * 1024, + max_response_len: 1024 * 1024, + inactivity_timeout: Duration::from_secs(15), + protocol: Bytes::new(), + }; + c.set_protocol(id); + c + } + + /// Limit the max. 
length of incoming finality proof request bytes. + pub fn set_max_request_len(&mut self, v: usize) -> &mut Self { + self.max_request_len = v; + self + } + + /// Limit the max. length of incoming finality proof response bytes. + pub fn set_max_response_len(&mut self, v: usize) -> &mut Self { + self.max_response_len = v; + self + } + + /// Limit the max. duration the substream may remain inactive before closing it. + pub fn set_inactivity_timeout(&mut self, v: Duration) -> &mut Self { + self.inactivity_timeout = v; + self + } + + /// Set protocol to use for upgrade negotiation. + pub fn set_protocol(&mut self, id: &ProtocolId) -> &mut Self { + let mut v = Vec::new(); + v.extend_from_slice(b"/"); + v.extend_from_slice(id.as_ref().as_bytes()); + v.extend_from_slice(b"/finality-proof/1"); + self.protocol = v.into(); + self + } +} + +/// The finality proof request handling behaviour. +pub struct FinalityProofRequests { + /// This behaviour's configuration. + config: Config, + /// How to construct finality proofs. + finality_proof_provider: Option>>, + /// Futures sending back the finality proof request responses. + outgoing: FuturesUnordered>, + /// Events to return as soon as possible from `poll`. + pending_events: VecDeque, Event>>, +} + +impl FinalityProofRequests +where + B: Block, +{ + /// Initializes the behaviour. + /// + /// If the proof provider is `None`, then the behaviour will not support the finality proof + /// requests protocol. + pub fn new(cfg: Config, finality_proof_provider: Option>>) -> Self { + FinalityProofRequests { + config: cfg, + finality_proof_provider, + outgoing: FuturesUnordered::new(), + pending_events: VecDeque::new(), + } + } + + /// Issue a new finality proof request. + /// + /// If the response doesn't arrive in time, or if the remote answers improperly, the target + /// will be disconnected. 
+ pub fn send_request(&mut self, target: &PeerId, block_hash: B::Hash, request: Vec) { + let protobuf_rq = schema::v1::finality::FinalityProofRequest { + block_hash: block_hash.encode(), + request, + }; + + let mut buf = Vec::with_capacity(protobuf_rq.encoded_len()); + if let Err(err) = protobuf_rq.encode(&mut buf) { + log::warn!("failed to encode finality proof request {:?}: {:?}", protobuf_rq, err); + return; + } + + log::trace!("enqueueing finality proof request to {:?}: {:?}", target, protobuf_rq); + self.pending_events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: target.clone(), + handler: NotifyHandler::Any, + event: OutboundProtocol { + request: buf, + block_hash, + max_response_size: self.config.max_response_len, + protocol: self.config.protocol.clone(), + }, + }); + } + + /// Callback, invoked when a new finality request has been received from remote. + fn on_finality_request(&mut self, peer: &PeerId, request: &schema::v1::finality::FinalityProofRequest) + -> Result + { + let block_hash = Decode::decode(&mut request.block_hash.as_ref())?; + + log::trace!(target: "sync", "Finality proof request from {} for {}", peer, block_hash); + + // Note that an empty Vec is sent if no proof is available. + let finality_proof = if let Some(provider) = &self.finality_proof_provider { + provider + .prove_finality(block_hash, &request.request)? 
+ .unwrap_or_default() + } else { + log::error!("Answering a finality proof request while finality provider is empty"); + return Err(From::from("Empty finality proof provider".to_string())) + }; + + Ok(schema::v1::finality::FinalityProofResponse { proof: finality_proof }) + } +} + +impl NetworkBehaviour for FinalityProofRequests +where + B: Block +{ + type ProtocolsHandler = OneShotHandler, OutboundProtocol, NodeEvent>; + type OutEvent = Event; + + fn new_handler(&mut self) -> Self::ProtocolsHandler { + let p = InboundProtocol { + max_request_len: self.config.max_request_len, + protocol: if self.finality_proof_provider.is_some() { + Some(self.config.protocol.clone()) + } else { + None + }, + marker: PhantomData, + }; + let mut cfg = OneShotHandlerConfig::default(); + cfg.keep_alive_timeout = self.config.inactivity_timeout; + OneShotHandler::new(SubstreamProtocol::new(p, ()), cfg) + } + + fn addresses_of_peer(&mut self, _: &PeerId) -> Vec { + Vec::new() + } + + fn inject_connected(&mut self, _peer: &PeerId) { + } + + fn inject_disconnected(&mut self, _peer: &PeerId) { + } + + fn inject_event( + &mut self, + peer: PeerId, + connection: ConnectionId, + event: NodeEvent + ) { + match event { + NodeEvent::Request(request, mut stream) => { + match self.on_finality_request(&peer, &request) { + Ok(res) => { + log::trace!("enqueueing finality response for peer {}", peer); + let mut data = Vec::with_capacity(res.encoded_len()); + if let Err(e) = res.encode(&mut data) { + log::debug!("error encoding finality response for peer {}: {}", peer, e) + } else { + let future = async move { + if let Err(e) = write_one(&mut stream, data).await { + log::debug!("error writing finality response: {}", e) + } + }; + self.outgoing.push(future.boxed()) + } + } + Err(e) => log::debug!("error handling finality request from peer {}: {}", peer, e) + } + } + NodeEvent::Response(response, block_hash) => { + let ev = Event::Response { + peer, + block_hash, + proof: response.proof, + }; + 
self.pending_events.push_back(NetworkBehaviourAction::GenerateEvent(ev)); + } + } + } + + fn poll(&mut self, cx: &mut Context, _: &mut impl PollParameters) + -> Poll, Event>> + { + if let Some(ev) = self.pending_events.pop_front() { + return Poll::Ready(ev); + } + + while let Poll::Ready(Some(_)) = self.outgoing.poll_next_unpin(cx) {} + Poll::Pending + } +} + +/// Output type of inbound and outbound substream upgrades. +#[derive(Debug)] +pub enum NodeEvent { + /// Incoming request from remote and substream to use for the response. + Request(schema::v1::finality::FinalityProofRequest, T), + /// Incoming response from remote. + Response(schema::v1::finality::FinalityProofResponse, B::Hash), +} + +/// Substream upgrade protocol. +/// +/// We attempt to parse an incoming protobuf encoded request (cf. `Request`) +/// which will be handled by the `FinalityProofRequests` behaviour, i.e. the request +/// will become visible via `inject_node_event` which then dispatches to the +/// relevant callback to process the message and prepare a response. +#[derive(Debug, Clone)] +pub struct InboundProtocol { + /// The max. request length in bytes. + max_request_len: usize, + /// The protocol to use during upgrade negotiation. If `None`, then the incoming protocol + /// is simply disabled. + protocol: Option, + /// Marker to pin the block type. + marker: PhantomData, +} + +impl UpgradeInfo for InboundProtocol { + type Info = Bytes; + // This iterator will return either 0 elements if `self.protocol` is `None`, or 1 element if + // it is `Some`. 
+ type InfoIter = std::option::IntoIter; + + fn protocol_info(&self) -> Self::InfoIter { + self.protocol.clone().into_iter() + } +} + +impl InboundUpgrade for InboundProtocol +where + B: Block, + T: AsyncRead + AsyncWrite + Unpin + Send + 'static +{ + type Output = NodeEvent; + type Error = ReadOneError; + type Future = BoxFuture<'static, Result>; + + fn upgrade_inbound(self, mut s: T, _: Self::Info) -> Self::Future { + async move { + let len = self.max_request_len; + let vec = read_one(&mut s, len).await?; + match schema::v1::finality::FinalityProofRequest::decode(&vec[..]) { + Ok(r) => Ok(NodeEvent::Request(r, s)), + Err(e) => Err(ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e))) + } + }.boxed() + } +} + +/// Substream upgrade protocol. +/// +/// Sends a request to remote and awaits the response. +#[derive(Debug, Clone)] +pub struct OutboundProtocol { + /// The serialized protobuf request. + request: Vec, + /// Block hash that has been requested. + block_hash: B::Hash, + /// The max. response length in bytes. + max_response_size: usize, + /// The protocol to use for upgrade negotiation. 
+ protocol: Bytes, +} + +impl UpgradeInfo for OutboundProtocol { + type Info = Bytes; + type InfoIter = iter::Once; + + fn protocol_info(&self) -> Self::InfoIter { + iter::once(self.protocol.clone()) + } +} + +impl OutboundUpgrade for OutboundProtocol +where + B: Block, + T: AsyncRead + AsyncWrite + Unpin + Send + 'static +{ + type Output = NodeEvent; + type Error = ReadOneError; + type Future = BoxFuture<'static, Result>; + + fn upgrade_outbound(self, mut s: T, _: Self::Info) -> Self::Future { + async move { + write_one(&mut s, &self.request).await?; + let vec = read_one(&mut s, self.max_response_size).await?; + + schema::v1::finality::FinalityProofResponse::decode(&vec[..]) + .map(|r| NodeEvent::Response(r, self.block_hash)) + .map_err(|e| { + ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e)) + }) + }.boxed() + } +} diff --git a/client/network/src/gossip.rs b/client/network/src/gossip.rs new file mode 100644 index 0000000000000..0650e7a2f818b --- /dev/null +++ b/client/network/src/gossip.rs @@ -0,0 +1,245 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Helper for sending rate-limited gossip messages. +//! +//! # Context +//! +//! 
The [`NetworkService`] struct provides a way to send notifications to a certain peer through +//! the [`NetworkService::notification_sender`] method. This method is quite low level and isn't +//! expected to be used directly. +//! +//! The [`QueuedSender`] struct provided by this module is built on top of +//! [`NetworkService::notification_sender`] and provides a cleaner way to send notifications. +//! +//! # Behaviour +//! +//! An instance of [`QueuedSender`] is specific to a certain combination of `PeerId` and +//! protocol name. It maintains a buffer of messages waiting to be sent out. The user of this API +//! is able to manipulate that queue, adding or removing obsolete messages. +//! +//! Creating a [`QueuedSender`] also returns an opaque `Future` whose responsibility is to +//! drain that queue and actually send the messages. If the substream with the given combination +//! of peer and protocol is closed, the queue is silently discarded. It is the role of the user +//! to track which peers we are connected to. +//! +//! In normal situations, messages sent through a [`QueuedSender`] will arrive in the same +//! order as they have been sent. +//! It is possible, in the situation of disconnects and reconnects, that messages arrive in a +//! different order. See also https://github.com/paritytech/substrate/issues/6756. +//! However, if multiple instances of [`QueuedSender`] exist for the same peer and protocol, or +//! if some other code uses the [`NetworkService`] to send notifications to this combination of +//! peer and protocol, then the notifications will be interleaved in an unpredictable way. +//! 
+ +use crate::{ExHashT, NetworkService}; + +use async_std::sync::{Condvar, Mutex, MutexGuard}; +use futures::prelude::*; +use libp2p::PeerId; +use sp_runtime::{traits::Block as BlockT, ConsensusEngineId}; +use std::{ + collections::VecDeque, + fmt, + sync::{atomic, Arc}, + time::Duration, +}; + +#[cfg(test)] +mod tests; + +/// Notifications sender for a specific combination of network service, peer, and protocol. +pub struct QueuedSender { + /// Shared between the front and the back task. + shared: Arc>, +} + +impl QueuedSender { + /// Returns a new [`QueuedSender`] containing a queue of message for this specific + /// combination of peer and protocol. + /// + /// In addition to the [`QueuedSender`], also returns a `Future` whose role is to drive + /// the messages sending forward. + pub fn new( + service: Arc>, + peer_id: PeerId, + protocol: ConsensusEngineId, + queue_size_limit: usize, + messages_encode: F + ) -> (Self, impl Future + Send + 'static) + where + M: Send + 'static, + B: BlockT + 'static, + H: ExHashT, + F: Fn(M) -> Vec + Send + 'static, + { + let shared = Arc::new(Shared { + stop_task: atomic::AtomicBool::new(false), + condvar: Condvar::new(), + queue_size_limit, + messages_queue: Mutex::new(VecDeque::with_capacity(queue_size_limit)), + }); + + let task = spawn_task( + service, + peer_id, + protocol, + shared.clone(), + messages_encode + ); + + (QueuedSender { shared }, task) + } + + /// Locks the queue of messages towards this peer. + /// + /// The returned `Future` is expected to be ready quite quickly. + pub async fn lock_queue<'a>(&'a self) -> QueueGuard<'a, M> { + QueueGuard { + messages_queue: self.shared.messages_queue.lock().await, + condvar: &self.shared.condvar, + queue_size_limit: self.shared.queue_size_limit, + } + } + + /// Pushes a message to the queue, or discards it if the queue is full. + /// + /// The returned `Future` is expected to be ready quite quickly. 
+ pub async fn queue_or_discard(&self, message: M) + where + M: Send + 'static + { + self.lock_queue().await.push_or_discard(message); + } +} + +impl fmt::Debug for QueuedSender { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("QueuedSender").finish() + } +} + +impl Drop for QueuedSender { + fn drop(&mut self) { + // The "clean" way to notify the `Condvar` here is normally to first lock the `Mutex`, + // then notify the `Condvar` while the `Mutex` is locked. Unfortunately, the `Mutex` + // being asynchronous, it can't reasonably be locked from within a destructor. + // See also the corresponding code in the background task. + self.shared.stop_task.store(true, atomic::Ordering::Release); + self.shared.condvar.notify_all(); + } +} + +/// Locked queue of messages to the given peer. +/// +/// As long as this struct exists, the background task is asleep and the owner of the [`QueueGuard`] +/// is in total control of the buffer. Messages can only ever be sent out after the [`QueueGuard`] +/// is dropped. +#[must_use] +pub struct QueueGuard<'a, M> { + messages_queue: MutexGuard<'a, VecDeque>, + condvar: &'a Condvar, + /// Same as [`Shared::queue_size_limit`]. + queue_size_limit: usize, +} + +impl<'a, M: Send + 'static> QueueGuard<'a, M> { + /// Pushes a message to the queue, or discards it if the queue is full. + /// + /// The message will only start being sent out after the [`QueueGuard`] is dropped. + pub fn push_or_discard(&mut self, message: M) { + if self.messages_queue.len() < self.queue_size_limit { + self.messages_queue.push_back(message); + } + } + + /// Calls `filter` for each message in the queue, and removes the ones for which `false` is + /// returned. + /// + /// > **Note**: The parameter of `filter` is a `&M` and not a `&mut M` (which would be + /// > better) because the underlying implementation relies on `VecDeque::retain`. 
+ pub fn retain(&mut self, filter: impl FnMut(&M) -> bool) { + self.messages_queue.retain(filter); + } +} + +impl<'a, M> Drop for QueueGuard<'a, M> { + fn drop(&mut self) { + // We notify the `Condvar` in the destructor in order to be able to push multiple + // messages and wake up the background task only once afterwards. + self.condvar.notify_one(); + } +} + +#[derive(Debug)] +struct Shared { + /// Read by the background task after locking `locked`. If true, the task stops. + stop_task: atomic::AtomicBool, + /// Queue of messages waiting to be sent out. + messages_queue: Mutex>, + /// Must be notified every time the content of `locked` changes. + condvar: Condvar, + /// Maximum number of elements in `messages_queue`. + queue_size_limit: usize, +} + +async fn spawn_task Vec>( + service: Arc>, + peer_id: PeerId, + protocol: ConsensusEngineId, + shared: Arc>, + messages_encode: F, +) { + loop { + let next_message = 'next_msg: loop { + let mut queue = shared.messages_queue.lock().await; + + loop { + if shared.stop_task.load(atomic::Ordering::Acquire) { + return; + } + + if let Some(msg) = queue.pop_front() { + break 'next_msg msg; + } + + // It is possible that the destructor of `QueuedSender` sets `stop_task` to + // true and notifies the `Condvar` after the background task loads `stop_task` + // and before it calls `Condvar::wait`. + // See also the corresponding comment in `QueuedSender::drop`. + // For this reason, we use `wait_timeout`. In the worst case scenario, + // `stop_task` will always be checked again after the timeout is reached. + queue = shared.condvar.wait_timeout(queue, Duration::from_secs(10)).await.0; + } + }; + + // Starting from below, we try to send the message. If an error happens when sending, + // the only sane option we have is to silently discard the message. 
+ let sender = match service.notification_sender(peer_id.clone(), protocol) { + Ok(s) => s, + Err(_) => continue, + }; + + let ready = match sender.ready().await { + Ok(r) => r, + Err(_) => continue, + }; + + let _ = ready.send(messages_encode(next_message)); + } +} diff --git a/client/network/src/gossip/tests.rs b/client/network/src/gossip/tests.rs new file mode 100644 index 0000000000000..9ba44f564e132 --- /dev/null +++ b/client/network/src/gossip/tests.rs @@ -0,0 +1,201 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use crate::{config, gossip::QueuedSender, Event, NetworkService, NetworkWorker}; + +use futures::prelude::*; +use sp_runtime::traits::{Block as BlockT, Header as _}; +use std::{sync::Arc, time::Duration}; +use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _}; + +type TestNetworkService = NetworkService< + substrate_test_runtime_client::runtime::Block, + substrate_test_runtime_client::runtime::Hash, +>; + +/// Builds a full node to be used for testing. Returns the node service and its associated events +/// stream. 
+/// +/// > **Note**: We return the events stream in order to not possibly lose events between the +/// > construction of the service and the moment the events stream is grabbed. +fn build_test_full_node(config: config::NetworkConfiguration) + -> (Arc, impl Stream) +{ + let client = Arc::new( + TestClientBuilder::with_default_backend() + .build_with_longest_chain() + .0, + ); + + #[derive(Clone)] + struct PassThroughVerifier(bool); + impl sp_consensus::import_queue::Verifier for PassThroughVerifier { + fn verify( + &mut self, + origin: sp_consensus::BlockOrigin, + header: B::Header, + justification: Option, + body: Option>, + ) -> Result< + ( + sp_consensus::BlockImportParams, + Option)>>, + ), + String, + > { + let maybe_keys = header + .digest() + .log(|l| { + l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus(b"aura")) + .or_else(|| { + l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus(b"babe")) + }) + }) + .map(|blob| { + vec![( + sp_blockchain::well_known_cache_keys::AUTHORITIES, + blob.to_vec(), + )] + }); + + let mut import = sp_consensus::BlockImportParams::new(origin, header); + import.body = body; + import.finalized = self.0; + import.justification = justification; + import.fork_choice = Some(sp_consensus::ForkChoiceStrategy::LongestChain); + Ok((import, maybe_keys)) + } + } + + let import_queue = Box::new(sp_consensus::import_queue::BasicQueue::new( + PassThroughVerifier(false), + Box::new(client.clone()), + None, + None, + &sp_core::testing::TaskExecutor::new(), + None, + )); + + let worker = NetworkWorker::new(config::Params { + role: config::Role::Full, + executor: None, + network_config: config, + chain: client.clone(), + finality_proof_provider: None, + finality_proof_request_builder: None, + on_demand: None, + transaction_pool: Arc::new(crate::config::EmptyTransactionPool), + protocol_id: config::ProtocolId::from("/test-protocol-name"), + import_queue, + block_announce_validator: Box::new( + 
sp_consensus::block_validation::DefaultBlockAnnounceValidator, + ), + metrics_registry: None, + }) + .unwrap(); + + let service = worker.service().clone(); + let event_stream = service.event_stream("test"); + + async_std::task::spawn(async move { + futures::pin_mut!(worker); + let _ = worker.await; + }); + + (service, event_stream) +} + +const ENGINE_ID: sp_runtime::ConsensusEngineId = *b"foo\0"; + +/// Builds two nodes and their associated events stream. +/// The nodes are connected together and have the `ENGINE_ID` protocol registered. +fn build_nodes_one_proto() + -> (Arc, impl Stream, Arc, impl Stream) +{ + let listen_addr = config::build_multiaddr![Memory(rand::random::())]; + + let (node1, events_stream1) = build_test_full_node(config::NetworkConfiguration { + notifications_protocols: vec![(ENGINE_ID, From::from("/foo"))], + listen_addresses: vec![listen_addr.clone()], + transport: config::TransportConfig::MemoryOnly, + .. config::NetworkConfiguration::new_local() + }); + + let (node2, events_stream2) = build_test_full_node(config::NetworkConfiguration { + notifications_protocols: vec![(ENGINE_ID, From::from("/foo"))], + listen_addresses: vec![], + reserved_nodes: vec![config::MultiaddrWithPeerId { + multiaddr: listen_addr, + peer_id: node1.local_peer_id().clone(), + }], + transport: config::TransportConfig::MemoryOnly, + .. config::NetworkConfiguration::new_local() + }); + + (node1, events_stream1, node2, events_stream2) +} + +#[test] +fn basic_works() { + const NUM_NOTIFS: usize = 256; + + let (node1, mut events_stream1, node2, mut events_stream2) = build_nodes_one_proto(); + let node2_id = node2.local_peer_id().clone(); + + let receiver = async_std::task::spawn(async move { + let mut received_notifications = 0; + + while received_notifications < NUM_NOTIFS { + match events_stream2.next().await.unwrap() { + Event::NotificationStreamClosed { .. } => panic!(), + Event::NotificationsReceived { messages, .. 
} => { + for message in messages { + assert_eq!(message.0, ENGINE_ID); + assert_eq!(message.1, &b"message"[..]); + received_notifications += 1; + } + } + _ => {} + }; + + if rand::random::() < 2 { + async_std::task::sleep(Duration::from_millis(rand::random::() % 750)).await; + } + } + }); + + async_std::task::block_on(async move { + let (sender, bg_future) = + QueuedSender::new(node1, node2_id, ENGINE_ID, NUM_NOTIFS, |msg| msg); + async_std::task::spawn(bg_future); + + // Wait for the `NotificationStreamOpened`. + loop { + match events_stream1.next().await.unwrap() { + Event::NotificationStreamOpened { .. } => break, + _ => {} + }; + } + + for _ in 0..NUM_NOTIFS { + sender.queue_or_discard(b"message".to_vec()).await; + } + + receiver.await; + }); +} diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs new file mode 100644 index 0000000000000..3fd01c33dcf5f --- /dev/null +++ b/client/network/src/lib.rs @@ -0,0 +1,319 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +#![warn(unused_extern_crates)] +#![warn(missing_docs)] + +//! Substrate-specific P2P networking. +//! +//! **Important**: This crate is unstable and the API and usage may change. +//! +//! # Node identities and addresses +//! +//! 
In a decentralized network, each node possesses a network private key and a network public key. +//! In Substrate, the keys are based on the ed25519 curve. +//! +//! From a node's public key, we can derive its *identity*. In Substrate and libp2p, a node's +//! identity is represented with the [`PeerId`] struct. All network communications between nodes on +//! the network use encryption derived from both sides' keys, which means that **identities cannot +//! be faked**. +//! +//! A node's identity uniquely identifies a machine on the network. If you start two or more +//! clients using the same network key, large interferences will happen. +//! +//! # Substrate's network protocol +//! +//! Substrate's networking protocol is based upon libp2p. It is at the moment not possible and not +//! planned to permit using something else than the libp2p network stack and the rust-libp2p +//! library. However the libp2p framework is very flexible and the rust-libp2p library could be +//! extended to support a wider range of protocols than what is offered by libp2p. +//! +//! ## Discovery mechanisms +//! +//! In order for our node to join a peer-to-peer network, it has to know a list of nodes that are +//! part of said network. This includes node identities and their address (how to reach them). +//! Building such a list is called the **discovery** mechanism. There are three mechanisms that +//! Substrate uses: +//! +//! - Bootstrap nodes. These are hard-coded node identities and addresses passed alongside with +//! the network configuration. +//! - mDNS. We perform a UDP broadcast on the local network. Nodes that listen may respond with +//! their identity. More info [here](https://github.com/libp2p/specs/blob/master/discovery/mdns.md). +//! mDNS can be disabled in the network configuration. +//! - Kademlia random walk. Once connected, we perform random Kademlia `FIND_NODE` requests on the +//! 
configured Kademlia DHTs (one per configured chain protocol) in order for nodes to propagate to +//! us their view of the network. More information about Kademlia can be found [on +//! Wikipedia](https://en.wikipedia.org/wiki/Kademlia). +//! +//! ## Connection establishment +//! +//! When node Alice knows node Bob's identity and address, it can establish a connection with Bob. +//! All connections must always use encryption and multiplexing. While some node addresses (eg. +//! addresses using `/quic`) already imply which encryption and/or multiplexing to use, for others +//! the **multistream-select** protocol is used in order to negotiate an encryption layer and/or a +//! multiplexing layer. +//! +//! The connection establishment mechanism is called the **transport**. +//! +//! As of the writing of this documentation, the following base-layer protocols are supported by +//! Substrate: +//! +//! - TCP/IP for addresses of the form `/ip4/1.2.3.4/tcp/5`. Once the TCP connection is open, an +//! encryption and a multiplexing layer are negotiated on top. +//! - WebSockets for addresses of the form `/ip4/1.2.3.4/tcp/5/ws`. A TCP/IP connection is open and +//! the WebSockets protocol is negotiated on top. Communications then happen inside WebSockets data +//! frames. Encryption and multiplexing are additionally negotiated again inside this channel. +//! - DNS for addresses of the form `/dns/example.com/tcp/5` or `/dns/example.com/tcp/5/ws`. A +//! node's address can contain a domain name. +//! - (All of the above using IPv6 instead of IPv4.) +//! +//! On top of the base-layer protocol, the [Noise](https://noiseprotocol.org/) protocol is +//! negotiated and applied. The exact handshake protocol is experimental and is subject to change. +//! +//! The following multiplexing protocols are supported: +//! +//! - [Mplex](https://github.com/libp2p/specs/tree/master/mplex). Support for mplex will likely +//! be deprecated in the future. +//! 
- [Yamux](https://github.com/hashicorp/yamux/blob/master/spec.md). +//! +//! ## Substreams +//! +//! Once a connection has been established and uses multiplexing, substreams can be opened. When +//! a substream is open, the **multistream-select** protocol is used to negotiate which protocol +//! to use on that given substream. +//! +//! Protocols that are specific to a certain chain have a `` in their name. This +//! "protocol ID" is defined in the chain specifications. For example, the protocol ID of Polkadot +//! is "dot". In the protocol names below, `` must be replaced with the corresponding +//! protocol ID. +//! +//! > **Note**: It is possible for the same connection to be used for multiple chains. For example, +//! > one can use both the `/dot/sync/2` and `/sub/sync/2` protocols on the same +//! > connection, provided that the remote supports them. +//! +//! Substrate uses the following standard libp2p protocols: +//! +//! - **`/ipfs/ping/1.0.0`**. We periodically open an ephemeral substream in order to ping the +//! remote and check whether the connection is still alive. Failure for the remote to reply leads +//! to a disconnection. +//! - **[`/ipfs/id/1.0.0`](https://github.com/libp2p/specs/tree/master/identify)**. We +//! periodically open an ephemeral substream in order to ask information from the remote. +//! - **[`//kad`](https://github.com/libp2p/specs/pull/108)**. We periodically open +//! ephemeral substreams for Kademlia random walk queries. Each Kademlia query is done in a +//! separate substream. +//! +//! Additionally, Substrate uses the following non-libp2p-standard protocols: +//! +//! - **`/substrate//`** (where `` must be replaced with the +//! protocol ID of the targeted chain, and `` is a number between 2 and 6). For each +//! connection we optionally keep an additional substream for all Substrate-based communications alive. +//! This protocol is considered legacy, and is progressively being replaced with alternatives. 
+//! This is designated as "The legacy Substrate substream" in this documentation. See below for +//! more details. +//! - **`//sync/2`** is a request-response protocol (see below) that lets one perform +//! requests for information about blocks. Each request is the encoding of a `BlockRequest` and +//! each response is the encoding of a `BlockResponse`, as defined in the `api.v1.proto` file in +//! this source tree. +//! - **`//light/2`** is a request-response protocol (see below) that lets one perform +//! light-client-related requests for information about the state. Each request is the encoding of +//! a `light::Request` and each response is the encoding of a `light::Response`, as defined in the +//! `light.v1.proto` file in this source tree. +//! - **`//transactions/1`** is a notifications protocol (see below) where +//! transactions are pushed to other nodes. The handshake is empty on both sides. The message +//! format is a SCALE-encoded list of transactions, where each transaction is an opaque list of +//! bytes. +//! - **`//block-announces/1`** is a notifications protocol (see below) where +//! block announces are pushed to other nodes. The handshake is empty on both sides. The message +//! format is a SCALE-encoded tuple containing a block header followed with an opaque list of +//! bytes containing some data associated with this block announcement, e.g. a candidate message. +//! - Notifications protocols that are registered using the `register_notifications_protocol` +//! method. For example: `/paritytech/grandpa/1`. See below for more information. +//! +//! ## The legacy Substrate substream +//! +//! Substrate uses a component named the **peerset manager (PSM)**. Through the discovery +//! mechanism, the PSM is aware of the nodes that are part of the network and decides which nodes +//! we should perform Substrate-based communications with. For these nodes, we open a connection +//! 
if necessary and open a unique substream for Substrate-based communications. If the PSM decides +//! that we should disconnect a node, then that substream is closed. +//! +//! For more information about the PSM, see the *sc-peerset* crate. +//! +//! Note that at the moment there is no mechanism in place to solve the issues that arise where the +//! two sides of a connection open the unique substream simultaneously. In order to not run into +//! issues, only the dialer of a connection is allowed to open the unique substream. When the +//! substream is closed, the entire connection is closed as well. This is a bug that will be +//! resolved by deprecating the protocol entirely. +//! +//! Within the unique Substrate substream, messages encoded using +//! [*parity-scale-codec*](https://github.com/paritytech/parity-scale-codec) are exchanged. +//! The detail of these messages is not totally in place, but they can be found in the +//! `message.rs` file. +//! +//! Once the substream is open, the first step is an exchange of a *status* message from both +//! sides, containing information such as the chain root hash, head of chain, and so on. +//! +//! Communications within this substream include: +//! +//! - Syncing. Blocks are announced and requested from other nodes. +//! - Light-client requests. When a light client requires information, a random node we have a +//! substream open with is chosen, and the information is requested from it. +//! - Gossiping. Used for example by grandpa. +//! +//! ## Request-response protocols +//! +//! A so-called request-response protocol is defined as follows: +//! +//! - When a substream is opened, the opening side sends a message whose content is +//! protocol-specific. The message must be prefixed with an +//! [LEB128-encoded number](https://en.wikipedia.org/wiki/LEB128) indicating its length. After the +//! message has been sent, the writing side is closed. +//! 
- The remote sends back the response prefixed with a LEB128-encoded length, and closes its +//! side as well. +//! +//! Each request is performed in a new separate substream. +//! +//! ## Notifications protocols +//! +//! A so-called notifications protocol is defined as follows: +//! +//! - When a substream is opened, the opening side sends a handshake message whose content is +//! protocol-specific. The handshake message must be prefixed with an +//! [LEB128-encoded number](https://en.wikipedia.org/wiki/LEB128) indicating its length. The +//! handshake message can be of length 0, in which case the sender has to send a single `0`. +//! - The receiver then either immediately closes the substream, or answers with its own +//! LEB128-prefixed protocol-specific handshake response. The message can be of length 0, in which +//! case a single `0` has to be sent back. +//! - Once the handshake has completed, the notifications protocol is unidirectional. Only the +//! node which initiated the substream can push notifications. If the remote wants to send +//! notifications as well, it has to open its own unidirectional substream. +//! - Each notification must be prefixed with an LEB128-encoded length. The encoding of the +//! messages is specific to each protocol. +//! - Either party can signal that it doesn't want a notifications substream anymore by closing +//! its writing side. The other party should respond by closing its own writing side soon after. +//! +//! The API of `sc-network` allows one to register user-defined notification protocols. +//! `sc-network` automatically tries to open a substream towards each node for which the legacy +//! Substrate substream is open. The handshake is then performed automatically. +//! +//! For example, the `sc-finality-grandpa` crate registers the `/paritytech/grandpa/1` +//! notifications protocol. +//! +//! At the moment, for backwards-compatibility, notification protocols are tied to the legacy +//! Substrate substream. 
Additionally, the handshake message is hardcoded to be a single 8-bits +//! integer representing the role of the node: +//! +//! - 1 for a full node. +//! - 2 for a light node. +//! - 4 for an authority. +//! +//! In the future, though, these restrictions will be removed. +//! +//! # Usage +//! +//! Using the `sc-network` crate is done through the [`NetworkWorker`] struct. Create this +//! struct by passing a [`config::Params`], then poll it as if it was a `Future`. You can extract an +//! `Arc` from the `NetworkWorker`, which can be shared amongst multiple places +//! in order to give orders to the networking. +//! +//! See the [`config`] module for more information about how to configure the networking. +//! +//! After the `NetworkWorker` has been created, the important things to do are: +//! +//! - Calling `NetworkWorker::poll` in order to advance the network. This can be done by +//! dispatching a background task with the [`NetworkWorker`]. +//! - Calling `on_block_import` whenever a block is added to the client. +//! - Calling `on_block_finalized` whenever a block is finalized. +//! - Calling `trigger_repropagate` when a transaction is added to the pool. +//! +//! More precise usage details are still being worked on and will likely change in the future. +//! 
+ +mod behaviour; +mod block_requests; +mod chain; +mod peer_info; +mod discovery; +mod finality_requests; +mod light_client_handler; +mod on_demand_layer; +mod protocol; +mod request_responses; +mod schema; +mod service; +mod transport; +mod utils; + +pub mod config; +pub mod error; +pub mod gossip; +pub mod network_state; + +#[doc(inline)] +pub use libp2p::{multiaddr, Multiaddr, PeerId}; +pub use protocol::{event::{DhtEvent, Event, ObservedRole}, sync::SyncState, PeerInfo}; +pub use service::{ + NetworkService, NetworkWorker, RequestFailure, OutboundFailure, NotificationSender, + NotificationSenderReady, +}; + +pub use sc_peerset::ReputationChange; +use sp_runtime::traits::{Block as BlockT, NumberFor}; + +/// The maximum allowed number of established connections per peer. +/// +/// Typically, and by design of the network behaviours in this crate, +/// there is a single established connection per peer. However, to +/// avoid unnecessary and nondeterministic connection closure in +/// case of (possibly repeated) simultaneous dialing attempts between +/// two peers, the per-peer connection limit is not set to 1 but 2. +const MAX_CONNECTIONS_PER_PEER: usize = 2; + +/// Minimum Requirements for a Hash within Networking +pub trait ExHashT: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync + 'static {} + +impl ExHashT for T where T: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync + 'static +{} + +/// Trait for providing information about the local network state +pub trait NetworkStateInfo { + /// Returns the local external addresses. + fn external_addresses(&self) -> Vec; + + /// Returns the local Peer ID. + fn local_peer_id(&self) -> PeerId; +} + +/// Overview status of the network. +#[derive(Clone)] +pub struct NetworkStatus { + /// Current global sync state. + pub sync_state: SyncState, + /// Target sync block number. + pub best_seen_block: Option>, + /// Number of peers participating in syncing. 
+ pub num_sync_peers: u32, + /// Total number of connected peers + pub num_connected_peers: usize, + /// Total number of active peers. + pub num_active_peers: usize, + /// The total number of bytes received. + pub total_bytes_inbound: u64, + /// The total number of bytes sent. + pub total_bytes_outbound: u64, +} diff --git a/client/network/src/light_client_handler.rs b/client/network/src/light_client_handler.rs new file mode 100644 index 0000000000000..c1ff14fc82a22 --- /dev/null +++ b/client/network/src/light_client_handler.rs @@ -0,0 +1,2058 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. +// +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! [`NetworkBehaviour`] implementation which handles light client requests. +//! +//! Every request is coming in on a separate connection substream which gets +//! closed after we have sent the response back. Requests and responses are +//! encoded as protocol buffers (cf. `api.v1.proto`). +//! +//! For every outgoing request we likewise open a separate substream. 
+ +#![allow(unused)] + +use bytes::Bytes; +use codec::{self, Encode, Decode}; +use crate::{ + block_requests::build_protobuf_block_request, + chain::Client, + config::ProtocolId, + protocol::message::{BlockAttributes, Direction, FromBlock}, + schema, +}; +use futures::{channel::oneshot, future::BoxFuture, prelude::*, stream::FuturesUnordered}; +use libp2p::{ + core::{ + ConnectedPoint, + Multiaddr, + PeerId, + connection::ConnectionId, + upgrade::{InboundUpgrade, ReadOneError, UpgradeInfo, Negotiated}, + upgrade::{OutboundUpgrade, read_one, write_one} + }, + swarm::{ + NegotiatedSubstream, + NetworkBehaviour, + NetworkBehaviourAction, + NotifyHandler, + OneShotHandler, + OneShotHandlerConfig, + PollParameters, + SubstreamProtocol, + } +}; +use nohash_hasher::IntMap; +use prost::Message; +use sc_client_api::{ + StorageProof, + light::{ + self, RemoteReadRequest, RemoteBodyRequest, ChangesProof, + RemoteCallRequest, RemoteChangesRequest, RemoteHeaderRequest, + } +}; +use sc_peerset::ReputationChange; +use sp_core::{ + storage::{ChildInfo, ChildType,StorageKey, PrefixedStorageKey}, + hexdisplay::HexDisplay, +}; +use smallvec::SmallVec; +use sp_blockchain::{Error as ClientError}; +use sp_runtime::{ + traits::{Block, Header, NumberFor, Zero}, + generic::BlockId, +}; +use std::{ + collections::{BTreeMap, VecDeque, HashMap}, + iter, + io, + sync::Arc, + time::Duration, + task::{Context, Poll} +}; +use void::Void; +use wasm_timer::Instant; + +/// Reputation change for a peer when a request timed out. +pub(crate) const TIMEOUT_REPUTATION_CHANGE: i32 = -(1 << 8); + +/// Configuration options for `LightClientHandler` behaviour. +#[derive(Debug, Clone)] +pub struct Config { + max_request_size: usize, + max_response_size: usize, + max_pending_requests: usize, + inactivity_timeout: Duration, + request_timeout: Duration, + light_protocol: Bytes, + block_protocol: Bytes, +} + +impl Config { + /// Create a fresh configuration with the following options: + /// + /// - max. 
request size = 1 MiB + /// - max. response size = 16 MiB + /// - max. pending requests = 128 + /// - inactivity timeout = 15s + /// - request timeout = 15s + pub fn new(id: &ProtocolId) -> Self { + let mut c = Config { + max_request_size: 1 * 1024 * 1024, + max_response_size: 16 * 1024 * 1024, + max_pending_requests: 128, + inactivity_timeout: Duration::from_secs(15), + request_timeout: Duration::from_secs(15), + light_protocol: Bytes::new(), + block_protocol: Bytes::new(), + }; + c.set_protocol(id); + c + } + + /// Limit the max. length in bytes of a request. + pub fn set_max_request_size(&mut self, v: usize) -> &mut Self { + self.max_request_size = v; + self + } + + /// Limit the max. length in bytes of a response. + pub fn set_max_response_size(&mut self, v: usize) -> &mut Self { + self.max_response_size = v; + self + } + + /// Limit the max. number of pending requests. + pub fn set_max_pending_requests(&mut self, v: usize) -> &mut Self { + self.max_pending_requests = v; + self + } + + /// Limit the max. duration the connection may remain inactive before closing it. + pub fn set_inactivity_timeout(&mut self, v: Duration) -> &mut Self { + self.inactivity_timeout = v; + self + } + + /// Limit the max. request duration. + pub fn set_request_timeout(&mut self, v: Duration) -> &mut Self { + self.request_timeout = v; + self + } + + /// Set protocol to use for upgrade negotiation. + pub fn set_protocol(&mut self, id: &ProtocolId) -> &mut Self { + let mut vl = Vec::new(); + vl.extend_from_slice(b"/"); + vl.extend_from_slice(id.as_ref().as_bytes()); + vl.extend_from_slice(b"/light/2"); + self.light_protocol = vl.into(); + + let mut vb = Vec::new(); + vb.extend_from_slice(b"/"); + vb.extend_from_slice(id.as_ref().as_bytes()); + vb.extend_from_slice(b"/sync/2"); + self.block_protocol = vb.into(); + + self + } +} + +/// Possible errors while handling light clients. +#[derive(Debug, thiserror::Error)] +pub enum Error { + /// There are currently too many pending request. 
+ #[error("too many pending requests")] + TooManyRequests, + /// The response type does not correspond to the issued request. + #[error("unexpected response")] + UnexpectedResponse, + /// A bad request has been received. + #[error("bad request: {0}")] + BadRequest(&'static str), + /// The chain client errored. + #[error("client error: {0}")] + Client(#[from] ClientError), + /// Encoding or decoding of some data failed. + #[error("codec error: {0}")] + Codec(#[from] codec::Error), +} + +/// The possible light client requests we support. +/// +/// The associated `oneshot::Sender` will be used to convey the result of +/// their request back to them (cf. `Reply`). +// +// This is modeled after light_dispatch.rs's `RequestData` which is not +// used because we currently only support a subset of those. +#[derive(Debug)] +pub enum Request { + Body { + request: RemoteBodyRequest, + sender: oneshot::Sender, ClientError>> + }, + Header { + request: light::RemoteHeaderRequest, + sender: oneshot::Sender> + }, + Read { + request: light::RemoteReadRequest, + sender: oneshot::Sender, Option>>, ClientError>> + }, + ReadChild { + request: light::RemoteReadChildRequest, + sender: oneshot::Sender, Option>>, ClientError>> + }, + Call { + request: light::RemoteCallRequest, + sender: oneshot::Sender, ClientError>> + }, + Changes { + request: light::RemoteChangesRequest, + sender: oneshot::Sender, u32)>, ClientError>> + } +} + +/// The data to send back to the light client over the oneshot channel. +// +// It is unified here in order to be able to return it as a function +// result instead of delivering it to the client as a side effect of +// response processing. +#[derive(Debug)] +enum Reply { + VecU8(Vec), + VecNumberU32(Vec<(::Number, u32)>), + MapVecU8OptVecU8(HashMap, Option>>), + Header(B::Header), + Extrinsics(Vec), +} + +/// Augments a light client request with metadata. +#[derive(Debug)] +struct RequestWrapper { + /// Time when this value was created. 
+ timestamp: Instant, + /// Remaining retries. + retries: usize, + /// The actual request. + request: Request, + /// The peer to send the request to, e.g. `PeerId`. + peer: P, + /// The connection to use for sending the request. + connection: Option, +} + +/// Information we have about some peer. +#[derive(Debug)] +struct PeerInfo { + connections: SmallVec<[(ConnectionId, Multiaddr); crate::MAX_CONNECTIONS_PER_PEER]>, + best_block: Option>, + status: PeerStatus, +} + +impl Default for PeerInfo { + fn default() -> Self { + PeerInfo { + connections: SmallVec::new(), + best_block: None, + status: PeerStatus::Idle, + } + } +} + +type RequestId = u64; + +/// A peer is either idle or busy processing a request from us. +#[derive(Debug, Clone, PartialEq, Eq)] +enum PeerStatus { + /// The peer is available. + Idle, + /// We wait for the peer to return us a response for the given request ID. + BusyWith(RequestId), +} + +/// The light client handler behaviour. +pub struct LightClientHandler { + /// This behaviour's configuration. + config: Config, + /// Blockchain client. + chain: Arc>, + /// Verifies that received responses are correct. + checker: Arc>, + /// Peer information (addresses, their best block, etc.) + peers: HashMap>, + /// Futures sending back response to remote clients. + responses: FuturesUnordered>, + /// Pending (local) requests. + pending_requests: VecDeque>, + /// Requests on their way to remote peers. + outstanding: IntMap>, + /// (Local) Request ID counter + next_request_id: RequestId, + /// Handle to use for reporting misbehaviour of peers. + peerset: sc_peerset::PeersetHandle, +} + +impl LightClientHandler +where + B: Block, +{ + /// Construct a new light client handler. 
+ pub fn new( + cfg: Config, + chain: Arc>, + checker: Arc>, + peerset: sc_peerset::PeersetHandle, + ) -> Self { + LightClientHandler { + config: cfg, + chain, + checker, + peers: HashMap::new(), + responses: FuturesUnordered::new(), + pending_requests: VecDeque::new(), + outstanding: IntMap::default(), + next_request_id: 1, + peerset, + } + } + + /// We rely on external information about peers best blocks as we lack the + /// means to determine it ourselves. + pub fn update_best_block(&mut self, peer: &PeerId, num: NumberFor) { + if let Some(info) = self.peers.get_mut(peer) { + log::trace!("new best block for {:?}: {:?}", peer, num); + info.best_block = Some(num) + } + } + + /// Issue a new light client request. + pub fn request(&mut self, req: Request) -> Result<(), Error> { + if self.pending_requests.len() >= self.config.max_pending_requests { + return Err(Error::TooManyRequests) + } + let rw = RequestWrapper { + timestamp: Instant::now(), + retries: retries(&req), + request: req, + peer: (), // we do not know the peer yet + connection: None, + }; + self.pending_requests.push_back(rw); + Ok(()) + } + + fn next_request_id(&mut self) -> RequestId { + let id = self.next_request_id; + self.next_request_id += 1; + id + } + + /// Remove the given peer. + /// + /// If we have a request to this peer in flight, we move it back to + /// the pending requests queue. + fn remove_peer(&mut self, peer: &PeerId) { + if let Some(id) = self.outstanding.iter().find(|(_, rw)| &rw.peer == peer).map(|(k, _)| *k) { + let rw = self.outstanding.remove(&id).expect("key belongs to entry in this map"); + let rw = RequestWrapper { + timestamp: rw.timestamp, + retries: rw.retries, + request: rw.request, + peer: (), // need to find another peer + connection: None, + }; + self.pending_requests.push_back(rw); + } + self.peers.remove(peer); + } + + /// Prepares a request by selecting a suitable peer and connection to send it to. 
+ /// + /// If there is currently no suitable peer for the request, the given request + /// is returned as `Err`. + fn prepare_request(&self, req: RequestWrapper) + -> Result<(PeerId, RequestWrapper), RequestWrapper> + { + let number = required_block(&req.request); + + let mut peer = None; + for (peer_id, peer_info) in self.peers.iter() { + if peer_info.status == PeerStatus::Idle { + match peer_info.best_block { + Some(n) => if n >= number { + peer = Some((peer_id, peer_info)); + break + }, + None => peer = Some((peer_id, peer_info)) + } + } + } + + if let Some((peer_id, peer_info)) = peer { + let connection = peer_info.connections.iter().next().map(|(id, _)| *id); + let rw = RequestWrapper { + timestamp: req.timestamp, + retries: req.retries, + request: req.request, + peer: peer_id.clone(), + connection, + }; + Ok((peer_id.clone(), rw)) + } else { + Err(req) + } + } + + /// Process a local request's response from remote. + /// + /// If successful, this will give us the actual, checked data we should be + /// sending back to the client, otherwise an error. + fn on_response + ( &mut self + , peer: &PeerId + , request: &Request + , response: Response + ) -> Result, Error> + { + log::trace!("response from {}", peer); + match response { + Response::Light(r) => self.on_response_light(peer, request, r), + Response::Block(r) => self.on_response_block(peer, request, r), + } + } + + fn on_response_light + ( &mut self + , peer: &PeerId + , request: &Request + , response: schema::v1::light::Response + ) -> Result, Error> + { + use schema::v1::light::response::Response; + match response.response { + Some(Response::RemoteCallResponse(response)) => + if let Request::Call { request , .. 
} = request { + let proof = Decode::decode(&mut response.proof.as_ref())?; + let reply = self.checker.check_execution_proof(request, proof)?; + Ok(Reply::VecU8(reply)) + } else { + Err(Error::UnexpectedResponse) + } + Some(Response::RemoteReadResponse(response)) => + match request { + Request::Read { request, .. } => { + let proof = Decode::decode(&mut response.proof.as_ref())?; + let reply = self.checker.check_read_proof(&request, proof)?; + Ok(Reply::MapVecU8OptVecU8(reply)) + } + Request::ReadChild { request, .. } => { + let proof = Decode::decode(&mut response.proof.as_ref())?; + let reply = self.checker.check_read_child_proof(&request, proof)?; + Ok(Reply::MapVecU8OptVecU8(reply)) + } + _ => Err(Error::UnexpectedResponse) + } + Some(Response::RemoteChangesResponse(response)) => + if let Request::Changes { request, .. } = request { + let max_block = Decode::decode(&mut response.max.as_ref())?; + let roots_proof = Decode::decode(&mut response.roots_proof.as_ref())?; + let roots = { + let mut r = BTreeMap::new(); + for pair in response.roots { + let k = Decode::decode(&mut pair.fst.as_ref())?; + let v = Decode::decode(&mut pair.snd.as_ref())?; + r.insert(k, v); + } + r + }; + let reply = self.checker.check_changes_proof(&request, light::ChangesProof { + max_block, + proof: response.proof, + roots, + roots_proof, + })?; + Ok(Reply::VecNumberU32(reply)) + } else { + Err(Error::UnexpectedResponse) + } + Some(Response::RemoteHeaderResponse(response)) => + if let Request::Header { request, .. } = request { + let header = + if response.header.is_empty() { + None + } else { + Some(Decode::decode(&mut response.header.as_ref())?) 
+ }; + let proof = Decode::decode(&mut response.proof.as_ref())?; + let reply = self.checker.check_header_proof(&request, header, proof)?; + Ok(Reply::Header(reply)) + } else { + Err(Error::UnexpectedResponse) + } + None => Err(Error::UnexpectedResponse) + } + } + + fn on_response_block + ( &mut self + , peer: &PeerId + , request: &Request + , response: schema::v1::BlockResponse + ) -> Result, Error> + { + let request = if let Request::Body { request , .. } = &request { + request + } else { + return Err(Error::UnexpectedResponse); + }; + + let body: Vec<_> = match response.blocks.into_iter().next() { + Some(b) => b.body, + None => return Err(Error::UnexpectedResponse), + }; + + let body = body.into_iter() + .map(|mut extrinsic| B::Extrinsic::decode(&mut &extrinsic[..])) + .collect::>()?; + + let body = self.checker.check_body_proof(&request, body)?; + Ok(Reply::Extrinsics(body)) + } + + fn on_remote_call_request + ( &mut self + , peer: &PeerId + , request: &schema::v1::light::RemoteCallRequest + ) -> Result + { + log::trace!("remote call request from {} ({} at {:?})", + peer, + request.method, + request.block, + ); + + let block = Decode::decode(&mut request.block.as_ref())?; + + let proof = match self.chain.execution_proof(&BlockId::Hash(block), &request.method, &request.data) { + Ok((_, proof)) => proof, + Err(e) => { + log::trace!("remote call request from {} ({} at {:?}) failed with: {}", + peer, + request.method, + request.block, + e, + ); + StorageProof::empty() + } + }; + + let response = { + let r = schema::v1::light::RemoteCallResponse { proof: proof.encode() }; + schema::v1::light::response::Response::RemoteCallResponse(r) + }; + + Ok(schema::v1::light::Response { response: Some(response) }) + } + + fn on_remote_read_request + ( &mut self + , peer: &PeerId + , request: &schema::v1::light::RemoteReadRequest + ) -> Result + { + if request.keys.is_empty() { + log::debug!("invalid remote read request sent by {}", peer); + return Err(Error::BadRequest("remote 
read request without keys")) + } + + log::trace!("remote read request from {} ({} at {:?})", + peer, + fmt_keys(request.keys.first(), request.keys.last()), + request.block); + + let block = Decode::decode(&mut request.block.as_ref())?; + + let proof = match self.chain.read_proof(&BlockId::Hash(block), &mut request.keys.iter().map(AsRef::as_ref)) { + Ok(proof) => proof, + Err(error) => { + log::trace!("remote read request from {} ({} at {:?}) failed with: {}", + peer, + fmt_keys(request.keys.first(), request.keys.last()), + request.block, + error); + StorageProof::empty() + } + }; + + let response = { + let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; + schema::v1::light::response::Response::RemoteReadResponse(r) + }; + + Ok(schema::v1::light::Response { response: Some(response) }) + } + + fn on_remote_read_child_request + ( &mut self + , peer: &PeerId + , request: &schema::v1::light::RemoteReadChildRequest + ) -> Result + { + if request.keys.is_empty() { + log::debug!("invalid remote child read request sent by {}", peer); + return Err(Error::BadRequest("remove read child request without keys")) + } + + log::trace!("remote read child request from {} ({} {} at {:?})", + peer, + HexDisplay::from(&request.storage_key), + fmt_keys(request.keys.first(), request.keys.last()), + request.block); + + let block = Decode::decode(&mut request.block.as_ref())?; + + let prefixed_key = PrefixedStorageKey::new_ref(&request.storage_key); + let child_info = match ChildType::from_prefixed_key(prefixed_key) { + Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), + None => Err("Invalid child storage key".into()), + }; + let proof = match child_info.and_then(|child_info| self.chain.read_child_proof( + &BlockId::Hash(block), + &child_info, + &mut request.keys.iter().map(AsRef::as_ref) + )) { + Ok(proof) => proof, + Err(error) => { + log::trace!("remote read child request from {} ({} {} at {:?}) failed with: {}", + peer, + 
HexDisplay::from(&request.storage_key), + fmt_keys(request.keys.first(), request.keys.last()), + request.block, + error); + StorageProof::empty() + } + }; + + let response = { + let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() }; + schema::v1::light::response::Response::RemoteReadResponse(r) + }; + + Ok(schema::v1::light::Response { response: Some(response) }) + } + + fn on_remote_header_request + ( &mut self + , peer: &PeerId + , request: &schema::v1::light::RemoteHeaderRequest + ) -> Result + { + log::trace!("remote header proof request from {} ({:?})", peer, request.block); + + let block = Decode::decode(&mut request.block.as_ref())?; + let (header, proof) = match self.chain.header_proof(&BlockId::Number(block)) { + Ok((header, proof)) => (header.encode(), proof), + Err(error) => { + log::trace!("remote header proof request from {} ({:?}) failed with: {}", + peer, + request.block, + error); + (Default::default(), StorageProof::empty()) + } + }; + + let response = { + let r = schema::v1::light::RemoteHeaderResponse { header, proof: proof.encode() }; + schema::v1::light::response::Response::RemoteHeaderResponse(r) + }; + + Ok(schema::v1::light::Response { response: Some(response) }) + } + + fn on_remote_changes_request + ( &mut self + , peer: &PeerId + , request: &schema::v1::light::RemoteChangesRequest + ) -> Result + { + log::trace!("remote changes proof request from {} for key {} ({:?}..{:?})", + peer, + if !request.storage_key.is_empty() { + format!("{} : {}", HexDisplay::from(&request.storage_key), HexDisplay::from(&request.key)) + } else { + HexDisplay::from(&request.key).to_string() + }, + request.first, + request.last); + + let first = Decode::decode(&mut request.first.as_ref())?; + let last = Decode::decode(&mut request.last.as_ref())?; + let min = Decode::decode(&mut request.min.as_ref())?; + let max = Decode::decode(&mut request.max.as_ref())?; + let key = StorageKey(request.key.clone()); + let storage_key = if 
request.storage_key.is_empty() { + None + } else { + Some(PrefixedStorageKey::new_ref(&request.storage_key)) + }; + + let proof = match self.chain.key_changes_proof(first, last, min, max, storage_key, &key) { + Ok(proof) => proof, + Err(error) => { + log::trace!("remote changes proof request from {} for key {} ({:?}..{:?}) failed with: {}", + peer, + format!("{} : {}", HexDisplay::from(&request.storage_key), HexDisplay::from(&key.0)), + request.first, + request.last, + error); + + light::ChangesProof:: { + max_block: Zero::zero(), + proof: Vec::new(), + roots: BTreeMap::new(), + roots_proof: StorageProof::empty(), + } + } + }; + + let response = { + let r = schema::v1::light::RemoteChangesResponse { + max: proof.max_block.encode(), + proof: proof.proof, + roots: proof.roots.into_iter() + .map(|(k, v)| schema::v1::light::Pair { fst: k.encode(), snd: v.encode() }) + .collect(), + roots_proof: proof.roots_proof.encode(), + }; + schema::v1::light::response::Response::RemoteChangesResponse(r) + }; + + Ok(schema::v1::light::Response { response: Some(response) }) + } +} + +impl NetworkBehaviour for LightClientHandler +where + B: Block +{ + type ProtocolsHandler = OneShotHandler>; + type OutEvent = Void; + + fn new_handler(&mut self) -> Self::ProtocolsHandler { + let p = InboundProtocol { + max_request_size: self.config.max_request_size, + protocol: self.config.light_protocol.clone(), + }; + let mut cfg = OneShotHandlerConfig::default(); + cfg.keep_alive_timeout = self.config.inactivity_timeout; + OneShotHandler::new(SubstreamProtocol::new(p, ()), cfg) + } + + fn addresses_of_peer(&mut self, peer: &PeerId) -> Vec { + self.peers.get(peer) + .map(|info| info.connections.iter().map(|(_, a)| a.clone()).collect()) + .unwrap_or_default() + } + + fn inject_connected(&mut self, peer: &PeerId) { + } + + fn inject_connection_established(&mut self, peer: &PeerId, conn: &ConnectionId, info: &ConnectedPoint) { + let peer_address = match info { + ConnectedPoint::Listener { 
send_back_addr, .. } => send_back_addr.clone(), + ConnectedPoint::Dialer { address } => address.clone() + }; + + log::trace!("peer {} connected with address {}", peer, peer_address); + + let entry = self.peers.entry(peer.clone()).or_default(); + entry.connections.push((*conn, peer_address)); + } + + fn inject_disconnected(&mut self, peer: &PeerId) { + log::trace!("peer {} disconnected", peer); + self.remove_peer(peer) + } + + fn inject_connection_closed(&mut self, peer: &PeerId, conn: &ConnectionId, info: &ConnectedPoint) { + let peer_address = match info { + ConnectedPoint::Listener { send_back_addr, .. } => send_back_addr, + ConnectedPoint::Dialer { address } => address + }; + + log::trace!("connection to peer {} closed: {}", peer, peer_address); + + if let Some(info) = self.peers.get_mut(peer) { + info.connections.retain(|(c, _)| c != conn) + } + + // Add any outstanding requests on the closed connection back to the + // pending requests. + if let Some(id) = self.outstanding.iter() + .find(|(_, rw)| &rw.peer == peer && rw.connection == Some(*conn)) // (*) + .map(|(id, _)| *id) + { + let rw = self.outstanding.remove(&id).expect("by (*)"); + let rw = RequestWrapper { + timestamp: rw.timestamp, + retries: rw.retries, + request: rw.request, + peer: (), // need to find another peer + connection: None, + }; + self.pending_requests.push_back(rw); + } + } + + fn inject_event(&mut self, peer: PeerId, conn: ConnectionId, event: Event) { + match event { + // An incoming request from remote has been received. 
+ Event::Request(request, mut stream) => { + log::trace!("incoming request from {}", peer); + let result = match &request.request { + Some(schema::v1::light::request::Request::RemoteCallRequest(r)) => + self.on_remote_call_request(&peer, r), + Some(schema::v1::light::request::Request::RemoteReadRequest(r)) => + self.on_remote_read_request(&peer, r), + Some(schema::v1::light::request::Request::RemoteHeaderRequest(r)) => + self.on_remote_header_request(&peer, r), + Some(schema::v1::light::request::Request::RemoteReadChildRequest(r)) => + self.on_remote_read_child_request(&peer, r), + Some(schema::v1::light::request::Request::RemoteChangesRequest(r)) => + self.on_remote_changes_request(&peer, r), + None => { + log::debug!("ignoring request without request data from peer {}", peer); + return + } + }; + match result { + Ok(response) => { + log::trace!("enqueueing response for peer {}", peer); + let mut data = Vec::new(); + if let Err(e) = response.encode(&mut data) { + log::debug!("error encoding response for peer {}: {}", peer, e) + } else { + let future = async move { + if let Err(e) = write_one(&mut stream, data).await { + log::debug!("error writing response: {}", e) + } + }; + self.responses.push(future.boxed()) + } + } + Err(Error::BadRequest(_)) => { + self.remove_peer(&peer); + self.peerset.report_peer(peer, ReputationChange::new(-(1 << 12), "bad request")) + } + Err(e) => log::debug!("error handling request from peer {}: {}", peer, e) + } + } + // A response to one of our own requests has been received. + Event::Response(id, response) => { + if let Some(request) = self.outstanding.remove(&id) { + // We first just check if the response originates from the expected peer + // and connection. 
+ if request.peer != peer { + log::debug!("Expected response from {} instead of {}.", request.peer, peer); + self.outstanding.insert(id, request); + self.remove_peer(&peer); + self.peerset.report_peer(peer, ReputationChange::new_fatal("response from unexpected peer")); + return + } + + if let Some(info) = self.peers.get_mut(&peer) { + if info.status != PeerStatus::BusyWith(id) { + // If we get here, something is wrong with our internal handling of peer + // status information. At any time, a single peer processes at most one + // request from us and its status should contain the request ID we are + // expecting a response for. If a peer would send us a response with a + // random ID, we should not have an entry for it with this peer ID in + // our `outstanding` map, so a malicious peer should not be able to get + // us here. It is our own fault and must be fixed! + panic!("unexpected peer status {:?} for {}", info.status, peer); + } + + info.status = PeerStatus::Idle; // Make peer available again. 
+ + match self.on_response(&peer, &request.request, response) { + Ok(reply) => send_reply(Ok(reply), request.request), + Err(Error::UnexpectedResponse) => { + log::debug!("unexpected response {} from peer {}", id, peer); + self.remove_peer(&peer); + self.peerset.report_peer(peer, ReputationChange::new_fatal("unexpected response from peer")); + let rw = RequestWrapper { + timestamp: request.timestamp, + retries: request.retries, + request: request.request, + peer: (), + connection: None, + }; + self.pending_requests.push_back(rw); + } + Err(other) => { + log::debug!("error handling response {} from peer {}: {}", id, peer, other); + self.remove_peer(&peer); + self.peerset.report_peer(peer, ReputationChange::new_fatal("invalid response from peer")); + if request.retries > 0 { + let rw = RequestWrapper { + timestamp: request.timestamp, + retries: request.retries - 1, + request: request.request, + peer: (), + connection: None, + }; + self.pending_requests.push_back(rw) + } else { + send_reply(Err(ClientError::RemoteFetchFailed), request.request) + } + } + } + } else { + // If we get here, something is wrong with our internal handling of peers. + // We apparently have an entry in our `outstanding` map and the peer is the one we + // expected. So, if we can not find an entry for it in our peer information table, + // then these two collections are out of sync which must not happen and is a clear + // programmer error that must be fixed! + panic!("missing peer information for {}; response {}", peer, id); + } + } else { + log::debug!("unexpected response {} from peer {}", id, peer); + self.remove_peer(&peer); + self.peerset.report_peer(peer, ReputationChange::new_fatal("response from unexpected peer")); + } + } + } + } + + fn poll(&mut self, cx: &mut Context, _: &mut impl PollParameters) -> Poll> { + // Process response sending futures. 
+ while let Poll::Ready(Some(_)) = self.responses.poll_next_unpin(cx) {} + + // If we have a pending request to send, try to find an available peer and send it. + let now = Instant::now(); + while let Some(mut request) = self.pending_requests.pop_front() { + if now > request.timestamp + self.config.request_timeout { + if request.retries == 0 { + send_reply(Err(ClientError::RemoteFetchFailed), request.request); + continue + } + request.timestamp = Instant::now(); + request.retries -= 1 + } + + + match self.prepare_request(request) { + Err(request) => { + self.pending_requests.push_front(request); + log::debug!("no peer available to send request to"); + break + } + Ok((peer, request)) => { + let request_bytes = match serialize_request(&request.request) { + Ok(bytes) => bytes, + Err(error) => { + log::debug!("failed to serialize request: {}", error); + send_reply(Err(ClientError::RemoteFetchFailed), request.request); + continue + } + }; + + let (expected, protocol) = match request.request { + Request::Body { .. } => + (ExpectedResponseTy::Block, self.config.block_protocol.clone()), + _ => + (ExpectedResponseTy::Light, self.config.light_protocol.clone()), + }; + + let peer_id = peer.clone(); + let handler = request.connection.map_or(NotifyHandler::Any, NotifyHandler::One); + + let request_id = self.next_request_id(); + if let Some(p) = self.peers.get_mut(&peer) { + p.status = PeerStatus::BusyWith(request_id); + } + self.outstanding.insert(request_id, request); + + let event = OutboundProtocol { + request_id, + request: request_bytes, + expected, + max_response_size: self.config.max_response_size, + protocol, + }; + + log::trace!("sending request {} to peer {}", request_id, peer_id); + + return Poll::Ready(NetworkBehaviourAction::NotifyHandler { + peer_id, + handler, + event, + }) + } + } + } + + // Look for ongoing requests that have timed out. 
+ let mut expired = Vec::new(); + for (id, rw) in &self.outstanding { + if now > rw.timestamp + self.config.request_timeout { + log::debug!("request {} timed out", id); + expired.push(*id) + } + } + for id in expired { + if let Some(rw) = self.outstanding.remove(&id) { + self.remove_peer(&rw.peer); + self.peerset.report_peer(rw.peer.clone(), + ReputationChange::new(TIMEOUT_REPUTATION_CHANGE, "light request timeout")); + if rw.retries == 0 { + send_reply(Err(ClientError::RemoteFetchFailed), rw.request); + continue + } + let rw = RequestWrapper { + timestamp: Instant::now(), + retries: rw.retries - 1, + request: rw.request, + peer: (), + connection: None, + }; + self.pending_requests.push_back(rw) + } + } + + Poll::Pending + } +} + +fn required_block(request: &Request) -> NumberFor { + match request { + Request::Body { request, .. } => *request.header.number(), + Request::Header { request, .. } => request.block, + Request::Read { request, .. } => *request.header.number(), + Request::ReadChild { request, .. } => *request.header.number(), + Request::Call { request, .. } => *request.header.number(), + Request::Changes { request, .. } => request.max_block.0, + } +} + +fn retries(request: &Request) -> usize { + let rc = match request { + Request::Body { request, .. } => request.retry_count, + Request::Header { request, .. } => request.retry_count, + Request::Read { request, .. } => request.retry_count, + Request::ReadChild { request, .. } => request.retry_count, + Request::Call { request, .. } => request.retry_count, + Request::Changes { request, .. } => request.retry_count, + }; + rc.unwrap_or(0) +} + +fn serialize_request(request: &Request) -> Result, prost::EncodeError> { + let request = match request { + Request::Body { request, .. 
} => { + let rq = build_protobuf_block_request::<_, NumberFor>( + BlockAttributes::BODY, + FromBlock::Hash(request.header.hash()), + None, + Direction::Ascending, + Some(1), + ); + let mut buf = Vec::with_capacity(rq.encoded_len()); + rq.encode(&mut buf)?; + return Ok(buf); + } + Request::Header { request, .. } => { + let r = schema::v1::light::RemoteHeaderRequest { block: request.block.encode() }; + schema::v1::light::request::Request::RemoteHeaderRequest(r) + } + Request::Read { request, .. } => { + let r = schema::v1::light::RemoteReadRequest { + block: request.block.encode(), + keys: request.keys.clone(), + }; + schema::v1::light::request::Request::RemoteReadRequest(r) + } + Request::ReadChild { request, .. } => { + let r = schema::v1::light::RemoteReadChildRequest { + block: request.block.encode(), + storage_key: request.storage_key.clone().into_inner(), + keys: request.keys.clone(), + }; + schema::v1::light::request::Request::RemoteReadChildRequest(r) + } + Request::Call { request, .. } => { + let r = schema::v1::light::RemoteCallRequest { + block: request.block.encode(), + method: request.method.clone(), + data: request.call_data.clone(), + }; + schema::v1::light::request::Request::RemoteCallRequest(r) + } + Request::Changes { request, .. 
} => { + let r = schema::v1::light::RemoteChangesRequest { + first: request.first_block.1.encode(), + last: request.last_block.1.encode(), + min: request.tries_roots.1.encode(), + max: request.max_block.1.encode(), + storage_key: request.storage_key.clone().map(|s| s.into_inner()) + .unwrap_or_default(), + key: request.key.clone(), + }; + schema::v1::light::request::Request::RemoteChangesRequest(r) + } + }; + + let rq = schema::v1::light::Request { request: Some(request) }; + let mut buf = Vec::with_capacity(rq.encoded_len()); + rq.encode(&mut buf)?; + Ok(buf) +} + +fn send_reply(result: Result, ClientError>, request: Request) { + fn send(item: T, sender: oneshot::Sender) { + let _ = sender.send(item); // It is okay if the other end already hung up. + } + match request { + Request::Body { request, sender } => match result { + Err(e) => send(Err(e), sender), + Ok(Reply::Extrinsics(x)) => send(Ok(x), sender), + reply => log::error!("invalid reply for body request: {:?}, {:?}", reply, request), + } + Request::Header { request, sender } => match result { + Err(e) => send(Err(e), sender), + Ok(Reply::Header(x)) => send(Ok(x), sender), + reply => log::error!("invalid reply for header request: {:?}, {:?}", reply, request), + } + Request::Read { request, sender } => match result { + Err(e) => send(Err(e), sender), + Ok(Reply::MapVecU8OptVecU8(x)) => send(Ok(x), sender), + reply => log::error!("invalid reply for read request: {:?}, {:?}", reply, request), + } + Request::ReadChild { request, sender } => match result { + Err(e) => send(Err(e), sender), + Ok(Reply::MapVecU8OptVecU8(x)) => send(Ok(x), sender), + reply => log::error!("invalid reply for read child request: {:?}, {:?}", reply, request), + } + Request::Call { request, sender } => match result { + Err(e) => send(Err(e), sender), + Ok(Reply::VecU8(x)) => send(Ok(x), sender), + reply => log::error!("invalid reply for call request: {:?}, {:?}", reply, request), + } + Request::Changes { request, sender } => match result 
{ + Err(e) => send(Err(e), sender), + Ok(Reply::VecNumberU32(x)) => send(Ok(x), sender), + reply => log::error!("invalid reply for changes request: {:?}, {:?}", reply, request), + } + } +} + +/// Output type of inbound and outbound substream upgrades. +#[derive(Debug)] +pub enum Event { + /// Incoming request from remote and substream to use for the response. + Request(schema::v1::light::Request, T), + /// Incoming response from remote. + Response(RequestId, Response), +} + +/// Incoming response from remote. +#[derive(Debug, Clone)] +pub enum Response { + /// Incoming light response from remote. + Light(schema::v1::light::Response), + /// Incoming block response from remote. + Block(schema::v1::BlockResponse), +} + +/// Substream upgrade protocol. +/// +/// Reads incoming requests from remote. +#[derive(Debug, Clone)] +pub struct InboundProtocol { + /// The max. request length in bytes. + max_request_size: usize, + /// The protocol to use for upgrade negotiation. + protocol: Bytes, +} + +impl UpgradeInfo for InboundProtocol { + type Info = Bytes; + type InfoIter = iter::Once; + + fn protocol_info(&self) -> Self::InfoIter { + iter::once(self.protocol.clone()) + } +} + +impl InboundUpgrade for InboundProtocol +where + T: AsyncRead + AsyncWrite + Unpin + Send + 'static +{ + type Output = Event; + type Error = ReadOneError; + type Future = BoxFuture<'static, Result>; + + fn upgrade_inbound(self, mut s: T, _: Self::Info) -> Self::Future { + let future = async move { + let vec = read_one(&mut s, self.max_request_size).await?; + match schema::v1::light::Request::decode(&vec[..]) { + Ok(r) => Ok(Event::Request(r, s)), + Err(e) => Err(ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e))) + } + }; + future.boxed() + } +} + +/// Substream upgrade protocol. +/// +/// Sends a request to remote and awaits the response. +#[derive(Debug, Clone)] +pub struct OutboundProtocol { + /// The serialized protobuf request. + request: Vec, + /// Local identifier for the request. 
Used to associate it with a response. + request_id: RequestId, + /// Kind of response expected for this request. + expected: ExpectedResponseTy, + /// The max. response length in bytes. + max_response_size: usize, + /// The protocol to use for upgrade negotiation. + protocol: Bytes, +} + +/// Type of response expected from the remote for this request. +#[derive(Debug, Clone)] +enum ExpectedResponseTy { + Light, + Block, +} + +impl UpgradeInfo for OutboundProtocol { + type Info = Bytes; + type InfoIter = iter::Once; + + fn protocol_info(&self) -> Self::InfoIter { + iter::once(self.protocol.clone()) + } +} + +impl OutboundUpgrade for OutboundProtocol +where + T: AsyncRead + AsyncWrite + Unpin + Send + 'static +{ + type Output = Event; + type Error = ReadOneError; + type Future = BoxFuture<'static, Result>; + + fn upgrade_outbound(self, mut s: T, _: Self::Info) -> Self::Future { + let future = async move { + write_one(&mut s, &self.request).await?; + let vec = read_one(&mut s, self.max_response_size).await?; + + match self.expected { + ExpectedResponseTy::Light => { + schema::v1::light::Response::decode(&vec[..]) + .map(|r| Event::Response(self.request_id, Response::Light(r))) + .map_err(|e| { + ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e)) + }) + }, + ExpectedResponseTy::Block => { + schema::v1::BlockResponse::decode(&vec[..]) + .map(|r| Event::Response(self.request_id, Response::Block(r))) + .map_err(|e| { + ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e)) + }) + } + } + }; + future.boxed() + } +} + +fn fmt_keys(first: Option<&Vec>, last: Option<&Vec>) -> String { + if let (Some(first), Some(last)) = (first, last) { + if first == last { + HexDisplay::from(first).to_string() + } else { + format!("{}..{}", HexDisplay::from(first), HexDisplay::from(last)) + } + } else { + String::from("n/a") + } +} + +#[cfg(test)] +mod tests { + use super::*; + use async_std::task; + use assert_matches::assert_matches; + use codec::Encode; + use crate::{ + 
chain::Client, + config::ProtocolId, + schema, + }; + use futures::{channel::oneshot, prelude::*}; + use libp2p::{ + PeerId, + Multiaddr, + core::{ + ConnectedPoint, + connection::ConnectionId, + identity, + muxing::{StreamMuxerBox, SubstreamRef}, + transport::{Transport, boxed::Boxed, memory::MemoryTransport}, + upgrade + }, + noise::{self, Keypair, X25519, NoiseConfig}, + swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}, + yamux + }; + use sc_client_api::{StorageProof, RemoteReadChildRequest, FetchChecker}; + use sp_blockchain::{Error as ClientError}; + use sp_core::storage::ChildInfo; + use std::{ + collections::{HashMap, HashSet}, + io, + iter::{self, FromIterator}, + pin::Pin, + sync::Arc, + task::{Context, Poll} + }; + use sp_runtime::{generic::Header, traits::{BlakeTwo256, Block as BlockT, NumberFor}}; + use super::{Event, LightClientHandler, Request, Response, OutboundProtocol, PeerStatus}; + use void::Void; + + type Block = sp_runtime::generic::Block, substrate_test_runtime::Extrinsic>; + type Handler = LightClientHandler; + type Swarm = libp2p::swarm::Swarm; + + fn empty_proof() -> Vec { + StorageProof::empty().encode() + } + + fn make_swarm(ok: bool, ps: sc_peerset::PeersetHandle, cf: super::Config) -> Swarm { + let client = Arc::new(substrate_test_runtime_client::new()); + let checker = Arc::new(DummyFetchChecker { ok, _mark: std::marker::PhantomData }); + let id_key = identity::Keypair::generate_ed25519(); + let dh_key = Keypair::::new().into_authentic(&id_key).unwrap(); + let local_peer = id_key.public().into_peer_id(); + let transport = MemoryTransport::default() + .upgrade(upgrade::Version::V1) + .authenticate(NoiseConfig::xx(dh_key).into_authenticated()) + .multiplex(yamux::Config::default()) + .map(|(peer, muxer), _| (peer, StreamMuxerBox::new(muxer))) + .map_err(|e| io::Error::new(io::ErrorKind::Other, e)) + .boxed(); + Swarm::new(transport, LightClientHandler::new(cf, client, checker, ps), local_peer) + } + + struct 
DummyFetchChecker { + ok: bool, + _mark: std::marker::PhantomData + } + + impl light::FetchChecker for DummyFetchChecker { + fn check_header_proof( + &self, + _request: &RemoteHeaderRequest, + header: Option, + _remote_proof: StorageProof, + ) -> Result { + match self.ok { + true if header.is_some() => Ok(header.unwrap()), + _ => Err(ClientError::Backend("Test error".into())), + } + } + + fn check_read_proof( + &self, + request: &RemoteReadRequest, + _: StorageProof, + ) -> Result, Option>>, ClientError> { + match self.ok { + true => Ok(request.keys + .iter() + .cloned() + .map(|k| (k, Some(vec![42]))) + .collect() + ), + false => Err(ClientError::Backend("Test error".into())), + } + } + + fn check_read_child_proof( + &self, + request: &RemoteReadChildRequest, + _: StorageProof, + ) -> Result, Option>>, ClientError> { + match self.ok { + true => Ok(request.keys + .iter() + .cloned() + .map(|k| (k, Some(vec![42]))) + .collect() + ), + false => Err(ClientError::Backend("Test error".into())), + } + } + + fn check_execution_proof( + &self, + _: &RemoteCallRequest, + _: StorageProof, + ) -> Result, ClientError> { + match self.ok { + true => Ok(vec![42]), + false => Err(ClientError::Backend("Test error".into())), + } + } + + fn check_changes_proof( + &self, + _: &RemoteChangesRequest, + _: ChangesProof + ) -> Result, u32)>, ClientError> { + match self.ok { + true => Ok(vec![(100.into(), 2)]), + false => Err(ClientError::Backend("Test error".into())), + } + } + + fn check_body_proof( + &self, + _: &RemoteBodyRequest, + body: Vec + ) -> Result, ClientError> { + match self.ok { + true => Ok(body), + false => Err(ClientError::Backend("Test error".into())), + } + } + } + + fn make_config() -> super::Config { + super::Config::new(&ProtocolId::from("foo")) + } + + fn dummy_header() -> sp_test_primitives::Header { + sp_test_primitives::Header { + parent_hash: Default::default(), + number: 0, + state_root: Default::default(), + extrinsics_root: Default::default(), + digest: 
Default::default(), + } + } + + struct EmptyPollParams(PeerId); + + impl PollParameters for EmptyPollParams { + type SupportedProtocolsIter = iter::Empty>; + type ListenedAddressesIter = iter::Empty; + type ExternalAddressesIter = iter::Empty; + + fn supported_protocols(&self) -> Self::SupportedProtocolsIter { + iter::empty() + } + + fn listened_addresses(&self) -> Self::ListenedAddressesIter { + iter::empty() + } + + fn external_addresses(&self) -> Self::ExternalAddressesIter { + iter::empty() + } + + fn local_peer_id(&self) -> &PeerId { + &self.0 + } + } + + fn peerset() -> (sc_peerset::Peerset, sc_peerset::PeersetHandle) { + let cfg = sc_peerset::PeersetConfig { + in_peers: 128, + out_peers: 128, + bootnodes: Vec::new(), + reserved_only: false, + priority_groups: Vec::new(), + }; + sc_peerset::Peerset::from_config(cfg) + } + + fn make_behaviour + ( ok: bool + , ps: sc_peerset::PeersetHandle + , cf: super::Config + ) -> LightClientHandler + { + let client = Arc::new(substrate_test_runtime_client::new()); + let checker = Arc::new(DummyFetchChecker { ok, _mark: std::marker::PhantomData }); + LightClientHandler::new(cf, client, checker, ps) + } + + fn empty_dialer() -> ConnectedPoint { + ConnectedPoint::Dialer { address: Multiaddr::empty() } + } + + fn poll(mut b: &mut LightClientHandler) -> Poll> { + let mut p = EmptyPollParams(PeerId::random()); + match future::poll_fn(|cx| Pin::new(&mut b).poll(cx, &mut p)).now_or_never() { + Some(a) => Poll::Ready(a), + None => Poll::Pending + } + } + + #[test] + fn disconnects_from_peer_if_told() { + let peer = PeerId::random(); + let pset = peerset(); + let mut behaviour = make_behaviour(true, pset.1, make_config()); + + behaviour.inject_connection_established(&peer, &ConnectionId::new(1), &empty_dialer()); + behaviour.inject_connected(&peer); + assert_eq!(1, behaviour.peers.len()); + + behaviour.inject_connection_closed(&peer, &ConnectionId::new(1), &empty_dialer()); + behaviour.inject_disconnected(&peer); + assert_eq!(0, 
behaviour.peers.len()) + } + + #[test] + fn disconnects_from_peer_if_request_times_out() { + let peer0 = PeerId::random(); + let peer1 = PeerId::random(); + let pset = peerset(); + let mut behaviour = make_behaviour(true, pset.1, make_config()); + + behaviour.inject_connection_established(&peer0, &ConnectionId::new(1), &empty_dialer()); + behaviour.inject_connected(&peer0); + behaviour.inject_connection_established(&peer1, &ConnectionId::new(2), &empty_dialer()); + behaviour.inject_connected(&peer1); + + // We now know about two peers. + assert_eq!(HashSet::from_iter(&[peer0.clone(), peer1.clone()]), behaviour.peers.keys().collect::>()); + + // No requests have been made yet. + assert!(behaviour.pending_requests.is_empty()); + assert!(behaviour.outstanding.is_empty()); + + // Issue our first request! + let chan = oneshot::channel(); + let request = light::RemoteCallRequest { + block: Default::default(), + header: dummy_header(), + method: "test".into(), + call_data: vec![], + retry_count: Some(1), + }; + behaviour.request(Request::Call { request, sender: chan.0 }).unwrap(); + assert_eq!(1, behaviour.pending_requests.len()); + + // The behaviour should now attempt to send the request. + assert_matches!(poll(&mut behaviour), Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, .. }) => { + assert!(peer_id == peer0 || peer_id == peer1) + }); + + // And we should have one busy peer. + assert!({ + let (idle, busy): (Vec<_>, Vec<_>) = + behaviour.peers.iter().partition(|(_, info)| info.status == PeerStatus::Idle); + + idle.len() == 1 && busy.len() == 1 + && (idle[0].0 == &peer0 || busy[0].0 == &peer0) + && (idle[0].0 == &peer1 || busy[0].0 == &peer1) + }); + + // No more pending requests, but one should be outstanding. + assert_eq!(0, behaviour.pending_requests.len()); + assert_eq!(1, behaviour.outstanding.len()); + + // We now set back the timestamp of the outstanding request to make it expire. 
+ let request = behaviour.outstanding.values_mut().next().unwrap(); + request.timestamp -= make_config().request_timeout; + + // Make progress, but do not expect some action. + assert_matches!(poll(&mut behaviour), Poll::Pending); + + // The request should have timed out by now and the corresponding peer be removed. + assert_eq!(1, behaviour.peers.len()); + // Since we asked for one retry, the request should be back in the pending queue. + assert_eq!(1, behaviour.pending_requests.len()); + // No other request should be ongoing. + assert_eq!(0, behaviour.outstanding.len()); + } + + #[test] + fn disconnects_from_peer_on_incorrect_response() { + let peer = PeerId::random(); + let pset = peerset(); + let mut behaviour = make_behaviour(false, pset.1, make_config()); + // ^--- Making sure the response data check fails. + + let conn = ConnectionId::new(1); + behaviour.inject_connection_established(&peer, &conn, &empty_dialer()); + behaviour.inject_connected(&peer); + assert_eq!(1, behaviour.peers.len()); + + let chan = oneshot::channel(); + let request = light::RemoteCallRequest { + block: Default::default(), + header: dummy_header(), + method: "test".into(), + call_data: vec![], + retry_count: Some(1), + }; + behaviour.request(Request::Call { request, sender: chan.0 }).unwrap(); + + assert_eq!(1, behaviour.pending_requests.len()); + assert_eq!(0, behaviour.outstanding.len()); + poll(&mut behaviour); // Make progress + assert_eq!(0, behaviour.pending_requests.len()); + assert_eq!(1, behaviour.outstanding.len()); + + let request_id = *behaviour.outstanding.keys().next().unwrap(); + + let response = { + let r = schema::v1::light::RemoteCallResponse { proof: empty_proof() }; + schema::v1::light::Response { + response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)), + } + }; + + behaviour.inject_event(peer.clone(), conn, Event::Response(request_id, Response::Light(response))); + assert!(behaviour.peers.is_empty()); + + poll(&mut behaviour); // More 
progress + + // The request should be back in the pending queue + assert_eq!(1, behaviour.pending_requests.len()); + assert_eq!(0, behaviour.outstanding.len()); + } + + #[test] + fn disconnects_from_peer_on_unexpected_response() { + let peer = PeerId::random(); + let pset = peerset(); + let mut behaviour = make_behaviour(true, pset.1, make_config()); + + let conn = ConnectionId::new(1); + behaviour.inject_connection_established(&peer, &conn, &empty_dialer()); + behaviour.inject_connected(&peer); + assert_eq!(1, behaviour.peers.len()); + assert_eq!(0, behaviour.pending_requests.len()); + assert_eq!(0, behaviour.outstanding.len()); + + // Some unsolicited response + let response = { + let r = schema::v1::light::RemoteCallResponse { proof: empty_proof() }; + schema::v1::light::Response { + response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)), + } + }; + + behaviour.inject_event(peer.clone(), conn, Event::Response(2347895932, Response::Light(response))); + + assert!(behaviour.peers.is_empty()); + poll(&mut behaviour); + assert_eq!(0, behaviour.pending_requests.len()); + assert_eq!(0, behaviour.outstanding.len()); + } + + #[test] + fn disconnects_from_peer_on_wrong_response_type() { + let peer = PeerId::random(); + let pset = peerset(); + let mut behaviour = make_behaviour(true, pset.1, make_config()); + + let conn = ConnectionId::new(1); + behaviour.inject_connection_established(&peer, &conn, &empty_dialer()); + behaviour.inject_connected(&peer); + assert_eq!(1, behaviour.peers.len()); + + let chan = oneshot::channel(); + let request = light::RemoteCallRequest { + block: Default::default(), + header: dummy_header(), + method: "test".into(), + call_data: vec![], + retry_count: Some(1), + }; + behaviour.request(Request::Call { request, sender: chan.0 }).unwrap(); + + assert_eq!(1, behaviour.pending_requests.len()); + assert_eq!(0, behaviour.outstanding.len()); + poll(&mut behaviour); // Make progress + assert_eq!(0, 
behaviour.pending_requests.len()); + assert_eq!(1, behaviour.outstanding.len()); + + let request_id = *behaviour.outstanding.keys().next().unwrap(); + + let response = { + let r = schema::v1::light::RemoteReadResponse { proof: empty_proof() }; // Not a RemoteCallResponse! + schema::v1::light::Response { + response: Some(schema::v1::light::response::Response::RemoteReadResponse(r)), + } + }; + + behaviour.inject_event(peer.clone(), conn, Event::Response(request_id, Response::Light(response))); + assert!(behaviour.peers.is_empty()); + + poll(&mut behaviour); // More progress + + // The request should be back in the pending queue + assert_eq!(1, behaviour.pending_requests.len()); + assert_eq!(0, behaviour.outstanding.len()); + } + + #[test] + fn receives_remote_failure_after_retry_count_failures() { + let peer1 = PeerId::random(); + let peer2 = PeerId::random(); + let peer3 = PeerId::random(); + let peer4 = PeerId::random(); + let pset = peerset(); + let mut behaviour = make_behaviour(false, pset.1, make_config()); + // ^--- Making sure the response data check fails. + + let conn1 = ConnectionId::new(1); + behaviour.inject_connection_established(&peer1, &conn1, &empty_dialer()); + behaviour.inject_connected(&peer1); + let conn2 = ConnectionId::new(2); + behaviour.inject_connection_established(&peer2, &conn2, &empty_dialer()); + behaviour.inject_connected(&peer2); + let conn3 = ConnectionId::new(3); + behaviour.inject_connection_established(&peer3, &conn3, &empty_dialer()); + behaviour.inject_connected(&peer3); + let conn4 = ConnectionId::new(3); + behaviour.inject_connection_established(&peer4, &conn4, &empty_dialer()); + behaviour.inject_connected(&peer4); + assert_eq!(4, behaviour.peers.len()); + + let mut chan = oneshot::channel(); + let request = light::RemoteCallRequest { + block: Default::default(), + header: dummy_header(), + method: "test".into(), + call_data: vec![], + retry_count: Some(3), // Attempt up to three retries. 
+ }; + behaviour.request(Request::Call { request, sender: chan.0 }).unwrap(); + + assert_eq!(1, behaviour.pending_requests.len()); + assert_eq!(0, behaviour.outstanding.len()); + assert_matches!(poll(&mut behaviour), Poll::Ready(NetworkBehaviourAction::NotifyHandler { .. })); + assert_eq!(0, behaviour.pending_requests.len()); + assert_eq!(1, behaviour.outstanding.len()); + + for i in 1 ..= 3 { + // Construct an invalid response + let request_id = *behaviour.outstanding.keys().next().unwrap(); + let responding_peer = behaviour.outstanding.values().next().unwrap().peer.clone(); + let response = { + let r = schema::v1::light::RemoteCallResponse { proof: empty_proof() }; + schema::v1::light::Response { + response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)) + } + }; + let conn = ConnectionId::new(i); + behaviour.inject_event(responding_peer, conn, Event::Response(request_id, Response::Light(response.clone()))); + assert_matches!(poll(&mut behaviour), Poll::Ready(NetworkBehaviourAction::NotifyHandler { .. 
})); + assert_matches!(chan.1.try_recv(), Ok(None)) + } + // Final invalid response + let request_id = *behaviour.outstanding.keys().next().unwrap(); + let responding_peer = behaviour.outstanding.values().next().unwrap().peer.clone(); + let response = { + let r = schema::v1::light::RemoteCallResponse { proof: empty_proof() }; + schema::v1::light::Response { + response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)), + } + }; + behaviour.inject_event(responding_peer, conn4, Event::Response(request_id, Response::Light(response))); + assert_matches!(poll(&mut behaviour), Poll::Pending); + assert_matches!(chan.1.try_recv(), Ok(Some(Err(ClientError::RemoteFetchFailed)))) + } + + fn issue_request(request: Request) { + let peer = PeerId::random(); + let pset = peerset(); + let mut behaviour = make_behaviour(true, pset.1, make_config()); + + let conn = ConnectionId::new(1); + behaviour.inject_connection_established(&peer, &conn, &empty_dialer()); + behaviour.inject_connected(&peer); + assert_eq!(1, behaviour.peers.len()); + + let response = match request { + Request::Body { .. 
} => unimplemented!(), + Request::Header{..} => { + let r = schema::v1::light::RemoteHeaderResponse { + header: dummy_header().encode(), + proof: empty_proof() + }; + schema::v1::light::Response { + response: Some(schema::v1::light::response::Response::RemoteHeaderResponse(r)), + } + } + Request::Read{..} => { + let r = schema::v1::light::RemoteReadResponse { proof: empty_proof() }; + schema::v1::light::Response { + response: Some(schema::v1::light::response::Response::RemoteReadResponse(r)), + } + } + Request::ReadChild{..} => { + let r = schema::v1::light::RemoteReadResponse { proof: empty_proof() }; + schema::v1::light::Response { + response: Some(schema::v1::light::response::Response::RemoteReadResponse(r)), + } + } + Request::Call{..} => { + let r = schema::v1::light::RemoteCallResponse { proof: empty_proof() }; + schema::v1::light::Response { + response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)), + } + } + Request::Changes{..} => { + let r = schema::v1::light::RemoteChangesResponse { + max: iter::repeat(1).take(32).collect(), + proof: Vec::new(), + roots: Vec::new(), + roots_proof: empty_proof() + }; + schema::v1::light::Response { + response: Some(schema::v1::light::response::Response::RemoteChangesResponse(r)), + } + } + }; + + behaviour.request(request).unwrap(); + + assert_eq!(1, behaviour.pending_requests.len()); + assert_eq!(0, behaviour.outstanding.len()); + assert_matches!(poll(&mut behaviour), Poll::Ready(NetworkBehaviourAction::NotifyHandler { .. 
})); + assert_eq!(0, behaviour.pending_requests.len()); + assert_eq!(1, behaviour.outstanding.len()); + assert_eq!(1, *behaviour.outstanding.keys().next().unwrap()); + + behaviour.inject_event(peer.clone(), conn, Event::Response(1, Response::Light(response))); + + poll(&mut behaviour); + + assert_eq!(0, behaviour.pending_requests.len()); + assert_eq!(0, behaviour.outstanding.len()) + } + + #[test] + fn receives_remote_call_response() { + let mut chan = oneshot::channel(); + let request = light::RemoteCallRequest { + block: Default::default(), + header: dummy_header(), + method: "test".into(), + call_data: vec![], + retry_count: None, + }; + issue_request(Request::Call { request, sender: chan.0 }); + assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) + } + + #[test] + fn receives_remote_read_response() { + let mut chan = oneshot::channel(); + let request = light::RemoteReadRequest { + header: dummy_header(), + block: Default::default(), + keys: vec![b":key".to_vec()], + retry_count: None, + }; + issue_request(Request::Read { request, sender: chan.0 }); + assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) + } + + #[test] + fn receives_remote_read_child_response() { + let mut chan = oneshot::channel(); + let child_info = ChildInfo::new_default(&b":child_storage:default:sub"[..]); + let request = light::RemoteReadChildRequest { + header: dummy_header(), + block: Default::default(), + storage_key: child_info.prefixed_storage_key(), + keys: vec![b":key".to_vec()], + retry_count: None, + }; + issue_request(Request::ReadChild { request, sender: chan.0 }); + assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) + } + + #[test] + fn receives_remote_header_response() { + let mut chan = oneshot::channel(); + let request = light::RemoteHeaderRequest { + cht_root: Default::default(), + block: 1, + retry_count: None, + }; + issue_request(Request::Header { request, sender: chan.0 }); + assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) + } + + #[test] + fn 
receives_remote_changes_response() { + let mut chan = oneshot::channel(); + let request = light::RemoteChangesRequest { + changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { + zero: (0, Default::default()), + end: None, + config: Some(sp_core::ChangesTrieConfiguration::new(4, 2)), + }], + first_block: (1, Default::default()), + last_block: (100, Default::default()), + max_block: (100, Default::default()), + tries_roots: (1, Default::default(), Vec::new()), + key: Vec::new(), + storage_key: None, + retry_count: None, + }; + issue_request(Request::Changes { request, sender: chan.0 }); + assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) + } + + fn send_receive(request: Request) { + // We start a swarm on the listening side which awaits incoming requests and answers them: + let local_pset = peerset(); + let local_listen_addr: libp2p::Multiaddr = libp2p::multiaddr::Protocol::Memory(rand::random()).into(); + let mut local_swarm = make_swarm(true, local_pset.1, make_config()); + Swarm::listen_on(&mut local_swarm, local_listen_addr.clone()).unwrap(); + + // We also start a swarm that makes requests and awaits responses: + let remote_pset = peerset(); + let mut remote_swarm = make_swarm(true, remote_pset.1, make_config()); + + // We now schedule a request, dial the remote and let the two swarm work it out: + remote_swarm.request(request).unwrap(); + Swarm::dial_addr(&mut remote_swarm, local_listen_addr).unwrap(); + + let future = { + let a = local_swarm.for_each(|_| future::ready(())); + let b = remote_swarm.for_each(|_| future::ready(())); + future::join(a, b).map(|_| ()) + }; + + task::spawn(future); + } + + #[test] + fn send_receive_call() { + let chan = oneshot::channel(); + let request = light::RemoteCallRequest { + block: Default::default(), + header: dummy_header(), + method: "test".into(), + call_data: vec![], + retry_count: None, + }; + send_receive(Request::Call { request, sender: chan.0 }); + assert_eq!(vec![42], 
task::block_on(chan.1).unwrap().unwrap()); + // ^--- from `DummyFetchChecker::check_execution_proof` + } + + #[test] + fn send_receive_read() { + let chan = oneshot::channel(); + let request = light::RemoteReadRequest { + header: dummy_header(), + block: Default::default(), + keys: vec![b":key".to_vec()], + retry_count: None + }; + send_receive(Request::Read { request, sender: chan.0 }); + assert_eq!(Some(vec![42]), task::block_on(chan.1).unwrap().unwrap().remove(&b":key"[..]).unwrap()); + // ^--- from `DummyFetchChecker::check_read_proof` + } + + #[test] + fn send_receive_read_child() { + let chan = oneshot::channel(); + let child_info = ChildInfo::new_default(&b":child_storage:default:sub"[..]); + let request = light::RemoteReadChildRequest { + header: dummy_header(), + block: Default::default(), + storage_key: child_info.prefixed_storage_key(), + keys: vec![b":key".to_vec()], + retry_count: None, + }; + send_receive(Request::ReadChild { request, sender: chan.0 }); + assert_eq!(Some(vec![42]), task::block_on(chan.1).unwrap().unwrap().remove(&b":key"[..]).unwrap()); + // ^--- from `DummyFetchChecker::check_read_child_proof` + } + + #[test] + fn send_receive_header() { + sp_tracing::try_init_simple(); + let chan = oneshot::channel(); + let request = light::RemoteHeaderRequest { + cht_root: Default::default(), + block: 1, + retry_count: None, + }; + send_receive(Request::Header { request, sender: chan.0 }); + // The remote does not know block 1: + assert_matches!(task::block_on(chan.1).unwrap(), Err(ClientError::RemoteFetchFailed)); + } + + #[test] + fn send_receive_changes() { + let chan = oneshot::channel(); + let request = light::RemoteChangesRequest { + changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { + zero: (0, Default::default()), + end: None, + config: Some(sp_core::ChangesTrieConfiguration::new(4, 2)), + }], + first_block: (1, Default::default()), + last_block: (100, Default::default()), + max_block: (100, Default::default()), + 
tries_roots: (1, Default::default(), Vec::new()), + key: Vec::new(), + storage_key: None, + retry_count: None, + }; + send_receive(Request::Changes { request, sender: chan.0 }); + assert_eq!(vec![(100, 2)], task::block_on(chan.1).unwrap().unwrap()); + // ^--- from `DummyFetchChecker::check_changes_proof` + } + + #[test] + fn body_request_fields_encoded_properly() { + let (sender, _) = oneshot::channel(); + let serialized_request = serialize_request::(&Request::Body { + request: RemoteBodyRequest { + header: dummy_header(), + retry_count: None, + }, + sender, + }).unwrap(); + let deserialized_request = schema::v1::BlockRequest::decode(&serialized_request[..]).unwrap(); + assert!( + BlockAttributes::from_be_u32(deserialized_request.fields) + .unwrap() + .contains(BlockAttributes::BODY) + ); + } +} diff --git a/client/network/src/network_state.rs b/client/network/src/network_state.rs new file mode 100644 index 0000000000000..db2b6429304bb --- /dev/null +++ b/client/network/src/network_state.rs @@ -0,0 +1,109 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Information about the networking, for diagnostic purposes. +//! +//! **Warning**: These APIs are not stable. 
+ +use libp2p::{core::ConnectedPoint, Multiaddr}; +use serde::{Deserialize, Serialize}; +use slog_derive::SerdeValue; +use std::{collections::{HashMap, HashSet}, time::Duration}; + +/// Returns general information about the networking. +/// +/// Meant for general diagnostic purposes. +/// +/// **Warning**: This API is not stable. +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, SerdeValue)] +#[serde(rename_all = "camelCase")] +pub struct NetworkState { + /// PeerId of the local node. + pub peer_id: String, + /// List of addresses the node is currently listening on. + pub listened_addresses: HashSet, + /// List of addresses the node knows it can be reached as. + pub external_addresses: HashSet, + /// List of node we're connected to. + pub connected_peers: HashMap, + /// List of node that we know of but that we're not connected to. + pub not_connected_peers: HashMap, + /// State of the peerset manager. + pub peerset: serde_json::Value, +} + +/// Part of the `NetworkState` struct. Unstable. +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Peer { + /// How we are connected to the node. + pub endpoint: PeerEndpoint, + /// Node information, as provided by the node itself. Can be empty if not known yet. + pub version_string: Option, + /// Latest ping duration with this node. + pub latest_ping_time: Option, + /// If true, the peer is "enabled", which means that we try to open Substrate-related protocols + /// with this peer. If false, we stick to Kademlia and/or other network-only protocols. + pub enabled: bool, + /// If true, the peer is "open", which means that we have a Substrate-related protocol + /// with this peer. + pub open: bool, + /// List of addresses known for this node. + pub known_addresses: HashSet, +} + +/// Part of the `NetworkState` struct. Unstable. 
+#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct NotConnectedPeer { + /// List of addresses known for this node. + pub known_addresses: HashSet, + /// Node information, as provided by the node itself, if we were ever connected to this node. + pub version_string: Option, + /// Latest ping duration with this node, if we were ever connected to this node. + pub latest_ping_time: Option, +} + +/// Part of the `NetworkState` struct. Unstable. +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub enum PeerEndpoint { + /// We are dialing the given address. + Dialing(Multiaddr), + /// We are listening. + Listening { + /// Local address of the connection. + local_addr: Multiaddr, + /// Address data is sent back to. + send_back_addr: Multiaddr, + }, +} + +impl From for PeerEndpoint { + fn from(endpoint: ConnectedPoint) -> Self { + match endpoint { + ConnectedPoint::Dialer { address } => + PeerEndpoint::Dialing(address), + ConnectedPoint::Listener { local_addr, send_back_addr } => + PeerEndpoint::Listening { + local_addr, + send_back_addr + } + } + } +} diff --git a/client/network/src/on_demand_layer.rs b/client/network/src/on_demand_layer.rs new file mode 100644 index 0000000000000..084172ee57c4f --- /dev/null +++ b/client/network/src/on_demand_layer.rs @@ -0,0 +1,228 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! On-demand requests service.
+
+use crate::light_client_handler;
+
+use futures::{channel::oneshot, prelude::*};
+use parking_lot::Mutex;
+use sc_client_api::{
+	FetchChecker, Fetcher, RemoteBodyRequest, RemoteCallRequest, RemoteChangesRequest,
+	RemoteHeaderRequest, RemoteReadChildRequest, RemoteReadRequest, StorageProof, ChangesProof,
+};
+use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender};
+use sp_blockchain::Error as ClientError;
+use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor};
+use std::{collections::HashMap, pin::Pin, sync::Arc, task::Context, task::Poll};
+
+/// Implements the `Fetcher` trait of the client. Makes it possible for the light client to perform
+/// network requests for some state.
+///
+/// This implementation stores all the requests in a queue. The network, in parallel, is then
+/// responsible for pulling elements out of that queue and fulfilling them.
+pub struct OnDemand<B: BlockT> {
+	/// Objects that checks whether what has been retrieved is correct.
+	checker: Arc<dyn FetchChecker<B>>,
+
+	/// Queue of requests. Set to `Some` at initialization, then extracted by the network.
+	///
+	/// Note that a better alternative would be to use a MPMC queue here, and add a `poll` method
+	/// from the `OnDemand`. However there exists no popular implementation of MPMC channels in
+	/// asynchronous Rust at the moment
+	requests_queue: Mutex<Option<TracingUnboundedReceiver<light_client_handler::Request<B>>>>,
+
+	/// Sending side of `requests_queue`.
+	requests_send: TracingUnboundedSender<light_client_handler::Request<B>>,
+}
+
+/// Dummy implementation of `FetchChecker` that always assumes that responses are bad.
+///
+/// Considering that it is the responsibility of the client to build the fetcher, it can use this
+/// implementation if it knows that it will never perform any request.
+#[derive(Default, Clone)]
+pub struct AlwaysBadChecker;
+
+impl<Block: BlockT> FetchChecker<Block> for AlwaysBadChecker {
+	fn check_header_proof(
+		&self,
+		_request: &RemoteHeaderRequest<Block::Header>,
+		_remote_header: Option<Block::Header>,
+		_remote_proof: StorageProof,
+	) -> Result<Block::Header, ClientError> {
+		Err(ClientError::Msg("AlwaysBadChecker".into()))
+	}
+
+	fn check_read_proof(
+		&self,
+		_request: &RemoteReadRequest<Block::Header>,
+		_remote_proof: StorageProof,
+	) -> Result<HashMap<Vec<u8>,Option<Vec<u8>>>, ClientError> {
+		Err(ClientError::Msg("AlwaysBadChecker".into()))
+	}
+
+	fn check_read_child_proof(
+		&self,
+		_request: &RemoteReadChildRequest<Block::Header>,
+		_remote_proof: StorageProof,
+	) -> Result<HashMap<Vec<u8>, Option<Vec<u8>>>, ClientError> {
+		Err(ClientError::Msg("AlwaysBadChecker".into()))
+	}
+
+	fn check_execution_proof(
+		&self,
+		_request: &RemoteCallRequest<Block::Header>,
+		_remote_proof: StorageProof,
+	) -> Result<Vec<u8>, ClientError> {
+		Err(ClientError::Msg("AlwaysBadChecker".into()))
+	}
+
+	fn check_changes_proof(
+		&self,
+		_request: &RemoteChangesRequest<Block::Header>,
+		_remote_proof: ChangesProof<Block::Header>
+	) -> Result<Vec<(NumberFor<Block>, u32)>, ClientError> {
+		Err(ClientError::Msg("AlwaysBadChecker".into()))
+	}
+
+	fn check_body_proof(
+		&self,
+		_request: &RemoteBodyRequest<Block::Header>,
+		_body: Vec<Block::Extrinsic>
+	) -> Result<Vec<Block::Extrinsic>, ClientError> {
+		Err(ClientError::Msg("AlwaysBadChecker".into()))
+	}
+}
+
+impl<B: BlockT> OnDemand<B>
+where
+	B::Header: HeaderT,
+{
+	/// Creates new on-demand service.
+	pub fn new(checker: Arc<dyn FetchChecker<B>>) -> Self {
+		let (requests_send, requests_queue) = tracing_unbounded("mpsc_ondemand");
+		let requests_queue = Mutex::new(Some(requests_queue));
+
+		OnDemand {
+			checker,
+			requests_queue,
+			requests_send,
+		}
+	}
+
+	/// Get checker reference.
+	pub fn checker(&self) -> &Arc<dyn FetchChecker<B>> {
+		&self.checker
+	}
+
+	/// Extracts the queue of requests.
+	///
+	/// Whenever one of the methods of the `Fetcher` trait is called, an element is pushed on this
+	/// channel.
+	///
+	/// If this function returns `None`, that means that the receiver has already been extracted in
+	/// the past, and therefore that something already handles the requests.
+	pub(crate) fn extract_receiver(&self)
+		-> Option<TracingUnboundedReceiver<light_client_handler::Request<B>>>
+	{
+		self.requests_queue.lock().take()
+	}
+}
+
+impl<B> Fetcher<B> for OnDemand<B>
+where
+	B: BlockT,
+	B::Header: HeaderT,
+{
+	type RemoteHeaderResult = RemoteResponse<B::Header>;
+	type RemoteReadResult = RemoteResponse<HashMap<Vec<u8>, Option<Vec<u8>>>>;
+	type RemoteCallResult = RemoteResponse<Vec<u8>>;
+	type RemoteChangesResult = RemoteResponse<Vec<(NumberFor<B>, u32)>>;
+	type RemoteBodyResult = RemoteResponse<Vec<B::Extrinsic>>;
+
+	fn remote_header(&self, request: RemoteHeaderRequest<B::Header>) -> Self::RemoteHeaderResult {
+		let (sender, receiver) = oneshot::channel();
+		let _ = self
+			.requests_send
+			.unbounded_send(light_client_handler::Request::Header { request, sender });
+		RemoteResponse { receiver }
+	}
+
+	fn remote_read(&self, request: RemoteReadRequest<B::Header>) -> Self::RemoteReadResult {
+		let (sender, receiver) = oneshot::channel();
+		let _ = self
+			.requests_send
+			.unbounded_send(light_client_handler::Request::Read { request, sender });
+		RemoteResponse { receiver }
+	}
+
+	fn remote_read_child(
+		&self,
+		request: RemoteReadChildRequest<B::Header>,
+	) -> Self::RemoteReadResult {
+		let (sender, receiver) = oneshot::channel();
+		let _ = self
+			.requests_send
+			.unbounded_send(light_client_handler::Request::ReadChild { request, sender });
+		RemoteResponse { receiver }
+	}
+
+	fn remote_call(&self, request: RemoteCallRequest<B::Header>) -> Self::RemoteCallResult {
+		let (sender, receiver) = oneshot::channel();
+		let _ = self
+			.requests_send
+			.unbounded_send(light_client_handler::Request::Call { request, sender });
+		RemoteResponse { receiver }
+	}
+
+	fn remote_changes(
+		&self,
+		request: RemoteChangesRequest<B::Header>,
+	) -> Self::RemoteChangesResult {
+		let (sender, receiver) = oneshot::channel();
+		let _ = self
+			.requests_send
+			.unbounded_send(light_client_handler::Request::Changes { request, sender });
+		RemoteResponse { receiver }
+	}
+
+	fn remote_body(&self, request: RemoteBodyRequest<B::Header>) -> Self::RemoteBodyResult {
+		let (sender, receiver) = oneshot::channel();
+		let _ = self
+			.requests_send
+			.unbounded_send(light_client_handler::Request::Body { request, sender });
+		RemoteResponse { receiver }
+	}
+}
+
+/// Future for an on-demand remote call response.
+pub struct RemoteResponse<T> {
+	receiver: oneshot::Receiver<Result<T, ClientError>>,
+}
+
+impl<T> Future for RemoteResponse<T> {
+	type Output = Result<T, ClientError>;
+
+	fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
+		match self.receiver.poll_unpin(cx) {
+			Poll::Ready(Ok(res)) => Poll::Ready(res),
+			Poll::Ready(Err(_)) => Poll::Ready(Err(ClientError::RemoteFetchCancelled)),
+			Poll::Pending => Poll::Pending,
+		}
+	}
+}
diff --git a/client/network/src/peer_info.rs b/client/network/src/peer_info.rs
new file mode 100644
index 0000000000000..e69ad2b17e59c
--- /dev/null
+++ b/client/network/src/peer_info.rs
@@ -0,0 +1,350 @@
+// Copyright 2019-2020 Parity Technologies (UK) Ltd.
+// This file is part of Substrate.
+
+// Substrate is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Substrate is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
+ +use fnv::FnvHashMap; +use futures::prelude::*; +use libp2p::Multiaddr; +use libp2p::core::connection::{ConnectionId, ListenerId}; +use libp2p::core::{ConnectedPoint, either::EitherOutput, PeerId, PublicKey}; +use libp2p::swarm::{IntoProtocolsHandler, IntoProtocolsHandlerSelect, ProtocolsHandler}; +use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; +use libp2p::identify::{Identify, IdentifyEvent, IdentifyInfo}; +use libp2p::ping::{Ping, PingConfig, PingEvent, PingSuccess}; +use log::{debug, trace, error}; +use smallvec::SmallVec; +use std::{error, io}; +use std::collections::hash_map::Entry; +use std::pin::Pin; +use std::task::{Context, Poll}; +use std::time::Duration; +use wasm_timer::Instant; +use crate::utils::interval; + +/// Time after we disconnect from a node before we purge its information from the cache. +const CACHE_EXPIRE: Duration = Duration::from_secs(10 * 60); +/// Interval at which we perform garbage collection on the node info. +const GARBAGE_COLLECT_INTERVAL: Duration = Duration::from_secs(2 * 60); + +/// Implementation of `NetworkBehaviour` that holds information about peers in cache. +pub struct PeerInfoBehaviour { + /// Periodically ping nodes, and close the connection if it's unresponsive. + ping: Ping, + /// Periodically identifies the remote and responds to incoming requests. + identify: Identify, + /// Information that we know about all nodes. + nodes_info: FnvHashMap, + /// Interval at which we perform garbage collection in `nodes_info`. + garbage_collect: Pin + Send>>, +} + +/// Information about a node we're connected to. +#[derive(Debug)] +struct NodeInfo { + /// When we will remove the entry about this node from the list, or `None` if we're connected + /// to the node. + info_expire: Option, + /// Non-empty list of connected endpoints, one per connection. + endpoints: SmallVec<[ConnectedPoint; crate::MAX_CONNECTIONS_PER_PEER]>, + /// Version reported by the remote, or `None` if unknown. 
+ client_version: Option, + /// Latest ping time with this node. + latest_ping: Option, +} + +impl NodeInfo { + fn new(endpoint: ConnectedPoint) -> Self { + let mut endpoints = SmallVec::new(); + endpoints.push(endpoint); + NodeInfo { + info_expire: None, + endpoints, + client_version: None, + latest_ping: None, + } + } +} + +impl PeerInfoBehaviour { + /// Builds a new `PeerInfoBehaviour`. + pub fn new( + user_agent: String, + local_public_key: PublicKey, + ) -> Self { + let identify = { + let proto_version = "/substrate/1.0".to_string(); + Identify::new(proto_version, user_agent, local_public_key) + }; + + PeerInfoBehaviour { + ping: Ping::new(PingConfig::new()), + identify, + nodes_info: FnvHashMap::default(), + garbage_collect: Box::pin(interval(GARBAGE_COLLECT_INTERVAL)), + } + } + + /// Borrows `self` and returns a struct giving access to the information about a node. + /// + /// Returns `None` if we don't know anything about this node. Always returns `Some` for nodes + /// we're connected to, meaning that if `None` is returned then we're not connected to that + /// node. + pub fn node(&self, peer_id: &PeerId) -> Option { + self.nodes_info.get(peer_id).map(Node) + } + + /// Inserts a ping time in the cache. Has no effect if we don't have any entry for that node, + /// which shouldn't happen. + fn handle_ping_report(&mut self, peer_id: &PeerId, ping_time: Duration) { + trace!(target: "sub-libp2p", "Ping time with {:?}: {:?}", peer_id, ping_time); + if let Some(entry) = self.nodes_info.get_mut(peer_id) { + entry.latest_ping = Some(ping_time); + } else { + error!(target: "sub-libp2p", + "Received ping from node we're not connected to {:?}", peer_id); + } + } + + /// Inserts an identify record in the cache. Has no effect if we don't have any entry for that + /// node, which shouldn't happen. 
+ fn handle_identify_report(&mut self, peer_id: &PeerId, info: &IdentifyInfo) { + trace!(target: "sub-libp2p", "Identified {:?} => {:?}", peer_id, info); + if let Some(entry) = self.nodes_info.get_mut(peer_id) { + entry.client_version = Some(info.agent_version.clone()); + } else { + error!(target: "sub-libp2p", + "Received pong from node we're not connected to {:?}", peer_id); + } + } +} + +/// Gives access to the information about a node. +pub struct Node<'a>(&'a NodeInfo); + +impl<'a> Node<'a> { + /// Returns the endpoint of an established connection to the peer. + pub fn endpoint(&self) -> &'a ConnectedPoint { + &self.0.endpoints[0] // `endpoints` are non-empty by definition + } + + /// Returns the latest version information we know of. + pub fn client_version(&self) -> Option<&'a str> { + self.0.client_version.as_ref().map(|s| &s[..]) + } + + /// Returns the latest ping time we know of for this node. `None` if we never successfully + /// pinged this node. + pub fn latest_ping(&self) -> Option { + self.0.latest_ping + } +} + +/// Event that can be emitted by the behaviour. +#[derive(Debug)] +pub enum PeerInfoEvent { + /// We have obtained identity information from a peer, including the addresses it is listening + /// on. + Identified { + /// Id of the peer that has been identified. + peer_id: PeerId, + /// Information about the peer. 
+ info: IdentifyInfo, + }, +} + +impl NetworkBehaviour for PeerInfoBehaviour { + type ProtocolsHandler = IntoProtocolsHandlerSelect< + ::ProtocolsHandler, + ::ProtocolsHandler + >; + type OutEvent = PeerInfoEvent; + + fn new_handler(&mut self) -> Self::ProtocolsHandler { + IntoProtocolsHandler::select(self.ping.new_handler(), self.identify.new_handler()) + } + + fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { + let mut list = self.ping.addresses_of_peer(peer_id); + list.extend_from_slice(&self.identify.addresses_of_peer(peer_id)); + list + } + + fn inject_connected(&mut self, peer_id: &PeerId) { + self.ping.inject_connected(peer_id); + self.identify.inject_connected(peer_id); + } + + fn inject_connection_established(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + self.ping.inject_connection_established(peer_id, conn, endpoint); + self.identify.inject_connection_established(peer_id, conn, endpoint); + match self.nodes_info.entry(peer_id.clone()) { + Entry::Vacant(e) => { + e.insert(NodeInfo::new(endpoint.clone())); + } + Entry::Occupied(e) => { + let e = e.into_mut(); + if e.info_expire.as_ref().map(|exp| *exp < Instant::now()).unwrap_or(false) { + e.client_version = None; + e.latest_ping = None; + } + e.info_expire = None; + e.endpoints.push(endpoint.clone()); + } + } + } + + fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + self.ping.inject_connection_closed(peer_id, conn, endpoint); + self.identify.inject_connection_closed(peer_id, conn, endpoint); + + if let Some(entry) = self.nodes_info.get_mut(peer_id) { + entry.endpoints.retain(|ep| ep != endpoint) + } else { + error!(target: "sub-libp2p", + "Unknown connection to {:?} closed: {:?}", peer_id, endpoint); + } + } + + fn inject_disconnected(&mut self, peer_id: &PeerId) { + self.ping.inject_disconnected(peer_id); + self.identify.inject_disconnected(peer_id); + + if let Some(entry) = 
self.nodes_info.get_mut(peer_id) { + entry.info_expire = Some(Instant::now() + CACHE_EXPIRE); + } else { + error!(target: "sub-libp2p", + "Disconnected from node we were not connected to {:?}", peer_id); + } + } + + fn inject_event( + &mut self, + peer_id: PeerId, + connection: ConnectionId, + event: <::Handler as ProtocolsHandler>::OutEvent + ) { + match event { + EitherOutput::First(event) => self.ping.inject_event(peer_id, connection, event), + EitherOutput::Second(event) => self.identify.inject_event(peer_id, connection, event), + } + } + + fn inject_addr_reach_failure(&mut self, peer_id: Option<&PeerId>, addr: &Multiaddr, error: &dyn std::error::Error) { + self.ping.inject_addr_reach_failure(peer_id, addr, error); + self.identify.inject_addr_reach_failure(peer_id, addr, error); + } + + fn inject_dial_failure(&mut self, peer_id: &PeerId) { + self.ping.inject_dial_failure(peer_id); + self.identify.inject_dial_failure(peer_id); + } + + fn inject_new_listen_addr(&mut self, addr: &Multiaddr) { + self.ping.inject_new_listen_addr(addr); + self.identify.inject_new_listen_addr(addr); + } + + fn inject_expired_listen_addr(&mut self, addr: &Multiaddr) { + self.ping.inject_expired_listen_addr(addr); + self.identify.inject_expired_listen_addr(addr); + } + + fn inject_new_external_addr(&mut self, addr: &Multiaddr) { + self.ping.inject_new_external_addr(addr); + self.identify.inject_new_external_addr(addr); + } + + fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn error::Error + 'static)) { + self.ping.inject_listener_error(id, err); + self.identify.inject_listener_error(id, err); + } + + fn inject_listener_closed(&mut self, id: ListenerId, reason: Result<(), &io::Error>) { + self.ping.inject_listener_closed(id, reason); + self.identify.inject_listener_closed(id, reason); + } + + fn poll( + &mut self, + cx: &mut Context, + params: &mut impl PollParameters + ) -> Poll< + NetworkBehaviourAction< + <::Handler as ProtocolsHandler>::InEvent, + Self::OutEvent + > + > 
{ + loop { + match self.ping.poll(cx, params) { + Poll::Pending => break, + Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) => { + if let PingEvent { peer, result: Ok(PingSuccess::Ping { rtt }) } = ev { + self.handle_ping_report(&peer, rtt) + } + }, + Poll::Ready(NetworkBehaviourAction::DialAddress { address }) => + return Poll::Ready(NetworkBehaviourAction::DialAddress { address }), + Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }) => + return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }), + Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }) => + return Poll::Ready(NetworkBehaviourAction::NotifyHandler { + peer_id, + handler, + event: EitherOutput::First(event) + }), + Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }) => + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }), + } + } + + loop { + match self.identify.poll(cx, params) { + Poll::Pending => break, + Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)) => { + match event { + IdentifyEvent::Received { peer_id, info, .. } => { + self.handle_identify_report(&peer_id, &info); + let event = PeerInfoEvent::Identified { peer_id, info }; + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)); + } + IdentifyEvent::Error { peer_id, error } => + debug!(target: "sub-libp2p", "Identification with peer {:?} failed => {}", peer_id, error), + IdentifyEvent::Sent { .. 
} => {} + } + }, + Poll::Ready(NetworkBehaviourAction::DialAddress { address }) => + return Poll::Ready(NetworkBehaviourAction::DialAddress { address }), + Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }) => + return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }), + Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }) => + return Poll::Ready(NetworkBehaviourAction::NotifyHandler { + peer_id, + handler, + event: EitherOutput::Second(event) + }), + Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }) => + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }), + } + } + + while let Poll::Ready(Some(())) = self.garbage_collect.poll_next_unpin(cx) { + self.nodes_info.retain(|_, node| { + node.info_expire.as_ref().map(|exp| *exp >= Instant::now()).unwrap_or(true) + }); + } + + Poll::Pending + } +} diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs new file mode 100644 index 0000000000000..c1887ce35bfdb --- /dev/null +++ b/client/network/src/protocol.rs @@ -0,0 +1,1706 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use crate::{ + ExHashT, + chain::Client, + config::{BoxFinalityProofRequestBuilder, ProtocolId, TransactionPool, TransactionImportFuture, TransactionImport}, + error, + utils::{interval, LruHashSet}, +}; + +use bytes::{Bytes, BytesMut}; +use futures::{prelude::*, stream::FuturesUnordered}; +use generic_proto::{GenericProto, GenericProtoOut}; +use libp2p::{Multiaddr, PeerId}; +use libp2p::core::{ConnectedPoint, connection::{ConnectionId, ListenerId}}; +use libp2p::swarm::{ProtocolsHandler, IntoProtocolsHandler}; +use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; +use sp_consensus::{ + BlockOrigin, + block_validation::BlockAnnounceValidator, + import_queue::{BlockImportResult, BlockImportError, IncomingBlock, Origin} +}; +use codec::{Decode, DecodeAll, Encode}; +use sp_runtime::{generic::BlockId, ConsensusEngineId, Justification}; +use sp_runtime::traits::{ + Block as BlockT, Header as HeaderT, NumberFor, Zero, CheckedSub +}; +use sp_arithmetic::traits::SaturatedConversion; +use message::{BlockAnnounce, Message}; +use message::generic::{Message as GenericMessage, Roles}; +use prometheus_endpoint::{ + Registry, Gauge, Counter, GaugeVec, + PrometheusError, Opts, register, U64 +}; +use sync::{ChainSync, SyncState}; +use std::borrow::Cow; +use std::collections::{HashMap, HashSet, VecDeque, hash_map::Entry}; +use std::sync::Arc; +use std::fmt::Write; +use std::{io, iter, num::NonZeroUsize, pin::Pin, task::Poll, time}; +use log::{log, Level, trace, debug, warn, error}; +use wasm_timer::Instant; + +mod generic_proto; + +pub mod message; +pub mod event; +pub mod sync; + +pub use generic_proto::{NotificationsSink, Ready, NotifsHandlerError, LegacyConnectionKillError}; + +const REQUEST_TIMEOUT_SEC: u64 = 40; +/// Interval at which we perform time based maintenance +const TICK_TIMEOUT: time::Duration = time::Duration::from_millis(1100); +/// Interval at which we propagate transactions; +const PROPAGATE_TIMEOUT: time::Duration = 
time::Duration::from_millis(2900); + +/// Maximim number of known block hashes to keep for a peer. +const MAX_KNOWN_BLOCKS: usize = 1024; // ~32kb per peer + LruHashSet overhead +/// Maximim number of known transaction hashes to keep for a peer. +/// +/// This should be approx. 2 blocks full of transactions for the network to function properly. +const MAX_KNOWN_TRANSACTIONS: usize = 10240; // ~300kb per peer + overhead. + +/// Maximim number of transaction validation request we keep at any moment. +const MAX_PENDING_TRANSACTIONS: usize = 8192; + +/// Current protocol version. +pub(crate) const CURRENT_VERSION: u32 = 6; +/// Lowest version we support +pub(crate) const MIN_VERSION: u32 = 3; + +/// When light node connects to the full node and the full node is behind light node +/// for at least `LIGHT_MAXIMAL_BLOCKS_DIFFERENCE` blocks, we consider it not useful +/// and disconnect to free connection slot. +const LIGHT_MAXIMAL_BLOCKS_DIFFERENCE: u64 = 8192; + +mod rep { + use sc_peerset::ReputationChange as Rep; + /// Reputation change when a peer doesn't respond in time to our messages. + pub const TIMEOUT: Rep = Rep::new(-(1 << 10), "Request timeout"); + /// Reputation change when we are a light client and a peer is behind us. + pub const PEER_BEHIND_US_LIGHT: Rep = Rep::new(-(1 << 8), "Useless for a light peer"); + /// Reputation change when a peer sends us any transaction. + /// + /// This forces node to verify it, thus the negative value here. Once transaction is verified, + /// reputation change should be refunded with `ANY_TRANSACTION_REFUND` + pub const ANY_TRANSACTION: Rep = Rep::new(-(1 << 4), "Any transaction"); + /// Reputation change when a peer sends us any transaction that is not invalid. + pub const ANY_TRANSACTION_REFUND: Rep = Rep::new(1 << 4, "Any transaction (refund)"); + /// Reputation change when a peer sends us an transaction that we didn't know about. 
+ pub const GOOD_TRANSACTION: Rep = Rep::new(1 << 7, "Good transaction"); + /// Reputation change when a peer sends us a bad transaction. + pub const BAD_TRANSACTION: Rep = Rep::new(-(1 << 12), "Bad transaction"); + /// We received a message that failed to decode. + pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message"); + /// We received an unexpected response. + pub const UNEXPECTED_RESPONSE: Rep = Rep::new_fatal("Unexpected response packet"); + /// We received an unexpected transaction packet. + pub const UNEXPECTED_TRANSACTIONS: Rep = Rep::new_fatal("Unexpected transactions packet"); + /// Peer has different genesis. + pub const GENESIS_MISMATCH: Rep = Rep::new_fatal("Genesis mismatch"); + /// Peer is on unsupported protocol version. + pub const BAD_PROTOCOL: Rep = Rep::new_fatal("Unsupported protocol"); + /// Peer role does not match (e.g. light peer connecting to another light peer). + pub const BAD_ROLE: Rep = Rep::new_fatal("Unsupported role"); + /// Peer response data does not have requested bits. + pub const BAD_RESPONSE: Rep = Rep::new(-(1 << 12), "Incomplete response"); +} + +struct Metrics { + obsolete_requests: Gauge, + peers: Gauge, + queued_blocks: Gauge, + fork_targets: Gauge, + finality_proofs: GaugeVec, + justifications: GaugeVec, + propagated_transactions: Counter, +} + +impl Metrics { + fn register(r: &Registry) -> Result { + Ok(Metrics { + obsolete_requests: { + let g = Gauge::new("sync_obsolete_requests", "Number of obsolete requests")?; + register(g, r)? + }, + peers: { + let g = Gauge::new("sync_peers", "Number of peers we sync with")?; + register(g, r)? + }, + queued_blocks: { + let g = Gauge::new("sync_queued_blocks", "Number of blocks in import queue")?; + register(g, r)? + }, + fork_targets: { + let g = Gauge::new("sync_fork_targets", "Number of fork sync targets")?; + register(g, r)? 
+ }, + justifications: { + let g = GaugeVec::new( + Opts::new( + "sync_extra_justifications", + "Number of extra justifications requests" + ), + &["status"], + )?; + register(g, r)? + }, + finality_proofs: { + let g = GaugeVec::new( + Opts::new( + "sync_extra_finality_proofs", + "Number of extra finality proof requests", + ), + &["status"], + )?; + register(g, r)? + }, + propagated_transactions: register(Counter::new( + "sync_propagated_transactions", + "Number of transactions propagated to at least one peer", + )?, r)?, + }) + } +} + +#[pin_project::pin_project] +struct PendingTransaction { + #[pin] + validation: TransactionImportFuture, + tx_hash: H, +} + +impl Future for PendingTransaction { + type Output = (H, TransactionImport); + + fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll { + let mut this = self.project(); + + if let Poll::Ready(import_result) = Pin::new(&mut this.validation).poll_unpin(cx) { + return Poll::Ready((this.tx_hash.clone(), import_result)); + } + + Poll::Pending + } +} + +// Lock must always be taken in order declared here. +pub struct Protocol { + /// Interval at which we call `tick`. + tick_timeout: Pin + Send>>, + /// Interval at which we call `propagate_transactions`. + propagate_timeout: Pin + Send>>, + /// Pending list of messages to return from `poll` as a priority. + pending_messages: VecDeque>, + /// Pending transactions verification tasks. + pending_transactions: FuturesUnordered>, + /// As multiple peers can send us the same transaction, we group + /// these peers using the transaction hash while the transaction is + /// imported. This prevents that we import the same transaction + /// multiple times concurrently. + pending_transactions_peers: HashMap>, + config: ProtocolConfig, + genesis_hash: B::Hash, + sync: ChainSync, + context_data: ContextData, + /// List of nodes for which we perform additional logging because they are important for the + /// user. 
+ important_peers: HashSet, + /// Used to report reputation changes. + peerset_handle: sc_peerset::PeersetHandle, + transaction_pool: Arc>, + /// Handles opening the unique substream and sending and receiving raw messages. + behaviour: GenericProto, + /// For each legacy gossiping engine ID, the corresponding new protocol name. + protocol_name_by_engine: HashMap>, + /// For each protocol name, the legacy equivalent. + legacy_equiv_by_name: HashMap, Fallback>, + /// Name of the protocol used for transactions. + transactions_protocol: Cow<'static, str>, + /// Name of the protocol used for block announces. + block_announces_protocol: Cow<'static, str>, + /// Prometheus metrics. + metrics: Option, + /// The `PeerId`'s of all boot nodes. + boot_node_ids: Arc>, +} + +#[derive(Default)] +struct PacketStats { + bytes_in: u64, + bytes_out: u64, + count_in: u64, + count_out: u64, +} +/// Peer information +#[derive(Debug, Clone)] +struct Peer { + info: PeerInfo, + /// Current block request, if any. + block_request: Option<(Instant, message::BlockRequest)>, + /// Requests we are no longer interested in. + obsolete_requests: HashMap, + /// Holds a set of transactions known to this peer. + known_transactions: LruHashSet, + /// Holds a set of blocks known to this peer. + known_blocks: LruHashSet, + /// Request counter, + next_request_id: message::RequestId, +} + +/// Info about a peer's known state. +#[derive(Clone, Debug)] +pub struct PeerInfo { + /// Roles + pub roles: Roles, + /// Peer best block hash + pub best_hash: B::Hash, + /// Peer best block number + pub best_number: ::Number, +} + +/// Data necessary to create a context. +struct ContextData { + // All connected peers + peers: HashMap>, + stats: HashMap<&'static str, PacketStats>, + pub chain: Arc>, +} + +/// Configuration for the Substrate-specific part of the networking layer. +#[derive(Clone)] +pub struct ProtocolConfig { + /// Assigned roles. 
+ pub roles: Roles, + /// Maximum number of peers to ask the same blocks in parallel. + pub max_parallel_downloads: u32, +} + +impl Default for ProtocolConfig { + fn default() -> ProtocolConfig { + ProtocolConfig { + roles: Roles::FULL, + max_parallel_downloads: 5, + } + } +} + +/// Handshake sent when we open a block announces substream. +#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] +struct BlockAnnouncesHandshake { + /// Roles of the node. + roles: Roles, + /// Best block number. + best_number: NumberFor, + /// Best block hash. + best_hash: B::Hash, + /// Genesis block hash. + genesis_hash: B::Hash, +} + +impl BlockAnnouncesHandshake { + fn build(protocol_config: &ProtocolConfig, chain: &Arc>) -> Self { + let info = chain.info(); + BlockAnnouncesHandshake { + genesis_hash: info.genesis_hash, + roles: protocol_config.roles, + best_number: info.best_number, + best_hash: info.best_hash, + } + } +} + +/// Builds a SCALE-encoded "Status" message to send as handshake for the legacy protocol. +fn build_status_message(protocol_config: &ProtocolConfig, chain: &Arc>) -> Vec { + let info = chain.info(); + let status = message::generic::Status { + version: CURRENT_VERSION, + min_supported_version: MIN_VERSION, + genesis_hash: info.genesis_hash, + roles: protocol_config.roles.into(), + best_number: info.best_number, + best_hash: info.best_hash, + chain_status: Vec::new(), // TODO: find a way to make this backwards-compatible + }; + + Message::::Status(status).encode() +} + +/// Fallback mechanism to use to send a notification if no substream is open. +#[derive(Debug, Clone, PartialEq, Eq)] +enum Fallback { + /// Use a `Message::Consensus` with the given engine ID. + Consensus(ConsensusEngineId), + /// The message is the bytes encoding of a `Transactions` (which is itself defined as a `Vec`). + Transactions, + /// The message is the bytes encoding of a `BlockAnnounce`. + BlockAnnounce, +} + +impl Protocol { + /// Create a new instance. 
+ pub fn new( + config: ProtocolConfig, + local_peer_id: PeerId, + chain: Arc>, + transaction_pool: Arc>, + finality_proof_request_builder: Option>, + protocol_id: ProtocolId, + peerset_config: sc_peerset::PeersetConfig, + block_announce_validator: Box + Send>, + metrics_registry: Option<&Registry>, + boot_node_ids: Arc>, + ) -> error::Result<(Protocol, sc_peerset::PeersetHandle)> { + let info = chain.info(); + let sync = ChainSync::new( + config.roles, + chain.clone(), + &info, + finality_proof_request_builder, + block_announce_validator, + config.max_parallel_downloads, + ); + + let important_peers = { + let mut imp_p = HashSet::new(); + for reserved in peerset_config.priority_groups.iter().flat_map(|(_, l)| l.iter()) { + imp_p.insert(reserved.clone()); + } + imp_p.shrink_to_fit(); + imp_p + }; + + let (peerset, peerset_handle) = sc_peerset::Peerset::from_config(peerset_config); + + let mut legacy_equiv_by_name = HashMap::new(); + + let transactions_protocol: Cow<'static, str> = Cow::from({ + let mut proto = String::new(); + proto.push_str("/"); + proto.push_str(protocol_id.as_ref()); + proto.push_str("/transactions/1"); + proto + }); + legacy_equiv_by_name.insert(transactions_protocol.clone(), Fallback::Transactions); + + let block_announces_protocol: Cow<'static, str> = Cow::from({ + let mut proto = String::new(); + proto.push_str("/"); + proto.push_str(protocol_id.as_ref()); + proto.push_str("/block-announces/1"); + proto + }); + legacy_equiv_by_name.insert(block_announces_protocol.clone(), Fallback::BlockAnnounce); + + let behaviour = { + let versions = &((MIN_VERSION as u8)..=(CURRENT_VERSION as u8)).collect::>(); + let block_announces_handshake = BlockAnnouncesHandshake::build(&config, &chain).encode(); + GenericProto::new( + local_peer_id, + protocol_id.clone(), + versions, + build_status_message(&config, &chain), + peerset, + // As documented in `GenericProto`, the first protocol in the list is always the + // one carrying the handshake reported in the 
`CustomProtocolOpen` event. + iter::once((block_announces_protocol.clone(), block_announces_handshake)) + .chain(iter::once((transactions_protocol.clone(), vec![]))), + ) + }; + + let protocol = Protocol { + tick_timeout: Box::pin(interval(TICK_TIMEOUT)), + propagate_timeout: Box::pin(interval(PROPAGATE_TIMEOUT)), + pending_messages: VecDeque::new(), + pending_transactions: FuturesUnordered::new(), + pending_transactions_peers: HashMap::new(), + config, + context_data: ContextData { + peers: HashMap::new(), + stats: HashMap::new(), + chain, + }, + genesis_hash: info.genesis_hash, + sync, + important_peers, + transaction_pool, + peerset_handle: peerset_handle.clone(), + behaviour, + protocol_name_by_engine: HashMap::new(), + legacy_equiv_by_name, + transactions_protocol, + block_announces_protocol, + metrics: if let Some(r) = metrics_registry { + Some(Metrics::register(r)?) + } else { + None + }, + boot_node_ids, + }; + + Ok((protocol, peerset_handle)) + } + + /// Returns the list of all the peers we have an open channel to. + pub fn open_peers(&self) -> impl Iterator { + self.behaviour.open_peers() + } + + /// Returns true if we have a channel open with this node. + pub fn is_open(&self, peer_id: &PeerId) -> bool { + self.behaviour.is_open(peer_id) + } + + /// Returns the list of all the peers that the peerset currently requests us to be connected to. + pub fn requested_peers(&self) -> impl Iterator { + self.behaviour.requested_peers() + } + + /// Returns the number of discovered nodes that we keep in memory. + pub fn num_discovered_peers(&self) -> usize { + self.behaviour.num_discovered_peers() + } + + /// Disconnects the given peer if we are connected to it. + pub fn disconnect_peer(&mut self, peer_id: &PeerId) { + self.behaviour.disconnect_peer(peer_id) + } + + /// Returns true if we try to open protocols with the given peer. 
+ pub fn is_enabled(&self, peer_id: &PeerId) -> bool { + self.behaviour.is_enabled(peer_id) + } + + /// Returns the state of the peerset manager, for debugging purposes. + pub fn peerset_debug_info(&mut self) -> serde_json::Value { + self.behaviour.peerset_debug_info() + } + + /// Returns the number of peers we're connected to. + pub fn num_connected_peers(&self) -> usize { + self.context_data.peers.values().count() + } + + /// Returns the number of peers we're connected to and that are being queried. + pub fn num_active_peers(&self) -> usize { + self.context_data + .peers + .values() + .filter(|p| p.block_request.is_some()) + .count() + } + + /// Current global sync state. + pub fn sync_state(&self) -> SyncState { + self.sync.status().state + } + + /// Target sync block number. + pub fn best_seen_block(&self) -> Option> { + self.sync.status().best_seen_block + } + + /// Number of peers participating in syncing. + pub fn num_sync_peers(&self) -> u32 { + self.sync.status().num_peers + } + + /// Number of blocks in the import queue. + pub fn num_queued_blocks(&self) -> u32 { + self.sync.status().queued_blocks + } + + /// Number of downloaded blocks. + pub fn num_downloaded_blocks(&self) -> usize { + self.sync.num_downloaded_blocks() + } + + /// Number of active sync requests. + pub fn num_sync_requests(&self) -> usize { + self.sync.num_sync_requests() + } + + /// Sync local state with the blockchain state. + pub fn update_chain(&mut self) { + let info = self.context_data.chain.info(); + self.sync.update_chain_info(&info.best_hash, info.best_number); + self.behaviour.set_legacy_handshake_message(build_status_message(&self.config, &self.context_data.chain)); + self.behaviour.set_notif_protocol_handshake( + &self.block_announces_protocol, + BlockAnnouncesHandshake::build(&self.config, &self.context_data.chain).encode() + ); + } + + /// Inform sync about an own imported block. 
+ pub fn own_block_imported(&mut self, hash: B::Hash, number: NumberFor) { + self.sync.update_chain_info(&hash, number); + } + + fn update_peer_info(&mut self, who: &PeerId) { + if let Some(info) = self.sync.peer_info(who) { + if let Some(ref mut peer) = self.context_data.peers.get_mut(who) { + peer.info.best_hash = info.best_hash; + peer.info.best_number = info.best_number; + } + } + } + + /// Returns information about all the peers we are connected to after the handshake message. + pub fn peers_info(&self) -> impl Iterator)> { + self.context_data.peers.iter().map(|(id, peer)| (id, &peer.info)) + } + + pub fn on_custom_message( + &mut self, + who: PeerId, + data: BytesMut, + ) -> CustomMessageOutcome { + + let message = match as Decode>::decode(&mut &data[..]) { + Ok(message) => message, + Err(err) => { + debug!(target: "sync", "Couldn't decode packet sent by {}: {:?}: {}", who, data, err.what()); + self.peerset_handle.report_peer(who, rep::BAD_MESSAGE); + return CustomMessageOutcome::None; + } + }; + + let mut stats = self.context_data.stats.entry(message.id()).or_default(); + stats.bytes_in += data.len() as u64; + stats.count_in += 1; + + match message { + GenericMessage::Status(_) => + debug!(target: "sub-libp2p", "Received unexpected Status"), + GenericMessage::BlockAnnounce(announce) => { + let outcome = self.on_block_announce(who.clone(), announce); + self.update_peer_info(&who); + return outcome; + }, + GenericMessage::Transactions(m) => + self.on_transactions(who, m), + GenericMessage::BlockResponse(_) => + warn!(target: "sub-libp2p", "Received unexpected BlockResponse"), + GenericMessage::RemoteCallResponse(_) => + warn!(target: "sub-libp2p", "Received unexpected RemoteCallResponse"), + GenericMessage::RemoteReadResponse(_) => + warn!(target: "sub-libp2p", "Received unexpected RemoteReadResponse"), + GenericMessage::RemoteHeaderResponse(_) => + warn!(target: "sub-libp2p", "Received unexpected RemoteHeaderResponse"), + 
GenericMessage::RemoteChangesResponse(_) => + warn!(target: "sub-libp2p", "Received unexpected RemoteChangesResponse"), + GenericMessage::FinalityProofResponse(_) => + warn!(target: "sub-libp2p", "Received unexpected FinalityProofResponse"), + GenericMessage::BlockRequest(_) | + GenericMessage::FinalityProofRequest(_) | + GenericMessage::RemoteReadChildRequest(_) | + GenericMessage::RemoteCallRequest(_) | + GenericMessage::RemoteReadRequest(_) | + GenericMessage::RemoteHeaderRequest(_) | + GenericMessage::RemoteChangesRequest(_) => { + debug!( + target: "sub-libp2p", + "Received no longer supported legacy request from {:?}", + who + ); + self.disconnect_peer(&who); + self.peerset_handle.report_peer(who, rep::BAD_PROTOCOL); + }, + GenericMessage::Consensus(msg) => + return if self.protocol_name_by_engine.contains_key(&msg.engine_id) { + CustomMessageOutcome::NotificationsReceived { + remote: who, + messages: vec![(msg.engine_id, From::from(msg.data))], + } + } else { + debug!(target: "sync", "Received message on non-registered protocol: {:?}", msg.engine_id); + CustomMessageOutcome::None + }, + GenericMessage::ConsensusBatch(messages) => { + let messages = messages + .into_iter() + .filter_map(|msg| { + if self.protocol_name_by_engine.contains_key(&msg.engine_id) { + Some((msg.engine_id, From::from(msg.data))) + } else { + debug!(target: "sync", "Received message on non-registered protocol: {:?}", msg.engine_id); + None + } + }) + .collect::>(); + + return if !messages.is_empty() { + CustomMessageOutcome::NotificationsReceived { + remote: who, + messages, + } + } else { + CustomMessageOutcome::None + }; + }, + } + + CustomMessageOutcome::None + } + + fn update_peer_request(&mut self, who: &PeerId, request: &mut message::BlockRequest) { + update_peer_request::(&mut self.context_data.peers, who, request) + } + + /// Called by peer when it is disconnecting + pub fn on_peer_disconnected(&mut self, peer: PeerId) -> CustomMessageOutcome { + if 
self.important_peers.contains(&peer) { + warn!(target: "sync", "Reserved peer {} disconnected", peer); + } else { + trace!(target: "sync", "{} disconnected", peer); + } + + if let Some(_peer_data) = self.context_data.peers.remove(&peer) { + self.sync.peer_disconnected(&peer); + + // Notify all the notification protocols as closed. + CustomMessageOutcome::NotificationStreamClosed { + remote: peer, + protocols: self.protocol_name_by_engine.keys().cloned().collect(), + } + } else { + CustomMessageOutcome::None + } + } + + /// Adjusts the reputation of a node. + pub fn report_peer(&self, who: PeerId, reputation: sc_peerset::ReputationChange) { + self.peerset_handle.report_peer(who, reputation) + } + + /// Must be called in response to a [`CustomMessageOutcome::BlockRequest`] being emitted. + /// Must contain the same `PeerId` and request that have been emitted. + pub fn on_block_response( + &mut self, + peer: PeerId, + response: message::BlockResponse, + ) -> CustomMessageOutcome { + let request = if let Some(ref mut p) = self.context_data.peers.get_mut(&peer) { + if p.obsolete_requests.remove(&response.id).is_some() { + trace!(target: "sync", "Ignoring obsolete block response packet from {} ({})", peer, response.id); + return CustomMessageOutcome::None; + } + // Clear the request. If the response is invalid peer will be disconnected anyway. 
+ match p.block_request.take() { + Some((_, request)) if request.id == response.id => request, + Some(_) => { + trace!(target: "sync", "Ignoring obsolete block response packet from {} ({})", peer, response.id); + return CustomMessageOutcome::None; + } + None => { + trace!(target: "sync", "Unexpected response packet from unknown peer {}", peer); + self.behaviour.disconnect_peer(&peer); + self.peerset_handle.report_peer(peer, rep::UNEXPECTED_RESPONSE); + return CustomMessageOutcome::None; + } + } + } else { + trace!(target: "sync", "Unexpected response packet from unknown peer {}", peer); + self.behaviour.disconnect_peer(&peer); + self.peerset_handle.report_peer(peer, rep::UNEXPECTED_RESPONSE); + return CustomMessageOutcome::None; + }; + + let blocks_range = || match ( + response.blocks.first().and_then(|b| b.header.as_ref().map(|h| h.number())), + response.blocks.last().and_then(|b| b.header.as_ref().map(|h| h.number())), + ) { + (Some(first), Some(last)) if first != last => format!(" ({}..{})", first, last), + (Some(first), Some(_)) => format!(" ({})", first), + _ => Default::default(), + }; + trace!(target: "sync", "BlockResponse {} from {} with {} blocks {}", + response.id, + peer, + response.blocks.len(), + blocks_range(), + ); + + if request.fields == message::BlockAttributes::JUSTIFICATION { + match self.sync.on_block_justification(peer, response) { + Ok(sync::OnBlockJustification::Nothing) => CustomMessageOutcome::None, + Ok(sync::OnBlockJustification::Import { peer, hash, number, justification }) => + CustomMessageOutcome::JustificationImport(peer, hash, number, justification), + Err(sync::BadPeer(id, repu)) => { + self.behaviour.disconnect_peer(&id); + self.peerset_handle.report_peer(id, repu); + CustomMessageOutcome::None + } + } + } else { + // Validate fields against the request. 
+ if request.fields.contains(message::BlockAttributes::HEADER) && response.blocks.iter().any(|b| b.header.is_none()) { + self.behaviour.disconnect_peer(&peer); + self.peerset_handle.report_peer(peer, rep::BAD_RESPONSE); + trace!(target: "sync", "Missing header for a block"); + return CustomMessageOutcome::None + } + if request.fields.contains(message::BlockAttributes::BODY) && response.blocks.iter().any(|b| b.body.is_none()) { + self.behaviour.disconnect_peer(&peer); + self.peerset_handle.report_peer(peer, rep::BAD_RESPONSE); + trace!(target: "sync", "Missing body for a block"); + return CustomMessageOutcome::None + } + + match self.sync.on_block_data(&peer, Some(request), response) { + Ok(sync::OnBlockData::Import(origin, blocks)) => + CustomMessageOutcome::BlockImport(origin, blocks), + Ok(sync::OnBlockData::Request(peer, mut req)) => { + self.update_peer_request(&peer, &mut req); + CustomMessageOutcome::BlockRequest { + target: peer, + request: req, + } + } + Err(sync::BadPeer(id, repu)) => { + self.behaviour.disconnect_peer(&id); + self.peerset_handle.report_peer(id, repu); + CustomMessageOutcome::None + } + } + } + } + + /// Must be called in response to a [`CustomMessageOutcome::BlockRequest`] if it has failed. + pub fn on_block_request_failed( + &mut self, + peer: &PeerId, + ) { + self.peerset_handle.report_peer(peer.clone(), rep::TIMEOUT); + self.behaviour.disconnect_peer(peer); + } + + /// Perform time based maintenance. + /// + /// > **Note**: This method normally doesn't have to be called except for testing purposes. 
+ pub fn tick(&mut self) { + self.maintain_peers(); + self.report_metrics() + } + + fn maintain_peers(&mut self) { + let tick = Instant::now(); + let mut aborting = Vec::new(); + { + for (who, peer) in self.context_data.peers.iter() { + if peer.block_request.as_ref().map_or(false, |(t, _)| (tick - *t).as_secs() > REQUEST_TIMEOUT_SEC) { + log!( + target: "sync", + if self.important_peers.contains(who) { Level::Warn } else { Level::Trace }, + "Request timeout {}", who + ); + aborting.push(who.clone()); + } else if peer.obsolete_requests.values().any(|t| (tick - *t).as_secs() > REQUEST_TIMEOUT_SEC) { + log!( + target: "sync", + if self.important_peers.contains(who) { Level::Warn } else { Level::Trace }, + "Obsolete timeout {}", who + ); + aborting.push(who.clone()); + } + } + } + + for p in aborting { + self.behaviour.disconnect_peer(&p); + self.peerset_handle.report_peer(p, rep::TIMEOUT); + } + } + + /// Called on the first connection between two peers, after their exchange of handshake. + fn on_peer_connected( + &mut self, + who: PeerId, + status: BlockAnnouncesHandshake, + notifications_sink: NotificationsSink, + ) -> CustomMessageOutcome { + trace!(target: "sync", "New peer {} {:?}", who, status); + + if self.context_data.peers.contains_key(&who) { + debug!(target: "sync", "Ignoring duplicate status packet from {}", who); + return CustomMessageOutcome::None; + } + if status.genesis_hash != self.genesis_hash { + log!( + target: "sync", + if self.important_peers.contains(&who) { Level::Warn } else { Level::Trace }, + "Peer is on different chain (our genesis: {} theirs: {})", + self.genesis_hash, status.genesis_hash + ); + self.peerset_handle.report_peer(who.clone(), rep::GENESIS_MISMATCH); + self.behaviour.disconnect_peer(&who); + + if self.boot_node_ids.contains(&who) { + error!( + target: "sync", + "Bootnode with peer id `{}` is on a different chain (our genesis: {} theirs: {})", + who, + self.genesis_hash, + status.genesis_hash, + ); + } + + return 
CustomMessageOutcome::None; + } + + if self.config.roles.is_light() { + // we're not interested in light peers + if status.roles.is_light() { + debug!(target: "sync", "Peer {} is unable to serve light requests", who); + self.peerset_handle.report_peer(who.clone(), rep::BAD_ROLE); + self.behaviour.disconnect_peer(&who); + return CustomMessageOutcome::None; + } + + // we don't interested in peers that are far behind us + let self_best_block = self + .context_data + .chain + .info() + .best_number; + let blocks_difference = self_best_block + .checked_sub(&status.best_number) + .unwrap_or_else(Zero::zero) + .saturated_into::(); + if blocks_difference > LIGHT_MAXIMAL_BLOCKS_DIFFERENCE { + debug!(target: "sync", "Peer {} is far behind us and will unable to serve light requests", who); + self.peerset_handle.report_peer(who.clone(), rep::PEER_BEHIND_US_LIGHT); + self.behaviour.disconnect_peer(&who); + return CustomMessageOutcome::None; + } + } + + let peer = Peer { + info: PeerInfo { + roles: status.roles, + best_hash: status.best_hash, + best_number: status.best_number + }, + block_request: None, + known_transactions: LruHashSet::new(NonZeroUsize::new(MAX_KNOWN_TRANSACTIONS) + .expect("Constant is nonzero")), + known_blocks: LruHashSet::new(NonZeroUsize::new(MAX_KNOWN_BLOCKS) + .expect("Constant is nonzero")), + next_request_id: 0, + obsolete_requests: HashMap::new(), + }; + self.context_data.peers.insert(who.clone(), peer); + + debug!(target: "sync", "Connected {}", who); + + let info = self.context_data.peers.get(&who).expect("We just inserted above; QED").info.clone(); + self.pending_messages.push_back(CustomMessageOutcome::PeerNewBest(who.clone(), status.best_number)); + if info.roles.is_full() { + match self.sync.new_peer(who.clone(), info.best_hash, info.best_number) { + Ok(None) => (), + Ok(Some(mut req)) => { + self.update_peer_request(&who, &mut req); + self.pending_messages.push_back(CustomMessageOutcome::BlockRequest { + target: who.clone(), + request: req, + 
}); + }, + Err(sync::BadPeer(id, repu)) => { + self.behaviour.disconnect_peer(&id); + self.peerset_handle.report_peer(id, repu) + } + } + } + + // Notify all the notification protocols as open. + CustomMessageOutcome::NotificationStreamOpened { + remote: who, + protocols: self.protocol_name_by_engine.keys().cloned().collect(), + roles: info.roles, + notifications_sink, + } + } + + /// Registers a new notifications protocol. + /// + /// While registering a protocol while we already have open connections is discouraged, we + /// nonetheless handle it by notifying that we opened channels with everyone. This function + /// returns a list of substreams to open as a result. + pub fn register_notifications_protocol<'a>( + &'a mut self, + engine_id: ConsensusEngineId, + protocol_name: impl Into>, + handshake_message: Vec, + ) -> impl Iterator + 'a { + let protocol_name = protocol_name.into(); + if self.protocol_name_by_engine.insert(engine_id, protocol_name.clone()).is_some() { + error!(target: "sub-libp2p", "Notifications protocol already registered: {:?}", protocol_name); + } else { + self.behaviour.register_notif_protocol(protocol_name.clone(), handshake_message); + self.legacy_equiv_by_name.insert(protocol_name, Fallback::Consensus(engine_id)); + } + + let behaviour = &self.behaviour; + self.context_data.peers.iter().filter_map(move |(peer_id, peer)| { + if let Some(notifications_sink) = behaviour.notifications_sink(peer_id) { + Some((peer_id, peer.info.roles, notifications_sink)) + } else { + log::error!("State mismatch: no notifications sink for opened peer {:?}", peer_id); + None + } + }) + } + + /// Called when peer sends us new transactions + fn on_transactions( + &mut self, + who: PeerId, + transactions: message::Transactions, + ) { + // sending transaction to light node is considered a bad behavior + if !self.config.roles.is_full() { + trace!(target: "sync", "Peer {} is trying to send transactions to the light node", who); + self.behaviour.disconnect_peer(&who); 
+ self.peerset_handle.report_peer(who, rep::UNEXPECTED_TRANSACTIONS); + return; + } + + // Accept transactions only when fully synced + if self.sync.status().state != SyncState::Idle { + trace!(target: "sync", "{} Ignoring transactions while syncing", who); + return; + } + + trace!(target: "sync", "Received {} transactions from {}", transactions.len(), who); + if let Some(ref mut peer) = self.context_data.peers.get_mut(&who) { + for t in transactions { + if self.pending_transactions.len() > MAX_PENDING_TRANSACTIONS { + debug!( + target: "sync", + "Ignoring any further transactions that exceed `MAX_PENDING_TRANSACTIONS`({}) limit", + MAX_PENDING_TRANSACTIONS, + ); + break; + } + + let hash = self.transaction_pool.hash_of(&t); + peer.known_transactions.insert(hash.clone()); + + self.peerset_handle.report_peer(who.clone(), rep::ANY_TRANSACTION); + + match self.pending_transactions_peers.entry(hash.clone()) { + Entry::Vacant(entry) => { + self.pending_transactions.push(PendingTransaction { + validation: self.transaction_pool.import(t), + tx_hash: hash, + }); + entry.insert(vec![who.clone()]); + }, + Entry::Occupied(mut entry) => { + entry.get_mut().push(who.clone()); + } + } + } + } + } + + fn on_handle_transaction_import(&mut self, who: PeerId, import: TransactionImport) { + match import { + TransactionImport::KnownGood => self.peerset_handle.report_peer(who, rep::ANY_TRANSACTION_REFUND), + TransactionImport::NewGood => self.peerset_handle.report_peer(who, rep::GOOD_TRANSACTION), + TransactionImport::Bad => self.peerset_handle.report_peer(who, rep::BAD_TRANSACTION), + TransactionImport::None => {}, + } + } + + /// Propagate one transaction. 
+ pub fn propagate_transaction( + &mut self, + hash: &H, + ) { + debug!(target: "sync", "Propagating transaction [{:?}]", hash); + // Accept transactions only when fully synced + if self.sync.status().state != SyncState::Idle { + return; + } + if let Some(transaction) = self.transaction_pool.transaction(hash) { + let propagated_to = self.do_propagate_transactions(&[(hash.clone(), transaction)]); + self.transaction_pool.on_broadcasted(propagated_to); + } + } + + fn do_propagate_transactions( + &mut self, + transactions: &[(H, B::Extrinsic)], + ) -> HashMap> { + let mut propagated_to = HashMap::<_, Vec<_>>::new(); + let mut propagated_transactions = 0; + + for (who, peer) in self.context_data.peers.iter_mut() { + // never send transactions to the light node + if !peer.info.roles.is_full() { + continue; + } + + let (hashes, to_send): (Vec<_>, Vec<_>) = transactions + .iter() + .filter(|&(ref hash, _)| peer.known_transactions.insert(hash.clone())) + .cloned() + .unzip(); + + propagated_transactions += hashes.len(); + + if !to_send.is_empty() { + for hash in hashes { + propagated_to + .entry(hash) + .or_default() + .push(who.to_base58()); + } + trace!(target: "sync", "Sending {} transactions to {}", to_send.len(), who); + self.behaviour.write_notification( + who, + self.transactions_protocol.clone(), + to_send.encode() + ); + } + } + + if let Some(ref metrics) = self.metrics { + metrics.propagated_transactions.inc_by(propagated_transactions as _) + } + + propagated_to + } + + /// Call when we must propagate ready transactions to peers. 
+ pub fn propagate_transactions(&mut self) { + debug!(target: "sync", "Propagating transactions"); + // Accept transactions only when fully synced + if self.sync.status().state != SyncState::Idle { + return; + } + let transactions = self.transaction_pool.transactions(); + let propagated_to = self.do_propagate_transactions(&transactions); + self.transaction_pool.on_broadcasted(propagated_to); + } + + /// Make sure an important block is propagated to peers. + /// + /// In chain-based consensus, we often need to make sure non-best forks are + /// at least temporarily synced. + pub fn announce_block(&mut self, hash: B::Hash, data: Vec) { + let header = match self.context_data.chain.header(BlockId::Hash(hash)) { + Ok(Some(header)) => header, + Ok(None) => { + warn!("Trying to announce unknown block: {}", hash); + return; + } + Err(e) => { + warn!("Error reading block header {}: {:?}", hash, e); + return; + } + }; + + // don't announce genesis block since it will be ignored + if header.number().is_zero() { + return; + } + + let is_best = self.context_data.chain.info().best_hash == hash; + debug!(target: "sync", "Reannouncing block {:?} is_best: {}", hash, is_best); + self.send_announcement(&header, data, is_best, true) + } + + fn send_announcement(&mut self, header: &B::Header, data: Vec, is_best: bool, force: bool) { + let hash = header.hash(); + + for (who, ref mut peer) in self.context_data.peers.iter_mut() { + trace!(target: "sync", "Announcing block {:?} to {}", hash, who); + let inserted = peer.known_blocks.insert(hash); + if inserted || force { + let message = message::BlockAnnounce { + header: header.clone(), + state: if is_best { + Some(message::BlockState::Best) + } else { + Some(message::BlockState::Normal) + }, + data: Some(data.clone()), + }; + + self.behaviour.write_notification( + who, + self.block_announces_protocol.clone(), + message.encode() + ); + } + } + } + + fn on_block_announce( + &mut self, + who: PeerId, + announce: BlockAnnounce, + ) -> 
CustomMessageOutcome { + let hash = announce.header.hash(); + let number = *announce.header.number(); + + if let Some(ref mut peer) = self.context_data.peers.get_mut(&who) { + peer.known_blocks.insert(hash.clone()); + } + + let is_their_best = match announce.state.unwrap_or(message::BlockState::Best) { + message::BlockState::Best => true, + message::BlockState::Normal => false, + }; + + match self.sync.on_block_announce(&who, hash, &announce, is_their_best) { + sync::OnBlockAnnounce::Nothing => { + // `on_block_announce` returns `OnBlockAnnounce::ImportHeader` + // when we have all data required to import the block + // in the BlockAnnounce message. This is only when: + // 1) we're on light client; + // AND + // 2) parent block is already imported and not pruned. + if is_their_best { + return CustomMessageOutcome::PeerNewBest(who, number); + } else { + return CustomMessageOutcome::None; + } + } + sync::OnBlockAnnounce::ImportHeader => () // We proceed with the import. + } + + // to import header from announced block let's construct response to request that normally would have + // been sent over network (but it is not in our case) + let blocks_to_import = self.sync.on_block_data( + &who, + None, + message::generic::BlockResponse { + id: 0, + blocks: vec![ + message::generic::BlockData { + hash: hash, + header: Some(announce.header), + body: None, + receipt: None, + message_queue: None, + justification: None, + }, + ], + }, + ); + + if is_their_best { + self.pending_messages.push_back(CustomMessageOutcome::PeerNewBest(who, number)); + } + + match blocks_to_import { + Ok(sync::OnBlockData::Import(origin, blocks)) => { + CustomMessageOutcome::BlockImport(origin, blocks) + }, + Ok(sync::OnBlockData::Request(peer, mut req)) => { + self.update_peer_request(&peer, &mut req); + CustomMessageOutcome::BlockRequest { + target: peer, + request: req, + } + } + Err(sync::BadPeer(id, repu)) => { + self.behaviour.disconnect_peer(&id); + self.peerset_handle.report_peer(id, repu); + 
CustomMessageOutcome::None + } + } + } + + /// Call this when a block has been finalized. The sync layer may have some additional + /// requesting to perform. + pub fn on_block_finalized(&mut self, hash: B::Hash, header: &B::Header) { + self.sync.on_block_finalized(&hash, *header.number()) + } + + /// Request a justification for the given block. + /// + /// Uses `protocol` to queue a new justification request and tries to dispatch all pending + /// requests. + pub fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { + self.sync.request_justification(&hash, number) + } + + /// Request syncing for the given block from given set of peers. + /// Uses `protocol` to queue a new block download request and tries to dispatch all pending + /// requests. + pub fn set_sync_fork_request(&mut self, peers: Vec, hash: &B::Hash, number: NumberFor) { + self.sync.set_sync_fork_request(peers, hash, number) + } + + /// A batch of blocks have been processed, with or without errors. + /// Call this when a batch of blocks have been processed by the importqueue, with or without + /// errors. 
+ pub fn on_blocks_processed( + &mut self, + imported: usize, + count: usize, + results: Vec<(Result>, BlockImportError>, B::Hash)> + ) { + let new_best = results.iter().rev().find_map(|r| match r { + (Ok(BlockImportResult::ImportedUnknown(n, aux, _)), hash) if aux.is_new_best => Some((*n, hash.clone())), + _ => None, + }); + if let Some((best_num, best_hash)) = new_best { + self.sync.update_chain_info(&best_hash, best_num); + self.behaviour.set_legacy_handshake_message(build_status_message(&self.config, &self.context_data.chain)); + self.behaviour.set_notif_protocol_handshake( + &self.block_announces_protocol, + BlockAnnouncesHandshake::build(&self.config, &self.context_data.chain).encode() + ); + } + let results = self.sync.on_blocks_processed( + imported, + count, + results, + ); + for result in results { + match result { + Ok((id, mut req)) => { + update_peer_request(&mut self.context_data.peers, &id, &mut req); + self.pending_messages.push_back(CustomMessageOutcome::BlockRequest { + target: id, + request: req, + }); + } + Err(sync::BadPeer(id, repu)) => { + self.behaviour.disconnect_peer(&id); + self.peerset_handle.report_peer(id, repu) + } + } + } + } + + /// Call this when a justification has been processed by the import queue, with or without + /// errors. + pub fn justification_import_result(&mut self, hash: B::Hash, number: NumberFor, success: bool) { + self.sync.on_justification_import(hash, number, success) + } + + /// Request a finality proof for the given block. + /// + /// Queues a new finality proof request and tries to dispatch all pending requests. + pub fn request_finality_proof(&mut self, hash: &B::Hash, number: NumberFor) { + self.sync.request_finality_proof(&hash, number) + } + + /// Notify the protocol that we have learned about the existence of nodes. + /// + /// Can be called multiple times with the same `PeerId`s. 
+ pub fn add_discovered_nodes(&mut self, peer_ids: impl Iterator) { + self.behaviour.add_discovered_nodes(peer_ids) + } + + pub fn finality_proof_import_result( + &mut self, + request_block: (B::Hash, NumberFor), + finalization_result: Result<(B::Hash, NumberFor), ()>, + ) { + self.sync.on_finality_proof_import(request_block, finalization_result) + } + + /// Must be called after a [`CustomMessageOutcome::FinalityProofRequest`] has been emitted, + /// to notify of the response having arrived. + pub fn on_finality_proof_response( + &mut self, + who: PeerId, + response: message::FinalityProofResponse, + ) -> CustomMessageOutcome { + trace!(target: "sync", "Finality proof response from {} for {}", who, response.block); + match self.sync.on_block_finality_proof(who, response) { + Ok(sync::OnBlockFinalityProof::Nothing) => CustomMessageOutcome::None, + Ok(sync::OnBlockFinalityProof::Import { peer, hash, number, proof }) => + CustomMessageOutcome::FinalityProofImport(peer, hash, number, proof), + Err(sync::BadPeer(id, repu)) => { + self.behaviour.disconnect_peer(&id); + self.peerset_handle.report_peer(id, repu); + CustomMessageOutcome::None + } + } + } + + fn format_stats(&self) -> String { + let mut out = String::new(); + for (id, stats) in &self.context_data.stats { + let _ = writeln!( + &mut out, + "{}: In: {} bytes ({}), Out: {} bytes ({})", + id, + stats.bytes_in, + stats.count_in, + stats.bytes_out, + stats.count_out, + ); + } + out + } + + fn report_metrics(&self) { + use std::convert::TryInto; + + if let Some(metrics) = &self.metrics { + let mut obsolete_requests: u64 = 0; + for peer in self.context_data.peers.values() { + let n = peer.obsolete_requests.len().try_into().unwrap_or(std::u64::MAX); + obsolete_requests = obsolete_requests.saturating_add(n); + } + metrics.obsolete_requests.set(obsolete_requests); + + let n = self.context_data.peers.len().try_into().unwrap_or(std::u64::MAX); + metrics.peers.set(n); + + let m = self.sync.metrics(); + + 
metrics.fork_targets.set(m.fork_targets.into()); + metrics.queued_blocks.set(m.queued_blocks.into()); + + metrics.justifications.with_label_values(&["pending"]) + .set(m.justifications.pending_requests.into()); + metrics.justifications.with_label_values(&["active"]) + .set(m.justifications.active_requests.into()); + metrics.justifications.with_label_values(&["failed"]) + .set(m.justifications.failed_requests.into()); + metrics.justifications.with_label_values(&["importing"]) + .set(m.justifications.importing_requests.into()); + + metrics.finality_proofs.with_label_values(&["pending"]) + .set(m.finality_proofs.pending_requests.into()); + metrics.finality_proofs.with_label_values(&["active"]) + .set(m.finality_proofs.active_requests.into()); + metrics.finality_proofs.with_label_values(&["failed"]) + .set(m.finality_proofs.failed_requests.into()); + metrics.finality_proofs.with_label_values(&["importing"]) + .set(m.finality_proofs.importing_requests.into()); + } + } +} + +/// Outcome of an incoming custom message. +#[derive(Debug)] +#[must_use] +pub enum CustomMessageOutcome { + BlockImport(BlockOrigin, Vec>), + JustificationImport(Origin, B::Hash, NumberFor, Justification), + FinalityProofImport(Origin, B::Hash, NumberFor, Vec), + /// Notification protocols have been opened with a remote. + NotificationStreamOpened { + remote: PeerId, + protocols: Vec, + roles: Roles, + notifications_sink: NotificationsSink + }, + /// The [`NotificationsSink`] of some notification protocols need an update. + NotificationStreamReplaced { + remote: PeerId, + protocols: Vec, + notifications_sink: NotificationsSink, + }, + /// Notification protocols have been closed with a remote. + NotificationStreamClosed { remote: PeerId, protocols: Vec }, + /// Messages have been received on one or more notifications protocols. + NotificationsReceived { remote: PeerId, messages: Vec<(ConsensusEngineId, Bytes)> }, + /// A new block request must be emitted. 
+ /// You must later call either [`Protocol::on_block_response`] or + /// [`Protocol::on_block_request_failed`]. + /// Each peer can only have one active request. If a request already exists for this peer, it + /// must be silently discarded. + /// It is the responsibility of the handler to ensure that a timeout exists. + BlockRequest { target: PeerId, request: message::BlockRequest }, + /// A new finality proof request must be emitted. + /// Once you have the response, you must call `Protocol::on_finality_proof_response`. + /// It is the responsibility of the handler to ensure that a timeout exists. + /// If the request times out, or the peer responds in an invalid way, the peer has to be + /// disconnect. This will inform the state machine that the request it has emitted is stale. + FinalityProofRequest { target: PeerId, block_hash: B::Hash, request: Vec }, + /// Peer has a reported a new head of chain. + PeerNewBest(PeerId, NumberFor), + None, +} + +fn update_peer_request( + peers: &mut HashMap>, + who: &PeerId, + request: &mut message::BlockRequest, +) { + if let Some(ref mut peer) = peers.get_mut(who) { + request.id = peer.next_request_id; + peer.next_request_id += 1; + if let Some((timestamp, request)) = peer.block_request.take() { + trace!(target: "sync", "Request {} for {} is now obsolete.", request.id, who); + peer.obsolete_requests.insert(request.id, timestamp); + } + peer.block_request = Some((Instant::now(), request.clone())); + } +} + +impl NetworkBehaviour for Protocol { + type ProtocolsHandler = ::ProtocolsHandler; + type OutEvent = CustomMessageOutcome; + + fn new_handler(&mut self) -> Self::ProtocolsHandler { + self.behaviour.new_handler() + } + + fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { + self.behaviour.addresses_of_peer(peer_id) + } + + fn inject_connection_established(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + self.behaviour.inject_connection_established(peer_id, conn, endpoint) + } + + fn 
inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + self.behaviour.inject_connection_closed(peer_id, conn, endpoint) + } + + fn inject_connected(&mut self, peer_id: &PeerId) { + self.behaviour.inject_connected(peer_id) + } + + fn inject_disconnected(&mut self, peer_id: &PeerId) { + self.behaviour.inject_disconnected(peer_id) + } + + fn inject_event( + &mut self, + peer_id: PeerId, + connection: ConnectionId, + event: <::Handler as ProtocolsHandler>::OutEvent, + ) { + self.behaviour.inject_event(peer_id, connection, event) + } + + fn poll( + &mut self, + cx: &mut std::task::Context, + params: &mut impl PollParameters, + ) -> Poll< + NetworkBehaviourAction< + <::Handler as ProtocolsHandler>::InEvent, + Self::OutEvent + > + > { + if let Some(message) = self.pending_messages.pop_front() { + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)); + } + + while let Poll::Ready(Some(())) = self.tick_timeout.poll_next_unpin(cx) { + self.tick(); + } + + while let Poll::Ready(Some(())) = self.propagate_timeout.poll_next_unpin(cx) { + self.propagate_transactions(); + } + + for (id, mut r) in self.sync.block_requests() { + update_peer_request(&mut self.context_data.peers, &id, &mut r); + let event = CustomMessageOutcome::BlockRequest { + target: id.clone(), + request: r, + }; + self.pending_messages.push_back(event); + } + for (id, mut r) in self.sync.justification_requests() { + update_peer_request(&mut self.context_data.peers, &id, &mut r); + let event = CustomMessageOutcome::BlockRequest { + target: id, + request: r, + }; + self.pending_messages.push_back(event); + } + for (id, r) in self.sync.finality_proof_requests() { + let event = CustomMessageOutcome::FinalityProofRequest { + target: id, + block_hash: r.block, + request: r.request, + }; + self.pending_messages.push_back(event); + } + if let Poll::Ready(Some((tx_hash, result))) = self.pending_transactions.poll_next_unpin(cx) { + if let Some(peers) = 
self.pending_transactions_peers.remove(&tx_hash) { + peers.into_iter().for_each(|p| self.on_handle_transaction_import(p, result)); + } else { + warn!(target: "sub-libp2p", "Inconsistent state, no peers for pending transaction!"); + } + } + if let Some(message) = self.pending_messages.pop_front() { + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)); + } + + let event = match self.behaviour.poll(cx, params) { + Poll::Pending => return Poll::Pending, + Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) => ev, + Poll::Ready(NetworkBehaviourAction::DialAddress { address }) => + return Poll::Ready(NetworkBehaviourAction::DialAddress { address }), + Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }) => + return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }), + Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }) => + return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }), + Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }) => + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }), + }; + + let outcome = match event { + GenericProtoOut::CustomProtocolOpen { peer_id, received_handshake, notifications_sink, .. } => { + // `received_handshake` can be either a `Status` message if received from the + // legacy substream ,or a `BlockAnnouncesHandshake` if received from the block + // announces substream. 
+ match as DecodeAll>::decode_all(&mut &received_handshake[..]) { + Ok(GenericMessage::Status(handshake)) => { + let handshake = BlockAnnouncesHandshake { + roles: handshake.roles, + best_number: handshake.best_number, + best_hash: handshake.best_hash, + genesis_hash: handshake.genesis_hash, + }; + + self.on_peer_connected(peer_id, handshake, notifications_sink) + }, + Ok(msg) => { + debug!( + target: "sync", + "Expected Status message from {}, but got {:?}", + peer_id, + msg, + ); + self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); + CustomMessageOutcome::None + } + Err(err) => { + match as DecodeAll>::decode_all(&mut &received_handshake[..]) { + Ok(handshake) => { + self.on_peer_connected(peer_id, handshake, notifications_sink) + } + Err(err2) => { + debug!( + target: "sync", + "Couldn't decode handshake sent by {}: {:?}: {} & {}", + peer_id, + received_handshake, + err.what(), + err2, + ); + self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); + CustomMessageOutcome::None + } + } + } + } + } + GenericProtoOut::CustomProtocolReplaced { peer_id, notifications_sink, .. } => { + CustomMessageOutcome::NotificationStreamReplaced { + remote: peer_id, + protocols: self.protocol_name_by_engine.keys().cloned().collect(), + notifications_sink, + } + }, + GenericProtoOut::CustomProtocolClosed { peer_id, .. 
} => { + self.on_peer_disconnected(peer_id) + }, + GenericProtoOut::LegacyMessage { peer_id, message } => + self.on_custom_message(peer_id, message), + GenericProtoOut::Notification { peer_id, protocol_name, message } => + match self.legacy_equiv_by_name.get(&protocol_name) { + Some(Fallback::Consensus(engine_id)) => { + CustomMessageOutcome::NotificationsReceived { + remote: peer_id, + messages: vec![(*engine_id, message.freeze())], + } + } + Some(Fallback::Transactions) => { + if let Ok(m) = message::Transactions::decode(&mut message.as_ref()) { + self.on_transactions(peer_id, m); + } else { + warn!(target: "sub-libp2p", "Failed to decode transactions list"); + } + CustomMessageOutcome::None + } + Some(Fallback::BlockAnnounce) => { + if let Ok(announce) = message::BlockAnnounce::decode(&mut message.as_ref()) { + let outcome = self.on_block_announce(peer_id.clone(), announce); + self.update_peer_info(&peer_id); + outcome + } else { + warn!(target: "sub-libp2p", "Failed to decode block announce"); + CustomMessageOutcome::None + } + } + None => { + debug!(target: "sub-libp2p", "Received notification from unknown protocol {:?}", protocol_name); + CustomMessageOutcome::None + } + } + }; + + if let CustomMessageOutcome::None = outcome { + Poll::Pending + } else { + Poll::Ready(NetworkBehaviourAction::GenerateEvent(outcome)) + } + } + + fn inject_addr_reach_failure( + &mut self, + peer_id: Option<&PeerId>, + addr: &Multiaddr, + error: &dyn std::error::Error + ) { + self.behaviour.inject_addr_reach_failure(peer_id, addr, error) + } + + fn inject_dial_failure(&mut self, peer_id: &PeerId) { + self.behaviour.inject_dial_failure(peer_id) + } + + fn inject_new_listen_addr(&mut self, addr: &Multiaddr) { + self.behaviour.inject_new_listen_addr(addr) + } + + fn inject_expired_listen_addr(&mut self, addr: &Multiaddr) { + self.behaviour.inject_expired_listen_addr(addr) + } + + fn inject_new_external_addr(&mut self, addr: &Multiaddr) { + 
self.behaviour.inject_new_external_addr(addr) + } + + fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn std::error::Error + 'static)) { + self.behaviour.inject_listener_error(id, err); + } + + fn inject_listener_closed(&mut self, id: ListenerId, reason: Result<(), &io::Error>) { + self.behaviour.inject_listener_closed(id, reason); + } +} + +impl Drop for Protocol { + fn drop(&mut self) { + debug!(target: "sync", "Network stats:\n{}", self.format_stats()); + } +} diff --git a/client/network/src/protocol/event.rs b/client/network/src/protocol/event.rs new file mode 100644 index 0000000000000..637bf805b5024 --- /dev/null +++ b/client/network/src/protocol/event.rs @@ -0,0 +1,100 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Network event types. These are are not the part of the protocol, but rather +//! events that happen on the network like DHT get/put results received. + +use bytes::Bytes; +use libp2p::core::PeerId; +use libp2p::kad::record::Key; +use sp_runtime::ConsensusEngineId; + +/// Events generated by DHT as a response to get_value and put_value requests. +#[derive(Debug, Clone)] +#[must_use] +pub enum DhtEvent { + /// The value was found. + ValueFound(Vec<(Key, Vec)>), + + /// The requested record has not been found in the DHT. 
+ ValueNotFound(Key), + + /// The record has been successfully inserted into the DHT. + ValuePut(Key), + + /// An error has occurred while putting a record into the DHT. + ValuePutFailed(Key), +} + +/// Type for events generated by networking layer. +#[derive(Debug, Clone)] +#[must_use] +pub enum Event { + /// Event generated by a DHT. + Dht(DhtEvent), + + /// Opened a substream with the given node with the given notifications protocol. + /// + /// The protocol is always one of the notification protocols that have been registered. + NotificationStreamOpened { + /// Node we opened the substream with. + remote: PeerId, + /// The concerned protocol. Each protocol uses a different substream. + engine_id: ConsensusEngineId, + /// Role of the remote. + role: ObservedRole, + }, + + /// Closed a substream with the given node. Always matches a corresponding previous + /// `NotificationStreamOpened` message. + NotificationStreamClosed { + /// Node we closed the substream with. + remote: PeerId, + /// The concerned protocol. Each protocol uses a different substream. + engine_id: ConsensusEngineId, + }, + + /// Received one or more messages from the given node using the given protocol. + NotificationsReceived { + /// Node we received the message from. + remote: PeerId, + /// Concerned protocol and associated message. + messages: Vec<(ConsensusEngineId, Bytes)>, + }, +} + +/// Role that the peer sent to us during the handshake, with the addition of what our local node +/// knows about that peer. +#[derive(Debug, Clone)] +pub enum ObservedRole { + /// Full node. + Full, + /// Light node. + Light, + /// When we are a validator node, this is a sentry that protects us. + OurSentry, + /// When we are a sentry node, this is the authority we are protecting. + OurGuardedAuthority, + /// Third-party authority. + Authority, +} + +impl ObservedRole { + /// Returns `true` for `ObservedRole::Light`. 
+ pub fn is_light(&self) -> bool { + matches!(self, ObservedRole::Light) + } +} diff --git a/client/network/src/protocol/generic_proto.rs b/client/network/src/protocol/generic_proto.rs new file mode 100644 index 0000000000000..3133471b0d249 --- /dev/null +++ b/client/network/src/protocol/generic_proto.rs @@ -0,0 +1,29 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Implementation of libp2p's `NetworkBehaviour` trait that opens a single substream with the +//! remote and then allows any communication with them. +//! +//! The `Protocol` struct uses `GenericProto` in order to open substreams with the rest of the +//! network, then performs the Substrate protocol handling on top. + +pub use self::behaviour::{GenericProto, GenericProtoOut}; +pub use self::handler::{NotifsHandlerError, NotificationsSink, Ready, LegacyConnectionKillError}; + +mod behaviour; +mod handler; +mod upgrade; +mod tests; diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs new file mode 100644 index 0000000000000..e7e2cb035d65c --- /dev/null +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -0,0 +1,1474 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use crate::config::ProtocolId; +use crate::protocol::generic_proto::{ + handler::{NotificationsSink, NotifsHandlerProto, NotifsHandlerOut, NotifsHandlerIn}, + upgrade::RegisteredProtocol +}; + +use bytes::BytesMut; +use fnv::FnvHashMap; +use futures::prelude::*; +use libp2p::core::{ConnectedPoint, Multiaddr, PeerId, connection::ConnectionId}; +use libp2p::swarm::{ + DialPeerCondition, + NetworkBehaviour, + NetworkBehaviourAction, + NotifyHandler, + PollParameters +}; +use log::{debug, error, trace, warn}; +use parking_lot::RwLock; +use rand::distributions::{Distribution as _, Uniform}; +use smallvec::SmallVec; +use std::task::{Context, Poll}; +use std::{borrow::Cow, cmp, collections::{hash_map::Entry, VecDeque}}; +use std::{error, mem, pin::Pin, str, sync::Arc, time::Duration}; +use wasm_timer::Instant; + +/// Network behaviour that handles opening substreams for custom protocols with other peers. +/// +/// ## Legacy vs new protocol +/// +/// The `GenericProto` behaves as following: +/// +/// - Whenever a connection is established, we open a single substream (called "legacy protocol" in +/// the source code) on that connection. This substream name depends on the `protocol_id` and +/// `versions` passed at initialization. If the remote refuses this substream, we close the +/// connection. 
+/// +/// - For each registered protocol, we also open an additional substream for this protocol. If the +/// remote refuses this substream, then it's fine. +/// +/// - Whenever we want to send a message, we can call either `send_packet` to force the legacy +/// substream, or `write_notification` to indicate a registered protocol. If the registered +/// protocol was refused or isn't supported by the remote, we always use the legacy instead. +/// +/// ## How it works +/// +/// The role of the `GenericProto` is to synchronize the following components: +/// +/// - The libp2p swarm that opens new connections and reports disconnects. +/// - The connection handler (see `handler.rs`) that handles individual connections. +/// - The peerset manager (PSM) that requests links to peers to be established or broken. +/// - The external API, that requires knowledge of the links that have been established. +/// +/// Each connection handler can be in four different states: Enabled+Open, Enabled+Closed, +/// Disabled+Open, or Disabled+Closed. The Enabled/Disabled component must be in sync with the +/// peerset manager. For example, if the peerset manager requires a disconnection, we disable the +/// connection handlers of that peer. The Open/Closed component must be in sync with the external +/// API. +/// +/// However, a connection handler for a peer only exists if we are actually connected to that peer. +/// What this means is that there are six possible states for each peer: Disconnected, Dialing +/// (trying to connect), Enabled+Open, Enabled+Closed, Disabled+Open, Disabled+Closed. +/// Most notably, the Dialing state must correspond to a "link established" state in the peerset +/// manager. In other words, the peerset manager doesn't differentiate whether we are dialing a +/// peer or connected to it. +/// +/// There may be multiple connections to a peer. 
However, the status of a peer on +/// the API of this behaviour and towards the peerset manager is aggregated in +/// the following way: +/// +/// 1. The enabled/disabled status is the same across all connections, as +/// decided by the peerset manager. +/// 2. `send_packet` and `write_notification` always send all data over +/// the same connection to preserve the ordering provided by the transport, +/// as long as that connection is open. If it closes, a second open +/// connection may take over, if one exists, but that case should be no +/// different than a single connection failing and being re-established +/// in terms of potential reordering and dropped messages. Messages can +/// be received on any connection. +/// 3. The behaviour reports `GenericProtoOut::CustomProtocolOpen` when the +/// first connection reports `NotifsHandlerOut::Open`. +/// 4. The behaviour reports `GenericProtoOut::CustomProtocolClosed` when the +/// last connection reports `NotifsHandlerOut::Closed`. +/// +/// In this way, the number of actual established connections to the peer is +/// an implementation detail of this behaviour. Note that, in practice and at +/// the time of this writing, there may be at most two connections to a peer +/// and only as a result of simultaneous dialing. However, the implementation +/// accommodates for any number of connections. +/// +/// Additionally, there also exists a "banning" system. If we fail to dial a peer, we "ban" it for +/// a few seconds. If the PSM requests connecting to a peer that is currently "banned", the next +/// dialing attempt is delayed until after the ban expires. However, the PSM will still consider +/// the peer to be connected. This "ban" is thus not a ban in a strict sense: If a "banned" peer +/// tries to connect, the connection is accepted. A ban only delays dialing attempts. +/// +pub struct GenericProto { + /// `PeerId` of the local node. + local_peer_id: PeerId, + + /// Legacy protocol to open with peers. 
Never modified. + legacy_protocol: RegisteredProtocol, + + /// Notification protocols. Entries are only ever added and not removed. + /// Contains, for each protocol, the protocol name and the message to send as part of the + /// initial handshake. + notif_protocols: Vec<(Cow<'static, str>, Arc>>)>, + + /// Receiver for instructions about who to connect to or disconnect from. + peerset: sc_peerset::Peerset, + + /// List of peers in our state. + peers: FnvHashMap, + + /// The elements in `peers` occasionally contain `Delay` objects that we would normally have + /// to be polled one by one. In order to avoid doing so, as an optimization, every `Delay` is + /// instead put inside of `delays` and reference by a [`DelayId`]. This stream + /// yields `PeerId`s whose `DelayId` is potentially ready. + /// + /// By design, we never remove elements from this list. Elements are removed only when the + /// `Delay` triggers. As such, this stream may produce obsolete elements. + delays: stream::FuturesUnordered + Send>>>, + + /// [`DelayId`] to assign to the next delay. + next_delay_id: DelayId, + + /// List of incoming messages we have sent to the peer set manager and that are waiting for an + /// answer. + incoming: SmallVec<[IncomingPeer; 6]>, + + /// We generate indices to identify incoming connections. This is the next value for the index + /// to use when a connection is incoming. + next_incoming_index: sc_peerset::IncomingIndex, + + /// Events to produce from `poll()`. + events: VecDeque>, +} + +/// Identifier for a delay firing. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +struct DelayId(u64); + +/// State of a peer we're connected to. +#[derive(Debug)] +enum PeerState { + /// State is poisoned. This is a temporary state for a peer and we should always switch back + /// to it later. If it is found in the wild, that means there was either a panic or a bug in + /// the state machine code. + Poisoned, + + /// The peer misbehaved. 
If the PSM wants us to connect to this peer, we will add an artificial + /// delay to the connection. + Banned { + /// Until when the peer is banned. + until: Instant, + }, + + /// The peerset requested that we connect to this peer. We are currently not connected. + PendingRequest { + /// When to actually start dialing. References an entry in `delays`. + timer: DelayId, + /// When the `timer` will trigger. + timer_deadline: Instant, + }, + + /// The peerset requested that we connect to this peer. We are currently dialing this peer. + Requested, + + /// We are connected to this peer but the peerset refused it. + /// + /// We may still have ongoing traffic with that peer, but it should cease shortly. + Disabled { + /// The connections that are currently open for custom protocol traffic. + open: SmallVec<[(ConnectionId, NotificationsSink); crate::MAX_CONNECTIONS_PER_PEER]>, + /// If `Some`, any dial attempts to this peer are delayed until the given `Instant`. + banned_until: Option, + }, + + /// We are connected to this peer but we are not opening any Substrate substream. The handler + /// will be enabled when `timer` fires. This peer can still perform Kademlia queries and such, + /// but should get disconnected in a few seconds. + DisabledPendingEnable { + /// The connections that are currently open for custom protocol traffic. + open: SmallVec<[(ConnectionId, NotificationsSink); crate::MAX_CONNECTIONS_PER_PEER]>, + /// When to enable this remote. References an entry in `delays`. + timer: DelayId, + /// When the `timer` will trigger. + timer_deadline: Instant, + }, + + /// We are connected to this peer and the peerset has accepted it. The handler is in the + /// enabled state. + Enabled { + /// The connections that are currently open for custom protocol traffic. 
+ open: SmallVec<[(ConnectionId, NotificationsSink); crate::MAX_CONNECTIONS_PER_PEER]>, + }, + + /// We received an incoming connection from this peer and forwarded that + /// connection request to the peerset. The connection handlers are waiting + /// for initialisation, i.e. to be enabled or disabled based on whether + /// the peerset accepts or rejects the peer. + Incoming, +} + +impl PeerState { + /// True if there exists an established connection to the peer + /// that is open for custom protocol traffic. + fn is_open(&self) -> bool { + self.get_open().is_some() + } + + /// Returns the [`NotificationsSink`] of the first established connection + /// that is open for custom protocol traffic. + fn get_open(&self) -> Option<&NotificationsSink> { + match self { + PeerState::Disabled { open, .. } | + PeerState::DisabledPendingEnable { open, .. } | + PeerState::Enabled { open, .. } => + if !open.is_empty() { + Some(&open[0].1) + } else { + None + } + PeerState::Poisoned => None, + PeerState::Banned { .. } => None, + PeerState::PendingRequest { .. } => None, + PeerState::Requested => None, + PeerState::Incoming { .. } => None, + } + } + + /// True if that node has been requested by the PSM. + fn is_requested(&self) -> bool { + match self { + PeerState::Poisoned => false, + PeerState::Banned { .. } => false, + PeerState::PendingRequest { .. } => true, + PeerState::Requested => true, + PeerState::Disabled { .. } => false, + PeerState::DisabledPendingEnable { .. } => true, + PeerState::Enabled { .. } => true, + PeerState::Incoming { .. } => false, + } + } +} + +/// State of an "incoming" message sent to the peer set manager. +#[derive(Debug)] +struct IncomingPeer { + /// Id of the remote peer of the incoming connection. + peer_id: PeerId, + /// If true, this "incoming" still corresponds to an actual connection. If false, then the + /// connection corresponding to it has been closed or replaced already. + alive: bool, + /// Id that the we sent to the peerset. 
+ incoming_id: sc_peerset::IncomingIndex, +} + +/// Event that can be emitted by the `GenericProto`. +#[derive(Debug)] +pub enum GenericProtoOut { + /// Opened a custom protocol with the remote. + CustomProtocolOpen { + /// Id of the peer we are connected to. + peer_id: PeerId, + /// Handshake that was sent to us. + /// This is normally a "Status" message, but this is out of the concern of this code. + received_handshake: Vec, + /// Object that permits sending notifications to the peer. + notifications_sink: NotificationsSink, + }, + + /// The [`NotificationsSink`] object used to send notifications with the given peer must be + /// replaced with a new one. + /// + /// This event is typically emitted when a transport-level connection is closed and we fall + /// back to a secondary connection. + CustomProtocolReplaced { + /// Id of the peer we are connected to. + peer_id: PeerId, + /// Replacement for the previous [`NotificationsSink`]. + notifications_sink: NotificationsSink, + }, + + /// Closed a custom protocol with the remote. The existing [`NotificationsSink`] should + /// be dropped. + CustomProtocolClosed { + /// Id of the peer we were connected to. + peer_id: PeerId, + /// Reason why the substream closed, for debugging purposes. + reason: Cow<'static, str>, + }, + + /// Receives a message on the legacy substream. + LegacyMessage { + /// Id of the peer the message came from. + peer_id: PeerId, + /// Message that has been received. + message: BytesMut, + }, + + /// Receives a message on a custom protocol substream. + /// + /// Also concerns received notifications for the notifications API. + Notification { + /// Id of the peer the message came from. + peer_id: PeerId, + /// Engine corresponding to the message. + protocol_name: Cow<'static, str>, + /// Message that has been received. + message: BytesMut, + }, +} + +impl GenericProto { + /// Creates a `CustomProtos`. 
+ pub fn new( + local_peer_id: PeerId, + protocol: impl Into, + versions: &[u8], + handshake_message: Vec, + peerset: sc_peerset::Peerset, + notif_protocols: impl Iterator, Vec)>, + ) -> Self { + let notif_protocols = notif_protocols + .map(|(n, hs)| (n, Arc::new(RwLock::new(hs)))) + .collect::>(); + + assert!(!notif_protocols.is_empty()); + + let legacy_handshake_message = Arc::new(RwLock::new(handshake_message)); + let legacy_protocol = RegisteredProtocol::new(protocol, versions, legacy_handshake_message); + + GenericProto { + local_peer_id, + legacy_protocol, + notif_protocols, + peerset, + peers: FnvHashMap::default(), + delays: Default::default(), + next_delay_id: DelayId(0), + incoming: SmallVec::new(), + next_incoming_index: sc_peerset::IncomingIndex(0), + events: VecDeque::new(), + } + } + + /// Registers a new notifications protocol. + /// + /// You are very strongly encouraged to call this method very early on. Any open connection + /// will retain the protocols that were registered then, and not any new one. + pub fn register_notif_protocol( + &mut self, + protocol_name: impl Into>, + handshake_msg: impl Into> + ) { + self.notif_protocols.push((protocol_name.into(), Arc::new(RwLock::new(handshake_msg.into())))); + } + + /// Modifies the handshake of the given notifications protocol. + /// + /// Has no effect if the protocol is unknown. + pub fn set_notif_protocol_handshake( + &mut self, + protocol_name: &str, + handshake_message: impl Into> + ) { + if let Some(protocol) = self.notif_protocols.iter_mut().find(|(name, _)| name == protocol_name) { + *protocol.1.write() = handshake_message.into(); + } + } + + /// Modifies the handshake of the legacy protocol. + pub fn set_legacy_handshake_message( + &mut self, + handshake_message: impl Into> + ) { + *self.legacy_protocol.handshake_message().write() = handshake_message.into(); + } + + /// Returns the number of discovered nodes that we keep in memory. 
+ pub fn num_discovered_peers(&self) -> usize { + self.peerset.num_discovered_peers() + } + + /// Returns the list of all the peers we have an open channel to. + pub fn open_peers<'a>(&'a self) -> impl Iterator + 'a { + self.peers.iter().filter(|(_, state)| state.is_open()).map(|(id, _)| id) + } + + /// Returns true if we have an open connection to the given peer. + pub fn is_open(&self, peer_id: &PeerId) -> bool { + self.peers.get(peer_id).map(|p| p.is_open()).unwrap_or(false) + } + + /// Returns the [`NotificationsSink`] that sends notifications to the given peer, or `None` + /// if the custom protocols aren't opened with this peer. + /// + /// If [`GenericProto::is_open`] returns `true` for this `PeerId`, then this method is + /// guaranteed to return `Some`. + pub fn notifications_sink(&self, peer_id: &PeerId) -> Option<&NotificationsSink> { + self.peers.get(peer_id).and_then(|p| p.get_open()) + } + + /// Disconnects the given peer if we are connected to it. + pub fn disconnect_peer(&mut self, peer_id: &PeerId) { + debug!(target: "sub-libp2p", "External API => Disconnect {:?}", peer_id); + self.disconnect_peer_inner(peer_id, None); + } + + /// Inner implementation of `disconnect_peer`. If `ban` is `Some`, we ban the peer + /// for the specific duration. + fn disconnect_peer_inner(&mut self, peer_id: &PeerId, ban: Option) { + let mut entry = if let Entry::Occupied(entry) = self.peers.entry(peer_id.clone()) { + entry + } else { + return + }; + + match mem::replace(entry.get_mut(), PeerState::Poisoned) { + // We're not connected anyway. + st @ PeerState::Disabled { .. } => *entry.into_mut() = st, + st @ PeerState::Requested => *entry.into_mut() = st, + st @ PeerState::PendingRequest { .. } => *entry.into_mut() = st, + st @ PeerState::Banned { .. } => *entry.into_mut() = st, + + // DisabledPendingEnable => Disabled. 
+ PeerState::DisabledPendingEnable { + open, + timer_deadline, + timer: _ + } => { + debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); + self.peerset.dropped(peer_id.clone()); + let banned_until = Some(if let Some(ban) = ban { + cmp::max(timer_deadline, Instant::now() + ban) + } else { + timer_deadline + }); + *entry.into_mut() = PeerState::Disabled { + open, + banned_until + } + }, + + // Enabled => Disabled. + PeerState::Enabled { open } => { + debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); + self.peerset.dropped(peer_id.clone()); + debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", peer_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::All, + event: NotifsHandlerIn::Disable, + }); + let banned_until = ban.map(|dur| Instant::now() + dur); + *entry.into_mut() = PeerState::Disabled { + open, + banned_until + } + }, + + // Incoming => Disabled. + PeerState::Incoming => { + let inc = if let Some(inc) = self.incoming.iter_mut() + .find(|i| i.peer_id == *entry.key() && i.alive) { + inc + } else { + error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in \ + incoming for incoming peer"); + return + }; + + inc.alive = false; + debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", peer_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::All, + event: NotifsHandlerIn::Disable, + }); + let banned_until = ban.map(|dur| Instant::now() + dur); + *entry.into_mut() = PeerState::Disabled { + open: SmallVec::new(), + banned_until + } + }, + + PeerState::Poisoned => + error!(target: "sub-libp2p", "State of {:?} is poisoned", peer_id), + } + } + + /// Returns the list of all the peers that the peerset currently requests us to be connected to. 
+ pub fn requested_peers<'a>(&'a self) -> impl Iterator + 'a { + self.peers.iter().filter(|(_, state)| state.is_requested()).map(|(id, _)| id) + } + + /// Returns true if we try to open protocols with the given peer. + pub fn is_enabled(&self, peer_id: &PeerId) -> bool { + match self.peers.get(peer_id) { + None => false, + Some(PeerState::Disabled { .. }) => false, + Some(PeerState::DisabledPendingEnable { .. }) => false, + Some(PeerState::Enabled { .. }) => true, + Some(PeerState::Incoming { .. }) => false, + Some(PeerState::Requested) => false, + Some(PeerState::PendingRequest { .. }) => false, + Some(PeerState::Banned { .. }) => false, + Some(PeerState::Poisoned) => false, + } + } + + /// Notify the behaviour that we have learned about the existence of nodes. + /// + /// Can be called multiple times with the same `PeerId`s. + pub fn add_discovered_nodes(&mut self, peer_ids: impl Iterator) { + let local_peer_id = &self.local_peer_id; + self.peerset.discovered(peer_ids.filter_map(|peer_id| { + if peer_id == *local_peer_id { + error!( + target: "sub-libp2p", + "Discovered our own identity. This is a minor inconsequential bug." + ); + return None; + } + + debug!(target: "sub-libp2p", "PSM <= Discovered({:?})", peer_id); + Some(peer_id) + })); + } + + /// Sends a notification to a peer. + /// + /// Has no effect if the custom protocol is not open with the given peer. + /// + /// Also note that even if we have a valid open substream, it may in fact be already closed + /// without us knowing, in which case the packet will not be received. + /// + /// The `fallback` parameter is used for backwards-compatibility reason if the remote doesn't + /// support our protocol. One needs to pass the equivalent of what would have been passed + /// with `send_packet`. 
+ pub fn write_notification( + &mut self, + target: &PeerId, + protocol_name: Cow<'static, str>, + message: impl Into>, + ) { + let notifs_sink = match self.peers.get(target).and_then(|p| p.get_open()) { + None => { + debug!(target: "sub-libp2p", + "Tried to sent notification to {:?} without an open channel.", + target); + return + }, + Some(sink) => sink + }; + + trace!( + target: "sub-libp2p", + "External API => Notification({:?}, {:?})", + target, + protocol_name, + ); + trace!(target: "sub-libp2p", "Handler({:?}) <= Packet", target); + notifs_sink.send_sync_notification( + protocol_name, + message + ); + } + + /// Returns the state of the peerset manager, for debugging purposes. + pub fn peerset_debug_info(&mut self) -> serde_json::Value { + self.peerset.debug_info() + } + + /// Function that is called when the peerset wants us to connect to a peer. + fn peerset_report_connect(&mut self, peer_id: PeerId) { + let mut occ_entry = match self.peers.entry(peer_id) { + Entry::Occupied(entry) => entry, + Entry::Vacant(entry) => { + // If there's no entry in `self.peers`, start dialing. 
+ debug!(target: "sub-libp2p", "PSM => Connect({:?}): Starting to connect", entry.key()); + debug!(target: "sub-libp2p", "Libp2p <= Dial {:?}", entry.key()); + self.events.push_back(NetworkBehaviourAction::DialPeer { + peer_id: entry.key().clone(), + condition: DialPeerCondition::Disconnected + }); + entry.insert(PeerState::Requested); + return; + } + }; + + let now = Instant::now(); + + match mem::replace(occ_entry.get_mut(), PeerState::Poisoned) { + PeerState::Banned { ref until } if *until > now => { + let peer_id = occ_entry.key().clone(); + debug!(target: "sub-libp2p", "PSM => Connect({:?}): Will start to connect at \ + until {:?}", peer_id, until); + + let delay_id = self.next_delay_id; + self.next_delay_id.0 += 1; + let delay = futures_timer::Delay::new(*until - now); + self.delays.push(async move { + delay.await; + (delay_id, peer_id) + }.boxed()); + + *occ_entry.into_mut() = PeerState::PendingRequest { + timer: delay_id, + timer_deadline: *until, + }; + }, + + PeerState::Banned { .. 
} => { + debug!(target: "sub-libp2p", "PSM => Connect({:?}): Starting to connect", occ_entry.key()); + debug!(target: "sub-libp2p", "Libp2p <= Dial {:?}", occ_entry.key()); + self.events.push_back(NetworkBehaviourAction::DialPeer { + peer_id: occ_entry.key().clone(), + condition: DialPeerCondition::Disconnected + }); + *occ_entry.into_mut() = PeerState::Requested; + }, + + PeerState::Disabled { + open, + banned_until: Some(ref banned) + } if *banned > now => { + let peer_id = occ_entry.key().clone(); + debug!(target: "sub-libp2p", "PSM => Connect({:?}): But peer is banned until {:?}", + peer_id, banned); + + let delay_id = self.next_delay_id; + self.next_delay_id.0 += 1; + let delay = futures_timer::Delay::new(*banned - now); + self.delays.push(async move { + delay.await; + (delay_id, peer_id) + }.boxed()); + + *occ_entry.into_mut() = PeerState::DisabledPendingEnable { + open, + timer: delay_id, + timer_deadline: *banned, + }; + }, + + PeerState::Disabled { open, banned_until: _ } => { + debug!(target: "sub-libp2p", "PSM => Connect({:?}): Enabling connections.", + occ_entry.key()); + debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", occ_entry.key()); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: occ_entry.key().clone(), + handler: NotifyHandler::All, + event: NotifsHandlerIn::Enable, + }); + *occ_entry.into_mut() = PeerState::Enabled { open }; + }, + + PeerState::Incoming => { + debug!(target: "sub-libp2p", "PSM => Connect({:?}): Enabling connections.", + occ_entry.key()); + if let Some(inc) = self.incoming.iter_mut() + .find(|i| i.peer_id == *occ_entry.key() && i.alive) { + inc.alive = false; + } else { + error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in \ + incoming for incoming peer") + } + debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", occ_entry.key()); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: occ_entry.key().clone(), + handler: NotifyHandler::All, + event: 
NotifsHandlerIn::Enable, + }); + *occ_entry.into_mut() = PeerState::Enabled { open: SmallVec::new() }; + }, + + st @ PeerState::Enabled { .. } => { + warn!(target: "sub-libp2p", + "PSM => Connect({:?}): Already connected.", + occ_entry.key()); + *occ_entry.into_mut() = st; + }, + st @ PeerState::DisabledPendingEnable { .. } => { + warn!(target: "sub-libp2p", + "PSM => Connect({:?}): Already pending enabling.", + occ_entry.key()); + *occ_entry.into_mut() = st; + }, + st @ PeerState::Requested { .. } | st @ PeerState::PendingRequest { .. } => { + warn!(target: "sub-libp2p", + "PSM => Connect({:?}): Duplicate request.", + occ_entry.key()); + *occ_entry.into_mut() = st; + }, + + PeerState::Poisoned => + error!(target: "sub-libp2p", "State of {:?} is poisoned", occ_entry.key()), + } + } + + /// Function that is called when the peerset wants us to disconnect from a peer. + fn peerset_report_disconnect(&mut self, peer_id: PeerId) { + let mut entry = match self.peers.entry(peer_id) { + Entry::Occupied(entry) => entry, + Entry::Vacant(entry) => { + debug!(target: "sub-libp2p", "PSM => Drop({:?}): Already disabled.", entry.key()); + return + } + }; + + match mem::replace(entry.get_mut(), PeerState::Poisoned) { + st @ PeerState::Disabled { .. } | st @ PeerState::Banned { .. 
} => { + debug!(target: "sub-libp2p", "PSM => Drop({:?}): Already disabled.", entry.key()); + *entry.into_mut() = st; + }, + + PeerState::DisabledPendingEnable { + open, + timer_deadline, + timer: _ + } => { + debug!(target: "sub-libp2p", + "PSM => Drop({:?}): Interrupting pending enabling.", + entry.key()); + *entry.into_mut() = PeerState::Disabled { + open, + banned_until: Some(timer_deadline), + }; + }, + + PeerState::Enabled { open } => { + debug!(target: "sub-libp2p", "PSM => Drop({:?}): Disabling connections.", entry.key()); + debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", entry.key()); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: entry.key().clone(), + handler: NotifyHandler::All, + event: NotifsHandlerIn::Disable, + }); + *entry.into_mut() = PeerState::Disabled { + open, + banned_until: None + } + }, + st @ PeerState::Incoming => { + error!(target: "sub-libp2p", "PSM => Drop({:?}): Not enabled (Incoming).", + entry.key()); + *entry.into_mut() = st; + }, + PeerState::Requested => { + // We don't cancel dialing. Libp2p doesn't expose that on purpose, as other + // sub-systems (such as the discovery mechanism) may require dialing this peer as + // well at the same time. + debug!(target: "sub-libp2p", "PSM => Drop({:?}): Not yet connected.", entry.key()); + entry.remove(); + }, + PeerState::PendingRequest { timer_deadline, .. } => { + debug!(target: "sub-libp2p", "PSM => Drop({:?}): Not yet connected", entry.key()); + *entry.into_mut() = PeerState::Banned { until: timer_deadline } + }, + + PeerState::Poisoned => + error!(target: "sub-libp2p", "State of {:?} is poisoned", entry.key()), + } + } + + /// Function that is called when the peerset wants us to accept a connection + /// request from a peer. 
+ fn peerset_report_accept(&mut self, index: sc_peerset::IncomingIndex) { + let incoming = if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) { + self.incoming.remove(pos) + } else { + error!(target: "sub-libp2p", "PSM => Accept({:?}): Invalid index", index); + return + }; + + if !incoming.alive { + debug!(target: "sub-libp2p", "PSM => Accept({:?}, {:?}): Obsolete incoming, + sending back dropped", index, incoming.peer_id); + debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", incoming.peer_id); + self.peerset.dropped(incoming.peer_id); + return + } + + match self.peers.get_mut(&incoming.peer_id) { + Some(state @ PeerState::Incoming) => { + debug!(target: "sub-libp2p", "PSM => Accept({:?}, {:?}): Enabling connections.", + index, incoming.peer_id); + debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", incoming.peer_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: incoming.peer_id, + handler: NotifyHandler::All, + event: NotifsHandlerIn::Enable, + }); + *state = PeerState::Enabled { open: SmallVec::new() }; + } + peer => error!(target: "sub-libp2p", + "State mismatch in libp2p: Expected alive incoming. Got {:?}.", + peer) + } + } + + /// Function that is called when the peerset wants us to reject an incoming peer. 
+ fn peerset_report_reject(&mut self, index: sc_peerset::IncomingIndex) { + let incoming = if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) { + self.incoming.remove(pos) + } else { + error!(target: "sub-libp2p", "PSM => Reject({:?}): Invalid index", index); + return + }; + + if !incoming.alive { + debug!(target: "sub-libp2p", "PSM => Reject({:?}, {:?}): Obsolete incoming, \ + ignoring", index, incoming.peer_id); + return + } + + match self.peers.get_mut(&incoming.peer_id) { + Some(state @ PeerState::Incoming) => { + debug!(target: "sub-libp2p", "PSM => Reject({:?}, {:?}): Rejecting connections.", + index, incoming.peer_id); + debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", incoming.peer_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: incoming.peer_id, + handler: NotifyHandler::All, + event: NotifsHandlerIn::Disable, + }); + *state = PeerState::Disabled { + open: SmallVec::new(), + banned_until: None + }; + } + peer => error!(target: "sub-libp2p", + "State mismatch in libp2p: Expected alive incoming. Got {:?}.", + peer) + } + } +} + +impl NetworkBehaviour for GenericProto { + type ProtocolsHandler = NotifsHandlerProto; + type OutEvent = GenericProtoOut; + + fn new_handler(&mut self) -> Self::ProtocolsHandler { + NotifsHandlerProto::new( + self.legacy_protocol.clone(), + self.notif_protocols.clone(), + ) + } + + fn addresses_of_peer(&mut self, _: &PeerId) -> Vec { + Vec::new() + } + + fn inject_connected(&mut self, _: &PeerId) { + } + + fn inject_connection_established(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + debug!(target: "sub-libp2p", "Libp2p => Connection ({:?},{:?}) to {} established.", + conn, endpoint, peer_id); + match (self.peers.entry(peer_id.clone()).or_insert(PeerState::Poisoned), endpoint) { + (st @ &mut PeerState::Requested, endpoint) | + (st @ &mut PeerState::PendingRequest { .. 
}, endpoint) => { + debug!(target: "sub-libp2p", + "Libp2p => Connected({}, {:?}): Connection was requested by PSM.", + peer_id, endpoint + ); + *st = PeerState::Enabled { open: SmallVec::new() }; + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::One(*conn), + event: NotifsHandlerIn::Enable + }); + } + + // Note: it may seem weird that "Banned" peers get treated as if they were absent. + // This is because the word "Banned" means "temporarily prevent outgoing connections to + // this peer", and not "banned" in the sense that we would refuse the peer altogether. + (st @ &mut PeerState::Poisoned, endpoint @ ConnectedPoint::Listener { .. }) | + (st @ &mut PeerState::Banned { .. }, endpoint @ ConnectedPoint::Listener { .. }) => { + let incoming_id = self.next_incoming_index; + self.next_incoming_index.0 = match self.next_incoming_index.0.checked_add(1) { + Some(v) => v, + None => { + error!(target: "sub-libp2p", "Overflow in next_incoming_index"); + return + } + }; + debug!(target: "sub-libp2p", "Libp2p => Connected({}, {:?}): Incoming connection", + peer_id, endpoint); + debug!(target: "sub-libp2p", "PSM <= Incoming({}, {:?}).", + peer_id, incoming_id); + self.peerset.incoming(peer_id.clone(), incoming_id); + self.incoming.push(IncomingPeer { + peer_id: peer_id.clone(), + alive: true, + incoming_id, + }); + *st = PeerState::Incoming { }; + } + + (st @ &mut PeerState::Poisoned, endpoint) | + (st @ &mut PeerState::Banned { .. 
}, endpoint) => { + let banned_until = if let PeerState::Banned { until } = st { + Some(*until) + } else { + None + }; + debug!(target: "sub-libp2p", + "Libp2p => Connected({},{:?}): Not requested by PSM, disabling.", + peer_id, endpoint); + *st = PeerState::Disabled { open: SmallVec::new(), banned_until }; + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::One(*conn), + event: NotifsHandlerIn::Disable + }); + } + + (PeerState::Incoming { .. }, _) => { + debug!(target: "sub-libp2p", + "Secondary connection {:?} to {} waiting for PSM decision.", + conn, peer_id); + }, + + (PeerState::Enabled { .. }, _) => { + debug!(target: "sub-libp2p", "Handler({},{:?}) <= Enable secondary connection", + peer_id, conn); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::One(*conn), + event: NotifsHandlerIn::Enable + }); + } + + (PeerState::Disabled { .. }, _) | (PeerState::DisabledPendingEnable { .. }, _) => { + debug!(target: "sub-libp2p", "Handler({},{:?}) <= Disable secondary connection", + peer_id, conn); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::One(*conn), + event: NotifsHandlerIn::Disable + }); + } + } + } + + fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + debug!(target: "sub-libp2p", "Libp2p => Connection ({:?},{:?}) to {} closed.", + conn, endpoint, peer_id); + match self.peers.get_mut(peer_id) { + Some(PeerState::Disabled { open, .. }) | + Some(PeerState::DisabledPendingEnable { open, .. }) | + Some(PeerState::Enabled { open, .. }) => { + // Check if the "link" to the peer is already considered closed, + // i.e. there is no connection that is open for custom protocols, + // in which case `CustomProtocolClosed` was already emitted. 
+ let closed = open.is_empty(); + let sink_closed = open.get(0).map_or(false, |(c, _)| c == conn); + open.retain(|(c, _)| c != conn); + if !closed { + if let Some((_, sink)) = open.get(0) { + if sink_closed { + let event = GenericProtoOut::CustomProtocolReplaced { + peer_id: peer_id.clone(), + notifications_sink: sink.clone(), + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } + } else { + debug!(target: "sub-libp2p", "External API <= Closed({})", peer_id); + let event = GenericProtoOut::CustomProtocolClosed { + peer_id: peer_id.clone(), + reason: "Disconnected by libp2p".into(), + }; + + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } + } + } + _ => {} + } + } + + fn inject_disconnected(&mut self, peer_id: &PeerId) { + match self.peers.remove(peer_id) { + None | Some(PeerState::Requested) | Some(PeerState::PendingRequest { .. }) | + Some(PeerState::Banned { .. }) => + // This is a serious bug either in this state machine or in libp2p. + error!(target: "sub-libp2p", + "`inject_disconnected` called for unknown peer {}", + peer_id), + + Some(PeerState::Disabled { open, banned_until, .. }) => { + if !open.is_empty() { + debug_assert!(false); + error!( + target: "sub-libp2p", + "State mismatch: disconnected from {} with non-empty list of connections", + peer_id + ); + } + debug!(target: "sub-libp2p", "Libp2p => Disconnected({}): Was disabled.", peer_id); + if let Some(until) = banned_until { + self.peers.insert(peer_id.clone(), PeerState::Banned { until }); + } + } + + Some(PeerState::DisabledPendingEnable { open, timer_deadline, .. 
}) => { + if !open.is_empty() { + debug_assert!(false); + error!( + target: "sub-libp2p", + "State mismatch: disconnected from {} with non-empty list of connections", + peer_id + ); + } + debug!(target: "sub-libp2p", + "Libp2p => Disconnected({}): Was disabled but pending enable.", + peer_id); + debug!(target: "sub-libp2p", "PSM <= Dropped({})", peer_id); + self.peerset.dropped(peer_id.clone()); + self.peers.insert(peer_id.clone(), PeerState::Banned { until: timer_deadline }); + } + + Some(PeerState::Enabled { open, .. }) => { + if !open.is_empty() { + debug_assert!(false); + error!( + target: "sub-libp2p", + "State mismatch: disconnected from {} with non-empty list of connections", + peer_id + ); + } + debug!(target: "sub-libp2p", "Libp2p => Disconnected({}): Was enabled.", peer_id); + debug!(target: "sub-libp2p", "PSM <= Dropped({})", peer_id); + self.peerset.dropped(peer_id.clone()); + let ban_dur = Uniform::new(5, 10).sample(&mut rand::thread_rng()); + self.peers.insert(peer_id.clone(), PeerState::Banned { + until: Instant::now() + Duration::from_secs(ban_dur) + }); + } + + // In the incoming state, we don't report "Dropped". Instead we will just ignore the + // corresponding Accept/Reject. 
+ Some(PeerState::Incoming { }) => { + if let Some(state) = self.incoming.iter_mut() + .find(|i| i.alive && i.peer_id == *peer_id) + { + debug!(target: "sub-libp2p", + "Libp2p => Disconnected({}): Was in incoming mode with id {:?}.", + peer_id, state.incoming_id); + state.alive = false; + } else { + error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in incoming \ + corresponding to an incoming state in peers") + } + } + + Some(PeerState::Poisoned) => + error!(target: "sub-libp2p", "State of peer {} is poisoned", peer_id), + } + } + + fn inject_addr_reach_failure(&mut self, peer_id: Option<&PeerId>, addr: &Multiaddr, error: &dyn error::Error) { + trace!(target: "sub-libp2p", "Libp2p => Reach failure for {:?} through {:?}: {:?}", peer_id, addr, error); + } + + fn inject_dial_failure(&mut self, peer_id: &PeerId) { + if let Entry::Occupied(mut entry) = self.peers.entry(peer_id.clone()) { + match mem::replace(entry.get_mut(), PeerState::Poisoned) { + // The peer is not in our list. + st @ PeerState::Banned { .. } => { + trace!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); + *entry.into_mut() = st; + }, + + // "Basic" situation: we failed to reach a peer that the peerset requested. + PeerState::Requested | PeerState::PendingRequest { .. } => { + debug!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); + *entry.into_mut() = PeerState::Banned { + until: Instant::now() + Duration::from_secs(5) + }; + debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); + self.peerset.dropped(peer_id.clone()) + }, + + // We can still get dial failures even if we are already connected to the peer, + // as an extra diagnostic for an earlier attempt. + st @ PeerState::Disabled { .. } | st @ PeerState::Enabled { .. } | + st @ PeerState::DisabledPendingEnable { .. } | st @ PeerState::Incoming { .. 
} => { + debug!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); + *entry.into_mut() = st; + }, + + PeerState::Poisoned => + error!(target: "sub-libp2p", "State of {:?} is poisoned", peer_id), + } + + } else { + // The peer is not in our list. + trace!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); + } + } + + fn inject_event( + &mut self, + source: PeerId, + connection: ConnectionId, + event: NotifsHandlerOut, + ) { + match event { + NotifsHandlerOut::Closed { endpoint, reason } => { + debug!(target: "sub-libp2p", + "Handler({:?}) => Endpoint {:?} closed for custom protocols: {}", + source, endpoint, reason); + + let mut entry = if let Entry::Occupied(entry) = self.peers.entry(source.clone()) { + entry + } else { + error!(target: "sub-libp2p", "Closed: State mismatch in the custom protos handler"); + return + }; + + let (last, new_notifications_sink) = match mem::replace(entry.get_mut(), PeerState::Poisoned) { + PeerState::Enabled { mut open } => { + let pos = open.iter().position(|(c, _)| c == &connection); + let sink_closed = pos == Some(0); + if let Some(pos) = pos { + open.remove(pos); + } else { + debug_assert!(false); + error!( + target: "sub-libp2p", + "State mismatch with {}: unknown closed connection", + source + ); + } + + // TODO: We switch the entire peer state to "disabled" because of possible + // race conditions involving the legacy substream. + // Once https://github.com/paritytech/substrate/issues/5670 is done, this + // should be changed to stay in the `Enabled` state. 
+ debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", source); + debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", source); + self.peerset.dropped(source.clone()); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id: source.clone(), + handler: NotifyHandler::All, + event: NotifsHandlerIn::Disable, + }); + + let last = open.is_empty(); + let new_notifications_sink = open.iter().next().and_then(|(_, sink)| + if sink_closed { + Some(sink.clone()) + } else { + None + }); + + *entry.into_mut() = PeerState::Disabled { + open, + banned_until: None + }; + + (last, new_notifications_sink) + }, + PeerState::Disabled { mut open, banned_until } => { + let pos = open.iter().position(|(c, _)| c == &connection); + let sink_closed = pos == Some(0); + if let Some(pos) = pos { + open.remove(pos); + } else { + debug_assert!(false); + error!( + target: "sub-libp2p", + "State mismatch with {}: unknown closed connection", + source + ); + } + + let last = open.is_empty(); + let new_notifications_sink = open.iter().next().and_then(|(_, sink)| + if sink_closed { + Some(sink.clone()) + } else { + None + }); + + *entry.into_mut() = PeerState::Disabled { + open, + banned_until + }; + + (last, new_notifications_sink) + }, + PeerState::DisabledPendingEnable { + mut open, + timer, + timer_deadline + } => { + let pos = open.iter().position(|(c, _)| c == &connection); + let sink_closed = pos == Some(0); + if let Some(pos) = pos { + open.remove(pos); + } else { + debug_assert!(false); + error!( + target: "sub-libp2p", + "State mismatch with {}: unknown closed connection", + source + ); + } + + let last = open.is_empty(); + let new_notifications_sink = open.iter().next().and_then(|(_, sink)| + if sink_closed { + Some(sink.clone()) + } else { + None + }); + + *entry.into_mut() = PeerState::DisabledPendingEnable { + open, + timer, + timer_deadline + }; + + (last, new_notifications_sink) + }, + state => { + error!(target: "sub-libp2p", + "Unexpected state in the custom 
protos handler: {:?}", + state); + return + } + }; + + if last { + debug!(target: "sub-libp2p", "External API <= Closed({:?})", source); + let event = GenericProtoOut::CustomProtocolClosed { + reason, + peer_id: source, + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + + } else { + if let Some(new_notifications_sink) = new_notifications_sink { + let event = GenericProtoOut::CustomProtocolReplaced { + peer_id: source, + notifications_sink: new_notifications_sink, + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } + debug!(target: "sub-libp2p", "Secondary connection closed custom protocol."); + } + } + + NotifsHandlerOut::Open { endpoint, received_handshake, notifications_sink } => { + debug!(target: "sub-libp2p", + "Handler({:?}) => Endpoint {:?} open for custom protocols.", + source, endpoint); + + let first = match self.peers.get_mut(&source) { + Some(PeerState::Enabled { ref mut open, .. }) | + Some(PeerState::DisabledPendingEnable { ref mut open, .. }) | + Some(PeerState::Disabled { ref mut open, .. }) => { + let first = open.is_empty(); + if !open.iter().any(|(c, _)| *c == connection) { + open.push((connection, notifications_sink.clone())); + } else { + error!( + target: "sub-libp2p", + "State mismatch: connection with {} opened a second time", + source + ); + } + first + } + state => { + error!(target: "sub-libp2p", + "Open: Unexpected state in the custom protos handler: {:?}", + state); + return + } + }; + + if first { + debug!(target: "sub-libp2p", "External API <= Open({:?})", source); + let event = GenericProtoOut::CustomProtocolOpen { + peer_id: source, + received_handshake, + notifications_sink + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + + } else { + // In normal situations, the handshake is supposed to be a Status message, and + // we would discard Status messages received from secondary connections. 
+ // However, in Polkadot 0.8.10 and below, nodes don't send a Status message + // when opening secondary connections and instead directly consider the + // substream as open. When connecting to such a node, the first message sent + // by the remote will always be considered by our local node as the handshake, + // even when it is a regular message. + // In order to maintain backwards compatibility, we therefore report the + // handshake as if it was a regular message, and the upper layer will ignore + // any superfluous Status message. + // The code below should be removed once Polkadot 0.8.10 and below are no + // longer widely in use, and should be replaced with simply printing a log + // entry. + debug!( + target: "sub-libp2p", + "Handler({:?}) => Secondary connection opened custom protocol", + source + ); + trace!(target: "sub-libp2p", "External API <= Message({:?})", source); + let event = GenericProtoOut::LegacyMessage { + peer_id: source, + message: From::from(&received_handshake[..]), + }; + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } + } + + NotifsHandlerOut::CustomMessage { message } => { + debug_assert!(self.is_open(&source)); + trace!(target: "sub-libp2p", "Handler({:?}) => Message", source); + trace!(target: "sub-libp2p", "External API <= Message({:?})", source); + let event = GenericProtoOut::LegacyMessage { + peer_id: source, + message, + }; + + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } + + NotifsHandlerOut::Notification { protocol_name, message } => { + debug_assert!(self.is_open(&source)); + trace!( + target: "sub-libp2p", + "Handler({:?}) => Notification({:?})", + source, + protocol_name, + ); + trace!(target: "sub-libp2p", "External API <= Message({:?}, {:?})", protocol_name, source); + let event = GenericProtoOut::Notification { + peer_id: source, + protocol_name, + message, + }; + + self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); + } + + // Don't do anything for 
non-severe errors except report them. + NotifsHandlerOut::ProtocolError { is_severe, ref error } if !is_severe => { + debug!(target: "sub-libp2p", "Handler({:?}) => Benign protocol error: {:?}", + source, error) + } + + NotifsHandlerOut::ProtocolError { error, .. } => { + debug!(target: "sub-libp2p", + "Handler({:?}) => Severe protocol error: {:?}", + source, error); + // A severe protocol error happens when we detect a "bad" peer, such as a peer on + // a different chain, or a peer that doesn't speak the same protocol(s). We + // decrease the peer's reputation, hence lowering the chances we try this peer + // again in the short term. + self.peerset.report_peer( + source.clone(), + sc_peerset::ReputationChange::new(i32::min_value(), "Protocol error") + ); + self.disconnect_peer_inner(&source, Some(Duration::from_secs(5))); + } + } + } + + fn poll( + &mut self, + cx: &mut Context, + _params: &mut impl PollParameters, + ) -> Poll< + NetworkBehaviourAction< + NotifsHandlerIn, + Self::OutEvent, + >, + > { + if let Some(event) = self.events.pop_front() { + return Poll::Ready(event); + } + + // Poll for instructions from the peerset. + // Note that the peerset is a *best effort* crate, and we have to use defensive programming. 
+ loop { + match futures::Stream::poll_next(Pin::new(&mut self.peerset), cx) { + Poll::Ready(Some(sc_peerset::Message::Accept(index))) => { + self.peerset_report_accept(index); + } + Poll::Ready(Some(sc_peerset::Message::Reject(index))) => { + self.peerset_report_reject(index); + } + Poll::Ready(Some(sc_peerset::Message::Connect(id))) => { + self.peerset_report_connect(id); + } + Poll::Ready(Some(sc_peerset::Message::Drop(id))) => { + self.peerset_report_disconnect(id); + } + Poll::Ready(None) => { + error!(target: "sub-libp2p", "Peerset receiver stream has returned None"); + break; + } + Poll::Pending => break, + } + } + + while let Poll::Ready(Some((delay_id, peer_id))) = + Pin::new(&mut self.delays).poll_next(cx) { + let peer_state = match self.peers.get_mut(&peer_id) { + Some(s) => s, + // We intentionally never remove elements from `delays`, and it may + // thus contain peers which are now gone. This is a normal situation. + None => continue, + }; + + match peer_state { + PeerState::PendingRequest { timer, .. } if *timer == delay_id => { + debug!(target: "sub-libp2p", "Libp2p <= Dial {:?} now that ban has expired", peer_id); + self.events.push_back(NetworkBehaviourAction::DialPeer { + peer_id, + condition: DialPeerCondition::Disconnected + }); + *peer_state = PeerState::Requested; + } + + PeerState::DisabledPendingEnable { timer, open, .. } if *timer == delay_id => { + debug!(target: "sub-libp2p", "Handler({:?}) <= Enable (ban expired)", peer_id); + self.events.push_back(NetworkBehaviourAction::NotifyHandler { + peer_id, + handler: NotifyHandler::All, + event: NotifsHandlerIn::Enable, + }); + *peer_state = PeerState::Enabled { open: mem::replace(open, Default::default()) }; + } + + // We intentionally never remove elements from `delays`, and it may + // thus contain obsolete entries. This is a normal situation. 
+ _ => {}, + } + } + + if let Some(event) = self.events.pop_front() { + return Poll::Ready(event); + } + + Poll::Pending + } +} diff --git a/client/network/src/protocol/generic_proto/handler.rs b/client/network/src/protocol/generic_proto/handler.rs new file mode 100644 index 0000000000000..5845130a7db87 --- /dev/null +++ b/client/network/src/protocol/generic_proto/handler.rs @@ -0,0 +1,27 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +pub use self::group::{ + NotificationsSink, NotifsHandlerError, Ready, NotifsHandlerProto, NotifsHandler, NotifsHandlerIn, NotifsHandlerOut +}; +pub use self::legacy::ConnectionKillError as LegacyConnectionKillError; + +mod group; +mod legacy; +mod notif_in; +mod notif_out; diff --git a/client/network/src/protocol/generic_proto/handler/group.rs b/client/network/src/protocol/generic_proto/handler/group.rs new file mode 100644 index 0000000000000..f355fba60fb04 --- /dev/null +++ b/client/network/src/protocol/generic_proto/handler/group.rs @@ -0,0 +1,775 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Implementations of the `IntoProtocolsHandler` and `ProtocolsHandler` traits for both incoming +//! and outgoing substreams for all gossiping protocols together. +//! +//! This is the main implementation of `ProtocolsHandler` in this crate, that handles all the +//! protocols that are Substrate-related and outside of the scope of libp2p. +//! +//! # Usage +//! +//! The handler can be in one of the following states: `Initial`, `Enabled`, `Disabled`. +//! +//! The `Initial` state is the state that the handler initially is in. It is a temporary state +//! during which the user must either enable or disable the handler. After that, the handler stays +//! either enabled or disabled. +//! +//! On the wire, we try to open the following substreams: +//! +//! - One substream for each notification protocol passed as parameter to the +//! `NotifsHandlerProto::new` function. +//! - One "legacy" substream used for anything non-related to gossiping, and used as a fallback +//! in case the notification protocol can't be opened. +//! +//! When the handler is in the `Enabled` state, we immediately open and try to maintain all the +//! aforementioned substreams. When the handler is in the `Disabled` state, we immediately close +//! (or abort opening) all these substreams. It is intended that in the future we allow states in +//! which some protocols are open and not others. 
Symmetrically, we allow incoming +//! Substrate-related substreams if and only if we are in the `Enabled` state. +//! +//! The user has the choice between sending a message with `SendNotification`, to send a +//! notification, and `SendLegacy`, to send any other kind of message. +//! + +use crate::protocol::generic_proto::{ + handler::legacy::{LegacyProtoHandler, LegacyProtoHandlerProto, LegacyProtoHandlerIn, LegacyProtoHandlerOut}, + handler::notif_in::{NotifsInHandlerProto, NotifsInHandler, NotifsInHandlerIn, NotifsInHandlerOut}, + handler::notif_out::{NotifsOutHandlerProto, NotifsOutHandler, NotifsOutHandlerIn, NotifsOutHandlerOut}, + upgrade::{NotificationsIn, NotificationsOut, NotificationsHandshakeError, RegisteredProtocol, UpgradeCollec}, +}; + +use bytes::BytesMut; +use libp2p::core::{either::{EitherError, EitherOutput}, ConnectedPoint, PeerId}; +use libp2p::core::upgrade::{EitherUpgrade, UpgradeError, SelectUpgrade, InboundUpgrade, OutboundUpgrade}; +use libp2p::swarm::{ + ProtocolsHandler, ProtocolsHandlerEvent, + IntoProtocolsHandler, + KeepAlive, + ProtocolsHandlerUpgrErr, + SubstreamProtocol, + NegotiatedSubstream, +}; +use futures::{ + channel::mpsc, + lock::{Mutex as FuturesMutex, MutexGuard as FuturesMutexGuard}, + prelude::* +}; +use log::{debug, error}; +use parking_lot::{Mutex, RwLock}; +use std::{borrow::Cow, error, io, str, sync::Arc, task::{Context, Poll}}; + +/// Number of pending notifications in asynchronous contexts. +/// See [`NotificationsSink::reserve_notification`] for context. +const ASYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 8; +/// Number of pending notifications in synchronous contexts. +const SYNC_NOTIFICATIONS_BUFFER_SIZE: usize = 2048; + +/// Implements the `IntoProtocolsHandler` trait of libp2p. +/// +/// Every time a connection with a remote starts, an instance of this struct is created and +/// sent to a background task dedicated to this connection. Once the connection is established, +/// it is turned into a [`NotifsHandler`]. 
+/// +/// See the documentation at the module level for more information. +pub struct NotifsHandlerProto { + /// Prototypes for handlers for inbound substreams, and the message we respond with in the + /// handshake. + in_handlers: Vec<(NotifsInHandlerProto, Arc>>)>, + + /// Prototypes for handlers for outbound substreams, and the initial handshake message we send. + out_handlers: Vec<(NotifsOutHandlerProto, Arc>>)>, + + /// Prototype for handler for backwards-compatibility. + legacy: LegacyProtoHandlerProto, +} + +/// The actual handler once the connection has been established. +/// +/// See the documentation at the module level for more information. +pub struct NotifsHandler { + /// Handlers for inbound substreams, and the message we respond with in the handshake. + in_handlers: Vec<(NotifsInHandler, Arc>>)>, + + /// Handlers for outbound substreams, and the initial handshake message we send. + out_handlers: Vec<(NotifsOutHandler, Arc>>)>, + + /// Whether we are the connection dialer or listener. + endpoint: ConnectedPoint, + + /// Handler for backwards-compatibility. + legacy: LegacyProtoHandler, + + /// In the situation where either the legacy substream has been opened or the handshake-bearing + /// notifications protocol is open, but we haven't sent out any [`NotifsHandlerOut::Open`] + /// event yet, this contains the received handshake waiting to be reported through the + /// external API. + pending_handshake: Option>, + + /// State of this handler. + enabled: EnabledState, + + /// If we receive inbound substream requests while in initialization mode, + /// we push the corresponding index here and process them when the handler + /// gets enabled/disabled. + pending_in: Vec, + + /// If `Some`, contains the two `Receiver`s connected to the [`NotificationsSink`] that has + /// been sent out. The notifications to send out can be pulled from this receivers. 
+ /// We use two different channels in order to have two different channel sizes, but from the + /// receiving point of view, the two channels are the same. + /// The receivers are fused in case the user drops the [`NotificationsSink`] entirely. + /// + /// Contains `Some` if and only if it has been reported to the user that the substreams are + /// open. + notifications_sink_rx: Option< + stream::Select< + stream::Fuse>, + stream::Fuse> + > + >, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +enum EnabledState { + Initial, + Enabled, + Disabled, +} + +impl IntoProtocolsHandler for NotifsHandlerProto { + type Handler = NotifsHandler; + + fn inbound_protocol(&self) -> SelectUpgrade, RegisteredProtocol> { + let in_handlers = self.in_handlers.iter() + .map(|(h, _)| h.inbound_protocol()) + .collect::>(); + + SelectUpgrade::new(in_handlers, self.legacy.inbound_protocol()) + } + + fn into_handler(self, remote_peer_id: &PeerId, connected_point: &ConnectedPoint) -> Self::Handler { + NotifsHandler { + in_handlers: self.in_handlers + .into_iter() + .map(|(proto, msg)| (proto.into_handler(remote_peer_id, connected_point), msg)) + .collect(), + out_handlers: self.out_handlers + .into_iter() + .map(|(proto, msg)| (proto.into_handler(remote_peer_id, connected_point), msg)) + .collect(), + endpoint: connected_point.clone(), + legacy: self.legacy.into_handler(remote_peer_id, connected_point), + pending_handshake: None, + enabled: EnabledState::Initial, + pending_in: Vec::new(), + notifications_sink_rx: None, + } + } +} + +/// Event that can be received by a `NotifsHandler`. +#[derive(Debug, Clone)] +pub enum NotifsHandlerIn { + /// The node should start using custom protocols. + Enable, + + /// The node should stop using custom protocols. + Disable, +} + +/// Event that can be emitted by a `NotifsHandler`. +#[derive(Debug)] +pub enum NotifsHandlerOut { + /// The connection is open for custom protocols. + Open { + /// The endpoint of the connection that is open for custom protocols. 
+ endpoint: ConnectedPoint, + /// Handshake that was sent to us. + /// This is normally a "Status" message, but this out of the concern of this code. + received_handshake: Vec, + /// How notifications can be sent to this node. + notifications_sink: NotificationsSink, + }, + + /// The connection is closed for custom protocols. + Closed { + /// The reason for closing, for diagnostic purposes. + reason: Cow<'static, str>, + /// The endpoint of the connection that closed for custom protocols. + endpoint: ConnectedPoint, + }, + + /// Received a non-gossiping message on the legacy substream. + CustomMessage { + /// Message that has been received. + /// + /// Keep in mind that this can be a `ConsensusMessage` message, which then contains a + /// notification. + message: BytesMut, + }, + + /// Received a message on a custom protocol substream. + Notification { + /// Name of the protocol of the message. + protocol_name: Cow<'static, str>, + + /// Message that has been received. + message: BytesMut, + }, + + /// An error has happened on the protocol level with this node. + ProtocolError { + /// If true the error is severe, such as a protocol violation. + is_severe: bool, + /// The error that happened. + error: Box, + }, +} + +/// Sink connected directly to the node background task. Allows sending notifications to the peer. +/// +/// Can be cloned in order to obtain multiple references to the same peer. +#[derive(Debug, Clone)] +pub struct NotificationsSink { + inner: Arc, +} + +#[derive(Debug)] +struct NotificationsSinkInner { + /// Sender to use in asynchronous contexts. Uses an asynchronous mutex. + async_channel: FuturesMutex>, + /// Sender to use in synchronous contexts. Uses a synchronous mutex. + /// This channel has a large capacity and is meant to be used in contexts where + /// back-pressure cannot be properly exerted. + /// It will be removed in a future version. 
+ sync_channel: Mutex>, +} + +/// Message emitted through the [`NotificationsSink`] and processed by the background task +/// dedicated to the peer. +#[derive(Debug)] +enum NotificationsSinkMessage { + /// Message emitted by [`NotificationsSink::reserve_notification`] and + /// [`NotificationsSink::write_notification_now`]. + Notification { + protocol_name: Cow<'static, str>, + message: Vec, + }, + + /// Must close the connection. + ForceClose, +} + +impl NotificationsSink { + /// Sends a notification to the peer. + /// + /// If too many messages are already buffered, the notification is silently discarded and the + /// connection to the peer will be closed shortly after. + /// + /// The protocol name is expected to be checked ahead of calling this method. It is a logic + /// error to send a notification using an unknown protocol. + /// + /// This method will be removed in a future version. + pub fn send_sync_notification<'a>( + &'a self, + protocol_name: Cow<'static, str>, + message: impl Into> + ) { + let mut lock = self.inner.sync_channel.lock(); + let result = lock.try_send(NotificationsSinkMessage::Notification { + protocol_name, + message: message.into() + }); + + if result.is_err() { + // Cloning the `mpsc::Sender` guarantees the allocation of an extra spot in the + // buffer, and therefore that `try_send` will succeed. + let _result2 = lock.clone().try_send(NotificationsSinkMessage::ForceClose); + debug_assert!(_result2.map(|()| true).unwrap_or_else(|err| err.is_disconnected())); + } + } + + /// Wait until the remote is ready to accept a notification. + /// + /// Returns an error in the case where the connection is closed. + /// + /// The protocol name is expected to be checked ahead of calling this method. It is a logic + /// error to send a notification using an unknown protocol. 
+ pub async fn reserve_notification<'a>(&'a self, protocol_name: Cow<'static, str>) -> Result, ()> { + let mut lock = self.inner.async_channel.lock().await; + + let poll_ready = future::poll_fn(|cx| lock.poll_ready(cx)).await; + if poll_ready.is_ok() { + Ok(Ready { protocol_name: protocol_name, lock }) + } else { + Err(()) + } + } +} + +/// Notification slot is reserved and the notification can actually be sent. +#[must_use] +#[derive(Debug)] +pub struct Ready<'a> { + /// Guarded channel. The channel inside is guaranteed to not be full. + lock: FuturesMutexGuard<'a, mpsc::Sender>, + /// Name of the protocol. Should match one of the protocols passed at initialization. + protocol_name: Cow<'static, str>, +} + +impl<'a> Ready<'a> { + /// Consumes this slots reservation and actually queues the notification. + /// + /// Returns an error if the substream has been closed. + pub fn send( + mut self, + notification: impl Into> + ) -> Result<(), ()> { + self.lock.start_send(NotificationsSinkMessage::Notification { + protocol_name: self.protocol_name, + message: notification.into(), + }).map_err(|_| ()) + } +} + +/// Error specific to the collection of protocols. +#[derive(Debug, derive_more::Display, derive_more::Error)] +pub enum NotifsHandlerError { + /// Channel of synchronous notifications is full. + SyncNotificationsClogged, + /// Error in legacy protocol. + Legacy(::Error), +} + +impl NotifsHandlerProto { + /// Builds a new handler. + /// + /// `list` is a list of notification protocols names, and the message to send as part of the + /// handshake. At the moment, the message is always the same whether we open a substream + /// ourselves or respond to handshake from the remote. + /// + /// The first protocol in `list` is special-cased as the protocol that contains the handshake + /// to report through the [`NotifsHandlerOut::Open`] event. + /// + /// # Panic + /// + /// - Panics if `list` is empty. 
+ /// + pub fn new( + legacy: RegisteredProtocol, + list: impl Into, Arc>>)>>, + ) -> Self { + let list = list.into(); + assert!(!list.is_empty()); + + let out_handlers = list + .clone() + .into_iter() + .map(|(proto_name, initial_message)| { + (NotifsOutHandlerProto::new(proto_name), initial_message) + }).collect(); + + let in_handlers = list.clone() + .into_iter() + .map(|(proto_name, msg)| (NotifsInHandlerProto::new(proto_name), msg)) + .collect(); + + NotifsHandlerProto { + in_handlers, + out_handlers, + legacy: LegacyProtoHandlerProto::new(legacy), + } + } +} + +impl ProtocolsHandler for NotifsHandler { + type InEvent = NotifsHandlerIn; + type OutEvent = NotifsHandlerOut; + type Error = NotifsHandlerError; + type InboundProtocol = SelectUpgrade, RegisteredProtocol>; + type OutboundProtocol = EitherUpgrade; + // Index within the `out_handlers`; None for legacy + type OutboundOpenInfo = Option; + type InboundOpenInfo = (); + + fn listen_protocol(&self) -> SubstreamProtocol { + let in_handlers = self.in_handlers.iter() + .map(|(h, _)| h.listen_protocol().into_upgrade().1) + .collect::>(); + + let proto = SelectUpgrade::new(in_handlers, self.legacy.listen_protocol().into_upgrade().1); + SubstreamProtocol::new(proto, ()) + } + + fn inject_fully_negotiated_inbound( + &mut self, + out: >::Output, + (): () + ) { + match out { + EitherOutput::First((out, num)) => + self.in_handlers[num].0.inject_fully_negotiated_inbound(out, ()), + EitherOutput::Second(out) => + self.legacy.inject_fully_negotiated_inbound(out, ()), + } + } + + fn inject_fully_negotiated_outbound( + &mut self, + out: >::Output, + num: Self::OutboundOpenInfo + ) { + match (out, num) { + (EitherOutput::First(out), Some(num)) => + self.out_handlers[num].0.inject_fully_negotiated_outbound(out, ()), + (EitherOutput::Second(out), None) => + self.legacy.inject_fully_negotiated_outbound(out, ()), + _ => error!("inject_fully_negotiated_outbound called with wrong parameters"), + } + } + + fn inject_event(&mut 
self, message: NotifsHandlerIn) { + match message { + NotifsHandlerIn::Enable => { + if let EnabledState::Enabled = self.enabled { + debug!("enabling already-enabled handler"); + } + self.enabled = EnabledState::Enabled; + self.legacy.inject_event(LegacyProtoHandlerIn::Enable); + for (handler, initial_message) in &mut self.out_handlers { + // We create `initial_message` on a separate line to be sure that the lock + // is released as soon as possible. + let initial_message = initial_message.read().clone(); + handler.inject_event(NotifsOutHandlerIn::Enable { + initial_message, + }); + } + for num in self.pending_in.drain(..) { + // We create `handshake_message` on a separate line to be sure + // that the lock is released as soon as possible. + let handshake_message = self.in_handlers[num].1.read().clone(); + self.in_handlers[num].0 + .inject_event(NotifsInHandlerIn::Accept(handshake_message)); + } + }, + NotifsHandlerIn::Disable => { + if let EnabledState::Disabled = self.enabled { + debug!("disabling already-disabled handler"); + } + self.legacy.inject_event(LegacyProtoHandlerIn::Disable); + // The notifications protocols start in the disabled state. If we were in the + // "Initial" state, then we shouldn't disable the notifications protocols again. + if self.enabled != EnabledState::Initial { + for (handler, _) in &mut self.out_handlers { + handler.inject_event(NotifsOutHandlerIn::Disable); + } + } + self.enabled = EnabledState::Disabled; + for num in self.pending_in.drain(..) 
{ + self.in_handlers[num].0.inject_event(NotifsInHandlerIn::Refuse); + } + }, + } + } + + fn inject_dial_upgrade_error( + &mut self, + num: Option, + err: ProtocolsHandlerUpgrErr> + ) { + match (err, num) { + (ProtocolsHandlerUpgrErr::Timeout, Some(num)) => + self.out_handlers[num].0.inject_dial_upgrade_error( + (), + ProtocolsHandlerUpgrErr::Timeout + ), + (ProtocolsHandlerUpgrErr::Timeout, None) => + self.legacy.inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timeout), + (ProtocolsHandlerUpgrErr::Timer, Some(num)) => + self.out_handlers[num].0.inject_dial_upgrade_error( + (), + ProtocolsHandlerUpgrErr::Timer + ), + (ProtocolsHandlerUpgrErr::Timer, None) => + self.legacy.inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timer), + (ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)), Some(num)) => + self.out_handlers[num].0.inject_dial_upgrade_error( + (), + ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) + ), + (ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)), None) => + self.legacy.inject_dial_upgrade_error( + (), + ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) + ), + (ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(EitherError::A(err))), Some(num)) => + self.out_handlers[num].0.inject_dial_upgrade_error( + (), + ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)) + ), + (ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(EitherError::B(err))), None) => + self.legacy.inject_dial_upgrade_error( + (), + ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)) + ), + _ => error!("inject_dial_upgrade_error called with bad parameters"), + } + } + + fn connection_keep_alive(&self) -> KeepAlive { + // Iterate over each handler and return the maximum value. 
+ + let mut ret = self.legacy.connection_keep_alive(); + if ret.is_yes() { + return KeepAlive::Yes; + } + + for (handler, _) in &self.in_handlers { + let val = handler.connection_keep_alive(); + if val.is_yes() { + return KeepAlive::Yes; + } + if ret < val { ret = val; } + } + + for (handler, _) in &self.out_handlers { + let val = handler.connection_keep_alive(); + if val.is_yes() { + return KeepAlive::Yes; + } + if ret < val { ret = val; } + } + + ret + } + + fn poll( + &mut self, + cx: &mut Context, + ) -> Poll< + ProtocolsHandlerEvent + > { + if let Some(notifications_sink_rx) = &mut self.notifications_sink_rx { + 'poll_notifs_sink: loop { + // Before we poll the notifications sink receiver, check that all the notification + // channels are ready to send a message. + // TODO: it is planned that in the future we switch to one `NotificationsSink` per + // protocol, in which case each sink should wait only for its corresponding handler + // to be ready, and not all handlers + // see https://github.com/paritytech/substrate/issues/5670 + for (out_handler, _) in &mut self.out_handlers { + match out_handler.poll_ready(cx) { + Poll::Ready(_) => {}, + Poll::Pending => break 'poll_notifs_sink, + } + } + + let message = match notifications_sink_rx.poll_next_unpin(cx) { + Poll::Ready(Some(msg)) => msg, + Poll::Ready(None) | Poll::Pending => break, + }; + + match message { + NotificationsSinkMessage::Notification { + protocol_name, + message + } => { + let mut found_any_with_name = false; + + for (handler, _) in &mut self.out_handlers { + if *handler.protocol_name() == protocol_name { + found_any_with_name = true; + if handler.is_open() { + handler.send_or_discard(message); + continue 'poll_notifs_sink; + } + } + } + + // This code can be reached via the following scenarios: + // + // - User tried to send a notification on a non-existing protocol. 
This + // most likely relates to https://github.com/paritytech/substrate/issues/6827 + // - User tried to send a notification to a peer we're not or no longer + // connected to. This happens in a normal scenario due to the racy nature + // of connections and disconnections, and is benign. + // + // We print a warning in the former condition. + if !found_any_with_name { + log::warn!( + target: "sub-libp2p", + "Tried to send a notification on non-registered protocol: {:?}", + protocol_name + ); + } + } + NotificationsSinkMessage::ForceClose => { + return Poll::Ready(ProtocolsHandlerEvent::Close(NotifsHandlerError::SyncNotificationsClogged)); + } + } + } + } + + // If `self.pending_handshake` is `Some`, we are in a state where the handshake-bearing + // substream (either the legacy substream or the one special-cased as providing the + // handshake) is open but the user isn't aware yet of the substreams being open. + // When that is the case, neither the legacy substream nor the incoming notifications + // substreams should be polled, otherwise there is a risk of receiving messages from them. + if self.pending_handshake.is_none() { + while let Poll::Ready(ev) = self.legacy.poll(cx) { + match ev { + ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol } => + return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: protocol + .map_upgrade(EitherUpgrade::B) + .map_info(|()| None) + }), + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolOpen { + received_handshake, + .. + }) => { + if self.notifications_sink_rx.is_none() { + debug_assert!(self.pending_handshake.is_none()); + self.pending_handshake = Some(received_handshake); + } + cx.waker().wake_by_ref(); + return Poll::Pending; + }, + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolClosed { reason, .. }) => { + // We consciously drop the receivers despite notifications being potentially + // still buffered up. 
+ self.notifications_sink_rx = None; + + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::Closed { endpoint: self.endpoint.clone(), reason } + )) + }, + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomMessage { message }) => { + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::CustomMessage { message } + )) + }, + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::ProtocolError { is_severe, error }) => + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::ProtocolError { is_severe, error } + )), + ProtocolsHandlerEvent::Close(err) => + return Poll::Ready(ProtocolsHandlerEvent::Close(NotifsHandlerError::Legacy(err))), + } + } + } + + for (handler_num, (handler, handshake_message)) in self.in_handlers.iter_mut().enumerate() { + loop { + let poll = if self.notifications_sink_rx.is_some() { + handler.poll(cx) + } else { + handler.poll_process(cx) + }; + + let ev = match poll { + Poll::Ready(e) => e, + Poll::Pending => break, + }; + + match ev { + ProtocolsHandlerEvent::OutboundSubstreamRequest { .. } => + error!("Incoming substream handler tried to open a substream"), + ProtocolsHandlerEvent::Close(err) => void::unreachable(err), + ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::OpenRequest(_)) => + match self.enabled { + EnabledState::Initial => self.pending_in.push(handler_num), + EnabledState::Enabled => { + // We create `handshake_message` on a separate line to be sure + // that the lock is released as soon as possible. 
+ let handshake_message = handshake_message.read().clone(); + handler.inject_event(NotifsInHandlerIn::Accept(handshake_message)) + }, + EnabledState::Disabled => + handler.inject_event(NotifsInHandlerIn::Refuse), + }, + ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed) => {}, + ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Notif(message)) => { + debug_assert!(self.pending_handshake.is_none()); + if self.notifications_sink_rx.is_some() { + let msg = NotifsHandlerOut::Notification { + message, + protocol_name: handler.protocol_name().clone(), + }; + return Poll::Ready(ProtocolsHandlerEvent::Custom(msg)); + } + }, + } + } + } + + for (handler_num, (handler, _)) in self.out_handlers.iter_mut().enumerate() { + while let Poll::Ready(ev) = handler.poll(cx) { + match ev { + ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol } => + return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: protocol + .map_upgrade(EitherUpgrade::A) + .map_info(|()| Some(handler_num)) + }), + ProtocolsHandlerEvent::Close(err) => void::unreachable(err), + + // Opened substream on the handshake-bearing notification protocol. + ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Open { handshake }) + if handler_num == 0 => + { + if self.notifications_sink_rx.is_none() && self.pending_handshake.is_none() { + self.pending_handshake = Some(handshake); + } + }, + + // Nothing to do in response to other notification substreams being opened + // or closed. + ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Open { .. 
}) => {}, + ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed) => {}, + ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Refused) => {}, + } + } + } + + if self.out_handlers.iter().all(|(h, _)| h.is_open() || h.is_refused()) { + if let Some(handshake) = self.pending_handshake.take() { + let (async_tx, async_rx) = mpsc::channel(ASYNC_NOTIFICATIONS_BUFFER_SIZE); + let (sync_tx, sync_rx) = mpsc::channel(SYNC_NOTIFICATIONS_BUFFER_SIZE); + let notifications_sink = NotificationsSink { + inner: Arc::new(NotificationsSinkInner { + async_channel: FuturesMutex::new(async_tx), + sync_channel: Mutex::new(sync_tx), + }), + }; + + debug_assert!(self.notifications_sink_rx.is_none()); + self.notifications_sink_rx = Some(stream::select(async_rx.fuse(), sync_rx.fuse())); + + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::Open { + endpoint: self.endpoint.clone(), + received_handshake: handshake, + notifications_sink + } + )) + } + } + + Poll::Pending + } +} diff --git a/client/network/src/protocol/generic_proto/handler/legacy.rs b/client/network/src/protocol/generic_proto/handler/legacy.rs new file mode 100644 index 0000000000000..d17b5e612daf2 --- /dev/null +++ b/client/network/src/protocol/generic_proto/handler/legacy.rs @@ -0,0 +1,611 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . 
+ +use crate::protocol::generic_proto::upgrade::{RegisteredProtocol, RegisteredProtocolEvent, RegisteredProtocolSubstream}; +use bytes::BytesMut; +use futures::prelude::*; +use futures_timer::Delay; +use libp2p::core::{ConnectedPoint, PeerId, Endpoint}; +use libp2p::core::upgrade::{InboundUpgrade, OutboundUpgrade}; +use libp2p::swarm::{ + ProtocolsHandler, ProtocolsHandlerEvent, + IntoProtocolsHandler, + KeepAlive, + ProtocolsHandlerUpgrErr, + SubstreamProtocol, + NegotiatedSubstream, +}; +use log::{debug, error}; +use smallvec::{smallvec, SmallVec}; +use std::{borrow::Cow, collections::VecDeque, error, fmt, io, mem, time::Duration}; +use std::{pin::Pin, task::{Context, Poll}}; + +/// Implements the `IntoProtocolsHandler` trait of libp2p. +/// +/// Every time a connection with a remote starts, an instance of this struct is created and +/// sent to a background task dedicated to this connection. Once the connection is established, +/// it is turned into a `LegacyProtoHandler`. It then handles all communications that are specific +/// to Substrate on that single connection. +/// +/// Note that there can be multiple instance of this struct simultaneously for same peer, +/// if there are multiple established connections to the peer. +/// +/// ## State of the handler +/// +/// There are six possible states for the handler: +/// +/// - Enabled and open, which is a normal operation. +/// - Enabled and closed, in which case it will try to open substreams. +/// - Disabled and open, in which case it will try to close substreams. +/// - Disabled and closed, in which case the handler is idle. The connection will be +/// garbage-collected after a few seconds if nothing more happens. +/// - Initializing and open. +/// - Initializing and closed, which is the state the handler starts in. +/// +/// The Init/Enabled/Disabled state is entirely controlled by the user by sending `Enable` or +/// `Disable` messages to the handler. 
The handler itself never transitions automatically between +/// these states. For example, if the handler reports a network misbehaviour, it will close the +/// substreams but it is the role of the user to send a `Disabled` event if it wants the connection +/// to close. Otherwise, the handler will try to reopen substreams. +/// +/// The handler starts in the "Initializing" state and must be transitionned to Enabled or Disabled +/// as soon as possible. +/// +/// The Open/Closed state is decided by the handler and is reported with the `CustomProtocolOpen` +/// and `CustomProtocolClosed` events. The `CustomMessage` event can only be generated if the +/// handler is open. +/// +/// ## How it works +/// +/// When the handler is created, it is initially in the `Init` state and waits for either a +/// `Disable` or an `Enable` message from the outer layer. At any time, the outer layer is free to +/// toggle the handler between the disabled and enabled states. +/// +/// When the handler switches to "enabled", it opens a substream and negotiates the protocol named +/// `/substrate/xxx`, where `xxx` is chosen by the user and depends on the chain. +/// +/// For backwards compatibility reasons, when we switch to "enabled" for the first time (while we +/// are still in "init" mode) and we are the connection listener, we don't open a substream. +/// +/// In order the handle the situation where both the remote and us get enabled at the same time, +/// we tolerate multiple substreams open at the same time. Messages are transmitted on an arbitrary +/// substream. The endpoints don't try to agree on a single substream. +/// +/// We consider that we are now "closed" if the remote closes all the existing substreams. +/// Re-opening it can then be performed by closing all active substream and re-opening one. +/// +pub struct LegacyProtoHandlerProto { + /// Configuration for the protocol upgrade to negotiate. 
+ protocol: RegisteredProtocol, +} + +impl LegacyProtoHandlerProto { + /// Builds a new `LegacyProtoHandlerProto`. + pub fn new(protocol: RegisteredProtocol) -> Self { + LegacyProtoHandlerProto { + protocol, + } + } +} + +impl IntoProtocolsHandler for LegacyProtoHandlerProto { + type Handler = LegacyProtoHandler; + + fn inbound_protocol(&self) -> RegisteredProtocol { + self.protocol.clone() + } + + fn into_handler(self, remote_peer_id: &PeerId, connected_point: &ConnectedPoint) -> Self::Handler { + LegacyProtoHandler { + protocol: self.protocol, + endpoint: connected_point.clone(), + remote_peer_id: remote_peer_id.clone(), + state: ProtocolState::Init { + substreams: SmallVec::new(), + init_deadline: Delay::new(Duration::from_secs(20)) + }, + events_queue: VecDeque::new(), + } + } +} + +/// The actual handler once the connection has been established. +pub struct LegacyProtoHandler { + /// Configuration for the protocol upgrade to negotiate. + protocol: RegisteredProtocol, + + /// State of the communications with the remote. + state: ProtocolState, + + /// Identifier of the node we're talking to. Used only for logging purposes and shouldn't have + /// any influence on the behaviour. + remote_peer_id: PeerId, + + /// Whether we are the connection dialer or listener. Used to determine who, between the local + /// node and the remote node, has priority. + endpoint: ConnectedPoint, + + /// Queue of events to send to the outside. + /// + /// This queue must only ever be modified to insert elements at the back, or remove the first + /// element. + events_queue: VecDeque>, +} + +/// State of the handler. +enum ProtocolState { + /// Waiting for the behaviour to tell the handler whether it is enabled or disabled. + Init { + /// List of substreams opened by the remote but that haven't been processed yet. + /// For each substream, also includes the handshake message that we have received. 
+ substreams: SmallVec<[(RegisteredProtocolSubstream, Vec); 6]>, + /// Deadline after which the initialization is abnormally long. + init_deadline: Delay, + }, + + /// Handler is opening a substream in order to activate itself. + /// If we are in this state, we haven't sent any `CustomProtocolOpen` yet. + Opening { + /// Deadline after which the opening is abnormally long. + deadline: Delay, + }, + + /// Normal operating mode. Contains the substreams that are open. + /// If we are in this state, we have sent a `CustomProtocolOpen` message to the outside. + Normal { + /// The substreams where bidirectional communications happen. + substreams: SmallVec<[RegisteredProtocolSubstream; 4]>, + /// Contains substreams which are being shut down. + shutdown: SmallVec<[RegisteredProtocolSubstream; 4]>, + }, + + /// We are disabled. Contains substreams that are being closed. + /// If we are in this state, either we have sent a `CustomProtocolClosed` message to the + /// outside or we have never sent any `CustomProtocolOpen` in the first place. + Disabled { + /// List of substreams to shut down. + shutdown: SmallVec<[RegisteredProtocolSubstream; 6]>, + + /// If true, we should reactivate the handler after all the substreams in `shutdown` have + /// been closed. + /// + /// Since we don't want to mix old and new substreams, we wait for all old substreams to + /// be closed before opening any new one. + reenable: bool, + }, + + /// In this state, we don't care about anything anymore and need to kill the connection as soon + /// as possible. + KillAsap, + + /// We sometimes temporarily switch to this state during processing. If we are in this state + /// at the beginning of a method, that means something bad happened in the source code. + Poisoned, +} + +/// Event that can be received by a `LegacyProtoHandler`. +#[derive(Debug)] +pub enum LegacyProtoHandlerIn { + /// The node should start using custom protocols. + Enable, + + /// The node should stop using custom protocols. 
+ Disable, +} + +/// Event that can be emitted by a `LegacyProtoHandler`. +#[derive(Debug)] +pub enum LegacyProtoHandlerOut { + /// Opened a custom protocol with the remote. + CustomProtocolOpen { + /// Version of the protocol that has been opened. + version: u8, + /// Handshake message that has been sent to us. + /// This is normally a "Status" message, but this out of the concern of this code. + received_handshake: Vec, + }, + + /// Closed a custom protocol with the remote. + CustomProtocolClosed { + /// Reason why the substream closed, for diagnostic purposes. + reason: Cow<'static, str>, + }, + + /// Receives a message on a custom protocol substream. + CustomMessage { + /// Message that has been received. + message: BytesMut, + }, + + /// An error has happened on the protocol level with this node. + ProtocolError { + /// If true the error is severe, such as a protocol violation. + is_severe: bool, + /// The error that happened. + error: Box, + }, +} + +impl LegacyProtoHandler { + /// Enables the handler. + fn enable(&mut self) { + self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) { + ProtocolState::Poisoned => { + error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", + self.remote_peer_id); + ProtocolState::Poisoned + } + + ProtocolState::Init { substreams: mut incoming, .. } => { + if incoming.is_empty() { + if let ConnectedPoint::Dialer { .. 
} = self.endpoint { + self.events_queue.push_back(ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(self.protocol.clone(), ()), + }); + } + ProtocolState::Opening { + deadline: Delay::new(Duration::from_secs(60)) + } + } else { + let event = LegacyProtoHandlerOut::CustomProtocolOpen { + version: incoming[0].0.protocol_version(), + received_handshake: mem::replace(&mut incoming[0].1, Vec::new()), + }; + self.events_queue.push_back(ProtocolsHandlerEvent::Custom(event)); + ProtocolState::Normal { + substreams: incoming.into_iter().map(|(s, _)| s).collect(), + shutdown: SmallVec::new() + } + } + } + + st @ ProtocolState::KillAsap => st, + st @ ProtocolState::Opening { .. } => st, + st @ ProtocolState::Normal { .. } => st, + ProtocolState::Disabled { shutdown, .. } => { + ProtocolState::Disabled { shutdown, reenable: true } + } + } + } + + /// Disables the handler. + fn disable(&mut self) { + self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) { + ProtocolState::Poisoned => { + error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", + self.remote_peer_id); + ProtocolState::Poisoned + } + + ProtocolState::Init { substreams: shutdown, .. } => { + let mut shutdown = shutdown.into_iter().map(|(s, _)| s).collect::>(); + for s in &mut shutdown { + s.shutdown(); + } + ProtocolState::Disabled { shutdown, reenable: false } + } + + ProtocolState::Opening { .. } | ProtocolState::Normal { .. } => + // At the moment, if we get disabled while things were working, we kill the entire + // connection in order to force a reset of the state. + // This is obviously an extremely shameful way to do things, but at the time of + // the writing of this comment, the networking works very poorly and a solution + // needs to be found. + ProtocolState::KillAsap, + + ProtocolState::Disabled { shutdown, .. 
} => + ProtocolState::Disabled { shutdown, reenable: false }, + + ProtocolState::KillAsap => ProtocolState::KillAsap, + }; + } + + /// Polls the state for events. Optionally returns an event to produce. + #[must_use] + fn poll_state(&mut self, cx: &mut Context) + -> Option> { + match mem::replace(&mut self.state, ProtocolState::Poisoned) { + ProtocolState::Poisoned => { + error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", + self.remote_peer_id); + self.state = ProtocolState::Poisoned; + None + } + + ProtocolState::Init { substreams, mut init_deadline } => { + match Pin::new(&mut init_deadline).poll(cx) { + Poll::Ready(()) => { + error!(target: "sub-libp2p", "Handler initialization process is too long \ + with {:?}", self.remote_peer_id); + self.state = ProtocolState::KillAsap; + }, + Poll::Pending => { + self.state = ProtocolState::Init { substreams, init_deadline }; + } + } + + None + } + + ProtocolState::Opening { mut deadline } => { + match Pin::new(&mut deadline).poll(cx) { + Poll::Ready(()) => { + let event = LegacyProtoHandlerOut::ProtocolError { + is_severe: true, + error: "Timeout when opening protocol".to_string().into(), + }; + self.state = ProtocolState::KillAsap; + Some(ProtocolsHandlerEvent::Custom(event)) + }, + Poll::Pending => { + self.state = ProtocolState::Opening { deadline }; + None + }, + } + } + + ProtocolState::Normal { mut substreams, mut shutdown } => { + for n in (0..substreams.len()).rev() { + let mut substream = substreams.swap_remove(n); + match Pin::new(&mut substream).poll_next(cx) { + Poll::Pending => substreams.push(substream), + Poll::Ready(Some(Ok(RegisteredProtocolEvent::Message(message)))) => { + let event = LegacyProtoHandlerOut::CustomMessage { + message + }; + substreams.push(substream); + self.state = ProtocolState::Normal { substreams, shutdown }; + return Some(ProtocolsHandlerEvent::Custom(event)); + }, + Poll::Ready(Some(Ok(RegisteredProtocolEvent::Clogged))) => { + shutdown.push(substream); + if 
substreams.is_empty() { + let event = LegacyProtoHandlerOut::CustomProtocolClosed { + reason: "Legacy substream clogged".into(), + }; + self.state = ProtocolState::Disabled { + shutdown: shutdown.into_iter().collect(), + reenable: true + }; + return Some(ProtocolsHandlerEvent::Custom(event)); + } + } + Poll::Ready(None) => { + shutdown.push(substream); + if substreams.is_empty() { + let event = LegacyProtoHandlerOut::CustomProtocolClosed { + reason: "All substreams have been closed by the remote".into(), + }; + self.state = ProtocolState::Disabled { + shutdown: shutdown.into_iter().collect(), + reenable: true + }; + return Some(ProtocolsHandlerEvent::Custom(event)); + } + } + Poll::Ready(Some(Err(err))) => { + if substreams.is_empty() { + let event = LegacyProtoHandlerOut::CustomProtocolClosed { + reason: format!("Error on the last substream: {:?}", err).into(), + }; + self.state = ProtocolState::Disabled { + shutdown: shutdown.into_iter().collect(), + reenable: true + }; + return Some(ProtocolsHandlerEvent::Custom(event)); + } else { + debug!(target: "sub-libp2p", "Error on extra substream: {:?}", err); + } + } + } + } + + // This code is reached is none if and only if none of the substreams are in a ready state. + self.state = ProtocolState::Normal { substreams, shutdown }; + None + } + + ProtocolState::Disabled { mut shutdown, reenable } => { + shutdown_list(&mut shutdown, cx); + // If `reenable` is `true`, that means we should open the substreams system again + // after all the substreams are closed. 
+ if reenable && shutdown.is_empty() { + self.state = ProtocolState::Opening { + deadline: Delay::new(Duration::from_secs(60)) + }; + Some(ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(self.protocol.clone(), ()), + }) + } else { + self.state = ProtocolState::Disabled { shutdown, reenable }; + None + } + } + + ProtocolState::KillAsap => None, + } + } + + /// Called by `inject_fully_negotiated_inbound` and `inject_fully_negotiated_outbound`. + fn inject_fully_negotiated( + &mut self, + mut substream: RegisteredProtocolSubstream, + received_handshake: Vec, + ) { + self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) { + ProtocolState::Poisoned => { + error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", + self.remote_peer_id); + ProtocolState::Poisoned + } + + ProtocolState::Init { mut substreams, init_deadline } => { + if substream.endpoint() == Endpoint::Dialer { + error!(target: "sub-libp2p", "Opened dialing substream with {:?} before \ + initialization", self.remote_peer_id); + } + substreams.push((substream, received_handshake)); + ProtocolState::Init { substreams, init_deadline } + } + + ProtocolState::Opening { .. } => { + let event = LegacyProtoHandlerOut::CustomProtocolOpen { + version: substream.protocol_version(), + received_handshake, + }; + self.events_queue.push_back(ProtocolsHandlerEvent::Custom(event)); + ProtocolState::Normal { + substreams: smallvec![substream], + shutdown: SmallVec::new() + } + } + + ProtocolState::Normal { substreams: mut existing, shutdown } => { + existing.push(substream); + ProtocolState::Normal { substreams: existing, shutdown } + } + + ProtocolState::Disabled { mut shutdown, .. 
} => { + substream.shutdown(); + shutdown.push(substream); + ProtocolState::Disabled { shutdown, reenable: false } + } + + ProtocolState::KillAsap => ProtocolState::KillAsap, + }; + } +} + +impl ProtocolsHandler for LegacyProtoHandler { + type InEvent = LegacyProtoHandlerIn; + type OutEvent = LegacyProtoHandlerOut; + type Error = ConnectionKillError; + type InboundProtocol = RegisteredProtocol; + type OutboundProtocol = RegisteredProtocol; + type OutboundOpenInfo = (); + type InboundOpenInfo = (); + + fn listen_protocol(&self) -> SubstreamProtocol { + SubstreamProtocol::new(self.protocol.clone(), ()) + } + + fn inject_fully_negotiated_inbound( + &mut self, + (substream, handshake): >::Output, + (): () + ) { + self.inject_fully_negotiated(substream, handshake); + } + + fn inject_fully_negotiated_outbound( + &mut self, + (substream, handshake): >::Output, + _: Self::OutboundOpenInfo + ) { + self.inject_fully_negotiated(substream, handshake); + } + + fn inject_event(&mut self, message: LegacyProtoHandlerIn) { + match message { + LegacyProtoHandlerIn::Disable => self.disable(), + LegacyProtoHandlerIn::Enable => self.enable(), + } + } + + fn inject_dial_upgrade_error(&mut self, _: (), err: ProtocolsHandlerUpgrErr) { + let is_severe = match err { + ProtocolsHandlerUpgrErr::Upgrade(_) => true, + _ => false, + }; + + self.events_queue.push_back(ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::ProtocolError { + is_severe, + error: Box::new(err), + })); + } + + fn connection_keep_alive(&self) -> KeepAlive { + match self.state { + ProtocolState::Init { .. } | ProtocolState::Opening { .. } | + ProtocolState::Normal { .. } => KeepAlive::Yes, + ProtocolState::Disabled { .. } | ProtocolState::Poisoned | + ProtocolState::KillAsap => KeepAlive::No, + } + } + + fn poll( + &mut self, + cx: &mut Context, + ) -> Poll< + ProtocolsHandlerEvent + > { + // Flush the events queue if necessary. 
+ if let Some(event) = self.events_queue.pop_front() { + return Poll::Ready(event) + } + + // Kill the connection if needed. + if let ProtocolState::KillAsap = self.state { + return Poll::Ready(ProtocolsHandlerEvent::Close(ConnectionKillError)); + } + + // Process all the substreams. + if let Some(event) = self.poll_state(cx) { + return Poll::Ready(event) + } + + Poll::Pending + } +} + +impl fmt::Debug for LegacyProtoHandler { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + f.debug_struct("LegacyProtoHandler") + .finish() + } +} + +/// Given a list of substreams, tries to shut them down. The substreams that have been successfully +/// shut down are removed from the list. +fn shutdown_list + (list: &mut SmallVec>>, + cx: &mut Context) +{ + 'outer: for n in (0..list.len()).rev() { + let mut substream = list.swap_remove(n); + loop { + match substream.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(_))) => {} + Poll::Pending => break, + Poll::Ready(Some(Err(_))) | Poll::Ready(None) => continue 'outer, + } + } + list.push(substream); + } +} + +/// Error returned when switching from normal to disabled. +#[derive(Debug)] +pub struct ConnectionKillError; + +impl error::Error for ConnectionKillError { +} + +impl fmt::Display for ConnectionKillError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Connection kill when switching from normal to disabled") + } +} diff --git a/client/network/src/protocol/generic_proto/handler/notif_in.rs b/client/network/src/protocol/generic_proto/handler/notif_in.rs new file mode 100644 index 0000000000000..d3b505e0de3e2 --- /dev/null +++ b/client/network/src/protocol/generic_proto/handler/notif_in.rs @@ -0,0 +1,293 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Implementations of the `IntoProtocolsHandler` and `ProtocolsHandler` traits for ingoing +//! substreams for a single gossiping protocol. +//! +//! > **Note**: Each instance corresponds to a single protocol. In order to support multiple +//! > protocols, you need to create multiple instances and group them. +//! + +use crate::protocol::generic_proto::upgrade::{NotificationsIn, NotificationsInSubstream}; +use bytes::BytesMut; +use futures::prelude::*; +use libp2p::core::{ConnectedPoint, PeerId}; +use libp2p::core::upgrade::{DeniedUpgrade, InboundUpgrade, OutboundUpgrade}; +use libp2p::swarm::{ + ProtocolsHandler, ProtocolsHandlerEvent, + IntoProtocolsHandler, + KeepAlive, + ProtocolsHandlerUpgrErr, + SubstreamProtocol, + NegotiatedSubstream, +}; +use log::{error, warn}; +use std::{borrow::Cow, collections::VecDeque, fmt, pin::Pin, task::{Context, Poll}}; + +/// Implements the `IntoProtocolsHandler` trait of libp2p. +/// +/// Every time a connection with a remote starts, an instance of this struct is created and +/// sent to a background task dedicated to this connection. Once the connection is established, +/// it is turned into a [`NotifsInHandler`]. +pub struct NotifsInHandlerProto { + /// Configuration for the protocol upgrade to negotiate. 
+ in_protocol: NotificationsIn, +} + +/// The actual handler once the connection has been established. +pub struct NotifsInHandler { + /// Configuration for the protocol upgrade to negotiate for inbound substreams. + in_protocol: NotificationsIn, + + /// Substream that is open with the remote. + substream: Option>, + + /// If the substream is opened and closed rapidly, we can emit several `OpenRequest` and + /// `Closed` messages in a row without the handler having time to respond with `Accept` or + /// `Refuse`. + /// + /// In order to keep the state consistent, we increment this variable every time an + /// `OpenRequest` is emitted and decrement it every time an `Accept` or `Refuse` is received. + pending_accept_refuses: usize, + + /// Queue of events to send to the outside. + /// + /// This queue is only ever modified to insert elements at the back, or remove the first + /// element. + events_queue: VecDeque>, +} + +/// Event that can be received by a `NotifsInHandler`. +#[derive(Debug, Clone)] +pub enum NotifsInHandlerIn { + /// Can be sent back as a response to an `OpenRequest`. Contains the status message to send + /// to the remote. + /// + /// After sending this to the handler, the substream is now considered open and `Notif` events + /// can be received. + Accept(Vec), + + /// Can be sent back as a response to an `OpenRequest`. + Refuse, +} + +/// Event that can be emitted by a `NotifsInHandler`. +#[derive(Debug)] +pub enum NotifsInHandlerOut { + /// The remote wants to open a substream. Contains the initial message sent by the remote + /// when the substream has been opened. + /// + /// Every time this event is emitted, a corresponding `Accepted` or `Refused` **must** be sent + /// back even if a `Closed` is received. + OpenRequest(Vec), + + /// The notifications substream has been closed by the remote. In order to avoid race + /// conditions, this does **not** cancel any previously-sent `OpenRequest`. 
+ Closed, + + /// Received a message on the notifications substream. + /// + /// Can only happen after an `Accept` and before a `Closed`. + Notif(BytesMut), +} + +impl NotifsInHandlerProto { + /// Builds a new `NotifsInHandlerProto`. + pub fn new( + protocol_name: impl Into> + ) -> Self { + NotifsInHandlerProto { + in_protocol: NotificationsIn::new(protocol_name), + } + } +} + +impl IntoProtocolsHandler for NotifsInHandlerProto { + type Handler = NotifsInHandler; + + fn inbound_protocol(&self) -> NotificationsIn { + self.in_protocol.clone() + } + + fn into_handler(self, _: &PeerId, _: &ConnectedPoint) -> Self::Handler { + NotifsInHandler { + in_protocol: self.in_protocol, + substream: None, + pending_accept_refuses: 0, + events_queue: VecDeque::new(), + } + } +} + +impl NotifsInHandler { + /// Returns the name of the protocol that we accept. + pub fn protocol_name(&self) -> &Cow<'static, str> { + self.in_protocol.protocol_name() + } + + /// Equivalent to the `poll` method of `ProtocolsHandler`, except that it is guaranteed to + /// never generate [`NotifsInHandlerOut::Notif`]. + /// + /// Use this method in situations where it is not desirable to receive events but still + /// necessary to drive any potential incoming handshake or request. 
+ pub fn poll_process( + &mut self, + cx: &mut Context + ) -> Poll< + ProtocolsHandlerEvent + > { + if let Some(event) = self.events_queue.pop_front() { + return Poll::Ready(event) + } + + match self.substream.as_mut().map(|s| NotificationsInSubstream::poll_process(Pin::new(s), cx)) { + None | Some(Poll::Pending) => {}, + Some(Poll::Ready(Ok(v))) => match v {}, + Some(Poll::Ready(Err(_))) => { + self.substream = None; + return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed)); + }, + } + + Poll::Pending + } +} + +impl ProtocolsHandler for NotifsInHandler { + type InEvent = NotifsInHandlerIn; + type OutEvent = NotifsInHandlerOut; + type Error = void::Void; + type InboundProtocol = NotificationsIn; + type OutboundProtocol = DeniedUpgrade; + type OutboundOpenInfo = (); + type InboundOpenInfo = (); + + fn listen_protocol(&self) -> SubstreamProtocol { + SubstreamProtocol::new(self.in_protocol.clone(), ()) + } + + fn inject_fully_negotiated_inbound( + &mut self, + (msg, proto): >::Output, + (): () + ) { + // If a substream already exists, we drop it and replace it with the new incoming one. + if self.substream.is_some() { + self.events_queue.push_back(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed)); + } + + // Note that we drop the existing substream, which will send an equivalent to a TCP "RST" + // to the remote and force-close the substream. It might seem like an unclean way to get + // rid of a substream. However, keep in mind that it is invalid for the remote to open + // multiple such substreams, and therefore sending a "RST" is not an incorrect thing to do. 
+ self.substream = Some(proto); + + self.events_queue.push_back(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::OpenRequest(msg))); + self.pending_accept_refuses = self.pending_accept_refuses + .checked_add(1) + .unwrap_or_else(|| { + error!(target: "sub-libp2p", "Overflow in pending_accept_refuses"); + usize::max_value() + }); + } + + fn inject_fully_negotiated_outbound( + &mut self, + out: >::Output, + _: Self::OutboundOpenInfo + ) { + // We never emit any outgoing substream. + void::unreachable(out) + } + + fn inject_event(&mut self, message: NotifsInHandlerIn) { + self.pending_accept_refuses = match self.pending_accept_refuses.checked_sub(1) { + Some(v) => v, + None => { + error!( + target: "sub-libp2p", + "Inconsistent state: received Accept/Refuse when no pending request exists" + ); + return; + } + }; + + // If we send multiple `OpenRequest`s in a row, we will receive back multiple + // `Accept`/`Refuse` messages. All of them are obsolete except the last one. + if self.pending_accept_refuses != 0 { + return; + } + + match (message, self.substream.as_mut()) { + (NotifsInHandlerIn::Accept(message), Some(sub)) => sub.send_handshake(message), + (NotifsInHandlerIn::Accept(_), None) => {}, + (NotifsInHandlerIn::Refuse, _) => self.substream = None, + } + } + + fn inject_dial_upgrade_error(&mut self, _: (), _: ProtocolsHandlerUpgrErr) { + error!(target: "sub-libp2p", "Received dial upgrade error in inbound-only handler"); + } + + fn connection_keep_alive(&self) -> KeepAlive { + if self.substream.is_some() { + KeepAlive::Yes + } else { + KeepAlive::No + } + } + + fn poll( + &mut self, + cx: &mut Context, + ) -> Poll< + ProtocolsHandlerEvent + > { + // Flush the events queue if necessary. 
+ if let Some(event) = self.events_queue.pop_front() { + return Poll::Ready(event) + } + + match self.substream.as_mut().map(|s| Stream::poll_next(Pin::new(s), cx)) { + None | Some(Poll::Pending) => {}, + Some(Poll::Ready(Some(Ok(msg)))) => { + if self.pending_accept_refuses != 0 { + warn!( + target: "sub-libp2p", + "Bad state in inbound-only handler: notif before accepting substream" + ); + } + return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Notif(msg))) + }, + Some(Poll::Ready(None)) | Some(Poll::Ready(Some(Err(_)))) => { + self.substream = None; + return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed)); + }, + } + + Poll::Pending + } +} + +impl fmt::Debug for NotifsInHandler { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + f.debug_struct("NotifsInHandler") + .field("substream_open", &self.substream.is_some()) + .finish() + } +} diff --git a/client/network/src/protocol/generic_proto/handler/notif_out.rs b/client/network/src/protocol/generic_proto/handler/notif_out.rs new file mode 100644 index 0000000000000..414e62c0d135f --- /dev/null +++ b/client/network/src/protocol/generic_proto/handler/notif_out.rs @@ -0,0 +1,444 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! 
Implementations of the `IntoProtocolsHandler` and `ProtocolsHandler` traits for outgoing +//! substreams of a single gossiping protocol. +//! +//! > **Note**: Each instance corresponds to a single protocol. In order to support multiple +//! > protocols, you need to create multiple instances and group them. +//! + +use crate::protocol::generic_proto::upgrade::{NotificationsOut, NotificationsOutSubstream, NotificationsHandshakeError}; +use futures::prelude::*; +use libp2p::core::{ConnectedPoint, PeerId}; +use libp2p::core::upgrade::{DeniedUpgrade, InboundUpgrade, OutboundUpgrade}; +use libp2p::swarm::{ + ProtocolsHandler, ProtocolsHandlerEvent, + IntoProtocolsHandler, + KeepAlive, + ProtocolsHandlerUpgrErr, + SubstreamProtocol, + NegotiatedSubstream, +}; +use log::{debug, warn, error}; +use std::{ + borrow::Cow, collections::VecDeque, fmt, mem, pin::Pin, task::{Context, Poll, Waker}, + time::Duration +}; +use wasm_timer::Instant; + +/// Maximum duration to open a substream and receive the handshake message. After that, we +/// consider that we failed to open the substream. +const OPEN_TIMEOUT: Duration = Duration::from_secs(10); +/// After successfully establishing a connection with the remote, we keep the connection open for +/// at least this amount of time in order to give the rest of the code the chance to notify us to +/// open substreams. +const INITIAL_KEEPALIVE_TIME: Duration = Duration::from_secs(5); + +/// Implements the `IntoProtocolsHandler` trait of libp2p. +/// +/// Every time a connection with a remote starts, an instance of this struct is created and +/// sent to a background task dedicated to this connection. Once the connection is established, +/// it is turned into a [`NotifsOutHandler`]. +/// +/// See the documentation of [`NotifsOutHandler`] for more information. +pub struct NotifsOutHandlerProto { + /// Name of the protocol to negotiate. 
+ protocol_name: Cow<'static, str>, +} + +impl NotifsOutHandlerProto { + /// Builds a new [`NotifsOutHandlerProto`]. Will use the given protocol name for the + /// notifications substream. + pub fn new(protocol_name: impl Into>) -> Self { + NotifsOutHandlerProto { + protocol_name: protocol_name.into(), + } + } +} + +impl IntoProtocolsHandler for NotifsOutHandlerProto { + type Handler = NotifsOutHandler; + + fn inbound_protocol(&self) -> DeniedUpgrade { + DeniedUpgrade + } + + fn into_handler(self, _: &PeerId, _: &ConnectedPoint) -> Self::Handler { + NotifsOutHandler { + protocol_name: self.protocol_name, + when_connection_open: Instant::now(), + state: State::Disabled, + events_queue: VecDeque::new(), + } + } +} + +/// Handler for an outbound notification substream. +/// +/// When a connection is established, this handler starts in the "disabled" state, meaning that +/// no substream will be open. +/// +/// One can try open a substream by sending an [`NotifsOutHandlerIn::Enable`] message to the +/// handler. Once done, the handler will try to establish then maintain an outbound substream with +/// the remote for the purpose of sending notifications to it. +pub struct NotifsOutHandler { + /// Name of the protocol to negotiate. + protocol_name: Cow<'static, str>, + + /// Relationship with the node we're connected to. + state: State, + + /// When the connection with the remote has been successfully established. + when_connection_open: Instant, + + /// Queue of events to send to the outside. + /// + /// This queue must only ever be modified to insert elements at the back, or remove the first + /// element. + events_queue: VecDeque>, +} + +/// Our relationship with the node we're connected to. +enum State { + /// The handler is disabled and idle. No substream is open. + Disabled, + + /// The handler is disabled. A substream is still open and needs to be closed. 
+ /// + /// > **Important**: Having this state means that `poll_close` has been called at least once, + /// > but the `Sink` API is unclear about whether or not the stream can then + /// > be recovered. Because of that, we must never switch from the + /// > `DisabledOpen` state to the `Open` state while keeping the same substream. + DisabledOpen(NotificationsOutSubstream), + + /// The handler is disabled but we are still trying to open a substream with the remote. + /// + /// If the handler gets enabled again, we can immediately switch to `Opening`. + DisabledOpening, + + /// The handler is enabled and we are trying to open a substream with the remote. + Opening { + /// The initial message that we sent. Necessary if we need to re-open a substream. + initial_message: Vec, + }, + + /// The handler is enabled. We have tried opening a substream in the past but the remote + /// refused it. + Refused, + + /// The handler is enabled and substream is open. + Open { + /// Substream that is currently open. + substream: NotificationsOutSubstream, + /// Waker for the last task that got `Poll::Pending` from `poll_ready`, to notify + /// when the open substream closes due to being disabled or encountering an + /// error, i.e. to notify the task as soon as the substream becomes unavailable, + /// without waiting for an underlying I/O task wakeup. + close_waker: Option, + /// The initial message that we sent. Necessary if we need to re-open a substream. + initial_message: Vec, + }, + + /// Poisoned state. Shouldn't be found in the wild. + Poisoned, +} + +/// Event that can be received by a `NotifsOutHandler`. +#[derive(Debug)] +pub enum NotifsOutHandlerIn { + /// Enables the notifications substream for this node. The handler will try to maintain a + /// substream with the remote. + Enable { + /// Initial message to send to remote nodes when we open substreams. + initial_message: Vec, + }, + + /// Disables the notifications substream for this node. This is the default state. 
+ Disable, +} + +/// Event that can be emitted by a `NotifsOutHandler`. +#[derive(Debug)] +pub enum NotifsOutHandlerOut { + /// The notifications substream has been accepted by the remote. + Open { + /// Handshake message sent by the remote after we opened the substream. + handshake: Vec, + }, + + /// The notifications substream has been closed by the remote. + Closed, + + /// We tried to open a notifications substream, but the remote refused it. + /// + /// Can only happen if we're in a closed state. + Refused, +} + +impl NotifsOutHandler { + /// Returns true if the substream is currently open. + pub fn is_open(&self) -> bool { + match &self.state { + State::Disabled => false, + State::DisabledOpening => false, + State::DisabledOpen(_) => true, + State::Opening { .. } => false, + State::Refused => false, + State::Open { .. } => true, + State::Poisoned => false, + } + } + + /// Returns `true` if there has been an attempt to open the substream, but the remote refused + /// the substream. + /// + /// Always returns `false` if the handler is in a disabled state. + pub fn is_refused(&self) -> bool { + match &self.state { + State::Disabled => false, + State::DisabledOpening => false, + State::DisabledOpen(_) => false, + State::Opening { .. } => false, + State::Refused => true, + State::Open { .. } => false, + State::Poisoned => false, + } + } + + /// Returns the name of the protocol that we negotiate. + pub fn protocol_name(&self) -> &Cow<'static, str> { + &self.protocol_name + } + + /// Polls whether the outbound substream is ready to send a notification. + /// + /// - Returns `Poll::Pending` if the substream is open but not ready to send a notification. + /// - Returns `Poll::Ready(true)` if the substream is ready to send a notification. + /// - Returns `Poll::Ready(false)` if the substream is closed. + /// + pub fn poll_ready(&mut self, cx: &mut Context) -> Poll { + if let State::Open { substream, close_waker, .. 
} = &mut self.state { + match substream.poll_ready_unpin(cx) { + Poll::Ready(Ok(())) => Poll::Ready(true), + Poll::Ready(Err(_)) => Poll::Ready(false), + Poll::Pending => { + *close_waker = Some(cx.waker().clone()); + Poll::Pending + } + } + } else { + Poll::Ready(false) + } + } + + /// Sends out a notification. + /// + /// If the substream is closed, or not ready to send out a notification yet, then the + /// notification is silently discarded. + /// + /// You are encouraged to call [`NotifsOutHandler::poll_ready`] beforehand to determine + /// whether this will succeed. If `Poll::Ready(true)` is returned, then this method will send + /// out a notification. + pub fn send_or_discard(&mut self, notification: Vec) { + if let State::Open { substream, .. } = &mut self.state { + let _ = substream.start_send_unpin(notification); + } + } +} + +impl ProtocolsHandler for NotifsOutHandler { + type InEvent = NotifsOutHandlerIn; + type OutEvent = NotifsOutHandlerOut; + type Error = void::Void; + type InboundProtocol = DeniedUpgrade; + type OutboundProtocol = NotificationsOut; + type OutboundOpenInfo = (); + type InboundOpenInfo = (); + + fn listen_protocol(&self) -> SubstreamProtocol { + SubstreamProtocol::new(DeniedUpgrade, ()) + } + + fn inject_fully_negotiated_inbound( + &mut self, + proto: >::Output, + (): () + ) { + // We should never reach here. `proto` is a `Void`. + void::unreachable(proto) + } + + fn inject_fully_negotiated_outbound( + &mut self, + (handshake_msg, substream): >::Output, + _: () + ) { + match mem::replace(&mut self.state, State::Poisoned) { + State::Opening { initial_message } => { + let ev = NotifsOutHandlerOut::Open { handshake: handshake_msg }; + self.events_queue.push_back(ProtocolsHandlerEvent::Custom(ev)); + self.state = State::Open { substream, initial_message, close_waker: None }; + }, + // If the handler was disabled while we were negotiating the protocol, immediately + // close it. 
+ State::DisabledOpening => self.state = State::DisabledOpen(substream), + + // Any other situation should never happen. + State::Disabled | State::Refused | State::Open { .. } | State::DisabledOpen(_) => + error!("☎️ State mismatch in notifications handler: substream already open"), + State::Poisoned => error!("☎️ Notifications handler in a poisoned state"), + } + } + + fn inject_event(&mut self, message: NotifsOutHandlerIn) { + match message { + NotifsOutHandlerIn::Enable { initial_message } => { + match mem::replace(&mut self.state, State::Poisoned) { + State::Disabled => { + let proto = NotificationsOut::new(self.protocol_name.clone(), initial_message.clone()); + self.events_queue.push_back(ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(proto, ()).with_timeout(OPEN_TIMEOUT), + }); + self.state = State::Opening { initial_message }; + }, + State::DisabledOpening => self.state = State::Opening { initial_message }, + State::DisabledOpen(mut sub) => { + // As documented above, in this state we have already called `poll_close` + // once on the substream, and it is unclear whether the substream can then + // be recovered. When in doubt, let's drop the existing substream and + // open a new one. + if sub.close().now_or_never().is_none() { + warn!( + target: "sub-libp2p", + "📞 Improperly closed outbound notifications substream" + ); + } + + let proto = NotificationsOut::new(self.protocol_name.clone(), initial_message.clone()); + self.events_queue.push_back(ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(proto, ()).with_timeout(OPEN_TIMEOUT), + }); + self.state = State::Opening { initial_message }; + }, + st @ State::Opening { .. } | st @ State::Refused | st @ State::Open { .. 
} => { + debug!(target: "sub-libp2p", + "Tried to enable notifications handler that was already enabled"); + self.state = st; + } + State::Poisoned => error!("Notifications handler in a poisoned state"), + } + } + + NotifsOutHandlerIn::Disable => { + match mem::replace(&mut self.state, State::Poisoned) { + st @ State::Disabled | st @ State::DisabledOpen(_) | st @ State::DisabledOpening => { + debug!(target: "sub-libp2p", + "Tried to disable notifications handler that was already disabled"); + self.state = st; + } + State::Opening { .. } => self.state = State::DisabledOpening, + State::Refused => self.state = State::Disabled, + State::Open { substream, close_waker, .. } => { + if let Some(close_waker) = close_waker { + close_waker.wake(); + } + self.state = State::DisabledOpen(substream) + }, + State::Poisoned => error!("☎️ Notifications handler in a poisoned state"), + } + } + } + } + + fn inject_dial_upgrade_error(&mut self, _: (), _: ProtocolsHandlerUpgrErr) { + match mem::replace(&mut self.state, State::Poisoned) { + State::Disabled => {}, + State::DisabledOpen(_) | State::Refused | State::Open { .. } => + error!("☎️ State mismatch in NotificationsOut"), + State::Opening { .. } => { + self.state = State::Refused; + let ev = NotifsOutHandlerOut::Refused; + self.events_queue.push_back(ProtocolsHandlerEvent::Custom(ev)); + }, + State::DisabledOpening => self.state = State::Disabled, + State::Poisoned => error!("☎️ Notifications handler in a poisoned state"), + } + } + + fn connection_keep_alive(&self) -> KeepAlive { + match self.state { + // We have a small grace period of `INITIAL_KEEPALIVE_TIME` during which we keep the + // connection open no matter what, in order to avoid closing and reopening + // connections all the time. + State::Disabled | State::DisabledOpen(_) | State::DisabledOpening => + KeepAlive::Until(self.when_connection_open + INITIAL_KEEPALIVE_TIME), + State::Opening { .. } | State::Open { .. 
} => KeepAlive::Yes, + State::Refused | State::Poisoned => KeepAlive::No, + } + } + + fn poll( + &mut self, + cx: &mut Context, + ) -> Poll> { + // Flush the events queue if necessary. + if let Some(event) = self.events_queue.pop_front() { + return Poll::Ready(event) + } + + match &mut self.state { + State::Open { substream, initial_message, close_waker } => + match Sink::poll_flush(Pin::new(substream), cx) { + Poll::Pending | Poll::Ready(Ok(())) => {}, + Poll::Ready(Err(_)) => { + if let Some(close_waker) = close_waker.take() { + close_waker.wake(); + } + + // We try to re-open a substream. + let initial_message = mem::replace(initial_message, Vec::new()); + self.state = State::Opening { initial_message: initial_message.clone() }; + let proto = NotificationsOut::new(self.protocol_name.clone(), initial_message); + self.events_queue.push_back(ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(proto, ()).with_timeout(OPEN_TIMEOUT), + }); + return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed)); + } + }, + + State::DisabledOpen(sub) => match Sink::poll_close(Pin::new(sub), cx) { + Poll::Pending => {}, + Poll::Ready(Ok(())) | Poll::Ready(Err(_)) => { + self.state = State::Disabled; + return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed)); + }, + }, + + _ => {} + } + + Poll::Pending + } +} + +impl fmt::Debug for NotifsOutHandler { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + f.debug_struct("NotifsOutHandler") + .field("open", &self.is_open()) + .finish() + } +} diff --git a/client/network/src/protocol/generic_proto/tests.rs b/client/network/src/protocol/generic_proto/tests.rs new file mode 100644 index 0000000000000..d604645d4ac87 --- /dev/null +++ b/client/network/src/protocol/generic_proto/tests.rs @@ -0,0 +1,314 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +#![cfg(test)] + +use crate::protocol::generic_proto::{GenericProto, GenericProtoOut}; + +use futures::prelude::*; +use libp2p::{PeerId, Multiaddr, Transport}; +use libp2p::core::{ + connection::{ConnectionId, ListenerId}, + ConnectedPoint, + muxing, + transport::MemoryTransport, + upgrade +}; +use libp2p::{identity, noise, yamux}; +use libp2p::swarm::{ + Swarm, ProtocolsHandler, IntoProtocolsHandler, PollParameters, + NetworkBehaviour, NetworkBehaviourAction +}; +use std::{error, io, iter, task::{Context, Poll}, time::Duration}; + +/// Builds two nodes that have each other as bootstrap nodes. +/// This is to be used only for testing, and a panic will happen if something goes wrong. +fn build_nodes() -> (Swarm, Swarm) { + let mut out = Vec::with_capacity(2); + + let keypairs: Vec<_> = (0..2).map(|_| identity::Keypair::generate_ed25519()).collect(); + let addrs: Vec = (0..2) + .map(|_| format!("/memory/{}", rand::random::()).parse().unwrap()) + .collect(); + + for index in 0 .. 
2 { + let keypair = keypairs[index].clone(); + let local_peer_id = keypair.public().into_peer_id(); + + let noise_keys = noise::Keypair::::new() + .into_authentic(&keypair) + .unwrap(); + + let transport = MemoryTransport + .upgrade(upgrade::Version::V1) + .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) + .multiplex(yamux::Config::default()) + .map(|(peer, muxer), _| (peer, muxing::StreamMuxerBox::new(muxer))) + .timeout(Duration::from_secs(20)) + .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) + .boxed(); + + let (peerset, _) = sc_peerset::Peerset::from_config(sc_peerset::PeersetConfig { + in_peers: 25, + out_peers: 25, + bootnodes: if index == 0 { + keypairs + .iter() + .skip(1) + .map(|keypair| keypair.public().into_peer_id()) + .collect() + } else { + vec![] + }, + reserved_only: false, + priority_groups: Vec::new(), + }); + + let behaviour = CustomProtoWithAddr { + inner: GenericProto::new( + local_peer_id, "test", &[1], vec![], peerset, + iter::once(("/foo".into(), Vec::new())) + ), + addrs: addrs + .iter() + .enumerate() + .filter_map(|(n, a)| if n != index { + Some((keypairs[n].public().into_peer_id(), a.clone())) + } else { + None + }) + .collect(), + }; + + let mut swarm = Swarm::new( + transport, + behaviour, + keypairs[index].public().into_peer_id() + ); + Swarm::listen_on(&mut swarm, addrs[index].clone()).unwrap(); + out.push(swarm); + } + + // Final output + let mut out_iter = out.into_iter(); + let first = out_iter.next().unwrap(); + let second = out_iter.next().unwrap(); + (first, second) +} + +/// Wraps around the `CustomBehaviour` network behaviour, and adds hardcoded node addresses to it. 
+struct CustomProtoWithAddr { + inner: GenericProto, + addrs: Vec<(PeerId, Multiaddr)>, +} + +impl std::ops::Deref for CustomProtoWithAddr { + type Target = GenericProto; + + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl std::ops::DerefMut for CustomProtoWithAddr { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} + +impl NetworkBehaviour for CustomProtoWithAddr { + type ProtocolsHandler = ::ProtocolsHandler; + type OutEvent = ::OutEvent; + + fn new_handler(&mut self) -> Self::ProtocolsHandler { + self.inner.new_handler() + } + + fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { + let mut list = self.inner.addresses_of_peer(peer_id); + for (p, a) in self.addrs.iter() { + if p == peer_id { + list.push(a.clone()); + } + } + list + } + + fn inject_connected(&mut self, peer_id: &PeerId) { + self.inner.inject_connected(peer_id) + } + + fn inject_disconnected(&mut self, peer_id: &PeerId) { + self.inner.inject_disconnected(peer_id) + } + + fn inject_connection_established(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + self.inner.inject_connection_established(peer_id, conn, endpoint) + } + + fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + self.inner.inject_connection_closed(peer_id, conn, endpoint) + } + + fn inject_event( + &mut self, + peer_id: PeerId, + connection: ConnectionId, + event: <::Handler as ProtocolsHandler>::OutEvent + ) { + self.inner.inject_event(peer_id, connection, event) + } + + fn poll( + &mut self, + cx: &mut Context, + params: &mut impl PollParameters + ) -> Poll< + NetworkBehaviourAction< + <::Handler as ProtocolsHandler>::InEvent, + Self::OutEvent + > + > { + self.inner.poll(cx, params) + } + + fn inject_addr_reach_failure(&mut self, peer_id: Option<&PeerId>, addr: &Multiaddr, error: &dyn std::error::Error) { + self.inner.inject_addr_reach_failure(peer_id, addr, error) + } + + fn inject_dial_failure(&mut 
self, peer_id: &PeerId) { + self.inner.inject_dial_failure(peer_id) + } + + fn inject_new_listen_addr(&mut self, addr: &Multiaddr) { + self.inner.inject_new_listen_addr(addr) + } + + fn inject_expired_listen_addr(&mut self, addr: &Multiaddr) { + self.inner.inject_expired_listen_addr(addr) + } + + fn inject_new_external_addr(&mut self, addr: &Multiaddr) { + self.inner.inject_new_external_addr(addr) + } + + fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn error::Error + 'static)) { + self.inner.inject_listener_error(id, err); + } + + fn inject_listener_closed(&mut self, id: ListenerId, reason: Result<(), &io::Error>) { + self.inner.inject_listener_closed(id, reason); + } +} + +#[test] +fn reconnect_after_disconnect() { + // We connect two nodes together, then force a disconnect (through the API of the `Service`), + // check that the disconnect worked, and finally check whether they successfully reconnect. + + let (mut service1, mut service2) = build_nodes(); + + // For this test, the services can be in the following states. + #[derive(Debug, Copy, Clone, PartialEq, Eq)] + enum ServiceState { NotConnected, FirstConnec, Disconnected, ConnectedAgain } + let mut service1_state = ServiceState::NotConnected; + let mut service2_state = ServiceState::NotConnected; + + futures::executor::block_on(async move { + loop { + // Grab next event from services. + let event = { + let s1 = service1.next(); + let s2 = service2.next(); + futures::pin_mut!(s1, s2); + match future::select(s1, s2).await { + future::Either::Left((ev, _)) => future::Either::Left(ev), + future::Either::Right((ev, _)) => future::Either::Right(ev), + } + }; + + match event { + future::Either::Left(GenericProtoOut::CustomProtocolOpen { .. 
}) => { + match service1_state { + ServiceState::NotConnected => { + service1_state = ServiceState::FirstConnec; + if service2_state == ServiceState::FirstConnec { + service1.disconnect_peer(Swarm::local_peer_id(&service2)); + } + }, + ServiceState::Disconnected => service1_state = ServiceState::ConnectedAgain, + ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(), + } + }, + future::Either::Left(GenericProtoOut::CustomProtocolClosed { .. }) => { + match service1_state { + ServiceState::FirstConnec => service1_state = ServiceState::Disconnected, + ServiceState::ConnectedAgain| ServiceState::NotConnected | + ServiceState::Disconnected => panic!(), + } + }, + future::Either::Right(GenericProtoOut::CustomProtocolOpen { .. }) => { + match service2_state { + ServiceState::NotConnected => { + service2_state = ServiceState::FirstConnec; + if service1_state == ServiceState::FirstConnec { + service1.disconnect_peer(Swarm::local_peer_id(&service2)); + } + }, + ServiceState::Disconnected => service2_state = ServiceState::ConnectedAgain, + ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(), + } + }, + future::Either::Right(GenericProtoOut::CustomProtocolClosed { .. }) => { + match service2_state { + ServiceState::FirstConnec => service2_state = ServiceState::Disconnected, + ServiceState::ConnectedAgain| ServiceState::NotConnected | + ServiceState::Disconnected => panic!(), + } + }, + _ => {} + } + + if service1_state == ServiceState::ConnectedAgain && service2_state == ServiceState::ConnectedAgain { + break; + } + } + + // Now that the two services have disconnected and reconnected, wait for 3 seconds and + // check whether they're still connected. + let mut delay = futures_timer::Delay::new(Duration::from_secs(3)); + + loop { + // Grab next event from services. 
+ let event = { + let s1 = service1.next(); + let s2 = service2.next(); + futures::pin_mut!(s1, s2); + match future::select(future::select(s1, s2), &mut delay).await { + future::Either::Right(_) => break, // success + future::Either::Left((future::Either::Left((ev, _)), _)) => ev, + future::Either::Left((future::Either::Right((ev, _)), _)) => ev, + } + }; + + match event { + GenericProtoOut::CustomProtocolOpen { .. } | + GenericProtoOut::CustomProtocolClosed { .. } => panic!(), + _ => {} + } + } + }); +} diff --git a/client/network/src/protocol/generic_proto/upgrade.rs b/client/network/src/protocol/generic_proto/upgrade.rs new file mode 100644 index 0000000000000..6322a10b572a9 --- /dev/null +++ b/client/network/src/protocol/generic_proto/upgrade.rs @@ -0,0 +1,36 @@ +// This file is part of Substrate. + +// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+pub use self::collec::UpgradeCollec; +pub use self::legacy::{ + RegisteredProtocol, + RegisteredProtocolEvent, + RegisteredProtocolName, + RegisteredProtocolSubstream +}; +pub use self::notifications::{ + NotificationsIn, + NotificationsInSubstream, + NotificationsOut, + NotificationsOutSubstream, + NotificationsHandshakeError, + NotificationsOutError, +}; + +mod collec; +mod legacy; +mod notifications; diff --git a/client/network/src/protocol/generic_proto/upgrade/collec.rs b/client/network/src/protocol/generic_proto/upgrade/collec.rs new file mode 100644 index 0000000000000..f8d199974940f --- /dev/null +++ b/client/network/src/protocol/generic_proto/upgrade/collec.rs @@ -0,0 +1,97 @@ +// Copyright 2018-2020 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. 
+ +use futures::prelude::*; +use libp2p::core::upgrade::{InboundUpgrade, ProtocolName, UpgradeInfo}; +use std::{iter::FromIterator, pin::Pin, task::{Context, Poll}, vec}; + +// TODO: move this to libp2p => https://github.com/libp2p/rust-libp2p/issues/1445 + +/// Upgrade that combines multiple upgrades of the same type into one. Supports all the protocols +/// supported by either sub-upgrade. +#[derive(Debug, Clone)] +pub struct UpgradeCollec(pub Vec); + +impl From> for UpgradeCollec { + fn from(list: Vec) -> Self { + UpgradeCollec(list) + } +} + +impl FromIterator for UpgradeCollec { + fn from_iter>(iter: I) -> Self { + UpgradeCollec(iter.into_iter().collect()) + } +} + +impl UpgradeInfo for UpgradeCollec { + type Info = ProtoNameWithUsize; + type InfoIter = vec::IntoIter; + + fn protocol_info(&self) -> Self::InfoIter { + self.0.iter().enumerate() + .flat_map(|(n, p)| + p.protocol_info().into_iter().map(move |i| ProtoNameWithUsize(i, n))) + .collect::>() + .into_iter() + } +} + +impl InboundUpgrade for UpgradeCollec +where + T: InboundUpgrade, +{ + type Output = (T::Output, usize); + type Error = (T::Error, usize); + type Future = FutWithUsize; + + fn upgrade_inbound(mut self, sock: C, info: Self::Info) -> Self::Future { + let fut = self.0.remove(info.1).upgrade_inbound(sock, info.0); + FutWithUsize(fut, info.1) + } +} + +/// Groups a `ProtocolName` with a `usize`. +#[derive(Debug, Clone)] +pub struct ProtoNameWithUsize(T, usize); + +impl ProtocolName for ProtoNameWithUsize { + fn protocol_name(&self) -> &[u8] { + self.0.protocol_name() + } +} + +/// Equivalent to `fut.map_ok(|v| (v, num)).map_err(|e| (e, num))`, where `fut` and `num` are +/// the two fields of this struct. 
+#[pin_project::pin_project] +pub struct FutWithUsize(#[pin] T, usize); + +impl>, O, E> Future for FutWithUsize { + type Output = Result<(O, usize), (E, usize)>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { + let this = self.project(); + match Future::poll(this.0, cx) { + Poll::Ready(Ok(v)) => Poll::Ready(Ok((v, *this.1))), + Poll::Ready(Err(e)) => Poll::Ready(Err((e, *this.1))), + Poll::Pending => Poll::Pending, + } + } +} diff --git a/client/network/src/protocol/generic_proto/upgrade/legacy.rs b/client/network/src/protocol/generic_proto/upgrade/legacy.rs new file mode 100644 index 0000000000000..1b2b97253d1ae --- /dev/null +++ b/client/network/src/protocol/generic_proto/upgrade/legacy.rs @@ -0,0 +1,313 @@ +// This file is part of Substrate. + +// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use crate::config::ProtocolId; +use bytes::BytesMut; +use futures::prelude::*; +use futures_codec::Framed; +use libp2p::core::{Endpoint, UpgradeInfo, InboundUpgrade, OutboundUpgrade, upgrade::ProtocolName}; +use parking_lot::RwLock; +use std::{collections::VecDeque, io, pin::Pin, sync::Arc, vec::IntoIter as VecIntoIter}; +use std::task::{Context, Poll}; +use unsigned_varint::codec::UviBytes; + +/// Connection upgrade for a single protocol. 
+/// +/// Note that "a single protocol" here refers to `par` for example. However +/// each protocol can have multiple different versions for networking purposes. +pub struct RegisteredProtocol { + /// Id of the protocol for API purposes. + id: ProtocolId, + /// Base name of the protocol as advertised on the network. + /// Ends with `/` so that we can append a version number behind. + base_name: Vec, + /// List of protocol versions that we support. + /// Ordered in descending order so that the best comes first. + supported_versions: Vec, + /// Handshake to send after the substream is open. + handshake_message: Arc>>, +} + +impl RegisteredProtocol { + /// Creates a new `RegisteredProtocol`. + pub fn new(protocol: impl Into, versions: &[u8], handshake_message: Arc>>) + -> Self { + let protocol = protocol.into(); + let mut base_name = b"/substrate/".to_vec(); + base_name.extend_from_slice(protocol.as_ref().as_bytes()); + base_name.extend_from_slice(b"/"); + + RegisteredProtocol { + base_name, + id: protocol, + supported_versions: { + let mut tmp = versions.to_vec(); + tmp.sort_by(|a, b| b.cmp(&a)); + tmp + }, + handshake_message, + } + } + + /// Returns the `Arc` to the handshake message that was passed at initialization. + pub fn handshake_message(&self) -> &Arc>> { + &self.handshake_message + } +} + +impl Clone for RegisteredProtocol { + fn clone(&self) -> Self { + RegisteredProtocol { + id: self.id.clone(), + base_name: self.base_name.clone(), + supported_versions: self.supported_versions.clone(), + handshake_message: self.handshake_message.clone(), + } + } +} + +/// Output of a `RegisteredProtocol` upgrade. +pub struct RegisteredProtocolSubstream { + /// If true, we are in the process of closing the sink. + is_closing: bool, + /// Whether the local node opened this substream (dialer), or we received this substream from + /// the remote (listener). + endpoint: Endpoint, + /// Buffer of packets to send. 
+ send_queue: VecDeque, + /// If true, we should call `poll_complete` on the inner sink. + requires_poll_flush: bool, + /// The underlying substream. + inner: stream::Fuse>>, + /// Version of the protocol that was negotiated. + protocol_version: u8, + /// If true, we have sent a "remote is clogged" event recently and shouldn't send another one + /// unless the buffer empties then fills itself again. + clogged_fuse: bool, +} + +impl RegisteredProtocolSubstream { + /// Returns the version of the protocol that was negotiated. + pub fn protocol_version(&self) -> u8 { + self.protocol_version + } + + /// Returns whether the local node opened this substream (dialer), or we received this + /// substream from the remote (listener). + pub fn endpoint(&self) -> Endpoint { + self.endpoint + } + + /// Starts a graceful shutdown process on this substream. + /// + /// Note that "graceful" means that we sent a closing message. We don't wait for any + /// confirmation from the remote. + /// + /// After calling this, the stream is guaranteed to finish soon-ish. + pub fn shutdown(&mut self) { + self.is_closing = true; + self.send_queue.clear(); + } +} + +/// Event produced by the `RegisteredProtocolSubstream`. +#[derive(Debug, Clone)] +pub enum RegisteredProtocolEvent { + /// Received a message from the remote. + Message(BytesMut), + + /// Diagnostic event indicating that the connection is clogged and we should avoid sending too + /// many messages to it. + Clogged, +} + +impl Stream for RegisteredProtocolSubstream +where TSubstream: AsyncRead + AsyncWrite + Unpin { + type Item = Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + // Flushing the local queue. 
+ while !self.send_queue.is_empty() { + match Pin::new(&mut self.inner).poll_ready(cx) { + Poll::Ready(Ok(())) => {}, + Poll::Ready(Err(err)) => return Poll::Ready(Some(Err(err))), + Poll::Pending => break, + } + + if let Some(packet) = self.send_queue.pop_front() { + Pin::new(&mut self.inner).start_send(packet)?; + self.requires_poll_flush = true; + } + } + + // If we are closing, close as soon as the Sink is closed. + if self.is_closing { + return match Pin::new(&mut self.inner).poll_close(cx) { + Poll::Pending => Poll::Pending, + Poll::Ready(Ok(_)) => Poll::Ready(None), + Poll::Ready(Err(err)) => Poll::Ready(Some(Err(err))), + } + } + + // Indicating that the remote is clogged if that's the case. + if self.send_queue.len() >= 1536 { + if !self.clogged_fuse { + // Note: this fuse is important not just for preventing us from flooding the logs; + // if you remove the fuse, then we will always return early from this function and + // thus never read any message from the network. + self.clogged_fuse = true; + return Poll::Ready(Some(Ok(RegisteredProtocolEvent::Clogged))) + } + } else { + self.clogged_fuse = false; + } + + // Flushing if necessary. + if self.requires_poll_flush { + if let Poll::Ready(()) = Pin::new(&mut self.inner).poll_flush(cx)? { + self.requires_poll_flush = false; + } + } + + // Receiving incoming packets. + // Note that `inner` is wrapped in a `Fuse`, therefore we can poll it forever. + match Pin::new(&mut self.inner).poll_next(cx)? { + Poll::Ready(Some(data)) => { + Poll::Ready(Some(Ok(RegisteredProtocolEvent::Message(data)))) + } + Poll::Ready(None) => + if !self.requires_poll_flush && self.send_queue.is_empty() { + Poll::Ready(None) + } else { + Poll::Pending + } + Poll::Pending => Poll::Pending, + } + } +} + +impl UpgradeInfo for RegisteredProtocol { + type Info = RegisteredProtocolName; + type InfoIter = VecIntoIter; + + #[inline] + fn protocol_info(&self) -> Self::InfoIter { + // Report each version as an individual protocol. 
+ self.supported_versions.iter().map(|&version| { + let num = version.to_string(); + + let mut name = self.base_name.clone(); + name.extend_from_slice(num.as_bytes()); + RegisteredProtocolName { + name, + version, + } + }).collect::>().into_iter() + } +} + +/// Implementation of `ProtocolName` for a custom protocol. +#[derive(Debug, Clone)] +pub struct RegisteredProtocolName { + /// Protocol name, as advertised on the wire. + name: Vec, + /// Version number. Stored in string form in `name`, but duplicated here for easier retrieval. + version: u8, +} + +impl ProtocolName for RegisteredProtocolName { + fn protocol_name(&self) -> &[u8] { + &self.name + } +} + +impl InboundUpgrade for RegisteredProtocol +where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, +{ + type Output = (RegisteredProtocolSubstream, Vec); + type Future = Pin> + Send>>; + type Error = io::Error; + + fn upgrade_inbound( + self, + socket: TSubstream, + info: Self::Info, + ) -> Self::Future { + Box::pin(async move { + let mut framed = { + let mut codec = UviBytes::default(); + codec.set_max_len(16 * 1024 * 1024); // 16 MiB hard limit for packets. 
+ Framed::new(socket, codec) + }; + + let handshake = BytesMut::from(&self.handshake_message.read()[..]); + framed.send(handshake).await?; + let received_handshake = framed.next().await + .ok_or_else(|| io::ErrorKind::UnexpectedEof)??; + + Ok((RegisteredProtocolSubstream { + is_closing: false, + endpoint: Endpoint::Listener, + send_queue: VecDeque::new(), + requires_poll_flush: false, + inner: framed.fuse(), + protocol_version: info.version, + clogged_fuse: false, + }, received_handshake.to_vec())) + }) + } +} + +impl OutboundUpgrade for RegisteredProtocol +where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, +{ + type Output = >::Output; + type Future = >::Future; + type Error = >::Error; + + fn upgrade_outbound( + self, + socket: TSubstream, + info: Self::Info, + ) -> Self::Future { + Box::pin(async move { + let mut framed = { + let mut codec = UviBytes::default(); + codec.set_max_len(16 * 1024 * 1024); // 16 MiB hard limit for packets. + Framed::new(socket, codec) + }; + + let handshake = BytesMut::from(&self.handshake_message.read()[..]); + framed.send(handshake).await?; + let received_handshake = framed.next().await + .ok_or_else(|| { + io::Error::new(io::ErrorKind::UnexpectedEof, "Failed to receive handshake") + })??; + + Ok((RegisteredProtocolSubstream { + is_closing: false, + endpoint: Endpoint::Dialer, + send_queue: VecDeque::new(), + requires_poll_flush: false, + inner: framed.fuse(), + protocol_version: info.version, + clogged_fuse: false, + }, received_handshake.to_vec())) + }) + } +} diff --git a/client/network/src/protocol/generic_proto/upgrade/notifications.rs b/client/network/src/protocol/generic_proto/upgrade/notifications.rs new file mode 100644 index 0000000000000..64b4b980da002 --- /dev/null +++ b/client/network/src/protocol/generic_proto/upgrade/notifications.rs @@ -0,0 +1,611 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +/// Notifications protocol. +/// +/// The Substrate notifications protocol consists in the following: +/// +/// - Node A opens a substream to node B and sends a message which contains some protocol-specific +/// higher-level logic. This message is prefixed with a variable-length integer message length. +/// This message can be empty, in which case `0` is sent. +/// - If node B accepts the substream, it sends back a message with the same properties. +/// - If instead B refuses the connection (which typically happens because no empty slot is +/// available), then it immediately closes the substream without sending back anything. +/// - Node A can then send notifications to B, prefixed with a variable-length integer indicating +/// the length of the message. +/// - Either node A or node B can signal that it doesn't want this notifications substream anymore +/// by closing its writing side. The other party should respond by also closing their own +/// writing side soon after. +/// +/// Notification substreams are unidirectional. If A opens a substream with B, then B is +/// encouraged but not required to open a substream to A as well. 
+/// + +use bytes::BytesMut; +use futures::prelude::*; +use futures_codec::Framed; +use libp2p::core::{UpgradeInfo, InboundUpgrade, OutboundUpgrade, upgrade}; +use log::error; +use std::{borrow::Cow, convert::Infallible, io, iter, mem, pin::Pin, task::{Context, Poll}}; +use unsigned_varint::codec::UviBytes; + +/// Maximum allowed size of the two handshake messages, in bytes. +const MAX_HANDSHAKE_SIZE: usize = 1024; + +/// Upgrade that accepts a substream, sends back a status message, then becomes a unidirectional +/// stream of messages. +#[derive(Debug, Clone)] +pub struct NotificationsIn { + /// Protocol name to use when negotiating the substream. + protocol_name: Cow<'static, str>, +} + +/// Upgrade that opens a substream, waits for the remote to accept by sending back a status +/// message, then becomes a unidirectional sink of data. +#[derive(Debug, Clone)] +pub struct NotificationsOut { + /// Protocol name to use when negotiating the substream. + protocol_name: Cow<'static, str>, + /// Message to send when we start the handshake. + initial_message: Vec, +} + +/// A substream for incoming notification messages. +/// +/// When creating, this struct starts in a state in which we must first send back a handshake +/// message to the remote. No message will come before this has been done. +#[pin_project::pin_project] +pub struct NotificationsInSubstream { + #[pin] + socket: Framed>>>, + handshake: NotificationsInSubstreamHandshake, +} + +/// State of the handshake sending back process. +enum NotificationsInSubstreamHandshake { + /// Waiting for the user to give us the handshake message. + NotSent, + /// User gave us the handshake message. Trying to push it in the socket. + PendingSend(Vec), + /// Handshake message was pushed in the socket. Still need to flush. + Flush, + /// Handshake message successfully sent and flushed. + Sent, + /// Remote has closed their writing side. We close our own writing side in return. 
+ ClosingInResponseToRemote, + /// Both our side and the remote have closed their writing side. + BothSidesClosed, +} + +/// A substream for outgoing notification messages. +#[pin_project::pin_project] +pub struct NotificationsOutSubstream { + /// Substream where to send messages. + #[pin] + socket: Framed>>>, +} + +impl NotificationsIn { + /// Builds a new potential upgrade. + pub fn new(protocol_name: impl Into>) -> Self { + NotificationsIn { + protocol_name: protocol_name.into(), + } + } + + /// Returns the name of the protocol that we accept. + pub fn protocol_name(&self) -> &Cow<'static, str> { + &self.protocol_name + } +} + +impl UpgradeInfo for NotificationsIn { + type Info = Cow<'static, [u8]>; + type InfoIter = iter::Once; + + fn protocol_info(&self) -> Self::InfoIter { + let bytes: Cow<'static, [u8]> = match &self.protocol_name { + Cow::Borrowed(s) => Cow::Borrowed(s.as_bytes()), + Cow::Owned(s) => Cow::Owned(s.as_bytes().to_vec()) + }; + iter::once(bytes) + } +} + +impl InboundUpgrade for NotificationsIn +where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, +{ + type Output = (Vec, NotificationsInSubstream); + type Future = Pin> + Send>>; + type Error = NotificationsHandshakeError; + + fn upgrade_inbound( + self, + mut socket: TSubstream, + _: Self::Info, + ) -> Self::Future { + Box::pin(async move { + let initial_message_len = unsigned_varint::aio::read_usize(&mut socket).await?; + if initial_message_len > MAX_HANDSHAKE_SIZE { + return Err(NotificationsHandshakeError::TooLarge { + requested: initial_message_len, + max: MAX_HANDSHAKE_SIZE, + }); + } + + let mut initial_message = vec![0u8; initial_message_len]; + if !initial_message.is_empty() { + socket.read_exact(&mut initial_message).await?; + } + + let substream = NotificationsInSubstream { + socket: Framed::new(socket, UviBytes::default()), + handshake: NotificationsInSubstreamHandshake::NotSent, + }; + + Ok((initial_message, substream)) + }) + } +} + +impl NotificationsInSubstream 
+where TSubstream: AsyncRead + AsyncWrite + Unpin, +{ + /// Sends the handshake in order to inform the remote that we accept the substream. + pub fn send_handshake(&mut self, message: impl Into>) { + if !matches!(self.handshake, NotificationsInSubstreamHandshake::NotSent) { + error!(target: "sub-libp2p", "Tried to send handshake twice"); + return; + } + + self.handshake = NotificationsInSubstreamHandshake::PendingSend(message.into()); + } + + /// Equivalent to `Stream::poll_next`, except that it only drives the handshake and is + /// guaranteed to not generate any notification. + pub fn poll_process(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let mut this = self.project(); + + loop { + match mem::replace(this.handshake, NotificationsInSubstreamHandshake::Sent) { + NotificationsInSubstreamHandshake::PendingSend(msg) => + match Sink::poll_ready(this.socket.as_mut(), cx) { + Poll::Ready(_) => { + *this.handshake = NotificationsInSubstreamHandshake::Flush; + match Sink::start_send(this.socket.as_mut(), io::Cursor::new(msg)) { + Ok(()) => {}, + Err(err) => return Poll::Ready(Err(err)), + } + }, + Poll::Pending => { + *this.handshake = NotificationsInSubstreamHandshake::PendingSend(msg); + return Poll::Pending + } + }, + NotificationsInSubstreamHandshake::Flush => + match Sink::poll_flush(this.socket.as_mut(), cx)? 
{ + Poll::Ready(()) => + *this.handshake = NotificationsInSubstreamHandshake::Sent, + Poll::Pending => { + *this.handshake = NotificationsInSubstreamHandshake::Flush; + return Poll::Pending + } + }, + + st @ NotificationsInSubstreamHandshake::NotSent | + st @ NotificationsInSubstreamHandshake::Sent | + st @ NotificationsInSubstreamHandshake::ClosingInResponseToRemote | + st @ NotificationsInSubstreamHandshake::BothSidesClosed => { + *this.handshake = st; + return Poll::Pending; + } + } + } + } +} + +impl Stream for NotificationsInSubstream +where TSubstream: AsyncRead + AsyncWrite + Unpin, +{ + type Item = Result; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let mut this = self.project(); + + // This `Stream` implementation first tries to send back the handshake if necessary. + loop { + match mem::replace(this.handshake, NotificationsInSubstreamHandshake::Sent) { + NotificationsInSubstreamHandshake::NotSent => { + *this.handshake = NotificationsInSubstreamHandshake::NotSent; + return Poll::Pending + }, + NotificationsInSubstreamHandshake::PendingSend(msg) => + match Sink::poll_ready(this.socket.as_mut(), cx) { + Poll::Ready(_) => { + *this.handshake = NotificationsInSubstreamHandshake::Flush; + match Sink::start_send(this.socket.as_mut(), io::Cursor::new(msg)) { + Ok(()) => {}, + Err(err) => return Poll::Ready(Some(Err(err))), + } + }, + Poll::Pending => { + *this.handshake = NotificationsInSubstreamHandshake::PendingSend(msg); + return Poll::Pending + } + }, + NotificationsInSubstreamHandshake::Flush => + match Sink::poll_flush(this.socket.as_mut(), cx)? 
{ + Poll::Ready(()) => + *this.handshake = NotificationsInSubstreamHandshake::Sent, + Poll::Pending => { + *this.handshake = NotificationsInSubstreamHandshake::Flush; + return Poll::Pending + } + }, + + NotificationsInSubstreamHandshake::Sent => { + match Stream::poll_next(this.socket.as_mut(), cx) { + Poll::Ready(None) => *this.handshake = + NotificationsInSubstreamHandshake::ClosingInResponseToRemote, + Poll::Ready(Some(msg)) => { + *this.handshake = NotificationsInSubstreamHandshake::Sent; + return Poll::Ready(Some(msg)) + }, + Poll::Pending => { + *this.handshake = NotificationsInSubstreamHandshake::Sent; + return Poll::Pending + }, + } + }, + + NotificationsInSubstreamHandshake::ClosingInResponseToRemote => + match Sink::poll_close(this.socket.as_mut(), cx)? { + Poll::Ready(()) => + *this.handshake = NotificationsInSubstreamHandshake::BothSidesClosed, + Poll::Pending => { + *this.handshake = NotificationsInSubstreamHandshake::ClosingInResponseToRemote; + return Poll::Pending + } + }, + + NotificationsInSubstreamHandshake::BothSidesClosed => + return Poll::Ready(None), + } + } + } +} + +impl NotificationsOut { + /// Builds a new potential upgrade. 
+ pub fn new(protocol_name: impl Into>, initial_message: impl Into>) -> Self { + let initial_message = initial_message.into(); + if initial_message.len() > MAX_HANDSHAKE_SIZE { + error!(target: "sub-libp2p", "Outbound networking handshake is above allowed protocol limit"); + } + + NotificationsOut { + protocol_name: protocol_name.into(), + initial_message, + } + } +} + +impl UpgradeInfo for NotificationsOut { + type Info = Cow<'static, [u8]>; + type InfoIter = iter::Once; + + fn protocol_info(&self) -> Self::InfoIter { + let bytes: Cow<'static, [u8]> = match &self.protocol_name { + Cow::Borrowed(s) => Cow::Borrowed(s.as_bytes()), + Cow::Owned(s) => Cow::Owned(s.as_bytes().to_vec()) + }; + iter::once(bytes) + } +} + +impl OutboundUpgrade for NotificationsOut +where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, +{ + type Output = (Vec, NotificationsOutSubstream); + type Future = Pin> + Send>>; + type Error = NotificationsHandshakeError; + + fn upgrade_outbound( + self, + mut socket: TSubstream, + _: Self::Info, + ) -> Self::Future { + Box::pin(async move { + upgrade::write_with_len_prefix(&mut socket, &self.initial_message).await?; + + // Reading handshake. 
+ let handshake_len = unsigned_varint::aio::read_usize(&mut socket).await?; + if handshake_len > MAX_HANDSHAKE_SIZE { + return Err(NotificationsHandshakeError::TooLarge { + requested: handshake_len, + max: MAX_HANDSHAKE_SIZE, + }); + } + + let mut handshake = vec![0u8; handshake_len]; + if !handshake.is_empty() { + socket.read_exact(&mut handshake).await?; + } + + Ok((handshake, NotificationsOutSubstream { + socket: Framed::new(socket, UviBytes::default()), + })) + }) + } +} + +impl Sink> for NotificationsOutSubstream + where TSubstream: AsyncRead + AsyncWrite + Unpin, +{ + type Error = NotificationsOutError; + + fn poll_ready(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let mut this = self.project(); + Sink::poll_ready(this.socket.as_mut(), cx) + .map_err(NotificationsOutError::Io) + } + + fn start_send(self: Pin<&mut Self>, item: Vec) -> Result<(), Self::Error> { + let mut this = self.project(); + Sink::start_send(this.socket.as_mut(), io::Cursor::new(item)) + .map_err(NotificationsOutError::Io) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let mut this = self.project(); + Sink::poll_flush(this.socket.as_mut(), cx) + .map_err(NotificationsOutError::Io) + } + + fn poll_close(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let mut this = self.project(); + Sink::poll_close(this.socket.as_mut(), cx) + .map_err(NotificationsOutError::Io) + } +} + +/// Error generated by sending on a notifications out substream. +#[derive(Debug, derive_more::From, derive_more::Display)] +pub enum NotificationsHandshakeError { + /// I/O error on the substream. + Io(io::Error), + + /// Initial message or handshake was too large. + #[display(fmt = "Initial message or handshake was too large: {}", requested)] + TooLarge { + /// Size requested by the remote. + requested: usize, + /// Maximum allowed, + max: usize, + }, + + /// Error while decoding the variable-length integer. 
+ VarintDecode(unsigned_varint::decode::Error), +} + +impl From for NotificationsHandshakeError { + fn from(err: unsigned_varint::io::ReadError) -> Self { + match err { + unsigned_varint::io::ReadError::Io(err) => NotificationsHandshakeError::Io(err), + unsigned_varint::io::ReadError::Decode(err) => NotificationsHandshakeError::VarintDecode(err), + _ => { + log::warn!("Unrecognized varint decoding error"); + NotificationsHandshakeError::Io(From::from(io::ErrorKind::InvalidData)) + } + } + } +} + +/// Error generated by sending on a notifications out substream. +#[derive(Debug, derive_more::From, derive_more::Display)] +pub enum NotificationsOutError { + /// I/O error on the substream. + Io(io::Error), +} + +#[cfg(test)] +mod tests { + use super::{NotificationsIn, NotificationsOut}; + + use async_std::net::{TcpListener, TcpStream}; + use futures::{prelude::*, channel::oneshot}; + use libp2p::core::upgrade; + use std::borrow::Cow; + + #[test] + fn basic_works() { + const PROTO_NAME: Cow<'static, str> = Cow::Borrowed("/test/proto/1"); + let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); + + let client = async_std::task::spawn(async move { + let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); + let (handshake, mut substream) = upgrade::apply_outbound( + socket, + NotificationsOut::new(PROTO_NAME, &b"initial message"[..]), + upgrade::Version::V1 + ).await.unwrap(); + + assert_eq!(handshake, b"hello world"); + substream.send(b"test message".to_vec()).await.unwrap(); + }); + + async_std::task::block_on(async move { + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); + listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); + + let (socket, _) = listener.accept().await.unwrap(); + let (initial_message, mut substream) = upgrade::apply_inbound( + socket, + NotificationsIn::new(PROTO_NAME) + ).await.unwrap(); + + assert_eq!(initial_message, b"initial message"); + substream.send_handshake(&b"hello world"[..]); + + 
let msg = substream.next().await.unwrap().unwrap(); + assert_eq!(msg.as_ref(), b"test message"); + }); + + async_std::task::block_on(client); + } + + #[test] + fn empty_handshake() { + // Check that everything still works when the handshake messages are empty. + + const PROTO_NAME: Cow<'static, str> = Cow::Borrowed("/test/proto/1"); + let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); + + let client = async_std::task::spawn(async move { + let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); + let (handshake, mut substream) = upgrade::apply_outbound( + socket, + NotificationsOut::new(PROTO_NAME, vec![]), + upgrade::Version::V1 + ).await.unwrap(); + + assert!(handshake.is_empty()); + substream.send(Default::default()).await.unwrap(); + }); + + async_std::task::block_on(async move { + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); + listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); + + let (socket, _) = listener.accept().await.unwrap(); + let (initial_message, mut substream) = upgrade::apply_inbound( + socket, + NotificationsIn::new(PROTO_NAME) + ).await.unwrap(); + + assert!(initial_message.is_empty()); + substream.send_handshake(vec![]); + + let msg = substream.next().await.unwrap().unwrap(); + assert!(msg.as_ref().is_empty()); + }); + + async_std::task::block_on(client); + } + + #[test] + fn refused() { + const PROTO_NAME: Cow<'static, str> = Cow::Borrowed("/test/proto/1"); + let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); + + let client = async_std::task::spawn(async move { + let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); + let outcome = upgrade::apply_outbound( + socket, + NotificationsOut::new(PROTO_NAME, &b"hello"[..]), + upgrade::Version::V1 + ).await; + + // Despite the protocol negotiation being successfully conducted on the listener + // side, we have to receive an error here because the listener didn't send the + // handshake. 
+ assert!(outcome.is_err()); + }); + + async_std::task::block_on(async move { + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); + listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); + + let (socket, _) = listener.accept().await.unwrap(); + let (initial_msg, substream) = upgrade::apply_inbound( + socket, + NotificationsIn::new(PROTO_NAME) + ).await.unwrap(); + + assert_eq!(initial_msg, b"hello"); + + // We successfully upgrade to the protocol, but then close the substream. + drop(substream); + }); + + async_std::task::block_on(client); + } + + #[test] + fn large_initial_message_refused() { + const PROTO_NAME: Cow<'static, str> = Cow::Borrowed("/test/proto/1"); + let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); + + let client = async_std::task::spawn(async move { + let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); + let ret = upgrade::apply_outbound( + socket, + // We check that an initial message that is too large gets refused. 
+ NotificationsOut::new(PROTO_NAME, (0..32768).map(|_| 0).collect::>()), + upgrade::Version::V1 + ).await; + assert!(ret.is_err()); + }); + + async_std::task::block_on(async move { + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); + listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); + + let (socket, _) = listener.accept().await.unwrap(); + let ret = upgrade::apply_inbound( + socket, + NotificationsIn::new(PROTO_NAME) + ).await; + assert!(ret.is_err()); + }); + + async_std::task::block_on(client); + } + + #[test] + fn large_handshake_refused() { + const PROTO_NAME: Cow<'static, str> = Cow::Borrowed("/test/proto/1"); + let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); + + let client = async_std::task::spawn(async move { + let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); + let ret = upgrade::apply_outbound( + socket, + NotificationsOut::new(PROTO_NAME, &b"initial message"[..]), + upgrade::Version::V1 + ).await; + assert!(ret.is_err()); + }); + + async_std::task::block_on(async move { + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); + listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); + + let (socket, _) = listener.accept().await.unwrap(); + let (initial_message, mut substream) = upgrade::apply_inbound( + socket, + NotificationsIn::new(PROTO_NAME) + ).await.unwrap(); + assert_eq!(initial_message, b"initial message"); + + // We check that a handshake that is too large gets refused. + substream.send_handshake((0..32768).map(|_| 0).collect::>()); + let _ = substream.next().await; + }); + + async_std::task::block_on(client); + } +} diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs new file mode 100644 index 0000000000000..1cd78c0ed1dda --- /dev/null +++ b/client/network/src/protocol/message.rs @@ -0,0 +1,571 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Network packet message types. These get serialized and put into the lower level protocol payload. + +use bitflags::bitflags; +use sp_runtime::{ConsensusEngineId, traits::{Block as BlockT, Header as HeaderT}}; +use codec::{Encode, Decode, Input, Output, Error}; +pub use self::generic::{ + BlockAnnounce, RemoteCallRequest, RemoteReadRequest, + RemoteHeaderRequest, RemoteHeaderResponse, + RemoteChangesRequest, RemoteChangesResponse, + FinalityProofRequest, FinalityProofResponse, + FromBlock, RemoteReadChildRequest, Roles, +}; +use sc_client_api::StorageProof; + +/// A unique ID of a request. +pub type RequestId = u64; + +/// Type alias for using the message type using block type parameters. +pub type Message = generic::Message< + ::Header, + ::Hash, + <::Header as HeaderT>::Number, + ::Extrinsic, +>; + +/// Type alias for using the block request type using block type parameters. +pub type BlockRequest = generic::BlockRequest< + ::Hash, + <::Header as HeaderT>::Number, +>; + +/// Type alias for using the BlockData type using block type parameters. +pub type BlockData = generic::BlockData< + ::Header, + ::Hash, + ::Extrinsic, +>; + +/// Type alias for using the BlockResponse type using block type parameters. 
+pub type BlockResponse = generic::BlockResponse< + ::Header, + ::Hash, + ::Extrinsic, +>; + +/// A set of transactions. +pub type Transactions = Vec; + +// Bits of block data and associated artifacts to request. +bitflags! { + /// Node roles bitmask. + pub struct BlockAttributes: u8 { + /// Include block header. + const HEADER = 0b00000001; + /// Include block body. + const BODY = 0b00000010; + /// Include block receipt. + const RECEIPT = 0b00000100; + /// Include block message queue. + const MESSAGE_QUEUE = 0b00001000; + /// Include a justification for the block. + const JUSTIFICATION = 0b00010000; + } +} + +impl BlockAttributes { + /// Encodes attributes as big endian u32, compatible with SCALE-encoding (i.e the + /// significant byte has zero index). + pub fn to_be_u32(&self) -> u32 { + u32::from_be_bytes([self.bits(), 0, 0, 0]) + } + + /// Decodes attributes, encoded with the `encode_to_be_u32()` call. + pub fn from_be_u32(encoded: u32) -> Result { + BlockAttributes::from_bits(encoded.to_be_bytes()[0]) + .ok_or_else(|| Error::from("Invalid BlockAttributes")) + } +} + +impl Encode for BlockAttributes { + fn encode_to(&self, dest: &mut T) { + dest.push_byte(self.bits()) + } +} + +impl codec::EncodeLike for BlockAttributes {} + +impl Decode for BlockAttributes { + fn decode(input: &mut I) -> Result { + Self::from_bits(input.read_byte()?).ok_or_else(|| Error::from("Invalid bytes")) + } +} + +#[derive(Debug, PartialEq, Eq, Clone, Copy, Encode, Decode)] +/// Block enumeration direction. +pub enum Direction { + /// Enumerate in ascending order (from child to parent). + Ascending = 0, + /// Enumerate in descending order (from parent to canonical child). + Descending = 1, +} + +/// Block state in the chain. +#[derive(Debug, PartialEq, Eq, Clone, Copy, Encode, Decode)] +pub enum BlockState { + /// Block is not part of the best chain. + Normal, + /// Latest best block. + Best, +} + +/// Remote call response. 
+#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] +pub struct RemoteCallResponse { + /// Id of a request this response was made for. + pub id: RequestId, + /// Execution proof. + pub proof: StorageProof, +} + +#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] +/// Remote read response. +pub struct RemoteReadResponse { + /// Id of a request this response was made for. + pub id: RequestId, + /// Read proof. + pub proof: StorageProof, +} + +/// Generic types. +pub mod generic { + use bitflags::bitflags; + use codec::{Encode, Decode, Input, Output}; + use sp_runtime::Justification; + use super::{ + RemoteReadResponse, Transactions, Direction, + RequestId, BlockAttributes, RemoteCallResponse, ConsensusEngineId, + BlockState, StorageProof, + }; + + bitflags! { + /// Bitmask of the roles that a node fulfills. + pub struct Roles: u8 { + /// No network. + const NONE = 0b00000000; + /// Full node, does not participate in consensus. + const FULL = 0b00000001; + /// Light client node. + const LIGHT = 0b00000010; + /// Act as an authority + const AUTHORITY = 0b00000100; + } + } + + impl Roles { + /// Does this role represents a client that holds full chain data locally? + pub fn is_full(&self) -> bool { + self.intersects(Roles::FULL | Roles::AUTHORITY) + } + + /// Does this role represents a client that does not participates in the consensus? + pub fn is_authority(&self) -> bool { + *self == Roles::AUTHORITY + } + + /// Does this role represents a client that does not hold full chain data locally? + pub fn is_light(&self) -> bool { + !self.is_full() + } + } + + impl<'a> From<&'a crate::config::Role> for Roles { + fn from(roles: &'a crate::config::Role) -> Self { + match roles { + crate::config::Role::Full => Roles::FULL, + crate::config::Role::Light => Roles::LIGHT, + crate::config::Role::Sentry { .. } => Roles::AUTHORITY, + crate::config::Role::Authority { .. 
} => Roles::AUTHORITY, + } + } + } + + impl codec::Encode for Roles { + fn encode_to(&self, dest: &mut T) { + dest.push_byte(self.bits()) + } + } + + impl codec::EncodeLike for Roles {} + + impl codec::Decode for Roles { + fn decode(input: &mut I) -> Result { + Self::from_bits(input.read_byte()?).ok_or_else(|| codec::Error::from("Invalid bytes")) + } + } + + /// Consensus is mostly opaque to us + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + pub struct ConsensusMessage { + /// Identifies consensus engine. + pub engine_id: ConsensusEngineId, + /// Message payload. + pub data: Vec, + } + + /// Block data sent in the response. + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + pub struct BlockData { + /// Block header hash. + pub hash: Hash, + /// Block header if requested. + pub header: Option

, + /// Block body if requested. + pub body: Option>, + /// Block receipt if requested. + pub receipt: Option>, + /// Block message queue if requested. + pub message_queue: Option>, + /// Justification if requested. + pub justification: Option, + } + + /// Identifies starting point of a block sequence. + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + pub enum FromBlock { + /// Start with given hash. + Hash(Hash), + /// Start with given block number. + Number(Number), + } + + /// A network message. + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + pub enum Message { + /// Status packet. + Status(Status), + /// Block request. + BlockRequest(BlockRequest), + /// Block response. + BlockResponse(BlockResponse), + /// Block announce. + BlockAnnounce(BlockAnnounce
), + /// Transactions. + Transactions(Transactions), + /// Consensus protocol message. + Consensus(ConsensusMessage), + /// Remote method call request. + RemoteCallRequest(RemoteCallRequest), + /// Remote method call response. + RemoteCallResponse(RemoteCallResponse), + /// Remote storage read request. + RemoteReadRequest(RemoteReadRequest), + /// Remote storage read response. + RemoteReadResponse(RemoteReadResponse), + /// Remote header request. + RemoteHeaderRequest(RemoteHeaderRequest), + /// Remote header response. + RemoteHeaderResponse(RemoteHeaderResponse
), + /// Remote changes request. + RemoteChangesRequest(RemoteChangesRequest), + /// Remote changes response. + RemoteChangesResponse(RemoteChangesResponse), + /// Remote child storage read request. + RemoteReadChildRequest(RemoteReadChildRequest), + /// Finality proof request. + FinalityProofRequest(FinalityProofRequest), + /// Finality proof response. + FinalityProofResponse(FinalityProofResponse), + /// Batch of consensus protocol messages. + ConsensusBatch(Vec), + } + + impl Message { + /// Message id useful for logging. + pub fn id(&self) -> &'static str { + match self { + Message::Status(_) => "Status", + Message::BlockRequest(_) => "BlockRequest", + Message::BlockResponse(_) => "BlockResponse", + Message::BlockAnnounce(_) => "BlockAnnounce", + Message::Transactions(_) => "Transactions", + Message::Consensus(_) => "Consensus", + Message::RemoteCallRequest(_) => "RemoteCallRequest", + Message::RemoteCallResponse(_) => "RemoteCallResponse", + Message::RemoteReadRequest(_) => "RemoteReadRequest", + Message::RemoteReadResponse(_) => "RemoteReadResponse", + Message::RemoteHeaderRequest(_) => "RemoteHeaderRequest", + Message::RemoteHeaderResponse(_) => "RemoteHeaderResponse", + Message::RemoteChangesRequest(_) => "RemoteChangesRequest", + Message::RemoteChangesResponse(_) => "RemoteChangesResponse", + Message::RemoteReadChildRequest(_) => "RemoteReadChildRequest", + Message::FinalityProofRequest(_) => "FinalityProofRequest", + Message::FinalityProofResponse(_) => "FinalityProofResponse", + Message::ConsensusBatch(_) => "ConsensusBatch", + } + } + } + + /// Status sent on connection. + // TODO https://github.com/paritytech/substrate/issues/4674: replace the `Status` + // struct with this one, after waiting a few releases beyond `NetworkSpecialization`'s + // removal (https://github.com/paritytech/substrate/pull/4665) + // + // and set MIN_VERSION to 6. 
+ #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + pub struct CompactStatus { + /// Protocol version. + pub version: u32, + /// Minimum supported version. + pub min_supported_version: u32, + /// Supported roles. + pub roles: Roles, + /// Best block number. + pub best_number: Number, + /// Best block hash. + pub best_hash: Hash, + /// Genesis block hash. + pub genesis_hash: Hash, + } + + /// Status sent on connection. + #[derive(Debug, PartialEq, Eq, Clone, Encode)] + pub struct Status { + /// Protocol version. + pub version: u32, + /// Minimum supported version. + pub min_supported_version: u32, + /// Supported roles. + pub roles: Roles, + /// Best block number. + pub best_number: Number, + /// Best block hash. + pub best_hash: Hash, + /// Genesis block hash. + pub genesis_hash: Hash, + /// DEPRECATED. Chain-specific status. + pub chain_status: Vec, + } + + impl Decode for Status { + fn decode(value: &mut I) -> Result { + const LAST_CHAIN_STATUS_VERSION: u32 = 5; + let compact = CompactStatus::decode(value)?; + let chain_status = match >::decode(value) { + Ok(v) => v, + Err(e) => if compact.version <= LAST_CHAIN_STATUS_VERSION { + return Err(e) + } else { + Vec::new() + } + }; + + let CompactStatus { + version, + min_supported_version, + roles, + best_number, + best_hash, + genesis_hash, + } = compact; + + Ok(Status { + version, + min_supported_version, + roles, + best_number, + best_hash, + genesis_hash, + chain_status, + }) + } + } + + /// Request block data from a peer. + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + pub struct BlockRequest { + /// Unique request id. + pub id: RequestId, + /// Bits of block data to request. + pub fields: BlockAttributes, + /// Start from this block. + pub from: FromBlock, + /// End at this block. An implementation defined maximum is used when unspecified. + pub to: Option, + /// Sequence direction. + pub direction: Direction, + /// Maximum number of blocks to return. 
An implementation defined maximum is used when unspecified. + pub max: Option, + } + + /// Response to `BlockRequest` + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + pub struct BlockResponse { + /// Id of a request this response was made for. + pub id: RequestId, + /// Block data for the requested sequence. + pub blocks: Vec>, + } + + /// Announce a new complete relay chain block on the network. + #[derive(Debug, PartialEq, Eq, Clone)] + pub struct BlockAnnounce { + /// New block header. + pub header: H, + /// Block state. TODO: Remove `Option` and custom encoding when v4 becomes common. + pub state: Option, + /// Data associated with this block announcement, e.g. a candidate message. + pub data: Option>, + } + + // Custom Encode/Decode impl to maintain backwards compatibility with v3. + // This assumes that the packet contains nothing but the announcement message. + // TODO: Get rid of it once protocol v4 is common. + impl Encode for BlockAnnounce { + fn encode_to(&self, dest: &mut T) { + self.header.encode_to(dest); + if let Some(state) = &self.state { + state.encode_to(dest); + } + if let Some(data) = &self.data { + data.encode_to(dest) + } + } + } + + impl Decode for BlockAnnounce { + fn decode(input: &mut I) -> Result { + let header = H::decode(input)?; + let state = BlockState::decode(input).ok(); + let data = Vec::decode(input).ok(); + Ok(BlockAnnounce { + header, + state, + data, + }) + } + } + + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + /// Remote call request. + pub struct RemoteCallRequest { + /// Unique request id. + pub id: RequestId, + /// Block at which to perform call. + pub block: H, + /// Method name. + pub method: String, + /// Call data. + pub data: Vec, + } + + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + /// Remote storage read request. + pub struct RemoteReadRequest { + /// Unique request id. + pub id: RequestId, + /// Block at which to perform call. + pub block: H, + /// Storage key. 
+ pub keys: Vec>, + } + + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + /// Remote storage read child request. + pub struct RemoteReadChildRequest { + /// Unique request id. + pub id: RequestId, + /// Block at which to perform call. + pub block: H, + /// Child Storage key. + pub storage_key: Vec, + /// Storage key. + pub keys: Vec>, + } + + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + /// Remote header request. + pub struct RemoteHeaderRequest { + /// Unique request id. + pub id: RequestId, + /// Block number to request header for. + pub block: N, + } + + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + /// Remote header response. + pub struct RemoteHeaderResponse
{ + /// Id of a request this response was made for. + pub id: RequestId, + /// Header. None if proof generation has failed (e.g. header is unknown). + pub header: Option
, + /// Header proof. + pub proof: StorageProof, + } + + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + /// Remote changes request. + pub struct RemoteChangesRequest { + /// Unique request id. + pub id: RequestId, + /// Hash of the first block of the range (including first) where changes are requested. + pub first: H, + /// Hash of the last block of the range (including last) where changes are requested. + pub last: H, + /// Hash of the first block for which the requester has the changes trie root. All other + /// affected roots must be proved. + pub min: H, + /// Hash of the last block that we can use when querying changes. + pub max: H, + /// Storage child node key which changes are requested. + pub storage_key: Option>, + /// Storage key which changes are requested. + pub key: Vec, + } + + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + /// Remote changes response. + pub struct RemoteChangesResponse { + /// Id of a request this response was made for. + pub id: RequestId, + /// Proof has been generated using block with this number as a max block. Should be + /// less than or equal to the RemoteChangesRequest::max block number. + pub max: N, + /// Changes proof. + pub proof: Vec>, + /// Changes tries roots missing on the requester' node. + pub roots: Vec<(N, H)>, + /// Missing changes tries roots proof. + pub roots_proof: StorageProof, + } + + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + /// Finality proof request. + pub struct FinalityProofRequest { + /// Unique request id. + pub id: RequestId, + /// Hash of the block to request proof for. + pub block: H, + /// Additional data blob (that both requester and provider understood) required for proving finality. + pub request: Vec, + } + + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + /// Finality proof response. + pub struct FinalityProofResponse { + /// Id of a request this response was made for. 
+ pub id: RequestId, + /// Hash of the block (the same as in the FinalityProofRequest). + pub block: H, + /// Finality proof (if available). + pub proof: Option>, + } +} diff --git a/client/network/src/protocol/sync.rs b/client/network/src/protocol/sync.rs new file mode 100644 index 0000000000000..88c03c4b119ce --- /dev/null +++ b/client/network/src/protocol/sync.rs @@ -0,0 +1,1643 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. +// +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Contains the state of the chain synchronization process +//! +//! At any given point in time, a running node tries as much as possible to be at the head of the +//! chain. This module handles the logic of which blocks to request from remotes, and processing +//! responses. It yields blocks to check and potentially move to the database. +//! +//! # Usage +//! +//! The `ChainSync` struct maintains the state of the block requests. Whenever something happens on +//! the network, or whenever a block has been successfully verified, call the appropriate method in +//! order to update it. +//! 
+ +use codec::Encode; +use blocks::BlockCollection; +use sp_blockchain::{Error as ClientError, Info as BlockchainInfo, HeaderMetadata}; +use sp_consensus::{BlockOrigin, BlockStatus, + block_validation::{BlockAnnounceValidator, Validation}, + import_queue::{IncomingBlock, BlockImportResult, BlockImportError} +}; +use crate::{ + config::BoxFinalityProofRequestBuilder, + protocol::message::{self, generic::FinalityProofRequest, BlockAnnounce, BlockAttributes, BlockRequest, BlockResponse, + FinalityProofResponse, Roles}, +}; +use either::Either; +use extra_requests::ExtraRequests; +use libp2p::PeerId; +use log::{debug, trace, warn, info, error}; +use sp_runtime::{ + Justification, + generic::BlockId, + traits::{Block as BlockT, Header, NumberFor, Zero, One, CheckedSub, SaturatedConversion, Hash, HashFor} +}; +use sp_arithmetic::traits::Saturating; +use std::{fmt, ops::Range, collections::{HashMap, HashSet, VecDeque}, sync::Arc}; + +mod blocks; +mod extra_requests; + +/// Maximum blocks to request in a single packet. +const MAX_BLOCKS_TO_REQUEST: usize = 128; + +/// Maximum blocks to store in the import queue. +const MAX_IMPORTING_BLOCKS: usize = 2048; + +/// Maximum blocks to download ahead of any gap. +const MAX_DOWNLOAD_AHEAD: u32 = 2048; + +/// We use a heuristic that with a high likelihood, by the time +/// `MAJOR_SYNC_BLOCKS` have been imported we'll be on the same +/// chain as (or at least closer to) the peer so we want to delay +/// the ancestor search to not waste time doing that when we are +/// so far behind. +const MAJOR_SYNC_BLOCKS: u8 = 5; + +/// Number of recently announced blocks to track for each peer. +const ANNOUNCE_HISTORY_SIZE: usize = 64; + +mod rep { + use sc_peerset::ReputationChange as Rep; + /// Reputation change when a peer sent us a message that led to a + /// database read error. 
+ pub const BLOCKCHAIN_READ_ERROR: Rep = Rep::new(-(1 << 16), "DB Error"); + + /// Reputation change when a peer sent us a status message with a different + /// genesis than us. + pub const GENESIS_MISMATCH: Rep = Rep::new(i32::min_value(), "Genesis mismatch"); + + /// Reputation change for peers which send us a block with an incomplete header. + pub const INCOMPLETE_HEADER: Rep = Rep::new(-(1 << 20), "Incomplete header"); + + /// Reputation change for peers which send us a block which we fail to verify. + pub const VERIFICATION_FAIL: Rep = Rep::new(-(1 << 29), "Block verification failed"); + + /// Reputation change for peers which send us a known bad block. + pub const BAD_BLOCK: Rep = Rep::new(-(1 << 29), "Bad block"); + + /// Peer did not provide us with advertised block data. + pub const NO_BLOCK: Rep = Rep::new(-(1 << 29), "No requested block data"); + + /// Reputation change for peers which send us a known block. + pub const KNOWN_BLOCK: Rep = Rep::new(-(1 << 29), "Duplicate block"); + + /// Reputation change for peers which send us a block with bad justifications. + pub const BAD_JUSTIFICATION: Rep = Rep::new(-(1 << 16), "Bad justification"); + + /// Reputation change for peers which send us a block with bad finality proof. + pub const BAD_FINALITY_PROOF: Rep = Rep::new(-(1 << 16), "Bad finality proof"); + + /// Reputation change when a peer sent us invlid ancestry result. 
+ pub const UNKNOWN_ANCESTOR:Rep = Rep::new(-(1 << 16), "DB Error"); +} + +enum PendingRequests { + Some(HashSet), + All, +} + +impl PendingRequests { + fn add(&mut self, id: &PeerId) { + match self { + PendingRequests::Some(set) => { + set.insert(id.clone()); + } + PendingRequests::All => {}, + } + } + + fn take(&mut self) -> PendingRequests { + std::mem::take(self) + } + + fn set_all(&mut self) { + *self = PendingRequests::All; + } + + fn contains(&self, id: &PeerId) -> bool { + match self { + PendingRequests::Some(set) => set.contains(id), + PendingRequests::All => true, + } + } + + fn is_empty(&self) -> bool { + match self { + PendingRequests::Some(set) => set.is_empty(), + PendingRequests::All => false, + } + } +} + +impl Default for PendingRequests { + fn default() -> Self { + PendingRequests::Some(HashSet::default()) + } +} + +/// The main data structure which contains all the state for a chains +/// active syncing strategy. +pub struct ChainSync { + /// Chain client. + client: Arc>, + /// The active peers that we are using to sync and their PeerSync status + peers: HashMap>, + /// A `BlockCollection` of blocks that are being downloaded from peers + blocks: BlockCollection, + /// The best block number in our queue of blocks to import + best_queued_number: NumberFor, + /// The best block hash in our queue of blocks to import + best_queued_hash: B::Hash, + /// The role of this node, e.g. light or full + role: Roles, + /// What block attributes we require for this node, usually derived from + /// what role we are, but could be customized + required_block_attributes: message::BlockAttributes, + /// Any extra finality proof requests. + extra_finality_proofs: ExtraRequests, + /// Any extra justification requests. + extra_justifications: ExtraRequests, + /// A set of hashes of blocks that are being downloaded or have been + /// downloaded and are queued for import. + queue_blocks: HashSet, + /// The best block number that was successfully imported into the chain. 
+ /// This can not decrease. + best_imported_number: NumberFor, + /// Finality proof handler. + request_builder: Option>, + /// Fork sync targets. + fork_targets: HashMap>, + /// A set of peers for which there might be potential block requests + pending_requests: PendingRequests, + /// A type to check incoming block announcements. + block_announce_validator: Box + Send>, + /// Maximum number of peers to ask the same blocks in parallel. + max_parallel_downloads: u32, + /// Total number of downloaded blocks. + downloaded_blocks: usize, +} + +/// All the data we have about a Peer that we are trying to sync with +#[derive(Debug, Clone)] +pub struct PeerSync { + /// The common number is the block number that is a common point of + /// ancestry for both our chains (as far as we know). + pub common_number: NumberFor, + /// The hash of the best block that we've seen for this peer. + pub best_hash: B::Hash, + /// The number of the best block that we've seen for this peer. + pub best_number: NumberFor, + /// The state of syncing this peer is in for us, generally categories + /// into `Available` or "busy" with something as defined by `PeerSyncState`. + pub state: PeerSyncState, + /// A queue of blocks that this peer has announced to us, should only + /// contain `ANNOUNCE_HISTORY_SIZE` entries. + pub recently_announced: VecDeque +} + +/// The sync status of a peer we are trying to sync with +#[derive(Debug)] +pub struct PeerInfo { + /// Their best block hash. + pub best_hash: B::Hash, + /// Their best block number. + pub best_number: NumberFor +} + +struct ForkTarget { + number: NumberFor, + parent_hash: Option, + peers: HashSet, +} + +/// The state of syncing between a Peer and ourselves. +/// +/// Generally two categories, "busy" or `Available`. If busy, the enum +/// defines what we are busy with. +#[derive(Copy, Clone, Eq, PartialEq, Debug)] +pub enum PeerSyncState { + /// Available for sync requests. 
+ Available, + /// Searching for ancestors the Peer has in common with us. + AncestorSearch { + start: NumberFor, + current: NumberFor, + state: AncestorSearchState, + }, + /// Actively downloading new blocks, starting from the given Number. + DownloadingNew(NumberFor), + /// Downloading a stale block with given Hash. Stale means that it is a + /// block with a number that is lower than our best number. It might be + /// from a fork and not necessarily already imported. + DownloadingStale(B::Hash), + /// Downloading justification for given block hash. + DownloadingJustification(B::Hash), + /// Downloading finality proof for given block hash. + DownloadingFinalityProof(B::Hash) +} + +impl PeerSyncState { + pub fn is_available(&self) -> bool { + if let PeerSyncState::Available = self { + true + } else { + false + } + } +} + +/// Reported sync state. +#[derive(Clone, Eq, PartialEq, Debug)] +pub enum SyncState { + /// Initial sync is complete, keep-up sync is active. + Idle, + /// Actively catching up with the chain. + Downloading +} + +/// Syncing status and statistics. +#[derive(Clone)] +pub struct Status { + /// Current global sync state. + pub state: SyncState, + /// Target sync block number. + pub best_seen_block: Option>, + /// Number of peers participating in syncing. + pub num_peers: u32, + /// Number of blocks queued for import + pub queued_blocks: u32, +} + +/// A peer did not behave as expected and should be reported. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct BadPeer(pub PeerId, pub sc_peerset::ReputationChange); + +impl fmt::Display for BadPeer { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Bad peer {}; Reputation change: {:?}", self.0, self.1) + } +} + +impl std::error::Error for BadPeer {} + +/// Result of [`ChainSync::on_block_data`]. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum OnBlockData { + /// The block should be imported. 
+ Import(BlockOrigin, Vec>), + /// A new block request needs to be made to the given peer. + Request(PeerId, BlockRequest) +} + +/// Result of [`ChainSync::on_block_announce`]. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum OnBlockAnnounce { + /// The announcement does not require further handling. + Nothing, + /// The announcement header should be imported. + ImportHeader, +} + +/// Result of [`ChainSync::on_block_justification`]. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum OnBlockJustification { + /// The justification needs no further handling. + Nothing, + /// The justification should be imported. + Import { + peer: PeerId, + hash: B::Hash, + number: NumberFor, + justification: Justification + } +} + +/// Result of [`ChainSync::on_block_finality_proof`]. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum OnBlockFinalityProof { + /// The proof needs no further handling. + Nothing, + /// The proof should be imported. + Import { + peer: PeerId, + hash: B::Hash, + number: NumberFor, + proof: Vec + } +} + +impl ChainSync { + /// Create a new instance. 
+ pub fn new( + role: Roles, + client: Arc>, + info: &BlockchainInfo, + request_builder: Option>, + block_announce_validator: Box + Send>, + max_parallel_downloads: u32, + ) -> Self { + let mut required_block_attributes = BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION; + + if role.is_full() { + required_block_attributes |= BlockAttributes::BODY + } + + ChainSync { + client, + peers: HashMap::new(), + blocks: BlockCollection::new(), + best_queued_hash: info.best_hash, + best_queued_number: info.best_number, + best_imported_number: info.best_number, + extra_finality_proofs: ExtraRequests::new("finality proof"), + extra_justifications: ExtraRequests::new("justification"), + role, + required_block_attributes, + queue_blocks: Default::default(), + request_builder, + fork_targets: Default::default(), + pending_requests: Default::default(), + block_announce_validator, + max_parallel_downloads, + downloaded_blocks: 0, + } + } + + /// Returns the state of the sync of the given peer. + /// + /// Returns `None` if the peer is unknown. + pub fn peer_info(&self, who: &PeerId) -> Option> { + self.peers.get(who).map(|p| PeerInfo { best_hash: p.best_hash, best_number: p.best_number }) + } + + /// Returns the current sync status. + pub fn status(&self) -> Status { + let best_seen = self.peers.values().map(|p| p.best_number).max(); + let sync_state = + if let Some(n) = best_seen { + // A chain is classified as downloading if the provided best block is + // more than `MAJOR_SYNC_BLOCKS` behind the best queued block. + if n > self.best_queued_number && n - self.best_queued_number > MAJOR_SYNC_BLOCKS.into() { + SyncState::Downloading + } else { + SyncState::Idle + } + } else { + SyncState::Idle + }; + + Status { + state: sync_state, + best_seen_block: best_seen, + num_peers: self.peers.len() as u32, + queued_blocks: self.queue_blocks.len() as u32, + } + } + + /// Number of active sync requests. 
+ pub fn num_sync_requests(&self) -> usize { + self.fork_targets.len() + } + + /// Number of downloaded blocks. + pub fn num_downloaded_blocks(&self) -> usize { + self.downloaded_blocks + } + + /// Handle a new connected peer. + /// + /// Call this method whenever we connect to a new peer. + pub fn new_peer(&mut self, who: PeerId, best_hash: B::Hash, best_number: NumberFor) + -> Result>, BadPeer> + { + // There is nothing sync can get from the node that has no blockchain data. + match self.block_status(&best_hash) { + Err(e) => { + debug!(target:"sync", "Error reading blockchain: {:?}", e); + Err(BadPeer(who, rep::BLOCKCHAIN_READ_ERROR)) + } + Ok(BlockStatus::KnownBad) => { + info!("💔 New peer with known bad best block {} ({}).", best_hash, best_number); + Err(BadPeer(who, rep::BAD_BLOCK)) + } + Ok(BlockStatus::Unknown) => { + if best_number.is_zero() { + info!("💔 New peer with unknown genesis hash {} ({}).", best_hash, best_number); + return Err(BadPeer(who, rep::GENESIS_MISMATCH)); + } + // If there are more than `MAJOR_SYNC_BLOCKS` in the import queue then we have + // enough to do in the import queue that it's not worth kicking off + // an ancestor search, which is what we do in the next match case below. + if self.queue_blocks.len() > MAJOR_SYNC_BLOCKS.into() { + debug!( + target:"sync", + "New peer with unknown best hash {} ({}), assuming common block.", + self.best_queued_hash, + self.best_queued_number + ); + self.peers.insert(who, PeerSync { + common_number: self.best_queued_number, + best_hash, + best_number, + state: PeerSyncState::Available, + recently_announced: Default::default() + }); + return Ok(None) + } + + // If we are at genesis, just start downloading. 
+ if self.best_queued_number.is_zero() { + debug!(target:"sync", "New peer with best hash {} ({}).", best_hash, best_number); + self.peers.insert(who.clone(), PeerSync { + common_number: Zero::zero(), + best_hash, + best_number, + state: PeerSyncState::Available, + recently_announced: Default::default(), + }); + self.pending_requests.add(&who); + return Ok(None) + } + + let common_best = std::cmp::min(self.best_queued_number, best_number); + + debug!(target:"sync", + "New peer with unknown best hash {} ({}), searching for common ancestor.", + best_hash, + best_number + ); + + self.pending_requests.add(&who); + self.peers.insert(who, PeerSync { + common_number: Zero::zero(), + best_hash, + best_number, + state: PeerSyncState::AncestorSearch { + current: common_best, + start: self.best_queued_number, + state: AncestorSearchState::ExponentialBackoff(One::one()), + }, + recently_announced: Default::default() + }); + + Ok(Some(ancestry_request::(common_best))) + } + Ok(BlockStatus::Queued) | Ok(BlockStatus::InChainWithState) | Ok(BlockStatus::InChainPruned) => { + debug!(target:"sync", "New peer with known best hash {} ({}).", best_hash, best_number); + self.peers.insert(who.clone(), PeerSync { + common_number: best_number, + best_hash, + best_number, + state: PeerSyncState::Available, + recently_announced: Default::default(), + }); + self.pending_requests.add(&who); + Ok(None) + } + } + } + + /// Signal that a new best block has been imported. + /// `ChainSync` state with that information. + pub fn update_chain_info(&mut self, best_hash: &B::Hash, best_number: NumberFor) { + self.on_block_queued(best_hash, best_number); + } + + /// Schedule a justification request for the given block. 
+ pub fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { + let client = &self.client; + self.extra_justifications.schedule((*hash, number), |base, block| { + is_descendent_of(&**client, base, block) + }) + } + + /// Schedule a finality proof request for the given block. + pub fn request_finality_proof(&mut self, hash: &B::Hash, number: NumberFor) { + let client = &self.client; + self.extra_finality_proofs.schedule((*hash, number), |base, block| { + is_descendent_of(&**client, base, block) + }) + } + + /// Request syncing for the given block from given set of peers. + // The implementation is similar to on_block_announce with unknown parent hash. + pub fn set_sync_fork_request( + &mut self, + mut peers: Vec, + hash: &B::Hash, + number: NumberFor, + ) { + if peers.is_empty() { + peers = self.peers.iter() + // Only request blocks from peers who are ahead or on a par. + .filter(|(_, peer)| peer.best_number >= number) + .map(|(id, _)| id.clone()) + .collect(); + + debug!( + target: "sync", + "Explicit sync request for block {:?} with no peers specified. \ + Syncing from these peers {:?} instead.", + hash, peers, + ); + } else { + debug!(target: "sync", "Explicit sync request for block {:?} with {:?}", hash, peers); + } + + if self.is_known(&hash) { + debug!(target: "sync", "Refusing to sync known hash {:?}", hash); + return; + } + + trace!(target: "sync", "Downloading requested old fork {:?}", hash); + for peer_id in &peers { + if let Some(peer) = self.peers.get_mut(peer_id) { + if let PeerSyncState::AncestorSearch {..} = peer.state { + continue; + } + + if number > peer.best_number { + peer.best_number = number; + peer.best_hash = *hash; + } + self.pending_requests.add(peer_id); + } + } + + self.fork_targets + .entry(hash.clone()) + .or_insert_with(|| ForkTarget { + number, + peers: Default::default(), + parent_hash: None, + }) + .peers.extend(peers); + } + + /// Get an iterator over all scheduled justification requests. 
+ pub fn justification_requests(&mut self) -> impl Iterator)> + '_ { + let peers = &mut self.peers; + let mut matcher = self.extra_justifications.matcher(); + std::iter::from_fn(move || { + if let Some((peer, request)) = matcher.next(&peers) { + peers.get_mut(&peer) + .expect("`Matcher::next` guarantees the `PeerId` comes from the given peers; qed") + .state = PeerSyncState::DownloadingJustification(request.0); + let req = message::generic::BlockRequest { + id: 0, + fields: BlockAttributes::JUSTIFICATION, + from: message::FromBlock::Hash(request.0), + to: None, + direction: message::Direction::Ascending, + max: Some(1) + }; + Some((peer, req)) + } else { + None + } + }) + } + + /// Get an iterator over all scheduled finality proof requests. + pub fn finality_proof_requests(&mut self) -> impl Iterator)> + '_ { + let peers = &mut self.peers; + let request_builder = &mut self.request_builder; + let mut matcher = self.extra_finality_proofs.matcher(); + std::iter::from_fn(move || { + if let Some((peer, request)) = matcher.next(&peers) { + peers.get_mut(&peer) + .expect("`Matcher::next` guarantees the `PeerId` comes from the given peers; qed") + .state = PeerSyncState::DownloadingFinalityProof(request.0); + let req = message::generic::FinalityProofRequest { + id: 0, + block: request.0, + request: request_builder.as_mut() + .map(|builder| builder.build_request_data(&request.0)) + .unwrap_or_default() + }; + Some((peer, req)) + } else { + None + } + }) + } + + /// Get an iterator over all block requests of all peers. 
+ pub fn block_requests(&mut self) -> impl Iterator)> + '_ { + if self.pending_requests.is_empty() { + return Either::Left(std::iter::empty()) + } + if self.queue_blocks.len() > MAX_IMPORTING_BLOCKS { + trace!(target: "sync", "Too many blocks in the queue."); + return Either::Left(std::iter::empty()) + } + let major_sync = self.status().state == SyncState::Downloading; + let blocks = &mut self.blocks; + let attrs = &self.required_block_attributes; + let fork_targets = &mut self.fork_targets; + let last_finalized = self.client.info().finalized_number; + let best_queued = self.best_queued_number; + let client = &self.client; + let queue = &self.queue_blocks; + let pending_requests = self.pending_requests.take(); + let max_parallel = if major_sync { 1 } else { self.max_parallel_downloads }; + let iter = self.peers.iter_mut().filter_map(move |(id, peer)| { + if !peer.state.is_available() || !pending_requests.contains(id) { + return None + } + + if let Some((range, req)) = peer_block_request( + id, + peer, + blocks, + attrs, + max_parallel, + last_finalized, + best_queued, + ) { + peer.state = PeerSyncState::DownloadingNew(range.start); + trace!( + target: "sync", + "New block request for {}, (best:{}, common:{}) {:?}", + id, + peer.best_number, + peer.common_number, + req, + ); + Some((id, req)) + } else if let Some((hash, req)) = fork_sync_request( + id, + fork_targets, + best_queued, + last_finalized, + attrs, + |hash| if queue.contains(hash) { + BlockStatus::Queued + } else { + client.block_status(&BlockId::Hash(*hash)).unwrap_or(BlockStatus::Unknown) + }, + ) { + trace!(target: "sync", "Downloading fork {:?} from {}", hash, id); + peer.state = PeerSyncState::DownloadingStale(hash); + Some((id, req)) + } else { + None + } + }); + Either::Right(iter) + } + + /// Handle a response from the remote to a block request that we made. + /// + /// `request` must be the original request that triggered `response`. + /// or `None` if data comes from the block announcement. 
+ /// + /// If this corresponds to a valid block, this outputs the block that + /// must be imported in the import queue. + pub fn on_block_data( + &mut self, + who: &PeerId, + request: Option>, + response: BlockResponse + ) -> Result, BadPeer> { + self.downloaded_blocks += response.blocks.len(); + let mut new_blocks: Vec> = + if let Some(peer) = self.peers.get_mut(who) { + let mut blocks = response.blocks; + if request.as_ref().map_or(false, |r| r.direction == message::Direction::Descending) { + trace!(target: "sync", "Reversing incoming block list"); + blocks.reverse() + } + self.pending_requests.add(who); + if request.is_some() { + match &mut peer.state { + PeerSyncState::DownloadingNew(start_block) => { + self.blocks.clear_peer_download(who); + let start_block = *start_block; + peer.state = PeerSyncState::Available; + validate_blocks::(&blocks, who)?; + self.blocks.insert(start_block, blocks, who.clone()); + self.blocks + .drain(self.best_queued_number + One::one()) + .into_iter() + .map(|block_data| { + IncomingBlock { + hash: block_data.block.hash, + header: block_data.block.header, + body: block_data.block.body, + justification: block_data.block.justification, + origin: block_data.origin, + allow_missing_state: true, + import_existing: false, + } + }).collect() + } + PeerSyncState::DownloadingStale(_) => { + peer.state = PeerSyncState::Available; + if blocks.is_empty() { + debug!(target: "sync", "Empty block response from {}", who); + return Err(BadPeer(who.clone(), rep::NO_BLOCK)); + } + validate_blocks::(&blocks, who)?; + blocks.into_iter().map(|b| { + IncomingBlock { + hash: b.hash, + header: b.header, + body: b.body, + justification: b.justification, + origin: Some(who.clone()), + allow_missing_state: true, + import_existing: false, + } + }).collect() + } + PeerSyncState::AncestorSearch { current, start, state } => { + let matching_hash = match (blocks.get(0), self.client.hash(*current)) { + (Some(block), Ok(maybe_our_block_hash)) => { + trace!(target: 
"sync", "Got ancestry block #{} ({}) from peer {}", current, block.hash, who); + maybe_our_block_hash.filter(|x| x == &block.hash) + }, + (None, _) => { + debug!(target: "sync", "Invalid response when searching for ancestor from {}", who); + return Err(BadPeer(who.clone(), rep::UNKNOWN_ANCESTOR)) + }, + (_, Err(e)) => { + info!("❌ Error answering legitimate blockchain query: {:?}", e); + return Err(BadPeer(who.clone(), rep::BLOCKCHAIN_READ_ERROR)) + } + }; + if matching_hash.is_some() { + if *start < self.best_queued_number && self.best_queued_number <= peer.best_number { + // We've made progress on this chain since the search was started. + // Opportunistically set common number to updated number + // instead of the one that started the search. + peer.common_number = self.best_queued_number; + } + else if peer.common_number < *current { + peer.common_number = *current; + } + } + if matching_hash.is_none() && current.is_zero() { + trace!(target:"sync", "Ancestry search: genesis mismatch for peer {}", who); + return Err(BadPeer(who.clone(), rep::GENESIS_MISMATCH)) + } + if let Some((next_state, next_num)) = handle_ancestor_search_state(state, *current, matching_hash.is_some()) { + peer.state = PeerSyncState::AncestorSearch { + current: next_num, + start: *start, + state: next_state, + }; + return Ok(OnBlockData::Request(who.clone(), ancestry_request::(next_num))) + } else { + // Ancestry search is complete. Check if peer is on a stale fork unknown to us and + // add it to sync targets if necessary. + trace!(target: "sync", "Ancestry search complete. 
Ours={} ({}), Theirs={} ({}), Common={:?} ({})", + self.best_queued_hash, + self.best_queued_number, + peer.best_hash, + peer.best_number, + matching_hash, + peer.common_number, + ); + if peer.common_number < peer.best_number + && peer.best_number < self.best_queued_number + { + trace!(target: "sync", "Added fork target {} for {}" , peer.best_hash, who); + self.fork_targets + .entry(peer.best_hash.clone()) + .or_insert_with(|| ForkTarget { + number: peer.best_number, + parent_hash: None, + peers: Default::default(), + }) + .peers.insert(who.clone()); + } + peer.state = PeerSyncState::Available; + Vec::new() + } + } + + | PeerSyncState::Available + | PeerSyncState::DownloadingJustification(..) + | PeerSyncState::DownloadingFinalityProof(..) => Vec::new() + } + } else { + // When request.is_none() this is a block announcement. Just accept blocks. + validate_blocks::(&blocks, who)?; + blocks.into_iter().map(|b| { + IncomingBlock { + hash: b.hash, + header: b.header, + body: b.body, + justification: b.justification, + origin: Some(who.clone()), + allow_missing_state: true, + import_existing: false, + } + }).collect() + } + } else { + Vec::new() + }; + + // When doing initial sync we don't request blocks in parallel. + // So the only way this can happen is when peers lie about the + // common block. + let is_recent = new_blocks.first() + .map(|block| { + self.peers.iter().any(|(_, peer)| peer.recently_announced.contains(&block.hash)) + }) + .unwrap_or(false); + + if !is_recent && new_blocks.last().map_or(false, |b| self.is_known(&b.hash)) { + // When doing initial sync we don't request blocks in parallel. + // So the only way this can happen is when peers lie about the + // common block. 
+ debug!(target: "sync", "Ignoring known blocks from {}", who); + return Err(BadPeer(who.clone(), rep::KNOWN_BLOCK)); + } + let orig_len = new_blocks.len(); + new_blocks.retain(|b| !self.queue_blocks.contains(&b.hash)); + if new_blocks.len() != orig_len { + debug!(target: "sync", "Ignoring {} blocks that are already queued", orig_len - new_blocks.len()); + } + + let origin = + if is_recent { + BlockOrigin::NetworkBroadcast + } else { + BlockOrigin::NetworkInitialSync + }; + + if let Some((h, n)) = new_blocks.last().and_then(|b| b.header.as_ref().map(|h| (&b.hash, *h.number()))) { + trace!(target:"sync", "Accepted {} blocks ({:?}) with origin {:?}", new_blocks.len(), h, origin); + self.on_block_queued(h, n) + } + + self.queue_blocks.extend(new_blocks.iter().map(|b| b.hash)); + + Ok(OnBlockData::Import(origin, new_blocks)) + } + + /// Handle a response from the remote to a justification request that we made. + /// + /// `request` must be the original request that triggered `response`. + /// + /// Returns `Some` if this produces a justification that must be imported + /// into the import queue. 
+ pub fn on_block_justification + (&mut self, who: PeerId, response: BlockResponse) -> Result, BadPeer> + { + let peer = + if let Some(peer) = self.peers.get_mut(&who) { + peer + } else { + error!(target: "sync", "💔 Called on_block_justification with a bad peer ID"); + return Ok(OnBlockJustification::Nothing) + }; + + self.pending_requests.add(&who); + if let PeerSyncState::DownloadingJustification(hash) = peer.state { + peer.state = PeerSyncState::Available; + + // We only request one justification at a time + let justification = if let Some(block) = response.blocks.into_iter().next() { + if hash != block.hash { + info!( + target: "sync", + "💔 Invalid block justification provided by {}: requested: {:?} got: {:?}", who, hash, block.hash + ); + return Err(BadPeer(who, rep::BAD_JUSTIFICATION)); + } + + block.justification + } else { + // we might have asked the peer for a justification on a block that we assumed it + // had but didn't (regardless of whether it had a justification for it or not). + trace!(target: "sync", + "Peer {:?} provided empty response for justification request {:?}", + who, + hash, + ); + + None + }; + + if let Some((peer, hash, number, j)) = self.extra_justifications.on_response(who, justification) { + return Ok(OnBlockJustification::Import { peer, hash, number, justification: j }) + } + } + + Ok(OnBlockJustification::Nothing) + } + + /// Handle new finality proof data. + pub fn on_block_finality_proof + (&mut self, who: PeerId, resp: FinalityProofResponse) -> Result, BadPeer> + { + let peer = + if let Some(peer) = self.peers.get_mut(&who) { + peer + } else { + error!(target: "sync", "💔 Called on_block_finality_proof_data with a bad peer ID"); + return Ok(OnBlockFinalityProof::Nothing) + }; + + self.pending_requests.add(&who); + if let PeerSyncState::DownloadingFinalityProof(hash) = peer.state { + peer.state = PeerSyncState::Available; + + // We only request one finality proof at a time. 
+ if hash != resp.block { + info!( + target: "sync", + "💔 Invalid block finality proof provided: requested: {:?} got: {:?}", + hash, + resp.block + ); + return Err(BadPeer(who, rep::BAD_FINALITY_PROOF)); + } + + if let Some((peer, hash, number, p)) = self.extra_finality_proofs.on_response(who, resp.proof) { + return Ok(OnBlockFinalityProof::Import { peer, hash, number, proof: p }) + } + } + + Ok(OnBlockFinalityProof::Nothing) + } + + /// A batch of blocks have been processed, with or without errors. + /// + /// Call this when a batch of blocks have been processed by the import + /// queue, with or without errors. + /// + /// `peer_info` is passed in case of a restart. + pub fn on_blocks_processed<'a>( + &'a mut self, + imported: usize, + count: usize, + results: Vec<(Result>, BlockImportError>, B::Hash)>, + ) -> impl Iterator), BadPeer>> + 'a { + trace!(target: "sync", "Imported {} of {}", imported, count); + + let mut output = Vec::new(); + + let mut has_error = false; + for (_, hash) in &results { + self.queue_blocks.remove(&hash); + } + for (result, hash) in results { + if has_error { + continue; + } + + if result.is_err() { + has_error = true; + } + + match result { + Ok(BlockImportResult::ImportedKnown(_number)) => {} + Ok(BlockImportResult::ImportedUnknown(number, aux, who)) => { + if aux.clear_justification_requests { + trace!( + target: "sync", + "Block imported clears all pending justification requests {}: {:?}", + number, + hash + ); + self.extra_justifications.reset() + } + + if aux.needs_justification { + trace!(target: "sync", "Block imported but requires justification {}: {:?}", number, hash); + self.request_justification(&hash, number); + } + + if aux.bad_justification { + if let Some(peer) = who { + info!("💔 Sent block with bad justification to import"); + output.push(Err(BadPeer(peer, rep::BAD_JUSTIFICATION))); + } + } + + if aux.needs_finality_proof { + trace!(target: "sync", "Block imported but requires finality proof {}: {:?}", number, hash); + 
self.request_finality_proof(&hash, number); + } + + if number > self.best_imported_number { + self.best_imported_number = number; + } + }, + Err(BlockImportError::IncompleteHeader(who)) => { + if let Some(peer) = who { + warn!("💔 Peer sent block with incomplete header to import"); + output.push(Err(BadPeer(peer, rep::INCOMPLETE_HEADER))); + output.extend(self.restart()); + } + }, + Err(BlockImportError::VerificationFailed(who, e)) => { + if let Some(peer) = who { + warn!("💔 Verification failed for block {:?} received from peer: {}, {:?}", hash, peer, e); + output.push(Err(BadPeer(peer, rep::VERIFICATION_FAIL))); + output.extend(self.restart()); + } + }, + Err(BlockImportError::BadBlock(who)) => { + if let Some(peer) = who { + info!("💔 Block {:?} received from peer {} has been blacklisted", hash, peer); + output.push(Err(BadPeer(peer, rep::BAD_BLOCK))); + } + }, + Err(BlockImportError::MissingState) => { + // This may happen if the chain we were requesting upon has been discarded + // in the meantime because other chain has been finalized. + // Don't mark it as bad as it still may be synced if explicitly requested. + trace!(target: "sync", "Obsolete block {:?}", hash); + }, + e @ Err(BlockImportError::UnknownParent) | + e @ Err(BlockImportError::Other(_)) => { + warn!(target: "sync", "💔 Error importing block {:?}: {:?}", hash, e); + output.extend(self.restart()); + }, + Err(BlockImportError::Cancelled) => {} + }; + } + + self.pending_requests.set_all(); + output.into_iter() + } + + /// Call this when a justification has been processed by the import queue, + /// with or without errors. 
+ pub fn on_justification_import(&mut self, hash: B::Hash, number: NumberFor, success: bool) { + let finalization_result = if success { Ok((hash, number)) } else { Err(()) }; + self.extra_justifications.try_finalize_root((hash, number), finalization_result, true); + self.pending_requests.set_all(); + } + + pub fn on_finality_proof_import(&mut self, req: (B::Hash, NumberFor), res: Result<(B::Hash, NumberFor), ()>) { + self.extra_finality_proofs.try_finalize_root(req, res, true); + self.pending_requests.set_all(); + } + + /// Notify about finalization of the given block. + pub fn on_block_finalized(&mut self, hash: &B::Hash, number: NumberFor) { + let client = &self.client; + let r = self.extra_finality_proofs.on_block_finalized(hash, number, |base, block| { + is_descendent_of(&**client, base, block) + }); + + if let Err(err) = r { + warn!(target: "sync", "💔 Error cleaning up pending extra finality proof data requests: {:?}", err) + } + + let client = &self.client; + let r = self.extra_justifications.on_block_finalized(hash, number, |base, block| { + is_descendent_of(&**client, base, block) + }); + + if let Err(err) = r { + warn!(target: "sync", "💔 Error cleaning up pending extra justification data requests: {:?}", err); + } + } + + /// Called when a block has been queued for import. + /// + /// Updates our internal state for best queued block and then goes + /// through all peers to update our view of their state as well. + fn on_block_queued(&mut self, hash: &B::Hash, number: NumberFor) { + if self.fork_targets.remove(&hash).is_some() { + trace!(target: "sync", "Completed fork sync {:?}", hash); + } + if number > self.best_queued_number { + self.best_queued_number = number; + self.best_queued_hash = *hash; + // Update common blocks + for (n, peer) in self.peers.iter_mut() { + if let PeerSyncState::AncestorSearch {..} = peer.state { + // Wait for ancestry search to complete first. 
+ continue; + } + let new_common_number = if peer.best_number >= number { + number + } else { + peer.best_number + }; + trace!( + target: "sync", + "Updating peer {} info, ours={}, common={}->{}, their best={}", + n, + number, + peer.common_number, + new_common_number, + peer.best_number, + ); + peer.common_number = new_common_number; + } + } + self.pending_requests.set_all(); + } + + /// Call when a node announces a new block. + /// + /// If `OnBlockAnnounce::ImportHeader` is returned, then the caller MUST try to import passed + /// header (call `on_block_data`). The network request isn't sent + /// in this case. Both hash and header is passed as an optimization + /// to avoid rehashing the header. + pub fn on_block_announce(&mut self, who: &PeerId, hash: B::Hash, announce: &BlockAnnounce, is_best: bool) + -> OnBlockAnnounce + { + let header = &announce.header; + let number = *header.number(); + debug!(target: "sync", "Received block announcement {:?} with number {:?} from {}", hash, number, who); + if number.is_zero() { + warn!(target: "sync", "💔 Ignored genesis block (#0) announcement from {}: {}", who, hash); + return OnBlockAnnounce::Nothing + } + let parent_status = self.block_status(header.parent_hash()).ok().unwrap_or(BlockStatus::Unknown); + let known_parent = parent_status != BlockStatus::Unknown; + let ancient_parent = parent_status == BlockStatus::InChainPruned; + + let known = self.is_known(&hash); + let peer = if let Some(peer) = self.peers.get_mut(who) { + peer + } else { + error!(target: "sync", "💔 Called on_block_announce with a bad peer ID"); + return OnBlockAnnounce::Nothing + }; + while peer.recently_announced.len() >= ANNOUNCE_HISTORY_SIZE { + peer.recently_announced.pop_front(); + } + peer.recently_announced.push_back(hash.clone()); + + // Let external validator check the block announcement. 
+ let assoc_data = announce.data.as_ref().map_or(&[][..], |v| v.as_slice()); + let is_best = match self.block_announce_validator.validate(&header, assoc_data) { + Ok(Validation::Success { is_new_best }) => is_new_best || is_best, + Ok(Validation::Failure) => { + debug!(target: "sync", "Block announcement validation of block {} from {} failed", hash, who); + return OnBlockAnnounce::Nothing + } + Err(e) => { + error!(target: "sync", "💔 Block announcement validation errored: {}", e); + return OnBlockAnnounce::Nothing + } + }; + + if is_best { + // update their best block + peer.best_number = number; + peer.best_hash = hash; + } + if let PeerSyncState::AncestorSearch {..} = peer.state { + return OnBlockAnnounce::Nothing + } + // If the announced block is the best they have and is not ahead of us, our common number + // is either one further ahead or it's the one they just announced, if we know about it. + if is_best { + if known && self.best_queued_number >= number { + peer.common_number = number + } else if header.parent_hash() == &self.best_queued_hash + || known_parent && self.best_queued_number >= number + { + peer.common_number = number - One::one(); + } + } + self.pending_requests.add(who); + + // known block case + if known || self.is_already_downloading(&hash) { + trace!(target: "sync", "Known block announce from {}: {}", who, hash); + if let Some(target) = self.fork_targets.get_mut(&hash) { + target.peers.insert(who.clone()); + } + return OnBlockAnnounce::Nothing + } + + if ancient_parent { + trace!(target: "sync", "Ignored ancient block announced from {}: {} {:?}", who, hash, header); + return OnBlockAnnounce::Nothing + } + + let requires_additional_data = !self.role.is_light() || !known_parent; + if !requires_additional_data { + trace!(target: "sync", "Importing new header announced from {}: {} {:?}", who, hash, header); + return OnBlockAnnounce::ImportHeader + } + + if number <= self.best_queued_number { + trace!( + target: "sync", + "Added sync target for 
block announced from {}: {} {:?}", who, hash, header + ); + self.fork_targets + .entry(hash.clone()) + .or_insert_with(|| ForkTarget { + number, + parent_hash: Some(*header.parent_hash()), + peers: Default::default(), + }) + .peers.insert(who.clone()); + } + + OnBlockAnnounce::Nothing + } + + /// Call when a peer has disconnected. + pub fn peer_disconnected(&mut self, who: &PeerId) { + self.blocks.clear_peer_download(who); + self.peers.remove(who); + self.extra_justifications.peer_disconnected(who); + self.extra_finality_proofs.peer_disconnected(who); + self.pending_requests.set_all(); + } + + /// Restart the sync process. + fn restart<'a>(&'a mut self) -> impl Iterator), BadPeer>> + 'a { + self.blocks.clear(); + let info = self.client.info(); + self.best_queued_hash = info.best_hash; + self.best_queued_number = std::cmp::max(info.best_number, self.best_imported_number); + self.pending_requests.set_all(); + debug!(target:"sync", "Restarted with {} ({})", self.best_queued_number, self.best_queued_hash); + let old_peers = std::mem::take(&mut self.peers); + old_peers.into_iter().filter_map(move |(id, p)| { + match self.new_peer(id.clone(), p.best_hash, p.best_number) { + Ok(None) => None, + Ok(Some(x)) => Some(Ok((id, x))), + Err(e) => Some(Err(e)) + } + }) + } + + /// What is the status of the block corresponding to the given hash? + fn block_status(&self, hash: &B::Hash) -> Result { + if self.queue_blocks.contains(hash) { + return Ok(BlockStatus::Queued) + } + self.client.block_status(&BlockId::Hash(*hash)) + } + + /// Is the block corresponding to the given hash known? + fn is_known(&self, hash: &B::Hash) -> bool { + self.block_status(hash).ok().map_or(false, |s| s != BlockStatus::Unknown) + } + + /// Is any peer downloading the given hash? + fn is_already_downloading(&self, hash: &B::Hash) -> bool { + self.peers.iter().any(|(_, p)| p.state == PeerSyncState::DownloadingStale(*hash)) + } + + /// Return some key metrics. 
+ pub(crate) fn metrics(&self) -> Metrics { + use std::convert::TryInto; + Metrics { + queued_blocks: self.queue_blocks.len().try_into().unwrap_or(std::u32::MAX), + fork_targets: self.fork_targets.len().try_into().unwrap_or(std::u32::MAX), + finality_proofs: self.extra_finality_proofs.metrics(), + justifications: self.extra_justifications.metrics(), + _priv: () + } + } +} + +#[derive(Debug)] +pub(crate) struct Metrics { + pub(crate) queued_blocks: u32, + pub(crate) fork_targets: u32, + pub(crate) finality_proofs: extra_requests::Metrics, + pub(crate) justifications: extra_requests::Metrics, + _priv: () +} + +/// Request the ancestry for a block. Sends a request for header and justification for the given +/// block number. Used during ancestry search. +fn ancestry_request(block: NumberFor) -> BlockRequest { + message::generic::BlockRequest { + id: 0, + fields: BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION, + from: message::FromBlock::Number(block), + to: None, + direction: message::Direction::Ascending, + max: Some(1) + } +} + +/// The ancestor search state expresses which algorithm, and its stateful parameters, we are using to +/// try to find an ancestor block +#[derive(Copy, Clone, Eq, PartialEq, Debug)] +pub enum AncestorSearchState { + /// Use exponential backoff to find an ancestor, then switch to binary search. + /// We keep track of the exponent. + ExponentialBackoff(NumberFor), + /// Using binary search to find the best ancestor. + /// We keep track of left and right bounds. + BinarySearch(NumberFor, NumberFor), +} + +/// This function handles the ancestor search strategy used. The goal is to find a common point +/// that both our chains agree on that is as close to the tip as possible. +/// The way this works is we first have an exponential backoff strategy, where we try to step +/// forward until we find a block hash mismatch. The size of the step doubles each step we take. 
+/// +/// When we've found a block hash mismatch we then fall back to a binary search between the two +/// last known points to find the common block closest to the tip. +fn handle_ancestor_search_state( + state: &AncestorSearchState, + curr_block_num: NumberFor, + block_hash_match: bool +) -> Option<(AncestorSearchState, NumberFor)> { + let two = >::one() + >::one(); + match state { + AncestorSearchState::ExponentialBackoff(next_distance_to_tip) => { + let next_distance_to_tip = *next_distance_to_tip; + if block_hash_match && next_distance_to_tip == One::one() { + // We found the ancestor in the first step so there is no need to execute binary search. + return None; + } + if block_hash_match { + let left = curr_block_num; + let right = left + next_distance_to_tip / two; + let middle = left + (right - left) / two; + Some((AncestorSearchState::BinarySearch(left, right), middle)) + } else { + let next_block_num = curr_block_num.checked_sub(&next_distance_to_tip) + .unwrap_or_else(Zero::zero); + let next_distance_to_tip = next_distance_to_tip * two; + Some((AncestorSearchState::ExponentialBackoff(next_distance_to_tip), next_block_num)) + } + } + AncestorSearchState::BinarySearch(mut left, mut right) => { + if left >= curr_block_num { + return None; + } + if block_hash_match { + left = curr_block_num; + } else { + right = curr_block_num; + } + assert!(right >= left); + let middle = left + (right - left) / two; + Some((AncestorSearchState::BinarySearch(left, right), middle)) + } + } +} + +/// Get a new block request for the peer if any. +fn peer_block_request( + id: &PeerId, + peer: &PeerSync, + blocks: &mut BlockCollection, + attrs: &message::BlockAttributes, + max_parallel_downloads: u32, + finalized: NumberFor, + best_num: NumberFor, +) -> Option<(Range>, BlockRequest)> { + if best_num >= peer.best_number { + // Will be downloaded as alternative fork instead. 
+ return None; + } + if peer.common_number < finalized { + trace!( + target: "sync", + "Requesting pre-finalized chain from {:?}, common={}, finalized={}, peer best={}, our best={}", + id, finalized, peer.common_number, peer.best_number, best_num, + ); + } + if let Some(range) = blocks.needed_blocks( + id.clone(), + MAX_BLOCKS_TO_REQUEST, + peer.best_number, + peer.common_number, + max_parallel_downloads, + MAX_DOWNLOAD_AHEAD, + ) { + // The end is not part of the range. + let last = range.end.saturating_sub(One::one()); + + let from = if peer.best_number == last { + message::FromBlock::Hash(peer.best_hash) + } else { + message::FromBlock::Number(last) + }; + + let request = message::generic::BlockRequest { + id: 0, + fields: attrs.clone(), + from, + to: None, + direction: message::Direction::Descending, + max: Some((range.end - range.start).saturated_into::()) + }; + + Some((range, request)) + } else { + None + } +} + +/// Get pending fork sync targets for a peer. +fn fork_sync_request( + id: &PeerId, + targets: &mut HashMap>, + best_num: NumberFor, + finalized: NumberFor, + attributes: &message::BlockAttributes, + check_block: impl Fn(&B::Hash) -> BlockStatus, +) -> Option<(B::Hash, BlockRequest)> +{ + targets.retain(|hash, r| { + if r.number <= finalized { + trace!(target: "sync", "Removed expired fork sync request {:?} (#{})", hash, r.number); + return false; + } + if check_block(hash) != BlockStatus::Unknown { + trace!(target: "sync", "Removed obsolete fork sync request {:?} (#{})", hash, r.number); + return false; + } + true + }); + for (hash, r) in targets { + if !r.peers.contains(id) { + continue + } + if r.number <= best_num { + let parent_status = r.parent_hash.as_ref().map_or(BlockStatus::Unknown, check_block); + let count = if parent_status == BlockStatus::Unknown { + (r.number - finalized).saturated_into::() // up to the last finalized block + } else { + // request only single block + 1 + }; + trace!(target: "sync", "Downloading requested fork {:?} 
from {}, {} blocks", hash, id, count); + return Some((hash.clone(), message::generic::BlockRequest { + id: 0, + fields: attributes.clone(), + from: message::FromBlock::Hash(hash.clone()), + to: None, + direction: message::Direction::Descending, + max: Some(count), + })) + } + } + None +} + +/// Returns `true` if the given `block` is a descendent of `base`. +fn is_descendent_of(client: &T, base: &Block::Hash, block: &Block::Hash) -> sp_blockchain::Result + where + Block: BlockT, + T: HeaderMetadata + ?Sized, +{ + if base == block { + return Ok(false); + } + + let ancestor = sp_blockchain::lowest_common_ancestor(client, *block, *base)?; + + Ok(ancestor.hash == *base) +} + +fn validate_blocks(blocks: &Vec>, who: &PeerId) -> Result<(), BadPeer> { + for b in blocks { + if let Some(header) = &b.header { + let hash = header.hash(); + if hash != b.hash { + debug!( + target:"sync", + "Bad header received from {}. Expected hash {:?}, got {:?}", + who, + b.hash, + hash, + ); + return Err(BadPeer(who.clone(), rep::BAD_BLOCK)) + } + } + //FIXME add extrinsic root check + // if let (Some(header), Some(body)) = (&b.header, &b.body) { + // let expected = *header.extrinsics_root(); + // let got = HashFor::::ordered_trie_root(body.iter().map(Encode::encode).collect()); + // if expected != got { + // debug!( + // target:"sync", + // "Bad extrinsic root for a block {} received from {}. 
Expected {:?}, got {:?}", + // b.hash, + // who, + // expected, + // got, + // ); + // return Err(BadPeer(who.clone(), rep::BAD_BLOCK)) + // } + // } + } + Ok(()) +} + +#[cfg(test)] +mod test { + use super::*; + use super::message::FromBlock; + use substrate_test_runtime_client::{ + runtime::Block, + DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, + }; + use sp_blockchain::HeaderBackend; + use sc_block_builder::BlockBuilderProvider; + use sp_consensus::block_validation::DefaultBlockAnnounceValidator; + + #[test] + fn processes_empty_response_on_justification_request_for_unknown_block() { + // if we ask for a justification for a given block to a peer that doesn't know that block + // (different from not having a justification), the peer will reply with an empty response. + // internally we should process the response as the justification not being available. + + let client = Arc::new(TestClientBuilder::new().build()); + let info = client.info(); + let block_announce_validator = Box::new(DefaultBlockAnnounceValidator); + let peer_id = PeerId::random(); + + let mut sync = ChainSync::new( + Roles::AUTHORITY, + client.clone(), + &info, + None, + block_announce_validator, + 1, + ); + + let (a1_hash, a1_number) = { + let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + (a1.hash(), *a1.header.number()) + }; + + // add a new peer with the same best block + sync.new_peer(peer_id.clone(), a1_hash, a1_number).unwrap(); + + // and request a justification for the block + sync.request_justification(&a1_hash, a1_number); + + // the justification request should be scheduled to that peer + assert!( + sync.justification_requests().any(|(who, request)| { + who == peer_id && request.from == FromBlock::Hash(a1_hash) + }) + ); + + // there are no extra pending requests + assert_eq!( + sync.extra_justifications.pending_requests().count(), + 0, + ); + + // there's one in-flight extra request to the expected peer + assert!( + 
sync.extra_justifications.active_requests().any(|(who, (hash, number))| { + *who == peer_id && *hash == a1_hash && *number == a1_number + }) + ); + + // if the peer replies with an empty response (i.e. it doesn't know the block), + // the active request should be cleared. + assert_eq!( + sync.on_block_justification( + peer_id.clone(), + BlockResponse:: { + id: 0, + blocks: vec![], + } + ), + Ok(OnBlockJustification::Nothing), + ); + + // there should be no in-flight requests + assert_eq!( + sync.extra_justifications.active_requests().count(), + 0, + ); + + // and the request should now be pending again, waiting for reschedule + assert!( + sync.extra_justifications.pending_requests().any(|(hash, number)| { + *hash == a1_hash && *number == a1_number + }) + ); + } +} diff --git a/client/network/src/protocol/sync/blocks.rs b/client/network/src/protocol/sync/blocks.rs new file mode 100644 index 0000000000000..b64c9e053e97b --- /dev/null +++ b/client/network/src/protocol/sync/blocks.rs @@ -0,0 +1,307 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::cmp; +use std::ops::Range; +use std::collections::{HashMap, BTreeMap}; +use log::trace; +use libp2p::PeerId; +use sp_runtime::traits::{Block as BlockT, NumberFor, One}; +use crate::protocol::message; + +/// Block data with origin. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct BlockData { + /// The Block Message from the wire + pub block: message::BlockData, + /// The peer, we received this from + pub origin: Option, +} + +#[derive(Debug)] +enum BlockRangeState { + Downloading { + len: NumberFor, + downloading: u32, + }, + Complete(Vec>), +} + +impl BlockRangeState { + pub fn len(&self) -> NumberFor { + match *self { + BlockRangeState::Downloading { len, .. } => len, + BlockRangeState::Complete(ref blocks) => (blocks.len() as u32).into(), + } + } +} + +/// A collection of blocks being downloaded. +#[derive(Default)] +pub struct BlockCollection { + /// Downloaded blocks. + blocks: BTreeMap, BlockRangeState>, + peer_requests: HashMap>, +} + +impl BlockCollection { + /// Create a new instance. + pub fn new() -> Self { + BlockCollection { + blocks: BTreeMap::new(), + peer_requests: HashMap::new(), + } + } + + /// Clear everything. + pub fn clear(&mut self) { + self.blocks.clear(); + self.peer_requests.clear(); + } + + /// Insert a set of blocks into collection. + pub fn insert(&mut self, start: NumberFor, blocks: Vec>, who: PeerId) { + if blocks.is_empty() { + return; + } + + match self.blocks.get(&start) { + Some(&BlockRangeState::Downloading { .. 
}) => { + trace!(target: "sync", "Inserting block data still marked as being downloaded: {}", start); + }, + Some(&BlockRangeState::Complete(ref existing)) if existing.len() >= blocks.len() => { + trace!(target: "sync", "Ignored block data already downloaded: {}", start); + return; + }, + _ => (), + } + + self.blocks.insert(start, BlockRangeState::Complete(blocks.into_iter() + .map(|b| BlockData { origin: Some(who.clone()), block: b }).collect())); + } + + /// Returns a set of block hashes that require a header download. The returned set is marked as being downloaded. + pub fn needed_blocks( + &mut self, + who: PeerId, + count: usize, + peer_best: NumberFor, + common: NumberFor, + max_parallel: u32, + max_ahead: u32, + ) -> Option>> { + if peer_best <= common { + // Bail out early + return None; + } + // First block number that we need to download + let first_different = common + >::one(); + let count = (count as u32).into(); + let (mut range, downloading) = { + let mut downloading_iter = self.blocks.iter().peekable(); + let mut prev: Option<(&NumberFor, &BlockRangeState)> = None; + loop { + let next = downloading_iter.next(); + break match (prev, next) { + (Some((start, &BlockRangeState::Downloading { ref len, downloading })), _) + if downloading < max_parallel => + (*start .. *start + *len, downloading), + (Some((start, r)), Some((next_start, _))) if *start + r.len() < *next_start => + (*start + r.len() .. cmp::min(*next_start, *start + r.len() + count), 0), // gap + (Some((start, r)), None) => + (*start + r.len() .. *start + r.len() + count, 0), // last range + (None, None) => + (first_different .. first_different + count, 0), // empty + (None, Some((start, _))) if *start > first_different => + (first_different .. 
cmp::min(first_different + count, *start), 0), // gap at the start + _ => { + prev = next; + continue + }, + } + } + }; + // crop to peers best + if range.start > peer_best { + trace!(target: "sync", "Out of range for peer {} ({} vs {})", who, range.start, peer_best); + return None; + } + range.end = cmp::min(peer_best + One::one(), range.end); + + if self.blocks.iter().next().map_or(false, |(n, _)| range.start > *n + max_ahead.into()) { + trace!(target: "sync", "Too far ahead for peer {} ({})", who, range.start); + return None; + } + + self.peer_requests.insert(who, range.start); + self.blocks.insert(range.start, BlockRangeState::Downloading { + len: range.end - range.start, + downloading: downloading + 1 + }); + if range.end <= range.start { + panic!("Empty range {:?}, count={}, peer_best={}, common={}, blocks={:?}", + range, count, peer_best, common, self.blocks); + } + Some(range) + } + + /// Get a valid chain of blocks ordered in descending order and ready for importing into blockchain. + pub fn drain(&mut self, from: NumberFor) -> Vec> { + let mut drained = Vec::new(); + let mut ranges = Vec::new(); + + let mut prev = from; + for (start, range_data) in &mut self.blocks { + match range_data { + BlockRangeState::Complete(blocks) if *start <= prev => { + prev = *start + (blocks.len() as u32).into(); + // Remove all elements from `blocks` and add them to `drained` + drained.append(blocks); + ranges.push(*start); + }, + _ => break, + } + } + + for r in ranges { + self.blocks.remove(&r); + } + trace!(target: "sync", "Drained {} blocks", drained.len()); + drained + } + + pub fn clear_peer_download(&mut self, who: &PeerId) { + if let Some(start) = self.peer_requests.remove(who) { + let remove = match self.blocks.get_mut(&start) { + Some(&mut BlockRangeState::Downloading { ref mut downloading, .. }) if *downloading > 1 => { + *downloading -= 1; + false + }, + Some(&mut BlockRangeState::Downloading { .. 
}) => { + true + }, + _ => { + false + } + }; + if remove { + self.blocks.remove(&start); + } + } + } +} + +#[cfg(test)] +mod test { + use super::{BlockCollection, BlockData, BlockRangeState}; + use crate::{protocol::message, PeerId}; + use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper}; + use sp_core::H256; + + type Block = RawBlock>; + + fn is_empty(bc: &BlockCollection) -> bool { + bc.blocks.is_empty() && + bc.peer_requests.is_empty() + } + + fn generate_blocks(n: usize) -> Vec> { + (0 .. n).map(|_| message::generic::BlockData { + hash: H256::random(), + header: None, + body: None, + message_queue: None, + receipt: None, + justification: None, + }).collect() + } + + #[test] + fn create_clear() { + let mut bc = BlockCollection::new(); + assert!(is_empty(&bc)); + bc.insert(1, generate_blocks(100), PeerId::random()); + assert!(!is_empty(&bc)); + bc.clear(); + assert!(is_empty(&bc)); + } + + #[test] + fn insert_blocks() { + let mut bc = BlockCollection::new(); + assert!(is_empty(&bc)); + let peer0 = PeerId::random(); + let peer1 = PeerId::random(); + let peer2 = PeerId::random(); + + let blocks = generate_blocks(150); + assert_eq!(bc.needed_blocks(peer0.clone(), 40, 150, 0, 1, 200), Some(1 .. 41)); + assert_eq!(bc.needed_blocks(peer1.clone(), 40, 150, 0, 1, 200), Some(41 .. 81)); + assert_eq!(bc.needed_blocks(peer2.clone(), 40, 150, 0, 1, 200), Some(81 .. 121)); + + bc.clear_peer_download(&peer1); + bc.insert(41, blocks[41..81].to_vec(), peer1.clone()); + assert_eq!(bc.drain(1), vec![]); + assert_eq!(bc.needed_blocks(peer1.clone(), 40, 150, 0, 1, 200), Some(121 .. 151)); + bc.clear_peer_download(&peer0); + bc.insert(1, blocks[1..11].to_vec(), peer0.clone()); + + assert_eq!(bc.needed_blocks(peer0.clone(), 40, 150, 0, 1, 200), Some(11 .. 
41)); + assert_eq!(bc.drain(1), blocks[1..11].iter() + .map(|b| BlockData { block: b.clone(), origin: Some(peer0.clone()) }).collect::>()); + + bc.clear_peer_download(&peer0); + bc.insert(11, blocks[11..41].to_vec(), peer0.clone()); + + let drained = bc.drain(12); + assert_eq!(drained[..30], blocks[11..41].iter() + .map(|b| BlockData { block: b.clone(), origin: Some(peer0.clone()) }).collect::>()[..]); + assert_eq!(drained[30..], blocks[41..81].iter() + .map(|b| BlockData { block: b.clone(), origin: Some(peer1.clone()) }).collect::>()[..]); + + bc.clear_peer_download(&peer2); + assert_eq!(bc.needed_blocks(peer2.clone(), 40, 150, 80, 1, 200), Some(81 .. 121)); + bc.clear_peer_download(&peer2); + bc.insert(81, blocks[81..121].to_vec(), peer2.clone()); + bc.clear_peer_download(&peer1); + bc.insert(121, blocks[121..150].to_vec(), peer1.clone()); + + assert_eq!(bc.drain(80), vec![]); + let drained = bc.drain(81); + assert_eq!(drained[..40], blocks[81..121].iter() + .map(|b| BlockData { block: b.clone(), origin: Some(peer2.clone()) }).collect::>()[..]); + assert_eq!(drained[40..], blocks[121..150].iter() + .map(|b| BlockData { block: b.clone(), origin: Some(peer1.clone()) }).collect::>()[..]); + } + + #[test] + fn large_gap() { + let mut bc: BlockCollection = BlockCollection::new(); + bc.blocks.insert(100, BlockRangeState::Downloading { + len: 128, + downloading: 1, + }); + let blocks = generate_blocks(10).into_iter().map(|b| BlockData { block: b, origin: None }).collect(); + bc.blocks.insert(114305, BlockRangeState::Complete(blocks)); + + let peer0 = PeerId::random(); + assert_eq!(bc.needed_blocks(peer0.clone(), 128, 10000, 000, 1, 200), Some(1 .. 100)); + assert_eq!(bc.needed_blocks(peer0.clone(), 128, 10000, 600, 1, 200), None); // too far ahead + assert_eq!(bc.needed_blocks(peer0.clone(), 128, 10000, 600, 1, 200000), Some(100 + 128 .. 
100 + 128 + 128)); + } +} diff --git a/client/network/src/protocol/sync/extra_requests.rs b/client/network/src/protocol/sync/extra_requests.rs new file mode 100644 index 0000000000000..df336c25339fd --- /dev/null +++ b/client/network/src/protocol/sync/extra_requests.rs @@ -0,0 +1,572 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use sp_blockchain::Error as ClientError; +use crate::protocol::sync::{PeerSync, PeerSyncState}; +use fork_tree::ForkTree; +use libp2p::PeerId; +use log::{debug, trace, warn}; +use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; +use std::collections::{HashMap, HashSet, VecDeque}; +use std::time::Duration; +use wasm_timer::Instant; + +// Time to wait before trying to get the same extra data from the same peer. +const EXTRA_RETRY_WAIT: Duration = Duration::from_secs(10); + +/// Pending extra data request for the given block (hash and number). +pub(crate) type ExtraRequest = (::Hash, NumberFor); + +/// Manages pending block extra data (e.g. justification) requests. +/// +/// Multiple extras may be requested for competing forks, or for the same branch +/// at different (increasing) heights. 
This structure will guarantee that extras +/// are fetched in-order, and that obsolete changes are pruned (when finalizing a +/// competing fork). +#[derive(Debug)] +pub(crate) struct ExtraRequests { + tree: ForkTree, ()>, + /// best finalized block number that we have seen since restart + best_seen_finalized_number: NumberFor, + /// requests which have been queued for later processing + pending_requests: VecDeque>, + /// requests which are currently underway to some peer + active_requests: HashMap>, + /// previous requests without response + failed_requests: HashMap, Vec<(PeerId, Instant)>>, + /// successful requests + importing_requests: HashSet>, + /// the name of this type of extra request (useful for logging.) + request_type_name: &'static str, +} + +#[derive(Debug)] +pub(crate) struct Metrics { + pub(crate) pending_requests: u32, + pub(crate) active_requests: u32, + pub(crate) importing_requests: u32, + pub(crate) failed_requests: u32, + _priv: () +} + +impl ExtraRequests { + pub(crate) fn new(request_type_name: &'static str) -> Self { + ExtraRequests { + tree: ForkTree::new(), + best_seen_finalized_number: Zero::zero(), + pending_requests: VecDeque::new(), + active_requests: HashMap::new(), + failed_requests: HashMap::new(), + importing_requests: HashSet::new(), + request_type_name, + } + } + + /// Reset all state as if returned from `new`. + pub(crate) fn reset(&mut self) { + self.tree = ForkTree::new(); + self.pending_requests.clear(); + self.active_requests.clear(); + self.failed_requests.clear(); + } + + /// Returns an iterator-like struct that yields peers which extra + /// requests can be sent to. + pub(crate) fn matcher(&mut self) -> Matcher { + Matcher::new(self) + } + + /// Queue an extra data request to be considered by the `Matcher`. 
+ pub(crate) fn schedule(&mut self, request: ExtraRequest, is_descendent_of: F) + where F: Fn(&B::Hash, &B::Hash) -> Result + { + match self.tree.import(request.0, request.1, (), &is_descendent_of) { + Ok(true) => { + // this is a new root so we add it to the current `pending_requests` + self.pending_requests.push_back((request.0, request.1)); + } + Err(fork_tree::Error::Revert) => { + // we have finalized further than the given request, presumably + // by some other part of the system (not sync). we can safely + // ignore the `Revert` error. + }, + Err(err) => { + debug!(target: "sync", "Failed to insert request {:?} into tree: {:?}", request, err); + } + _ => () + } + } + + /// Retry any pending request if a peer disconnected. + pub(crate) fn peer_disconnected(&mut self, who: &PeerId) { + if let Some(request) = self.active_requests.remove(who) { + self.pending_requests.push_front(request); + } + } + + /// Processes the response for the request previously sent to the given peer. + pub(crate) fn on_response(&mut self, who: PeerId, resp: Option) -> Option<(PeerId, B::Hash, NumberFor, R)> { + // we assume that the request maps to the given response, this is + // currently enforced by the outer network protocol before passing on + // messages to chain sync. 
+ if let Some(request) = self.active_requests.remove(&who) { + if let Some(r) = resp { + trace!(target: "sync", "Queuing import of {} from {:?} for {:?}", + self.request_type_name, + who, + request, + ); + + self.importing_requests.insert(request); + return Some((who, request.0, request.1, r)) + } else { + trace!(target: "sync", "Empty {} response from {:?} for {:?}", + self.request_type_name, + who, + request, + ); + } + self.failed_requests.entry(request).or_default().push((who, Instant::now())); + self.pending_requests.push_front(request); + } else { + trace!(target: "sync", "No active {} request to {:?}", + self.request_type_name, + who, + ); + } + None + } + + /// Removes any pending extra requests for blocks lower than the given best finalized. + pub(crate) fn on_block_finalized( + &mut self, + best_finalized_hash: &B::Hash, + best_finalized_number: NumberFor, + is_descendent_of: F + ) -> Result<(), fork_tree::Error> + where F: Fn(&B::Hash, &B::Hash) -> Result + { + let request = (*best_finalized_hash, best_finalized_number); + + if self.try_finalize_root::<()>(request, Ok(request), false) { + return Ok(()) + } + + if best_finalized_number > self.best_seen_finalized_number { + // normally we'll receive finality notifications for every block => finalize would be enough + // but if many blocks are finalized at once, some notifications may be omitted + // => let's use finalize_with_ancestors here + match self.tree.finalize_with_ancestors( + best_finalized_hash, + best_finalized_number, + &is_descendent_of, + ) { + Err(fork_tree::Error::Revert) => { + // we might have finalized further already in which case we + // will get a `Revert` error which we can safely ignore. 
+ }, + Err(err) => return Err(err), + Ok(_) => {}, + } + + self.best_seen_finalized_number = best_finalized_number; + } + + let roots = self.tree.roots().collect::>(); + + self.pending_requests.retain(|(h, n)| roots.contains(&(h, n, &()))); + self.active_requests.retain(|_, (h, n)| roots.contains(&(h, n, &()))); + self.failed_requests.retain(|(h, n), _| roots.contains(&(h, n, &()))); + + Ok(()) + } + + /// Try to finalize pending root. + /// + /// Returns true if import of this request has been scheduled. + pub(crate) fn try_finalize_root( + &mut self, + request: ExtraRequest, + result: Result, E>, + reschedule_on_failure: bool + ) -> bool + { + if !self.importing_requests.remove(&request) { + return false + } + + let (finalized_hash, finalized_number) = match result { + Ok(req) => (req.0, req.1), + Err(_) => { + if reschedule_on_failure { + self.pending_requests.push_front(request); + } + return true + } + }; + + if self.tree.finalize_root(&finalized_hash).is_none() { + warn!(target: "sync", "‼️ Imported {:?} {:?} which isn't a root in the tree: {:?}", + finalized_hash, + finalized_number, + self.tree.roots().collect::>() + ); + return true + } + + self.failed_requests.clear(); + self.active_requests.clear(); + self.pending_requests.clear(); + self.pending_requests.extend(self.tree.roots().map(|(&h, &n, _)| (h, n))); + self.best_seen_finalized_number = finalized_number; + + true + } + + /// Returns an iterator over all active (in-flight) requests and associated peer id. + #[cfg(test)] + pub(crate) fn active_requests(&self) -> impl Iterator)> { + self.active_requests.iter() + } + + /// Returns an iterator over all scheduled pending requests. + #[cfg(test)] + pub(crate) fn pending_requests(&self) -> impl Iterator> { + self.pending_requests.iter() + } + + /// Get some key metrics. 
+ pub(crate) fn metrics(&self) -> Metrics { + use std::convert::TryInto; + Metrics { + pending_requests: self.pending_requests.len().try_into().unwrap_or(std::u32::MAX), + active_requests: self.active_requests.len().try_into().unwrap_or(std::u32::MAX), + failed_requests: self.failed_requests.len().try_into().unwrap_or(std::u32::MAX), + importing_requests: self.importing_requests.len().try_into().unwrap_or(std::u32::MAX), + _priv: () + } + } +} + +/// Matches peers with pending extra requests. +#[derive(Debug)] +pub(crate) struct Matcher<'a, B: BlockT> { + /// Length of pending requests collection. + /// Used to ensure we do not loop more than once over all pending requests. + remaining: usize, + extras: &'a mut ExtraRequests +} + +impl<'a, B: BlockT> Matcher<'a, B> { + fn new(extras: &'a mut ExtraRequests) -> Self { + Matcher { + remaining: extras.pending_requests.len(), + extras + } + } + + /// Finds a peer to which a pending request can be sent. + /// + /// Peers are filtered according to the current known best block (i.e. we won't + /// send an extra request for block #10 to a peer at block #2), and we also + /// throttle requests to the same peer if a previous request yielded no results. + /// + /// This method returns as soon as it finds a peer that should be able to answer + /// our request. If no request is pending or no peer can handle it, `None` is + /// returned instead. + /// + /// # Note + /// + /// The returned `PeerId` (if any) is guaranteed to come from the given `peers` + /// argument. 
+ pub(crate) fn next(&mut self, peers: &HashMap>) -> Option<(PeerId, ExtraRequest)> { + if self.remaining == 0 { + return None + } + + // clean up previously failed requests so we can retry again + for requests in self.extras.failed_requests.values_mut() { + requests.retain(|(_, instant)| instant.elapsed() < EXTRA_RETRY_WAIT); + } + + while let Some(request) = self.extras.pending_requests.pop_front() { + for (peer, sync) in peers.iter().filter(|(_, sync)| sync.state == PeerSyncState::Available) { + // only ask peers that have synced at least up to the block number that we're asking the extra for + if sync.best_number < request.1 { + continue + } + // don't request to any peers that already have pending requests + if self.extras.active_requests.contains_key(peer) { + continue + } + // only ask if the same request has not failed for this peer before + if self.extras.failed_requests.get(&request).map(|rr| rr.iter().any(|i| &i.0 == peer)).unwrap_or(false) { + continue + } + self.extras.active_requests.insert(peer.clone(), request); + + trace!(target: "sync", "Sending {} request to {:?} for {:?}", + self.extras.request_type_name, + peer, + request, + ); + + return Some((peer.clone(), request)) + } + + self.extras.pending_requests.push_back(request); + self.remaining -= 1; + + if self.remaining == 0 { + break + } + } + + None + } +} + +#[cfg(test)] +mod tests { + use crate::protocol::sync::PeerSync; + use sp_blockchain::Error as ClientError; + use quickcheck::{Arbitrary, Gen, QuickCheck, StdThreadGen}; + use rand::Rng; + use std::collections::{HashMap, HashSet}; + use super::*; + use sp_test_primitives::{Block, BlockNumber, Hash}; + + #[test] + fn requests_are_processed_in_order() { + fn property(mut peers: ArbitraryPeers) { + let mut requests = ExtraRequests::::new("test"); + + let num_peers_available = peers.0.values() + .filter(|s| s.state == PeerSyncState::Available).count(); + + for i in 0 .. 
num_peers_available { + requests.schedule((Hash::random(), i as u64), |a, b| Ok(a[0] >= b[0])) + } + + let pending = requests.pending_requests.clone(); + let mut m = requests.matcher(); + + for p in &pending { + let (peer, r) = m.next(&peers.0).unwrap(); + assert_eq!(p, &r); + peers.0.get_mut(&peer).unwrap().state = PeerSyncState::DownloadingJustification(r.0); + } + } + + QuickCheck::with_gen(StdThreadGen::new(19)) + .quickcheck(property as fn(ArbitraryPeers)) + } + + #[test] + fn new_roots_schedule_new_request() { + fn property(data: Vec) { + let mut requests = ExtraRequests::::new("test"); + for (i, number) in data.into_iter().enumerate() { + let hash = [i as u8; 32].into(); + let pending = requests.pending_requests.len(); + let is_root = requests.tree.roots().any(|(&h, &n, _)| hash == h && number == n); + requests.schedule((hash, number), |a, b| Ok(a[0] >= b[0])); + if !is_root { + assert_eq!(1 + pending, requests.pending_requests.len()) + } + } + } + QuickCheck::new().quickcheck(property as fn(Vec)) + } + + #[test] + fn disconnecting_implies_rescheduling() { + fn property(mut peers: ArbitraryPeers) -> bool { + let mut requests = ExtraRequests::::new("test"); + + let num_peers_available = peers.0.values() + .filter(|s| s.state == PeerSyncState::Available).count(); + + for i in 0 .. 
num_peers_available { + requests.schedule((Hash::random(), i as u64), |a, b| Ok(a[0] >= b[0])) + } + + let mut m = requests.matcher(); + while let Some((peer, r)) = m.next(&peers.0) { + peers.0.get_mut(&peer).unwrap().state = PeerSyncState::DownloadingJustification(r.0); + } + + assert!(requests.pending_requests.is_empty()); + + let active_peers = requests.active_requests.keys().cloned().collect::>(); + let previously_active = requests.active_requests.values().cloned().collect::>(); + + for peer in &active_peers { + requests.peer_disconnected(peer) + } + + assert!(requests.active_requests.is_empty()); + + previously_active == requests.pending_requests.iter().cloned().collect::>() + } + + QuickCheck::with_gen(StdThreadGen::new(19)) + .quickcheck(property as fn(ArbitraryPeers) -> bool) + } + + #[test] + fn no_response_reschedules() { + fn property(mut peers: ArbitraryPeers) { + let mut requests = ExtraRequests::::new("test"); + + let num_peers_available = peers.0.values() + .filter(|s| s.state == PeerSyncState::Available).count(); + + for i in 0 .. 
num_peers_available { + requests.schedule((Hash::random(), i as u64), |a, b| Ok(a[0] >= b[0])) + } + + let mut m = requests.matcher(); + while let Some((peer, r)) = m.next(&peers.0) { + peers.0.get_mut(&peer).unwrap().state = PeerSyncState::DownloadingJustification(r.0); + } + + let active = requests.active_requests.iter().map(|(p, &r)| (p.clone(), r)).collect::>(); + + for (peer, req) in &active { + assert!(requests.failed_requests.get(req).is_none()); + assert!(!requests.pending_requests.contains(req)); + assert!(requests.on_response::<()>(peer.clone(), None).is_none()); + assert!(requests.pending_requests.contains(req)); + assert_eq!(1, requests.failed_requests.get(req).unwrap().iter().filter(|(p, _)| p == peer).count()) + } + } + + QuickCheck::with_gen(StdThreadGen::new(19)) + .quickcheck(property as fn(ArbitraryPeers)) + } + + #[test] + fn request_is_rescheduled_when_earlier_block_is_finalized() { + sp_tracing::try_init_simple(); + + let mut finality_proofs = ExtraRequests::::new("test"); + + let hash4 = [4; 32].into(); + let hash5 = [5; 32].into(); + let hash6 = [6; 32].into(); + let hash7 = [7; 32].into(); + + fn is_descendent_of(base: &Hash, target: &Hash) -> Result { + Ok(target[0] >= base[0]) + } + + // make #4 last finalized block + finality_proofs.tree.import(hash4, 4, (), &is_descendent_of).unwrap(); + finality_proofs.tree.finalize_root(&hash4); + + // schedule request for #6 + finality_proofs.schedule((hash6, 6), is_descendent_of); + + // receive finality proof for #5 + finality_proofs.importing_requests.insert((hash6, 6)); + finality_proofs.on_block_finalized(&hash5, 5, is_descendent_of).unwrap(); + finality_proofs.try_finalize_root::<()>((hash6, 6), Ok((hash5, 5)), true); + + // ensure that request for #6 is still pending + assert_eq!(finality_proofs.pending_requests.iter().collect::>(), vec![&(hash6, 6)]); + + // receive finality proof for #7 + finality_proofs.importing_requests.insert((hash6, 6)); + finality_proofs.on_block_finalized(&hash6, 6, 
is_descendent_of).unwrap(); + finality_proofs.on_block_finalized(&hash7, 7, is_descendent_of).unwrap(); + finality_proofs.try_finalize_root::<()>((hash6, 6), Ok((hash7, 7)), true); + + // ensure that there's no request for #6 + assert_eq!(finality_proofs.pending_requests.iter().collect::>(), Vec::<&(Hash, u64)>::new()); + } + + #[test] + fn ancestor_roots_are_finalized_when_finality_notification_is_missed() { + let mut finality_proofs = ExtraRequests::::new("test"); + + let hash4 = [4; 32].into(); + let hash5 = [5; 32].into(); + + fn is_descendent_of(base: &Hash, target: &Hash) -> Result { + Ok(target[0] >= base[0]) + } + + // schedule request for #4 + finality_proofs.schedule((hash4, 4), is_descendent_of); + + // receive finality notification for #5 (missing notification for #4!!!) + finality_proofs.importing_requests.insert((hash4, 5)); + finality_proofs.on_block_finalized(&hash5, 5, is_descendent_of).unwrap(); + assert_eq!(finality_proofs.tree.roots().count(), 0); + } + + // Some Arbitrary instances to allow easy construction of random peer sets: + + #[derive(Debug, Clone)] + struct ArbitraryPeerSyncState(PeerSyncState); + + impl Arbitrary for ArbitraryPeerSyncState { + fn arbitrary(g: &mut G) -> Self { + let s = match g.gen::() % 5 { + 0 => PeerSyncState::Available, + // TODO: 1 => PeerSyncState::AncestorSearch(g.gen(), AncestorSearchState), + 1 => PeerSyncState::DownloadingNew(g.gen::()), + 2 => PeerSyncState::DownloadingStale(Hash::random()), + 3 => PeerSyncState::DownloadingJustification(Hash::random()), + _ => PeerSyncState::DownloadingFinalityProof(Hash::random()) + }; + ArbitraryPeerSyncState(s) + } + } + + #[derive(Debug, Clone)] + struct ArbitraryPeerSync(PeerSync); + + impl Arbitrary for ArbitraryPeerSync { + fn arbitrary(g: &mut G) -> Self { + let ps = PeerSync { + common_number: g.gen(), + best_hash: Hash::random(), + best_number: g.gen(), + state: ArbitraryPeerSyncState::arbitrary(g).0, + recently_announced: Default::default() + }; + 
ArbitraryPeerSync(ps) + } + } + + #[derive(Debug, Clone)] + struct ArbitraryPeers(HashMap>); + + impl Arbitrary for ArbitraryPeers { + fn arbitrary(g: &mut G) -> Self { + let mut peers = HashMap::with_capacity(g.size()); + for _ in 0 .. g.size() { + peers.insert(PeerId::random(), ArbitraryPeerSync::arbitrary(g).0); + } + ArbitraryPeers(peers) + } + } + +} diff --git a/client/network/src/request_responses.rs b/client/network/src/request_responses.rs new file mode 100644 index 0000000000000..5141e6db70141 --- /dev/null +++ b/client/network/src/request_responses.rs @@ -0,0 +1,872 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Collection of request-response protocols. +//! +//! The [`RequestResponse`] struct defined in this module provides support for zero or more +//! so-called "request-response" protocols. +//! +//! A request-response protocol works in the following way: +//! +//! - For every emitted request, a new substream is open and the protocol is negotiated. If the +//! remote supports the protocol, the size of the request is sent as a LEB128 number, followed +//! with the request itself. The remote then sends the size of the response as a LEB128 number, +//! followed with the response. +//! +//! - Requests have a certain time limit before they time out. This time includes the time it +//! 
takes to send/receive the request and response. +//! +//! - If provided, a ["requests processing"](ProtocolConfig::inbound_queue) channel +//! is used to handle incoming requests. +//! + +use futures::{channel::{mpsc, oneshot}, prelude::*}; +use libp2p::{ + core::{ + connection::{ConnectionId, ListenerId}, + ConnectedPoint, Multiaddr, PeerId, + }, + request_response::{ + RequestResponse, RequestResponseCodec, RequestResponseConfig, RequestResponseEvent, + RequestResponseMessage, ResponseChannel, ProtocolSupport + }, + swarm::{ + protocols_handler::multi::MultiHandler, NetworkBehaviour, NetworkBehaviourAction, + PollParameters, ProtocolsHandler, + }, +}; +use std::{ + borrow::Cow, collections::{hash_map::Entry, HashMap}, convert::TryFrom as _, io, iter, + pin::Pin, task::{Context, Poll}, time::Duration, +}; + +pub use libp2p::request_response::{InboundFailure, OutboundFailure, RequestId}; + +/// Configuration for a single request-response protocol. +#[derive(Debug, Clone)] +pub struct ProtocolConfig { + /// Name of the protocol on the wire. Should be something like `/foo/bar`. + pub name: Cow<'static, str>, + + /// Maximum allowed size, in bytes, of a request. + /// + /// Any request larger than this value will be declined as a way to avoid allocating too + /// much memory for it. + pub max_request_size: u64, + + /// Maximum allowed size, in bytes, of a response. + /// + /// Any response larger than this value will be declined as a way to avoid allocating too + /// much memory for it. + pub max_response_size: u64, + + /// Duration after which emitted requests are considered timed out. + /// + /// If you expect the response to come back quickly, you should set this to a smaller duration. + pub request_timeout: Duration, + + /// Channel on which the networking service will send incoming requests. + /// + /// Every time a peer sends a request to the local node using this protocol, the networking + /// service will push an element on this channel. 
The receiving side of this channel then has + /// to pull this element, process the request, and send back the response to send back to the + /// peer. + /// + /// The size of the channel has to be carefully chosen. If the channel is full, the networking + /// service will discard the incoming request send back an error to the peer. Consequently, + /// the channel being full is an indicator that the node is overloaded. + /// + /// You can typically set the size of the channel to `T / d`, where `T` is the + /// `request_timeout` and `d` is the expected average duration of CPU and I/O it takes to + /// build a response. + /// + /// Can be `None` if the local node does not support answering incoming requests. + /// If this is `None`, then the local node will not advertise support for this protocol towards + /// other peers. If this is `Some` but the channel is closed, then the local node will + /// advertise support for this protocol, but any incoming request will lead to an error being + /// sent back. + pub inbound_queue: Option>, +} + +/// A single request received by a peer on a request-response protocol. +#[derive(Debug)] +pub struct IncomingRequest { + /// Who sent the request. + pub peer: PeerId, + + /// Request sent by the remote. Will always be smaller than + /// [`ProtocolConfig::max_request_size`]. + pub payload: Vec, + + /// Channel to send back the response to. + pub pending_response: oneshot::Sender>, +} + +/// Event generated by the [`RequestResponsesBehaviour`]. +#[derive(Debug)] +pub enum Event { + /// A remote sent a request and either we have successfully answered it or an error happened. + /// + /// This event is generated for statistics purposes. + InboundRequest { + /// Peer which has emitted the request. + peer: PeerId, + /// Name of the protocol in question. + protocol: Cow<'static, str>, + /// If `Ok`, contains the time elapsed between when we received the request and when we + /// sent back the response. If `Err`, the error that happened. 
+ result: Result, + }, + + /// A request initiated using [`RequestResponsesBehaviour::send_request`] has succeeded or + /// failed. + RequestFinished { + /// Request that has succeeded. + request_id: RequestId, + /// Response sent by the remote or reason for failure. + result: Result, RequestFailure>, + }, +} + +/// Implementation of `NetworkBehaviour` that provides support for request-response protocols. +pub struct RequestResponsesBehaviour { + /// The multiple sub-protocols, by name. + /// Contains the underlying libp2p `RequestResponse` behaviour, plus an optional + /// "response builder" used to build responses for incoming requests. + protocols: HashMap< + Cow<'static, str>, + (RequestResponse, Option>) + >, + + /// Whenever an incoming request arrives, a `Future` is added to this list and will yield the + /// response to send back to the remote. + pending_responses: stream::FuturesUnordered< + Pin + Send>> + >, +} + +/// Generated by the response builder and waiting to be processed. +enum RequestProcessingOutcome { + Response { + protocol: Cow<'static, str>, + inner_channel: ResponseChannel, ()>>, + response: Vec, + }, + Busy { + peer: PeerId, + protocol: Cow<'static, str>, + }, +} + +impl RequestResponsesBehaviour { + /// Creates a new behaviour. Must be passed a list of supported protocols. Returns an error if + /// the same protocol is passed twice. 
+ pub fn new(list: impl Iterator) -> Result { + let mut protocols = HashMap::new(); + for protocol in list { + let mut cfg = RequestResponseConfig::default(); + cfg.set_connection_keep_alive(Duration::from_secs(10)); + cfg.set_request_timeout(protocol.request_timeout); + + let protocol_support = if protocol.inbound_queue.is_some() { + ProtocolSupport::Full + } else { + ProtocolSupport::Outbound + }; + + let rq_rp = RequestResponse::new(GenericCodec { + max_request_size: protocol.max_request_size, + max_response_size: protocol.max_response_size, + }, iter::once((protocol.name.as_bytes().to_vec(), protocol_support)), cfg); + + match protocols.entry(protocol.name) { + Entry::Vacant(e) => e.insert((rq_rp, protocol.inbound_queue)), + Entry::Occupied(e) => + return Err(RegisterError::DuplicateProtocol(e.key().clone())), + }; + } + + Ok(Self { + protocols, + pending_responses: stream::FuturesUnordered::new(), + }) + } + + /// Initiates sending a request. + /// + /// An error is returned if we are not connected to the target peer or if the protocol doesn't + /// match one that has been registered. 
+ pub fn send_request(&mut self, target: &PeerId, protocol: &str, request: Vec) + -> Result + { + if let Some((protocol, _)) = self.protocols.get_mut(protocol) { + if protocol.is_connected(target) { + Ok(protocol.send_request(target, request)) + } else { + Err(SendRequestError::NotConnected) + } + } else { + Err(SendRequestError::UnknownProtocol) + } + } +} + +impl NetworkBehaviour for RequestResponsesBehaviour { + type ProtocolsHandler = MultiHandler< + String, + as NetworkBehaviour>::ProtocolsHandler, + >; + type OutEvent = Event; + + fn new_handler(&mut self) -> Self::ProtocolsHandler { + let iter = self.protocols.iter_mut() + .map(|(p, (r, _))| (p.to_string(), NetworkBehaviour::new_handler(r))); + + MultiHandler::try_from_iter(iter) + .expect("Protocols are in a HashMap and there can be at most one handler per \ + protocol name, which is the only possible error; qed") + } + + fn addresses_of_peer(&mut self, _: &PeerId) -> Vec { + Vec::new() + } + + fn inject_connection_established( + &mut self, + peer_id: &PeerId, + conn: &ConnectionId, + endpoint: &ConnectedPoint, + ) { + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::inject_connection_established(p, peer_id, conn, endpoint) + } + } + + fn inject_connected(&mut self, peer_id: &PeerId) { + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::inject_connected(p, peer_id) + } + } + + fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::inject_connection_closed(p, peer_id, conn, endpoint) + } + } + + fn inject_disconnected(&mut self, peer_id: &PeerId) { + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::inject_disconnected(p, peer_id) + } + } + + fn inject_addr_reach_failure( + &mut self, + peer_id: Option<&PeerId>, + addr: &Multiaddr, + error: &dyn std::error::Error + ) { + for (p, _) in self.protocols.values_mut() { + 
NetworkBehaviour::inject_addr_reach_failure(p, peer_id, addr, error) + } + } + + fn inject_event( + &mut self, + peer_id: PeerId, + connection: ConnectionId, + (p_name, event): ::OutEvent, + ) { + if let Some((proto, _)) = self.protocols.get_mut(&*p_name) { + return proto.inject_event(peer_id, connection, event) + } + + log::warn!(target: "sub-libp2p", + "inject_node_event: no request-response instance registered for protocol {:?}", + p_name) + } + + fn inject_new_external_addr(&mut self, addr: &Multiaddr) { + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::inject_new_external_addr(p, addr) + } + } + + fn inject_expired_listen_addr(&mut self, addr: &Multiaddr) { + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::inject_expired_listen_addr(p, addr) + } + } + + fn inject_dial_failure(&mut self, peer_id: &PeerId) { + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::inject_dial_failure(p, peer_id) + } + } + + fn inject_new_listen_addr(&mut self, addr: &Multiaddr) { + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::inject_new_listen_addr(p, addr) + } + } + + fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn std::error::Error + 'static)) { + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::inject_listener_error(p, id, err) + } + } + + fn inject_listener_closed(&mut self, id: ListenerId, reason: Result<(), &io::Error>) { + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::inject_listener_closed(p, id, reason) + } + } + + fn poll( + &mut self, + cx: &mut Context, + params: &mut impl PollParameters, + ) -> Poll< + NetworkBehaviourAction< + ::InEvent, + Self::OutEvent, + >, + > { + 'poll_all: loop { + // Poll to see if any response is ready to be sent back. 
+ while let Poll::Ready(Some(result)) = self.pending_responses.poll_next_unpin(cx) { + match result { + RequestProcessingOutcome::Response { + protocol, inner_channel, response + } => { + if let Some((protocol, _)) = self.protocols.get_mut(&*protocol) { + protocol.send_response(inner_channel, Ok(response)); + } + } + RequestProcessingOutcome::Busy { peer, protocol } => { + let out = Event::InboundRequest { + peer, + protocol, + result: Err(ResponseFailure::Busy), + }; + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); + } + } + } + + // Poll request-responses protocols. + for (protocol, (behaviour, resp_builder)) in &mut self.protocols { + while let Poll::Ready(ev) = behaviour.poll(cx, params) { + let ev = match ev { + // Main events we are interested in. + NetworkBehaviourAction::GenerateEvent(ev) => ev, + + // Other events generated by the underlying behaviour are transparently + // passed through. + NetworkBehaviourAction::DialAddress { address } => { + log::error!("The request-response isn't supposed to start dialing peers"); + return Poll::Ready(NetworkBehaviourAction::DialAddress { address }) + } + NetworkBehaviourAction::DialPeer { peer_id, condition } => { + log::error!("The request-response isn't supposed to start dialing peers"); + return Poll::Ready(NetworkBehaviourAction::DialPeer { + peer_id, + condition, + }) + } + NetworkBehaviourAction::NotifyHandler { + peer_id, + handler, + event, + } => { + return Poll::Ready(NetworkBehaviourAction::NotifyHandler { + peer_id, + handler, + event: ((*protocol).to_string(), event), + }) + } + NetworkBehaviourAction::ReportObservedAddr { address } => { + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { + address, + }) + } + }; + + match ev { + // Received a request from a remote. + RequestResponseEvent::Message { + peer, + message: RequestResponseMessage::Request { request, channel, .. 
}, + } => { + let (tx, rx) = oneshot::channel(); + + // Submit the request to the "response builder" passed by the user at + // initialization. + if let Some(resp_builder) = resp_builder { + // If the response builder is too busy, silently drop `tx`. + // This will be reported as a `Busy` error. + let _ = resp_builder.try_send(IncomingRequest { + peer: peer.clone(), + payload: request, + pending_response: tx, + }); + } + + let protocol = protocol.clone(); + self.pending_responses.push(Box::pin(async move { + // The `tx` created above can be dropped if we are not capable of + // processing this request, which is reflected as a "Busy" error. + if let Ok(response) = rx.await { + RequestProcessingOutcome::Response { + protocol, inner_channel: channel, response + } + } else { + RequestProcessingOutcome::Busy { peer, protocol } + } + })); + + // This `continue` makes sure that `pending_responses` gets polled + // after we have added the new element. + continue 'poll_all; + } + + // Received a response from a remote to one of our requests. + RequestResponseEvent::Message { + message: + RequestResponseMessage::Response { + request_id, + response, + }, + .. + } => { + let out = Event::RequestFinished { + request_id, + result: response.map_err(|()| RequestFailure::Refused), + }; + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); + } + + // One of our requests has failed. + RequestResponseEvent::OutboundFailure { + request_id, + error, + .. + } => { + let out = Event::RequestFinished { + request_id, + result: Err(RequestFailure::Network(error)), + }; + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); + } + + // Remote has tried to send a request but failed. + RequestResponseEvent::InboundFailure { peer, error, .. 
} => { + let out = Event::InboundRequest { + peer, + protocol: protocol.clone(), + result: Err(ResponseFailure::Network(error)), + }; + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); + } + }; + } + } + + break Poll::Pending; + } + } +} + +/// Error when registering a protocol. +#[derive(Debug, derive_more::Display, derive_more::Error)] +pub enum RegisterError { + /// A protocol has been specified multiple times. + DuplicateProtocol(#[error(ignore)] Cow<'static, str>), +} + +/// Error when sending a request. +#[derive(Debug, derive_more::Display, derive_more::Error)] +pub enum SendRequestError { + /// We are not currently connected to the requested peer. + NotConnected, + /// Given protocol hasn't been registered. + UnknownProtocol, +} + +/// Error in a request. +#[derive(Debug, derive_more::Display, derive_more::Error)] +pub enum RequestFailure { + /// Remote has closed the substream before answering, thereby signaling that it considers the + /// request as valid, but refused to answer it. + Refused, + /// Problem on the network. + #[display(fmt = "Problem on the network")] + Network(#[error(ignore)] OutboundFailure), +} + +/// Error when processing a request sent by a remote. +#[derive(Debug, derive_more::Display, derive_more::Error)] +pub enum ResponseFailure { + /// Internal response builder is too busy to process this request. + Busy, + /// Problem on the network. + #[display(fmt = "Problem on the network")] + Network(#[error(ignore)] InboundFailure), +} + +/// Implements the libp2p [`RequestResponseCodec`] trait. Defines how streams of bytes are turned +/// into requests and responses and vice-versa. +#[derive(Debug, Clone)] +#[doc(hidden)] // Needs to be public in order to satisfy the Rust compiler. 
+pub struct GenericCodec { + max_request_size: u64, + max_response_size: u64, +} + +#[async_trait::async_trait] +impl RequestResponseCodec for GenericCodec { + type Protocol = Vec; + type Request = Vec; + type Response = Result, ()>; + + async fn read_request( + &mut self, + _: &Self::Protocol, + mut io: &mut T, + ) -> io::Result + where + T: AsyncRead + Unpin + Send, + { + // Read the length. + let length = unsigned_varint::aio::read_usize(&mut io).await + .map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err))?; + if length > usize::try_from(self.max_request_size).unwrap_or(usize::max_value()) { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + format!("Request size exceeds limit: {} > {}", length, self.max_request_size) + )); + } + + // Read the payload. + let mut buffer = vec![0; length]; + io.read_exact(&mut buffer).await?; + Ok(buffer) + } + + async fn read_response( + &mut self, + _: &Self::Protocol, + mut io: &mut T, + ) -> io::Result + where + T: AsyncRead + Unpin + Send, + { + // Note that this function returns a `Result>`. Returning an `Err` is + // considered as a protocol error and will result in the entire connection being closed. + // Returning `Ok(Err(_))` signifies that a response has successfully been fetched, and + // that this response is an error. + + // Read the length. + let length = match unsigned_varint::aio::read_usize(&mut io).await { + Ok(l) => l, + Err(unsigned_varint::io::ReadError::Io(err)) + if matches!(err.kind(), io::ErrorKind::UnexpectedEof) => + { + return Ok(Err(())); + } + Err(err) => return Err(io::Error::new(io::ErrorKind::InvalidInput, err)), + }; + + if length > usize::try_from(self.max_response_size).unwrap_or(usize::max_value()) { + return Err(io::Error::new( + io::ErrorKind::InvalidInput, + format!("Response size exceeds limit: {} > {}", length, self.max_response_size) + )); + } + + // Read the payload. 
+ let mut buffer = vec![0; length]; + io.read_exact(&mut buffer).await?; + Ok(Ok(buffer)) + } + + async fn write_request( + &mut self, + _: &Self::Protocol, + io: &mut T, + req: Self::Request, + ) -> io::Result<()> + where + T: AsyncWrite + Unpin + Send, + { + // TODO: check the length? + // Write the length. + { + let mut buffer = unsigned_varint::encode::usize_buffer(); + io.write_all(unsigned_varint::encode::usize(req.len(), &mut buffer)).await?; + } + + // Write the payload. + io.write_all(&req).await?; + + io.close().await?; + Ok(()) + } + + async fn write_response( + &mut self, + _: &Self::Protocol, + io: &mut T, + res: Self::Response, + ) -> io::Result<()> + where + T: AsyncWrite + Unpin + Send, + { + // If `res` is an `Err`, we jump to closing the substream without writing anything on it. + if let Ok(res) = res { + // TODO: check the length? + // Write the length. + { + let mut buffer = unsigned_varint::encode::usize_buffer(); + io.write_all(unsigned_varint::encode::usize(res.len(), &mut buffer)).await?; + } + + // Write the payload. + io.write_all(&res).await?; + } + + io.close().await?; + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use futures::{channel::mpsc, prelude::*}; + use libp2p::identity::Keypair; + use libp2p::Multiaddr; + use libp2p::core::upgrade; + use libp2p::core::transport::{Transport, MemoryTransport}; + use libp2p::noise; + use libp2p::swarm::{Swarm, SwarmEvent}; + use std::{iter, time::Duration}; + + #[test] + fn basic_request_response_works() { + let protocol_name = "/test/req-rep/1"; + + // Build swarms whose behaviour is `RequestResponsesBehaviour`. 
+ let mut swarms = (0..2) + .map(|_| { + let keypair = Keypair::generate_ed25519(); + + let noise_keys = noise::Keypair::::new() + .into_authentic(&keypair) + .unwrap(); + + let transport = MemoryTransport + .upgrade(upgrade::Version::V1) + .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) + .multiplex(libp2p::yamux::Config::default()); + + let behaviour = { + let (tx, mut rx) = mpsc::channel(64); + + let b = super::RequestResponsesBehaviour::new(iter::once(super::ProtocolConfig { + name: From::from(protocol_name), + max_request_size: 1024, + max_response_size: 1024 * 1024, + request_timeout: Duration::from_secs(30), + inbound_queue: Some(tx), + })).unwrap(); + + async_std::task::spawn(async move { + while let Some(rq) = rx.next().await { + assert_eq!(rq.payload, b"this is a request"); + let _ = rq.pending_response.send(b"this is a response".to_vec()); + } + }); + + b + }; + + let mut swarm = Swarm::new(transport, behaviour, keypair.public().into_peer_id()); + let listen_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); + + Swarm::listen_on(&mut swarm, listen_addr.clone()).unwrap(); + (swarm, listen_addr) + }) + .collect::>(); + + // Ask `swarm[0]` to dial `swarm[1]`. There isn't any discovery mechanism in place in + // this test, so they wouldn't connect to each other. + { + let dial_addr = swarms[1].1.clone(); + Swarm::dial_addr(&mut swarms[0].0, dial_addr).unwrap(); + } + + // Running `swarm[0]` in the background until a `InboundRequest` event happens, + // which is a hint about the test having ended. + async_std::task::spawn({ + let (mut swarm, _) = swarms.remove(0); + async move { + loop { + match swarm.next_event().await { + SwarmEvent::Behaviour(super::Event::InboundRequest { result, .. }) => { + assert!(result.is_ok()); + break + }, + _ => {} + } + } + } + }); + + // Remove and run the remaining swarm. 
+ let (mut swarm, _) = swarms.remove(0); + async_std::task::block_on(async move { + let mut sent_request_id = None; + + loop { + match swarm.next_event().await { + SwarmEvent::ConnectionEstablished { peer_id, .. } => { + let id = swarm.send_request( + &peer_id, + protocol_name, + b"this is a request".to_vec() + ).unwrap(); + assert!(sent_request_id.is_none()); + sent_request_id = Some(id); + } + SwarmEvent::Behaviour(super::Event::RequestFinished { + request_id, + result, + }) => { + assert_eq!(Some(request_id), sent_request_id); + let result = result.unwrap(); + assert_eq!(result, b"this is a response"); + break; + } + _ => {} + } + } + }); + } + + #[test] + fn max_response_size_exceeded() { + let protocol_name = "/test/req-rep/1"; + + // Build swarms whose behaviour is `RequestResponsesBehaviour`. + let mut swarms = (0..2) + .map(|_| { + let keypair = Keypair::generate_ed25519(); + + let noise_keys = noise::Keypair::::new() + .into_authentic(&keypair) + .unwrap(); + + let transport = MemoryTransport + .upgrade(upgrade::Version::V1) + .authenticate(noise::NoiseConfig::xx(noise_keys).into_authenticated()) + .multiplex(libp2p::yamux::Config::default()); + + let behaviour = { + let (tx, mut rx) = mpsc::channel(64); + + let b = super::RequestResponsesBehaviour::new(iter::once(super::ProtocolConfig { + name: From::from(protocol_name), + max_request_size: 1024, + max_response_size: 8, // <-- important for the test + request_timeout: Duration::from_secs(30), + inbound_queue: Some(tx), + })).unwrap(); + + async_std::task::spawn(async move { + while let Some(rq) = rx.next().await { + assert_eq!(rq.payload, b"this is a request"); + let _ = rq.pending_response.send(b"this response exceeds the limit".to_vec()); + } + }); + + b + }; + + let mut swarm = Swarm::new(transport, behaviour, keypair.public().into_peer_id()); + let listen_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); + + Swarm::listen_on(&mut swarm, listen_addr.clone()).unwrap(); + 
(swarm, listen_addr) + }) + .collect::>(); + + // Ask `swarm[0]` to dial `swarm[1]`. There isn't any discovery mechanism in place in + // this test, so they wouldn't connect to each other. + { + let dial_addr = swarms[1].1.clone(); + Swarm::dial_addr(&mut swarms[0].0, dial_addr).unwrap(); + } + + // Running `swarm[0]` in the background until a `InboundRequest` event happens, + // which is a hint about the test having ended. + async_std::task::spawn({ + let (mut swarm, _) = swarms.remove(0); + async move { + loop { + match swarm.next_event().await { + SwarmEvent::Behaviour(super::Event::InboundRequest { result, .. }) => { + assert!(result.is_ok()); + break + }, + _ => {} + } + } + } + }); + + // Remove and run the remaining swarm. + let (mut swarm, _) = swarms.remove(0); + async_std::task::block_on(async move { + let mut sent_request_id = None; + + loop { + match swarm.next_event().await { + SwarmEvent::ConnectionEstablished { peer_id, .. } => { + let id = swarm.send_request( + &peer_id, + protocol_name, + b"this is a request".to_vec() + ).unwrap(); + assert!(sent_request_id.is_none()); + sent_request_id = Some(id); + } + SwarmEvent::Behaviour(super::Event::RequestFinished { + request_id, + result, + }) => { + assert_eq!(Some(request_id), sent_request_id); + match result { + Err(super::RequestFailure::Network(super::OutboundFailure::ConnectionClosed)) => {}, + _ => panic!() + } + break; + } + _ => {} + } + } + }); + } +} diff --git a/client/network/src/schema.rs b/client/network/src/schema.rs new file mode 100644 index 0000000000000..44fbbffd25406 --- /dev/null +++ b/client/network/src/schema.rs @@ -0,0 +1,29 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Include sources generated from protobuf definitions. + +pub mod v1 { + include!(concat!(env!("OUT_DIR"), "/api.v1.rs")); + pub mod finality { + include!(concat!(env!("OUT_DIR"), "/api.v1.finality.rs")); + } + pub mod light { + include!(concat!(env!("OUT_DIR"), "/api.v1.light.rs")); + } +} diff --git a/client/network/src/schema/api.v1.proto b/client/network/src/schema/api.v1.proto new file mode 100644 index 0000000000000..a933c5811c109 --- /dev/null +++ b/client/network/src/schema/api.v1.proto @@ -0,0 +1,60 @@ +// Schema definition for block request/response messages. + +syntax = "proto3"; + +package api.v1; + +// Block enumeration direction. +enum Direction { + // Enumerate in ascending order (from child to parent). + Ascending = 0; + // Enumerate in descending order (from parent to canonical child). + Descending = 1; +} + +// Request block data from a peer. +message BlockRequest { + // Bits of block data to request. + uint32 fields = 1; + // Start from this block. + oneof from_block { + // Start with given hash. + bytes hash = 2; + // Start with given block number. + bytes number = 3; + } + // End at this block. An implementation defined maximum is used when unspecified. + bytes to_block = 4; // optional + // Sequence direction. 
+ Direction direction = 5; + // Maximum number of blocks to return. An implementation defined maximum is used when unspecified. + uint32 max_blocks = 6; // optional +} + +// Response to `BlockRequest` +message BlockResponse { + // Block data for the requested sequence. + repeated BlockData blocks = 1; +} + +// Block data sent in the response. +message BlockData { + // Block header hash. + bytes hash = 1; + // Block header if requested. + bytes header = 2; // optional + // Block body if requested. + repeated bytes body = 3; // optional + // Block receipt if requested. + bytes receipt = 4; // optional + // Block message queue if requested. + bytes message_queue = 5; // optional + // Justification if requested. + bytes justification = 6; // optional + // True if justification should be treated as present but empty. + // This hack is unfortunately necessary because shortcomings in the protobuf format otherwise + // doesn't make in possible to differentiate between a lack of justification and an empty + // justification. + bool is_empty_justification = 7; // optional, false if absent +} + diff --git a/client/network/src/schema/finality.v1.proto b/client/network/src/schema/finality.v1.proto new file mode 100644 index 0000000000000..843bc4eca0990 --- /dev/null +++ b/client/network/src/schema/finality.v1.proto @@ -0,0 +1,19 @@ +// Schema definition for finality proof request/responses. + +syntax = "proto3"; + +package api.v1.finality; + +// Request a finality proof from a peer. +message FinalityProofRequest { + // SCALE-encoded hash of the block to request. + bytes block_hash = 1; + // Opaque chain-specific additional request data. + bytes request = 2; +} + +// Response to a finality proof request. +message FinalityProofResponse { + // Opaque chain-specific finality proof. Empty if no such proof exists. 
+ bytes proof = 1; // optional +} diff --git a/client/network/src/schema/light.v1.proto b/client/network/src/schema/light.v1.proto new file mode 100644 index 0000000000000..9b5d47719dc28 --- /dev/null +++ b/client/network/src/schema/light.v1.proto @@ -0,0 +1,120 @@ +// Schema definition for light client messages. + +syntax = "proto3"; + +package api.v1.light; + +// A pair of arbitrary bytes. +message Pair { + // The first element of the pair. + bytes fst = 1; + // The second element of the pair. + bytes snd = 2; +} + +// Enumerate all possible light client request messages. +message Request { + oneof request { + RemoteCallRequest remote_call_request = 1; + RemoteReadRequest remote_read_request = 2; + RemoteHeaderRequest remote_header_request = 3; + RemoteReadChildRequest remote_read_child_request = 4; + RemoteChangesRequest remote_changes_request = 5; + } +} + +// Enumerate all possible light client response messages. +message Response { + oneof response { + RemoteCallResponse remote_call_response = 1; + RemoteReadResponse remote_read_response = 2; + RemoteHeaderResponse remote_header_response = 3; + RemoteChangesResponse remote_changes_response = 4; + } +} + +// Remote call request. +message RemoteCallRequest { + // Block at which to perform call. + bytes block = 2; + // Method name. + string method = 3; + // Call data. + bytes data = 4; +} + +// Remote call response. +message RemoteCallResponse { + // Execution proof. + bytes proof = 2; +} + +// Remote storage read request. +message RemoteReadRequest { + // Block at which to perform call. + bytes block = 2; + // Storage keys. + repeated bytes keys = 3; +} + +// Remote read response. +message RemoteReadResponse { + // Read proof. + bytes proof = 2; +} + +// Remote storage read child request. +message RemoteReadChildRequest { + // Block at which to perform call. + bytes block = 2; + // Child Storage key, this is relative + // to the child type storage location. + bytes storage_key = 3; + // Storage keys. 
+ repeated bytes keys = 6; +} + +// Remote header request. +message RemoteHeaderRequest { + // Block number to request header for. + bytes block = 2; +} + +// Remote header response. +message RemoteHeaderResponse { + // Header. None if proof generation has failed (e.g. header is unknown). + bytes header = 2; // optional + // Header proof. + bytes proof = 3; +} + +/// Remote changes request. +message RemoteChangesRequest { + // Hash of the first block of the range (including first) where changes are requested. + bytes first = 2; + // Hash of the last block of the range (including last) where changes are requested. + bytes last = 3; + // Hash of the first block for which the requester has the changes trie root. All other + // affected roots must be proved. + bytes min = 4; + // Hash of the last block that we can use when querying changes. + bytes max = 5; + // Storage child node key which changes are requested. + bytes storage_key = 6; // optional + // Storage key which changes are requested. + bytes key = 7; +} + +// Remote changes response. +message RemoteChangesResponse { + // Proof has been generated using block with this number as a max block. Should be + // less than or equal to the RemoteChangesRequest::max block number. + bytes max = 2; + // Changes proof. + repeated bytes proof = 3; + // Changes tries roots missing on the requester' node. + repeated Pair roots = 4; + // Missing changes tries roots proof. + bytes roots_proof = 5; +} + diff --git a/client/network/src/service.rs b/client/network/src/service.rs new file mode 100644 index 0000000000000..59f55f01a45d1 --- /dev/null +++ b/client/network/src/service.rs @@ -0,0 +1,1791 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Main entry point of the sc-network crate. +//! +//! There are two main structs in this module: [`NetworkWorker`] and [`NetworkService`]. +//! The [`NetworkWorker`] *is* the network and implements the `Future` trait. It must be polled in +//! order for the network to advance. +//! The [`NetworkService`] is merely a shared version of the [`NetworkWorker`]. You can obtain an +//! `Arc` by calling [`NetworkWorker::service`]. +//! +//! The methods of the [`NetworkService`] are implemented by sending a message over a channel, +//! which is then processed by [`NetworkWorker::poll`]. 
+ +use crate::{ + ExHashT, NetworkStateInfo, NetworkStatus, + behaviour::{self, Behaviour, BehaviourOut}, + config::{parse_str_addr, NonReservedPeerMode, Params, Role, TransportConfig}, + DhtEvent, + discovery::DiscoveryConfig, + error::Error, + network_state::{ + NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer, + }, + on_demand_layer::AlwaysBadChecker, + light_client_handler, block_requests, finality_requests, + protocol::{self, event::Event, NotifsHandlerError, LegacyConnectionKillError, NotificationsSink, Ready, sync::SyncState, PeerInfo, Protocol}, + transport, ReputationChange, +}; +use futures::{channel::oneshot, prelude::*}; +use libp2p::{PeerId, multiaddr, Multiaddr}; +use libp2p::core::{ConnectedPoint, Executor, connection::{ConnectionError, PendingConnectionError}, either::EitherError}; +use libp2p::kad::record; +use libp2p::ping::handler::PingFailure; +use libp2p::swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent, protocols_handler::NodeHandlerWrapperError}; +use log::{error, info, trace, warn}; +use metrics::{Metrics, MetricSources, Histogram, HistogramVec}; +use parking_lot::Mutex; +use sc_peerset::PeersetHandle; +use sp_consensus::import_queue::{BlockImportError, BlockImportResult, ImportQueue, Link}; +use sp_runtime::{ + traits::{Block as BlockT, NumberFor}, + ConsensusEngineId, +}; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use std::{ + borrow::{Borrow, Cow}, + collections::{HashMap, HashSet}, + fs, + marker::PhantomData, + num:: NonZeroUsize, + pin::Pin, + str, + sync::{ + atomic::{AtomicBool, AtomicUsize, Ordering}, + Arc, + }, + task::Poll, +}; +use wasm_timer::Instant; + +pub use behaviour::{ResponseFailure, InboundFailure, RequestFailure, OutboundFailure}; + +mod metrics; +mod out_events; +#[cfg(test)] +mod tests; + +/// Substrate network service. Handles network IO and manages connectivity. 
+pub struct NetworkService { + /// Number of peers we're connected to. + num_connected: Arc, + /// The local external addresses. + external_addresses: Arc>>, + /// Are we actively catching up with the chain? + is_major_syncing: Arc, + /// Local copy of the `PeerId` of the local node. + local_peer_id: PeerId, + /// Bandwidth logging system. Can be queried to know the average bandwidth consumed. + bandwidth: Arc, + /// Peerset manager (PSM); manages the reputation of nodes and indicates the network which + /// nodes it should be connected to or not. + peerset: PeersetHandle, + /// Channel that sends messages to the actual worker. + to_worker: TracingUnboundedSender>, + /// For each peer and protocol combination, an object that allows sending notifications to + /// that peer. Updated by the [`NetworkWorker`]. + peers_notifications_sinks: Arc>>, + /// For each legacy gossiping engine ID, the corresponding new protocol name. + protocol_name_by_engine: Mutex>>, + /// Field extracted from the [`Metrics`] struct and necessary to report the + /// notifications-related metrics. + notifications_sizes_metric: Option, + /// Marker to pin the `H` generic. Serves no purpose except to not break backwards + /// compatibility. + _marker: PhantomData, +} + +impl NetworkWorker { + /// Creates the network service. + /// + /// Returns a `NetworkWorker` that implements `Future` and must be regularly polled in order + /// for the network processing to advance. From it, you can extract a `NetworkService` using + /// `worker.service()`. The `NetworkService` can be shared through the codebase. + pub fn new(params: Params) -> Result, Error> { + // Ensure the listen addresses are consistent with the transport. 
+ ensure_addresses_consistent_with_transport( + params.network_config.listen_addresses.iter(), + ¶ms.network_config.transport, + )?; + ensure_addresses_consistent_with_transport( + params.network_config.boot_nodes.iter().map(|x| &x.multiaddr), + ¶ms.network_config.transport, + )?; + ensure_addresses_consistent_with_transport( + params.network_config.reserved_nodes.iter().map(|x| &x.multiaddr), + ¶ms.network_config.transport, + )?; + ensure_addresses_consistent_with_transport( + params.network_config.public_addresses.iter(), + ¶ms.network_config.transport, + )?; + + let (to_worker, from_service) = tracing_unbounded("mpsc_network_worker"); + + if let Some(path) = params.network_config.net_config_path { + fs::create_dir_all(&path)?; + } + + // List of multiaddresses that we know in the network. + let mut known_addresses = Vec::new(); + let mut bootnodes = Vec::new(); + let mut boot_node_ids = HashSet::new(); + + // Process the bootnodes. + for bootnode in params.network_config.boot_nodes.iter() { + bootnodes.push(bootnode.peer_id.clone()); + boot_node_ids.insert(bootnode.peer_id.clone()); + known_addresses.push((bootnode.peer_id.clone(), bootnode.multiaddr.clone())); + } + + let boot_node_ids = Arc::new(boot_node_ids); + + // Check for duplicate bootnodes. + known_addresses.iter() + .try_for_each(|(peer_id, addr)| + if let Some(other) = known_addresses + .iter() + .find(|o| o.1 == *addr && o.0 != *peer_id) + { + Err(Error::DuplicateBootnode { + address: addr.clone(), + first_id: peer_id.clone(), + second_id: other.0.clone(), + }) + } else { + Ok(()) + } + )?; + + // Initialize the peers we should always be connected to. + let priority_groups = { + let mut reserved_nodes = HashSet::new(); + for reserved in params.network_config.reserved_nodes.iter() { + reserved_nodes.insert(reserved.peer_id.clone()); + known_addresses.push((reserved.peer_id.clone(), reserved.multiaddr.clone())); + } + + let print_deprecated_message = match ¶ms.role { + Role::Sentry { .. 
} => true, + Role::Authority { sentry_nodes } if !sentry_nodes.is_empty() => true, + _ => false, + }; + if print_deprecated_message { + log::warn!( + "🙇 Sentry nodes are deprecated, and the `--sentry` and `--sentry-nodes` \ + CLI options will eventually be removed in a future version. The Substrate \ + and Polkadot networking protocol require validators to be \ + publicly-accessible. Please do not block access to your validator nodes. \ + For details, see https://github.com/paritytech/substrate/issues/6845." + ); + } + + let mut sentries_and_validators = HashSet::new(); + match ¶ms.role { + Role::Sentry { validators } => { + for validator in validators { + sentries_and_validators.insert(validator.peer_id.clone()); + reserved_nodes.insert(validator.peer_id.clone()); + known_addresses.push((validator.peer_id.clone(), validator.multiaddr.clone())); + } + } + Role::Authority { sentry_nodes } => { + for sentry_node in sentry_nodes { + sentries_and_validators.insert(sentry_node.peer_id.clone()); + reserved_nodes.insert(sentry_node.peer_id.clone()); + known_addresses.push((sentry_node.peer_id.clone(), sentry_node.multiaddr.clone())); + } + } + _ => {} + } + + vec![ + ("reserved".to_owned(), reserved_nodes), + ("sentries_and_validators".to_owned(), sentries_and_validators), + ] + }; + + let peerset_config = sc_peerset::PeersetConfig { + in_peers: params.network_config.in_peers, + out_peers: params.network_config.out_peers, + bootnodes, + reserved_only: params.network_config.non_reserved_mode == NonReservedPeerMode::Deny, + priority_groups, + }; + + // Private and public keys configuration. 
+ let local_identity = params.network_config.node_key.clone().into_keypair()?; + let local_public = local_identity.public(); + let local_peer_id = local_public.clone().into_peer_id(); + let local_peer_id_legacy = bs58::encode(Borrow::<[u8]>::borrow(&local_peer_id)).into_string(); + info!( + target: "sub-libp2p", + "🏷 Local node identity is: {} (legacy representation: {})", + local_peer_id.to_base58(), + local_peer_id_legacy + ); + + let checker = params.on_demand.as_ref() + .map(|od| od.checker().clone()) + .unwrap_or_else(|| Arc::new(AlwaysBadChecker)); + + let num_connected = Arc::new(AtomicUsize::new(0)); + let is_major_syncing = Arc::new(AtomicBool::new(false)); + let (protocol, peerset_handle) = Protocol::new( + protocol::ProtocolConfig { + roles: From::from(¶ms.role), + max_parallel_downloads: params.network_config.max_parallel_downloads, + }, + local_peer_id.clone(), + params.chain.clone(), + params.transaction_pool, + params.finality_proof_request_builder, + params.protocol_id.clone(), + peerset_config, + params.block_announce_validator, + params.metrics_registry.as_ref(), + boot_node_ids.clone(), + )?; + + // Build the swarm. 
+ let (mut swarm, bandwidth): (Swarm, _) = { + let user_agent = format!( + "{} ({})", + params.network_config.client_version, + params.network_config.node_name + ); + let block_requests = { + let config = block_requests::Config::new(¶ms.protocol_id); + block_requests::BlockRequests::new(config, params.chain.clone()) + }; + let finality_proof_requests = { + let config = finality_requests::Config::new(¶ms.protocol_id); + finality_requests::FinalityProofRequests::new(config, params.finality_proof_provider.clone()) + }; + let light_client_handler = { + let config = light_client_handler::Config::new(¶ms.protocol_id); + light_client_handler::LightClientHandler::new( + config, + params.chain, + checker, + peerset_handle.clone(), + ) + }; + + let discovery_config = { + let mut config = DiscoveryConfig::new(local_public.clone()); + config.with_user_defined(known_addresses); + config.discovery_limit(u64::from(params.network_config.out_peers) + 15); + config.add_protocol(params.protocol_id.clone()); + config.allow_non_globals_in_dht(params.network_config.allow_non_globals_in_dht); + + match params.network_config.transport { + TransportConfig::MemoryOnly => { + config.with_mdns(false); + config.allow_private_ipv4(false); + } + TransportConfig::Normal { enable_mdns, allow_private_ipv4, .. 
} => { + config.with_mdns(enable_mdns); + config.allow_private_ipv4(allow_private_ipv4); + } + } + + config + }; + + let mut behaviour = { + let result = Behaviour::new( + protocol, + params.role, + user_agent, + local_public, + block_requests, + finality_proof_requests, + light_client_handler, + discovery_config, + params.network_config.request_response_protocols, + ); + + match result { + Ok(b) => b, + Err(crate::request_responses::RegisterError::DuplicateProtocol(proto)) => { + return Err(Error::DuplicateRequestResponseProtocol { + protocol: proto, + }) + }, + } + }; + + for (engine_id, protocol_name) in ¶ms.network_config.notifications_protocols { + behaviour.register_notifications_protocol(*engine_id, protocol_name.clone()); + } + let (transport, bandwidth) = { + let (config_mem, config_wasm, flowctrl) = match params.network_config.transport { + TransportConfig::MemoryOnly => (true, None, false), + TransportConfig::Normal { wasm_external_transport, use_yamux_flow_control, .. } => + (false, wasm_external_transport, use_yamux_flow_control) + }; + transport::build_transport(local_identity, config_mem, config_wasm, flowctrl) + }; + let mut builder = SwarmBuilder::new(transport, behaviour, local_peer_id.clone()) + .peer_connection_limit(crate::MAX_CONNECTIONS_PER_PEER) + .notify_handler_buffer_size(NonZeroUsize::new(32).expect("32 != 0; qed")) + .connection_event_buffer_size(1024); + if let Some(spawner) = params.executor { + struct SpawnImpl(F); + impl + Send>>)> Executor for SpawnImpl { + fn exec(&self, f: Pin + Send>>) { + (self.0)(f) + } + } + builder = builder.executor(Box::new(SpawnImpl(spawner))); + } + (builder.build(), bandwidth) + }; + + // Initialize the metrics. + let metrics = match ¶ms.metrics_registry { + Some(registry) => { + Some(metrics::register(registry, MetricSources { + bandwidth: bandwidth.clone(), + major_syncing: is_major_syncing.clone(), + connected_peers: num_connected.clone(), + })?) 
+ } + None => None + }; + + // Listen on multiaddresses. + for addr in ¶ms.network_config.listen_addresses { + if let Err(err) = Swarm::::listen_on(&mut swarm, addr.clone()) { + warn!(target: "sub-libp2p", "Can't listen on {} because: {:?}", addr, err) + } + } + + // Add external addresses. + for addr in ¶ms.network_config.public_addresses { + Swarm::::add_external_address(&mut swarm, addr.clone()); + } + + let external_addresses = Arc::new(Mutex::new(Vec::new())); + let peers_notifications_sinks = Arc::new(Mutex::new(HashMap::new())); + let protocol_name_by_engine = Mutex::new({ + params.network_config.notifications_protocols.iter().cloned().collect() + }); + + let service = Arc::new(NetworkService { + bandwidth, + external_addresses: external_addresses.clone(), + num_connected: num_connected.clone(), + is_major_syncing: is_major_syncing.clone(), + peerset: peerset_handle, + local_peer_id, + to_worker, + peers_notifications_sinks: peers_notifications_sinks.clone(), + protocol_name_by_engine, + notifications_sizes_metric: + metrics.as_ref().map(|metrics| metrics.notifications_sizes.clone()), + _marker: PhantomData, + }); + + Ok(NetworkWorker { + external_addresses, + num_connected, + is_major_syncing, + network_service: swarm, + service, + import_queue: params.import_queue, + from_service, + light_client_rqs: params.on_demand.and_then(|od| od.extract_receiver()), + event_streams: out_events::OutChannels::new(params.metrics_registry.as_ref())?, + peers_notifications_sinks, + metrics, + boot_node_ids, + pending_requests: HashMap::with_capacity(128), + }) + } + + /// High-level network status information. 
+ pub fn status(&self) -> NetworkStatus { + NetworkStatus { + sync_state: self.sync_state(), + best_seen_block: self.best_seen_block(), + num_sync_peers: self.num_sync_peers(), + num_connected_peers: self.num_connected_peers(), + num_active_peers: self.num_active_peers(), + total_bytes_inbound: self.total_bytes_inbound(), + total_bytes_outbound: self.total_bytes_outbound(), + } + } + + /// Returns the total number of bytes received so far. + pub fn total_bytes_inbound(&self) -> u64 { + self.service.bandwidth.total_inbound() + } + + /// Returns the total number of bytes sent so far. + pub fn total_bytes_outbound(&self) -> u64 { + self.service.bandwidth.total_outbound() + } + + /// Returns the number of peers we're connected to. + pub fn num_connected_peers(&self) -> usize { + self.network_service.user_protocol().num_connected_peers() + } + + /// Returns the number of peers we're connected to and that are being queried. + pub fn num_active_peers(&self) -> usize { + self.network_service.user_protocol().num_active_peers() + } + + /// Current global sync state. + pub fn sync_state(&self) -> SyncState { + self.network_service.user_protocol().sync_state() + } + + /// Target sync block number. + pub fn best_seen_block(&self) -> Option> { + self.network_service.user_protocol().best_seen_block() + } + + /// Number of peers participating in syncing. + pub fn num_sync_peers(&self) -> u32 { + self.network_service.user_protocol().num_sync_peers() + } + + /// Number of blocks in the import queue. + pub fn num_queued_blocks(&self) -> u32 { + self.network_service.user_protocol().num_queued_blocks() + } + + /// Returns the number of downloaded blocks. + pub fn num_downloaded_blocks(&self) -> usize { + self.network_service.user_protocol().num_downloaded_blocks() + } + + /// Number of active sync requests. + pub fn num_sync_requests(&self) -> usize { + self.network_service.user_protocol().num_sync_requests() + } + + /// Adds an address for a node. 
+ pub fn add_known_address(&mut self, peer_id: PeerId, addr: Multiaddr) { + self.network_service.add_known_address(peer_id, addr); + } + + /// Return a `NetworkService` that can be shared through the code base and can be used to + /// manipulate the worker. + pub fn service(&self) -> &Arc> { + &self.service + } + + /// You must call this when a new block is finalized by the client. + pub fn on_block_finalized(&mut self, hash: B::Hash, header: B::Header) { + self.network_service.user_protocol_mut().on_block_finalized(hash, &header); + } + + /// This should be called when blocks are added to the + /// chain by something other than the import queue. + /// Currently this is only useful for tests. + pub fn update_chain(&mut self) { + self.network_service.user_protocol_mut().update_chain(); + } + + /// Returns the local `PeerId`. + pub fn local_peer_id(&self) -> &PeerId { + Swarm::::local_peer_id(&self.network_service) + } + + /// Returns the list of addresses we are listening on. + /// + /// Does **NOT** include a trailing `/p2p/` with our `PeerId`. + pub fn listen_addresses(&self) -> impl Iterator { + Swarm::::listeners(&self.network_service) + } + + /// Get network state. + /// + /// **Note**: Use this only for debugging. This API is unstable. There are warnings literally + /// everywhere about this. Please don't use this function to retrieve actual information. 
+ pub fn network_state(&mut self) -> NetworkState { + let swarm = &mut self.network_service; + let open = swarm.user_protocol().open_peers().cloned().collect::>(); + + let connected_peers = { + let swarm = &mut *swarm; + open.iter().filter_map(move |peer_id| { + let known_addresses = NetworkBehaviour::addresses_of_peer(&mut **swarm, peer_id) + .into_iter().collect(); + + let endpoint = if let Some(e) = swarm.node(peer_id).map(|i| i.endpoint()) { + e.clone().into() + } else { + error!(target: "sub-libp2p", "Found state inconsistency between custom protocol \ + and debug information about {:?}", peer_id); + return None + }; + + Some((peer_id.to_base58(), NetworkStatePeer { + endpoint, + version_string: swarm.node(peer_id) + .and_then(|i| i.client_version().map(|s| s.to_owned())), + latest_ping_time: swarm.node(peer_id).and_then(|i| i.latest_ping()), + enabled: swarm.user_protocol().is_enabled(&peer_id), + open: swarm.user_protocol().is_open(&peer_id), + known_addresses, + })) + }).collect() + }; + + let not_connected_peers = { + let swarm = &mut *swarm; + swarm.known_peers().into_iter() + .filter(|p| open.iter().all(|n| n != p)) + .map(move |peer_id| { + (peer_id.to_base58(), NetworkStateNotConnectedPeer { + version_string: swarm.node(&peer_id) + .and_then(|i| i.client_version().map(|s| s.to_owned())), + latest_ping_time: swarm.node(&peer_id).and_then(|i| i.latest_ping()), + known_addresses: NetworkBehaviour::addresses_of_peer(&mut **swarm, &peer_id) + .into_iter().collect(), + }) + }) + .collect() + }; + + NetworkState { + peer_id: Swarm::::local_peer_id(&swarm).to_base58(), + listened_addresses: Swarm::::listeners(&swarm).cloned().collect(), + external_addresses: Swarm::::external_addresses(&swarm).cloned().collect(), + connected_peers, + not_connected_peers, + peerset: swarm.user_protocol_mut().peerset_debug_info(), + } + } + + /// Get currently connected peers. 
+ pub fn peers_debug_info(&mut self) -> Vec<(PeerId, PeerInfo)> { + self.network_service.user_protocol_mut() + .peers_info() + .map(|(id, info)| (id.clone(), info.clone())) + .collect() + } + + /// Removes a `PeerId` from the list of reserved peers. + pub fn remove_reserved_peer(&self, peer: PeerId) { + self.service.remove_reserved_peer(peer); + } + + /// Adds a `PeerId` and its address as reserved. The string should encode the address + /// and peer ID of the remote node. + pub fn add_reserved_peer(&self, peer: String) -> Result<(), String> { + self.service.add_reserved_peer(peer) + } +} + +impl NetworkService { + /// Returns the local `PeerId`. + pub fn local_peer_id(&self) -> &PeerId { + &self.local_peer_id + } + + /// Set authorized peers. + /// + /// Need a better solution to manage authorized peers, but now just use reserved peers for + /// prototyping. + pub fn set_authorized_peers(&self, peers: HashSet) { + self.peerset.set_reserved_peers(peers) + } + + /// Set authorized_only flag. + /// + /// Need a better solution to decide authorized_only, but now just use reserved_only flag for + /// prototyping. + pub fn set_authorized_only(&self, reserved_only: bool) { + self.peerset.set_reserved_only(reserved_only) + } + + /// Appends a notification to the buffer of pending outgoing notifications with the given peer. + /// Has no effect if the notifications channel with this protocol name is not open. + /// + /// If the buffer of pending outgoing notifications with that peer is full, the notification + /// is silently dropped and the connection to the remote will start being shut down. This + /// happens if you call this method at a higher rate than the rate at which the peer processes + /// these notifications, or if the available network bandwidth is too low. + /// + /// For this reason, this method is considered soft-deprecated. You are encouraged to use + /// [`NetworkService::notification_sender`] instead. 
+ /// + /// > **Note**: The reason why this is a no-op in the situation where we have no channel is + /// > that we don't guarantee message delivery anyway. Networking issues can cause + /// > connections to drop at any time, and higher-level logic shouldn't differentiate + /// > between the remote voluntarily closing a substream or a network error + /// > preventing the message from being delivered. + /// + /// The protocol must have been registered with `register_notifications_protocol` or + /// [`NetworkConfiguration::notifications_protocols`](crate::config::NetworkConfiguration::notifications_protocols). + /// + pub fn write_notification(&self, target: PeerId, engine_id: ConsensusEngineId, message: Vec) { + // We clone the `NotificationsSink` in order to be able to unlock the network-wide + // `peers_notifications_sinks` mutex as soon as possible. + let sink = { + let peers_notifications_sinks = self.peers_notifications_sinks.lock(); + if let Some(sink) = peers_notifications_sinks.get(&(target, engine_id)) { + sink.clone() + } else { + // Notification silently discarded, as documented. + return; + } + }; + + // Used later for the metrics report. + let message_len = message.len(); + + // Determine the wire protocol name corresponding to this `engine_id`. + let protocol_name = self.protocol_name_by_engine.lock().get(&engine_id).cloned(); + if let Some(protocol_name) = protocol_name { + sink.send_sync_notification(protocol_name, message); + } else { + return; + } + + if let Some(notifications_sizes_metric) = self.notifications_sizes_metric.as_ref() { + notifications_sizes_metric + .with_label_values(&["out", &maybe_utf8_bytes_to_string(&engine_id)]) + .observe(message_len as f64); + } + } + + /// Obtains a [`NotificationSender`] for a connected peer, if it exists. + /// + /// A `NotificationSender` is scoped to a particular connection to the peer that holds + /// a receiver. 
With a `NotificationSender` at hand, sending a notification is done in two steps: + /// + /// 1. [`NotificationSender::ready`] is used to wait for the sender to become ready + /// for another notification, yielding a [`NotificationSenderReady`] token. + /// 2. [`NotificationSenderReady::send`] enqueues the notification for sending. This operation + /// can only fail if the underlying notification substream or connection has suddenly closed. + /// + /// An error is returned by [`NotificationSenderReady::send`] if there exists no open + /// notifications substream with that combination of peer and protocol, or if the remote + /// has asked to close the notifications substream. If that happens, it is guaranteed that an + /// [`Event::NotificationStreamClosed`] has been generated on the stream returned by + /// [`NetworkService::event_stream`]. + /// + /// If the remote requests to close the notifications substream, all notifications successfully + /// enqueued using [`NotificationSenderReady::send`] will finish being sent out before the + /// substream actually gets closed, but attempting to enqueue more notifications will now + /// return an error. It is however possible for the entire connection to be abruptly closed, + /// in which case enqueued notifications will be lost. + /// + /// The protocol must have been registered with `register_notifications_protocol` or + /// [`NetworkConfiguration::notifications_protocols`](crate::config::NetworkConfiguration::notifications_protocols). + /// + /// # Usage + /// + /// This method returns a struct that allows waiting until there is space available in the + /// buffer of messages towards the given peer. If the peer processes notifications at a slower + /// rate than we send them, this buffer will quickly fill up. + /// + /// As such, you should never do something like this: + /// + /// ```ignore + /// // Do NOT do this + /// for peer in peers { + /// if let Ok(n) = network.notification_sender(peer, ...) 
{ + /// if let Ok(s) = n.ready().await { + /// let _ = s.send(...); + /// } + /// } + /// } + /// ``` + /// + /// Doing so would slow down all peers to the rate of the slowest one. A malicious or + /// malfunctioning peer could intentionally process notifications at a very slow rate. + /// + /// Instead, you are encouraged to maintain your own buffer of notifications on top of the one + /// maintained by `sc-network`, and use `notification_sender` to progressively send out + /// elements from your buffer. If this additional buffer is full (which will happen at some + /// point if the peer is too slow to process notifications), appropriate measures can be taken, + /// such as removing non-critical notifications from the buffer or disconnecting the peer + /// using [`NetworkService::disconnect_peer`]. + /// + /// + /// Notifications Per-peer buffer + /// broadcast +-------> of notifications +--> `notification_sender` +--> Internet + /// ^ (not covered by + /// | sc-network) + /// + + /// Notifications should be dropped + /// if buffer is full + /// + /// + /// See also the [`gossip`](crate::gossip) module for a higher-level way to send + /// notifications. + /// + pub fn notification_sender( + &self, + target: PeerId, + engine_id: ConsensusEngineId, + ) -> Result { + // We clone the `NotificationsSink` in order to be able to unlock the network-wide + // `peers_notifications_sinks` mutex as soon as possible. + let sink = { + let peers_notifications_sinks = self.peers_notifications_sinks.lock(); + if let Some(sink) = peers_notifications_sinks.get(&(target, engine_id)) { + sink.clone() + } else { + return Err(NotificationSenderError::Closed); + } + }; + + // Determine the wire protocol name corresponding to this `engine_id`. 
+ let protocol_name = match self.protocol_name_by_engine.lock().get(&engine_id).cloned() { + Some(p) => p, + None => return Err(NotificationSenderError::BadProtocol), + }; + + Ok(NotificationSender { + sink, + protocol_name, + notification_size_metric: self.notifications_sizes_metric.as_ref().map(|histogram| { + histogram.with_label_values(&["out", &maybe_utf8_bytes_to_string(&engine_id)]) + }), + }) + } + + /// Returns a stream containing the events that happen on the network. + /// + /// If this method is called multiple times, the events are duplicated. + /// + /// The stream never ends (unless the `NetworkWorker` gets shut down). + /// + /// The name passed is used to identify the channel in the Prometheus metrics. Note that the + /// parameter is a `&'static str`, and not a `String`, in order to avoid accidentally having + /// an unbounded set of Prometheus metrics, which would be quite bad in terms of memory + pub fn event_stream(&self, name: &'static str) -> impl Stream { + let (tx, rx) = out_events::channel(name); + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::EventStream(tx)); + rx + } + + /// Sends a single targeted request to a specific peer. On success, returns the response of + /// the peer. + /// + /// Request-response protocols are a way to complement notifications protocols, but + /// notifications should remain the default ways of communicating information. For example, a + /// peer can announce something through a notification, after which the recipient can obtain + /// more information by performing a request. + /// As such, this function is meant to be called only with peers we are already connected to. + /// Calling this method with a `target` we are not connected to will *not* attempt to connect + /// to said peer. + /// + /// No limit or throttling of concurrent outbound requests per peer and protocol are enforced. + /// Such restrictions, if desired, need to be enforced at the call site(s). 
+ /// + /// The protocol must have been registered through + /// [`NetworkConfiguration::request_response_protocols`]( + /// crate::config::NetworkConfiguration::request_response_protocols). + pub async fn request( + &self, + target: PeerId, + protocol: impl Into>, + request: Vec + ) -> Result, RequestFailure> { + let (tx, rx) = oneshot::channel(); + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::Request { + target, + protocol: protocol.into(), + request, + pending_response: tx + }); + + match rx.await { + Ok(v) => v, + // The channel can only be closed if the network worker no longer exists. If the + // network worker no longer exists, then all connections to `target` are necessarily + // closed, and we legitimately report this situation as a "ConnectionClosed". + Err(_) => Err(RequestFailure::Network(OutboundFailure::ConnectionClosed)), + } + } + + /// Registers a new notifications protocol. + /// + /// After a protocol has been registered, you can call `write_notifications`. + /// + /// **Important**: This method is a work-around, and you are instead strongly encouraged to + /// pass the protocol in the `NetworkConfiguration::notifications_protocols` list instead. + /// If you have no other choice but to use this method, you are very strongly encouraged to + /// call it very early on. Any connection open will retain the protocols that were registered + /// then, and not any new one. + /// + /// Please call `event_stream` before registering a protocol, otherwise you may miss events + /// about the protocol that you have registered. 
+ // TODO: remove this method after https://github.com/paritytech/substrate/issues/4587 + pub fn register_notifications_protocol( + &self, + engine_id: ConsensusEngineId, + protocol_name: impl Into>, + ) { + let protocol_name = protocol_name.into(); + self.protocol_name_by_engine.lock().insert(engine_id, protocol_name.clone()); + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::RegisterNotifProtocol { + engine_id, + protocol_name, + }); + } + + /// You may call this when new transactons are imported by the transaction pool. + /// + /// All transactions will be fetched from the `TransactionPool` that was passed at + /// initialization as part of the configuration and propagated to peers. + pub fn trigger_repropagate(&self) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PropagateTransactions); + } + + /// You must call when new transaction is imported by the transaction pool. + /// + /// This transaction will be fetched from the `TransactionPool` that was passed at + /// initialization as part of the configuration and propagated to peers. + pub fn propagate_transaction(&self, hash: H) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PropagateTransaction(hash)); + } + + /// Make sure an important block is propagated to peers. + /// + /// In chain-based consensus, we often need to make sure non-best forks are + /// at least temporarily synced. This function forces such an announcement. + pub fn announce_block(&self, hash: B::Hash, data: Vec) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::AnnounceBlock(hash, data)); + } + + /// Report a given peer as either beneficial (+) or costly (-) according to the + /// given scalar. + pub fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { + self.peerset.report_peer(who, cost_benefit); + } + + /// Disconnect from a node as soon as possible. + /// + /// This triggers the same effects as if the connection had closed itself spontaneously. 
+ pub fn disconnect_peer(&self, who: PeerId) { + let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::DisconnectPeer(who)); + } + + /// Request a justification for the given block from the network. + /// + /// On success, the justification will be passed to the import queue that was part at + /// initialization as part of the configuration. + pub fn request_justification(&self, hash: &B::Hash, number: NumberFor) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::RequestJustification(*hash, number)); + } + + /// Are we in the process of downloading the chain? + pub fn is_major_syncing(&self) -> bool { + self.is_major_syncing.load(Ordering::Relaxed) + } + + /// Start getting a value from the DHT. + /// + /// This will generate either a `ValueFound` or a `ValueNotFound` event and pass it as an + /// item on the [`NetworkWorker`] stream. + pub fn get_value(&self, key: &record::Key) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::GetValue(key.clone())); + } + + /// Start putting a value in the DHT. + /// + /// This will generate either a `ValuePut` or a `ValuePutFailed` event and pass it as an + /// item on the [`NetworkWorker`] stream. + pub fn put_value(&self, key: record::Key, value: Vec) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::PutValue(key, value)); + } + + /// Connect to unreserved peers and allow unreserved peers to connect. + pub fn accept_unreserved_peers(&self) { + self.peerset.set_reserved_only(false); + } + + /// Disconnect from unreserved peers and deny new unreserved peers to connect. + pub fn deny_unreserved_peers(&self) { + self.peerset.set_reserved_only(true); + } + + /// Removes a `PeerId` from the list of reserved peers. + pub fn remove_reserved_peer(&self, peer: PeerId) { + self.peerset.remove_reserved_peer(peer); + } + + /// Adds a `PeerId` and its address as reserved. The string should encode the address + /// and peer ID of the remote node. 
+ /// + /// Returns an `Err` if the given string is not a valid multiaddress + /// or contains an invalid peer ID (which includes the local peer ID). + pub fn add_reserved_peer(&self, peer: String) -> Result<(), String> { + let (peer_id, addr) = parse_str_addr(&peer).map_err(|e| format!("{:?}", e))?; + // Make sure the local peer ID is never added to the PSM. + if peer_id == self.local_peer_id { + return Err("Local peer ID cannot be added as a reserved peer.".to_string()) + } + self.peerset.add_reserved_peer(peer_id.clone()); + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr)); + Ok(()) + } + + /// Configure an explicit fork sync request. + /// Note that this function should not be used for recent blocks. + /// Sync should be able to download all the recent forks normally. + /// `set_sync_fork_request` should only be used if external code detects that there's + /// a stale fork missing. + /// Passing empty `peers` set effectively removes the sync request. + pub fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::SyncFork(peers, hash, number)); + } + + /// Modify a peerset priority group. + /// + /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. + /// + /// Returns an `Err` if one of the given addresses is invalid or contains an + /// invalid peer ID (which includes the local peer ID). + pub fn set_priority_group(&self, group_id: String, peers: HashSet) -> Result<(), String> { + let peers = peers.into_iter() + .map(|mut addr| { + let peer = match addr.pop() { + Some(multiaddr::Protocol::P2p(key)) => PeerId::from_multihash(key) + .map_err(|_| "Invalid PeerId format".to_string())?, + _ => return Err("Missing PeerId from address".to_string()), + }; + + // Make sure the local peer ID is never added to the PSM + // or added as a "known address", even if given. 
+ if peer == self.local_peer_id { + Err("Local peer ID in priority group.".to_string()) + } else { + Ok((peer, addr)) + } + }) + .collect::, String>>()?; + + let peer_ids = peers.iter().map(|(peer_id, _addr)| peer_id.clone()).collect(); + + self.peerset.set_priority_group(group_id, peer_ids); + + for (peer_id, addr) in peers.into_iter() { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr)); + } + + Ok(()) + } + + /// Returns the number of peers we're connected to. + pub fn num_connected(&self) -> usize { + self.num_connected.load(Ordering::Relaxed) + } + + /// This function should be called when blocks are added to the chain by something other + /// than the import queue. + /// + /// > **Important**: This function is a hack and can be removed at any time. Do **not** use it. + pub fn update_chain(&self) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::UpdateChain); + } + + /// Inform the network service about an own imported block. + pub fn own_block_imported(&self, hash: B::Hash, number: NumberFor) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::OwnBlockImported(hash, number)); + } +} + +impl sp_consensus::SyncOracle + for NetworkService +{ + fn is_major_syncing(&mut self) -> bool { + NetworkService::is_major_syncing(self) + } + + fn is_offline(&mut self) -> bool { + self.num_connected.load(Ordering::Relaxed) == 0 + } +} + +impl<'a, B: BlockT + 'static, H: ExHashT> sp_consensus::SyncOracle + for &'a NetworkService +{ + fn is_major_syncing(&mut self) -> bool { + NetworkService::is_major_syncing(self) + } + + fn is_offline(&mut self) -> bool { + self.num_connected.load(Ordering::Relaxed) == 0 + } +} + +impl NetworkStateInfo for NetworkService + where + B: sp_runtime::traits::Block, + H: ExHashT, +{ + /// Returns the local external addresses. + fn external_addresses(&self) -> Vec { + self.external_addresses.lock().clone() + } + + /// Returns the local Peer ID. 
+ fn local_peer_id(&self) -> PeerId { + self.local_peer_id.clone() + } +} + +/// A `NotificationSender` allows for sending notifications to a peer with a chosen protocol. +#[must_use] +pub struct NotificationSender { + sink: NotificationsSink, + + /// Name of the protocol on the wire. + protocol_name: Cow<'static, str>, + + /// Field extracted from the [`Metrics`] struct and necessary to report the + /// notifications-related metrics. + notification_size_metric: Option, +} + +impl NotificationSender { + /// Returns a future that resolves when the `NotificationSender` is ready to send a notification. + pub async fn ready<'a>(&'a self) -> Result, NotificationSenderError> { + Ok(NotificationSenderReady { + ready: match self.sink.reserve_notification(self.protocol_name.clone()).await { + Ok(r) => r, + Err(()) => return Err(NotificationSenderError::Closed), + }, + notification_size_metric: self.notification_size_metric.clone(), + }) + } +} + +/// Reserved slot in the notifications buffer, ready to accept data. +#[must_use] +pub struct NotificationSenderReady<'a> { + ready: Ready<'a>, + + /// Field extracted from the [`Metrics`] struct and necessary to report the + /// notifications-related metrics. + notification_size_metric: Option, +} + +impl<'a> NotificationSenderReady<'a> { + /// Consumes this slots reservation and actually queues the notification. + pub fn send(self, notification: impl Into>) -> Result<(), NotificationSenderError> { + let notification = notification.into(); + + if let Some(notification_size_metric) = &self.notification_size_metric { + notification_size_metric.observe(notification.len() as f64); + } + + self.ready + .send(notification) + .map_err(|()| NotificationSenderError::Closed) + } +} + +/// Error returned by [`NetworkService::send_notification`]. +#[derive(Debug, derive_more::Display, derive_more::Error)] +pub enum NotificationSenderError { + /// The notification receiver has been closed, usually because the underlying connection closed. 
+ /// + /// Some of the notifications most recently sent may not have been received. However, + /// the peer may still be connected and a new `NotificationSender` for the same + /// protocol obtained from [`NetworkService::notification_sender`]. + Closed, + /// Protocol name hasn't been registered. + BadProtocol, +} + +/// Messages sent from the `NetworkService` to the `NetworkWorker`. +/// +/// Each entry corresponds to a method of `NetworkService`. +enum ServiceToWorkerMsg { + PropagateTransaction(H), + PropagateTransactions, + RequestJustification(B::Hash, NumberFor), + AnnounceBlock(B::Hash, Vec), + GetValue(record::Key), + PutValue(record::Key, Vec), + AddKnownAddress(PeerId, Multiaddr), + SyncFork(Vec, B::Hash, NumberFor), + EventStream(out_events::Sender), + Request { + target: PeerId, + protocol: Cow<'static, str>, + request: Vec, + pending_response: oneshot::Sender, RequestFailure>>, + }, + RegisterNotifProtocol { + engine_id: ConsensusEngineId, + protocol_name: Cow<'static, str>, + }, + DisconnectPeer(PeerId), + UpdateChain, + OwnBlockImported(B::Hash, NumberFor), +} + +/// Main network worker. Must be polled in order for the network to advance. +/// +/// You are encouraged to poll this in a separate background thread or task. +#[must_use = "The NetworkWorker must be polled in order for the network to advance"] +pub struct NetworkWorker { + /// Updated by the `NetworkWorker` and loaded by the `NetworkService`. + external_addresses: Arc>>, + /// Updated by the `NetworkWorker` and loaded by the `NetworkService`. + num_connected: Arc, + /// Updated by the `NetworkWorker` and loaded by the `NetworkService`. + is_major_syncing: Arc, + /// The network service that can be extracted and shared through the codebase. + service: Arc>, + /// The *actual* network. + network_service: Swarm, + /// The import queue that was passed at initialization. + import_queue: Box>, + /// Messages from the [`NetworkService`] that must be processed. 
+ from_service: TracingUnboundedReceiver>, + /// Receiver for queries from the light client that must be processed. + light_client_rqs: Option>>, + /// Senders for events that happen on the network. + event_streams: out_events::OutChannels, + /// Prometheus network metrics. + metrics: Option, + /// The `PeerId`'s of all boot nodes. + boot_node_ids: Arc>, + /// Requests started using [`NetworkService::request`]. Includes the channel to send back the + /// response, when the request has started, and the name of the protocol for diagnostic + /// purposes. + pending_requests: HashMap< + behaviour::RequestId, + (oneshot::Sender, RequestFailure>>, Instant, String) + >, + /// For each peer and protocol combination, an object that allows sending notifications to + /// that peer. Shared with the [`NetworkService`]. + peers_notifications_sinks: Arc>>, +} + +impl Future for NetworkWorker { + type Output = (); + + fn poll(mut self: Pin<&mut Self>, cx: &mut std::task::Context) -> Poll { + let this = &mut *self; + + // Poll the import queue for actions to perform. + this.import_queue.poll_actions(cx, &mut NetworkLink { + protocol: &mut this.network_service, + }); + + // Check for new incoming light client requests. + if let Some(light_client_rqs) = this.light_client_rqs.as_mut() { + while let Poll::Ready(Some(rq)) = light_client_rqs.poll_next_unpin(cx) { + // This can error if there are too many queued requests already. + if this.network_service.light_client_request(rq).is_err() { + log::warn!("Couldn't start light client request: too many pending requests"); + } + if let Some(metrics) = this.metrics.as_ref() { + metrics.issued_light_requests.inc(); + } + } + } + + // At the time of writing of this comment, due to a high volume of messages, the network + // worker sometimes takes a long time to process the loop below. When that happens, the + // rest of the polling is frozen. 
In order to avoid negative side-effects caused by this + // freeze, a limit to the number of iterations is enforced below. If the limit is reached, + // the task is interrupted then scheduled again. + // + // This allows for a more even distribution in the time taken by each sub-part of the + // polling. + let mut num_iterations = 0; + loop { + num_iterations += 1; + if num_iterations >= 100 { + cx.waker().wake_by_ref(); + break; + } + + // Process the next message coming from the `NetworkService`. + let msg = match this.from_service.poll_next_unpin(cx) { + Poll::Ready(Some(msg)) => msg, + Poll::Ready(None) => return Poll::Ready(()), + Poll::Pending => break, + }; + + match msg { + ServiceToWorkerMsg::AnnounceBlock(hash, data) => + this.network_service.user_protocol_mut().announce_block(hash, data), + ServiceToWorkerMsg::RequestJustification(hash, number) => + this.network_service.user_protocol_mut().request_justification(&hash, number), + ServiceToWorkerMsg::PropagateTransaction(hash) => + this.network_service.user_protocol_mut().propagate_transaction(&hash), + ServiceToWorkerMsg::PropagateTransactions => + this.network_service.user_protocol_mut().propagate_transactions(), + ServiceToWorkerMsg::GetValue(key) => + this.network_service.get_value(&key), + ServiceToWorkerMsg::PutValue(key, value) => + this.network_service.put_value(key, value), + ServiceToWorkerMsg::AddKnownAddress(peer_id, addr) => + this.network_service.add_known_address(peer_id, addr), + ServiceToWorkerMsg::SyncFork(peer_ids, hash, number) => + this.network_service.user_protocol_mut().set_sync_fork_request(peer_ids, &hash, number), + ServiceToWorkerMsg::EventStream(sender) => + this.event_streams.push(sender), + ServiceToWorkerMsg::Request { target, protocol, request, pending_response } => { + // Calling `send_request` can fail immediately in some circumstances. + // This is handled by sending back an error on the channel. 
+ match this.network_service.send_request(&target, &protocol, request) { + Ok(request_id) => { + if let Some(metrics) = this.metrics.as_ref() { + metrics.requests_out_started_total + .with_label_values(&[&protocol]) + .inc(); + } + this.pending_requests.insert( + request_id, + (pending_response, Instant::now(), protocol.to_string()) + ); + }, + Err(behaviour::SendRequestError::NotConnected) => { + let err = RequestFailure::Network(OutboundFailure::ConnectionClosed); + let _ = pending_response.send(Err(err)); + }, + Err(behaviour::SendRequestError::UnknownProtocol) => { + let err = RequestFailure::Network(OutboundFailure::UnsupportedProtocols); + let _ = pending_response.send(Err(err)); + }, + } + }, + ServiceToWorkerMsg::RegisterNotifProtocol { engine_id, protocol_name } => { + this.network_service + .register_notifications_protocol(engine_id, protocol_name); + }, + ServiceToWorkerMsg::DisconnectPeer(who) => + this.network_service.user_protocol_mut().disconnect_peer(&who), + ServiceToWorkerMsg::UpdateChain => + this.network_service.user_protocol_mut().update_chain(), + ServiceToWorkerMsg::OwnBlockImported(hash, number) => + this.network_service.user_protocol_mut().own_block_imported(hash, number), + } + } + + // `num_iterations` serves the same purpose as in the previous loop. + // See the previous loop for explanations. + let mut num_iterations = 0; + loop { + num_iterations += 1; + if num_iterations >= 1000 { + cx.waker().wake_by_ref(); + break; + } + + // Process the next action coming from the network. 
+ let next_event = this.network_service.next_event(); + futures::pin_mut!(next_event); + let poll_value = next_event.poll_unpin(cx); + + match poll_value { + Poll::Pending => break, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::BlockImport(origin, blocks))) => { + if let Some(metrics) = this.metrics.as_ref() { + metrics.import_queue_blocks_submitted.inc(); + } + this.import_queue.import_blocks(origin, blocks); + }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::JustificationImport(origin, hash, nb, justification))) => { + if let Some(metrics) = this.metrics.as_ref() { + metrics.import_queue_justifications_submitted.inc(); + } + this.import_queue.import_justification(origin, hash, nb, justification); + }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::FinalityProofImport(origin, hash, nb, proof))) => { + if let Some(metrics) = this.metrics.as_ref() { + metrics.import_queue_finality_proofs_submitted.inc(); + } + this.import_queue.import_finality_proof(origin, hash, nb, proof); + }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::InboundRequest { protocol, result, .. 
})) => { + if let Some(metrics) = this.metrics.as_ref() { + match result { + Ok(serve_time) => { + metrics.requests_in_success_total + .with_label_values(&[&protocol]) + .observe(serve_time.as_secs_f64()); + } + Err(err) => { + let reason = match err { + ResponseFailure::Busy => "busy", + ResponseFailure::Network(InboundFailure::Timeout) => "timeout", + ResponseFailure::Network(InboundFailure::UnsupportedProtocols) => + "unsupported", + ResponseFailure::Network(InboundFailure::ConnectionClosed) => + "connection-closed", + }; + + metrics.requests_in_failure_total + .with_label_values(&[&protocol, reason]) + .inc(); + } + } + } + }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::RequestFinished { request_id, result })) => { + if let Some((send_back, started, protocol)) = this.pending_requests.remove(&request_id) { + if let Some(metrics) = this.metrics.as_ref() { + match &result { + Ok(_) => { + metrics.requests_out_success_total + .with_label_values(&[&protocol]) + .observe(started.elapsed().as_secs_f64()); + } + Err(err) => { + let reason = match err { + RequestFailure::Refused => "refused", + RequestFailure::Network(OutboundFailure::DialFailure) => + "dial-failure", + RequestFailure::Network(OutboundFailure::Timeout) => + "timeout", + RequestFailure::Network(OutboundFailure::ConnectionClosed) => + "connection-closed", + RequestFailure::Network(OutboundFailure::UnsupportedProtocols) => + "unsupported", + }; + + metrics.requests_out_failure_total + .with_label_values(&[&protocol, reason]) + .inc(); + } + } + } + let _ = send_back.send(result); + } else { + error!("Request not in pending_requests"); + } + }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::OpaqueRequestStarted { protocol, .. })) => { + if let Some(metrics) = this.metrics.as_ref() { + metrics.requests_out_started_total + .with_label_values(&[&protocol]) + .inc(); + } + }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::OpaqueRequestFinished { protocol, request_duration, .. 
})) => { + if let Some(metrics) = this.metrics.as_ref() { + metrics.requests_out_success_total + .with_label_values(&[&protocol]) + .observe(request_duration.as_secs_f64()); + } + }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::RandomKademliaStarted(protocol))) => { + if let Some(metrics) = this.metrics.as_ref() { + metrics.kademlia_random_queries_total + .with_label_values(&[&protocol.as_ref()]) + .inc(); + } + }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamOpened { remote, engine_id, notifications_sink, role })) => { + if let Some(metrics) = this.metrics.as_ref() { + metrics.notifications_streams_opened_total + .with_label_values(&[&maybe_utf8_bytes_to_string(&engine_id)]).inc(); + } + { + let mut peers_notifications_sinks = this.peers_notifications_sinks.lock(); + peers_notifications_sinks.insert((remote.clone(), engine_id), notifications_sink); + } + this.event_streams.send(Event::NotificationStreamOpened { + remote, + engine_id, + role, + }); + }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamReplaced { remote, engine_id, notifications_sink })) => { + let mut peers_notifications_sinks = this.peers_notifications_sinks.lock(); + if let Some(s) = peers_notifications_sinks.get_mut(&(remote, engine_id)) { + *s = notifications_sink; + } else { + log::error!( + target: "sub-libp2p", + "NotificationStreamReplaced for non-existing substream" + ); + } + + // TODO: Notifications might have been lost as a result of the previous + // connection being dropped, and as a result it would be preferable to notify + // the users of this fact by simulating the substream being closed then + // reopened. + // The code below doesn't compile because `role` is unknown. Propagating the + // handshake of the secondary connections is quite an invasive change and + // would conflict with https://github.com/paritytech/substrate/issues/6403. 
+ // Considering that dropping notifications is generally regarded as + // acceptable, this bug is at the moment intentionally left there and is + // intended to be fixed at the same time as + // https://github.com/paritytech/substrate/issues/6403. + /*this.event_streams.send(Event::NotificationStreamClosed { + remote, + engine_id, + }); + this.event_streams.send(Event::NotificationStreamOpened { + remote, + engine_id, + role, + });*/ + }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationStreamClosed { remote, engine_id })) => { + if let Some(metrics) = this.metrics.as_ref() { + metrics.notifications_streams_closed_total + .with_label_values(&[&maybe_utf8_bytes_to_string(&engine_id[..])]).inc(); + } + this.event_streams.send(Event::NotificationStreamClosed { + remote: remote.clone(), + engine_id, + }); + { + let mut peers_notifications_sinks = this.peers_notifications_sinks.lock(); + peers_notifications_sinks.remove(&(remote.clone(), engine_id)); + } + }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::NotificationsReceived { remote, messages })) => { + if let Some(metrics) = this.metrics.as_ref() { + for (engine_id, message) in &messages { + metrics.notifications_sizes + .with_label_values(&["in", &maybe_utf8_bytes_to_string(engine_id)]) + .observe(message.len() as f64); + } + } + this.event_streams.send(Event::NotificationsReceived { + remote, + messages, + }); + }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::Dht(event, duration))) => { + if let Some(metrics) = this.metrics.as_ref() { + let query_type = match event { + DhtEvent::ValueFound(_) => "value-found", + DhtEvent::ValueNotFound(_) => "value-not-found", + DhtEvent::ValuePut(_) => "value-put", + DhtEvent::ValuePutFailed(_) => "value-put-failed", + }; + metrics.kademlia_query_duration.with_label_values(&[query_type]) + .observe(duration.as_secs_f64()); + } + + this.event_streams.send(Event::Dht(event)); + }, + Poll::Ready(SwarmEvent::ConnectionEstablished { peer_id, endpoint, 
num_established }) => { + trace!(target: "sub-libp2p", "Libp2p => Connected({:?})", peer_id); + + if let Some(metrics) = this.metrics.as_ref() { + let direction = match endpoint { + ConnectedPoint::Dialer { .. } => "out", + ConnectedPoint::Listener { .. } => "in", + }; + metrics.connections_opened_total.with_label_values(&[direction]).inc(); + + if num_established.get() == 1 { + metrics.distinct_peers_connections_opened_total.inc(); + } + } + }, + Poll::Ready(SwarmEvent::ConnectionClosed { peer_id, cause, endpoint, num_established }) => { + trace!(target: "sub-libp2p", "Libp2p => Disconnected({:?}, {:?})", peer_id, cause); + if let Some(metrics) = this.metrics.as_ref() { + let direction = match endpoint { + ConnectedPoint::Dialer { .. } => "out", + ConnectedPoint::Listener { .. } => "in", + }; + let reason = match cause { + Some(ConnectionError::IO(_)) => "transport-error", + Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( + EitherError::A(EitherError::A(EitherError::A(EitherError::B( + EitherError::A(PingFailure::Timeout)))))))))) => "ping-timeout", + Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( + EitherError::A(EitherError::A(EitherError::A(EitherError::A( + NotifsHandlerError::Legacy(LegacyConnectionKillError)))))))))) => "force-closed", + Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( + EitherError::A(EitherError::A(EitherError::A(EitherError::A( + NotifsHandlerError::SyncNotificationsClogged))))))))) => "sync-notifications-clogged", + Some(ConnectionError::Handler(NodeHandlerWrapperError::Handler(_))) => "protocol-error", + Some(ConnectionError::Handler(NodeHandlerWrapperError::KeepAliveTimeout)) => "keep-alive-timeout", + None => "actively-closed", + }; + metrics.connections_closed_total.with_label_values(&[direction, reason]).inc(); + + // `num_established` represents the number of *remaining* connections. 
+ if num_established == 0 { + metrics.distinct_peers_connections_closed_total.inc(); + } + } + }, + Poll::Ready(SwarmEvent::NewListenAddr(addr)) => { + trace!(target: "sub-libp2p", "Libp2p => NewListenAddr({})", addr); + if let Some(metrics) = this.metrics.as_ref() { + metrics.listeners_local_addresses.inc(); + } + }, + Poll::Ready(SwarmEvent::ExpiredListenAddr(addr)) => { + info!(target: "sub-libp2p", "📪 No longer listening on {}", addr); + if let Some(metrics) = this.metrics.as_ref() { + metrics.listeners_local_addresses.dec(); + } + }, + Poll::Ready(SwarmEvent::UnreachableAddr { peer_id, address, error, .. }) => { + trace!( + target: "sub-libp2p", "Libp2p => Failed to reach {:?} through {:?}: {}", + peer_id, + address, + error, + ); + + if this.boot_node_ids.contains(&peer_id) { + if let PendingConnectionError::InvalidPeerId = error { + error!( + "💔 The bootnode you want to connect to at `{}` provided a different peer ID than the one you expect: `{}`.", + address, + peer_id, + ); + } + } + + if let Some(metrics) = this.metrics.as_ref() { + match error { + PendingConnectionError::ConnectionLimit(_) => + metrics.pending_connections_errors_total.with_label_values(&["limit-reached"]).inc(), + PendingConnectionError::InvalidPeerId => + metrics.pending_connections_errors_total.with_label_values(&["invalid-peer-id"]).inc(), + PendingConnectionError::Transport(_) | PendingConnectionError::IO(_) => + metrics.pending_connections_errors_total.with_label_values(&["transport-error"]).inc(), + } + } + } + Poll::Ready(SwarmEvent::Dialing(peer_id)) => + trace!(target: "sub-libp2p", "Libp2p => Dialing({:?})", peer_id), + Poll::Ready(SwarmEvent::IncomingConnection { local_addr, send_back_addr }) => { + trace!(target: "sub-libp2p", "Libp2p => IncomingConnection({},{}))", + local_addr, send_back_addr); + if let Some(metrics) = this.metrics.as_ref() { + metrics.incoming_connections_total.inc(); + } + }, + Poll::Ready(SwarmEvent::IncomingConnectionError { local_addr, send_back_addr, 
error }) => { + trace!(target: "sub-libp2p", "Libp2p => IncomingConnectionError({},{}): {}", + local_addr, send_back_addr, error); + if let Some(metrics) = this.metrics.as_ref() { + let reason = match error { + PendingConnectionError::ConnectionLimit(_) => "limit-reached", + PendingConnectionError::InvalidPeerId => "invalid-peer-id", + PendingConnectionError::Transport(_) | + PendingConnectionError::IO(_) => "transport-error", + }; + + metrics.incoming_connections_errors_total.with_label_values(&[reason]).inc(); + } + }, + Poll::Ready(SwarmEvent::BannedPeer { peer_id, endpoint }) => { + trace!(target: "sub-libp2p", "Libp2p => BannedPeer({}). Connected via {:?}.", + peer_id, endpoint); + if let Some(metrics) = this.metrics.as_ref() { + metrics.incoming_connections_errors_total.with_label_values(&["banned"]).inc(); + } + }, + Poll::Ready(SwarmEvent::UnknownPeerUnreachableAddr { address, error }) => + trace!(target: "sub-libp2p", "Libp2p => UnknownPeerUnreachableAddr({}): {}", + address, error), + Poll::Ready(SwarmEvent::ListenerClosed { reason, addresses }) => { + if let Some(metrics) = this.metrics.as_ref() { + metrics.listeners_local_addresses.sub(addresses.len() as u64); + } + let addrs = addresses.into_iter().map(|a| a.to_string()) + .collect::>().join(", "); + match reason { + Ok(()) => error!( + target: "sub-libp2p", + "📪 Libp2p listener ({}) closed gracefully", + addrs + ), + Err(e) => error!( + target: "sub-libp2p", + "📪 Libp2p listener ({}) closed: {}", + addrs, e + ), + } + }, + Poll::Ready(SwarmEvent::ListenerError { error }) => { + trace!(target: "sub-libp2p", "Libp2p => ListenerError: {}", error); + if let Some(metrics) = this.metrics.as_ref() { + metrics.listeners_errors_total.inc(); + } + }, + }; + } + + let num_connected_peers = this.network_service.user_protocol_mut().num_connected_peers(); + + // Update the variables shared with the `NetworkService`. 
+ this.num_connected.store(num_connected_peers, Ordering::Relaxed); + { + let external_addresses = Swarm::::external_addresses(&this.network_service).cloned().collect(); + *this.external_addresses.lock() = external_addresses; + } + + let is_major_syncing = match this.network_service.user_protocol_mut().sync_state() { + SyncState::Idle => false, + SyncState::Downloading => true, + }; + + this.is_major_syncing.store(is_major_syncing, Ordering::Relaxed); + + if let Some(metrics) = this.metrics.as_ref() { + for (proto, buckets) in this.network_service.num_entries_per_kbucket() { + for (lower_ilog2_bucket_bound, num_entries) in buckets { + metrics.kbuckets_num_nodes + .with_label_values(&[&proto.as_ref(), &lower_ilog2_bucket_bound.to_string()]) + .set(num_entries as u64); + } + } + for (proto, num_entries) in this.network_service.num_kademlia_records() { + metrics.kademlia_records_count.with_label_values(&[&proto.as_ref()]).set(num_entries as u64); + } + for (proto, num_entries) in this.network_service.kademlia_records_total_size() { + metrics.kademlia_records_sizes_total.with_label_values(&[&proto.as_ref()]).set(num_entries as u64); + } + metrics.peerset_num_discovered.set(this.network_service.user_protocol().num_discovered_peers() as u64); + metrics.peerset_num_requested.set(this.network_service.user_protocol().requested_peers().count() as u64); + metrics.pending_connections.set(Swarm::network_info(&this.network_service).num_connections_pending as u64); + } + + Poll::Pending + } +} + +impl Unpin for NetworkWorker { +} + +/// Turns bytes that are potentially UTF-8 into a reasonable representable string. +/// +/// Meant to be used only for debugging or metrics-reporting purposes. +pub(crate) fn maybe_utf8_bytes_to_string(id: &[u8]) -> Cow { + if let Ok(s) = std::str::from_utf8(&id[..]) { + Cow::Borrowed(s) + } else { + Cow::Owned(format!("{:?}", id)) + } +} + +/// The libp2p swarm, customized for our needs. 
+type Swarm = libp2p::swarm::Swarm>; + +// Implementation of `import_queue::Link` trait using the available local variables. +struct NetworkLink<'a, B: BlockT, H: ExHashT> { + protocol: &'a mut Swarm, +} + +impl<'a, B: BlockT, H: ExHashT> Link for NetworkLink<'a, B, H> { + fn blocks_processed( + &mut self, + imported: usize, + count: usize, + results: Vec<(Result>, BlockImportError>, B::Hash)> + ) { + self.protocol.user_protocol_mut().on_blocks_processed(imported, count, results) + } + fn justification_imported(&mut self, who: PeerId, hash: &B::Hash, number: NumberFor, success: bool) { + self.protocol.user_protocol_mut().justification_import_result(hash.clone(), number, success); + if !success { + info!("💔 Invalid justification provided by {} for #{}", who, hash); + self.protocol.user_protocol_mut().disconnect_peer(&who); + self.protocol.user_protocol_mut().report_peer(who, ReputationChange::new_fatal("Invalid justification")); + } + } + fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { + self.protocol.user_protocol_mut().request_justification(hash, number) + } + fn request_finality_proof(&mut self, hash: &B::Hash, number: NumberFor) { + self.protocol.user_protocol_mut().request_finality_proof(hash, number) + } + fn finality_proof_imported( + &mut self, + who: PeerId, + request_block: (B::Hash, NumberFor), + finalization_result: Result<(B::Hash, NumberFor), ()>, + ) { + let success = finalization_result.is_ok(); + self.protocol.user_protocol_mut().finality_proof_import_result(request_block, finalization_result); + if !success { + info!("💔 Invalid finality proof provided by {} for #{}", who, request_block.0); + self.protocol.user_protocol_mut().disconnect_peer(&who); + self.protocol.user_protocol_mut().report_peer(who, ReputationChange::new_fatal("Invalid finality proof")); + } + } +} + +fn ensure_addresses_consistent_with_transport<'a>( + addresses: impl Iterator, + transport: &TransportConfig, +) -> Result<(), Error> { + if 
matches!(transport, TransportConfig::MemoryOnly) { + let addresses: Vec<_> = addresses + .filter(|x| x.iter() + .any(|y| !matches!(y, libp2p::core::multiaddr::Protocol::Memory(_))) + ) + .cloned() + .collect(); + + if !addresses.is_empty() { + return Err(Error::AddressesForAnotherTransport { + transport: transport.clone(), + addresses, + }); + } + } else { + let addresses: Vec<_> = addresses + .filter(|x| x.iter() + .any(|y| matches!(y, libp2p::core::multiaddr::Protocol::Memory(_))) + ) + .cloned() + .collect(); + + if !addresses.is_empty() { + return Err(Error::AddressesForAnotherTransport { + transport: transport.clone(), + addresses, + }); + } + } + + Ok(()) +} diff --git a/client/network/src/service/metrics.rs b/client/network/src/service/metrics.rs new file mode 100644 index 0000000000000..a63ce7a18a519 --- /dev/null +++ b/client/network/src/service/metrics.rs @@ -0,0 +1,357 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use crate::transport::BandwidthSinks; +use prometheus_endpoint::{ + self as prometheus, + Counter, CounterVec, Gauge, GaugeVec, HistogramOpts, + PrometheusError, Registry, U64, Opts, + SourcedCounter, SourcedGauge, MetricSource, +}; +use std::{ + str, + sync::{ + atomic::{AtomicBool, AtomicUsize, Ordering}, + Arc, + }, +}; + +pub use prometheus_endpoint::{Histogram, HistogramVec}; + +/// Registers all networking metrics with the given registry. +pub fn register(registry: &Registry, sources: MetricSources) -> Result { + BandwidthCounters::register(registry, sources.bandwidth)?; + MajorSyncingGauge::register(registry, sources.major_syncing)?; + NumConnectedGauge::register(registry, sources.connected_peers)?; + Metrics::register(registry) +} + +/// Predefined metric sources that are fed directly into prometheus. +pub struct MetricSources { + pub bandwidth: Arc, + pub major_syncing: Arc, + pub connected_peers: Arc, +} + +/// Dedicated metrics. +pub struct Metrics { + // This list is ordered alphabetically + pub connections_closed_total: CounterVec, + pub connections_opened_total: CounterVec, + pub distinct_peers_connections_closed_total: Counter, + pub distinct_peers_connections_opened_total: Counter, + pub import_queue_blocks_submitted: Counter, + pub import_queue_finality_proofs_submitted: Counter, + pub import_queue_justifications_submitted: Counter, + pub incoming_connections_errors_total: CounterVec, + pub incoming_connections_total: Counter, + pub issued_light_requests: Counter, + pub kademlia_query_duration: HistogramVec, + pub kademlia_random_queries_total: CounterVec, + pub kademlia_records_count: GaugeVec, + pub kademlia_records_sizes_total: GaugeVec, + pub kbuckets_num_nodes: GaugeVec, + pub listeners_local_addresses: Gauge, + pub listeners_errors_total: Counter, + pub notifications_sizes: HistogramVec, + pub notifications_streams_closed_total: CounterVec, + pub notifications_streams_opened_total: CounterVec, + pub peerset_num_discovered: Gauge, + pub 
peerset_num_requested: Gauge, + pub pending_connections: Gauge, + pub pending_connections_errors_total: CounterVec, + pub requests_in_failure_total: CounterVec, + pub requests_in_success_total: HistogramVec, + pub requests_out_failure_total: CounterVec, + pub requests_out_success_total: HistogramVec, + pub requests_out_started_total: CounterVec, +} + +impl Metrics { + fn register(registry: &Registry) -> Result { + Ok(Self { + // This list is ordered alphabetically + connections_closed_total: prometheus::register(CounterVec::new( + Opts::new( + "sub_libp2p_connections_closed_total", + "Total number of connections closed, by direction and reason" + ), + &["direction", "reason"] + )?, registry)?, + connections_opened_total: prometheus::register(CounterVec::new( + Opts::new( + "sub_libp2p_connections_opened_total", + "Total number of connections opened by direction" + ), + &["direction"] + )?, registry)?, + distinct_peers_connections_closed_total: prometheus::register(Counter::new( + "sub_libp2p_distinct_peers_connections_closed_total", + "Total number of connections closed with distinct peers" + )?, registry)?, + distinct_peers_connections_opened_total: prometheus::register(Counter::new( + "sub_libp2p_distinct_peers_connections_opened_total", + "Total number of connections opened with distinct peers" + )?, registry)?, + import_queue_blocks_submitted: prometheus::register(Counter::new( + "import_queue_blocks_submitted", + "Number of blocks submitted to the import queue.", + )?, registry)?, + import_queue_finality_proofs_submitted: prometheus::register(Counter::new( + "import_queue_finality_proofs_submitted", + "Number of finality proofs submitted to the import queue.", + )?, registry)?, + import_queue_justifications_submitted: prometheus::register(Counter::new( + "import_queue_justifications_submitted", + "Number of justifications submitted to the import queue.", + )?, registry)?, + incoming_connections_errors_total: prometheus::register(CounterVec::new( + Opts::new( + 
"sub_libp2p_incoming_connections_handshake_errors_total", + "Total number of incoming connections that have failed during the \ + initial handshake" + ), + &["reason"] + )?, registry)?, + incoming_connections_total: prometheus::register(Counter::new( + "sub_libp2p_incoming_connections_total", + "Total number of incoming connections on the listening sockets" + )?, registry)?, + issued_light_requests: prometheus::register(Counter::new( + "issued_light_requests", + "Number of light client requests that our node has issued.", + )?, registry)?, + kademlia_query_duration: prometheus::register(HistogramVec::new( + HistogramOpts { + common_opts: Opts::new( + "sub_libp2p_kademlia_query_duration", + "Duration of Kademlia queries per query type" + ), + buckets: prometheus::exponential_buckets(0.5, 2.0, 10) + .expect("parameters are always valid values; qed"), + }, + &["type"] + )?, registry)?, + kademlia_random_queries_total: prometheus::register(CounterVec::new( + Opts::new( + "sub_libp2p_kademlia_random_queries_total", + "Number of random Kademlia queries started" + ), + &["protocol"] + )?, registry)?, + kademlia_records_count: prometheus::register(GaugeVec::new( + Opts::new( + "sub_libp2p_kademlia_records_count", + "Number of records in the Kademlia records store" + ), + &["protocol"] + )?, registry)?, + kademlia_records_sizes_total: prometheus::register(GaugeVec::new( + Opts::new( + "sub_libp2p_kademlia_records_sizes_total", + "Total size of all the records in the Kademlia records store" + ), + &["protocol"] + )?, registry)?, + kbuckets_num_nodes: prometheus::register(GaugeVec::new( + Opts::new( + "sub_libp2p_kbuckets_num_nodes", + "Number of nodes per kbucket per Kademlia instance" + ), + &["protocol", "lower_ilog2_bucket_bound"] + )?, registry)?, + listeners_local_addresses: prometheus::register(Gauge::new( + "sub_libp2p_listeners_local_addresses", "Number of local addresses we're listening on" + )?, registry)?, + listeners_errors_total: 
prometheus::register(Counter::new( + "sub_libp2p_listeners_errors_total", + "Total number of non-fatal errors reported by a listener" + )?, registry)?, + notifications_sizes: prometheus::register(HistogramVec::new( + HistogramOpts { + common_opts: Opts::new( + "sub_libp2p_notifications_sizes", + "Sizes of the notifications send to and received from all nodes" + ), + buckets: prometheus::exponential_buckets(64.0, 4.0, 8) + .expect("parameters are always valid values; qed"), + }, + &["direction", "protocol"] + )?, registry)?, + notifications_streams_closed_total: prometheus::register(CounterVec::new( + Opts::new( + "sub_libp2p_notifications_streams_closed_total", + "Total number of notification substreams that have been closed" + ), + &["protocol"] + )?, registry)?, + notifications_streams_opened_total: prometheus::register(CounterVec::new( + Opts::new( + "sub_libp2p_notifications_streams_opened_total", + "Total number of notification substreams that have been opened" + ), + &["protocol"] + )?, registry)?, + peerset_num_discovered: prometheus::register(Gauge::new( + "sub_libp2p_peerset_num_discovered", "Number of nodes stored in the peerset manager", + )?, registry)?, + peerset_num_requested: prometheus::register(Gauge::new( + "sub_libp2p_peerset_num_requested", "Number of nodes that the peerset manager wants us to be connected to", + )?, registry)?, + pending_connections: prometheus::register(Gauge::new( + "sub_libp2p_pending_connections", + "Number of connections in the process of being established", + )?, registry)?, + pending_connections_errors_total: prometheus::register(CounterVec::new( + Opts::new( + "sub_libp2p_pending_connections_errors_total", + "Total number of pending connection errors" + ), + &["reason"] + )?, registry)?, + requests_in_failure_total: prometheus::register(CounterVec::new( + Opts::new( + "sub_libp2p_requests_in_failure_total", + "Total number of incoming requests that the node has failed to answer" + ), + &["protocol", "reason"] + )?, 
registry)?, + requests_in_success_total: prometheus::register(HistogramVec::new( + HistogramOpts { + common_opts: Opts::new( + "sub_libp2p_requests_in_success_total", + "Total number of requests received and answered" + ), + buckets: prometheus::exponential_buckets(0.001, 2.0, 16) + .expect("parameters are always valid values; qed"), + }, + &["protocol"] + )?, registry)?, + requests_out_failure_total: prometheus::register(CounterVec::new( + Opts::new( + "sub_libp2p_requests_out_failure_total", + "Total number of requests that have failed" + ), + &["protocol", "reason"] + )?, registry)?, + requests_out_success_total: prometheus::register(HistogramVec::new( + HistogramOpts { + common_opts: Opts::new( + "sub_libp2p_requests_out_success_total", + "For successful requests, time between a request's start and finish" + ), + buckets: prometheus::exponential_buckets(0.001, 2.0, 16) + .expect("parameters are always valid values; qed"), + }, + &["protocol"] + )?, registry)?, + requests_out_started_total: prometheus::register(CounterVec::new( + Opts::new( + "sub_libp2p_requests_out_started_total", + "Total number of requests emitted" + ), + &["protocol"] + )?, registry)?, + }) + } +} + +/// The bandwidth counter metric. +#[derive(Clone)] +pub struct BandwidthCounters(Arc); + +impl BandwidthCounters { + /// Registers the `BandwidthCounters` metric whose values are + /// obtained from the given sinks. + fn register(registry: &Registry, sinks: Arc) -> Result<(), PrometheusError> { + prometheus::register(SourcedCounter::new( + &Opts::new( + "sub_libp2p_network_bytes_total", + "Total bandwidth usage" + ).variable_label("direction"), + BandwidthCounters(sinks), + )?, registry)?; + + Ok(()) + } +} + +impl MetricSource for BandwidthCounters { + type N = u64; + + fn collect(&self, mut set: impl FnMut(&[&str], Self::N)) { + set(&[&"in"], self.0.total_inbound()); + set(&[&"out"], self.0.total_outbound()); + } +} + +/// The "major syncing" metric. 
+#[derive(Clone)] +pub struct MajorSyncingGauge(Arc); + +impl MajorSyncingGauge { + /// Registers the `MajorSyncGauge` metric whose value is + /// obtained from the given `AtomicBool`. + fn register(registry: &Registry, value: Arc) -> Result<(), PrometheusError> { + prometheus::register(SourcedGauge::new( + &Opts::new( + "sub_libp2p_is_major_syncing", + "Whether the node is performing a major sync or not.", + ), + MajorSyncingGauge(value), + )?, registry)?; + + Ok(()) + } +} + +impl MetricSource for MajorSyncingGauge { + type N = u64; + + fn collect(&self, mut set: impl FnMut(&[&str], Self::N)) { + set(&[], self.0.load(Ordering::Relaxed) as u64); + } +} + +/// The connected peers metric. +#[derive(Clone)] +pub struct NumConnectedGauge(Arc); + +impl NumConnectedGauge { + /// Registers the `MajorSyncingGauge` metric whose value is + /// obtained from the given `AtomicUsize`. + fn register(registry: &Registry, value: Arc) -> Result<(), PrometheusError> { + prometheus::register(SourcedGauge::new( + &Opts::new( + "sub_libp2p_peers_count", + "Number of connected peers", + ), + NumConnectedGauge(value), + )?, registry)?; + + Ok(()) + } +} + +impl MetricSource for NumConnectedGauge { + type N = u64; + + fn collect(&self, mut set: impl FnMut(&[&str], Self::N)) { + set(&[], self.0.load(Ordering::Relaxed) as u64); + } +} diff --git a/client/network/src/service/out_events.rs b/client/network/src/service/out_events.rs new file mode 100644 index 0000000000000..1b86a5fa4317d --- /dev/null +++ b/client/network/src/service/out_events.rs @@ -0,0 +1,283 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Registering events streams. +//! +//! This code holds the logic that is used for the network service to inform other parts of +//! Substrate about what is happening. +//! +//! # Usage +//! +//! - Create an instance of [`OutChannels`]. +//! - Create channels using the [`channel`] function. The receiving side implements the `Stream` +//! trait. +//! - You cannot directly send an event on a sender. Instead, you have to call +//! [`OutChannels::push`] to put the sender within a [`OutChannels`]. +//! - Send events by calling [`OutChannels::send`]. Events are cloned for each sender in the +//! collection. +//! + +use crate::Event; +use super::maybe_utf8_bytes_to_string; + +use futures::{prelude::*, channel::mpsc, ready, stream::FusedStream}; +use parking_lot::Mutex; +use prometheus_endpoint::{register, CounterVec, GaugeVec, Opts, PrometheusError, Registry, U64}; +use std::{ + convert::TryFrom as _, + fmt, pin::Pin, sync::Arc, + task::{Context, Poll} +}; + +/// Creates a new channel that can be associated to a [`OutChannels`]. +/// +/// The name is used in Prometheus reports. +pub fn channel(name: &'static str) -> (Sender, Receiver) { + let (tx, rx) = mpsc::unbounded(); + let metrics = Arc::new(Mutex::new(None)); + let tx = Sender { inner: tx, name, metrics: metrics.clone() }; + let rx = Receiver { inner: rx, name, metrics }; + (tx, rx) +} + +/// Sending side of a channel. +/// +/// Must be associated with an [`OutChannels`] before anything can be sent on it +/// +/// > **Note**: Contrary to regular channels, this `Sender` is purposefully designed to not +/// implement the `Clone` trait e.g. 
in order to not complicate the logic keeping the metrics in +/// sync on drop. If someone adds a `#[derive(Clone)]` below, it is **wrong**. +pub struct Sender { + inner: mpsc::UnboundedSender, + name: &'static str, + /// Clone of [`Receiver::metrics`]. + metrics: Arc>>>>, +} + +impl fmt::Debug for Sender { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_tuple("Sender").finish() + } +} + +impl Drop for Sender { + fn drop(&mut self) { + let metrics = self.metrics.lock(); + if let Some(Some(metrics)) = metrics.as_ref().map(|m| &**m) { + metrics.num_channels.with_label_values(&[self.name]).dec(); + } + } +} + +/// Receiving side of a channel. +pub struct Receiver { + inner: mpsc::UnboundedReceiver, + name: &'static str, + /// Initially contains `None`, and will be set to a value once the corresponding [`Sender`] + /// is assigned to an instance of [`OutChannels`]. + metrics: Arc>>>>, +} + +impl Stream for Receiver { + type Item = Event; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + if let Some(ev) = ready!(Pin::new(&mut self.inner).poll_next(cx)) { + let metrics = self.metrics.lock().clone(); + match metrics.as_ref().map(|m| m.as_ref()) { + Some(Some(metrics)) => metrics.event_out(&ev, self.name), + Some(None) => (), // no registry + None => log::warn!("Inconsistency in out_events: event happened before sender associated"), + } + Poll::Ready(Some(ev)) + } else { + Poll::Ready(None) + } + } +} + +impl fmt::Debug for Receiver { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_tuple("Receiver").finish() + } +} + +impl Drop for Receiver { + fn drop(&mut self) { + if !self.inner.is_terminated() { + // Empty the list to properly decrease the metrics. + while let Some(Some(_)) = self.next().now_or_never() {} + } + } +} + +/// Collection of senders. +pub struct OutChannels { + event_streams: Vec, + /// The metrics we collect. A clone of this is sent to each [`Receiver`] associated with this + /// object. 
+ metrics: Arc>, +} + +impl OutChannels { + /// Creates a new empty collection of senders. + pub fn new(registry: Option<&Registry>) -> Result { + let metrics = if let Some(registry) = registry { + Some(Metrics::register(registry)?) + } else { + None + }; + + Ok(OutChannels { + event_streams: Vec::new(), + metrics: Arc::new(metrics), + }) + } + + /// Adds a new [`Sender`] to the collection. + pub fn push(&mut self, sender: Sender) { + let mut metrics = sender.metrics.lock(); + debug_assert!(metrics.is_none()); + *metrics = Some(self.metrics.clone()); + drop(metrics); + + if let Some(metrics) = &*self.metrics { + metrics.num_channels.with_label_values(&[sender.name]).inc(); + } + + self.event_streams.push(sender); + } + + /// Sends an event. + pub fn send(&mut self, event: Event) { + self.event_streams.retain(|sender| { + sender.inner.unbounded_send(event.clone()).is_ok() + }); + + if let Some(metrics) = &*self.metrics { + for ev in &self.event_streams { + metrics.event_in(&event, 1, ev.name); + } + } + } +} + +impl fmt::Debug for OutChannels { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("OutChannels") + .field("num_channels", &self.event_streams.len()) + .finish() + } +} + +struct Metrics { + // This list is ordered alphabetically + events_total: CounterVec, + notifications_sizes: CounterVec, + num_channels: GaugeVec, +} + +impl Metrics { + fn register(registry: &Registry) -> Result { + Ok(Self { + events_total: register(CounterVec::new( + Opts::new( + "sub_libp2p_out_events_events_total", + "Number of broadcast network events that have been sent or received across all \ + channels" + ), + &["event_name", "action", "name"] + )?, registry)?, + notifications_sizes: register(CounterVec::new( + Opts::new( + "sub_libp2p_out_events_notifications_sizes", + "Size of notification events that have been sent or received across all \ + channels" + ), + &["protocol", "action", "name"] + )?, registry)?, + num_channels: register(GaugeVec::new( + 
Opts::new( + "sub_libp2p_out_events_num_channels", + "Number of internal active channels that broadcast network events", + ), + &["name"] + )?, registry)?, + }) + } + + fn event_in(&self, event: &Event, num: u64, name: &str) { + match event { + Event::Dht(_) => { + self.events_total + .with_label_values(&["dht", "sent", name]) + .inc_by(num); + } + Event::NotificationStreamOpened { engine_id, .. } => { + self.events_total + .with_label_values(&[&format!("notif-open-{:?}", engine_id), "sent", name]) + .inc_by(num); + }, + Event::NotificationStreamClosed { engine_id, .. } => { + self.events_total + .with_label_values(&[&format!("notif-closed-{:?}", engine_id), "sent", name]) + .inc_by(num); + }, + Event::NotificationsReceived { messages, .. } => { + for (engine_id, message) in messages { + self.events_total + .with_label_values(&[&format!("notif-{:?}", engine_id), "sent", name]) + .inc_by(num); + self.notifications_sizes + .with_label_values(&[&maybe_utf8_bytes_to_string(engine_id), "sent", name]) + .inc_by(num.saturating_mul(u64::try_from(message.len()).unwrap_or(u64::max_value()))); + } + }, + } + } + + fn event_out(&self, event: &Event, name: &str) { + match event { + Event::Dht(_) => { + self.events_total + .with_label_values(&["dht", "received", name]) + .inc(); + } + Event::NotificationStreamOpened { engine_id, .. } => { + self.events_total + .with_label_values(&[&format!("notif-open-{:?}", engine_id), "received", name]) + .inc(); + }, + Event::NotificationStreamClosed { engine_id, .. } => { + self.events_total + .with_label_values(&[&format!("notif-closed-{:?}", engine_id), "received", name]) + .inc(); + }, + Event::NotificationsReceived { messages, .. 
} => { + for (engine_id, message) in messages { + self.events_total + .with_label_values(&[&format!("notif-{:?}", engine_id), "received", name]) + .inc(); + self.notifications_sizes + .with_label_values(&[&maybe_utf8_bytes_to_string(engine_id), "received", name]) + .inc_by(u64::try_from(message.len()).unwrap_or(u64::max_value())); + } + }, + } + } +} diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs new file mode 100644 index 0000000000000..4b6f9dd156482 --- /dev/null +++ b/client/network/src/service/tests.rs @@ -0,0 +1,514 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use crate::{config, Event, NetworkService, NetworkWorker}; + +use libp2p::PeerId; +use futures::prelude::*; +use sp_runtime::traits::{Block as BlockT, Header as _}; +use std::{sync::Arc, time::Duration}; +use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _}; + +type TestNetworkService = NetworkService< + substrate_test_runtime_client::runtime::Block, + substrate_test_runtime_client::runtime::Hash, +>; + +/// Builds a full node to be used for testing. Returns the node service and its associated events +/// stream. 
+/// +/// > **Note**: We return the events stream in order to not possibly lose events between the +/// > construction of the service and the moment the events stream is grabbed. +fn build_test_full_node(config: config::NetworkConfiguration) + -> (Arc, impl Stream) +{ + let client = Arc::new( + TestClientBuilder::with_default_backend() + .build_with_longest_chain() + .0, + ); + + #[derive(Clone)] + struct PassThroughVerifier(bool); + impl sp_consensus::import_queue::Verifier for PassThroughVerifier { + fn verify( + &mut self, + origin: sp_consensus::BlockOrigin, + header: B::Header, + justification: Option, + body: Option>, + ) -> Result< + ( + sp_consensus::BlockImportParams, + Option)>>, + ), + String, + > { + let maybe_keys = header + .digest() + .log(|l| { + l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus(b"aura")) + .or_else(|| { + l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus(b"babe")) + }) + }) + .map(|blob| { + vec![( + sp_blockchain::well_known_cache_keys::AUTHORITIES, + blob.to_vec(), + )] + }); + + let mut import = sp_consensus::BlockImportParams::new(origin, header); + import.body = body; + import.finalized = self.0; + import.justification = justification; + import.fork_choice = Some(sp_consensus::ForkChoiceStrategy::LongestChain); + Ok((import, maybe_keys)) + } + } + + let import_queue = Box::new(sp_consensus::import_queue::BasicQueue::new( + PassThroughVerifier(false), + Box::new(client.clone()), + None, + None, + &sp_core::testing::TaskExecutor::new(), + None, + )); + + let worker = NetworkWorker::new(config::Params { + role: config::Role::Full, + executor: None, + network_config: config, + chain: client.clone(), + finality_proof_provider: None, + finality_proof_request_builder: None, + on_demand: None, + transaction_pool: Arc::new(crate::config::EmptyTransactionPool), + protocol_id: config::ProtocolId::from("/test-protocol-name"), + import_queue, + block_announce_validator: Box::new( + 
sp_consensus::block_validation::DefaultBlockAnnounceValidator, + ), + metrics_registry: None, + }) + .unwrap(); + + let service = worker.service().clone(); + let event_stream = service.event_stream("test"); + + async_std::task::spawn(async move { + futures::pin_mut!(worker); + let _ = worker.await; + }); + + (service, event_stream) +} + +const ENGINE_ID: sp_runtime::ConsensusEngineId = *b"foo\0"; + +/// Builds two nodes and their associated events stream. +/// The nodes are connected together and have the `ENGINE_ID` protocol registered. +fn build_nodes_one_proto() + -> (Arc, impl Stream, Arc, impl Stream) +{ + let listen_addr = config::build_multiaddr![Memory(rand::random::())]; + + let (node1, events_stream1) = build_test_full_node(config::NetworkConfiguration { + notifications_protocols: vec![(ENGINE_ID, From::from("/foo"))], + listen_addresses: vec![listen_addr.clone()], + transport: config::TransportConfig::MemoryOnly, + .. config::NetworkConfiguration::new_local() + }); + + let (node2, events_stream2) = build_test_full_node(config::NetworkConfiguration { + notifications_protocols: vec![(ENGINE_ID, From::from("/foo"))], + listen_addresses: vec![], + reserved_nodes: vec![config::MultiaddrWithPeerId { + multiaddr: listen_addr, + peer_id: node1.local_peer_id().clone(), + }], + transport: config::TransportConfig::MemoryOnly, + .. config::NetworkConfiguration::new_local() + }); + + (node1, events_stream1, node2, events_stream2) +} + +#[ignore] +#[test] +fn notifications_state_consistent() { + // Runs two nodes and ensures that events are propagated out of the API in a consistent + // correct order, which means no notification received on a closed substream. + + let (node1, mut events_stream1, node2, mut events_stream2) = build_nodes_one_proto(); + + // Write some initial notifications that shouldn't get through. 
+ for _ in 0..(rand::random::() % 5) { + node1.write_notification(node2.local_peer_id().clone(), ENGINE_ID, b"hello world".to_vec()); + } + for _ in 0..(rand::random::() % 5) { + node2.write_notification(node1.local_peer_id().clone(), ENGINE_ID, b"hello world".to_vec()); + } + + async_std::task::block_on(async move { + // True if we have an active substream from node1 to node2. + let mut node1_to_node2_open = false; + // True if we have an active substream from node2 to node1. + let mut node2_to_node1_open = false; + // We stop the test after a certain number of iterations. + let mut iterations = 0; + // Safe guard because we don't want the test to pass if no substream has been open. + let mut something_happened = false; + + loop { + iterations += 1; + if iterations >= 1_000 { + assert!(something_happened); + break; + } + + // Start by sending a notification from node1 to node2 and vice-versa. Part of the + // test consists in ensuring that notifications get ignored if the stream isn't open. + if rand::random::() % 5 >= 3 { + node1.write_notification(node2.local_peer_id().clone(), ENGINE_ID, b"hello world".to_vec()); + } + if rand::random::() % 5 >= 3 { + node2.write_notification(node1.local_peer_id().clone(), ENGINE_ID, b"hello world".to_vec()); + } + + // Also randomly disconnect the two nodes from time to time. + if rand::random::() % 20 == 0 { + node1.disconnect_peer(node2.local_peer_id().clone()); + } + if rand::random::() % 20 == 0 { + node2.disconnect_peer(node1.local_peer_id().clone()); + } + + // Grab next event from either `events_stream1` or `events_stream2`. + let next_event = { + let next1 = events_stream1.next(); + let next2 = events_stream2.next(); + // We also await on a small timer, otherwise it is possible for the test to wait + // forever while nothing at all happens on the network. 
+ let continue_test = futures_timer::Delay::new(Duration::from_millis(20)); + match future::select(future::select(next1, next2), continue_test).await { + future::Either::Left((future::Either::Left((Some(ev), _)), _)) => + future::Either::Left(ev), + future::Either::Left((future::Either::Right((Some(ev), _)), _)) => + future::Either::Right(ev), + future::Either::Right(_) => continue, + _ => break, + } + }; + + match next_event { + future::Either::Left(Event::NotificationStreamOpened { remote, engine_id, .. }) => { + something_happened = true; + assert!(!node1_to_node2_open); + node1_to_node2_open = true; + assert_eq!(remote, *node2.local_peer_id()); + assert_eq!(engine_id, ENGINE_ID); + } + future::Either::Right(Event::NotificationStreamOpened { remote, engine_id, .. }) => { + something_happened = true; + assert!(!node2_to_node1_open); + node2_to_node1_open = true; + assert_eq!(remote, *node1.local_peer_id()); + assert_eq!(engine_id, ENGINE_ID); + } + future::Either::Left(Event::NotificationStreamClosed { remote, engine_id, .. }) => { + assert!(node1_to_node2_open); + node1_to_node2_open = false; + assert_eq!(remote, *node2.local_peer_id()); + assert_eq!(engine_id, ENGINE_ID); + } + future::Either::Right(Event::NotificationStreamClosed { remote, engine_id, .. }) => { + assert!(node2_to_node1_open); + node2_to_node1_open = false; + assert_eq!(remote, *node1.local_peer_id()); + assert_eq!(engine_id, ENGINE_ID); + } + future::Either::Left(Event::NotificationsReceived { remote, .. }) => { + assert!(node1_to_node2_open); + assert_eq!(remote, *node2.local_peer_id()); + if rand::random::() % 5 >= 4 { + node1.write_notification( + node2.local_peer_id().clone(), + ENGINE_ID, + b"hello world".to_vec() + ); + } + } + future::Either::Right(Event::NotificationsReceived { remote, .. 
}) => { + assert!(node2_to_node1_open); + assert_eq!(remote, *node1.local_peer_id()); + if rand::random::() % 5 >= 4 { + node2.write_notification( + node1.local_peer_id().clone(), + ENGINE_ID, + b"hello world".to_vec() + ); + } + } + + // Add new events here. + future::Either::Left(Event::Dht(_)) => {} + future::Either::Right(Event::Dht(_)) => {} + }; + } + }); +} + +#[test] +fn lots_of_incoming_peers_works() { + let listen_addr = config::build_multiaddr![Memory(rand::random::())]; + + let (main_node, _) = build_test_full_node(config::NetworkConfiguration { + notifications_protocols: vec![(ENGINE_ID, From::from("/foo"))], + listen_addresses: vec![listen_addr.clone()], + in_peers: u32::max_value(), + transport: config::TransportConfig::MemoryOnly, + .. config::NetworkConfiguration::new_local() + }); + + let main_node_peer_id = main_node.local_peer_id().clone(); + + // We spawn background tasks and push them in this `Vec`. They will all be waited upon before + // this test ends. + let mut background_tasks_to_wait = Vec::new(); + + for _ in 0..32 { + let main_node_peer_id = main_node_peer_id.clone(); + + let (_dialing_node, event_stream) = build_test_full_node(config::NetworkConfiguration { + notifications_protocols: vec![(ENGINE_ID, From::from("/foo"))], + listen_addresses: vec![], + reserved_nodes: vec![config::MultiaddrWithPeerId { + multiaddr: listen_addr.clone(), + peer_id: main_node_peer_id.clone(), + }], + transport: config::TransportConfig::MemoryOnly, + .. config::NetworkConfiguration::new_local() + }); + + background_tasks_to_wait.push(async_std::task::spawn(async move { + // Create a dummy timer that will "never" fire, and that will be overwritten when we + // actually need the timer. Using an Option would be technically cleaner, but it would + // make the code below way more complicated. + let mut timer = futures_timer::Delay::new(Duration::from_secs(3600 * 24 * 7)).fuse(); + + let mut event_stream = event_stream.fuse(); + loop { + futures::select! 
{ + _ = timer => { + // Test succeeds when timer fires. + return; + } + ev = event_stream.next() => { + match ev.unwrap() { + Event::NotificationStreamOpened { remote, .. } => { + assert_eq!(remote, main_node_peer_id); + // Test succeeds after 5 seconds. This timer is here in order to + // detect a potential problem after opening. + timer = futures_timer::Delay::new(Duration::from_secs(5)).fuse(); + } + Event::NotificationStreamClosed { .. } => { + // Test failed. + panic!(); + } + _ => {} + } + } + } + } + })); + } + + futures::executor::block_on(async move { + future::join_all(background_tasks_to_wait).await + }); +} + +#[test] +fn notifications_back_pressure() { + // Node 1 floods node 2 with notifications. Random sleeps are done on node 2 to simulate the + // node being busy. We make sure that all notifications are received. + + const TOTAL_NOTIFS: usize = 10_000; + + let (node1, mut events_stream1, node2, mut events_stream2) = build_nodes_one_proto(); + let node2_id = node2.local_peer_id(); + + let receiver = async_std::task::spawn(async move { + let mut received_notifications = 0; + + while received_notifications < TOTAL_NOTIFS { + match events_stream2.next().await.unwrap() { + Event::NotificationStreamClosed { .. } => panic!(), + Event::NotificationsReceived { messages, .. } => { + for message in messages { + assert_eq!(message.0, ENGINE_ID); + assert_eq!(message.1, format!("hello #{}", received_notifications)); + received_notifications += 1; + } + } + _ => {} + }; + + if rand::random::() < 2 { + async_std::task::sleep(Duration::from_millis(rand::random::() % 750)).await; + } + } + }); + + async_std::task::block_on(async move { + // Wait for the `NotificationStreamOpened`. + loop { + match events_stream1.next().await.unwrap() { + Event::NotificationStreamOpened { .. } => break, + _ => {} + }; + } + + // Sending! 
+ for num in 0..TOTAL_NOTIFS { + let notif = node1.notification_sender(node2_id.clone(), ENGINE_ID).unwrap(); + notif.ready().await.unwrap().send(format!("hello #{}", num)).unwrap(); + } + + receiver.await; + }); +} + +#[test] +#[should_panic(expected = "don't match the transport")] +fn ensure_listen_addresses_consistent_with_transport_memory() { + let listen_addr = config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)]; + + let _ = build_test_full_node(config::NetworkConfiguration { + listen_addresses: vec![listen_addr.clone()], + transport: config::TransportConfig::MemoryOnly, + .. config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + }); +} + +#[test] +#[should_panic(expected = "don't match the transport")] +fn ensure_listen_addresses_consistent_with_transport_not_memory() { + let listen_addr = config::build_multiaddr![Memory(rand::random::())]; + + let _ = build_test_full_node(config::NetworkConfiguration { + listen_addresses: vec![listen_addr.clone()], + .. config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + }); +} + +#[test] +#[should_panic(expected = "don't match the transport")] +fn ensure_boot_node_addresses_consistent_with_transport_memory() { + let listen_addr = config::build_multiaddr![Memory(rand::random::())]; + let boot_node = config::MultiaddrWithPeerId { + multiaddr: config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)], + peer_id: PeerId::random(), + }; + + let _ = build_test_full_node(config::NetworkConfiguration { + listen_addresses: vec![listen_addr.clone()], + transport: config::TransportConfig::MemoryOnly, + boot_nodes: vec![boot_node], + .. 
config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + }); +} + +#[test] +#[should_panic(expected = "don't match the transport")] +fn ensure_boot_node_addresses_consistent_with_transport_not_memory() { + let listen_addr = config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)]; + let boot_node = config::MultiaddrWithPeerId { + multiaddr: config::build_multiaddr![Memory(rand::random::())], + peer_id: PeerId::random(), + }; + + let _ = build_test_full_node(config::NetworkConfiguration { + listen_addresses: vec![listen_addr.clone()], + boot_nodes: vec![boot_node], + .. config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + }); +} + +#[test] +#[should_panic(expected = "don't match the transport")] +fn ensure_reserved_node_addresses_consistent_with_transport_memory() { + let listen_addr = config::build_multiaddr![Memory(rand::random::())]; + let reserved_node = config::MultiaddrWithPeerId { + multiaddr: config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)], + peer_id: PeerId::random(), + }; + + let _ = build_test_full_node(config::NetworkConfiguration { + listen_addresses: vec![listen_addr.clone()], + transport: config::TransportConfig::MemoryOnly, + reserved_nodes: vec![reserved_node], + .. config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + }); +} + +#[test] +#[should_panic(expected = "don't match the transport")] +fn ensure_reserved_node_addresses_consistent_with_transport_not_memory() { + let listen_addr = config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)]; + let reserved_node = config::MultiaddrWithPeerId { + multiaddr: config::build_multiaddr![Memory(rand::random::())], + peer_id: PeerId::random(), + }; + + let _ = build_test_full_node(config::NetworkConfiguration { + listen_addresses: vec![listen_addr.clone()], + reserved_nodes: vec![reserved_node], + .. 
config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + }); +} + +#[test] +#[should_panic(expected = "don't match the transport")] +fn ensure_public_addresses_consistent_with_transport_memory() { + let listen_addr = config::build_multiaddr![Memory(rand::random::())]; + let public_address = config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)]; + + let _ = build_test_full_node(config::NetworkConfiguration { + listen_addresses: vec![listen_addr.clone()], + transport: config::TransportConfig::MemoryOnly, + public_addresses: vec![public_address], + .. config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + }); +} + +#[test] +#[should_panic(expected = "don't match the transport")] +fn ensure_public_addresses_consistent_with_transport_not_memory() { + let listen_addr = config::build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(0_u16)]; + let public_address = config::build_multiaddr![Memory(rand::random::())]; + + let _ = build_test_full_node(config::NetworkConfiguration { + listen_addresses: vec![listen_addr.clone()], + public_addresses: vec![public_address], + .. config::NetworkConfiguration::new("test-node", "test-client", Default::default(), None) + }); +} diff --git a/client/network/src/transport.rs b/client/network/src/transport.rs new file mode 100644 index 0000000000000..626f84b6b5f01 --- /dev/null +++ b/client/network/src/transport.rs @@ -0,0 +1,133 @@ +// This file is part of Substrate. + +// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use libp2p::{ + InboundUpgradeExt, OutboundUpgradeExt, PeerId, Transport, + core::{ + self, either::EitherOutput, muxing::StreamMuxerBox, + transport::{boxed::Boxed, OptionalTransport}, upgrade + }, + mplex, identity, bandwidth, wasm_ext, noise +}; +#[cfg(not(target_os = "unknown"))] +use libp2p::{tcp, dns, websocket}; +use std::{io, sync::Arc, time::Duration}; + +pub use self::bandwidth::BandwidthSinks; + +/// Builds the transport that serves as a common ground for all connections. +/// +/// If `memory_only` is true, then only communication within the same process are allowed. Only +/// addresses with the format `/memory/...` are allowed. +/// +/// Returns a `BandwidthSinks` object that allows querying the average bandwidth produced by all +/// the connections spawned with this transport. +pub fn build_transport( + keypair: identity::Keypair, + memory_only: bool, + wasm_external_transport: Option, + use_yamux_flow_control: bool +) -> (Boxed<(PeerId, StreamMuxerBox), io::Error>, Arc) { + // Build the base layer of the transport. 
+ let transport = if let Some(t) = wasm_external_transport { + OptionalTransport::some(t) + } else { + OptionalTransport::none() + }; + #[cfg(not(target_os = "unknown"))] + let transport = transport.or_transport(if !memory_only { + let desktop_trans = tcp::TcpConfig::new(); + let desktop_trans = websocket::WsConfig::new(desktop_trans.clone()) + .or_transport(desktop_trans); + OptionalTransport::some(if let Ok(dns) = dns::DnsConfig::new(desktop_trans.clone()) { + dns.boxed() + } else { + desktop_trans.map_err(dns::DnsErr::Underlying).boxed() + }) + } else { + OptionalTransport::none() + }); + + let transport = transport.or_transport(if memory_only { + OptionalTransport::some(libp2p::core::transport::MemoryTransport::default()) + } else { + OptionalTransport::none() + }); + + let (transport, bandwidth) = bandwidth::BandwidthLogging::new(transport); + + let authentication_config = { + // For more information about these two panics, see in "On the Importance of + // Checking Cryptographic Protocols for Faults" by Dan Boneh, Richard A. DeMillo, + // and Richard J. Lipton. + let noise_keypair_legacy = noise::Keypair::::new().into_authentic(&keypair) + .expect("can only fail in case of a hardware bug; since this signing is performed only \ + once and at initialization, we're taking the bet that the inconvenience of a very \ + rare panic here is basically zero"); + let noise_keypair_spec = noise::Keypair::::new().into_authentic(&keypair) + .expect("can only fail in case of a hardware bug; since this signing is performed only \ + once and at initialization, we're taking the bet that the inconvenience of a very \ + rare panic here is basically zero"); + + // Legacy noise configurations for backward compatibility. 
+ let mut noise_legacy = noise::LegacyConfig::default(); + noise_legacy.send_legacy_handshake = true; + noise_legacy.recv_legacy_handshake = true; + + let mut xx_config = noise::NoiseConfig::xx(noise_keypair_spec); + xx_config.set_legacy_config(noise_legacy.clone()); + let mut ix_config = noise::NoiseConfig::ix(noise_keypair_legacy); + ix_config.set_legacy_config(noise_legacy); + + let extract_peer_id = |result| match result { + EitherOutput::First((peer_id, o)) => (peer_id, EitherOutput::First(o)), + EitherOutput::Second((peer_id, o)) => (peer_id, EitherOutput::Second(o)), + }; + + core::upgrade::SelectUpgrade::new(xx_config.into_authenticated(), ix_config.into_authenticated()) + .map_inbound(extract_peer_id) + .map_outbound(extract_peer_id) + }; + + let multiplexing_config = { + let mut mplex_config = mplex::MplexConfig::new(); + mplex_config.max_buffer_len_behaviour(mplex::MaxBufferBehaviour::Block); + mplex_config.max_buffer_len(usize::MAX); + + let mut yamux_config = libp2p::yamux::Config::default(); + + if use_yamux_flow_control { + // Enable proper flow-control: window updates are only sent when + // buffered data has been consumed. + yamux_config.set_window_update_mode(libp2p::yamux::WindowUpdateMode::OnRead); + } + + core::upgrade::SelectUpgrade::new(yamux_config, mplex_config) + .map_inbound(move |muxer| core::muxing::StreamMuxerBox::new(muxer)) + .map_outbound(move |muxer| core::muxing::StreamMuxerBox::new(muxer)) + }; + + let transport = transport.upgrade(upgrade::Version::V1) + .authenticate(authentication_config) + .multiplex(multiplexing_config) + .timeout(Duration::from_secs(20)) + .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) + .boxed(); + + (transport, bandwidth) +} diff --git a/client/network/src/utils.rs b/client/network/src/utils.rs new file mode 100644 index 0000000000000..490e2ced38266 --- /dev/null +++ b/client/network/src/utils.rs @@ -0,0 +1,87 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. 
+// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use futures::{stream::unfold, FutureExt, Stream, StreamExt}; +use futures_timer::Delay; +use linked_hash_set::LinkedHashSet; +use std::time::Duration; +use std::{hash::Hash, num::NonZeroUsize}; + +/// Creates a stream that returns a new value every `duration`. +pub fn interval(duration: Duration) -> impl Stream + Unpin { + unfold((), move |_| Delay::new(duration).map(|_| Some(((), ())))).map(drop) +} + +/// Wrapper around `LinkedHashSet` with bounded growth. +/// +/// In the limit, for each element inserted the oldest existing element will be removed. +#[derive(Debug, Clone)] +pub struct LruHashSet { + set: LinkedHashSet, + limit: NonZeroUsize, +} + +impl LruHashSet { + /// Create a new `LruHashSet` with the given (exclusive) limit. + pub fn new(limit: NonZeroUsize) -> Self { + Self { + set: LinkedHashSet::new(), + limit, + } + } + + /// Insert element into the set. + /// + /// Returns `true` if this is a new element to the set, `false` otherwise. + /// Maintains the limit of the set by removing the oldest entry if necessary. + /// Inserting the same element will update its LRU position. 
+ pub fn insert(&mut self, e: T) -> bool { + if self.set.insert(e) { + if self.set.len() == usize::from(self.limit) { + self.set.pop_front(); // remove oldest entry + } + return true; + } + false + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn maintains_limit() { + let three = NonZeroUsize::new(3).unwrap(); + let mut set = LruHashSet::::new(three); + + // First element. + assert!(set.insert(1)); + assert_eq!(vec![&1], set.set.iter().collect::>()); + + // Second element. + assert!(set.insert(2)); + assert_eq!(vec![&1, &2], set.set.iter().collect::>()); + + // Inserting the same element updates its LRU position. + assert!(!set.insert(1)); + assert_eq!(vec![&2, &1], set.set.iter().collect::>()); + + // We reached the limit. The next element forces the oldest one out. + assert!(set.insert(3)); + assert_eq!(vec![&1, &3], set.set.iter().collect::>()); + } +} diff --git a/client/network/test/Cargo.toml b/client/network/test/Cargo.toml new file mode 100644 index 0000000000000..26e1631d9f1aa --- /dev/null +++ b/client/network/test/Cargo.toml @@ -0,0 +1,35 @@ +[package] +description = "Integration tests for Substrate network protocol" +name = "sc-network-test" +version = "0.8.0" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +authors = ["Parity Technologies "] +edition = "2018" +publish = false +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +sc-network = { version = "0.8.0", path = "../" } +log = "0.4.8" +parking_lot = "0.10.0" +futures = "0.3.4" +futures-timer = "3.0.1" +rand = "0.7.2" +libp2p = { version = "0.28.1", default-features = false } +sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } +sc-consensus = { version = "0.8.0", path = "../../../client/consensus/common" } +sc-client-api = { version = "2.0.0", path = "../../api" } +sp-blockchain = { version = "2.0.0", 
path = "../../../primitives/blockchain" } +sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sc-block-builder = { version = "0.8.0", path = "../../block-builder" } +sp-consensus-babe = { version = "0.8.0", path = "../../../primitives/consensus/babe" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } +substrate-test-runtime = { version = "2.0.0", path = "../../../test-utils/runtime" } +tempfile = "3.1.0" +sp-tracing = { version = "2.0.0", path = "../../../primitives/tracing" } +sc-service = { version = "0.8.0", default-features = false, features = ["test-helpers"], path = "../../service" } diff --git a/client/network/test/src/block_import.rs b/client/network/test/src/block_import.rs new file mode 100644 index 0000000000000..1d2cd3d687de9 --- /dev/null +++ b/client/network/test/src/block_import.rs @@ -0,0 +1,116 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Testing block import logic. 
+ +use sp_consensus::ImportedAux; +use sp_consensus::import_queue::{ + import_single_block, BasicQueue, BlockImportError, BlockImportResult, IncomingBlock, +}; +use substrate_test_runtime_client::{self, prelude::*}; +use substrate_test_runtime_client::runtime::{Block, Hash}; +use sp_runtime::generic::BlockId; +use sc_block_builder::BlockBuilderProvider; +use super::*; + +fn prepare_good_block() -> (TestClient, Hash, u64, PeerId, IncomingBlock) { + let mut client = substrate_test_runtime_client::new(); + let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + client.import(BlockOrigin::File, block).unwrap(); + + let (hash, number) = (client.block_hash(1).unwrap().unwrap(), 1); + let header = client.header(&BlockId::Number(1)).unwrap(); + let justification = client.justification(&BlockId::Number(1)).unwrap(); + let peer_id = PeerId::random(); + (client, hash, number, peer_id.clone(), IncomingBlock { + hash, + header, + body: Some(Vec::new()), + justification, + origin: Some(peer_id.clone()), + allow_missing_state: false, + import_existing: false, + }) +} + +#[test] +fn import_single_good_block_works() { + let (_, _hash, number, peer_id, block) = prepare_good_block(); + + let mut expected_aux = ImportedAux::default(); + expected_aux.is_new_best = true; + + match import_single_block( + &mut substrate_test_runtime_client::new(), + BlockOrigin::File, + block, + &mut PassThroughVerifier::new(true) + ) { + Ok(BlockImportResult::ImportedUnknown(ref num, ref aux, ref org)) + if *num == number && *aux == expected_aux && *org == Some(peer_id) => {} + r @ _ => panic!("{:?}", r) + } +} + +#[test] +fn import_single_good_known_block_is_ignored() { + let (mut client, _hash, number, _, block) = prepare_good_block(); + match import_single_block( + &mut client, + BlockOrigin::File, + block, + &mut PassThroughVerifier::new(true) + ) { + Ok(BlockImportResult::ImportedKnown(ref n)) if *n == number => {} + _ => panic!() + } +} + +#[test] +fn 
import_single_good_block_without_header_fails() { + let (_, _, _, peer_id, mut block) = prepare_good_block(); + block.header = None; + match import_single_block( + &mut substrate_test_runtime_client::new(), + BlockOrigin::File, + block, + &mut PassThroughVerifier::new(true) + ) { + Err(BlockImportError::IncompleteHeader(ref org)) if *org == Some(peer_id) => {} + _ => panic!() + } +} + +#[test] +fn async_import_queue_drops() { + let executor = sp_core::testing::TaskExecutor::new(); + // Perform this test multiple times since it exhibits non-deterministic behavior. + for _ in 0..100 { + let verifier = PassThroughVerifier::new(true); + + let queue = BasicQueue::new( + verifier, + Box::new(substrate_test_runtime_client::new()), + None, + None, + &executor, + None, + ); + drop(queue); + } +} diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs new file mode 100644 index 0000000000000..587feebe55c14 --- /dev/null +++ b/client/network/test/src/lib.rs @@ -0,0 +1,1005 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+#![allow(missing_docs)] + +#[cfg(test)] +mod block_import; +#[cfg(test)] +mod sync; + +use std::{ + borrow::Cow, collections::HashMap, pin::Pin, sync::Arc, marker::PhantomData, + task::{Poll, Context as FutureContext} +}; + +use libp2p::build_multiaddr; +use log::trace; +use sc_network::config::FinalityProofProvider; +use sp_blockchain::{ + HeaderBackend, Result as ClientResult, + well_known_cache_keys::{self, Id as CacheKeyId}, + Info as BlockchainInfo, +}; +use sc_client_api::{ + BlockchainEvents, BlockImportNotification, FinalityNotifications, ImportNotifications, FinalityNotification, + backend::{TransactionFor, AuxStore, Backend, Finalizer}, BlockBackend, +}; +use sc_consensus::LongestChain; +use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; +use sc_network::config::Role; +use sp_consensus::block_validation::{DefaultBlockAnnounceValidator, BlockAnnounceValidator}; +use sp_consensus::import_queue::{ + BasicQueue, BoxJustificationImport, Verifier, BoxFinalityProofImport, +}; +use sp_consensus::block_import::{BlockImport, ImportResult}; +use sp_consensus::Error as ConsensusError; +use sp_consensus::{BlockOrigin, ForkChoiceStrategy, BlockImportParams, BlockCheckParams, JustificationImport}; +use futures::prelude::*; +use sc_network::{NetworkWorker, NetworkService, config::ProtocolId}; +use sc_network::config::{NetworkConfiguration, TransportConfig, BoxFinalityProofRequestBuilder}; +use libp2p::PeerId; +use parking_lot::Mutex; +use sp_core::H256; +use sc_network::config::ProtocolConfig; +use sp_runtime::generic::{BlockId, OpaqueDigestItemId}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; +use sp_runtime::{ConsensusEngineId, Justification}; +use substrate_test_runtime_client::{self, AccountKeyring}; +use sc_service::client::Client; +pub use sc_network::config::EmptyTransactionPool; +pub use substrate_test_runtime_client::runtime::{Block, Extrinsic, Hash, Transfer}; +pub use substrate_test_runtime_client::{TestClient, 
TestClientBuilder, TestClientBuilderExt}; + +type AuthorityId = sp_consensus_babe::AuthorityId; + +/// A Verifier that accepts all blocks and passes them on with the configured +/// finality to be imported. +#[derive(Clone)] +pub struct PassThroughVerifier { + finalized: bool, + fork_choice: ForkChoiceStrategy, +} + +impl PassThroughVerifier { + /// Create a new instance. + /// + /// Every verified block will use `finalized` for the `BlockImportParams`. + pub fn new(finalized: bool) -> Self { + Self { + finalized, + fork_choice: ForkChoiceStrategy::LongestChain, + } + } + + /// Create a new instance. + /// + /// Every verified block will use `finalized` for the `BlockImportParams` and + /// the given [`ForkChoiceStrategy`]. + pub fn new_with_fork_choice(finalized: bool, fork_choice: ForkChoiceStrategy) -> Self { + Self { + finalized, + fork_choice, + } + } +} + +/// This `Verifier` accepts all data as valid. +impl Verifier for PassThroughVerifier { + fn verify( + &mut self, + origin: BlockOrigin, + header: B::Header, + justification: Option, + body: Option> + ) -> Result<(BlockImportParams, Option)>>), String> { + let maybe_keys = header.digest() + .log(|l| l.try_as_raw(OpaqueDigestItemId::Consensus(b"aura")) + .or_else(|| l.try_as_raw(OpaqueDigestItemId::Consensus(b"babe"))) + ) + .map(|blob| vec![(well_known_cache_keys::AUTHORITIES, blob.to_vec())]); + let mut import = BlockImportParams::new(origin, header); + import.body = body; + import.finalized = self.finalized; + import.justification = justification; + import.fork_choice = Some(self.fork_choice.clone()); + + Ok((import, maybe_keys)) + } +} + +pub type PeersFullClient = Client< + substrate_test_runtime_client::Backend, + substrate_test_runtime_client::Executor, + Block, + substrate_test_runtime_client::runtime::RuntimeApi +>; +pub type PeersLightClient = Client< + substrate_test_runtime_client::LightBackend, + substrate_test_runtime_client::LightExecutor, + Block, + 
substrate_test_runtime_client::runtime::RuntimeApi +>; + +#[derive(Clone)] +pub enum PeersClient { + Full(Arc, Arc), + Light(Arc, Arc), +} + +impl PeersClient { + pub fn as_full(&self) -> Option> { + match *self { + PeersClient::Full(ref client, ref _backend) => Some(client.clone()), + _ => None, + } + } + + pub fn as_block_import(&self) -> BlockImportAdapter { + match *self { + PeersClient::Full(ref client, ref _backend) => + BlockImportAdapter::new_full(client.clone()), + PeersClient::Light(ref client, ref _backend) => + BlockImportAdapter::Light(Arc::new(Mutex::new(client.clone())), PhantomData), + } + } + + pub fn get_aux(&self, key: &[u8]) -> ClientResult>> { + match *self { + PeersClient::Full(ref client, ref _backend) => client.get_aux(key), + PeersClient::Light(ref client, ref _backend) => client.get_aux(key), + } + } + + pub fn info(&self) -> BlockchainInfo { + match *self { + PeersClient::Full(ref client, ref _backend) => client.chain_info(), + PeersClient::Light(ref client, ref _backend) => client.chain_info(), + } + } + + pub fn header(&self, block: &BlockId) -> ClientResult::Header>> { + match *self { + PeersClient::Full(ref client, ref _backend) => client.header(block), + PeersClient::Light(ref client, ref _backend) => client.header(block), + } + } + + pub fn justification(&self, block: &BlockId) -> ClientResult> { + match *self { + PeersClient::Full(ref client, ref _backend) => client.justification(block), + PeersClient::Light(ref client, ref _backend) => client.justification(block), + } + } + + pub fn finality_notification_stream(&self) -> FinalityNotifications { + match *self { + PeersClient::Full(ref client, ref _backend) => client.finality_notification_stream(), + PeersClient::Light(ref client, ref _backend) => client.finality_notification_stream(), + } + } + + pub fn import_notification_stream(&self) -> ImportNotifications{ + match *self { + PeersClient::Full(ref client, ref _backend) => client.import_notification_stream(), + 
PeersClient::Light(ref client, ref _backend) => client.import_notification_stream(), + } + } + + pub fn finalize_block( + &self, + id: BlockId, + justification: Option, + notify: bool + ) -> ClientResult<()> { + match *self { + PeersClient::Full(ref client, ref _backend) => client.finalize_block(id, justification, notify), + PeersClient::Light(ref client, ref _backend) => client.finalize_block(id, justification, notify), + } + } +} + +pub struct Peer { + pub data: D, + client: PeersClient, + /// We keep a copy of the verifier so that we can invoke it for locally-generated blocks, + /// instead of going through the import queue. + verifier: VerifierAdapter, + /// We keep a copy of the block_import so that we can invoke it for locally-generated blocks, + /// instead of going through the import queue. + block_import: BlockImportAdapter<()>, + select_chain: Option>, + backend: Option>, + network: NetworkWorker::Hash>, + imported_blocks_stream: Pin> + Send>>, + finality_notification_stream: Pin> + Send>>, +} + +impl Peer { + /// Get this peer ID. + pub fn id(&self) -> PeerId { + self.network.service().local_peer_id().clone() + } + + /// Returns true if we're major syncing. + pub fn is_major_syncing(&self) -> bool { + self.network.service().is_major_syncing() + } + + // Returns a clone of the local SelectChain, only available on full nodes + pub fn select_chain(&self) -> Option> { + self.select_chain.clone() + } + + /// Returns the number of peers we're connected to. + pub fn num_peers(&self) -> usize { + self.network.num_connected_peers() + } + + /// Returns the number of downloaded blocks. + pub fn num_downloaded_blocks(&self) -> usize { + self.network.num_downloaded_blocks() + } + + /// Returns true if we have no peer. + pub fn is_offline(&self) -> bool { + self.num_peers() == 0 + } + + /// Request a justification for the given block. 
+ pub fn request_justification(&self, hash: &::Hash, number: NumberFor) { + self.network.service().request_justification(hash, number); + } + + /// Announces an important block on the network. + pub fn announce_block(&self, hash: ::Hash, data: Vec) { + self.network.service().announce_block(hash, data); + } + + /// Request explicit fork sync. + pub fn set_sync_fork_request(&self, peers: Vec, hash: ::Hash, number: NumberFor) { + self.network.service().set_sync_fork_request(peers, hash, number); + } + + /// Add blocks to the peer -- edit the block before adding + pub fn generate_blocks(&mut self, count: usize, origin: BlockOrigin, edit_block: F) -> H256 + where F: FnMut(BlockBuilder) -> Block + { + let best_hash = self.client.info().best_hash; + self.generate_blocks_at(BlockId::Hash(best_hash), count, origin, edit_block, false) + } + + /// Add blocks to the peer -- edit the block before adding. The chain will + /// start at the given block iD. + fn generate_blocks_at( + &mut self, + at: BlockId, + count: usize, + origin: BlockOrigin, + mut edit_block: F, + headers_only: bool, + ) -> H256 where F: FnMut(BlockBuilder) -> Block { + let full_client = self.client.as_full() + .expect("blocks could only be generated by full clients"); + let mut at = full_client.header(&at).unwrap().unwrap().hash(); + for _ in 0..count { + let builder = full_client.new_block_at( + &BlockId::Hash(at), + Default::default(), + false, + ).unwrap(); + let block = edit_block(builder); + let hash = block.header.hash(); + trace!( + target: "test_network", + "Generating {}, (#{}, parent={})", + hash, + block.header.number, + block.header.parent_hash, + ); + let header = block.header.clone(); + let (import_block, cache) = self.verifier.verify( + origin, + header.clone(), + None, + if headers_only { None } else { Some(block.extrinsics) }, + ).unwrap(); + let cache = if let Some(cache) = cache { + cache.into_iter().collect() + } else { + Default::default() + }; + + 
self.block_import.import_block(import_block, cache).expect("block_import failed"); + self.network.service().announce_block(hash, Vec::new()); + at = hash; + } + + self.network.update_chain(); + self.network.service().announce_block(at.clone(), Vec::new()); + at + } + + /// Push blocks to the peer (simplified: with or without a TX) + pub fn push_blocks(&mut self, count: usize, with_tx: bool) -> H256 { + let best_hash = self.client.info().best_hash; + self.push_blocks_at(BlockId::Hash(best_hash), count, with_tx) + } + + /// Push blocks to the peer (simplified: with or without a TX) + pub fn push_headers(&mut self, count: usize) -> H256 { + let best_hash = self.client.info().best_hash; + self.generate_tx_blocks_at(BlockId::Hash(best_hash), count, false, true) + } + + /// Push blocks to the peer (simplified: with or without a TX) starting from + /// given hash. + pub fn push_blocks_at(&mut self, at: BlockId, count: usize, with_tx: bool) -> H256 { + self.generate_tx_blocks_at(at, count, with_tx, false) + } + + /// Push blocks/headers to the peer (simplified: with or without a TX) starting from + /// given hash. 
+ fn generate_tx_blocks_at(&mut self, at: BlockId, count: usize, with_tx: bool, headers_only:bool) -> H256 { + let mut nonce = 0; + if with_tx { + self.generate_blocks_at( + at, + count, + BlockOrigin::File, |mut builder| { + let transfer = Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Alice.into(), + amount: 1, + nonce, + }; + builder.push(transfer.into_signed_tx()).unwrap(); + nonce = nonce + 1; + builder.build().unwrap().block + }, + headers_only + ) + } else { + self.generate_blocks_at( + at, + count, + BlockOrigin::File, + |builder| builder.build().unwrap().block, + headers_only, + ) + } + } + + pub fn push_authorities_change_block(&mut self, new_authorities: Vec) -> H256 { + self.generate_blocks(1, BlockOrigin::File, |mut builder| { + builder.push(Extrinsic::AuthoritiesChange(new_authorities.clone())).unwrap(); + builder.build().unwrap().block + }) + } + + /// Get a reference to the client. + pub fn client(&self) -> &PeersClient { + &self.client + } + + /// Get a reference to the network service. + pub fn network_service(&self) -> &Arc::Hash>> { + &self.network.service() + } + + /// Test helper to compare the blockchain state of multiple (networked) + /// clients. + pub fn blockchain_canon_equals(&self, other: &Self) -> bool { + if let (Some(mine), Some(others)) = (self.backend.clone(), other.backend.clone()) { + mine.blockchain().info().best_hash == others.blockchain().info().best_hash + } else { + false + } + } + + /// Count the total number of imported blocks. 
+ pub fn blocks_count(&self) -> u64 { + self.backend.as_ref().map( + |backend| backend.blockchain().info().best_number + ).unwrap_or(0) + } + + /// Return a collection of block hashes that failed verification + pub fn failed_verifications(&self) -> HashMap<::Hash, String> { + self.verifier.failed_verifications.lock().clone() + } + + pub fn has_block(&self, hash: &H256) -> bool { + self.backend.as_ref().map( + |backend| backend.blockchain().header(BlockId::hash(*hash)).unwrap().is_some() + ).unwrap_or(false) + } +} + +/// Implements `BlockImport` for any `Transaction`. Internally the transaction is +/// "converted", aka the field is set to `None`. +/// +/// This is required as the `TestNetFactory` trait does not distinguish between +/// full and light nodes. +pub enum BlockImportAdapter { + Full( + Arc, + Error = ConsensusError + > + Send>>, + PhantomData, + ), + Light( + Arc, + Error = ConsensusError + > + Send>>, + PhantomData, + ), +} + +impl BlockImportAdapter { + /// Create a new instance of `Self::Full`. + pub fn new_full( + full: impl BlockImport< + Block, + Transaction = TransactionFor, + Error = ConsensusError + > + + 'static + + Send + ) -> Self { + Self::Full(Arc::new(Mutex::new(full)), PhantomData) + } + + /// Create a new instance of `Self::Light`. 
+ pub fn new_light( + light: impl BlockImport< + Block, + Transaction = TransactionFor, + Error = ConsensusError + > + + 'static + + Send + ) -> Self { + Self::Light(Arc::new(Mutex::new(light)), PhantomData) + } +} + +impl Clone for BlockImportAdapter { + fn clone(&self) -> Self { + match self { + Self::Full(full, _) => Self::Full(full.clone(), PhantomData), + Self::Light(light, _) => Self::Light(light.clone(), PhantomData), + } + } +} + +impl BlockImport for BlockImportAdapter { + type Error = ConsensusError; + type Transaction = Transaction; + + fn check_block( + &mut self, + block: BlockCheckParams, + ) -> Result { + match self { + Self::Full(full, _) => full.lock().check_block(block), + Self::Light(light, _) => light.lock().check_block(block), + } + } + + fn import_block( + &mut self, + block: BlockImportParams, + cache: HashMap>, + ) -> Result { + match self { + Self::Full(full, _) => full.lock().import_block(block.convert_transaction(), cache), + Self::Light(light, _) => light.lock().import_block(block.convert_transaction(), cache), + } + } +} + +/// Implements `Verifier` on an `Arc>`. Used internally. +#[derive(Clone)] +struct VerifierAdapter { + verifier: Arc>>>, + failed_verifications: Arc>>, +} + +impl Verifier for VerifierAdapter { + fn verify( + &mut self, + origin: BlockOrigin, + header: B::Header, + justification: Option, + body: Option> + ) -> Result<(BlockImportParams, Option)>>), String> { + let hash = header.hash(); + self.verifier.lock().verify(origin, header, justification, body).map_err(|e| { + self.failed_verifications.lock().insert(hash, e.clone()); + e + }) + } +} + +impl VerifierAdapter { + fn new(verifier: Arc>>>) -> VerifierAdapter { + VerifierAdapter { + verifier, + failed_verifications: Default::default(), + } + } +} + +/// Configuration for a full peer. +#[derive(Default)] +pub struct FullPeerConfig { + /// Pruning window size. + pub keep_blocks: Option, + /// Block announce validator. 
+ pub block_announce_validator: Option + Send + Sync>>, + /// List of notification protocols that the network must support. + pub notifications_protocols: Vec<(ConsensusEngineId, Cow<'static, str>)>, +} + +pub trait TestNetFactory: Sized { + type Verifier: 'static + Verifier; + type PeerData: Default; + + /// These two need to be implemented! + fn from_config(config: &ProtocolConfig) -> Self; + fn make_verifier( + &self, + client: PeersClient, + config: &ProtocolConfig, + peer_data: &Self::PeerData, + ) -> Self::Verifier; + + /// Get reference to peer. + fn peer(&mut self, i: usize) -> &mut Peer; + fn peers(&self) -> &Vec>; + fn mut_peers>)>( + &mut self, + closure: F, + ); + + /// Get custom block import handle for fresh client, along with peer data. + fn make_block_import(&self, client: PeersClient) + -> ( + BlockImportAdapter, + Option>, + Option>, + Option>, + Self::PeerData, + ) + { + (client.as_block_import(), None, None, None, Default::default()) + } + + /// Get finality proof provider (if supported). + fn make_finality_proof_provider( + &self, + _client: PeersClient, + ) -> Option>> { + None + } + + fn default_config() -> ProtocolConfig { + ProtocolConfig::default() + } + + /// Create new test network with this many peers. + fn new(n: usize) -> Self { + trace!(target: "test_network", "Creating test network"); + let config = Self::default_config(); + let mut net = Self::from_config(&config); + + for i in 0..n { + trace!(target: "test_network", "Adding peer {}", i); + net.add_full_peer(); + } + net + } + + fn add_full_peer(&mut self) { + self.add_full_peer_with_config(Default::default()) + } + + /// Add a full peer. 
+ fn add_full_peer_with_config(&mut self, config: FullPeerConfig) { + let test_client_builder = match config.keep_blocks { + Some(keep_blocks) => TestClientBuilder::with_pruning_window(keep_blocks), + None => TestClientBuilder::with_default_backend(), + }; + let backend = test_client_builder.backend(); + let (c, longest_chain) = test_client_builder.build_with_longest_chain(); + let client = Arc::new(c); + + let ( + block_import, + justification_import, + finality_proof_import, + finality_proof_request_builder, + data, + ) = self.make_block_import(PeersClient::Full(client.clone(), backend.clone())); + + let verifier = self.make_verifier( + PeersClient::Full(client.clone(), backend.clone()), + &Default::default(), + &data, + ); + let verifier = VerifierAdapter::new(Arc::new(Mutex::new(Box::new(verifier) as Box<_>))); + + let import_queue = Box::new(BasicQueue::new( + verifier.clone(), + Box::new(block_import.clone()), + justification_import, + finality_proof_import, + &sp_core::testing::TaskExecutor::new(), + None, + )); + + let listen_addr = build_multiaddr![Memory(rand::random::())]; + + let mut network_config = NetworkConfiguration::new( + "test-node", + "test-client", + Default::default(), + None, + ); + network_config.transport = TransportConfig::MemoryOnly; + network_config.listen_addresses = vec![listen_addr.clone()]; + network_config.allow_non_globals_in_dht = true; + network_config.notifications_protocols = config.notifications_protocols; + + let network = NetworkWorker::new(sc_network::config::Params { + role: Role::Full, + executor: None, + network_config, + chain: client.clone(), + finality_proof_provider: self.make_finality_proof_provider( + PeersClient::Full(client.clone(), backend.clone()), + ), + finality_proof_request_builder, + on_demand: None, + transaction_pool: Arc::new(EmptyTransactionPool), + protocol_id: ProtocolId::from("test-protocol-name"), + import_queue, + block_announce_validator: config.block_announce_validator + .unwrap_or_else(|| 
Box::new(DefaultBlockAnnounceValidator)), + metrics_registry: None, + }).unwrap(); + + self.mut_peers(|peers| { + for peer in peers.iter_mut() { + peer.network.add_known_address(network.service().local_peer_id().clone(), listen_addr.clone()); + } + + let imported_blocks_stream = Box::pin(client.import_notification_stream().fuse()); + let finality_notification_stream = Box::pin(client.finality_notification_stream().fuse()); + + peers.push(Peer { + data, + client: PeersClient::Full(client, backend.clone()), + select_chain: Some(longest_chain), + backend: Some(backend), + imported_blocks_stream, + finality_notification_stream, + block_import, + verifier, + network, + }); + }); + } + + /// Add a light peer. + fn add_light_peer(&mut self) { + let (c, backend) = substrate_test_runtime_client::new_light(); + let client = Arc::new(c); + let ( + block_import, + justification_import, + finality_proof_import, + finality_proof_request_builder, + data, + ) = self.make_block_import(PeersClient::Light(client.clone(), backend.clone())); + + let verifier = self.make_verifier( + PeersClient::Light(client.clone(), backend.clone()), + &Default::default(), + &data, + ); + let verifier = VerifierAdapter::new(Arc::new(Mutex::new(Box::new(verifier) as Box<_>))); + + let import_queue = Box::new(BasicQueue::new( + verifier.clone(), + Box::new(block_import.clone()), + justification_import, + finality_proof_import, + &sp_core::testing::TaskExecutor::new(), + None, + )); + + let listen_addr = build_multiaddr![Memory(rand::random::())]; + + let mut network_config = NetworkConfiguration::new( + "test-node", + "test-client", + Default::default(), + None, + ); + network_config.transport = TransportConfig::MemoryOnly; + network_config.listen_addresses = vec![listen_addr.clone()]; + network_config.allow_non_globals_in_dht = true; + + let network = NetworkWorker::new(sc_network::config::Params { + role: Role::Light, + executor: None, + network_config, + chain: client.clone(), + 
finality_proof_provider: self.make_finality_proof_provider( + PeersClient::Light(client.clone(), backend.clone()) + ), + finality_proof_request_builder, + on_demand: None, + transaction_pool: Arc::new(EmptyTransactionPool), + protocol_id: ProtocolId::from("test-protocol-name"), + import_queue, + block_announce_validator: Box::new(DefaultBlockAnnounceValidator), + metrics_registry: None, + }).unwrap(); + + self.mut_peers(|peers| { + for peer in peers.iter_mut() { + peer.network.add_known_address(network.service().local_peer_id().clone(), listen_addr.clone()); + } + + let imported_blocks_stream = Box::pin(client.import_notification_stream().fuse()); + let finality_notification_stream = Box::pin(client.finality_notification_stream().fuse()); + + peers.push(Peer { + data, + verifier, + select_chain: None, + backend: None, + block_import, + client: PeersClient::Light(client, backend), + imported_blocks_stream, + finality_notification_stream, + network, + }); + }); + } + + /// Polls the testnet until all nodes are in sync. + /// + /// Must be executed in a task context. + fn poll_until_sync(&mut self, cx: &mut FutureContext) -> Poll<()> { + self.poll(cx); + + // Return `NotReady` if there's a mismatch in the highest block number. + let mut highest = None; + for peer in self.peers().iter() { + if peer.is_major_syncing() || peer.network.num_queued_blocks() != 0 { + return Poll::Pending + } + if peer.network.num_sync_requests() != 0 { + return Poll::Pending + } + match (highest, peer.client.info().best_hash) { + (None, b) => highest = Some(b), + (Some(ref a), ref b) if a == b => {}, + (Some(_), _) => return Poll::Pending + } + } + Poll::Ready(()) + } + + /// Polls the testnet until theres' no activiy of any kind. + /// + /// Must be executed in a task context. 
+ fn poll_until_idle(&mut self, cx: &mut FutureContext) -> Poll<()> { + self.poll(cx); + + for peer in self.peers().iter() { + if peer.is_major_syncing() || peer.network.num_queued_blocks() != 0 { + return Poll::Pending + } + if peer.network.num_sync_requests() != 0 { + return Poll::Pending + } + } + Poll::Ready(()) + } + + /// Polls the testnet until all peers are connected to each other. + /// + /// Must be executed in a task context. + fn poll_until_connected(&mut self, cx: &mut FutureContext) -> Poll<()> { + self.poll(cx); + + let num_peers = self.peers().len(); + if self.peers().iter().all(|p| p.num_peers() == num_peers - 1) { + return Poll::Ready(()) + } + + Poll::Pending + } + + /// Blocks the current thread until we are sync'ed. + /// + /// Calls `poll_until_sync` repeatedly. + fn block_until_sync(&mut self) { + futures::executor::block_on(futures::future::poll_fn::<(), _>(|cx| self.poll_until_sync(cx))); + } + + /// Blocks the current thread until there are no pending packets. + /// + /// Calls `poll_until_idle` repeatedly with the runtime passed as parameter. + fn block_until_idle(&mut self) { + futures::executor::block_on(futures::future::poll_fn::<(), _>(|cx| self.poll_until_idle(cx))); + } + + /// Blocks the current thread until all peers are connected to each other. + /// + /// Calls `poll_until_connected` repeatedly with the runtime passed as parameter. + fn block_until_connected(&mut self) { + futures::executor::block_on( + futures::future::poll_fn::<(), _>(|cx| self.poll_until_connected(cx)), + ); + } + + /// Polls the testnet. Processes all the pending actions. + fn poll(&mut self, cx: &mut FutureContext) { + self.mut_peers(|peers| { + for peer in peers { + trace!(target: "sync", "-- Polling {}", peer.id()); + if let Poll::Ready(()) = peer.network.poll_unpin(cx) { + panic!("NetworkWorker has terminated unexpectedly.") + } + trace!(target: "sync", "-- Polling complete {}", peer.id()); + + // We poll `imported_blocks_stream`. 
+ while let Poll::Ready(Some(notification)) = peer.imported_blocks_stream.as_mut().poll_next(cx) { + peer.network.service().announce_block(notification.hash, Vec::new()); + } + + // We poll `finality_notification_stream`, but we only take the last event. + let mut last = None; + while let Poll::Ready(Some(item)) = peer.finality_notification_stream.as_mut().poll_next(cx) { + last = Some(item); + } + if let Some(notification) = last { + peer.network.on_block_finalized(notification.hash, notification.header); + } + } + }); + } +} + +pub struct TestNet { + peers: Vec>, + fork_choice: ForkChoiceStrategy, +} + +impl TestNet { + /// Create a `TestNet` that used the given fork choice rule. + pub fn with_fork_choice(fork_choice: ForkChoiceStrategy) -> Self { + Self { + peers: Vec::new(), + fork_choice, + } + } +} + +impl TestNetFactory for TestNet { + type Verifier = PassThroughVerifier; + type PeerData = (); + + /// Create new test network with peers and given config. + fn from_config(_config: &ProtocolConfig) -> Self { + TestNet { + peers: Vec::new(), + fork_choice: ForkChoiceStrategy::LongestChain, + } + } + + fn make_verifier(&self, _client: PeersClient, _config: &ProtocolConfig, _peer_data: &()) + -> Self::Verifier + { + PassThroughVerifier::new_with_fork_choice(false, self.fork_choice.clone()) + } + + fn peer(&mut self, i: usize) -> &mut Peer<()> { + &mut self.peers[i] + } + + fn peers(&self) -> &Vec> { + &self.peers + } + + fn mut_peers>)>(&mut self, closure: F) { + closure(&mut self.peers); + } +} + +pub struct ForceFinalized(PeersClient); + +impl JustificationImport for ForceFinalized { + type Error = ConsensusError; + + fn import_justification( + &mut self, + hash: H256, + _number: NumberFor, + justification: Justification, + ) -> Result<(), Self::Error> { + self.0.finalize_block(BlockId::Hash(hash), Some(justification), true) + .map_err(|_| ConsensusError::InvalidJustification.into()) + } +} + +pub struct JustificationTestNet(TestNet); + +impl TestNetFactory for 
JustificationTestNet { + type Verifier = PassThroughVerifier; + type PeerData = (); + + fn from_config(config: &ProtocolConfig) -> Self { + JustificationTestNet(TestNet::from_config(config)) + } + + fn make_verifier(&self, client: PeersClient, config: &ProtocolConfig, peer_data: &()) -> Self::Verifier { + self.0.make_verifier(client, config, peer_data) + } + + fn peer(&mut self, i: usize) -> &mut Peer { + self.0.peer(i) + } + + fn peers(&self) -> &Vec> { + self.0.peers() + } + + fn mut_peers>, + )>(&mut self, closure: F) { + self.0.mut_peers(closure) + } + + fn make_block_import(&self, client: PeersClient) + -> ( + BlockImportAdapter, + Option>, + Option>, + Option>, + Self::PeerData, + ) + { + ( + client.as_block_import(), + Some(Box::new(ForceFinalized(client))), + None, + None, + Default::default(), + ) + } +} diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs new file mode 100644 index 0000000000000..86e274aae10eb --- /dev/null +++ b/client/network/test/src/sync.rs @@ -0,0 +1,752 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use sp_consensus::BlockOrigin; +use std::time::Duration; +use futures::executor::block_on; +use super::*; +use sp_consensus::block_validation::Validation; +use substrate_test_runtime::Header; + +fn test_ancestor_search_when_common_is(n: usize) { + sp_tracing::try_init_simple(); + let mut net = TestNet::new(3); + + net.peer(0).push_blocks(n, false); + net.peer(1).push_blocks(n, false); + net.peer(2).push_blocks(n, false); + + net.peer(0).push_blocks(10, true); + net.peer(1).push_blocks(100, false); + net.peer(2).push_blocks(100, false); + + net.block_until_sync(); + let peer1 = &net.peers()[1]; + assert!(net.peers()[0].blockchain_canon_equals(peer1)); +} + +#[test] +fn sync_peers_works() { + sp_tracing::try_init_simple(); + let mut net = TestNet::new(3); + + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + for peer in 0..3 { + if net.peer(peer).num_peers() != 2 { + return Poll::Pending + } + } + Poll::Ready(()) + })); +} + +#[test] +fn sync_cycle_from_offline_to_syncing_to_offline() { + sp_tracing::try_init_simple(); + let mut net = TestNet::new(3); + for peer in 0..3 { + // Offline, and not major syncing. + assert!(net.peer(peer).is_offline()); + assert!(!net.peer(peer).is_major_syncing()); + } + + // Generate blocks. + net.peer(2).push_blocks(100, false); + + // Block until all nodes are online and nodes 0 and 1 and major syncing. + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + for peer in 0..3 { + // Online + if net.peer(peer).is_offline() { + return Poll::Pending + } + if peer < 2 { + // Major syncing. + if net.peer(peer).blocks_count() < 100 && !net.peer(peer).is_major_syncing() { + return Poll::Pending + } + } + } + Poll::Ready(()) + })); + + // Block until all nodes are done syncing. 
+ block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + for peer in 0..3 { + if net.peer(peer).is_major_syncing() { + return Poll::Pending + } + } + Poll::Ready(()) + })); + + // Now drop nodes 1 and 2, and check that node 0 is offline. + net.peers.remove(2); + net.peers.remove(1); + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if !net.peer(0).is_offline() { + Poll::Pending + } else { + Poll::Ready(()) + } + })); +} + +#[test] +fn syncing_node_not_major_syncing_when_disconnected() { + sp_tracing::try_init_simple(); + let mut net = TestNet::new(3); + + // Generate blocks. + net.peer(2).push_blocks(100, false); + + // Check that we're not major syncing when disconnected. + assert!(!net.peer(1).is_major_syncing()); + + // Check that we switch to major syncing. + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if !net.peer(1).is_major_syncing() { + Poll::Pending + } else { + Poll::Ready(()) + } + })); + + // Destroy two nodes, and check that we switch to non-major syncing. 
+ net.peers.remove(2); + net.peers.remove(0); + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if net.peer(0).is_major_syncing() { + Poll::Pending + } else { + Poll::Ready(()) + } + })); +} + +#[test] +fn sync_from_two_peers_works() { + sp_tracing::try_init_simple(); + let mut net = TestNet::new(3); + net.peer(1).push_blocks(100, false); + net.peer(2).push_blocks(100, false); + net.block_until_sync(); + let peer1 = &net.peers()[1]; + assert!(net.peers()[0].blockchain_canon_equals(peer1)); + assert!(!net.peer(0).is_major_syncing()); +} + +#[test] +fn sync_from_two_peers_with_ancestry_search_works() { + sp_tracing::try_init_simple(); + let mut net = TestNet::new(3); + net.peer(0).push_blocks(10, true); + net.peer(1).push_blocks(100, false); + net.peer(2).push_blocks(100, false); + net.block_until_sync(); + let peer1 = &net.peers()[1]; + assert!(net.peers()[0].blockchain_canon_equals(peer1)); +} + +#[test] +fn ancestry_search_works_when_backoff_is_one() { + sp_tracing::try_init_simple(); + let mut net = TestNet::new(3); + + net.peer(0).push_blocks(1, false); + net.peer(1).push_blocks(2, false); + net.peer(2).push_blocks(2, false); + + net.block_until_sync(); + let peer1 = &net.peers()[1]; + assert!(net.peers()[0].blockchain_canon_equals(peer1)); +} + +#[test] +fn ancestry_search_works_when_ancestor_is_genesis() { + sp_tracing::try_init_simple(); + let mut net = TestNet::new(3); + + net.peer(0).push_blocks(13, true); + net.peer(1).push_blocks(100, false); + net.peer(2).push_blocks(100, false); + + net.block_until_sync(); + let peer1 = &net.peers()[1]; + assert!(net.peers()[0].blockchain_canon_equals(peer1)); +} + +#[test] +fn ancestry_search_works_when_common_is_one() { + test_ancestor_search_when_common_is(1); +} + +#[test] +fn ancestry_search_works_when_common_is_two() { + test_ancestor_search_when_common_is(2); +} + +#[test] +fn ancestry_search_works_when_common_is_hundred() { + test_ancestor_search_when_common_is(100); +} + +#[test] +fn 
sync_long_chain_works() { + sp_tracing::try_init_simple(); + let mut net = TestNet::new(2); + net.peer(1).push_blocks(500, false); + net.block_until_sync(); + let peer1 = &net.peers()[1]; + assert!(net.peers()[0].blockchain_canon_equals(peer1)); +} + +#[test] +fn sync_no_common_longer_chain_fails() { + sp_tracing::try_init_simple(); + let mut net = TestNet::new(3); + net.peer(0).push_blocks(20, true); + net.peer(1).push_blocks(20, false); + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if net.peer(0).is_major_syncing() { + Poll::Pending + } else { + Poll::Ready(()) + } + })); + let peer1 = &net.peers()[1]; + assert!(!net.peers()[0].blockchain_canon_equals(peer1)); +} + +#[test] +fn sync_justifications() { + sp_tracing::try_init_simple(); + let mut net = JustificationTestNet::new(3); + net.peer(0).push_blocks(20, false); + net.block_until_sync(); + + // there's currently no justification for block #10 + assert_eq!(net.peer(0).client().justification(&BlockId::Number(10)).unwrap(), None); + assert_eq!(net.peer(1).client().justification(&BlockId::Number(10)).unwrap(), None); + + // we finalize block #10, #15 and #20 for peer 0 with a justification + net.peer(0).client().finalize_block(BlockId::Number(10), Some(Vec::new()), true).unwrap(); + net.peer(0).client().finalize_block(BlockId::Number(15), Some(Vec::new()), true).unwrap(); + net.peer(0).client().finalize_block(BlockId::Number(20), Some(Vec::new()), true).unwrap(); + + let h1 = net.peer(1).client().header(&BlockId::Number(10)).unwrap().unwrap(); + let h2 = net.peer(1).client().header(&BlockId::Number(15)).unwrap().unwrap(); + let h3 = net.peer(1).client().header(&BlockId::Number(20)).unwrap().unwrap(); + + // peer 1 should get the justifications from the network + net.peer(1).request_justification(&h1.hash().into(), 10); + net.peer(1).request_justification(&h2.hash().into(), 15); + net.peer(1).request_justification(&h3.hash().into(), 20); + + block_on(futures::future::poll_fn::<(), _>(|cx| { 
+ net.poll(cx); + + for height in (10..21).step_by(5) { + if net.peer(0).client().justification(&BlockId::Number(height)).unwrap() != Some(Vec::new()) { + return Poll::Pending; + } + if net.peer(1).client().justification(&BlockId::Number(height)).unwrap() != Some(Vec::new()) { + return Poll::Pending; + } + } + + Poll::Ready(()) + })); +} + +#[test] +fn sync_justifications_across_forks() { + sp_tracing::try_init_simple(); + let mut net = JustificationTestNet::new(3); + // we push 5 blocks + net.peer(0).push_blocks(5, false); + // and then two forks 5 and 6 blocks long + let f1_best = net.peer(0).push_blocks_at(BlockId::Number(5), 5, false); + let f2_best = net.peer(0).push_blocks_at(BlockId::Number(5), 6, false); + + // peer 1 will only see the longer fork. but we'll request justifications + // for both and finalize the small fork instead. + net.block_until_sync(); + + net.peer(0).client().finalize_block(BlockId::Hash(f1_best), Some(Vec::new()), true).unwrap(); + + net.peer(1).request_justification(&f1_best, 10); + net.peer(1).request_justification(&f2_best, 11); + + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + + if net.peer(0).client().justification(&BlockId::Number(10)).unwrap() == Some(Vec::new()) && + net.peer(1).client().justification(&BlockId::Number(10)).unwrap() == Some(Vec::new()) + { + Poll::Ready(()) + } else { + Poll::Pending + } + })); +} + +#[test] +fn sync_after_fork_works() { + sp_tracing::try_init_simple(); + let mut net = TestNet::new(3); + net.peer(0).push_blocks(30, false); + net.peer(1).push_blocks(30, false); + net.peer(2).push_blocks(30, false); + + net.peer(0).push_blocks(10, true); + net.peer(1).push_blocks(20, false); + net.peer(2).push_blocks(20, false); + + net.peer(1).push_blocks(10, true); + net.peer(2).push_blocks(1, false); + + // peer 1 has the best chain + net.block_until_sync(); + let peer1 = &net.peers()[1]; + assert!(net.peers()[0].blockchain_canon_equals(peer1)); + 
(net.peers()[1].blockchain_canon_equals(peer1)); + (net.peers()[2].blockchain_canon_equals(peer1)); +} + +#[test] +fn syncs_all_forks() { + sp_tracing::try_init_simple(); + let mut net = TestNet::new(4); + net.peer(0).push_blocks(2, false); + net.peer(1).push_blocks(2, false); + + let b1 = net.peer(0).push_blocks(2, true); + let b2 = net.peer(1).push_blocks(4, false); + + net.block_until_sync(); + // Check that all peers have all of the branches. + assert!(net.peer(0).has_block(&b1)); + assert!(net.peer(0).has_block(&b2)); + assert!(net.peer(1).has_block(&b1)); + assert!(net.peer(1).has_block(&b2)); +} + +#[test] +fn own_blocks_are_announced() { + sp_tracing::try_init_simple(); + let mut net = TestNet::new(3); + net.block_until_sync(); // connect'em + net.peer(0).generate_blocks(1, BlockOrigin::Own, |builder| builder.build().unwrap().block); + + net.block_until_sync(); + + assert_eq!(net.peer(0).client.info().best_number, 1); + assert_eq!(net.peer(1).client.info().best_number, 1); + let peer0 = &net.peers()[0]; + assert!(net.peers()[1].blockchain_canon_equals(peer0)); + (net.peers()[2].blockchain_canon_equals(peer0)); +} + +#[test] +fn blocks_are_not_announced_by_light_nodes() { + sp_tracing::try_init_simple(); + let mut net = TestNet::new(0); + + // full peer0 is connected to light peer + // light peer1 is connected to full peer2 + net.add_full_peer(); + net.add_light_peer(); + + // Sync between 0 and 1. + net.peer(0).push_blocks(1, false); + assert_eq!(net.peer(0).client.info().best_number, 1); + net.block_until_sync(); + assert_eq!(net.peer(1).client.info().best_number, 1); + + // Add another node and remove node 0. + net.add_full_peer(); + net.peers.remove(0); + + // Poll for a few seconds and make sure 1 and 2 (now 0 and 1) don't sync together. 
+ let mut delay = futures_timer::Delay::new(Duration::from_secs(5)); + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + Pin::new(&mut delay).poll(cx) + })); + assert_eq!(net.peer(1).client.info().best_number, 0); +} + +#[test] +fn can_sync_small_non_best_forks() { + sp_tracing::try_init_simple(); + let mut net = TestNet::new(2); + net.peer(0).push_blocks(30, false); + net.peer(1).push_blocks(30, false); + + // small fork + reorg on peer 1. + net.peer(0).push_blocks_at(BlockId::Number(30), 2, true); + let small_hash = net.peer(0).client().info().best_hash; + net.peer(0).push_blocks_at(BlockId::Number(30), 10, false); + assert_eq!(net.peer(0).client().info().best_number, 40); + + // peer 1 only ever had the long fork. + net.peer(1).push_blocks(10, false); + assert_eq!(net.peer(1).client().info().best_number, 40); + + assert!(net.peer(0).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); + assert!(net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_none()); + + // poll until the two nodes connect, otherwise announcing the block will not work + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if net.peer(0).num_peers() == 0 { + Poll::Pending + } else { + Poll::Ready(()) + } + })); + + // synchronization: 0 synced to longer chain and 1 didn't sync to small chain. + + assert_eq!(net.peer(0).client().info().best_number, 40); + + assert!(net.peer(0).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); + assert!(!net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); + + net.peer(0).announce_block(small_hash, Vec::new()); + + // after announcing, peer 1 downloads the block. 
+ + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + + assert!(net.peer(0).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); + if net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_none() { + return Poll::Pending + } + Poll::Ready(()) + })); + net.block_until_sync(); + + let another_fork = net.peer(0).push_blocks_at(BlockId::Number(35), 2, true); + net.peer(0).announce_block(another_fork, Vec::new()); + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if net.peer(1).client().header(&BlockId::Hash(another_fork)).unwrap().is_none() { + return Poll::Pending + } + Poll::Ready(()) + })); +} + +#[test] +fn can_not_sync_from_light_peer() { + sp_tracing::try_init_simple(); + + // given the network with 1 full nodes (#0) and 1 light node (#1) + let mut net = TestNet::new(1); + net.add_light_peer(); + + // generate some blocks on #0 + net.peer(0).push_blocks(1, false); + + // and let the light client sync from this node + net.block_until_sync(); + + // ensure #0 && #1 have the same best block + let full0_info = net.peer(0).client.info(); + let light_info = net.peer(1).client.info(); + assert_eq!(full0_info.best_number, 1); + assert_eq!(light_info.best_number, 1); + assert_eq!(light_info.best_hash, full0_info.best_hash); + + // add new full client (#2) && remove #0 + net.add_full_peer(); + net.peers.remove(0); + + // ensure that the #2 (now #1) fails to sync block #1 even after 5 seconds + let mut test_finished = futures_timer::Delay::new(Duration::from_secs(5)); + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + Pin::new(&mut test_finished).poll(cx) + })); +} + +#[test] +fn light_peer_imports_header_from_announce() { + sp_tracing::try_init_simple(); + + fn import_with_announce(net: &mut TestNet, hash: H256) { + net.peer(0).announce_block(hash, Vec::new()); + + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if 
net.peer(1).client().header(&BlockId::Hash(hash)).unwrap().is_some() { + Poll::Ready(()) + } else { + Poll::Pending + } + })); + } + + // given the network with 1 full nodes (#0) and 1 light node (#1) + let mut net = TestNet::new(1); + net.add_light_peer(); + + // let them connect to each other + net.block_until_sync(); + + // check that NEW block is imported from announce message + let new_hash = net.peer(0).push_blocks(1, false); + import_with_announce(&mut net, new_hash); + + // check that KNOWN STALE block is imported from announce message + let known_stale_hash = net.peer(0).push_blocks_at(BlockId::Number(0), 1, true); + import_with_announce(&mut net, known_stale_hash); +} + +#[test] +fn can_sync_explicit_forks() { + sp_tracing::try_init_simple(); + let mut net = TestNet::new(2); + net.peer(0).push_blocks(30, false); + net.peer(1).push_blocks(30, false); + + // small fork + reorg on peer 1. + net.peer(0).push_blocks_at(BlockId::Number(30), 2, true); + let small_hash = net.peer(0).client().info().best_hash; + let small_number = net.peer(0).client().info().best_number; + net.peer(0).push_blocks_at(BlockId::Number(30), 10, false); + assert_eq!(net.peer(0).client().info().best_number, 40); + + // peer 1 only ever had the long fork. + net.peer(1).push_blocks(10, false); + assert_eq!(net.peer(1).client().info().best_number, 40); + + assert!(net.peer(0).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); + assert!(net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_none()); + + // poll until the two nodes connect, otherwise announcing the block will not work + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 { + Poll::Pending + } else { + Poll::Ready(()) + } + })); + + // synchronization: 0 synced to longer chain and 1 didn't sync to small chain. 
+ + assert_eq!(net.peer(0).client().info().best_number, 40); + + assert!(net.peer(0).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); + assert!(!net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); + + // request explicit sync + let first_peer_id = net.peer(0).id(); + net.peer(1).set_sync_fork_request(vec![first_peer_id], small_hash, small_number); + + // peer 1 downloads the block. + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + + assert!(net.peer(0).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); + if net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_none() { + return Poll::Pending + } + Poll::Ready(()) + })); +} + +#[test] +fn syncs_header_only_forks() { + sp_tracing::try_init_simple(); + let mut net = TestNet::new(0); + net.add_full_peer_with_config(Default::default()); + net.add_full_peer_with_config(FullPeerConfig { keep_blocks: Some(3), ..Default::default() }); + net.peer(0).push_blocks(2, false); + net.peer(1).push_blocks(2, false); + + net.peer(0).push_blocks(2, true); + let small_hash = net.peer(0).client().info().best_hash; + net.peer(1).push_blocks(4, false); + + net.block_until_sync(); + // Peer 1 will sync the small fork even though common block state is missing + assert!(net.peer(1).has_block(&small_hash)); +} + +#[test] +fn does_not_sync_announced_old_best_block() { + sp_tracing::try_init_simple(); + let mut net = TestNet::new(3); + + let old_hash = net.peer(0).push_blocks(1, false); + let old_hash_with_parent = net.peer(0).push_blocks(1, false); + net.peer(0).push_blocks(18, true); + net.peer(1).push_blocks(20, true); + + net.peer(0).announce_block(old_hash, Vec::new()); + block_on(futures::future::poll_fn::<(), _>(|cx| { + // poll once to import announcement + net.poll(cx); + Poll::Ready(()) + })); + assert!(!net.peer(1).is_major_syncing()); + + net.peer(0).announce_block(old_hash_with_parent, Vec::new()); + block_on(futures::future::poll_fn::<(), 
_>(|cx| { + // poll once to import announcement + net.poll(cx); + Poll::Ready(()) + })); + assert!(!net.peer(1).is_major_syncing()); +} + +#[test] +fn full_sync_requires_block_body() { + // Check that we don't sync headers-only in full mode. + sp_tracing::try_init_simple(); + let mut net = TestNet::new(2); + + net.peer(0).push_headers(1); + // Wait for nodes to connect + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 { + Poll::Pending + } else { + Poll::Ready(()) + } + })); + net.block_until_idle(); + assert_eq!(net.peer(1).client.info().best_number, 0); +} + +#[test] +fn imports_stale_once() { + sp_tracing::try_init_simple(); + + fn import_with_announce(net: &mut TestNet, hash: H256) { + // Announce twice + net.peer(0).announce_block(hash, Vec::new()); + net.peer(0).announce_block(hash, Vec::new()); + + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if net.peer(1).client().header(&BlockId::Hash(hash)).unwrap().is_some() { + Poll::Ready(()) + } else { + Poll::Pending + } + })); + } + + // given the network with 2 full nodes + let mut net = TestNet::new(2); + + // let them connect to each other + net.block_until_sync(); + + // check that NEW block is imported from announce message + let new_hash = net.peer(0).push_blocks(1, false); + import_with_announce(&mut net, new_hash); + assert_eq!(net.peer(1).num_downloaded_blocks(), 1); + + // check that KNOWN STALE block is imported from announce message + let known_stale_hash = net.peer(0).push_blocks_at(BlockId::Number(0), 1, true); + import_with_announce(&mut net, known_stale_hash); + assert_eq!(net.peer(1).num_downloaded_blocks(), 2); +} + +#[test] +fn can_sync_to_peers_with_wrong_common_block() { + sp_tracing::try_init_simple(); + let mut net = TestNet::new(2); + + net.peer(0).push_blocks(2, true); + net.peer(1).push_blocks(2, true); + let fork_hash = net.peer(0).push_blocks_at(BlockId::Number(0), 2, false); + 
net.peer(1).push_blocks_at(BlockId::Number(0), 2, false); + // wait for connection + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 { + Poll::Pending + } else { + Poll::Ready(()) + } + })); + + // both peers re-org to the same fork without notifying each other + net.peer(0).client().finalize_block(BlockId::Hash(fork_hash), Some(Vec::new()), true).unwrap(); + net.peer(1).client().finalize_block(BlockId::Hash(fork_hash), Some(Vec::new()), true).unwrap(); + let final_hash = net.peer(0).push_blocks(1, false); + + net.block_until_sync(); + + assert!(net.peer(1).client().header(&BlockId::Hash(final_hash)).unwrap().is_some()); +} + +/// Returns `is_new_best = true` for each validated announcement. +struct NewBestBlockAnnounceValidator; + +impl BlockAnnounceValidator for NewBestBlockAnnounceValidator { + fn validate( + &mut self, + _: &Header, + _: &[u8], + ) -> Result> { + Ok(Validation::Success { is_new_best: true }) + } +} + +#[test] +fn sync_blocks_when_block_announce_validator_says_it_is_new_best() { + sp_tracing::try_init_simple(); + log::trace!(target: "sync", "Test"); + let mut net = TestNet::with_fork_choice(ForkChoiceStrategy::Custom(false)); + net.add_full_peer_with_config(Default::default()); + net.add_full_peer_with_config(Default::default()); + net.add_full_peer_with_config(FullPeerConfig { + block_announce_validator: Some(Box::new(NewBestBlockAnnounceValidator)), + ..Default::default() + }); + + net.block_until_connected(); + + let block_hash = net.peer(0).push_blocks(1, false); + + while !net.peer(2).has_block(&block_hash) { + net.block_until_idle(); + } + + // Peer1 should not have the block, because peer 0 did not reported the block + // as new best. However, peer2 has a special block announcement validator + // that flags all blocks as `is_new_best` and thus, it should have synced the blocks. 
+ assert!(!net.peer(1).has_block(&block_hash)); +} diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml new file mode 100644 index 0000000000000..e7009ef4fef9a --- /dev/null +++ b/client/service/Cargo.toml @@ -0,0 +1,91 @@ +[package] +name = "sc-service" +version = "0.8.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Substrate service. Starts a thread that spins up the network, client, and extrinsic pool. Manages communication between them." +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[features] +default = ["db"] +# The RocksDB feature activates the RocksDB database backend. If it is not activated, and you pass +# a path to a database, an error will be produced at runtime. +db = ["sc-client-db/with-kvdb-rocksdb", "sc-client-db/with-parity-db"] +wasmtime = [ + "sc-executor/wasmtime", +] +# exposes the client type +test-helpers = [] + +[dependencies] +derive_more = "0.99.2" +futures01 = { package = "futures", version = "0.1.29" } +futures = { version = "0.3.4", features = ["compat"] } +jsonrpc-pubsub = "15.0" +jsonrpc-core = "15.0" +rand = "0.7.3" +parking_lot = "0.10.0" +lazy_static = "1.4.0" +log = "0.4.8" +slog = { version = "2.5.2", features = ["nested-values"] } +futures-timer = "3.0.1" +wasm-timer = "0.2" +exit-future = "0.2.0" +pin-project = "0.4.8" +hash-db = "0.15.2" +serde = "1.0.101" +serde_json = "1.0.41" +sc-keystore = "2.0.0" +sp-io = "2.0.0" +sp-runtime = "2.0.0" +sp-trie = "2.0.0" +sp-externalities = "0.8.0" +sp-utils = "2.0.0" +sp-version = "2.0.0" +sp-blockchain = "2.0.0" +sp-core = "2.0.0" +sp-session = "2.0.0" +sp-state-machine = "0.8.0" +sp-application-crypto = "2.0.0" +sp-consensus = "0.8.0" +sp-inherents = "2.0.0" +sc-network = "0.8.0" +sc-chain-spec = "2.0.0" +sc-light = "2.0.0" +sc-client-api = 
"2.0.0" +sp-api = "2.0.0" +sc-client-db = { version = "0.8.0", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.4" } +sc-executor = "0.8.0" +sc-transaction-pool = "2.0.0" +sp-transaction-pool = "2.0.0" +sc-rpc-server = "2.0.0" +sc-rpc = "2.0.0" +sc-block-builder = { version = "0.8.0", path = "../block-builder" } +sp-block-builder = "2.0.0" +sc-informant = "0.8.0" +sc-telemetry = "2.0.0" +sc-offchain = "2.0.0" +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0"} +sc-tracing = "2.0.0" +sp-tracing = "2.0.0" +tracing = "0.1.19" +parity-util-mem = { version = "0.7.0", default-features = false, features = ["primitive-types"] } + +[target.'cfg(not(target_os = "unknown"))'.dependencies] +tempfile = "3.1.0" +directories = "2.0.2" + +[dev-dependencies] +#substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } +sp-consensus-babe = "0.8.0" +grandpa = { version = "0.8.0", package = "sc-finality-grandpa" } +grandpa-primitives = { version = "2.0.0", package = "sp-finality-grandpa" } +tokio = { version = "0.2", default-features = false } +async-std = { version = "1.6", default-features = false } diff --git a/client/service/README.md b/client/service/README.md new file mode 100644 index 0000000000000..26f940f16df02 --- /dev/null +++ b/client/service/README.md @@ -0,0 +1,4 @@ +Substrate service. Starts a thread that spins up the network, client, and extrinsic pool. +Manages communication between them. + +License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs new file mode 100644 index 0000000000000..410198af26da3 --- /dev/null +++ b/client/service/src/builder.rs @@ -0,0 +1,923 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use crate::{ + error::Error, DEFAULT_PROTOCOL_ID, MallocSizeOfWasm, + TelemetryConnectionSinks, RpcHandlers, NetworkStatusSinks, + start_rpc_servers, build_network_future, TransactionPoolAdapter, TaskManager, SpawnTaskHandle, + metrics::MetricsService, + client::{light, Client, ClientConfig}, + config::{Configuration, KeystoreConfig, PrometheusConfig}, +}; +use sc_client_api::{ + light::RemoteBlockchain, ForkBlocks, BadBlocks, UsageProvider, ExecutorProvider, +}; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; +use sc_chain_spec::get_extension; +use sp_consensus::{ + block_validation::{BlockAnnounceValidator, DefaultBlockAnnounceValidator, Chain}, + import_queue::ImportQueue, +}; +use futures::{FutureExt, StreamExt, future::ready, channel::oneshot}; +use jsonrpc_pubsub::manager::SubscriptionManager; +use sc_keystore::Store as Keystore; +use log::{info, warn}; +use sc_network::config::{Role, FinalityProofProvider, OnDemand, BoxFinalityProofRequestBuilder}; +use sc_network::NetworkService; +use parking_lot::RwLock; +use sp_runtime::generic::BlockId; +use sp_runtime::traits::{ + Block as BlockT, SaturatedConversion, HashFor, Zero, BlockIdTo, +}; +use sp_api::{ProvideRuntimeApi, CallApiAt}; +use sc_executor::{NativeExecutor, NativeExecutionDispatch, RuntimeInfo}; 
+use std::sync::Arc; +use wasm_timer::SystemTime; +use sc_telemetry::{telemetry, SUBSTRATE_INFO}; +use sp_transaction_pool::MaintainedTransactionPool; +use prometheus_endpoint::Registry; +use sc_client_db::{Backend, DatabaseSettings}; +use sp_core::traits::{CodeExecutor, SpawnNamed}; +use sp_runtime::BuildStorage; +use sc_client_api::{ + BlockBackend, BlockchainEvents, + backend::StorageProvider, + proof_provider::ProofProvider, + execution_extensions::ExecutionExtensions +}; +use sp_blockchain::{HeaderMetadata, HeaderBackend}; + +/// A utility trait for building an RPC extension given a `DenyUnsafe` instance. +/// This is useful since at service definition time we don't know whether the +/// specific interface where the RPC extension will be exposed is safe or not. +/// This trait allows us to lazily build the RPC extension whenever we bind the +/// service to an interface. +pub trait RpcExtensionBuilder { + /// The type of the RPC extension that will be built. + type Output: sc_rpc::RpcExtension; + + /// Returns an instance of the RPC extension for a particular `DenyUnsafe` + /// value, e.g. the RPC extension might not expose some unsafe methods. + fn build( + &self, + deny: sc_rpc::DenyUnsafe, + subscription_executor: sc_rpc::SubscriptionTaskExecutor, + ) -> Self::Output; +} + +impl RpcExtensionBuilder for F where + F: Fn(sc_rpc::DenyUnsafe, sc_rpc::SubscriptionTaskExecutor) -> R, + R: sc_rpc::RpcExtension, +{ + type Output = R; + + fn build( + &self, + deny: sc_rpc::DenyUnsafe, + subscription_executor: sc_rpc::SubscriptionTaskExecutor, + ) -> Self::Output { + (*self)(deny, subscription_executor) + } +} + +/// A utility struct for implementing an `RpcExtensionBuilder` given a cloneable +/// `RpcExtension`, the resulting builder will simply ignore the provided +/// `DenyUnsafe` instance and return a static `RpcExtension` instance. 
+pub struct NoopRpcExtensionBuilder(pub R); + +impl RpcExtensionBuilder for NoopRpcExtensionBuilder where + R: Clone + sc_rpc::RpcExtension, +{ + type Output = R; + + fn build( + &self, + _deny: sc_rpc::DenyUnsafe, + _subscription_executor: sc_rpc::SubscriptionTaskExecutor, + ) -> Self::Output { + self.0.clone() + } +} + +impl From for NoopRpcExtensionBuilder where + R: sc_rpc::RpcExtension, +{ + fn from(e: R) -> NoopRpcExtensionBuilder { + NoopRpcExtensionBuilder(e) + } +} + + +/// Full client type. +pub type TFullClient = Client< + TFullBackend, + TFullCallExecutor, + TBl, + TRtApi, +>; + +/// Full client backend type. +pub type TFullBackend = sc_client_db::Backend; + +/// Full client call executor type. +pub type TFullCallExecutor = crate::client::LocalCallExecutor< + sc_client_db::Backend, + NativeExecutor, +>; + +/// Light client type. +pub type TLightClient = TLightClientWithBackend< + TBl, TRtApi, TExecDisp, TLightBackend +>; + +/// Light client backend type. +pub type TLightBackend = sc_light::Backend< + sc_client_db::light::LightStorage, + HashFor, +>; + +/// Light call executor type. +pub type TLightCallExecutor = sc_light::GenesisCallExecutor< + sc_light::Backend< + sc_client_db::light::LightStorage, + HashFor + >, + crate::client::LocalCallExecutor< + sc_light::Backend< + sc_client_db::light::LightStorage, + HashFor + >, + NativeExecutor + >, +>; + +type TFullParts = ( + TFullClient, + Arc>, + Arc>, + TaskManager, +); + +type TLightParts = ( + Arc>, + Arc>, + Arc>, + TaskManager, + Arc>, +); + +/// Light client backend type with a specific hash type. +pub type TLightBackendWithHash = sc_light::Backend< + sc_client_db::light::LightStorage, + THash, +>; + +/// Light client type with a specific backend. +pub type TLightClientWithBackend = Client< + TBackend, + sc_light::GenesisCallExecutor< + TBackend, + crate::client::LocalCallExecutor>, + >, + TBl, + TRtApi, +>; + +/// Creates a new full client for the given config. 
+pub fn new_full_client( + config: &Configuration, +) -> Result, Error> where + TBl: BlockT, + TExecDisp: NativeExecutionDispatch + 'static, +{ + new_full_parts(config).map(|parts| parts.0) +} + +/// Create the initial parts of a full node. +pub fn new_full_parts( + config: &Configuration, +) -> Result, Error> where + TBl: BlockT, + TExecDisp: NativeExecutionDispatch + 'static, +{ + let keystore = match &config.keystore { + KeystoreConfig::Path { path, password } => Keystore::open( + path.clone(), + password.clone() + )?, + KeystoreConfig::InMemory => Keystore::new_in_memory(), + }; + + let task_manager = { + let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry); + TaskManager::new(config.task_executor.clone(), registry)? + }; + + let executor = NativeExecutor::::new( + config.wasm_method, + config.default_heap_pages, + config.max_runtime_instances, + ); + + let chain_spec = &config.chain_spec; + let fork_blocks = get_extension::>(chain_spec.extensions()) + .cloned() + .unwrap_or_default(); + + let bad_blocks = get_extension::>(chain_spec.extensions()) + .cloned() + .unwrap_or_default(); + + let (client, backend) = { + let db_config = sc_client_db::DatabaseSettings { + state_cache_size: config.state_cache_size, + state_cache_child_ratio: + config.state_cache_child_ratio.map(|v| (v, 100)), + pruning: config.pruning.clone(), + source: config.database.clone(), + }; + + let extensions = sc_client_api::execution_extensions::ExecutionExtensions::new( + config.execution_strategies.clone(), + Some(keystore.clone()), + ); + + new_client( + db_config, + executor, + chain_spec.as_storage_builder(), + fork_blocks, + bad_blocks, + extensions, + Box::new(task_manager.spawn_handle()), + config.prometheus_config.as_ref().map(|config| config.registry.clone()), + ClientConfig { + offchain_worker_enabled : config.offchain_worker.enabled , + offchain_indexing_api: config.offchain_worker.indexing_enabled, + }, + )? 
+ }; + + Ok((client, backend, keystore, task_manager)) +} + +/// Create the initial parts of a light node. +pub fn new_light_parts( + config: &Configuration +) -> Result, Error> where + TBl: BlockT, + TExecDisp: NativeExecutionDispatch + 'static, +{ + + let task_manager = { + let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry); + TaskManager::new(config.task_executor.clone(), registry)? + }; + + let keystore = match &config.keystore { + KeystoreConfig::Path { path, password } => Keystore::open( + path.clone(), + password.clone() + )?, + KeystoreConfig::InMemory => Keystore::new_in_memory(), + }; + + let executor = NativeExecutor::::new( + config.wasm_method, + config.default_heap_pages, + config.max_runtime_instances, + ); + + let db_storage = { + let db_settings = sc_client_db::DatabaseSettings { + state_cache_size: config.state_cache_size, + state_cache_child_ratio: + config.state_cache_child_ratio.map(|v| (v, 100)), + pruning: config.pruning.clone(), + source: config.database.clone(), + }; + sc_client_db::light::LightStorage::new(db_settings)? + }; + let light_blockchain = sc_light::new_light_blockchain(db_storage); + let fetch_checker = Arc::new( + sc_light::new_fetch_checker::<_, TBl, _>( + light_blockchain.clone(), + executor.clone(), + Box::new(task_manager.spawn_handle()), + ), + ); + let on_demand = Arc::new(sc_network::config::OnDemand::new(fetch_checker)); + let backend = sc_light::new_light_backend(light_blockchain); + let client = Arc::new(light::new_light( + backend.clone(), + config.chain_spec.as_storage_builder(), + executor, + Box::new(task_manager.spawn_handle()), + config.prometheus_config.as_ref().map(|config| config.registry.clone()), + )?); + + Ok((client, backend, keystore, task_manager, on_demand)) +} + +/// Create an instance of db-backed client. 
+pub fn new_client( + settings: DatabaseSettings, + executor: E, + genesis_storage: &dyn BuildStorage, + fork_blocks: ForkBlocks, + bad_blocks: BadBlocks, + execution_extensions: ExecutionExtensions, + spawn_handle: Box, + prometheus_registry: Option, + config: ClientConfig, +) -> Result<( + crate::client::Client< + Backend, + crate::client::LocalCallExecutor, E>, + Block, + RA, + >, + Arc>, +), + sp_blockchain::Error, +> + where + Block: BlockT, + E: CodeExecutor + RuntimeInfo, +{ + const CANONICALIZATION_DELAY: u64 = 4096; + + let backend = Arc::new(Backend::new(settings, CANONICALIZATION_DELAY)?); + let executor = crate::client::LocalCallExecutor::new(backend.clone(), executor, spawn_handle, config.clone()); + Ok(( + crate::client::Client::new( + backend.clone(), + executor, + genesis_storage, + fork_blocks, + bad_blocks, + execution_extensions, + prometheus_registry, + config, + )?, + backend, + )) +} + +/// Parameters to pass into `build`. +pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> { + /// The service configuration. + pub config: Configuration, + /// A shared client returned by `new_full_parts`/`new_light_parts`. + pub client: Arc, + /// A shared backend returned by `new_full_parts`/`new_light_parts`. + pub backend: Arc, + /// A task manager returned by `new_full_parts`/`new_light_parts`. + pub task_manager: &'a mut TaskManager, + /// A shared keystore returned by `new_full_parts`/`new_light_parts`. + pub keystore: Arc>, + /// An optional, shared data fetcher for light clients. + pub on_demand: Option>>, + /// A shared transaction pool. + pub transaction_pool: Arc, + /// A RPC extension builder. Use `NoopRpcExtensionBuilder` if you just want to pass in the + /// extensions directly. + pub rpc_extensions_builder: Box + Send>, + /// An optional, shared remote blockchain instance. Used for light clients. + pub remote_blockchain: Option>>, + /// A shared network instance. 
+ pub network: Arc::Hash>>, + /// Sinks to propagate network status updates. + pub network_status_sinks: NetworkStatusSinks, + /// A Sender for RPC requests. + pub system_rpc_tx: TracingUnboundedSender>, + /// Shared Telemetry connection sinks, + pub telemetry_connection_sinks: TelemetryConnectionSinks, +} + +/// Build a shared offchain workers instance. +pub fn build_offchain_workers( + config: &Configuration, + backend: Arc, + spawn_handle: SpawnTaskHandle, + client: Arc, + network: Arc::Hash>>, +) -> Option>> + where + TBl: BlockT, TBackend: sc_client_api::Backend, + >::OffchainStorage: 'static, + TCl: Send + Sync + ProvideRuntimeApi + BlockchainEvents + 'static, + >::Api: sc_offchain::OffchainWorkerApi, +{ + let offchain_workers = match backend.offchain_storage() { + Some(db) => { + Some(Arc::new(sc_offchain::OffchainWorkers::new(client.clone(), db))) + }, + None => { + warn!("Offchain workers disabled, due to lack of offchain storage support in backend."); + None + }, + }; + + // Inform the offchain worker about new imported blocks + if let Some(offchain) = offchain_workers.clone() { + spawn_handle.spawn( + "offchain-notifications", + sc_offchain::notification_future( + config.role.is_authority(), + client.clone(), + offchain, + Clone::clone(&spawn_handle), + network.clone(), + ) + ); + } + + offchain_workers +} + +/// Spawn the tasks that are required to run a node. 
+pub fn spawn_tasks( + params: SpawnTasksParams, +) -> Result + where + TCl: ProvideRuntimeApi + HeaderMetadata + Chain + + BlockBackend + BlockIdTo + ProofProvider + + HeaderBackend + BlockchainEvents + ExecutorProvider + UsageProvider + + StorageProvider + CallApiAt + + Send + 'static, + >::Api: + sp_api::Metadata + + sc_offchain::OffchainWorkerApi + + sp_transaction_pool::runtime_api::TaggedTransactionQueue + + sp_session::SessionKeys + + sp_api::ApiErrorExt + + sp_api::ApiExt, + TBl: BlockT, + TBackend: 'static + sc_client_api::backend::Backend + Send, + TExPool: MaintainedTransactionPool::Hash> + + MallocSizeOfWasm + 'static, + TRpc: sc_rpc::RpcExtension +{ + let SpawnTasksParams { + mut config, + task_manager, + client, + on_demand, + backend, + keystore, + transaction_pool, + rpc_extensions_builder, + remote_blockchain, + network, + network_status_sinks, + system_rpc_tx, + telemetry_connection_sinks, + } = params; + + let chain_info = client.usage_info().chain; + + sp_session::generate_initial_session_keys( + client.clone(), + &BlockId::Hash(chain_info.best_hash), + config.dev_key_seed.clone().map(|s| vec![s]).unwrap_or_default(), + )?; + + info!("📦 Highest known block at #{}", chain_info.best_number); + telemetry!( + SUBSTRATE_INFO; + "node.start"; + "height" => chain_info.best_number.saturated_into::(), + "best" => ?chain_info.best_hash + ); + + let spawn_handle = task_manager.spawn_handle(); + + // Inform the tx pool about imported and finalized blocks. + spawn_handle.spawn( + "txpool-notifications", + sc_transaction_pool::notification_future(client.clone(), transaction_pool.clone()), + ); + + spawn_handle.spawn( + "on-transaction-imported", + transaction_notifications(transaction_pool.clone(), network.clone()), + ); + + // Prometheus metrics. + let metrics_service = if let Some(PrometheusConfig { port, registry }) = + config.prometheus_config.clone() + { + // Set static metrics. 
+ let metrics = MetricsService::with_prometheus(®istry, &config)?; + spawn_handle.spawn( + "prometheus-endpoint", + prometheus_endpoint::init_prometheus(port, registry).map(drop) + ); + + metrics + } else { + MetricsService::new() + }; + + // Periodically updated metrics and telemetry updates. + spawn_handle.spawn("telemetry-periodic-send", + metrics_service.run( + client.clone(), + transaction_pool.clone(), + network_status_sinks.clone() + ) + ); + + // RPC + let gen_handler = | + deny_unsafe: sc_rpc::DenyUnsafe, + rpc_middleware: sc_rpc_server::RpcMiddleware + | gen_handler( + deny_unsafe, rpc_middleware, &config, task_manager.spawn_handle(), + client.clone(), transaction_pool.clone(), keystore.clone(), + on_demand.clone(), remote_blockchain.clone(), &*rpc_extensions_builder, + backend.offchain_storage(), system_rpc_tx.clone() + ); + let rpc_metrics = sc_rpc_server::RpcMetrics::new(config.prometheus_registry()).ok(); + let rpc = start_rpc_servers(&config, gen_handler, rpc_metrics.as_ref())?; + // This is used internally, so don't restrict access to unsafe RPC + let rpc_handlers = RpcHandlers(Arc::new(gen_handler( + sc_rpc::DenyUnsafe::No, + sc_rpc_server::RpcMiddleware::new(rpc_metrics.as_ref().cloned(), "inbrowser") + ).into())); + + // Telemetry + let telemetry = config.telemetry_endpoints.clone().and_then(|endpoints| { + if endpoints.is_empty() { + // we don't want the telemetry to be initialized if telemetry_endpoints == Some([]) + return None; + } + + let genesis_hash = match client.block_hash(Zero::zero()) { + Ok(Some(hash)) => hash, + _ => Default::default(), + }; + + Some(build_telemetry( + &mut config, endpoints, telemetry_connection_sinks.clone(), network.clone(), + task_manager.spawn_handle(), genesis_hash, + )) + }); + + // Spawn informant task + spawn_handle.spawn("informant", sc_informant::build( + client.clone(), + network_status_sinks.status.clone(), + transaction_pool.clone(), + config.informant_output_format, + )); + + 
task_manager.keep_alive((telemetry, config.base_path, rpc, rpc_handlers.clone())); + + Ok(rpc_handlers) +} + +async fn transaction_notifications( + transaction_pool: Arc, + network: Arc::Hash>> +) + where + TBl: BlockT, + TExPool: MaintainedTransactionPool::Hash>, +{ + // transaction notifications + transaction_pool.import_notification_stream() + .for_each(move |hash| { + network.propagate_transaction(hash); + let status = transaction_pool.status(); + telemetry!(SUBSTRATE_INFO; "txpool.import"; + "ready" => status.ready, + "future" => status.future + ); + ready(()) + }) + .await; +} + +fn build_telemetry( + config: &mut Configuration, + endpoints: sc_telemetry::TelemetryEndpoints, + telemetry_connection_sinks: TelemetryConnectionSinks, + network: Arc::Hash>>, + spawn_handle: SpawnTaskHandle, + genesis_hash: ::Hash, +) -> sc_telemetry::Telemetry { + let is_authority = config.role.is_authority(); + let network_id = network.local_peer_id().to_base58(); + let name = config.network.node_name.clone(); + let impl_name = config.impl_name.clone(); + let impl_version = config.impl_version.clone(); + let chain_name = config.chain_spec.name().to_owned(); + let telemetry = sc_telemetry::init_telemetry(sc_telemetry::TelemetryConfig { + endpoints, + wasm_external_transport: config.telemetry_external_transport.take(), + }); + let startup_time = SystemTime::UNIX_EPOCH.elapsed() + .map(|dur| dur.as_millis()) + .unwrap_or(0); + + spawn_handle.spawn( + "telemetry-worker", + telemetry.clone() + .for_each(move |event| { + // Safe-guard in case we add more events in the future. 
+ let sc_telemetry::TelemetryEvent::Connected = event; + + telemetry!(SUBSTRATE_INFO; "system.connected"; + "name" => name.clone(), + "implementation" => impl_name.clone(), + "version" => impl_version.clone(), + "config" => "", + "chain" => chain_name.clone(), + "genesis_hash" => ?genesis_hash, + "authority" => is_authority, + "startup_time" => startup_time, + "network_id" => network_id.clone() + ); + + telemetry_connection_sinks.0.lock().retain(|sink| { + sink.unbounded_send(()).is_ok() + }); + ready(()) + }) + ); + + telemetry +} + +fn gen_handler( + deny_unsafe: sc_rpc::DenyUnsafe, + rpc_middleware: sc_rpc_server::RpcMiddleware, + config: &Configuration, + spawn_handle: SpawnTaskHandle, + client: Arc, + transaction_pool: Arc, + keystore: Arc>, + on_demand: Option>>, + remote_blockchain: Option>>, + rpc_extensions_builder: &(dyn RpcExtensionBuilder + Send), + offchain_storage: Option<>::OffchainStorage>, + system_rpc_tx: TracingUnboundedSender> +) -> sc_rpc_server::RpcHandler + where + TBl: BlockT, + TCl: ProvideRuntimeApi + BlockchainEvents + HeaderBackend + + HeaderMetadata + ExecutorProvider + + CallApiAt + ProofProvider + + StorageProvider + BlockBackend + Send + Sync + 'static, + TExPool: MaintainedTransactionPool::Hash> + 'static, + TBackend: sc_client_api::backend::Backend + 'static, + TRpc: sc_rpc::RpcExtension, + >::Api: + sp_session::SessionKeys + + sp_api::Metadata, +{ + use sc_rpc::{chain, state, author, system, offchain}; + + let system_info = sc_rpc::system::SystemInfo { + chain_name: config.chain_spec.name().into(), + impl_name: config.impl_name.clone(), + impl_version: config.impl_version.clone(), + properties: config.chain_spec.properties(), + chain_type: config.chain_spec.chain_type(), + }; + + let task_executor = sc_rpc::SubscriptionTaskExecutor::new(spawn_handle); + let subscriptions = SubscriptionManager::new(Arc::new(task_executor.clone())); + + let (chain, state, child_state) = if let (Some(remote_blockchain), Some(on_demand)) = + 
(remote_blockchain, on_demand) { + // Light clients + let chain = sc_rpc::chain::new_light( + client.clone(), + subscriptions.clone(), + remote_blockchain.clone(), + on_demand.clone(), + ); + let (state, child_state) = sc_rpc::state::new_light( + client.clone(), + subscriptions.clone(), + remote_blockchain.clone(), + on_demand, + ); + (chain, state, child_state) + + } else { + // Full nodes + let chain = sc_rpc::chain::new_full(client.clone(), subscriptions.clone()); + let (state, child_state) = sc_rpc::state::new_full(client.clone(), subscriptions.clone()); + (chain, state, child_state) + }; + + let author = sc_rpc::author::Author::new( + client, + transaction_pool, + subscriptions, + keystore, + deny_unsafe, + ); + let system = system::System::new(system_info, system_rpc_tx, deny_unsafe); + + let maybe_offchain_rpc = offchain_storage.map(|storage| { + let offchain = sc_rpc::offchain::Offchain::new(storage, deny_unsafe); + offchain::OffchainApi::to_delegate(offchain) + }); + + sc_rpc_server::rpc_handler( + ( + state::StateApi::to_delegate(state), + state::ChildStateApi::to_delegate(child_state), + chain::ChainApi::to_delegate(chain), + maybe_offchain_rpc, + author::AuthorApi::to_delegate(author), + system::SystemApi::to_delegate(system), + rpc_extensions_builder.build(deny_unsafe, task_executor), + ), + rpc_middleware + ) +} + +/// Parameters to pass into `build_network`. +pub struct BuildNetworkParams<'a, TBl: BlockT, TExPool, TImpQu, TCl> { + /// The service configuration. + pub config: &'a Configuration, + /// A shared client returned by `new_full_parts`/`new_light_parts`. + pub client: Arc, + /// A shared transaction pool. + pub transaction_pool: Arc, + /// A handle for spawning tasks. + pub spawn_handle: SpawnTaskHandle, + /// An import queue. + pub import_queue: TImpQu, + /// An optional, shared data fetcher for light clients. + pub on_demand: Option>>, + /// A block annouce validator builder. 
+ pub block_announce_validator_builder: Option) -> Box + Send> + Send + >>, + /// An optional finality proof request builder. + pub finality_proof_request_builder: Option>, + /// An optional, shared finality proof request provider. + pub finality_proof_provider: Option>>, +} + +/// Build the network service, the network status sinks and an RPC sender. +pub fn build_network( + params: BuildNetworkParams +) -> Result< + ( + Arc::Hash>>, + NetworkStatusSinks, + TracingUnboundedSender>, + NetworkStarter, + ), + Error +> + where + TBl: BlockT, + TCl: ProvideRuntimeApi + HeaderMetadata + Chain + + BlockBackend + BlockIdTo + ProofProvider + + HeaderBackend + BlockchainEvents + 'static, + TExPool: MaintainedTransactionPool::Hash> + 'static, + TImpQu: ImportQueue + 'static, +{ + let BuildNetworkParams { + config, client, transaction_pool, spawn_handle, import_queue, on_demand, + block_announce_validator_builder, finality_proof_request_builder, finality_proof_provider, + } = params; + + let transaction_pool_adapter = Arc::new(TransactionPoolAdapter { + imports_external_transactions: !matches!(config.role, Role::Light), + pool: transaction_pool, + client: client.clone(), + }); + + let protocol_id = { + let protocol_id_full = match config.chain_spec.protocol_id() { + Some(pid) => pid, + None => { + warn!("Using default protocol ID {:?} because none is configured in the \ + chain specs", DEFAULT_PROTOCOL_ID + ); + DEFAULT_PROTOCOL_ID + } + }; + sc_network::config::ProtocolId::from(protocol_id_full) + }; + + let block_announce_validator = if let Some(f) = block_announce_validator_builder { + f(client.clone()) + } else { + Box::new(DefaultBlockAnnounceValidator) + }; + + let network_params = sc_network::config::Params { + role: config.role.clone(), + executor: { + let spawn_handle = Clone::clone(&spawn_handle); + Some(Box::new(move |fut| { + spawn_handle.spawn("libp2p-node", fut); + })) + }, + network_config: config.network.clone(), + chain: client.clone(), + 
finality_proof_provider, + finality_proof_request_builder, + on_demand: on_demand, + transaction_pool: transaction_pool_adapter as _, + import_queue: Box::new(import_queue), + protocol_id, + block_announce_validator, + metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()) + }; + + let has_bootnodes = !network_params.network_config.boot_nodes.is_empty(); + let network_mut = sc_network::NetworkWorker::new(network_params)?; + let network = network_mut.service().clone(); + let network_status_sinks = NetworkStatusSinks::new(); + + let (system_rpc_tx, system_rpc_rx) = tracing_unbounded("mpsc_system_rpc"); + + let future = build_network_future( + config.role.clone(), + network_mut, + client, + network_status_sinks.clone(), + system_rpc_rx, + has_bootnodes, + config.announce_block, + ); + + // TODO: Normally, one is supposed to pass a list of notifications protocols supported by the + // node through the `NetworkConfiguration` struct. But because this function doesn't know in + // advance which components, such as GrandPa or Polkadot, will be plugged on top of the + // service, it is unfortunately not possible to do so without some deep refactoring. To bypass + // this problem, the `NetworkService` provides a `register_notifications_protocol` method that + // can be called even after the network has been initialized. However, we want to avoid the + // situation where `register_notifications_protocol` is called *after* the network actually + // connects to other peers. For this reason, we delay the process of the network future until + // the user calls `NetworkStarter::start_network`. + // + // This entire hack should eventually be removed in favour of passing the list of protocols + // through the configuration. 
+ // + // See also https://github.com/paritytech/substrate/issues/6827 + let (network_start_tx, network_start_rx) = oneshot::channel(); + + // The network worker is responsible for gathering all network messages and processing + // them. This is quite a heavy task, and at the time of the writing of this comment it + // frequently happens that this future takes several seconds or in some situations + // even more than a minute until it has processed its entire queue. This is clearly an + // issue, and ideally we would like to fix the network future to take as little time as + // possible, but we also take the extra harm-prevention measure to execute the networking + // future using `spawn_blocking`. + spawn_handle.spawn_blocking("network-worker", async move { + if network_start_rx.await.is_err() { + debug_assert!(false); + log::warn!( + "The NetworkStart returned as part of `build_network` has been silently dropped" + ); + // This `return` might seem unnecessary, but we don't want to make it look like + // everything is working as normal even though the user is clearly misusing the API. + return; + } + + future.await + }); + + Ok((network, network_status_sinks, system_rpc_tx, NetworkStarter(network_start_tx))) +} + +/// Object used to start the network. +#[must_use] +pub struct NetworkStarter(oneshot::Sender<()>); + +impl NetworkStarter { + /// Start the network. Call this after all sub-components have been initialized. + /// + /// > **Note**: If you don't call this function, the networking will not work. + pub fn start_network(self) { + let _ = self.0.send(()); + } +} diff --git a/client/service/src/chain_ops/build_sync_spec.rs b/client/service/src/chain_ops/build_sync_spec.rs new file mode 100644 index 0000000000000..9553ea21a6965 --- /dev/null +++ b/client/service/src/chain_ops/build_sync_spec.rs @@ -0,0 +1,36 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use sp_runtime::traits::Block as BlockT; +use sp_blockchain::HeaderBackend; +use std::sync::Arc; +use sp_runtime::generic::BlockId; + +/// Build a `LightSyncState` from the CHT roots stored in a backend. +pub fn build_light_sync_state( + client: Arc, +) -> Result, sp_blockchain::Error> + where + TBl: BlockT, + TCl: HeaderBackend, +{ + let finalized_hash = client.info().finalized_hash; + let header = client.header(BlockId::Hash(finalized_hash))?.unwrap(); + + Ok(sc_chain_spec::LightSyncState { + header + }) +} diff --git a/client/service/src/chain_ops/check_block.rs b/client/service/src/chain_ops/check_block.rs new file mode 100644 index 0000000000000..34baeb55445a8 --- /dev/null +++ b/client/service/src/chain_ops/check_block.rs @@ -0,0 +1,51 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use crate::error::Error; +use futures::{future, prelude::*}; +use sp_runtime::traits::Block as BlockT; +use sp_runtime::generic::BlockId; +use codec::Encode; +use sp_consensus::import_queue::ImportQueue; +use sc_client_api::{BlockBackend, UsageProvider}; + +use std::pin::Pin; +use std::sync::Arc; +use crate::chain_ops::import_blocks; + +/// Re-validate known block. +pub fn check_block( + client: Arc, + import_queue: IQ, + block_id: BlockId +) -> Pin> + Send>> +where + C: BlockBackend + UsageProvider + Send + Sync + 'static, + B: BlockT + for<'de> serde::Deserialize<'de>, + IQ: ImportQueue + 'static, +{ + match client.block(&block_id) { + Ok(Some(block)) => { + let mut buf = Vec::new(); + 1u64.encode_to(&mut buf); + block.encode_to(&mut buf); + let reader = std::io::Cursor::new(buf); + import_blocks(client, import_queue, reader, true, true) + } + Ok(None) => Box::pin(future::err("Unknown block".into())), + Err(e) => Box::pin(future::err(format!("Error reading block: {:?}", e).into())), + } +} diff --git a/client/service/src/chain_ops/export_blocks.rs b/client/service/src/chain_ops/export_blocks.rs new file mode 100644 index 0000000000000..2f32cbf7fbdb7 --- /dev/null +++ b/client/service/src/chain_ops/export_blocks.rs @@ -0,0 +1,104 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use crate::error::Error; +use log::info; +use futures::{future, prelude::*}; +use sp_runtime::traits::{ + Block as BlockT, NumberFor, One, Zero, SaturatedConversion +}; +use sp_runtime::generic::BlockId; +use codec::Encode; + +use std::{io::Write, pin::Pin}; +use sc_client_api::{BlockBackend, UsageProvider}; +use std::sync::Arc; +use std::task::Poll; + +/// Performs the blocks export. +pub fn export_blocks( + client: Arc, + mut output: impl Write + 'static, + from: NumberFor, + to: Option>, + binary: bool +) -> Pin>>> +where + C: BlockBackend + UsageProvider + 'static, + B: BlockT, +{ + let mut block = from; + + let last = match to { + Some(v) if v.is_zero() => One::one(), + Some(v) => v, + None => client.usage_info().chain.best_number, + }; + + let mut wrote_header = false; + + // Exporting blocks is implemented as a future, because we want the operation to be + // interruptible. + // + // Every time we write a block to the output, the `Future` re-schedules itself and returns + // `Poll::Pending`. + // This makes it possible either to interleave other operations in-between the block exports, + // or to stop the operation completely. + let export = future::poll_fn(move |cx| { + let client = &client; + + if last < block { + return Poll::Ready(Err("Invalid block range specified".into())); + } + + if !wrote_header { + info!("Exporting blocks from #{} to #{}", block, last); + if binary { + let last_: u64 = last.saturated_into::(); + let block_: u64 = block.saturated_into::(); + let len: u64 = last_ - block_ + 1; + output.write_all(&len.encode())?; + } + wrote_header = true; + } + + match client.block(&BlockId::number(block))? { + Some(block) => { + if binary { + output.write_all(&block.encode())?; + } else { + serde_json::to_writer(&mut output, &block) + .map_err(|e| format!("Error writing JSON: {}", e))?; + } + }, + // Reached end of the chain. 
+ None => return Poll::Ready(Ok(())), + } + if (block % 10000.into()).is_zero() { + info!("#{}", block); + } + if block == last { + return Poll::Ready(Ok(())); + } + block += One::one(); + + // Re-schedule the task in order to continue the operation. + cx.waker().wake_by_ref(); + Poll::Pending + }); + + Box::pin(export) +} diff --git a/client/service/src/chain_ops/export_raw_state.rs b/client/service/src/chain_ops/export_raw_state.rs new file mode 100644 index 0000000000000..3fe44dbdb142d --- /dev/null +++ b/client/service/src/chain_ops/export_raw_state.rs @@ -0,0 +1,71 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use crate::error::Error; +use sp_runtime::traits::Block as BlockT; +use sp_runtime::generic::BlockId; +use sp_core::storage::{StorageKey, well_known_keys, ChildInfo, Storage, StorageChild, StorageMap}; +use sc_client_api::{StorageProvider, UsageProvider}; + +use std::{collections::HashMap, sync::Arc}; + +/// Export the raw state at the given `block`. If `block` is `None`, the +/// best block will be used. 
+pub fn export_raw_state( + client: Arc, + block: Option>, +) -> Result +where + C: UsageProvider + StorageProvider, + B: BlockT, + BA: sc_client_api::backend::Backend, +{ + let block = block.unwrap_or_else( + || BlockId::Hash(client.usage_info().chain.best_hash) + ); + + let empty_key = StorageKey(Vec::new()); + let mut top_storage = client.storage_pairs(&block, &empty_key)?; + let mut children_default = HashMap::new(); + + // Remove all default child storage roots from the top storage and collect the child storage + // pairs. + while let Some(pos) = top_storage + .iter() + .position(|(k, _)| k.0.starts_with(well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX)) { + let (key, _) = top_storage.swap_remove(pos); + + let key = StorageKey( + key.0[well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX.len()..].to_vec(), + ); + let child_info = ChildInfo::new_default(&key.0); + + let keys = client.child_storage_keys(&block, &child_info, &empty_key)?; + let mut pairs = StorageMap::new(); + keys.into_iter().try_for_each(|k| { + if let Some(value) = client.child_storage(&block, &child_info, &k)? { + pairs.insert(k.0, value.0); + } + + Ok::<_, Error>(()) + })?; + + children_default.insert(key.0, StorageChild { child_info, data: pairs }); + } + + let top = top_storage.into_iter().map(|(k, v)| (k.0, v.0)).collect(); + Ok(Storage { top, children_default }) +} diff --git a/client/service/src/chain_ops/import_blocks.rs b/client/service/src/chain_ops/import_blocks.rs new file mode 100644 index 0000000000000..46ad0d0501d93 --- /dev/null +++ b/client/service/src/chain_ops/import_blocks.rs @@ -0,0 +1,472 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use crate::error; +use crate::error::Error; +use sc_chain_spec::ChainSpec; +use log::{warn, info}; +use futures::{future, prelude::*}; +use sp_runtime::traits::{ + Block as BlockT, NumberFor, Zero, Header, MaybeSerializeDeserialize, +}; +use sp_runtime::generic::SignedBlock; +use codec::{Decode, IoReader as CodecIoReader}; +use sp_consensus::{ + BlockOrigin, + import_queue::{IncomingBlock, Link, BlockImportError, BlockImportResult, ImportQueue}, +}; + +use std::{io::{Read, Seek}, pin::Pin}; +use std::time::{Duration, Instant}; +use futures_timer::Delay; +use std::task::Poll; +use serde_json::{de::IoRead as JsonIoRead, Deserializer, StreamDeserializer}; +use std::convert::{TryFrom, TryInto}; +use sp_runtime::traits::{CheckedDiv, Saturating}; +use sc_client_api::UsageProvider; + +/// Number of blocks we will add to the queue before waiting for the queue to catch up. +const MAX_PENDING_BLOCKS: u64 = 1_024; + +/// Number of milliseconds to wait until next poll. +const DELAY_TIME: u64 = 2_000; + +/// Number of milliseconds that must have passed between two updates. 
+const TIME_BETWEEN_UPDATES: u64 = 3_000; + +use std::sync::Arc; + +/// Build a chain spec json +pub fn build_spec(spec: &dyn ChainSpec, raw: bool) -> error::Result { + spec.as_json(raw).map_err(Into::into) +} + + +/// Helper enum that wraps either a binary decoder (from parity-scale-codec), or a JSON decoder +/// (from serde_json). Implements the Iterator Trait, calling `next()` will decode the next +/// SignedBlock and return it. +enum BlockIter where + R: std::io::Read + std::io::Seek, +{ + Binary { + // Total number of blocks we are expecting to decode. + num_expected_blocks: u64, + // Number of blocks we have decoded thus far. + read_block_count: u64, + // Reader to the data, used for decoding new blocks. + reader: CodecIoReader, + }, + Json { + // Nubmer of blocks we have decoded thus far. + read_block_count: u64, + // Stream to the data, used for decoding new blocks. + reader: StreamDeserializer<'static, JsonIoRead, SignedBlock>, + }, +} + +impl BlockIter where + R: Read + Seek + 'static, + B: BlockT + MaybeSerializeDeserialize, +{ + fn new(input: R, binary: bool) -> Result { + if binary { + let mut reader = CodecIoReader(input); + // If the file is encoded in binary format, it is expected to first specify the number + // of blocks that are going to be decoded. We read it and add it to our enum struct. + let num_expected_blocks: u64 = Decode::decode(&mut reader) + .map_err(|e| format!("Failed to decode the number of blocks: {:?}", e))?; + Ok(BlockIter::Binary { + num_expected_blocks, + read_block_count: 0, + reader, + }) + } else { + let stream_deser = Deserializer::from_reader(input) + .into_iter::>(); + Ok(BlockIter::Json { + reader: stream_deser, + read_block_count: 0, + }) + } + } + + /// Returns the number of blocks read thus far. + fn read_block_count(&self) -> u64 { + match self { + BlockIter::Binary { read_block_count, .. } + | BlockIter::Json { read_block_count, .. 
} + => *read_block_count, + } + } + + /// Returns the total number of blocks to be imported, if possible. + fn num_expected_blocks(&self) -> Option { + match self { + BlockIter::Binary { num_expected_blocks, ..} => Some(*num_expected_blocks), + BlockIter::Json {..} => None + } + } +} + +impl Iterator for BlockIter where + R: Read + Seek + 'static, + B: BlockT + MaybeSerializeDeserialize, +{ + type Item = Result, String>; + + fn next(&mut self) -> Option { + match self { + BlockIter::Binary { num_expected_blocks, read_block_count, reader } => { + if read_block_count < num_expected_blocks { + let block_result: Result, _> = SignedBlock::::decode(reader) + .map_err(|e| e.to_string()); + *read_block_count += 1; + Some(block_result) + } else { + // `read_block_count` == `num_expected_blocks` so we've read enough blocks. + None + } + } + BlockIter::Json { reader, read_block_count } => { + let res = Some(reader.next()?.map_err(|e| e.to_string())); + *read_block_count += 1; + res + } + } + } +} + +/// Imports the SignedBlock to the queue. +fn import_block_to_queue( + signed_block: SignedBlock, + queue: &mut TImpQu, + force: bool +) where + TBl: BlockT + MaybeSerializeDeserialize, + TImpQu: 'static + ImportQueue, +{ + let (header, extrinsics) = signed_block.block.deconstruct(); + let hash = header.hash(); + // import queue handles verification and importing it into the client. + queue.import_blocks(BlockOrigin::File, vec![ + IncomingBlock:: { + hash, + header: Some(header), + body: Some(extrinsics), + justification: signed_block.justification, + origin: None, + allow_missing_state: false, + import_existing: force, + } + ]); +} + +/// Returns true if we have imported every block we were supposed to import, else returns false. 
+fn importing_is_done( + num_expected_blocks: Option, + read_block_count: u64, + imported_blocks: u64 +) -> bool { + if let Some(num_expected_blocks) = num_expected_blocks { + imported_blocks >= num_expected_blocks + } else { + imported_blocks >= read_block_count + } +} + +/// Structure used to log the block importing speed. +struct Speedometer { + best_number: NumberFor, + last_number: Option>, + last_update: Instant, +} + +impl Speedometer { + /// Creates a fresh Speedometer. + fn new() -> Self { + Self { + best_number: NumberFor::::from(0), + last_number: None, + last_update: Instant::now(), + } + } + + /// Calculates `(best_number - last_number) / (now - last_update)` and + /// logs the speed of import. + fn display_speed(&self) { + // Number of milliseconds elapsed since last time. + let elapsed_ms = { + let elapsed = self.last_update.elapsed(); + let since_last_millis = elapsed.as_secs() * 1000; + let since_last_subsec_millis = elapsed.subsec_millis() as u64; + since_last_millis + since_last_subsec_millis + }; + + // Number of blocks that have been imported since last time. + let diff = match self.last_number { + None => return, + Some(n) => self.best_number.saturating_sub(n) + }; + + if let Ok(diff) = TryInto::::try_into(diff) { + // If the number of blocks can be converted to a regular integer, then it's easy: just + // do the math and turn it into a `f64`. + let speed = diff.saturating_mul(10_000).checked_div(u128::from(elapsed_ms)) + .map_or(0.0, |s| s as f64) / 10.0; + info!("📦 Current best block: {} ({:4.1} bps)", self.best_number, speed); + } else { + // If the number of blocks can't be converted to a regular integer, then we need a more + // algebraic approach and we stay within the realm of integers. 
+ let one_thousand = NumberFor::::from(1_000); + let elapsed = NumberFor::::from( + >::try_from(elapsed_ms).unwrap_or(u32::max_value()) + ); + + let speed = diff.saturating_mul(one_thousand).checked_div(&elapsed) + .unwrap_or_else(Zero::zero); + info!("📦 Current best block: {} ({} bps)", self.best_number, speed) + } + } + + /// Updates the Speedometer. + fn update(&mut self, best_number: NumberFor) { + self.last_number = Some(self.best_number); + self.best_number = best_number; + self.last_update = Instant::now(); + } + + // If more than TIME_BETWEEN_UPDATES has elapsed since last update, + // then print and update the speedometer. + fn notify_user(&mut self, best_number: NumberFor) { + let delta = Duration::from_millis(TIME_BETWEEN_UPDATES); + if Instant::now().duration_since(self.last_update) >= delta { + self.display_speed(); + self.update(best_number); + } + } +} + +/// Different State that the `import_blocks` future could be in. +enum ImportState where + R: Read + Seek + 'static, + B: BlockT + MaybeSerializeDeserialize, +{ + /// We are reading from the BlockIter structure, adding those blocks to the queue if possible. + Reading{block_iter: BlockIter}, + /// The queue is full (contains at least MAX_PENDING_BLOCKS blocks) and we are waiting for it to + /// catch up. + WaitingForImportQueueToCatchUp{ + block_iter: BlockIter, + delay: Delay, + block: SignedBlock + }, + // We have added all the blocks to the queue but they are still being processed. + WaitingForImportQueueToFinish{ + num_expected_blocks: Option, + read_block_count: u64, + delay: Delay, + }, +} + +/// Starts the process of importing blocks. 
+pub fn import_blocks( + client: Arc, + mut import_queue: IQ, + input: impl Read + Seek + Send + 'static, + force: bool, + binary: bool, +) -> Pin> + Send>> +where + C: UsageProvider + Send + Sync + 'static, + B: BlockT + for<'de> serde::Deserialize<'de>, + IQ: ImportQueue + 'static, +{ + struct WaitLink { + imported_blocks: u64, + has_error: bool, + } + + impl WaitLink { + fn new() -> WaitLink { + WaitLink { + imported_blocks: 0, + has_error: false, + } + } + } + + impl Link for WaitLink { + fn blocks_processed( + &mut self, + imported: usize, + _num_expected_blocks: usize, + results: Vec<(Result>, BlockImportError>, B::Hash)> + ) { + self.imported_blocks += imported as u64; + + for result in results { + if let (Err(err), hash) = result { + warn!("There was an error importing block with hash {:?}: {:?}", hash, err); + self.has_error = true; + break; + } + } + } + } + + let mut link = WaitLink::new(); + let block_iter_res: Result, String> = BlockIter::new(input, binary); + + let block_iter = match block_iter_res { + Ok(block_iter) => block_iter, + Err(e) => { + // We've encountered an error while creating the block iterator + // so we can just return a future that returns an error. + return future::ready(Err(Error::Other(e))).boxed() + } + }; + + let mut state = Some(ImportState::Reading{block_iter}); + let mut speedometer = Speedometer::::new(); + + // Importing blocks is implemented as a future, because we want the operation to be + // interruptible. + // + // Every time we read a block from the input or import a bunch of blocks from the import + // queue, the `Future` re-schedules itself and returns `Poll::Pending`. + // This makes it possible either to interleave other operations in-between the block imports, + // or to stop the operation completely. 
+ let import = future::poll_fn(move |cx| { + let client = &client; + let queue = &mut import_queue; + match state.take().expect("state should never be None; qed") { + ImportState::Reading{mut block_iter} => { + match block_iter.next() { + None => { + // The iterator is over: we now need to wait for the import queue to finish. + let num_expected_blocks = block_iter.num_expected_blocks(); + let read_block_count = block_iter.read_block_count(); + let delay = Delay::new(Duration::from_millis(DELAY_TIME)); + state = Some(ImportState::WaitingForImportQueueToFinish { + num_expected_blocks, read_block_count, delay + }); + }, + Some(block_result) => { + let read_block_count = block_iter.read_block_count(); + match block_result { + Ok(block) => { + if read_block_count - link.imported_blocks >= MAX_PENDING_BLOCKS { + // The queue is full, so do not add this block and simply wait + // until the queue has made some progress. + let delay = Delay::new(Duration::from_millis(DELAY_TIME)); + state = Some(ImportState::WaitingForImportQueueToCatchUp { + block_iter, delay, block + }); + } else { + // Queue is not full, we can keep on adding blocks to the queue. + import_block_to_queue(block, queue, force); + state = Some(ImportState::Reading{block_iter}); + } + } + Err(e) => { + return Poll::Ready( + Err(Error::Other( + format!("Error reading block #{}: {}", read_block_count, e) + ))) + } + } + } + } + }, + ImportState::WaitingForImportQueueToCatchUp{block_iter, mut delay, block} => { + let read_block_count = block_iter.read_block_count(); + if read_block_count - link.imported_blocks >= MAX_PENDING_BLOCKS { + // Queue is still full, so wait until there is room to insert our block. 
+ match Pin::new(&mut delay).poll(cx) { + Poll::Pending => { + state = Some(ImportState::WaitingForImportQueueToCatchUp { + block_iter, delay, block + }); + return Poll::Pending + }, + Poll::Ready(_) => { + delay.reset(Duration::from_millis(DELAY_TIME)); + }, + } + state = Some(ImportState::WaitingForImportQueueToCatchUp { + block_iter, delay, block + }); + } else { + // Queue is no longer full, so we can add our block to the queue. + import_block_to_queue(block, queue, force); + // Switch back to Reading state. + state = Some(ImportState::Reading{block_iter}); + } + }, + ImportState::WaitingForImportQueueToFinish { + num_expected_blocks, read_block_count, mut delay + } => { + // All the blocks have been added to the queue, which doesn't mean they + // have all been properly imported. + if importing_is_done(num_expected_blocks, read_block_count, link.imported_blocks) { + // Importing is done, we can log the result and return. + info!( + "🎉 Imported {} blocks. Best: #{}", + read_block_count, client.usage_info().chain.best_number + ); + return Poll::Ready(Ok(())) + } else { + // Importing is not done, we still have to wait for the queue to finish. + // Wait for the delay, because we know the queue is lagging behind. 
+ match Pin::new(&mut delay).poll(cx) { + Poll::Pending => { + state = Some(ImportState::WaitingForImportQueueToFinish { + num_expected_blocks, read_block_count, delay + }); + return Poll::Pending + }, + Poll::Ready(_) => { + delay.reset(Duration::from_millis(DELAY_TIME)); + }, + } + + state = Some(ImportState::WaitingForImportQueueToFinish { + num_expected_blocks, read_block_count, delay + }); + } + } + } + + queue.poll_actions(cx, &mut link); + + let best_number = client.usage_info().chain.best_number; + speedometer.notify_user(best_number); + + if link.has_error { + return Poll::Ready(Err( + Error::Other( + format!("Stopping after #{} blocks because of an error", link.imported_blocks) + ) + )) + } + + cx.waker().wake_by_ref(); + Poll::Pending + }); + Box::pin(import) +} diff --git a/client/service/src/chain_ops/mod.rs b/client/service/src/chain_ops/mod.rs new file mode 100644 index 0000000000000..e6b2fdfb8e0e6 --- /dev/null +++ b/client/service/src/chain_ops/mod.rs @@ -0,0 +1,31 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Chain utilities. 
+ +mod check_block; +mod export_blocks; +mod export_raw_state; +mod import_blocks; +mod revert_chain; +mod build_sync_spec; + +pub use check_block::*; +pub use export_blocks::*; +pub use export_raw_state::*; +pub use import_blocks::*; +pub use revert_chain::*; +pub use build_sync_spec::*; diff --git a/client/service/src/chain_ops/revert_chain.rs b/client/service/src/chain_ops/revert_chain.rs new file mode 100644 index 0000000000000..eaee2c03f9b31 --- /dev/null +++ b/client/service/src/chain_ops/revert_chain.rs @@ -0,0 +1,43 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use crate::error::Error; +use log::info; +use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; +use sc_client_api::{Backend, UsageProvider}; +use std::sync::Arc; + +/// Performs a revert of `blocks` blocks. +pub fn revert_chain( + client: Arc, + backend: Arc, + blocks: NumberFor +) -> Result<(), Error> +where + B: BlockT, + C: UsageProvider, + BA: Backend, +{ + let reverted = backend.revert(blocks, false)?; + let info = client.usage_info().chain; + + if reverted.0.is_zero() { + info!("There aren't any non-finalized blocks to revert."); + } else { + info!("Reverted {} blocks. 
Best: #{} ({})", reverted.0, info.best_number, info.best_hash); + } + Ok(()) +} diff --git a/client/service/src/client/block_rules.rs b/client/service/src/client/block_rules.rs new file mode 100644 index 0000000000000..be84614c2a590 --- /dev/null +++ b/client/service/src/client/block_rules.rs @@ -0,0 +1,79 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Client fixed chain specification rules + +use std::collections::{HashMap, HashSet}; + +use sp_runtime::{ + traits::{Block as BlockT, NumberFor}, +}; + +use sc_client_api::{ForkBlocks, BadBlocks}; + +/// Chain specification rules lookup result. +pub enum LookupResult { + /// Specification rules do not contain any special rules about this block + NotSpecial, + /// The block is known to be bad and should not be imported + KnownBad, + /// There is a specified canonical block hash for the given height + Expected(B::Hash) +} + +/// Chain-specific block filtering rules. +/// +/// This holds known bad blocks and known good forks, and +/// is usually part of the chain spec. +pub struct BlockRules { + bad: HashSet, + forks: HashMap, B::Hash>, +} + +impl BlockRules { + /// New block rules with provided black and white lists. 
+ pub fn new( + fork_blocks: ForkBlocks, + bad_blocks: BadBlocks, + ) -> Self { + Self { + bad: bad_blocks.unwrap_or_else(|| HashSet::new()), + forks: fork_blocks.unwrap_or_else(|| vec![]).into_iter().collect(), + } + } + + /// Mark a new block as bad. + pub fn mark_bad(&mut self, hash: B::Hash) { + self.bad.insert(hash); + } + + /// Check if there's any rule affecting the given block. + pub fn lookup(&self, number: NumberFor, hash: &B::Hash) -> LookupResult { + if let Some(hash_for_height) = self.forks.get(&number) { + if hash_for_height != hash { + return LookupResult::Expected(hash_for_height.clone()); + } + } + + if self.bad.contains(hash) { + return LookupResult::KnownBad + } + + LookupResult::NotSpecial + } +} diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs new file mode 100644 index 0000000000000..1919c76ff489b --- /dev/null +++ b/client/service/src/client/call_executor.rs @@ -0,0 +1,281 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::{sync::Arc, panic::UnwindSafe, result, cell::RefCell}; +use codec::{Encode, Decode}; +use sp_runtime::{ + generic::BlockId, traits::{Block as BlockT, HashFor, NumberFor}, +}; +use sp_state_machine::{ + self, OverlayedChanges, Ext, ExecutionManager, StateMachine, ExecutionStrategy, + backend::Backend as _, StorageProof, +}; +use sc_executor::{RuntimeVersion, RuntimeInfo, NativeVersion}; +use sp_externalities::Extensions; +use sp_core::{ + NativeOrEncoded, NeverNativeValue, traits::{CodeExecutor, SpawnNamed}, + offchain::storage::OffchainOverlayedChanges, +}; +use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache}; +use sc_client_api::{backend, call_executor::CallExecutor}; +use super::client::ClientConfig; + +/// Call executor that executes methods locally, querying all required +/// data from local backend. +pub struct LocalCallExecutor { + backend: Arc, + executor: E, + spawn_handle: Box, + client_config: ClientConfig, +} + +impl LocalCallExecutor { + /// Creates new instance of local call executor. 
+ pub fn new( + backend: Arc, + executor: E, + spawn_handle: Box, + client_config: ClientConfig, + ) -> Self { + LocalCallExecutor { + backend, + executor, + spawn_handle, + client_config, + } + } +} + +impl Clone for LocalCallExecutor where E: Clone { + fn clone(&self) -> Self { + LocalCallExecutor { + backend: self.backend.clone(), + executor: self.executor.clone(), + spawn_handle: self.spawn_handle.clone(), + client_config: self.client_config.clone(), + } + } +} + +impl CallExecutor for LocalCallExecutor +where + B: backend::Backend, + E: CodeExecutor + RuntimeInfo + Clone + 'static, + Block: BlockT, +{ + type Error = E::Error; + + type Backend = B; + + fn call( + &self, + id: &BlockId, + method: &str, + call_data: &[u8], + strategy: ExecutionStrategy, + extensions: Option, + ) -> sp_blockchain::Result> { + let mut changes = OverlayedChanges::default(); + let mut offchain_changes = if self.client_config.offchain_indexing_api { + OffchainOverlayedChanges::enabled() + } else { + OffchainOverlayedChanges::disabled() + }; + let changes_trie = backend::changes_tries_state_at_block( + id, self.backend.changes_trie_storage() + )?; + let state = self.backend.state_at(*id)?; + let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); + let return_data = StateMachine::new( + &state, + changes_trie, + &mut changes, + &mut offchain_changes, + &self.executor, + method, + call_data, + extensions.unwrap_or_default(), + &state_runtime_code.runtime_code()?, + self.spawn_handle.clone(), + ).execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( + strategy.get_manager(), + None, + )?; + + Ok(return_data.into_encoded()) + } + + fn contextual_call< + 'a, + IB: Fn() -> sp_blockchain::Result<()>, + EM: Fn( + Result, Self::Error>, + Result, Self::Error> + ) -> Result, Self::Error>, + R: Encode + Decode + PartialEq, + NC: FnOnce() -> result::Result + UnwindSafe, + >( + &self, + initialize_block_fn: IB, + at: &BlockId, + method: &str, + 
call_data: &[u8], + changes: &RefCell, + offchain_changes: &RefCell, + storage_transaction_cache: Option<&RefCell< + StorageTransactionCache + >>, + initialize_block: InitializeBlock<'a, Block>, + execution_manager: ExecutionManager, + native_call: Option, + recorder: &Option>, + extensions: Option, + ) -> Result, sp_blockchain::Error> where ExecutionManager: Clone { + match initialize_block { + InitializeBlock::Do(ref init_block) + if init_block.borrow().as_ref().map(|id| id != at).unwrap_or(true) => { + initialize_block_fn()?; + }, + // We don't need to initialize the runtime at a block. + _ => {}, + } + + let changes_trie_state = backend::changes_tries_state_at_block(at, self.backend.changes_trie_storage())?; + let mut storage_transaction_cache = storage_transaction_cache.map(|c| c.borrow_mut()); + + let mut state = self.backend.state_at(*at)?; + + let changes = &mut *changes.borrow_mut(); + let offchain_changes = &mut *offchain_changes.borrow_mut(); + + match recorder { + Some(recorder) => { + let trie_state = state.as_trie_backend() + .ok_or_else(|| + Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) as Box + )?; + + let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&trie_state); + // It is important to extract the runtime code here before we create the proof + // recorder. 
+ let runtime_code = state_runtime_code.runtime_code()?; + + let backend = sp_state_machine::ProvingBackend::new_with_recorder( + trie_state, + recorder.clone(), + ); + + let mut state_machine = StateMachine::new( + &backend, + changes_trie_state, + changes, + offchain_changes, + &self.executor, + method, + call_data, + extensions.unwrap_or_default(), + &runtime_code, + self.spawn_handle.clone(), + ); + // TODO: https://github.com/paritytech/substrate/issues/4455 + // .with_storage_transaction_cache(storage_transaction_cache.as_mut().map(|c| &mut **c)) + state_machine.execute_using_consensus_failure_handler(execution_manager, native_call) + }, + None => { + let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); + let runtime_code = state_runtime_code.runtime_code()?; + let mut state_machine = StateMachine::new( + &state, + changes_trie_state, + changes, + offchain_changes, + &self.executor, + method, + call_data, + extensions.unwrap_or_default(), + &runtime_code, + self.spawn_handle.clone(), + ).with_storage_transaction_cache(storage_transaction_cache.as_mut().map(|c| &mut **c)); + state_machine.execute_using_consensus_failure_handler(execution_manager, native_call) + } + }.map_err(Into::into) + } + + fn runtime_version(&self, id: &BlockId) -> sp_blockchain::Result { + let mut overlay = OverlayedChanges::default(); + let mut offchain_overlay = OffchainOverlayedChanges::default(); + let changes_trie_state = backend::changes_tries_state_at_block( + id, + self.backend.changes_trie_storage(), + )?; + let state = self.backend.state_at(*id)?; + let mut cache = StorageTransactionCache::::default(); + let mut ext = Ext::new( + &mut overlay, + &mut offchain_overlay, + &mut cache, + &state, + changes_trie_state, + None, + ); + let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); + self.executor.runtime_version(&mut ext, &state_runtime_code.runtime_code()?) 
+ .map_err(|e| sp_blockchain::Error::VersionInvalid(format!("{:?}", e)).into()) + } + + fn prove_at_trie_state>>( + &self, + trie_state: &sp_state_machine::TrieBackend>, + overlay: &mut OverlayedChanges, + method: &str, + call_data: &[u8] + ) -> Result<(Vec, StorageProof), sp_blockchain::Error> { + sp_state_machine::prove_execution_on_trie_backend::<_, _, NumberFor, _, _>( + trie_state, + overlay, + &self.executor, + self.spawn_handle.clone(), + method, + call_data, + &sp_state_machine::backend::BackendRuntimeCode::new(trie_state).runtime_code()?, + ) + .map_err(Into::into) + } + + fn native_runtime_version(&self) -> Option<&NativeVersion> { + Some(self.executor.native_version()) + } +} + +impl sp_version::GetRuntimeVersion for LocalCallExecutor + where + B: backend::Backend, + E: CodeExecutor + RuntimeInfo + Clone + 'static, + Block: BlockT, +{ + fn native_version(&self) -> &sp_version::NativeVersion { + self.executor.native_version() + } + + fn runtime_version( + &self, + at: &BlockId, + ) -> Result { + CallExecutor::runtime_version(self, at).map_err(|e| format!("{:?}", e)) + } +} diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs new file mode 100644 index 0000000000000..ce8bb9b7bdab0 --- /dev/null +++ b/client/service/src/client/client.rs @@ -0,0 +1,2039 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Substrate Client + +use rand::{Rng, SeedableRng}; +use rand::seq::SliceRandom; +use rand::rngs::StdRng; +use std::{ + marker::PhantomData, + collections::{HashSet, BTreeMap, HashMap}, + sync::Arc, panic::UnwindSafe, result, +}; +use log::{info, trace, warn}; +use parking_lot::{Mutex, RwLock}; +use codec::{Encode, Decode}; +use hash_db::Prefix; +use sp_core::{ + convert_hash, + storage::{well_known_keys, ChildInfo, PrefixedStorageKey, StorageData, StorageKey}, + ChangesTrieConfiguration, ExecutionContext, NativeOrEncoded, +}; +use sc_telemetry::{telemetry, SUBSTRATE_INFO}; +use sp_runtime::{ + Justification, BuildStorage, + generic::{BlockId, SignedBlock, DigestItem}, + traits::{ + BlakeTwo256, Hash, Block as BlockT, Header as HeaderT, Zero, NumberFor, + HashFor, SaturatedConversion, One, DigestFor, + }, +}; +use sp_state_machine::{ + DBValue, Backend as StateBackend, ChangesTrieAnchorBlockId, + prove_read, prove_child_read, ChangesTrieRootsStorage, ChangesTrieStorage, + ChangesTrieConfigurationRange, key_changes, key_changes_proof, +}; +use sc_executor::RuntimeVersion; +use sp_consensus::{ + Error as ConsensusError, BlockStatus, BlockImportParams, BlockCheckParams, + ImportResult, BlockOrigin, ForkChoiceStrategy, RecordProof, +}; +use sp_blockchain::{ + self as blockchain, + Backend as ChainBackend, + HeaderBackend as ChainHeaderBackend, ProvideCache, Cache, + well_known_cache_keys::Id as CacheKeyId, + HeaderMetadata, CachedHeaderMetadata, +}; +use sp_trie::StorageProof; +use sp_api::{ + CallApiAt, ConstructRuntimeApi, Core as CoreApi, ApiExt, ApiRef, ProvideRuntimeApi, + CallApiAtParams, +}; +use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; +use sc_client_api::{ + backend::{ + self, BlockImportOperation, PrunableStateChangesTrieStorage, + ClientImportOperation, Finalizer, ImportSummary, NewBlockState, + 
changes_tries_state_at_block, StorageProvider, + LockImportRun, apply_aux, + }, + client::{ + ImportNotifications, FinalityNotification, FinalityNotifications, BlockImportNotification, + ClientInfo, BlockchainEvents, BlockBackend, ProvideUncles, BadBlocks, ForkBlocks, + BlockOf, + }, + execution_extensions::ExecutionExtensions, + notifications::{StorageNotifications, StorageEventStream}, + KeyIterator, CallExecutor, ExecutorProvider, ProofProvider, + cht, UsageProvider +}; +use sp_utils::mpsc::{TracingUnboundedSender, tracing_unbounded}; +use sp_blockchain::Error; +use prometheus_endpoint::Registry; +use super::{ + genesis, block_rules::{BlockRules, LookupResult as BlockLookupResult}, +}; +use sc_light::{call_executor::prove_execution, fetcher::ChangesProof}; + +#[cfg(feature="test-helpers")] +use { + sp_core::traits::{CodeExecutor, SpawnNamed}, + sc_client_api::in_mem, + sc_executor::RuntimeInfo, + super::call_executor::LocalCallExecutor, +}; + +type NotificationSinks = Mutex>>; + +/// Substrate Client +pub struct Client where Block: BlockT { + backend: Arc, + executor: E, + storage_notifications: Mutex>, + import_notification_sinks: NotificationSinks>, + finality_notification_sinks: NotificationSinks>, + // holds the block hash currently being imported. TODO: replace this with block queue + importing_block: RwLock>, + block_rules: BlockRules, + execution_extensions: ExecutionExtensions, + config: ClientConfig, + _phantom: PhantomData, +} + +// used in importing a block, where additional changes are made after the runtime +// executed. +enum PrePostHeader { + // they are the same: no post-runtime digest items. + Same(H), + // different headers (pre, post). + Different(H, H), +} + +impl PrePostHeader { + // get a reference to the "post-header" -- the header as it should be after all changes are applied. 
+ fn post(&self) -> &H { + match *self { + PrePostHeader::Same(ref h) => h, + PrePostHeader::Different(_, ref h) => h, + } + } + + // convert to the "post-header" -- the header as it should be after all changes are applied. + fn into_post(self) -> H { + match self { + PrePostHeader::Same(h) => h, + PrePostHeader::Different(_, h) => h, + } + } +} + +/// Create an instance of in-memory client. +#[cfg(feature="test-helpers")] +pub fn new_in_mem( + executor: E, + genesis_storage: &S, + keystore: Option, + prometheus_registry: Option, + spawn_handle: Box, + config: ClientConfig, +) -> sp_blockchain::Result, + LocalCallExecutor, E>, + Block, + RA +>> where + E: CodeExecutor + RuntimeInfo, + S: BuildStorage, + Block: BlockT, +{ + new_with_backend( + Arc::new(in_mem::Backend::new()), + executor, + genesis_storage, + keystore, + spawn_handle, + prometheus_registry, + config, + ) +} + +/// Relevant client configuration items relevant for the client. +#[derive(Debug,Clone,Default)] +pub struct ClientConfig { + /// Enable the offchain worker db. + pub offchain_worker_enabled: bool, + /// If true, allows access from the runtime to write into offchain worker db. + pub offchain_indexing_api: bool, +} + +/// Create a client with the explicitly provided backend. +/// This is useful for testing backend implementations. 
+#[cfg(feature="test-helpers")] +pub fn new_with_backend( + backend: Arc, + executor: E, + build_genesis_storage: &S, + keystore: Option, + spawn_handle: Box, + prometheus_registry: Option, + config: ClientConfig, +) -> sp_blockchain::Result, Block, RA>> + where + E: CodeExecutor + RuntimeInfo, + S: BuildStorage, + Block: BlockT, + B: backend::LocalBackend + 'static, +{ + let call_executor = LocalCallExecutor::new(backend.clone(), executor, spawn_handle, config.clone()); + let extensions = ExecutionExtensions::new(Default::default(), keystore); + Client::new( + backend, + call_executor, + build_genesis_storage, + Default::default(), + Default::default(), + extensions, + prometheus_registry, + config, + ) +} + +impl BlockOf for Client where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, +{ + type Type = Block; +} + +impl LockImportRun for Client + where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, +{ + fn lock_import_and_run(&self, f: F) -> Result + where + F: FnOnce(&mut ClientImportOperation) -> Result, + Err: From, + { + let inner = || { + let _import_lock = self.backend.get_import_lock().write(); + + let mut op = ClientImportOperation { + op: self.backend.begin_operation()?, + notify_imported: None, + notify_finalized: Vec::new(), + }; + + let r = f(&mut op)?; + + let ClientImportOperation { op, notify_imported, notify_finalized } = op; + self.backend.commit_operation(op)?; + + self.notify_finalized(notify_finalized)?; + self.notify_imported(notify_imported)?; + + Ok(r) + }; + + let result = inner(); + *self.importing_block.write() = None; + + result + } +} + +impl LockImportRun for &Client + where + Block: BlockT, + B: backend::Backend, + E: CallExecutor, +{ + fn lock_import_and_run(&self, f: F) -> Result + where + F: FnOnce(&mut ClientImportOperation) -> Result, + Err: From, + { + (**self).lock_import_and_run(f) + } +} + +impl Client where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, + Block::Header: Clone, +{ + /// 
Creates new Substrate Client with given blockchain and code executor. + pub fn new( + backend: Arc, + executor: E, + build_genesis_storage: &dyn BuildStorage, + fork_blocks: ForkBlocks, + bad_blocks: BadBlocks, + execution_extensions: ExecutionExtensions, + prometheus_registry: Option, + config: ClientConfig, + ) -> sp_blockchain::Result { + if backend.blockchain().header(BlockId::Number(Zero::zero()))?.is_none() { + let genesis_storage = build_genesis_storage.build_storage()?; + let mut op = backend.begin_operation()?; + backend.begin_state_operation(&mut op, BlockId::Hash(Default::default()))?; + let state_root = op.reset_storage(genesis_storage)?; + let genesis_block = genesis::construct_genesis_block::(state_root.into()); + info!("🔨 Initializing Genesis block/state (state: {}, header-hash: {})", + genesis_block.header().state_root(), + genesis_block.header().hash() + ); + op.set_block_data( + genesis_block.deconstruct().0, + Some(vec![]), + None, + NewBlockState::Final + )?; + backend.commit_operation(op)?; + } + + Ok(Client { + backend, + executor, + storage_notifications: Mutex::new(StorageNotifications::new(prometheus_registry)), + import_notification_sinks: Default::default(), + finality_notification_sinks: Default::default(), + importing_block: Default::default(), + block_rules: BlockRules::new(fork_blocks, bad_blocks), + execution_extensions, + config, + _phantom: Default::default(), + }) + } + + /// returns a reference to the block import notification sinks + /// useful for test environments. + pub fn import_notification_sinks(&self) -> &NotificationSinks> { + &self.import_notification_sinks + } + + /// returns a reference to the finality notification sinks + /// useful for test environments. + pub fn finality_notification_sinks(&self) -> &NotificationSinks> { + &self.finality_notification_sinks + } + + /// Get a reference to the state at a given block. 
+ pub fn state_at(&self, block: &BlockId) -> sp_blockchain::Result { + self.backend.state_at(*block) + } + + /// Get the code at a given block. + pub fn code_at(&self, id: &BlockId) -> sp_blockchain::Result> { + Ok(StorageProvider::storage(self, id, &StorageKey(well_known_keys::CODE.to_vec()))? + .expect("None is returned if there's no value stored for the given key;\ + ':code' key is always defined; qed").0) + } + + /// Get the RuntimeVersion at a given block. + pub fn runtime_version_at(&self, id: &BlockId) -> sp_blockchain::Result { + self.executor.runtime_version(id) + } + + /// Reads given header and generates CHT-based header proof for CHT of given size. + pub fn header_proof_with_cht_size( + &self, + id: &BlockId, + cht_size: NumberFor, + ) -> sp_blockchain::Result<(Block::Header, StorageProof)> { + let proof_error = || sp_blockchain::Error::Backend(format!("Failed to generate header proof for {:?}", id)); + let header = self.backend.blockchain().expect_header(*id)?; + let block_num = *header.number(); + let cht_num = cht::block_to_cht_number(cht_size, block_num).ok_or_else(proof_error)?; + let cht_start = cht::start_number(cht_size, cht_num); + let mut current_num = cht_start; + let cht_range = ::std::iter::from_fn(|| { + let old_current_num = current_num; + current_num = current_num + One::one(); + Some(old_current_num) + }); + let headers = cht_range.map(|num| self.block_hash(num)); + let proof = cht::build_proof::, _, _>( + cht_size, + cht_num, + std::iter::once(block_num), + headers, + )?; + Ok((header, proof)) + } + + /// Does the same work as `key_changes_proof`, but assumes that CHTs are of passed size. 
+ pub fn key_changes_proof_with_cht_size( + &self, + first: Block::Hash, + last: Block::Hash, + min: Block::Hash, + max: Block::Hash, + storage_key: Option<&PrefixedStorageKey>, + key: &StorageKey, + cht_size: NumberFor, + ) -> sp_blockchain::Result> { + struct AccessedRootsRecorder<'a, Block: BlockT> { + storage: &'a dyn ChangesTrieStorage, NumberFor>, + min: NumberFor, + required_roots_proofs: Mutex, Block::Hash>>, + }; + + impl<'a, Block: BlockT> ChangesTrieRootsStorage, NumberFor> for + AccessedRootsRecorder<'a, Block> + { + fn build_anchor(&self, hash: Block::Hash) + -> Result>, String> + { + self.storage.build_anchor(hash) + } + + fn root( + &self, + anchor: &ChangesTrieAnchorBlockId>, + block: NumberFor, + ) -> Result, String> { + let root = self.storage.root(anchor, block)?; + if block < self.min { + if let Some(ref root) = root { + self.required_roots_proofs.lock().insert( + block, + root.clone() + ); + } + } + Ok(root) + } + } + + impl<'a, Block: BlockT> ChangesTrieStorage, NumberFor> for + AccessedRootsRecorder<'a, Block> + { + fn as_roots_storage(&self) + -> &dyn sp_state_machine::ChangesTrieRootsStorage, NumberFor> + { + self + } + + fn with_cached_changed_keys( + &self, + root: &Block::Hash, + functor: &mut dyn FnMut(&HashMap, HashSet>>), + ) -> bool { + self.storage.with_cached_changed_keys(root, functor) + } + + fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { + self.storage.get(key, prefix) + } + } + + let first_number = self.backend.blockchain() + .expect_block_number_from_id(&BlockId::Hash(first))?; + let (storage, configs) = self.require_changes_trie(first_number, last, true)?; + let min_number = self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(min))?; + + let recording_storage = AccessedRootsRecorder:: { + storage: storage.storage(), + min: min_number, + required_roots_proofs: Mutex::new(BTreeMap::new()), + }; + + let max_number = std::cmp::min( + self.backend.blockchain().info().best_number, + 
self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(max))?, + ); + + // fetch key changes proof + let mut proof = Vec::new(); + for (config_zero, config_end, config) in configs { + let last_number = self.backend.blockchain() + .expect_block_number_from_id(&BlockId::Hash(last))?; + let config_range = ChangesTrieConfigurationRange { + config: &config, + zero: config_zero, + end: config_end.map(|(config_end_number, _)| config_end_number), + }; + let proof_range = key_changes_proof::, _>( + config_range, + &recording_storage, + first_number, + &ChangesTrieAnchorBlockId { + hash: convert_hash(&last), + number: last_number, + }, + max_number, + storage_key, + &key.0, + ) + .map_err(|err| sp_blockchain::Error::ChangesTrieAccessFailed(err))?; + proof.extend(proof_range); + } + + // now gather proofs for all changes tries roots that were touched during key_changes_proof + // execution AND are unknown (i.e. replaced with CHT) to the requester + let roots = recording_storage.required_roots_proofs.into_inner(); + let roots_proof = self.changes_trie_roots_proof(cht_size, roots.keys().cloned())?; + + Ok(ChangesProof { + max_block: max_number, + proof, + roots: roots.into_iter().map(|(n, h)| (n, convert_hash(&h))).collect(), + roots_proof, + }) + } + + /// Generate CHT-based proof for roots of changes tries at given blocks. 
+ fn changes_trie_roots_proof>>( + &self, + cht_size: NumberFor, + blocks: I + ) -> sp_blockchain::Result { + // most probably we have touched several changes tries that are parts of the single CHT + // => GroupBy changes tries by CHT number and then gather proof for the whole group at once + let mut proofs = Vec::new(); + + cht::for_each_cht_group::(cht_size, blocks, |_, cht_num, cht_blocks| { + let cht_proof = self.changes_trie_roots_proof_at_cht(cht_size, cht_num, cht_blocks)?; + proofs.push(cht_proof); + Ok(()) + }, ())?; + + Ok(StorageProof::merge(proofs)) + } + + /// Generates CHT-based proof for roots of changes tries at given blocks (that are part of single CHT). + fn changes_trie_roots_proof_at_cht( + &self, + cht_size: NumberFor, + cht_num: NumberFor, + blocks: Vec> + ) -> sp_blockchain::Result { + let cht_start = cht::start_number(cht_size, cht_num); + let mut current_num = cht_start; + let cht_range = ::std::iter::from_fn(|| { + let old_current_num = current_num; + current_num = current_num + One::one(); + Some(old_current_num) + }); + let roots = cht_range + .map(|num| self.header(&BlockId::Number(num)) + .map(|block| + block.and_then(|block| block.digest().log(DigestItem::as_changes_trie_root).cloned())) + ); + let proof = cht::build_proof::, _, _>( + cht_size, + cht_num, + blocks, + roots, + )?; + Ok(proof) + } + + /// Returns changes trie storage and all configurations that have been active in the range [first; last]. + /// + /// Configurations are returned in descending order (and obviously never overlap). + /// If fail_if_disabled is false, returns maximal consequent configurations ranges, starting from last and + /// stopping on either first, or when CT have been disabled. + /// If fail_if_disabled is true, fails when there's a subrange where CT have been disabled + /// inside first..last blocks range. 
+ fn require_changes_trie( + &self, + first: NumberFor, + last: Block::Hash, + fail_if_disabled: bool, + ) -> sp_blockchain::Result<( + &dyn PrunableStateChangesTrieStorage, + Vec<(NumberFor, Option<(NumberFor, Block::Hash)>, ChangesTrieConfiguration)>, + )> { + let storage = match self.backend.changes_trie_storage() { + Some(storage) => storage, + None => return Err(sp_blockchain::Error::ChangesTriesNotSupported), + }; + + let mut configs = Vec::with_capacity(1); + let mut current = last; + loop { + let config_range = storage.configuration_at(&BlockId::Hash(current))?; + match config_range.config { + Some(config) => configs.push((config_range.zero.0, config_range.end, config)), + None if !fail_if_disabled => return Ok((storage, configs)), + None => return Err(sp_blockchain::Error::ChangesTriesNotSupported), + } + + if config_range.zero.0 < first { + break; + } + + current = *self.backend.blockchain().expect_header(BlockId::Hash(config_range.zero.1))?.parent_hash(); + } + + Ok((storage, configs)) + } + + /// Apply a checked and validated block to an operation. If a justification is provided + /// then `finalized` *must* be true. + fn apply_block( + &self, + operation: &mut ClientImportOperation, + import_block: BlockImportParams>, + new_cache: HashMap>, + ) -> sp_blockchain::Result where + Self: ProvideRuntimeApi, + >::Api: CoreApi + + ApiExt, + { + + info!("Applying block"); + let BlockImportParams { + origin, + header, + justification, + post_digests, + body, + storage_changes, + finalized, + auxiliary, + fork_choice, + intermediates, + import_existing, + .. 
+ } = import_block; + + assert!(justification.is_some() && finalized || justification.is_none()); + + if !intermediates.is_empty() { + return Err(Error::IncompletePipeline) + } + + let fork_choice = fork_choice.ok_or(Error::IncompletePipeline)?; + + let import_headers = if post_digests.is_empty() { + PrePostHeader::Same(header) + } else { + let mut post_header = header.clone(); + for item in post_digests { + post_header.digest_mut().push(item); + } + PrePostHeader::Different(header, post_header) + }; + + let hash = import_headers.post().hash(); + let height = (*import_headers.post().number()).saturated_into::(); + + *self.importing_block.write() = Some(hash); + + let result = self.execute_and_import_block( + operation, + origin, + hash, + import_headers, + justification, + body, + storage_changes, + new_cache, + finalized, + auxiliary, + fork_choice, + import_existing, + ); + + if let Ok(ImportResult::Imported(ref aux)) = result { + if aux.is_new_best { + // don't send telemetry block import events during initial sync for every + // block to avoid spamming the telemetry server, these events will be randomly + // sent at a rate of 1/10. 
+ if origin != BlockOrigin::NetworkInitialSync || + rand::thread_rng().gen_bool(0.1) + { + telemetry!(SUBSTRATE_INFO; "block.import"; + "height" => height, + "best" => ?hash, + "origin" => ?origin + ); + } + } + } + + result + } + + fn execute_and_import_block( + &self, + operation: &mut ClientImportOperation, + origin: BlockOrigin, + hash: Block::Hash, + import_headers: PrePostHeader, + justification: Option, + body: Option>, + storage_changes: Option, Block>>, + new_cache: HashMap>, + finalized: bool, + aux: Vec<(Vec, Option>)>, + fork_choice: ForkChoiceStrategy, + import_existing: bool, + ) -> sp_blockchain::Result where + Self: ProvideRuntimeApi, + >::Api: CoreApi + + ApiExt, + { + + info!("Executing and importing block"); + let parent_hash = import_headers.post().parent_hash().clone(); + let status = self.backend.blockchain().status(BlockId::Hash(hash))?; + match (import_existing, status) { + (false, blockchain::BlockStatus::InChain) => return Ok(ImportResult::AlreadyInChain), + (false, blockchain::BlockStatus::Unknown) => {}, + (true, blockchain::BlockStatus::InChain) => {}, + (true, blockchain::BlockStatus::Unknown) => + return Err(Error::UnknownBlock(format!("{:?}", hash))), + } + + let info = self.backend.blockchain().info(); + + // the block is lower than our last finalized block so it must revert + // finality, refusing import. + if *import_headers.post().number() <= info.finalized_number { + return Err(sp_blockchain::Error::NotInFinalizedChain); + } + + // this is a fairly arbitrary choice of where to draw the line on making notifications, + // but the general goal is to only make notifications when we are already fully synced + // and get a new chain head. 
+ let make_notifications = match origin { + BlockOrigin::NetworkBroadcast | BlockOrigin::Own | BlockOrigin::ConsensusBroadcast => true, + BlockOrigin::Genesis | BlockOrigin::NetworkInitialSync | BlockOrigin::File => false, + }; + + let storage_changes = match storage_changes { + Some(storage_changes) => { + self.backend.begin_state_operation(&mut operation.op, BlockId::Hash(parent_hash))?; + + // ensure parent block is finalized to maintain invariant that + // finality is called sequentially. + if finalized { + self.apply_finality_with_block_hash( + operation, + parent_hash, + None, + info.best_hash, + make_notifications, + )?; + } + + operation.op.update_cache(new_cache); + + let ( + main_sc, + child_sc, + offchain_sc, + tx, _, + changes_trie_tx, + ) = storage_changes.into_inner(); + + if self.config.offchain_indexing_api { + operation.op.update_offchain_storage(offchain_sc)?; + } + + operation.op.update_db_storage(tx)?; + operation.op.update_storage(main_sc.clone(), child_sc.clone())?; + + if let Some(changes_trie_transaction) = changes_trie_tx { + operation.op.update_changes_trie(changes_trie_transaction)?; + } + + Some((main_sc, child_sc)) + }, + None => None, + }; + + let is_new_best = finalized || match fork_choice { + ForkChoiceStrategy::LongestChain => import_headers.post().number() > &info.best_number, + ForkChoiceStrategy::Custom(v) => v, + }; + + let leaf_state = if finalized { + NewBlockState::Final + } else if is_new_best { + NewBlockState::Best + } else { + NewBlockState::Normal + }; + + let tree_route = if is_new_best && info.best_hash != parent_hash { + let route_from_best = sp_blockchain::tree_route( + self.backend.blockchain(), + info.best_hash, + parent_hash, + )?; + Some(route_from_best) + } else { + None + }; + + trace!( + "Imported {}, (#{}), best={}, origin={:?}", + hash, + import_headers.post().number(), + is_new_best, + origin, + ); + + operation.op.set_block_data( + import_headers.post().clone(), + body, + justification, + leaf_state, + 
)?; + + operation.op.insert_aux(aux)?; + + // we only notify when we are already synced to the tip of the chain or if this import triggers a re-org + if make_notifications || tree_route.is_some() { + if finalized { + operation.notify_finalized.push(hash); + } + + operation.notify_imported = Some(ImportSummary { + hash, + origin, + header: import_headers.into_post(), + is_new_best, + storage_changes, + tree_route, + }) + } + + Ok(ImportResult::imported(is_new_best)) + } + + /// Prepares the storage changes for a block. + /// + /// It checks if the state should be enacted and if the `import_block` maybe already provides + /// the required storage changes. If the state should be enacted and the storage changes are not + /// provided, the block is re-executed to get the storage changes. + fn prepare_block_storage_changes( + &self, + import_block: &mut BlockImportParams>, + ) -> sp_blockchain::Result> + where + Self: ProvideRuntimeApi, + >::Api: CoreApi + + ApiExt, + { + + info!("Preparing block storage changes"); + let parent_hash = import_block.header.parent_hash(); + let at = BlockId::Hash(*parent_hash); + let enact_state = match self.block_status(&at)? { + BlockStatus::Unknown => return Ok(Some(ImportResult::UnknownParent)), + BlockStatus::InChainWithState | BlockStatus::Queued => true, + BlockStatus::InChainPruned if import_block.allow_missing_state => false, + BlockStatus::InChainPruned => return Ok(Some(ImportResult::MissingState)), + BlockStatus::KnownBad => return Ok(Some(ImportResult::KnownBad)), + }; + + match (enact_state, &mut import_block.storage_changes, &mut import_block.body) { + // We have storage changes and should enact the state, so we don't need to do anything + // here + (true, Some(_), _) => {}, + // We should enact state, but don't have any storage changes, so we need to execute the + // block. 
+ (true, ref mut storage_changes @ None, Some(ref body)) => { + let runtime_api = self.runtime_api(); + let execution_context = if import_block.origin == BlockOrigin::NetworkInitialSync { + ExecutionContext::Syncing + } else { + ExecutionContext::Importing + }; + + + info!("Will execute block on client"); + match self.backend.blockchain().body(BlockId::Hash(*parent_hash)).unwrap() { + Some(mut previous_block_extrinsics) => { + if previous_block_extrinsics.is_empty() { + info!("previous block extrinsics has 0 length"); + runtime_api.execute_block_with_context( + &at, + execution_context, + Block::new(import_block.header.clone(), body.clone()), + )?; + } else { + info!("previous block has extrinsics"); + let extrinsics_hash = BlakeTwo256::hash(&body.clone().encode()); + let mut shuffled_extrinsics = previous_block_extrinsics.clone(); + let mut rng: StdRng = SeedableRng::from_seed(extrinsics_hash.to_fixed_bytes()); + shuffled_extrinsics.shuffle(&mut rng); + runtime_api.execute_block_with_context( + &at, + execution_context, + Block::new(import_block.header.clone(), shuffled_extrinsics), + )?; + } + }, + None => { + info!("previous block is empty"); + } + } + + let state = self.backend.state_at(at)?; + let changes_trie_state = changes_tries_state_at_block( + &at, + self.backend.changes_trie_storage(), + )?; + + let gen_storage_changes = runtime_api.into_storage_changes( + &state, + changes_trie_state.as_ref(), + *parent_hash, + )?; + + // if import_block.header.state_root() + // != &gen_storage_changes.transaction_storage_root + // { + // return Err(Error::InvalidStateRoot) + // } else { + **storage_changes = Some(gen_storage_changes); + // } + }, + // No block body, no storage changes + (true, None, None) => {}, + // We should not enact the state, so we set the storage changes to `None`. 
+ (false, changes, _) => { + changes.take(); + } + }; + + Ok(None) + } + + fn apply_finality_with_block_hash( + &self, + operation: &mut ClientImportOperation, + block: Block::Hash, + justification: Option, + best_block: Block::Hash, + notify: bool, + ) -> sp_blockchain::Result<()> { + // find tree route from last finalized to given block. + let last_finalized = self.backend.blockchain().last_finalized()?; + + if block == last_finalized { + warn!("Possible safety violation: attempted to re-finalize last finalized block {:?} ", last_finalized); + return Ok(()); + } + + let route_from_finalized = sp_blockchain::tree_route(self.backend.blockchain(), last_finalized, block)?; + + if let Some(retracted) = route_from_finalized.retracted().get(0) { + warn!("Safety violation: attempted to revert finalized block {:?} which is not in the \ + same chain as last finalized {:?}", retracted, last_finalized); + + return Err(sp_blockchain::Error::NotInFinalizedChain); + } + + let route_from_best = sp_blockchain::tree_route(self.backend.blockchain(), best_block, block)?; + + // if the block is not a direct ancestor of the current best chain, + // then some other block is the common ancestor. + if route_from_best.common_block().hash != block { + // NOTE: we're setting the finalized block as best block, this might + // be slightly inaccurate since we might have a "better" block + // further along this chain, but since best chain selection logic is + // plugable we cannot make a better choice here. usages that need + // an accurate "best" block need to go through `SelectChain` + // instead. 
+ operation.op.mark_head(BlockId::Hash(block))?; + } + + let enacted = route_from_finalized.enacted(); + assert!(enacted.len() > 0); + for finalize_new in &enacted[..enacted.len() - 1] { + operation.op.mark_finalized(BlockId::Hash(finalize_new.hash), None)?; + } + + assert_eq!(enacted.last().map(|e| e.hash), Some(block)); + operation.op.mark_finalized(BlockId::Hash(block), justification)?; + + if notify { + // sometimes when syncing, tons of blocks can be finalized at once. + // we'll send notifications spuriously in that case. + const MAX_TO_NOTIFY: usize = 256; + let enacted = route_from_finalized.enacted(); + let start = enacted.len() - std::cmp::min(enacted.len(), MAX_TO_NOTIFY); + for finalized in &enacted[start..] { + operation.notify_finalized.push(finalized.hash); + } + } + + Ok(()) + } + + fn notify_finalized( + &self, + notify_finalized: Vec, + ) -> sp_blockchain::Result<()> { + let mut sinks = self.finality_notification_sinks.lock(); + + if notify_finalized.is_empty() { + // cleanup any closed finality notification sinks + // since we won't be running the loop below which + // would also remove any closed sinks. + sinks.retain(|sink| !sink.is_closed()); + + return Ok(()); + } + + // We assume the list is sorted and only want to inform the + // telemetry once about the finalized block. + if let Some(last) = notify_finalized.last() { + let header = self.header(&BlockId::Hash(*last))? + .expect( + "Header already known to exist in DB because it is \ + indicated in the tree route; qed" + ); + + telemetry!(SUBSTRATE_INFO; "notify.finalized"; + "height" => format!("{}", header.number()), + "best" => ?last, + ); + } + + for finalized_hash in notify_finalized { + let header = self.header(&BlockId::Hash(finalized_hash))? 
+ .expect( + "Header already known to exist in DB because it is \ + indicated in the tree route; qed" + ); + + let notification = FinalityNotification { + header, + hash: finalized_hash, + }; + + sinks.retain(|sink| sink.unbounded_send(notification.clone()).is_ok()); + } + + Ok(()) + } + + fn notify_imported( + &self, + notify_import: Option>, + ) -> sp_blockchain::Result<()> { + let notify_import = match notify_import { + Some(notify_import) => notify_import, + None => { + // cleanup any closed import notification sinks since we won't + // be sending any notifications below which would remove any + // closed sinks. this is necessary since during initial sync we + // won't send any import notifications which could lead to a + // temporary leak of closed/discarded notification sinks (e.g. + // from consensus code). + self.import_notification_sinks + .lock() + .retain(|sink| !sink.is_closed()); + + return Ok(()); + } + }; + + if let Some(storage_changes) = notify_import.storage_changes { + // TODO [ToDr] How to handle re-orgs? Should we re-emit all storage changes? + self.storage_notifications.lock() + .trigger( + ¬ify_import.hash, + storage_changes.0.into_iter(), + storage_changes.1.into_iter().map(|(sk, v)| (sk, v.into_iter())), + ); + } + + let notification = BlockImportNotification:: { + hash: notify_import.hash, + origin: notify_import.origin, + header: notify_import.header, + is_new_best: notify_import.is_new_best, + tree_route: notify_import.tree_route.map(Arc::new), + }; + + self.import_notification_sinks.lock() + .retain(|sink| sink.unbounded_send(notification.clone()).is_ok()); + + Ok(()) + } + + /// Attempts to revert the chain by `n` blocks guaranteeing that no block is + /// reverted past the last finalized block. Returns the number of blocks + /// that were successfully reverted. 
+ pub fn revert(&self, n: NumberFor) -> sp_blockchain::Result> { + let (number, _) = self.backend.revert(n, false)?; + Ok(number) + } + + /// Attempts to revert the chain by `n` blocks disregarding finality. This method will revert + /// any finalized blocks as requested and can potentially leave the node in an inconsistent + /// state. Other modules in the system that persist data and that rely on finality + /// (e.g. consensus parts) will be unaffected by the revert. Use this method with caution and + /// making sure that no other data needs to be reverted for consistency aside from the block + /// data. If `blacklist` is set to true, will also blacklist reverted blocks from finalizing + /// again. The blacklist is reset upon client restart. + /// + /// Returns the number of blocks that were successfully reverted. + pub fn unsafe_revert( + &mut self, + n: NumberFor, + blacklist: bool, + ) -> sp_blockchain::Result> { + let (number, reverted) = self.backend.revert(n, true)?; + if blacklist { + for b in reverted { + self.block_rules.mark_bad(b); + } + } + Ok(number) + } + + /// Get blockchain info. + pub fn chain_info(&self) -> blockchain::Info { + self.backend.blockchain().info() + } + + /// Get block status. 
+ pub fn block_status(&self, id: &BlockId) -> sp_blockchain::Result { + // this can probably be implemented more efficiently + if let BlockId::Hash(ref h) = id { + if self.importing_block.read().as_ref().map_or(false, |importing| h == importing) { + return Ok(BlockStatus::Queued); + } + } + let hash_and_number = match id.clone() { + BlockId::Hash(hash) => self.backend.blockchain().number(hash)?.map(|n| (hash, n)), + BlockId::Number(n) => self.backend.blockchain().hash(n)?.map(|hash| (hash, n)), + }; + match hash_and_number { + Some((hash, number)) => { + if self.backend.have_state_at(&hash, number) { + Ok(BlockStatus::InChainWithState) + } else { + Ok(BlockStatus::InChainPruned) + } + } + None => Ok(BlockStatus::Unknown), + } + } + + /// Get block header by id. + pub fn header(&self, id: &BlockId) -> sp_blockchain::Result::Header>> { + self.backend.blockchain().header(*id) + } + + /// Get block body by id. + pub fn body(&self, id: &BlockId) -> sp_blockchain::Result::Extrinsic>>> { + self.backend.blockchain().body(*id) + } + + /// Gets the uncles of the block with `target_hash` going back `max_generation` ancestors. + pub fn uncles(&self, target_hash: Block::Hash, max_generation: NumberFor) -> sp_blockchain::Result> { + let load_header = |id: Block::Hash| -> sp_blockchain::Result { + match self.backend.blockchain().header(BlockId::Hash(id))? 
{ + Some(hdr) => Ok(hdr), + None => Err(Error::UnknownBlock(format!("{:?}", id))), + } + }; + + let genesis_hash = self.backend.blockchain().info().genesis_hash; + if genesis_hash == target_hash { return Ok(Vec::new()); } + + let mut current_hash = target_hash; + let mut current = load_header(current_hash)?; + let mut ancestor_hash = *current.parent_hash(); + let mut ancestor = load_header(ancestor_hash)?; + let mut uncles = Vec::new(); + + for _generation in 0..max_generation.saturated_into() { + let children = self.backend.blockchain().children(ancestor_hash)?; + uncles.extend(children.into_iter().filter(|h| h != ¤t_hash)); + current_hash = ancestor_hash; + if genesis_hash == current_hash { break; } + current = ancestor; + ancestor_hash = *current.parent_hash(); + ancestor = load_header(ancestor_hash)?; + } + trace!("Collected {} uncles", uncles.len()); + Ok(uncles) + } + + /// Prepare in-memory header that is used in execution environment. + fn prepare_environment_block(&self, parent: &BlockId) -> sp_blockchain::Result { + let parent_header = self.backend.blockchain().expect_header(*parent)?; + Ok(<::Header as HeaderT>::new( + self.backend.blockchain().expect_block_number_from_id(parent)? + One::one(), + Default::default(), + Default::default(), + parent_header.hash(), + Default::default(), + )) + } +} + +impl UsageProvider for Client where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, +{ + /// Get usage info about current client. 
+ fn usage_info(&self) -> ClientInfo { + ClientInfo { + chain: self.chain_info(), + usage: self.backend.usage_info(), + } + } +} + +impl ProofProvider for Client where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, +{ + fn read_proof( + &self, + id: &BlockId, + keys: &mut dyn Iterator, + ) -> sp_blockchain::Result { + self.state_at(id) + .and_then(|state| prove_read(state, keys) + .map_err(Into::into)) + } + + fn read_child_proof( + &self, + id: &BlockId, + child_info: &ChildInfo, + keys: &mut dyn Iterator, + ) -> sp_blockchain::Result { + self.state_at(id) + .and_then(|state| prove_child_read(state, child_info, keys) + .map_err(Into::into)) + } + + fn execution_proof( + &self, + id: &BlockId, + method: &str, + call_data: &[u8] + ) -> sp_blockchain::Result<(Vec, StorageProof)> { + info!("Execution proof?"); + // Make sure we include the `:code` and `:heap_pages` in the execution proof to be + // backwards compatible. + // + // TODO: Remove when solved: https://github.com/paritytech/substrate/issues/5047 + let code_proof = self.read_proof( + id, + &mut [well_known_keys::CODE, well_known_keys::HEAP_PAGES].iter().map(|v| *v), + )?; + + let state = self.state_at(id)?; + let header = self.prepare_environment_block(id)?; + prove_execution( + state, + header, + &self.executor, + method, + call_data, + ).map(|(r, p)| { + (r, StorageProof::merge(vec![p, code_proof])) + }) + } + + fn header_proof(&self, id: &BlockId) -> sp_blockchain::Result<(Block::Header, StorageProof)> { + self.header_proof_with_cht_size(id, cht::size()) + } + + fn key_changes_proof( + &self, + first: Block::Hash, + last: Block::Hash, + min: Block::Hash, + max: Block::Hash, + storage_key: Option<&PrefixedStorageKey>, + key: &StorageKey, + ) -> sp_blockchain::Result> { + self.key_changes_proof_with_cht_size( + first, + last, + min, + max, + storage_key, + key, + cht::size(), + ) + } +} + + +impl BlockBuilderProvider for Client + where + B: backend::Backend + Send + Sync + 'static, + E: 
CallExecutor + Send + Sync + 'static, + Block: BlockT, + Self: ChainHeaderBackend + ProvideRuntimeApi, + >::Api: ApiExt> + + BlockBuilderApi, +{ + fn new_block_at>( + &self, + parent: &BlockId, + inherent_digests: DigestFor, + record_proof: R, + ) -> sp_blockchain::Result> { + info!("New block at"); + sc_block_builder::BlockBuilder::new( + self, + self.expect_block_hash_from_id(parent)?, + self.expect_block_number_from_id(parent)?, + record_proof.into(), + inherent_digests, + &self.backend + ) + } + + fn new_block( + &self, + inherent_digests: DigestFor, + ) -> sp_blockchain::Result> { + info!("New block"); + let info = self.chain_info(); + sc_block_builder::BlockBuilder::new( + self, + info.best_hash, + info.best_number, + RecordProof::No, + inherent_digests, + &self.backend, + ) + } +} + +impl ExecutorProvider for Client where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, +{ + type Executor = E; + + fn executor(&self) -> &Self::Executor { + &self.executor + } + + fn execution_extensions(&self) -> &ExecutionExtensions { + &self.execution_extensions + } +} + +impl StorageProvider for Client where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, +{ + fn storage_keys(&self, id: &BlockId, key_prefix: &StorageKey) -> sp_blockchain::Result> { + let keys = self.state_at(id)?.keys(&key_prefix.0).into_iter().map(StorageKey).collect(); + Ok(keys) + } + + fn storage_pairs(&self, id: &BlockId, key_prefix: &StorageKey) + -> sp_blockchain::Result> + { + let state = self.state_at(id)?; + let keys = state + .keys(&key_prefix.0) + .into_iter() + .map(|k| { + let d = state.storage(&k).ok().flatten().unwrap_or_default(); + (StorageKey(k), StorageData(d)) + }) + .collect(); + Ok(keys) + } + + + fn storage_keys_iter<'a>( + &self, + id: &BlockId, + prefix: Option<&'a StorageKey>, + start_key: Option<&StorageKey> + ) -> sp_blockchain::Result> { + let state = self.state_at(id)?; + let start_key = start_key + .or(prefix) + .map(|key| key.0.clone()) + 
.unwrap_or_else(Vec::new); + Ok(KeyIterator::new(state, prefix, start_key)) + } + + + fn storage( + &self, + id: &BlockId, + key: &StorageKey, + ) -> sp_blockchain::Result> { + Ok(self.state_at(id)? + .storage(&key.0).map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? + .map(StorageData) + ) + } + + + fn storage_hash( + &self, + id: &BlockId, + key: &StorageKey, + ) -> sp_blockchain::Result> { + Ok(self.state_at(id)? + .storage_hash(&key.0).map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? + ) + } + + fn child_storage_keys( + &self, + id: &BlockId, + child_info: &ChildInfo, + key_prefix: &StorageKey + ) -> sp_blockchain::Result> { + let keys = self.state_at(id)? + .child_keys(child_info, &key_prefix.0) + .into_iter() + .map(StorageKey) + .collect(); + Ok(keys) + } + + fn child_storage( + &self, + id: &BlockId, + child_info: &ChildInfo, + key: &StorageKey + ) -> sp_blockchain::Result> { + Ok(self.state_at(id)? + .child_storage(child_info, &key.0) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? + .map(StorageData)) + } + + fn child_storage_hash( + &self, + id: &BlockId, + child_info: &ChildInfo, + key: &StorageKey + ) -> sp_blockchain::Result> { + Ok(self.state_at(id)? + .child_storage_hash(child_info, &key.0) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? 
+ ) + } + + fn max_key_changes_range( + &self, + first: NumberFor, + last: BlockId, + ) -> sp_blockchain::Result, BlockId)>> { + let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?; + let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?; + if first > last_number { + return Err(sp_blockchain::Error::ChangesTrieAccessFailed("Invalid changes trie range".into())); + } + + let (storage, configs) = match self.require_changes_trie(first, last_hash, false).ok() { + Some((storage, configs)) => (storage, configs), + None => return Ok(None), + }; + + let first_available_changes_trie = configs.last().map(|config| config.0); + match first_available_changes_trie { + Some(first_available_changes_trie) => { + let oldest_unpruned = storage.oldest_pruned_digest_range_end(); + let first = std::cmp::max(first_available_changes_trie, oldest_unpruned); + Ok(Some((first, last))) + }, + None => Ok(None) + } + } + + fn key_changes( + &self, + first: NumberFor, + last: BlockId, + storage_key: Option<&PrefixedStorageKey>, + key: &StorageKey + ) -> sp_blockchain::Result, u32)>> { + let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?; + let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?; + let (storage, configs) = self.require_changes_trie(first, last_hash, true)?; + + let mut result = Vec::new(); + let best_number = self.backend.blockchain().info().best_number; + for (config_zero, config_end, config) in configs { + let range_first = ::std::cmp::max(first, config_zero + One::one()); + let range_anchor = match config_end { + Some((config_end_number, config_end_hash)) => if last_number > config_end_number { + ChangesTrieAnchorBlockId { hash: config_end_hash, number: config_end_number } + } else { + ChangesTrieAnchorBlockId { hash: convert_hash(&last_hash), number: last_number } + }, + None => ChangesTrieAnchorBlockId { hash: convert_hash(&last_hash), number: last_number }, + }; + + let 
config_range = ChangesTrieConfigurationRange { + config: &config, + zero: config_zero.clone(), + end: config_end.map(|(config_end_number, _)| config_end_number), + }; + let result_range: Vec<(NumberFor, u32)> = key_changes::, _>( + config_range, + storage.storage(), + range_first, + &range_anchor, + best_number, + storage_key, + &key.0) + .and_then(|r| r.map(|r| r.map(|(block, tx)| (block, tx))).collect::>()) + .map_err(|err| sp_blockchain::Error::ChangesTrieAccessFailed(err))?; + result.extend(result_range); + } + + Ok(result) + } +} + +impl HeaderMetadata for Client where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, +{ + type Error = sp_blockchain::Error; + + fn header_metadata(&self, hash: Block::Hash) -> Result, Self::Error> { + self.backend.blockchain().header_metadata(hash) + } + + fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata) { + self.backend.blockchain().insert_header_metadata(hash, metadata) + } + + fn remove_header_metadata(&self, hash: Block::Hash) { + self.backend.blockchain().remove_header_metadata(hash) + } +} + +impl ProvideUncles for Client where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, +{ + fn uncles(&self, target_hash: Block::Hash, max_generation: NumberFor) -> sp_blockchain::Result> { + Ok(Client::uncles(self, target_hash, max_generation)? 
+ .into_iter() + .filter_map(|hash| Client::header(self, &BlockId::Hash(hash)).unwrap_or(None)) + .collect() + ) + } +} + +impl ChainHeaderBackend for Client where + B: backend::Backend, + E: CallExecutor + Send + Sync, + Block: BlockT, + RA: Send + Sync, +{ + fn header(&self, id: BlockId) -> sp_blockchain::Result> { + self.backend.blockchain().header(id) + } + + fn info(&self) -> blockchain::Info { + self.backend.blockchain().info() + } + + fn status(&self, id: BlockId) -> sp_blockchain::Result { + self.backend.blockchain().status(id) + } + + fn number(&self, hash: Block::Hash) -> sp_blockchain::Result::Header as HeaderT>::Number>> { + self.backend.blockchain().number(hash) + } + + fn hash(&self, number: NumberFor) -> sp_blockchain::Result> { + self.backend.blockchain().hash(number) + } +} + +impl sp_runtime::traits::BlockIdTo for Client where + B: backend::Backend, + E: CallExecutor + Send + Sync, + Block: BlockT, + RA: Send + Sync, +{ + type Error = Error; + + fn to_hash(&self, block_id: &BlockId) -> sp_blockchain::Result> { + self.block_hash_from_id(block_id) + } + + fn to_number(&self, block_id: &BlockId) -> sp_blockchain::Result>> { + self.block_number_from_id(block_id) + } +} + +impl ChainHeaderBackend for &Client where + B: backend::Backend, + E: CallExecutor + Send + Sync, + Block: BlockT, + RA: Send + Sync, +{ + fn header(&self, id: BlockId) -> sp_blockchain::Result> { + (**self).backend.blockchain().header(id) + } + + fn info(&self) -> blockchain::Info { + (**self).backend.blockchain().info() + } + + fn status(&self, id: BlockId) -> sp_blockchain::Result { + (**self).status(id) + } + + fn number(&self, hash: Block::Hash) -> sp_blockchain::Result::Header as HeaderT>::Number>> { + (**self).number(hash) + } + + fn hash(&self, number: NumberFor) -> sp_blockchain::Result> { + (**self).hash(number) + } +} + +impl ProvideCache for Client where + B: backend::Backend, + Block: BlockT, +{ + fn cache(&self) -> Option>> { + self.backend.blockchain().cache() + } +} + 
+impl ProvideRuntimeApi for Client where + B: backend::Backend, + E: CallExecutor + Send + Sync, + Block: BlockT, + RA: ConstructRuntimeApi, +{ + type Api = >::RuntimeApi; + + fn runtime_api<'a>(&'a self) -> ApiRef<'a, Self::Api> { + RA::construct_runtime_api(self) + } +} + +impl CallApiAt for Client where + B: backend::Backend, + E: CallExecutor + Send + Sync, + Block: BlockT, +{ + type Error = Error; + type StateBackend = B::State; + + fn call_api_at< + 'a, + R: Encode + Decode + PartialEq, + NC: FnOnce() -> result::Result + UnwindSafe, + C: CoreApi, + >( + &self, + params: CallApiAtParams<'a, Block, C, NC, B::State>, + ) -> sp_blockchain::Result> { + let core_api = params.core_api; + let at = params.at; + + let (manager, extensions) = self.execution_extensions.manager_and_extensions( + at, + params.context, + ); + + self.executor.contextual_call::<_, fn(_,_) -> _,_,_>( + || core_api.initialize_block(at, &self.prepare_environment_block(at)?), + at, + params.function, + ¶ms.arguments, + params.overlayed_changes, + params.offchain_changes, + Some(params.storage_transaction_cache), + params.initialize_block, + manager, + params.native_call, + params.recorder, + Some(extensions), + ) + } + + fn runtime_version_at(&self, at: &BlockId) -> sp_blockchain::Result { + self.runtime_version_at(at) + } +} + +/// NOTE: only use this implementation when you are sure there are NO consensus-level BlockImport +/// objects. Otherwise, importing blocks directly into the client would be bypassing +/// important verification work. +impl sp_consensus::BlockImport for &Client where + B: backend::Backend, + E: CallExecutor + Send + Sync, + Block: BlockT, + Client: ProvideRuntimeApi, + as ProvideRuntimeApi>::Api: CoreApi + + ApiExt, +{ + type Error = ConsensusError; + type Transaction = backend::TransactionFor; + + /// Import a checked and validated block. If a justification is provided in + /// `BlockImportParams` then `finalized` *must* be true. 
+ /// + /// NOTE: only use this implementation when there are NO consensus-level BlockImport + /// objects. Otherwise, importing blocks directly into the client would be bypassing + /// important verification work. + /// + /// If you are not sure that there are no BlockImport objects provided by the consensus + /// algorithm, don't use this function. + fn import_block( + &mut self, + mut import_block: BlockImportParams>, + new_cache: HashMap>, + ) -> Result { + info!("Unsafe block importing"); + let span = tracing::span!(tracing::Level::DEBUG, "import_block"); + let _enter = span.enter(); + + info!("Calling prepare changes"); + if let Some(res) = self.prepare_block_storage_changes(&mut import_block).map_err(|e| { + warn!("Block prepare storage changes error:\n{:?}", e); + ConsensusError::ClientImport(e.to_string()) + })? { + return Ok(res) + } + + self.lock_import_and_run(|operation| { + self.apply_block(operation, import_block, new_cache) + }).map_err(|e| { + warn!("Block import error:\n{:?}", e); + ConsensusError::ClientImport(e.to_string()).into() + }) + } + + /// Check block preconditions. + fn check_block( + &mut self, + block: BlockCheckParams, + ) -> Result { + let BlockCheckParams { hash, number, parent_hash, allow_missing_state, import_existing } = block; + + // Check the block against white and black lists if any are defined + // (i.e. fork blocks and bad blocks respectively) + match self.block_rules.lookup(number, &hash) { + BlockLookupResult::KnownBad => { + trace!( + "Rejecting known bad block: #{} {:?}", + number, + hash, + ); + return Ok(ImportResult::KnownBad); + }, + BlockLookupResult::Expected(expected_hash) => { + trace!( + "Rejecting block from known invalid fork. Got {:?}, expected: {:?} at height {}", + hash, + expected_hash, + number + ); + return Ok(ImportResult::KnownBad); + }, + BlockLookupResult::NotSpecial => {} + } + + // Own status must be checked first. 
If the block and ancestry is pruned + // this function must return `AlreadyInChain` rather than `MissingState` + match self.block_status(&BlockId::Hash(hash)) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))? + { + BlockStatus::InChainWithState | BlockStatus::Queued if !import_existing => return Ok(ImportResult::AlreadyInChain), + BlockStatus::InChainWithState | BlockStatus::Queued => {}, + BlockStatus::InChainPruned => return Ok(ImportResult::AlreadyInChain), + BlockStatus::Unknown => {}, + BlockStatus::KnownBad => return Ok(ImportResult::KnownBad), + } + + match self.block_status(&BlockId::Hash(parent_hash)) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))? + { + BlockStatus::InChainWithState | BlockStatus::Queued => {}, + BlockStatus::Unknown => return Ok(ImportResult::UnknownParent), + BlockStatus::InChainPruned if allow_missing_state => {}, + BlockStatus::InChainPruned => return Ok(ImportResult::MissingState), + BlockStatus::KnownBad => return Ok(ImportResult::KnownBad), + } + + + Ok(ImportResult::imported(false)) + } +} + +impl sp_consensus::BlockImport for Client where + B: backend::Backend, + E: CallExecutor + Send + Sync, + Block: BlockT, + Self: ProvideRuntimeApi, + >::Api: CoreApi + + ApiExt, +{ + type Error = ConsensusError; + type Transaction = backend::TransactionFor; + + fn import_block( + &mut self, + import_block: BlockImportParams, + new_cache: HashMap>, + ) -> Result { + (&*self).import_block(import_block, new_cache) + } + + fn check_block( + &mut self, + block: BlockCheckParams, + ) -> Result { + (&*self).check_block(block) + } +} + +impl Finalizer for Client where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, +{ + fn apply_finality( + &self, + operation: &mut ClientImportOperation, + id: BlockId, + justification: Option, + notify: bool, + ) -> sp_blockchain::Result<()> { + let last_best = self.backend.blockchain().info().best_hash; + let to_finalize_hash = 
self.backend.blockchain().expect_block_hash_from_id(&id)?; + self.apply_finality_with_block_hash( + operation, + to_finalize_hash, + justification, + last_best, + notify, + ) + } + + fn finalize_block( + &self, + id: BlockId, + justification: Option, + notify: bool, + ) -> sp_blockchain::Result<()> { + self.lock_import_and_run(|operation| { + self.apply_finality(operation, id, justification, notify) + }) + } +} + + +impl Finalizer for &Client where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, +{ + fn apply_finality( + &self, + operation: &mut ClientImportOperation, + id: BlockId, + justification: Option, + notify: bool, + ) -> sp_blockchain::Result<()> { + (**self).apply_finality(operation, id, justification, notify) + } + + fn finalize_block( + &self, + id: BlockId, + justification: Option, + notify: bool, + ) -> sp_blockchain::Result<()> { + (**self).finalize_block(id, justification, notify) + } +} + +impl BlockchainEvents for Client +where + E: CallExecutor, + Block: BlockT, +{ + /// Get block import event stream. + fn import_notification_stream(&self) -> ImportNotifications { + let (sink, stream) = tracing_unbounded("mpsc_import_notification_stream"); + self.import_notification_sinks.lock().push(sink); + stream + } + + fn finality_notification_stream(&self) -> FinalityNotifications { + let (sink, stream) = tracing_unbounded("mpsc_finality_notification_stream"); + self.finality_notification_sinks.lock().push(sink); + stream + } + + /// Get storage changes event stream. 
+ fn storage_changes_notification_stream( + &self, + filter_keys: Option<&[StorageKey]>, + child_filter_keys: Option<&[(StorageKey, Option>)]>, + ) -> sp_blockchain::Result> { + Ok(self.storage_notifications.lock().listen(filter_keys, child_filter_keys)) + } +} + +impl BlockBackend for Client + where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, +{ + fn block_body( + &self, + id: &BlockId, + ) -> sp_blockchain::Result::Extrinsic>>> { + self.body(id) + } + + fn block(&self, id: &BlockId) -> sp_blockchain::Result>> + { + Ok(match (self.header(id)?, self.body(id)?, self.justification(id)?) { + (Some(header), Some(extrinsics), justification) => + Some(SignedBlock { block: Block::new(header, extrinsics), justification }), + _ => None, + }) + } + + fn block_status(&self, id: &BlockId) -> sp_blockchain::Result { + // this can probably be implemented more efficiently + if let BlockId::Hash(ref h) = id { + if self.importing_block.read().as_ref().map_or(false, |importing| h == importing) { + return Ok(BlockStatus::Queued); + } + } + let hash_and_number = match id.clone() { + BlockId::Hash(hash) => self.backend.blockchain().number(hash)?.map(|n| (hash, n)), + BlockId::Number(n) => self.backend.blockchain().hash(n)?.map(|hash| (hash, n)), + }; + match hash_and_number { + Some((hash, number)) => { + if self.backend.have_state_at(&hash, number) { + Ok(BlockStatus::InChainWithState) + } else { + Ok(BlockStatus::InChainPruned) + } + } + None => Ok(BlockStatus::Unknown), + } + } + + fn justification(&self, id: &BlockId) -> sp_blockchain::Result> { + self.backend.blockchain().justification(*id) + } + + fn block_hash(&self, number: NumberFor) -> sp_blockchain::Result> { + self.backend.blockchain().hash(number) + } +} + +impl backend::AuxStore for Client + where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, + Self: ProvideRuntimeApi, + >::Api: CoreApi, +{ + /// Insert auxiliary data into key-value store. 
+ fn insert_aux< + 'a, + 'b: 'a, + 'c: 'a, + I: IntoIterator, + D: IntoIterator, + >(&self, insert: I, delete: D) -> sp_blockchain::Result<()> { + // Import is locked here because we may have other block import + // operations that tries to set aux data. Note that for consensus + // layer, one can always use atomic operations to make sure + // import is only locked once. + self.lock_import_and_run(|operation| { + apply_aux(operation, insert, delete) + }) + } + /// Query auxiliary data from key-value store. + fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result>> { + backend::AuxStore::get_aux(&*self.backend, key) + } +} + +impl backend::AuxStore for &Client + where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, + Client: ProvideRuntimeApi, + as ProvideRuntimeApi>::Api: CoreApi, +{ + fn insert_aux< + 'a, + 'b: 'a, + 'c: 'a, + I: IntoIterator, + D: IntoIterator, + >(&self, insert: I, delete: D) -> sp_blockchain::Result<()> { + (**self).insert_aux(insert, delete) + } + + fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result>> { + (**self).get_aux(key) + } +} + +impl sp_consensus::block_validation::Chain for Client + where BE: backend::Backend, + E: CallExecutor, + B: BlockT +{ + fn block_status( + &self, + id: &BlockId, + ) -> Result> { + Client::block_status(self, id).map_err(|e| Box::new(e) as Box<_>) + } +} diff --git a/client/service/src/client/genesis.rs b/client/service/src/client/genesis.rs new file mode 100644 index 0000000000000..4df08025e3826 --- /dev/null +++ b/client/service/src/client/genesis.rs @@ -0,0 +1,43 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Tool for creating the genesis block. + +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Hash as HashT, Zero}; + +/// Create a genesis block, given the initial storage. +pub fn construct_genesis_block< + Block: BlockT +> ( + state_root: Block::Hash +) -> Block { + let extrinsics_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( + Vec::new(), + ); + + Block::new( + <::Header as HeaderT>::new( + Zero::zero(), + extrinsics_root, + state_root, + Default::default(), + Default::default() + ), + Default::default() + ) +} diff --git a/client/service/src/client/light.rs b/client/service/src/client/light.rs new file mode 100644 index 0000000000000..e8e1286eccdb0 --- /dev/null +++ b/client/service/src/client/light.rs @@ -0,0 +1,75 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Light client utilities. 
+ +use std::sync::Arc; + +use sc_executor::RuntimeInfo; +use sp_core::traits::{CodeExecutor, SpawnNamed}; +use sp_runtime::BuildStorage; +use sp_runtime::traits::{Block as BlockT, HashFor}; +use sp_blockchain::Result as ClientResult; +use prometheus_endpoint::Registry; + +use super::{call_executor::LocalCallExecutor, client::{Client, ClientConfig}}; +use sc_client_api::light::Storage as BlockchainStorage; +use sc_light::{Backend, GenesisCallExecutor}; + + +/// Create an instance of light client. +pub fn new_light( + backend: Arc>>, + genesis_storage: &dyn BuildStorage, + code_executor: E, + spawn_handle: Box, + prometheus_registry: Option, +) -> ClientResult< + Client< + Backend>, + GenesisCallExecutor< + Backend>, + LocalCallExecutor>, E> + >, + B, + RA + > + > + where + B: BlockT, + S: BlockchainStorage + 'static, + E: CodeExecutor + RuntimeInfo + Clone + 'static, +{ + let local_executor = LocalCallExecutor::new( + backend.clone(), + code_executor, + spawn_handle.clone(), + ClientConfig::default() + ); + let executor = GenesisCallExecutor::new(backend.clone(), local_executor); + Client::new( + backend, + executor, + genesis_storage, + Default::default(), + Default::default(), + Default::default(), + prometheus_registry, + ClientConfig::default(), + ) +} diff --git a/client/service/src/client/mod.rs b/client/service/src/client/mod.rs new file mode 100644 index 0000000000000..7c96f61a7867a --- /dev/null +++ b/client/service/src/client/mod.rs @@ -0,0 +1,59 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Substrate Client and associated logic. +//! +//! The [`Client`] is one of the most important components of Substrate. It mainly comprises two +//! parts: +//! +//! - A database containing the blocks and chain state, generally referred to as +//! the [`Backend`](sc_client_api::backend::Backend). +//! - A runtime environment, generally referred to as the [`Executor`](CallExecutor). +//! +//! # Initialization +//! +//! Creating a [`Client`] is done by calling the `new` method and passing to it a +//! [`Backend`](sc_client_api::backend::Backend) and an [`Executor`](CallExecutor). +//! +//! The former is typically provided by the `sc-client-db` crate. +//! +//! The latter typically requires passing one of: +//! +//! - A [`LocalCallExecutor`] running the runtime locally. +//! - A [`RemoteCallExecutor`](light::call_executor::RemoteCallRequest) that will ask a +//! third-party to perform the executions. +//! - A [`RemoteOrLocalCallExecutor`](light::call_executor::RemoteOrLocalCallExecutor), combination +//! of the two. +//! +//! Additionally, the fourth generic parameter of the `Client` is a marker type representing +//! the ways in which the runtime can interface with the outside. Any code that builds a `Client` +//! is responsible for putting the right marker. 
+ +pub mod genesis; +pub mod light; +mod call_executor; +mod client; +mod block_rules; + +pub use self::{ + call_executor::LocalCallExecutor, + client::{Client, ClientConfig}, +}; + +#[cfg(feature="test-helpers")] +pub use self::client::{new_with_backend, new_in_mem}; diff --git a/client/service/src/config.rs b/client/service/src/config.rs new file mode 100644 index 0000000000000..15783a87f9917 --- /dev/null +++ b/client/service/src/config.rs @@ -0,0 +1,323 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Service configuration. 
+ +pub use sc_client_db::{Database, PruningMode, DatabaseSettingsSrc as DatabaseConfig}; +pub use sc_network::Multiaddr; +pub use sc_network::config::{ExtTransport, MultiaddrWithPeerId, NetworkConfiguration, Role, NodeKeyConfig}; +pub use sc_executor::WasmExecutionMethod; +use sc_client_api::execution_extensions::ExecutionStrategies; + +use std::{io, future::Future, path::{PathBuf, Path}, pin::Pin, net::SocketAddr, sync::Arc}; +pub use sc_transaction_pool::txpool::Options as TransactionPoolOptions; +use sc_chain_spec::ChainSpec; +use sp_core::crypto::SecretString; +pub use sc_telemetry::TelemetryEndpoints; +use prometheus_endpoint::Registry; +#[cfg(not(target_os = "unknown"))] +use tempfile::TempDir; + +/// Service configuration. +#[derive(Debug)] +pub struct Configuration { + /// Implementation name + pub impl_name: String, + /// Implementation version (see sc-cli to see an example of format) + pub impl_version: String, + /// Node role. + pub role: Role, + /// How to spawn background tasks. Mandatory, otherwise creating a `Service` will error. + pub task_executor: TaskExecutor, + /// Extrinsic pool configuration. + pub transaction_pool: TransactionPoolOptions, + /// Network configuration. + pub network: NetworkConfiguration, + /// Configuration for the keystore. + pub keystore: KeystoreConfig, + /// Configuration for the database. + pub database: DatabaseConfig, + /// Size of internal state cache in Bytes + pub state_cache_size: usize, + /// Size in percent of cache size dedicated to child tries + pub state_cache_child_ratio: Option, + /// Pruning settings. + pub pruning: PruningMode, + /// Chain configuration. + pub chain_spec: Box, + /// Wasm execution method. + pub wasm_method: WasmExecutionMethod, + /// Execution strategies. + pub execution_strategies: ExecutionStrategies, + /// RPC over HTTP binding address. `None` if disabled. + pub rpc_http: Option, + /// RPC over Websockets binding address. `None` if disabled. 
+ pub rpc_ws: Option, + /// RPC over IPC binding path. `None` if disabled. + pub rpc_ipc: Option, + /// Maximum number of connections for WebSockets RPC server. `None` if default. + pub rpc_ws_max_connections: Option, + /// CORS settings for HTTP & WS servers. `None` if all origins are allowed. + pub rpc_cors: Option>, + /// RPC methods to expose (by default only a safe subset or all of them). + pub rpc_methods: RpcMethods, + /// Prometheus endpoint configuration. `None` if disabled. + pub prometheus_config: Option, + /// Telemetry service URL. `None` if disabled. + pub telemetry_endpoints: Option, + /// External WASM transport for the telemetry. If `Some`, when connection to a telemetry + /// endpoint, this transport will be tried in priority before all others. + pub telemetry_external_transport: Option, + /// The default number of 64KB pages to allocate for Wasm execution + pub default_heap_pages: Option, + /// Should offchain workers be executed. + pub offchain_worker: OffchainWorkerConfig, + /// Enable authoring even when offline. + pub force_authoring: bool, + /// Disable GRANDPA when running in validator mode + pub disable_grandpa: bool, + /// Development key seed. + /// + /// When running in development mode, the seed will be used to generate authority keys by the keystore. + /// + /// Should only be set when `node` is running development mode. + pub dev_key_seed: Option, + /// Tracing targets + pub tracing_targets: Option, + /// Tracing receiver + pub tracing_receiver: sc_tracing::TracingReceiver, + /// The size of the instances cache. + /// + /// The default value is 8. + pub max_runtime_instances: usize, + /// Announce block automatically after they have been imported + pub announce_block: bool, + /// Base path of the configuration + pub base_path: Option, + /// Configuration of the output format that the informant uses. + pub informant_output_format: sc_informant::OutputFormat, +} + +/// Type for tasks spawned by the executor. 
+#[derive(PartialEq)] +pub enum TaskType { + /// Regular non-blocking futures. Polling the task is expected to be a lightweight operation. + Async, + /// The task might perform a lot of expensive CPU operations and/or call `thread::sleep`. + Blocking, +} + +/// Configuration of the client keystore. +#[derive(Debug, Clone)] +pub enum KeystoreConfig { + /// Keystore at a path on-disk. Recommended for native nodes. + Path { + /// The path of the keystore. + path: PathBuf, + /// Node keystore's password. + password: Option + }, + /// In-memory keystore. Recommended for in-browser nodes. + InMemory, +} + +impl KeystoreConfig { + /// Returns the path for the keystore. + pub fn path(&self) -> Option<&Path> { + match self { + Self::Path { path, .. } => Some(path), + Self::InMemory => None, + } + } +} +/// Configuration of the database of the client. +#[derive(Debug, Clone, Default)] +pub struct OffchainWorkerConfig { + /// If this is allowed. + pub enabled: bool, + /// allow writes from the runtime to the offchain worker database. + pub indexing_enabled: bool, +} + +/// Configuration of the Prometheus endpoint. +#[derive(Debug, Clone)] +pub struct PrometheusConfig { + /// Port to use. + pub port: SocketAddr, + /// A metrics registry to use. Useful for setting the metric prefix. + pub registry: Registry, +} + +impl PrometheusConfig { + /// Create a new config using the default registry. + /// + /// The default registry prefixes metrics with `substrate`. + pub fn new_with_default_registry(port: SocketAddr) -> Self { + Self { + port, + registry: Registry::new_custom(Some("substrate".into()), None) + .expect("this can only fail if the prefix is empty") + } + } +} + +impl Configuration { + /// Returns a string displaying the node role. + pub fn display_role(&self) -> String { + self.role.to_string() + } + + /// Returns the prometheus metrics registry, if available. 
+	pub fn prometheus_registry<'a>(&'a self) -> Option<&'a Registry> {
+		self.prometheus_config.as_ref().map(|config| &config.registry)
+	}
+}
+
+/// Available RPC methods.
+#[derive(Debug, Copy, Clone)]
+pub enum RpcMethods {
+	/// Expose every RPC method only when RPC is listening on `localhost`,
+	/// otherwise serve only safe RPC methods.
+	Auto,
+	/// Allow only a safe subset of RPC methods.
+	Safe,
+	/// Expose every RPC method (even potentially unsafe ones).
+	Unsafe,
+}
+
+impl Default for RpcMethods {
+	fn default() -> RpcMethods {
+		RpcMethods::Auto
+	}
+}
+
+/// The base path that is used for everything that needs to be written on disk to run a node.
+#[derive(Debug)]
+pub enum BasePath {
+	/// A temporary directory is used as base path and will be deleted when dropped.
+	#[cfg(not(target_os = "unknown"))]
+	Temporary(TempDir),
+	/// A path on the disk.
+	Permanenent(PathBuf),
+}
+
+impl BasePath {
+	/// Create a `BasePath` instance using a temporary directory prefixed with "substrate" and use
+	/// it as base path.
+	///
+	/// Note: the temporary directory will be created automatically and deleted when the `BasePath`
+	/// instance is dropped.
+	#[cfg(not(target_os = "unknown"))]
+	pub fn new_temp_dir() -> io::Result {
+		Ok(BasePath::Temporary(
+			tempfile::Builder::new().prefix("substrate").tempdir()?,
+		))
+	}
+
+	/// Create a `BasePath` instance based on an existing path on disk.
+	///
+	/// Note: this function will not ensure that the directory exists nor create the directory. It
+	/// will also not delete the directory when the instance is dropped.
+	pub fn new>(path: P) -> BasePath {
+		BasePath::Permanenent(path.as_ref().to_path_buf())
+	}
+
+	/// Create a base path from values describing the project.
+ #[cfg(not(target_os = "unknown"))] + pub fn from_project(qualifier: &str, organization: &str, application: &str) -> BasePath { + BasePath::new( + directories::ProjectDirs::from(qualifier, organization, application) + .expect("app directories exist on all supported platforms; qed") + .data_local_dir(), + ) + } + + /// Retrieve the base path. + pub fn path(&self) -> &Path { + match self { + #[cfg(not(target_os = "unknown"))] + BasePath::Temporary(temp_dir) => temp_dir.path(), + BasePath::Permanenent(path) => path.as_path(), + } + } +} + +impl std::convert::From for BasePath { + fn from(path: PathBuf) -> Self { + BasePath::new(path) + } +} + +// NOTE: here for code readability. +pub(crate) type SomeFuture = Pin + Send>>; +pub(crate) type JoinFuture = Pin + Send>>; + +/// Callable object that execute tasks. +/// +/// This struct can be created easily using `Into`. +/// +/// # Examples +/// +/// ## Using tokio +/// +/// ``` +/// # use sc_service::TaskExecutor; +/// use futures::future::FutureExt; +/// use tokio::runtime::Runtime; +/// +/// let runtime = Runtime::new().unwrap(); +/// let handle = runtime.handle().clone(); +/// let task_executor: TaskExecutor = (move |future, _task_type| { +/// handle.spawn(future).map(|_| ()) +/// }).into(); +/// ``` +/// +/// ## Using async-std +/// +/// ``` +/// # use sc_service::TaskExecutor; +/// let task_executor: TaskExecutor = (|future, _task_type| { +/// // NOTE: async-std's JoinHandle is not a Result so we don't need to map the result +/// async_std::task::spawn(future) +/// }).into(); +/// ``` +#[derive(Clone)] +pub struct TaskExecutor(Arc JoinFuture + Send + Sync>); + +impl std::fmt::Debug for TaskExecutor { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "TaskExecutor") + } +} + +impl std::convert::From for TaskExecutor +where + F: Fn(SomeFuture, TaskType) -> FUT + Send + Sync + 'static, + FUT: Future + Send + 'static, +{ + fn from(func: F) -> Self { + Self(Arc::new(move |fut, tt| 
Box::pin(func(fut, tt)))) + } +} + +impl TaskExecutor { + /// Spawns a new asynchronous task. + pub fn spawn(&self, future: SomeFuture, task_type: TaskType) -> JoinFuture { + self.0(future, task_type) + } +} diff --git a/client/service/src/error.rs b/client/service/src/error.rs new file mode 100644 index 0000000000000..ffe1b39405501 --- /dev/null +++ b/client/service/src/error.rs @@ -0,0 +1,75 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Errors that can occur during the service operation. + +use sc_network; +use sc_keystore; +use sp_consensus; +use sp_blockchain; + +/// Service Result typedef. +pub type Result = std::result::Result; + +/// Service errors. +#[derive(Debug, derive_more::Display, derive_more::From)] +pub enum Error { + /// Client error. + Client(sp_blockchain::Error), + /// IO error. + Io(std::io::Error), + /// Consensus error. + Consensus(sp_consensus::Error), + /// Network error. + Network(sc_network::error::Error), + /// Keystore error. + Keystore(sc_keystore::Error), + /// Best chain selection strategy is missing. + #[display(fmt="Best chain selection strategy (SelectChain) is not provided.")] + SelectChainRequired, + /// Tasks executor is missing. 
+ #[display(fmt="Tasks executor hasn't been provided.")] + TaskExecutorRequired, + /// Other error. + Other(String), +} + +impl<'a> From<&'a str> for Error { + fn from(s: &'a str) -> Self { + Error::Other(s.into()) + } +} + +impl From for Error { + fn from(e: prometheus_endpoint::PrometheusError) -> Self { + Error::Other(format!("Prometheus error: {}", e)) + } +} + +impl std::error::Error for Error { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + Error::Client(ref err) => Some(err), + Error::Io(ref err) => Some(err), + Error::Consensus(ref err) => Some(err), + Error::Network(ref err) => Some(err), + Error::Keystore(ref err) => Some(err), + _ => None, + } + } +} diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs new file mode 100644 index 0000000000000..39f1dff289a1a --- /dev/null +++ b/client/service/src/lib.rs @@ -0,0 +1,632 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Substrate service. Starts a thread that spins up the network, client, and extrinsic pool. +//! Manages communication between them. 
+ +#![warn(missing_docs)] +#![recursion_limit = "1024"] + +pub mod config; +pub mod chain_ops; +pub mod error; + +mod metrics; +mod builder; +#[cfg(feature = "test-helpers")] +pub mod client; +#[cfg(not(feature = "test-helpers"))] +mod client; +mod task_manager; + +use std::{io, pin::Pin}; +use std::net::SocketAddr; +use std::collections::HashMap; +use std::time::Duration; +use std::task::Poll; +use parking_lot::Mutex; + +use futures::{Future, FutureExt, Stream, StreamExt, stream, compat::*}; +use sc_network::{NetworkStatus, network_state::NetworkState, PeerId}; +use log::{warn, debug, error}; +use codec::{Encode, Decode}; +use sp_runtime::generic::BlockId; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; +use parity_util_mem::MallocSizeOf; +use sp_utils::{status_sinks, mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}}; + +pub use self::error::Error; +pub use self::builder::{ + new_full_client, new_client, new_full_parts, new_light_parts, + spawn_tasks, build_network, BuildNetworkParams, NetworkStarter, build_offchain_workers, + SpawnTasksParams, TFullClient, TLightClient, TFullBackend, TLightBackend, + TLightBackendWithHash, TLightClientWithBackend, + TFullCallExecutor, TLightCallExecutor, RpcExtensionBuilder, NoopRpcExtensionBuilder, +}; +pub use config::{ + BasePath, Configuration, DatabaseConfig, PruningMode, Role, RpcMethods, TaskExecutor, TaskType, +}; +pub use sc_chain_spec::{ + ChainSpec, GenericChainSpec, Properties, RuntimeGenesis, Extension as ChainSpecExtension, + NoExtension, ChainType, +}; +pub use sp_transaction_pool::{TransactionPool, InPoolTransaction, error::IntoPoolError}; +pub use sc_transaction_pool::txpool::Options as TransactionPoolOptions; +pub use sc_rpc::Metadata as RpcMetadata; +pub use sc_executor::NativeExecutionDispatch; +#[doc(hidden)] +pub use std::{ops::Deref, result::Result, sync::Arc}; +#[doc(hidden)] +pub use sc_network::config::{ + FinalityProofProvider, OnDemand, 
BoxFinalityProofRequestBuilder, TransactionImport, + TransactionImportFuture, +}; +pub use sc_tracing::TracingReceiver; +pub use task_manager::SpawnTaskHandle; +pub use task_manager::TaskManager; +pub use sp_consensus::import_queue::ImportQueue; +use sc_client_api::BlockchainEvents; +pub use sc_keystore::KeyStorePtr as KeyStore; + +const DEFAULT_PROTOCOL_ID: &str = "sup"; + +/// A type that implements `MallocSizeOf` on native but not wasm. +#[cfg(not(target_os = "unknown"))] +pub trait MallocSizeOfWasm: MallocSizeOf {} +#[cfg(target_os = "unknown")] +pub trait MallocSizeOfWasm {} +#[cfg(not(target_os = "unknown"))] +impl MallocSizeOfWasm for T {} +#[cfg(target_os = "unknown")] +impl MallocSizeOfWasm for T {} + +/// RPC handlers that can perform RPC queries. +#[derive(Clone)] +pub struct RpcHandlers(Arc>); + +impl RpcHandlers { + /// Starts an RPC query. + /// + /// The query is passed as a string and must be a JSON text similar to what an HTTP client + /// would for example send. + /// + /// Returns a `Future` that contains the optional response. + /// + /// If the request subscribes you to events, the `Sender` in the `RpcSession` object is used to + /// send back spontaneous events. + pub fn rpc_query(&self, mem: &RpcSession, request: &str) + -> Pin> + Send>> { + self.0.handle_request(request, mem.metadata.clone()) + .compat() + .map(|res| res.expect("this should never fail")) + .boxed() + } + + /// Provides access to the underlying `MetaIoHandler` + pub fn io_handler(&self) + -> Arc> { + self.0.clone() + } +} + +/// Sinks to propagate network status updates. +/// For each element, every time the `Interval` fires we push an element on the sender. 
+#[derive(Clone)]
+pub struct NetworkStatusSinks {
+	status: Arc>>,
+	state: Arc>,
+}
+
+impl NetworkStatusSinks {
+	fn new() -> Self {
+		Self {
+			status: Arc::new(status_sinks::StatusSinks::new()),
+			state: Arc::new(status_sinks::StatusSinks::new()),
+		}
+	}
+
+	/// Returns a receiver that periodically yields a [`NetworkStatus`].
+	pub fn status_stream(&self, interval: Duration)
+		-> TracingUnboundedReceiver>
+	{
+		let (sink, stream) = tracing_unbounded("mpsc_network_status");
+		self.status.push(interval, sink);
+		stream
+	}
+
+	/// Returns a receiver that periodically yields a [`NetworkState`].
+	pub fn state_stream(&self, interval: Duration)
+		-> TracingUnboundedReceiver
+	{
+		let (sink, stream) = tracing_unbounded("mpsc_network_state");
+		self.state.push(interval, sink);
+		stream
+	}
+
+}
+
+/// Sinks to propagate telemetry connection established events.
+#[derive(Default, Clone)]
+pub struct TelemetryConnectionSinks(Arc>>>);
+
+impl TelemetryConnectionSinks {
+	/// Get event stream for telemetry connection established events.
+	pub fn on_connect_stream(&self) -> TracingUnboundedReceiver<()> {
+		let (sink, stream) =tracing_unbounded("mpsc_telemetry_on_connect");
+		self.0.lock().push(sink);
+		stream
+	}
+}
+
+/// An incomplete set of chain components, but enough to run the chain ops subcommands.
+pub struct PartialComponents {
+	/// A shared client instance.
+	pub client: Arc,
+	/// A shared backend instance.
+	pub backend: Arc,
+	/// The chain task manager.
+	pub task_manager: TaskManager,
+	/// A shared keystore instance.
+	pub keystore: KeyStore,
+	/// A chain selection algorithm instance.
+	pub select_chain: SelectChain,
+	/// An import queue.
+	pub import_queue: ImportQueue,
+	/// A shared transaction pool.
+	pub transaction_pool: Arc,
+	/// A registry of all providers of `InherentData`.
+	pub inherent_data_providers: sp_inherents::InherentDataProviders,
+	/// Everything else that needs to be passed into the main build function.
+ pub other: Other, +} + +/// Builds a never-ending future that continuously polls the network. +/// +/// The `status_sink` contain a list of senders to send a periodic network status to. +async fn build_network_future< + B: BlockT, + C: BlockchainEvents, + H: sc_network::ExHashT +> ( + role: Role, + mut network: sc_network::NetworkWorker, + client: Arc, + status_sinks: NetworkStatusSinks, + mut rpc_rx: TracingUnboundedReceiver>, + should_have_peers: bool, + announce_imported_blocks: bool, +) { + let mut imported_blocks_stream = client.import_notification_stream().fuse(); + + // Stream of finalized blocks reported by the client. + let mut finality_notification_stream = { + let mut finality_notification_stream = client.finality_notification_stream().fuse(); + + // We tweak the `Stream` in order to merge together multiple items if they happen to be + // ready. This way, we only get the latest finalized block. + stream::poll_fn(move |cx| { + let mut last = None; + while let Poll::Ready(Some(item)) = Pin::new(&mut finality_notification_stream).poll_next(cx) { + last = Some(item); + } + if let Some(last) = last { + Poll::Ready(Some(last)) + } else { + Poll::Pending + } + }).fuse() + }; + + loop { + futures::select!{ + // List of blocks that the client has imported. + notification = imported_blocks_stream.next() => { + let notification = match notification { + Some(n) => n, + // If this stream is shut down, that means the client has shut down, and the + // most appropriate thing to do for the network future is to shut down too. + None => return, + }; + + if announce_imported_blocks { + network.service().announce_block(notification.hash, Vec::new()); + } + + if let sp_consensus::BlockOrigin::Own = notification.origin { + network.service().own_block_imported( + notification.hash, + notification.header.number().clone(), + ); + } + } + + // List of blocks that the client has finalized. 
+ notification = finality_notification_stream.select_next_some() => { + network.on_block_finalized(notification.hash, notification.header); + } + + // Answer incoming RPC requests. + request = rpc_rx.select_next_some() => { + match request { + sc_rpc::system::Request::Health(sender) => { + let _ = sender.send(sc_rpc::system::Health { + peers: network.peers_debug_info().len(), + is_syncing: network.service().is_major_syncing(), + should_have_peers, + }); + }, + sc_rpc::system::Request::LocalPeerId(sender) => { + let _ = sender.send(network.local_peer_id().to_base58()); + }, + sc_rpc::system::Request::LocalListenAddresses(sender) => { + let peer_id = network.local_peer_id().clone().into(); + let p2p_proto_suffix = sc_network::multiaddr::Protocol::P2p(peer_id); + let addresses = network.listen_addresses() + .map(|addr| addr.clone().with(p2p_proto_suffix.clone()).to_string()) + .collect(); + let _ = sender.send(addresses); + }, + sc_rpc::system::Request::Peers(sender) => { + let _ = sender.send(network.peers_debug_info().into_iter().map(|(peer_id, p)| + sc_rpc::system::PeerInfo { + peer_id: peer_id.to_base58(), + roles: format!("{:?}", p.roles), + best_hash: p.best_hash, + best_number: p.best_number, + } + ).collect()); + } + sc_rpc::system::Request::NetworkState(sender) => { + if let Some(network_state) = serde_json::to_value(&network.network_state()).ok() { + let _ = sender.send(network_state); + } + } + sc_rpc::system::Request::NetworkAddReservedPeer(peer_addr, sender) => { + let x = network.add_reserved_peer(peer_addr) + .map_err(sc_rpc::system::error::Error::MalformattedPeerArg); + let _ = sender.send(x); + } + sc_rpc::system::Request::NetworkRemoveReservedPeer(peer_id, sender) => { + let _ = match peer_id.parse::() { + Ok(peer_id) => { + network.remove_reserved_peer(peer_id); + sender.send(Ok(())) + } + Err(e) => sender.send(Err(sc_rpc::system::error::Error::MalformattedPeerArg( + e.to_string(), + ))), + }; + } + sc_rpc::system::Request::NodeRoles(sender) => { + 
use sc_rpc::system::NodeRole; + + let node_role = match role { + Role::Authority { .. } => NodeRole::Authority, + Role::Light => NodeRole::LightClient, + Role::Full => NodeRole::Full, + Role::Sentry { .. } => NodeRole::Sentry, + }; + + let _ = sender.send(vec![node_role]); + } + } + } + + // The network worker has done something. Nothing special to do, but could be + // used in the future to perform actions in response of things that happened on + // the network. + _ = (&mut network).fuse() => {} + + // At a regular interval, we send high-level status as well as + // detailed state information of the network on what are called + // "status sinks". + + status_sink = status_sinks.status.next().fuse() => { + status_sink.send(network.status()); + } + + state_sink = status_sinks.state.next().fuse() => { + state_sink.send(network.network_state()); + } + } + } +} + +#[cfg(not(target_os = "unknown"))] +// Wrapper for HTTP and WS servers that makes sure they are properly shut down. +mod waiting { + pub struct HttpServer(pub Option); + impl Drop for HttpServer { + fn drop(&mut self) { + if let Some(server) = self.0.take() { + server.close_handle().close(); + server.wait(); + } + } + } + + pub struct IpcServer(pub Option); + impl Drop for IpcServer { + fn drop(&mut self) { + if let Some(server) = self.0.take() { + server.close_handle().close(); + let _ = server.wait(); + } + } + } + + pub struct WsServer(pub Option); + impl Drop for WsServer { + fn drop(&mut self) { + if let Some(server) = self.0.take() { + server.close_handle().close(); + let _ = server.wait(); + } + } + } +} + +/// Starts RPC servers that run in their own thread, and returns an opaque object that keeps them alive. 
+#[cfg(not(target_os = "unknown"))] +fn start_rpc_servers< + H: FnMut(sc_rpc::DenyUnsafe, sc_rpc_server::RpcMiddleware) + -> sc_rpc_server::RpcHandler +>( + config: &Configuration, + mut gen_handler: H, + rpc_metrics: Option<&sc_rpc_server::RpcMetrics> +) -> Result, error::Error> { + fn maybe_start_server(address: Option, mut start: F) -> Result, io::Error> + where F: FnMut(&SocketAddr) -> Result, + { + Ok(match address { + Some(mut address) => Some(start(&address) + .or_else(|e| match e.kind() { + io::ErrorKind::AddrInUse | + io::ErrorKind::PermissionDenied => { + warn!("Unable to bind RPC server to {}. Trying random port.", address); + address.set_port(0); + start(&address) + }, + _ => Err(e), + })?), + None => None, + }) + } + + fn deny_unsafe(addr: &SocketAddr, methods: &RpcMethods) -> sc_rpc::DenyUnsafe { + let is_exposed_addr = !addr.ip().is_loopback(); + match (is_exposed_addr, methods) { + | (_, RpcMethods::Unsafe) + | (false, RpcMethods::Auto) => sc_rpc::DenyUnsafe::No, + _ => sc_rpc::DenyUnsafe::Yes + } + } + + Ok(Box::new(( + config.rpc_ipc.as_ref().map(|path| sc_rpc_server::start_ipc( + &*path, gen_handler( + sc_rpc::DenyUnsafe::No, + sc_rpc_server::RpcMiddleware::new(rpc_metrics.cloned(), "ipc") + ) + )), + maybe_start_server( + config.rpc_http, + |address| sc_rpc_server::start_http( + address, + config.rpc_cors.as_ref(), + gen_handler( + deny_unsafe(&address, &config.rpc_methods), + sc_rpc_server::RpcMiddleware::new(rpc_metrics.cloned(), "http") + ), + ), + )?.map(|s| waiting::HttpServer(Some(s))), + maybe_start_server( + config.rpc_ws, + |address| sc_rpc_server::start_ws( + address, + config.rpc_ws_max_connections, + config.rpc_cors.as_ref(), + gen_handler( + deny_unsafe(&address, &config.rpc_methods), + sc_rpc_server::RpcMiddleware::new(rpc_metrics.cloned(), "ws") + ), + ), + )?.map(|s| waiting::WsServer(Some(s))), + ))) +} + +/// Starts RPC servers that run in their own thread, and returns an opaque object that keeps them alive. 
+#[cfg(target_os = "unknown")] +fn start_rpc_servers< + H: FnMut(sc_rpc::DenyUnsafe, sc_rpc_server::RpcMiddleware) + -> sc_rpc_server::RpcHandler +>( + _: &Configuration, + _: H, + _: Option<&sc_rpc_server::RpcMetrics> +) -> Result, error::Error> { + Ok(Box::new(())) +} + +/// An RPC session. Used to perform in-memory RPC queries (ie. RPC queries that don't go through +/// the HTTP or WebSockets server). +#[derive(Clone)] +pub struct RpcSession { + metadata: sc_rpc::Metadata, +} + +impl RpcSession { + /// Creates an RPC session. + /// + /// The `sender` is stored inside the `RpcSession` and is used to communicate spontaneous JSON + /// messages. + /// + /// The `RpcSession` must be kept alive in order to receive messages on the sender. + pub fn new(sender: futures01::sync::mpsc::Sender) -> RpcSession { + RpcSession { + metadata: sender.into(), + } + } +} + +/// Transaction pool adapter. +pub struct TransactionPoolAdapter { + imports_external_transactions: bool, + pool: Arc

, + client: Arc, +} + +/// Get transactions for propagation. +/// +/// Function extracted to simplify the test and prevent creating `ServiceFactory`. +fn transactions_to_propagate(pool: &Pool) + -> Vec<(H, B::Extrinsic)> +where + Pool: TransactionPool, + B: BlockT, + H: std::hash::Hash + Eq + sp_runtime::traits::Member + sp_runtime::traits::MaybeSerialize, + E: IntoPoolError + From, +{ + pool.ready() + .filter(|t| t.is_propagable()) + .map(|t| { + let hash = t.hash().clone(); + let ex: B::Extrinsic = t.data().clone(); + (hash, ex) + }) + .collect() +} + +impl sc_network::config::TransactionPool for + TransactionPoolAdapter +where + C: sc_network::config::Client + Send + Sync, + Pool: 'static + TransactionPool, + B: BlockT, + H: std::hash::Hash + Eq + sp_runtime::traits::Member + sp_runtime::traits::MaybeSerialize, + E: 'static + IntoPoolError + From, +{ + fn transactions(&self) -> Vec<(H, B::Extrinsic)> { + transactions_to_propagate(&*self.pool) + } + + fn hash_of(&self, transaction: &B::Extrinsic) -> H { + self.pool.hash_of(transaction) + } + + fn import( + &self, + transaction: B::Extrinsic, + ) -> TransactionImportFuture { + if !self.imports_external_transactions { + debug!("Transaction rejected"); + Box::pin(futures::future::ready(TransactionImport::None)); + } + + let encoded = transaction.encode(); + let uxt = match Decode::decode(&mut &encoded[..]) { + Ok(uxt) => uxt, + Err(e) => { + debug!("Transaction invalid: {:?}", e); + return Box::pin(futures::future::ready(TransactionImport::Bad)); + } + }; + + let best_block_id = BlockId::hash(self.client.info().best_hash); + + let import_future = self.pool.submit_one(&best_block_id, sp_transaction_pool::TransactionSource::External, uxt); + Box::pin(async move { + match import_future.await { + Ok(_) => TransactionImport::NewGood, + Err(e) => match e.into_pool_error() { + Ok(sp_transaction_pool::error::Error::AlreadyImported(_)) => TransactionImport::KnownGood, + Ok(e) => { + debug!("Error adding transaction to the 
pool: {:?}", e); + TransactionImport::Bad + } + Err(e) => { + debug!("Error converting pool error: {:?}", e); + // it is not bad at least, just some internal node logic error, so peer is innocent. + TransactionImport::KnownGood + } + } + } + }) + } + + fn on_broadcasted(&self, propagations: HashMap>) { + self.pool.on_broadcasted(propagations) + } + + fn transaction(&self, hash: &H) -> Option { + self.pool.ready_transaction(hash) + .and_then( + // Only propagable transactions should be resolved for network service. + |tx| if tx.is_propagable() { Some(tx.data().clone()) } else { None } + ) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use futures::executor::block_on; + use sp_consensus::SelectChain; + use sp_runtime::traits::BlindCheckable; + use substrate_test_runtime_client::{prelude::*, runtime::{Extrinsic, Transfer}}; + use sc_transaction_pool::BasicPool; + + #[test] + fn should_not_propagate_transactions_that_are_marked_as_such() { + // given + let (client, longest_chain) = TestClientBuilder::new().build_with_longest_chain(); + let client = Arc::new(client); + let spawner = sp_core::testing::TaskExecutor::new(); + let pool = BasicPool::new_full( + Default::default(), + None, + spawner, + client.clone(), + ); + let source = sp_runtime::transaction_validity::TransactionSource::External; + let best = longest_chain.best_chain().unwrap(); + let transaction = Transfer { + amount: 5, + nonce: 0, + from: AccountKeyring::Alice.into(), + to: Default::default(), + }.into_signed_tx(); + block_on(pool.submit_one( + &BlockId::hash(best.hash()), source, transaction.clone()), + ).unwrap(); + block_on(pool.submit_one( + &BlockId::hash(best.hash()), source, Extrinsic::IncludeData(vec![1])), + ).unwrap(); + assert_eq!(pool.status().ready, 2); + + // when + let transactions = transactions_to_propagate(&*pool); + + // then + assert_eq!(transactions.len(), 1); + assert!(transactions[0].1.clone().check().is_ok()); + // this should not panic + let _ = 
transactions[0].1.transfer(); + } +} diff --git a/client/service/src/metrics.rs b/client/service/src/metrics.rs new file mode 100644 index 0000000000000..0af393b53f517 --- /dev/null +++ b/client/service/src/metrics.rs @@ -0,0 +1,338 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::{convert::TryFrom, time::SystemTime}; + +use crate::{NetworkStatus, NetworkState, NetworkStatusSinks, config::Configuration}; +use futures_timer::Delay; +use prometheus_endpoint::{register, Gauge, U64, Registry, PrometheusError, Opts, GaugeVec}; +use sc_telemetry::{telemetry, SUBSTRATE_INFO}; +use sp_api::ProvideRuntimeApi; +use sp_runtime::traits::{NumberFor, Block, SaturatedConversion, UniqueSaturatedInto}; +use sp_transaction_pool::{PoolStatus, MaintainedTransactionPool}; +use sp_utils::metrics::register_globals; +use sp_utils::mpsc::TracingUnboundedReceiver; +use sc_client_api::{ClientInfo, UsageProvider}; +use sc_network::config::Role; +use std::sync::Arc; +use std::time::Duration; +use wasm_timer::Instant; + +struct PrometheusMetrics { + // generic info + block_height: GaugeVec, + number_leaves: Gauge, + ready_transactions_number: Gauge, + + // I/O + database_cache: Gauge, + state_cache: Gauge, + state_db: GaugeVec, +} + +impl PrometheusMetrics { + fn setup( + registry: &Registry, + name: &str, + version: &str, + roles: u64, + ) -> Result { + register(Gauge::::with_opts( + Opts::new( + "build_info", + "A metric with a constant '1' value labeled by name, version" + ) + .const_label("name", name) + .const_label("version", version) + )?, ®istry)?.set(1); + + register(Gauge::::new( + "node_roles", "The roles the node is running as", + )?, ®istry)?.set(roles); + + register_globals(registry)?; + + let start_time_since_epoch = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH) + .unwrap_or_default(); + register(Gauge::::new( + "process_start_time_seconds", + "Number of seconds between the UNIX epoch and the moment the process started", + )?, registry)?.set(start_time_since_epoch.as_secs()); + + Ok(Self { + // generic internals + block_height: register(GaugeVec::new( + Opts::new("block_height", "Block height info of the chain"), + &["status"] + )?, registry)?, + + number_leaves: register(Gauge::new( + "number_leaves", "Number of known chain leaves 
(aka forks)", + )?, registry)?, + + ready_transactions_number: register(Gauge::new( + "ready_transactions_number", "Number of transactions in the ready queue", + )?, registry)?, + + // I/ O + database_cache: register(Gauge::new( + "database_cache_bytes", "RocksDB cache size in bytes", + )?, registry)?, + state_cache: register(Gauge::new( + "state_cache_bytes", "State cache size in bytes", + )?, registry)?, + state_db: register(GaugeVec::new( + Opts::new("state_db_cache_bytes", "State DB cache in bytes"), + &["subtype"] + )?, registry)?, + }) + } +} + +/// A `MetricsService` periodically sends general client and +/// network state to the telemetry as well as (optionally) +/// a Prometheus endpoint. +pub struct MetricsService { + metrics: Option, + last_update: Instant, + last_total_bytes_inbound: u64, + last_total_bytes_outbound: u64, +} + +impl MetricsService { + /// Creates a `MetricsService` that only sends information + /// to the telemetry. + pub fn new() -> Self { + MetricsService { + metrics: None, + last_total_bytes_inbound: 0, + last_total_bytes_outbound: 0, + last_update: Instant::now(), + } + } + + /// Creates a `MetricsService` that sends metrics + /// to prometheus alongside the telemetry. + pub fn with_prometheus( + registry: &Registry, + config: &Configuration, + ) -> Result { + let role_bits = match config.role { + Role::Full => 1u64, + Role::Light => 2u64, + Role::Sentry { .. } => 3u64, + Role::Authority { .. } => 4u64, + }; + + PrometheusMetrics::setup( + registry, + &config.network.node_name, + &config.impl_version, + role_bits, + ) + .map(|p| MetricsService { + metrics: Some(p), + last_total_bytes_inbound: 0, + last_total_bytes_outbound: 0, + last_update: Instant::now(), + }) + } + + /// Returns a never-ending `Future` that performs the + /// metric and telemetry updates with information from + /// the given sources. 
+ pub async fn run( + mut self, + client: Arc, + transactions: Arc, + network: NetworkStatusSinks, + ) where + TBl: Block, + TCl: ProvideRuntimeApi + UsageProvider, + TExPool: MaintainedTransactionPool::Hash>, + { + let mut timer = Delay::new(Duration::from_secs(0)); + let timer_interval = Duration::from_secs(5); + + // Metric and telemetry update interval. + let net_status_interval = timer_interval; + let net_state_interval = Duration::from_secs(30); + + // Source of network information. + let mut net_status_rx = Some(network.status_stream(net_status_interval)); + let mut net_state_rx = Some(network.state_stream(net_state_interval)); + + loop { + // Wait for the next tick of the timer. + (&mut timer).await; + + // Try to get the latest network information. + let mut net_status = None; + let mut net_state = None; + if let Some(rx) = net_status_rx.as_mut() { + match Self::latest(rx) { + Ok(status) => { net_status = status; } + Err(()) => { net_status_rx = None; } + } + } + if let Some(rx) = net_state_rx.as_mut() { + match Self::latest(rx) { + Ok(state) => { net_state = state; } + Err(()) => { net_state_rx = None; } + } + } + + // Update / Send the metrics. + self.update( + &client.usage_info(), + &transactions.status(), + net_status, + net_state, + ); + + // Schedule next tick. + timer.reset(timer_interval); + } + } + + // Try to get the latest value from a receiver, dropping intermediate values. 
+ fn latest(rx: &mut TracingUnboundedReceiver) -> Result, ()> { + let mut value = None; + + while let Ok(next) = rx.try_next() { + match next { + Some(v) => { + value = Some(v) + } + None => { + log::error!("Receiver closed unexpectedly."); + return Err(()) + } + } + } + + Ok(value) + } + + fn update( + &mut self, + info: &ClientInfo, + txpool_status: &PoolStatus, + net_status: Option>, + net_state: Option, + ) { + let now = Instant::now(); + let elapsed = (now - self.last_update).as_secs(); + self.last_update = now; + + let best_number = info.chain.best_number.saturated_into::(); + let best_hash = info.chain.best_hash; + let finalized_number: u64 = info.chain.finalized_number.saturated_into::(); + + // Update/send metrics that are always available. + telemetry!( + SUBSTRATE_INFO; + "system.interval"; + "height" => best_number, + "best" => ?best_hash, + "txcount" => txpool_status.ready, + "finalized_height" => finalized_number, + "finalized_hash" => ?info.chain.finalized_hash, + "used_state_cache_size" => info.usage.as_ref() + .map(|usage| usage.memory.state_cache.as_bytes()) + .unwrap_or(0), + ); + + if let Some(metrics) = self.metrics.as_ref() { + metrics + .block_height + .with_label_values(&["finalized"]) + .set(finalized_number); + metrics + .block_height + .with_label_values(&["best"]) + .set(best_number); + + if let Ok(leaves) = u64::try_from(info.chain.number_leaves) { + metrics.number_leaves.set(leaves); + } + + metrics.ready_transactions_number.set(txpool_status.ready as u64); + + if let Some(info) = info.usage.as_ref() { + metrics.database_cache.set(info.memory.database_cache.as_bytes() as u64); + metrics.state_cache.set(info.memory.state_cache.as_bytes() as u64); + + metrics.state_db.with_label_values(&["non_canonical"]).set( + info.memory.state_db.non_canonical.as_bytes() as u64, + ); + if let Some(pruning) = info.memory.state_db.pruning { + metrics.state_db.with_label_values(&["pruning"]).set(pruning.as_bytes() as u64); + } + 
metrics.state_db.with_label_values(&["pinned"]).set( + info.memory.state_db.pinned.as_bytes() as u64, + ); + } + } + + // Update/send network status information, if any. + if let Some(net_status) = net_status { + let num_peers = net_status.num_connected_peers; + let total_bytes_inbound = net_status.total_bytes_inbound; + let total_bytes_outbound = net_status.total_bytes_outbound; + + let diff_bytes_inbound = total_bytes_inbound - self.last_total_bytes_inbound; + let diff_bytes_outbound = total_bytes_outbound - self.last_total_bytes_outbound; + let (avg_bytes_per_sec_inbound, avg_bytes_per_sec_outbound) = + if elapsed > 0 { + self.last_total_bytes_inbound = total_bytes_inbound; + self.last_total_bytes_outbound = total_bytes_outbound; + (diff_bytes_inbound / elapsed, diff_bytes_outbound / elapsed) + } else { + (diff_bytes_inbound, diff_bytes_outbound) + }; + + telemetry!( + SUBSTRATE_INFO; + "system.interval"; + "peers" => num_peers, + "bandwidth_download" => avg_bytes_per_sec_inbound, + "bandwidth_upload" => avg_bytes_per_sec_outbound, + ); + + if let Some(metrics) = self.metrics.as_ref() { + let best_seen_block = net_status + .best_seen_block + .map(|num: NumberFor| num.unique_saturated_into() as u64); + + if let Some(best_seen_block) = best_seen_block { + metrics.block_height.with_label_values(&["sync_target"]).set(best_seen_block); + } + } + } + + // Send network state information, if any. + if let Some(net_state) = net_state { + telemetry!( + SUBSTRATE_INFO; + "system.network_state"; + "state" => net_state, + ); + } + } +} diff --git a/client/service/src/task_manager/mod.rs b/client/service/src/task_manager/mod.rs new file mode 100644 index 0000000000000..88a44e1360d7f --- /dev/null +++ b/client/service/src/task_manager/mod.rs @@ -0,0 +1,401 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +//! Substrate service tasks management module. + +use std::{panic, result::Result, pin::Pin}; +use exit_future::Signal; +use log::{debug, error}; +use futures::{ + Future, FutureExt, StreamExt, + future::{select, Either, BoxFuture, join_all, try_join_all, pending}, + sink::SinkExt, +}; +use prometheus_endpoint::{ + exponential_buckets, register, + PrometheusError, + CounterVec, HistogramOpts, HistogramVec, Opts, Registry, U64 +}; +use sp_utils::mpsc::{TracingUnboundedSender, TracingUnboundedReceiver, tracing_unbounded}; +use crate::{config::{TaskExecutor, TaskType, JoinFuture}, Error}; + +mod prometheus_future; +#[cfg(test)] +mod tests; + +/// An handle for spawning tasks in the service. +#[derive(Clone)] +pub struct SpawnTaskHandle { + on_exit: exit_future::Exit, + executor: TaskExecutor, + metrics: Option, + task_notifier: TracingUnboundedSender, +} + +impl SpawnTaskHandle { + /// Spawns the given task with the given name. + /// + /// Note that the `name` is a `&'static str`. The reason for this choice is that statistics + /// about this task are getting reported to the Prometheus endpoint (if enabled), and that + /// therefore the set of possible task names must be bounded. + /// + /// In other words, it would be a bad idea for someone to do for example + /// `spawn(format!("{:?}", some_public_key))`. 
+ pub fn spawn(&self, name: &'static str, task: impl Future + Send + 'static) { + self.spawn_inner(name, task, TaskType::Async) + } + + /// Spawns the blocking task with the given name. See also `spawn`. + pub fn spawn_blocking(&self, name: &'static str, task: impl Future + Send + 'static) { + self.spawn_inner(name, task, TaskType::Blocking) + } + + /// Helper function that implements the spawning logic. See `spawn` and `spawn_blocking`. + fn spawn_inner( + &self, + name: &'static str, + task: impl Future + Send + 'static, + task_type: TaskType, + ) { + if self.task_notifier.is_closed() { + debug!("Attempt to spawn a new task has been prevented: {}", name); + return; + } + + let on_exit = self.on_exit.clone(); + let metrics = self.metrics.clone(); + + // Note that we increase the started counter here and not within the future. This way, + // we could properly visualize on Prometheus situations where the spawning doesn't work. + if let Some(metrics) = &self.metrics { + metrics.tasks_spawned.with_label_values(&[name]).inc(); + // We do a dummy increase in order for the task to show up in metrics. + metrics.tasks_ended.with_label_values(&[name, "finished"]).inc_by(0); + } + + let future = async move { + if let Some(metrics) = metrics { + // Add some wrappers around `task`. + let task = { + let poll_duration = metrics.poll_duration.with_label_values(&[name]); + let poll_start = metrics.poll_start.with_label_values(&[name]); + let inner = prometheus_future::with_poll_durations(poll_duration, poll_start, task); + // The logic of `AssertUnwindSafe` here is ok considering that we throw + // away the `Future` after it has panicked. 
+ panic::AssertUnwindSafe(inner).catch_unwind() + }; + futures::pin_mut!(task); + + match select(on_exit, task).await { + Either::Right((Err(payload), _)) => { + metrics.tasks_ended.with_label_values(&[name, "panic"]).inc(); + panic::resume_unwind(payload) + } + Either::Right((Ok(()), _)) => { + metrics.tasks_ended.with_label_values(&[name, "finished"]).inc(); + } + Either::Left(((), _)) => { + // The `on_exit` has triggered. + metrics.tasks_ended.with_label_values(&[name, "interrupted"]).inc(); + } + } + + } else { + futures::pin_mut!(task); + let _ = select(on_exit, task).await; + } + }; + + let join_handle = self.executor.spawn(Box::pin(future), task_type); + let mut task_notifier = self.task_notifier.clone(); + self.executor.spawn( + Box::pin(async move { + if let Err(err) = task_notifier.send(join_handle).await { + error!("Could not send spawned task handle to queue: {}", err); + } + }), + TaskType::Async, + ); + } +} + +impl sp_core::traits::SpawnNamed for SpawnTaskHandle { + fn spawn_blocking(&self, name: &'static str, future: BoxFuture<'static, ()>) { + self.spawn_blocking(name, future); + } + + fn spawn(&self, name: &'static str, future: BoxFuture<'static, ()>) { + self.spawn(name, future); + } +} + +/// A wrapper over `SpawnTaskHandle` that will notify a receiver whenever any +/// task spawned through it fails. The service should be on the receiver side +/// and will shut itself down whenever it receives any message, i.e. an +/// essential task has failed. +pub struct SpawnEssentialTaskHandle { + essential_failed_tx: TracingUnboundedSender<()>, + inner: SpawnTaskHandle, +} + +impl SpawnEssentialTaskHandle { + /// Creates a new `SpawnEssentialTaskHandle`. + pub fn new( + essential_failed_tx: TracingUnboundedSender<()>, + spawn_task_handle: SpawnTaskHandle, + ) -> SpawnEssentialTaskHandle { + SpawnEssentialTaskHandle { + essential_failed_tx, + inner: spawn_task_handle, + } + } + + /// Spawns the given task with the given name. 
+ /// + /// See also [`SpawnTaskHandle::spawn`]. + pub fn spawn(&self, name: &'static str, task: impl Future + Send + 'static) { + self.spawn_inner(name, task, TaskType::Async) + } + + /// Spawns the blocking task with the given name. + /// + /// See also [`SpawnTaskHandle::spawn_blocking`]. + pub fn spawn_blocking( + &self, + name: &'static str, + task: impl Future + Send + 'static, + ) { + self.spawn_inner(name, task, TaskType::Blocking) + } + + fn spawn_inner( + &self, + name: &'static str, + task: impl Future + Send + 'static, + task_type: TaskType, + ) { + let essential_failed = self.essential_failed_tx.clone(); + let essential_task = std::panic::AssertUnwindSafe(task) + .catch_unwind() + .map(move |_| { + log::error!("Essential task `{}` failed. Shutting down service.", name); + let _ = essential_failed.close_channel(); + }); + + let _ = self.inner.spawn_inner(name, essential_task, task_type); + } +} + +/// Helper struct to manage background/async tasks in Service. +pub struct TaskManager { + /// A future that resolves when the service has exited, this is useful to + /// make sure any internally spawned futures stop when the service does. + on_exit: exit_future::Exit, + /// A signal that makes the exit future above resolve, fired on service drop. + signal: Option, + /// How to spawn background tasks. + executor: TaskExecutor, + /// Prometheus metric where to report the polling times. + metrics: Option, + /// Send a signal when a spawned essential task has concluded. The next time + /// the service future is polled it should complete with an error. + essential_failed_tx: TracingUnboundedSender<()>, + /// A receiver for spawned essential-tasks concluding. + essential_failed_rx: TracingUnboundedReceiver<()>, + /// Things to keep alive until the task manager is dropped. + keep_alive: Box, + /// A sender to a stream of background tasks. This is used for the completion future. 
+ task_notifier: TracingUnboundedSender, + /// This future will complete when all the tasks are joined and the stream is closed. + completion_future: JoinFuture, + /// A list of other `TaskManager`'s to terminate and gracefully shutdown when the parent + /// terminates and gracefully shutdown. Also ends the parent `future()` if a child's essential + /// task fails. + children: Vec, +} + +impl TaskManager { + /// If a Prometheus registry is passed, it will be used to report statistics about the + /// service tasks. + pub(super) fn new( + executor: TaskExecutor, + prometheus_registry: Option<&Registry> + ) -> Result { + let (signal, on_exit) = exit_future::signal(); + + // A side-channel for essential tasks to communicate shutdown. + let (essential_failed_tx, essential_failed_rx) = tracing_unbounded("mpsc_essential_tasks"); + + let metrics = prometheus_registry.map(Metrics::register).transpose()?; + + let (task_notifier, background_tasks) = tracing_unbounded("mpsc_background_tasks"); + // NOTE: for_each_concurrent will await on all the JoinHandle futures at the same time. It + // is possible to limit this but it's actually better for the memory foot print to await + // them all to not accumulate anything on that stream. + let completion_future = executor.spawn( + Box::pin(background_tasks.for_each_concurrent(None, |x| x)), + TaskType::Async, + ); + + Ok(Self { + on_exit, + signal: Some(signal), + executor, + metrics, + essential_failed_tx, + essential_failed_rx, + keep_alive: Box::new(()), + task_notifier, + completion_future, + children: Vec::new(), + }) + } + + /// Get a handle for spawning tasks. + pub fn spawn_handle(&self) -> SpawnTaskHandle { + SpawnTaskHandle { + on_exit: self.on_exit.clone(), + executor: self.executor.clone(), + metrics: self.metrics.clone(), + task_notifier: self.task_notifier.clone(), + } + } + + /// Get a handle for spawning essential tasks. 
+ pub fn spawn_essential_handle(&self) -> SpawnEssentialTaskHandle { + SpawnEssentialTaskHandle::new(self.essential_failed_tx.clone(), self.spawn_handle()) + } + + /// Send the signal for termination, prevent new tasks to be created, await for all the existing + /// tasks to be finished and drop the object. You can consider this as an async drop. + /// + /// It's always better to call and await this function before exiting the process as background + /// tasks may be running in the background. If the process exit and the background tasks are not + /// cancelled, this will lead to objects not getting dropped properly. + /// + /// This is an issue in some cases as some of our dependencies do require that we drop all the + /// objects properly otherwise it triggers a SIGABRT on exit. + pub fn clean_shutdown(mut self) -> Pin + Send>> { + self.terminate(); + let children_shutdowns = self.children.into_iter().map(|x| x.clean_shutdown()); + let keep_alive = self.keep_alive; + let completion_future = self.completion_future; + + Box::pin(async move { + join_all(children_shutdowns).await; + completion_future.await; + drop(keep_alive); + }) + } + + /// Return a future that will end with success if the signal to terminate was sent + /// (`self.terminate()`) or with an error if an essential task fails. + /// + /// # Warning + /// + /// This function will not wait until the end of the remaining task. You must call and await + /// `clean_shutdown()` after this. + pub fn future<'a>(&'a mut self) -> Pin> + Send + 'a>> { + Box::pin(async move { + let mut t1 = self.essential_failed_rx.next().fuse(); + let mut t2 = self.on_exit.clone().fuse(); + let mut t3 = try_join_all( + self.children.iter_mut().map(|x| x.future()) + // Never end this future if there is no error because if there is no children, + // it must not stop + .chain(std::iter::once(pending().boxed())) + ).fuse(); + + futures::select! 
{ + _ = t1 => Err(Error::Other("Essential task failed.".into())), + _ = t2 => Ok(()), + res = t3 => Err(res.map(|_| ()).expect_err("this future never ends; qed")), + } + }) + } + + /// Signal to terminate all the running tasks. + pub fn terminate(&mut self) { + if let Some(signal) = self.signal.take() { + let _ = signal.fire(); + // NOTE: this will prevent new tasks to be spawned + self.task_notifier.close_channel(); + for child in self.children.iter_mut() { + child.terminate(); + } + } + } + + /// Set what the task manager should keep alive, can be called multiple times. + pub fn keep_alive(&mut self, to_keep_alive: T) { + // allows this fn to safely called multiple times. + use std::mem; + let old = mem::replace(&mut self.keep_alive, Box::new(())); + self.keep_alive = Box::new((to_keep_alive, old)); + } + + /// Register another TaskManager to terminate and gracefully shutdown when the parent + /// terminates and gracefully shutdown. Also ends the parent `future()` if a child's essential + /// task fails. (But don't end the parent if a child's normal task fails.) 
+ pub fn add_child(&mut self, child: TaskManager) { + self.children.push(child); + } +} + +#[derive(Clone)] +struct Metrics { + // This list is ordered alphabetically + poll_duration: HistogramVec, + poll_start: CounterVec, + tasks_spawned: CounterVec, + tasks_ended: CounterVec, +} + +impl Metrics { + fn register(registry: &Registry) -> Result { + Ok(Self { + poll_duration: register(HistogramVec::new( + HistogramOpts { + common_opts: Opts::new( + "tasks_polling_duration", + "Duration in seconds of each invocation of Future::poll" + ), + buckets: exponential_buckets(0.001, 4.0, 9) + .expect("function parameters are constant and always valid; qed"), + }, + &["task_name"] + )?, registry)?, + poll_start: register(CounterVec::new( + Opts::new( + "tasks_polling_started_total", + "Total number of times we started invoking Future::poll" + ), + &["task_name"] + )?, registry)?, + tasks_spawned: register(CounterVec::new( + Opts::new( + "tasks_spawned_total", + "Total number of tasks that have been spawned on the Service" + ), + &["task_name"] + )?, registry)?, + tasks_ended: register(CounterVec::new( + Opts::new( + "tasks_ended_total", + "Total number of tasks for which Future::poll has returned Ready(()) or panicked" + ), + &["task_name", "reason"] + )?, registry)?, + }) + } +} diff --git a/client/service/src/task_manager/prometheus_future.rs b/client/service/src/task_manager/prometheus_future.rs new file mode 100644 index 0000000000000..53bd59aa7a507 --- /dev/null +++ b/client/service/src/task_manager/prometheus_future.rs @@ -0,0 +1,69 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +//! Wrapper around a `Future` that reports statistics about when the `Future` is polled. + +use futures::prelude::*; +use prometheus_endpoint::{Counter, Histogram, U64}; +use std::{fmt, pin::Pin, task::{Context, Poll}}; + +/// Wraps around a `Future`. Report the polling duration to the `Histogram` and when the polling +/// starts to the `Counter`. +pub fn with_poll_durations( + poll_duration: Histogram, + poll_start: Counter, + inner: T +) -> PrometheusFuture { + PrometheusFuture { + inner, + poll_duration, + poll_start, + } +} + +/// Wraps around `Future` and adds diagnostics to it. +#[pin_project::pin_project] +#[derive(Clone)] +pub struct PrometheusFuture { + /// The inner future doing the actual work. + #[pin] + inner: T, + poll_duration: Histogram, + poll_start: Counter, +} + +impl Future for PrometheusFuture +where + T: Future, +{ + type Output = T::Output; + + fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { + let this = self.project(); + + this.poll_start.inc(); + let _timer = this.poll_duration.start_timer(); + Future::poll(this.inner, cx) + + // `_timer` is dropped here and will observe the duration + } +} + +impl fmt::Debug for PrometheusFuture +where + T: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&self.inner, f) + } +} diff --git a/client/service/src/task_manager/tests.rs b/client/service/src/task_manager/tests.rs new file mode 100644 index 0000000000000..27d9b0b9e9ad9 --- /dev/null +++ b/client/service/src/task_manager/tests.rs @@ -0,0 +1,310 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use crate::config::TaskExecutor; +use crate::task_manager::TaskManager; +use futures::{future::FutureExt, pin_mut, select}; +use parking_lot::Mutex; +use std::any::Any; +use std::sync::Arc; +use std::time::Duration; + +#[derive(Clone, Debug)] +struct DropTester(Arc>); + +struct DropTesterRef(DropTester); + +impl DropTester { + fn new() -> DropTester { + DropTester(Arc::new(Mutex::new(0))) + } + + fn new_ref(&self) -> DropTesterRef { + *self.0.lock() += 1; + DropTesterRef(self.clone()) + } +} + +impl PartialEq for DropTester { + fn eq(&self, other: &usize) -> bool { + &*self.0.lock() == other + } +} + +impl Drop for DropTesterRef { + fn drop(&mut self) { + *(self.0).0.lock() -= 1; + } +} + +#[test] +fn ensure_drop_tester_working() { + let drop_tester = DropTester::new(); + assert_eq!(drop_tester, 0); + let drop_tester_ref_1 = drop_tester.new_ref(); + assert_eq!(drop_tester, 1); + let drop_tester_ref_2 = drop_tester.new_ref(); + assert_eq!(drop_tester, 2); + drop(drop_tester_ref_1); + assert_eq!(drop_tester, 1); + drop(drop_tester_ref_2); + assert_eq!(drop_tester, 0); +} + +async fn run_background_task(_keep_alive: impl Any) { + loop { + tokio::time::delay_for(Duration::from_secs(1)).await; + } +} + +async fn run_background_task_blocking(duration: Duration, _keep_alive: impl 
Any) { + loop { + // block for X sec (not interruptible) + std::thread::sleep(duration); + // await for 1 sec (interruptible) + tokio::time::delay_for(Duration::from_secs(1)).await; + } +} + +#[test] +fn ensure_tasks_are_awaited_on_shutdown() { + let mut runtime = tokio::runtime::Runtime::new().unwrap(); + let handle = runtime.handle().clone(); + let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); + + let task_manager = TaskManager::new(task_executor, None).unwrap(); + let spawn_handle = task_manager.spawn_handle(); + let drop_tester = DropTester::new(); + spawn_handle.spawn("task1", run_background_task(drop_tester.new_ref())); + spawn_handle.spawn("task2", run_background_task(drop_tester.new_ref())); + assert_eq!(drop_tester, 2); + // allow the tasks to even start + runtime.block_on(async { tokio::time::delay_for(Duration::from_secs(1)).await }); + assert_eq!(drop_tester, 2); + runtime.block_on(task_manager.clean_shutdown()); + assert_eq!(drop_tester, 0); +} + +#[test] +fn ensure_keep_alive_during_shutdown() { + let mut runtime = tokio::runtime::Runtime::new().unwrap(); + let handle = runtime.handle().clone(); + let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); + + let mut task_manager = TaskManager::new(task_executor, None).unwrap(); + let spawn_handle = task_manager.spawn_handle(); + let drop_tester = DropTester::new(); + task_manager.keep_alive(drop_tester.new_ref()); + spawn_handle.spawn("task1", run_background_task(())); + assert_eq!(drop_tester, 1); + // allow the tasks to even start + runtime.block_on(async { tokio::time::delay_for(Duration::from_secs(1)).await }); + assert_eq!(drop_tester, 1); + runtime.block_on(task_manager.clean_shutdown()); + assert_eq!(drop_tester, 0); +} + +#[test] +fn ensure_blocking_futures_are_awaited_on_shutdown() { + let mut runtime = tokio::runtime::Runtime::new().unwrap(); + let handle = runtime.handle().clone(); + let task_executor: 
TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); + + let task_manager = TaskManager::new(task_executor, None).unwrap(); + let spawn_handle = task_manager.spawn_handle(); + let drop_tester = DropTester::new(); + spawn_handle.spawn( + "task1", + run_background_task_blocking(Duration::from_secs(3), drop_tester.new_ref()), + ); + spawn_handle.spawn( + "task2", + run_background_task_blocking(Duration::from_secs(3), drop_tester.new_ref()), + ); + assert_eq!(drop_tester, 2); + // allow the tasks to even start + runtime.block_on(async { tokio::time::delay_for(Duration::from_secs(1)).await }); + assert_eq!(drop_tester, 2); + runtime.block_on(task_manager.clean_shutdown()); + assert_eq!(drop_tester, 0); +} + +#[test] +fn ensure_no_task_can_be_spawn_after_terminate() { + let mut runtime = tokio::runtime::Runtime::new().unwrap(); + let handle = runtime.handle().clone(); + let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); + + let mut task_manager = TaskManager::new(task_executor, None).unwrap(); + let spawn_handle = task_manager.spawn_handle(); + let drop_tester = DropTester::new(); + spawn_handle.spawn("task1", run_background_task(drop_tester.new_ref())); + spawn_handle.spawn("task2", run_background_task(drop_tester.new_ref())); + assert_eq!(drop_tester, 2); + // allow the tasks to even start + runtime.block_on(async { tokio::time::delay_for(Duration::from_secs(1)).await }); + assert_eq!(drop_tester, 2); + task_manager.terminate(); + spawn_handle.spawn("task3", run_background_task(drop_tester.new_ref())); + runtime.block_on(task_manager.clean_shutdown()); + assert_eq!(drop_tester, 0); +} + +#[test] +fn ensure_task_manager_future_ends_when_task_manager_terminated() { + let mut runtime = tokio::runtime::Runtime::new().unwrap(); + let handle = runtime.handle().clone(); + let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); + + let mut task_manager = 
TaskManager::new(task_executor, None).unwrap(); + let spawn_handle = task_manager.spawn_handle(); + let drop_tester = DropTester::new(); + spawn_handle.spawn("task1", run_background_task(drop_tester.new_ref())); + spawn_handle.spawn("task2", run_background_task(drop_tester.new_ref())); + assert_eq!(drop_tester, 2); + // allow the tasks to even start + runtime.block_on(async { tokio::time::delay_for(Duration::from_secs(1)).await }); + assert_eq!(drop_tester, 2); + task_manager.terminate(); + runtime.block_on(task_manager.future()).expect("future has ended without error"); + runtime.block_on(task_manager.clean_shutdown()); + assert_eq!(drop_tester, 0); +} + +#[test] +fn ensure_task_manager_future_ends_with_error_when_essential_task_fails() { + let mut runtime = tokio::runtime::Runtime::new().unwrap(); + let handle = runtime.handle().clone(); + let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); + + let mut task_manager = TaskManager::new(task_executor, None).unwrap(); + let spawn_handle = task_manager.spawn_handle(); + let spawn_essential_handle = task_manager.spawn_essential_handle(); + let drop_tester = DropTester::new(); + spawn_handle.spawn("task1", run_background_task(drop_tester.new_ref())); + spawn_handle.spawn("task2", run_background_task(drop_tester.new_ref())); + assert_eq!(drop_tester, 2); + // allow the tasks to even start + runtime.block_on(async { tokio::time::delay_for(Duration::from_secs(1)).await }); + assert_eq!(drop_tester, 2); + spawn_essential_handle.spawn("task3", async { panic!("task failed") }); + runtime.block_on(task_manager.future()).expect_err("future()'s Result must be Err"); + assert_eq!(drop_tester, 2); + runtime.block_on(task_manager.clean_shutdown()); + assert_eq!(drop_tester, 0); +} + +#[test] +fn ensure_children_tasks_ends_when_task_manager_terminated() { + let mut runtime = tokio::runtime::Runtime::new().unwrap(); + let handle = runtime.handle().clone(); + let task_executor: TaskExecutor = 
(move |future, _| handle.spawn(future).map(|_| ())).into(); + + let mut task_manager = TaskManager::new(task_executor.clone(), None).unwrap(); + let child_1 = TaskManager::new(task_executor.clone(), None).unwrap(); + let spawn_handle_child_1 = child_1.spawn_handle(); + let child_2 = TaskManager::new(task_executor.clone(), None).unwrap(); + let spawn_handle_child_2 = child_2.spawn_handle(); + task_manager.add_child(child_1); + task_manager.add_child(child_2); + let spawn_handle = task_manager.spawn_handle(); + let drop_tester = DropTester::new(); + spawn_handle.spawn("task1", run_background_task(drop_tester.new_ref())); + spawn_handle.spawn("task2", run_background_task(drop_tester.new_ref())); + spawn_handle_child_1.spawn("task3", run_background_task(drop_tester.new_ref())); + spawn_handle_child_2.spawn("task4", run_background_task(drop_tester.new_ref())); + assert_eq!(drop_tester, 4); + // allow the tasks to even start + runtime.block_on(async { tokio::time::delay_for(Duration::from_secs(1)).await }); + assert_eq!(drop_tester, 4); + task_manager.terminate(); + runtime.block_on(task_manager.future()).expect("future has ended without error"); + runtime.block_on(task_manager.clean_shutdown()); + assert_eq!(drop_tester, 0); +} + +#[test] +fn ensure_task_manager_future_ends_with_error_when_childs_essential_task_fails() { + let mut runtime = tokio::runtime::Runtime::new().unwrap(); + let handle = runtime.handle().clone(); + let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); + + let mut task_manager = TaskManager::new(task_executor.clone(), None).unwrap(); + let child_1 = TaskManager::new(task_executor.clone(), None).unwrap(); + let spawn_handle_child_1 = child_1.spawn_handle(); + let spawn_essential_handle_child_1 = child_1.spawn_essential_handle(); + let child_2 = TaskManager::new(task_executor.clone(), None).unwrap(); + let spawn_handle_child_2 = child_2.spawn_handle(); + task_manager.add_child(child_1); + 
task_manager.add_child(child_2); + let spawn_handle = task_manager.spawn_handle(); + let drop_tester = DropTester::new(); + spawn_handle.spawn("task1", run_background_task(drop_tester.new_ref())); + spawn_handle.spawn("task2", run_background_task(drop_tester.new_ref())); + spawn_handle_child_1.spawn("task3", run_background_task(drop_tester.new_ref())); + spawn_handle_child_2.spawn("task4", run_background_task(drop_tester.new_ref())); + assert_eq!(drop_tester, 4); + // allow the tasks to even start + runtime.block_on(async { tokio::time::delay_for(Duration::from_secs(1)).await }); + assert_eq!(drop_tester, 4); + spawn_essential_handle_child_1.spawn("task5", async { panic!("task failed") }); + runtime.block_on(task_manager.future()).expect_err("future()'s Result must be Err"); + assert_eq!(drop_tester, 4); + runtime.block_on(task_manager.clean_shutdown()); + assert_eq!(drop_tester, 0); +} + +#[test] +fn ensure_task_manager_future_continues_when_childs_not_essential_task_fails() { + let mut runtime = tokio::runtime::Runtime::new().unwrap(); + let handle = runtime.handle().clone(); + let task_executor: TaskExecutor = (move |future, _| handle.spawn(future).map(|_| ())).into(); + + let mut task_manager = TaskManager::new(task_executor.clone(), None).unwrap(); + let child_1 = TaskManager::new(task_executor.clone(), None).unwrap(); + let spawn_handle_child_1 = child_1.spawn_handle(); + let child_2 = TaskManager::new(task_executor.clone(), None).unwrap(); + let spawn_handle_child_2 = child_2.spawn_handle(); + task_manager.add_child(child_1); + task_manager.add_child(child_2); + let spawn_handle = task_manager.spawn_handle(); + let drop_tester = DropTester::new(); + spawn_handle.spawn("task1", run_background_task(drop_tester.new_ref())); + spawn_handle.spawn("task2", run_background_task(drop_tester.new_ref())); + spawn_handle_child_1.spawn("task3", run_background_task(drop_tester.new_ref())); + spawn_handle_child_2.spawn("task4", run_background_task(drop_tester.new_ref())); 
+ assert_eq!(drop_tester, 4); + // allow the tasks to even start + runtime.block_on(async { tokio::time::delay_for(Duration::from_secs(1)).await }); + assert_eq!(drop_tester, 4); + spawn_handle_child_1.spawn("task5", async { panic!("task failed") }); + runtime.block_on(async { + let t1 = task_manager.future().fuse(); + let t2 = tokio::time::delay_for(Duration::from_secs(3)).fuse(); + + pin_mut!(t1, t2); + + select! { + res = t1 => panic!("task should not have stopped: {:?}", res), + _ = t2 => {}, + } + }); + assert_eq!(drop_tester, 4); + runtime.block_on(task_manager.clean_shutdown()); + assert_eq!(drop_tester, 0); +} diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml new file mode 100644 index 0000000000000..fde79d19ede71 --- /dev/null +++ b/client/service/test/Cargo.toml @@ -0,0 +1,44 @@ +[package] +name = "sc-service-test" +version = "2.0.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +publish = false +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +hex-literal = "0.3.1" +tempfile = "3.1.0" +tokio = "0.1.22" +futures01 = { package = "futures", version = "0.1.29" } +log = "0.4.8" +fdlimit = "0.2.0" +parking_lot = "0.10.0" +sc-light = { version = "2.0.0", path = "../../light" } +sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } +sp-api = { version = "2.0.0", path = "../../../primitives/api" } +sp-state-machine = { version = "0.8.0", path = "../../../primitives/state-machine" } +sp-externalities = { version = "0.8.0", path = "../../../primitives/externalities" } +sp-trie = { version = "2.0.0", path = "../../../primitives/trie" } +sp-storage = { version = "2.0.0", path = "../../../primitives/storage" } +sc-client-db = { version = "0.8.0", default-features = false, path = "../../db" } +futures = { version = 
"0.3.1", features = ["compat"] } +sc-service = { version = "0.8.0", default-features = false, features = ["test-helpers"], path = "../../service" } +sc-network = { version = "0.8.0", path = "../../network" } +sp-consensus = { version = "0.8.0", path = "../../../primitives/consensus/common" } +sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } +substrate-test-runtime = { version = "2.0.0", path = "../../../test-utils/runtime" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } +sc-client-api = { version = "2.0.0", path = "../../api" } +sc-block-builder = { version = "0.8.0", path = "../../block-builder" } +sc-executor = { version = "0.8.0", path = "../../executor" } +sp-panic-handler = { version = "2.0.0", path = "../../../primitives/panic-handler" } +parity-scale-codec = "1.3.4" +sp-tracing = { version = "2.0.0", path = "../../../primitives/tracing" } diff --git a/client/service/test/src/client/db.rs b/client/service/test/src/client/db.rs new file mode 100644 index 0000000000000..36d49732246e5 --- /dev/null +++ b/client/service/test/src/client/db.rs @@ -0,0 +1,57 @@ +// This file is part of Substrate. + +// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use sp_core::offchain::{OffchainStorage, storage::InMemOffchainStorage}; +use std::sync::Arc; + +type TestBackend = sc_client_api::in_mem::Backend; + +#[test] +fn test_leaves_with_complex_block_tree() { + let backend = Arc::new(TestBackend::new()); + + substrate_test_runtime_client::trait_tests::test_leaves_for_backend(backend); +} + +#[test] +fn test_blockchain_query_by_number_gets_canonical() { + let backend = Arc::new(TestBackend::new()); + + substrate_test_runtime_client::trait_tests::test_blockchain_query_by_number_gets_canonical(backend); +} + +#[test] +fn in_memory_offchain_storage() { + + let mut storage = InMemOffchainStorage::default(); + assert_eq!(storage.get(b"A", b"B"), None); + assert_eq!(storage.get(b"B", b"A"), None); + + storage.set(b"A", b"B", b"C"); + assert_eq!(storage.get(b"A", b"B"), Some(b"C".to_vec())); + assert_eq!(storage.get(b"B", b"A"), None); + + storage.compare_and_set(b"A", b"B", Some(b"X"), b"D"); + assert_eq!(storage.get(b"A", b"B"), Some(b"C".to_vec())); + storage.compare_and_set(b"A", b"B", Some(b"C"), b"D"); + assert_eq!(storage.get(b"A", b"B"), Some(b"D".to_vec())); + + assert!(!storage.compare_and_set(b"B", b"A", Some(b""), b"Y")); + assert!(storage.compare_and_set(b"B", b"A", None, b"X")); + assert_eq!(storage.get(b"B", b"A"), Some(b"X".to_vec())); +} diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs new file mode 100644 index 0000000000000..f38aef008e11c --- /dev/null +++ b/client/service/test/src/client/light.rs @@ -0,0 +1,907 @@ +// This file is part of Substrate. + +// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use sc_light::{ + call_executor::{ + GenesisCallExecutor, + check_execution_proof, + check_execution_proof_with_make_header, + }, + fetcher::LightDataChecker, + blockchain::{BlockchainCache, Blockchain}, + backend::{Backend, GenesisOrUnavailableState}, +}; +use std::sync::Arc; +use sp_runtime::{ + traits::{BlakeTwo256, HashFor, NumberFor}, + generic::BlockId, traits::{Block as _, Header as HeaderT}, Digest, +}; +use std::collections::HashMap; +use parking_lot::Mutex; +use substrate_test_runtime_client::{ + runtime::{Hash, Block, Header}, TestClient, ClientBlockImportExt, +}; +use sp_api::{InitializeBlock, StorageTransactionCache, ProofRecorder, OffchainOverlayedChanges}; +use sp_consensus::BlockOrigin; +use sc_executor::{NativeExecutor, WasmExecutionMethod, RuntimeVersion, NativeVersion}; +use sp_core::{H256, NativeOrEncoded, testing::TaskExecutor}; +use sc_client_api::{ + blockchain::Info, backend::NewBlockState, Backend as ClientBackend, ProofProvider, + in_mem::{Backend as InMemBackend, Blockchain as InMemoryBlockchain}, ProvideChtRoots, + AuxStore, Storage, CallExecutor, cht, ExecutionStrategy, StorageProof, BlockImportOperation, + RemoteCallRequest, StorageProvider, ChangesProof, RemoteBodyRequest, RemoteReadRequest, + RemoteChangesRequest, FetchChecker, 
RemoteReadChildRequest, RemoteHeaderRequest, BlockBackend, +}; +use sp_externalities::Extensions; +use sc_block_builder::BlockBuilderProvider; +use sp_blockchain::{ + BlockStatus, Result as ClientResult, Error as ClientError, CachedHeaderMetadata, + HeaderBackend, well_known_cache_keys +}; +use std::panic::UnwindSafe; +use std::cell::RefCell; +use sp_state_machine::{OverlayedChanges, ExecutionManager}; +use parity_scale_codec::{Decode, Encode}; +use super::prepare_client_with_key_changes; +use substrate_test_runtime_client::{ + AccountKeyring, runtime::{self, Extrinsic}, +}; + +use sp_core::{blake2_256, ChangesTrieConfiguration, storage::{well_known_keys, StorageKey, ChildInfo}}; +use sp_state_machine::Backend as _; + +pub type DummyBlockchain = Blockchain; + +pub struct DummyStorage { + pub changes_tries_cht_roots: HashMap, + pub aux_store: Mutex, Vec>>, +} + +impl DummyStorage { + pub fn new() -> Self { + DummyStorage { + changes_tries_cht_roots: HashMap::new(), + aux_store: Mutex::new(HashMap::new()), + } + } +} + +impl sp_blockchain::HeaderBackend for DummyStorage { + fn header(&self, _id: BlockId) -> ClientResult> { + Err(ClientError::Backend("Test error".into())) + } + + fn info(&self) -> Info { + panic!("Test error") + } + + fn status(&self, _id: BlockId) -> ClientResult { + Err(ClientError::Backend("Test error".into())) + } + + fn number(&self, hash: Hash) -> ClientResult>> { + if hash == Default::default() { + Ok(Some(Default::default())) + } else { + Err(ClientError::Backend("Test error".into())) + } + } + + fn hash(&self, number: u64) -> ClientResult> { + if number == 0 { + Ok(Some(Default::default())) + } else { + Err(ClientError::Backend("Test error".into())) + } + } +} + +impl sp_blockchain::HeaderMetadata for DummyStorage { + type Error = ClientError; + + fn header_metadata(&self, hash: Hash) -> Result, Self::Error> { + self.header(BlockId::hash(hash))?.map(|header| CachedHeaderMetadata::from(&header)) + .ok_or(ClientError::UnknownBlock("header not 
found".to_owned())) + } + fn insert_header_metadata(&self, _hash: Hash, _metadata: CachedHeaderMetadata) {} + fn remove_header_metadata(&self, _hash: Hash) {} +} + +impl AuxStore for DummyStorage { + fn insert_aux< + 'a, + 'b: 'a, + 'c: 'a, + I: IntoIterator, + D: IntoIterator, + >(&self, insert: I, _delete: D) -> ClientResult<()> { + for (k, v) in insert.into_iter() { + self.aux_store.lock().insert(k.to_vec(), v.to_vec()); + } + Ok(()) + } + + fn get_aux(&self, key: &[u8]) -> ClientResult>> { + Ok(self.aux_store.lock().get(key).cloned()) + } +} + +impl Storage for DummyStorage { + fn import_header( + &self, + _header: Header, + _cache: HashMap>, + _state: NewBlockState, + _aux_ops: Vec<(Vec, Option>)>, + ) -> ClientResult<()> { + Ok(()) + } + + fn set_head(&self, _block: BlockId) -> ClientResult<()> { + Err(ClientError::Backend("Test error".into())) + } + + fn finalize_header(&self, _block: BlockId) -> ClientResult<()> { + Err(ClientError::Backend("Test error".into())) + } + + fn last_finalized(&self) -> ClientResult { + Err(ClientError::Backend("Test error".into())) + } + + fn cache(&self) -> Option>> { + None + } + + fn usage_info(&self) -> Option { + None + } +} + +impl ProvideChtRoots for DummyStorage { + fn header_cht_root(&self, _cht_size: u64, _block: u64) -> ClientResult> { + Err(ClientError::Backend("Test error".into())) + } + + fn changes_trie_cht_root(&self, cht_size: u64, block: u64) -> ClientResult> { + cht::block_to_cht_number(cht_size, block) + .and_then(|cht_num| self.changes_tries_cht_roots.get(&cht_num)) + .cloned() + .ok_or_else(|| ClientError::Backend( + format!("Test error: CHT for block #{} not found", block) + ).into()) + .map(Some) + } +} + +struct DummyCallExecutor; + +impl CallExecutor for DummyCallExecutor { + type Error = ClientError; + + type Backend = substrate_test_runtime_client::Backend; + + fn call( + &self, + _id: &BlockId, + _method: &str, + _call_data: &[u8], + _strategy: ExecutionStrategy, + _extensions: Option, + ) -> Result, 
ClientError> { + Ok(vec![42]) + } + + fn contextual_call< + 'a, + IB: Fn() -> ClientResult<()>, + EM: Fn( + Result, Self::Error>, + Result, Self::Error> + ) -> Result, Self::Error>, + R: Encode + Decode + PartialEq, + NC: FnOnce() -> Result + UnwindSafe, + >( + &self, + _initialize_block_fn: IB, + _at: &BlockId, + _method: &str, + _call_data: &[u8], + _changes: &RefCell, + _offchain_changes: &RefCell, + _storage_transaction_cache: Option<&RefCell< + StorageTransactionCache< + Block, + >::State, + > + >>, + _initialize_block: InitializeBlock<'a, Block>, + _execution_manager: ExecutionManager, + _native_call: Option, + _proof_recorder: &Option>, + _extensions: Option, + ) -> ClientResult> where ExecutionManager: Clone { + unreachable!() + } + + fn runtime_version(&self, _id: &BlockId) -> Result { + unreachable!() + } + + fn prove_at_trie_state>>( + &self, + _trie_state: &sp_state_machine::TrieBackend>, + _overlay: &mut OverlayedChanges, + _method: &str, + _call_data: &[u8] + ) -> Result<(Vec, StorageProof), ClientError> { + unreachable!() + } + + fn native_runtime_version(&self) -> Option<&NativeVersion> { + unreachable!() + } +} + +fn local_executor() -> NativeExecutor { + NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8) +} + +#[test] +fn local_state_is_created_when_genesis_state_is_available() { + let def = Default::default(); + let header0 = substrate_test_runtime_client::runtime::Header::new(0, def, def, def, Default::default()); + + let backend: Backend<_, BlakeTwo256> = Backend::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + ); + let mut op = backend.begin_operation().unwrap(); + op.set_block_data(header0, None, None, NewBlockState::Final).unwrap(); + op.reset_storage(Default::default()).unwrap(); + backend.commit_operation(op).unwrap(); + + match backend.state_at(BlockId::Number(0)).unwrap() { + GenesisOrUnavailableState::Genesis(_) => (), + _ => panic!("unexpected state"), + } +} + +#[test] +fn 
unavailable_state_is_created_when_genesis_state_is_unavailable() { + let backend: Backend<_, BlakeTwo256> = Backend::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + ); + + match backend.state_at(BlockId::Number(0)).unwrap() { + GenesisOrUnavailableState::Unavailable => (), + _ => panic!("unexpected state"), + } +} + +#[test] +fn light_aux_store_is_updated_via_non_importing_op() { + let backend = Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new()))); + let mut op = ClientBackend::::begin_operation(&backend).unwrap(); + BlockImportOperation::::insert_aux(&mut op, vec![(vec![1], Some(vec![2]))]).unwrap(); + ClientBackend::::commit_operation(&backend, op).unwrap(); + + assert_eq!(AuxStore::get_aux(&backend, &[1]).unwrap(), Some(vec![2])); +} + +#[test] +fn execution_proof_is_generated_and_checked() { + fn execute(remote_client: &TestClient, at: u64, method: &'static str) -> (Vec, Vec) { + let remote_block_id = BlockId::Number(at); + let remote_header = remote_client.header(&remote_block_id).unwrap().unwrap(); + + // 'fetch' execution proof from remote node + let (remote_result, remote_execution_proof) = remote_client.execution_proof( + &remote_block_id, + method, + &[] + ).unwrap(); + + // check remote execution proof locally + let local_result = check_execution_proof::<_, _, BlakeTwo256>( + &local_executor(), + Box::new(TaskExecutor::new()), + &RemoteCallRequest { + block: substrate_test_runtime_client::runtime::Hash::default(), + header: remote_header, + method: method.into(), + call_data: vec![], + retry_count: None, + }, + remote_execution_proof, + ).unwrap(); + + (remote_result, local_result) + } + + fn execute_with_proof_failure(remote_client: &TestClient, at: u64, method: &'static str) { + let remote_block_id = BlockId::Number(at); + let remote_header = remote_client.header(&remote_block_id).unwrap().unwrap(); + + // 'fetch' execution proof from remote node + let (_, remote_execution_proof) = remote_client.execution_proof( + 
&remote_block_id, + method, + &[] + ).unwrap(); + + // check remote execution proof locally + let execution_result = check_execution_proof_with_make_header::<_, _, BlakeTwo256, _>( + &local_executor(), + Box::new(TaskExecutor::new()), + &RemoteCallRequest { + block: substrate_test_runtime_client::runtime::Hash::default(), + header: remote_header, + method: method.into(), + call_data: vec![], + retry_count: None, + }, + remote_execution_proof, + |header|

::new( + at + 1, + Default::default(), + Default::default(), + header.hash(), + header.digest().clone(), // this makes next header wrong + ), + ); + match execution_result { + Err(sp_blockchain::Error::Execution(_)) => (), + _ => panic!("Unexpected execution result: {:?}", execution_result), + } + } + + // prepare remote client + let mut remote_client = substrate_test_runtime_client::new(); + for i in 1u32..3u32 { + let mut digest = Digest::default(); + digest.push(sp_runtime::generic::DigestItem::Other::(i.to_le_bytes().to_vec())); + remote_client.import_justified( + BlockOrigin::Own, + remote_client.new_block(digest).unwrap().build().unwrap().block, + Default::default(), + ).unwrap(); + } + + // check method that doesn't requires environment + let (remote, local) = execute(&remote_client, 0, "Core_version"); + assert_eq!(remote, local); + + let (remote, local) = execute(&remote_client, 2, "Core_version"); + assert_eq!(remote, local); + + // check method that requires environment + let (_, block) = execute(&remote_client, 0, "BlockBuilder_finalize_block"); + let local_block: Header = Decode::decode(&mut &block[..]).unwrap(); + assert_eq!(local_block.number, 1); + + let (_, block) = execute(&remote_client, 2, "BlockBuilder_finalize_block"); + let local_block: Header = Decode::decode(&mut &block[..]).unwrap(); + assert_eq!(local_block.number, 3); + + // check that proof check doesn't panic even if proof is incorrect AND no panic handler is set + execute_with_proof_failure(&remote_client, 2, "Core_version"); + + // check that proof check doesn't panic even if proof is incorrect AND panic handler is set + sp_panic_handler::set("TEST", "1.2.3"); + execute_with_proof_failure(&remote_client, 2, "Core_version"); +} + +#[test] +fn code_is_executed_at_genesis_only() { + let backend = Arc::new(InMemBackend::::new()); + let def = H256::default(); + let header0 = substrate_test_runtime_client::runtime::Header::new(0, def, def, def, Default::default()); + let hash0 = 
header0.hash(); + let header1 = substrate_test_runtime_client::runtime::Header::new(1, def, def, hash0, Default::default()); + let hash1 = header1.hash(); + backend.blockchain().insert(hash0, header0, None, None, NewBlockState::Final).unwrap(); + backend.blockchain().insert(hash1, header1, None, None, NewBlockState::Final).unwrap(); + + let genesis_executor = GenesisCallExecutor::new(backend, DummyCallExecutor); + assert_eq!( + genesis_executor.call( + &BlockId::Number(0), + "test_method", + &[], + ExecutionStrategy::NativeElseWasm, + None, + ).unwrap(), + vec![42], + ); + + let call_on_unavailable = genesis_executor.call( + &BlockId::Number(1), + "test_method", + &[], + ExecutionStrategy::NativeElseWasm, + None, + ); + + match call_on_unavailable { + Err(ClientError::NotAvailableOnLightClient) => (), + _ => unreachable!("unexpected result: {:?}", call_on_unavailable), + } +} + + +type TestChecker = LightDataChecker< + NativeExecutor, + BlakeTwo256, + Block, + DummyStorage, +>; + +fn prepare_for_read_proof_check() -> (TestChecker, Header, StorageProof, u32) { + // prepare remote client + let remote_client = substrate_test_runtime_client::new(); + let remote_block_id = BlockId::Number(0); + let remote_block_hash = remote_client.block_hash(0).unwrap().unwrap(); + let mut remote_block_header = remote_client.header(&remote_block_id).unwrap().unwrap(); + remote_block_header.state_root = remote_client.state_at(&remote_block_id).unwrap() + .storage_root(::std::iter::empty()).0.into(); + + // 'fetch' read proof from remote node + let heap_pages = remote_client.storage(&remote_block_id, &StorageKey(well_known_keys::HEAP_PAGES.to_vec())) + .unwrap() + .and_then(|v| Decode::decode(&mut &v.0[..]).ok()).unwrap(); + let remote_read_proof = remote_client.read_proof( + &remote_block_id, + &mut std::iter::once(well_known_keys::HEAP_PAGES), + ).unwrap(); + + // check remote read proof locally + let local_storage = InMemoryBlockchain::::new(); + local_storage.insert( + 
remote_block_hash, + remote_block_header.clone(), + None, + None, + NewBlockState::Final, + ).unwrap(); + let local_checker = LightDataChecker::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + local_executor(), + Box::new(TaskExecutor::new()), + ); + (local_checker, remote_block_header, remote_read_proof, heap_pages) +} + +fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, Vec) { + use substrate_test_runtime_client::DefaultTestClientBuilderExt; + use substrate_test_runtime_client::TestClientBuilderExt; + let child_info = ChildInfo::new_default(b"child1"); + let child_info = &child_info; + // prepare remote client + let remote_client = substrate_test_runtime_client::TestClientBuilder::new() + .add_extra_child_storage( + child_info, + b"key1".to_vec(), + b"value1".to_vec(), + ).build(); + let remote_block_id = BlockId::Number(0); + let remote_block_hash = remote_client.block_hash(0).unwrap().unwrap(); + let mut remote_block_header = remote_client.header(&remote_block_id).unwrap().unwrap(); + remote_block_header.state_root = remote_client.state_at(&remote_block_id).unwrap() + .storage_root(::std::iter::empty()).0.into(); + + // 'fetch' child read proof from remote node + let child_value = remote_client.child_storage( + &remote_block_id, + child_info, + &StorageKey(b"key1".to_vec()), + ).unwrap().unwrap().0; + assert_eq!(b"value1"[..], child_value[..]); + let remote_read_proof = remote_client.read_child_proof( + &remote_block_id, + child_info, + &mut std::iter::once("key1".as_bytes()), + ).unwrap(); + + // check locally + let local_storage = InMemoryBlockchain::::new(); + local_storage.insert( + remote_block_hash, + remote_block_header.clone(), + None, + None, + NewBlockState::Final, + ).unwrap(); + let local_checker = LightDataChecker::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + local_executor(), + Box::new(TaskExecutor::new()), + ); + (local_checker, remote_block_header, remote_read_proof, child_value) +} + 
+fn prepare_for_header_proof_check(insert_cht: bool) -> (TestChecker, Hash, Header, StorageProof) { + // prepare remote client + let mut remote_client = substrate_test_runtime_client::new(); + let mut local_headers_hashes = Vec::new(); + for i in 0..4 { + let block = remote_client.new_block(Default::default()).unwrap().build().unwrap().block; + remote_client.import(BlockOrigin::Own, block).unwrap(); + local_headers_hashes.push( + remote_client.block_hash(i + 1) + .map_err(|_| ClientError::Backend("TestError".into())) + ); + } + + // 'fetch' header proof from remote node + let remote_block_id = BlockId::Number(1); + let (remote_block_header, remote_header_proof) = remote_client.header_proof_with_cht_size(&remote_block_id, 4).unwrap(); + + // check remote read proof locally + let local_storage = InMemoryBlockchain::::new(); + let local_cht_root = cht::compute_root::(4, 0, local_headers_hashes).unwrap(); + if insert_cht { + local_storage.insert_cht_root(1, local_cht_root); + } + let local_checker = LightDataChecker::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + local_executor(), + Box::new(TaskExecutor::new()), + ); + (local_checker, local_cht_root, remote_block_header, remote_header_proof) +} + +fn header_with_computed_extrinsics_root(extrinsics: Vec) -> Header { + use sp_trie::{TrieConfiguration, trie_types::Layout}; + let iter = extrinsics.iter().map(Encode::encode); + let extrinsics_root = Layout::::ordered_trie_root(iter); + + // only care about `extrinsics_root` + Header::new(0, extrinsics_root, H256::zero(), H256::zero(), Default::default()) +} + +#[test] +fn storage_read_proof_is_generated_and_checked() { + let (local_checker, remote_block_header, remote_read_proof, heap_pages) = prepare_for_read_proof_check(); + assert_eq!((&local_checker as &dyn FetchChecker).check_read_proof(&RemoteReadRequest::
{ + block: remote_block_header.hash(), + header: remote_block_header, + keys: vec![well_known_keys::HEAP_PAGES.to_vec()], + retry_count: None, + }, remote_read_proof).unwrap().remove(well_known_keys::HEAP_PAGES).unwrap().unwrap()[0], heap_pages as u8); +} + +#[test] +fn storage_child_read_proof_is_generated_and_checked() { + let child_info = ChildInfo::new_default(&b"child1"[..]); + let ( + local_checker, + remote_block_header, + remote_read_proof, + result, + ) = prepare_for_read_child_proof_check(); + assert_eq!((&local_checker as &dyn FetchChecker).check_read_child_proof( + &RemoteReadChildRequest::
{ + block: remote_block_header.hash(), + header: remote_block_header, + storage_key: child_info.prefixed_storage_key(), + keys: vec![b"key1".to_vec()], + retry_count: None, + }, + remote_read_proof + ).unwrap().remove(b"key1".as_ref()).unwrap().unwrap(), result); +} + +#[test] +fn header_proof_is_generated_and_checked() { + let (local_checker, local_cht_root, remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true); + assert_eq!((&local_checker as &dyn FetchChecker).check_header_proof(&RemoteHeaderRequest::
{ + cht_root: local_cht_root, + block: 1, + retry_count: None, + }, Some(remote_block_header.clone()), remote_header_proof).unwrap(), remote_block_header); +} + +#[test] +fn check_header_proof_fails_if_cht_root_is_invalid() { + let (local_checker, _, mut remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true); + remote_block_header.number = 100; + assert!((&local_checker as &dyn FetchChecker).check_header_proof(&RemoteHeaderRequest::
{ + cht_root: Default::default(), + block: 1, + retry_count: None, + }, Some(remote_block_header.clone()), remote_header_proof).is_err()); +} + +#[test] +fn check_header_proof_fails_if_invalid_header_provided() { + let (local_checker, local_cht_root, mut remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true); + remote_block_header.number = 100; + assert!((&local_checker as &dyn FetchChecker).check_header_proof(&RemoteHeaderRequest::
{ + cht_root: local_cht_root, + block: 1, + retry_count: None, + }, Some(remote_block_header.clone()), remote_header_proof).is_err()); +} + +#[test] +fn changes_proof_is_generated_and_checked_when_headers_are_not_pruned() { + let (remote_client, local_roots, test_cases) = prepare_client_with_key_changes(); + let local_checker = TestChecker::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + local_executor(), + Box::new(TaskExecutor::new()), + ); + let local_checker = &local_checker as &dyn FetchChecker; + let max = remote_client.chain_info().best_number; + let max_hash = remote_client.chain_info().best_hash; + + for (index, (begin, end, key, expected_result)) in test_cases.into_iter().enumerate() { + let begin_hash = remote_client.block_hash(begin).unwrap().unwrap(); + let end_hash = remote_client.block_hash(end).unwrap().unwrap(); + + // 'fetch' changes proof from remote node + let key = StorageKey(key); + let remote_proof = remote_client.key_changes_proof( + begin_hash, end_hash, begin_hash, max_hash, None, &key + ).unwrap(); + + // check proof on local client + let local_roots_range = local_roots.clone()[(begin - 1) as usize..].to_vec(); + let config = ChangesTrieConfiguration::new(4, 2); + let request = RemoteChangesRequest::
{ + changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { + zero: (0, Default::default()), + end: None, + config: Some(config), + }], + first_block: (begin, begin_hash), + last_block: (end, end_hash), + max_block: (max, max_hash), + tries_roots: (begin, begin_hash, local_roots_range), + key: key.0, + storage_key: None, + retry_count: None, + }; + let local_result = local_checker.check_changes_proof(&request, ChangesProof { + max_block: remote_proof.max_block, + proof: remote_proof.proof, + roots: remote_proof.roots, + roots_proof: remote_proof.roots_proof, + }).unwrap(); + + // ..and ensure that result is the same as on remote node + match local_result == expected_result { + true => (), + false => panic!(format!("Failed test {}: local = {:?}, expected = {:?}", + index, local_result, expected_result)), + } + } +} + +#[test] +fn changes_proof_is_generated_and_checked_when_headers_are_pruned() { + // we're testing this test case here: + // (1, 4, dave.clone(), vec![(4, 0), (1, 1), (1, 0)]), + let (remote_client, remote_roots, _) = prepare_client_with_key_changes(); + let dave = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Dave.into())).to_vec(); + let dave = StorageKey(dave); + + // 'fetch' changes proof from remote node: + // we're fetching changes for range b1..b4 + // we do not know changes trie roots before b3 (i.e. 
we only know b3+b4) + // but we have changes trie CHT root for b1...b4 + let b1 = remote_client.block_hash_from_id(&BlockId::Number(1)).unwrap().unwrap(); + let b3 = remote_client.block_hash_from_id(&BlockId::Number(3)).unwrap().unwrap(); + let b4 = remote_client.block_hash_from_id(&BlockId::Number(4)).unwrap().unwrap(); + let remote_proof = remote_client.key_changes_proof_with_cht_size( + b1, b4, b3, b4, None, &dave, 4 + ).unwrap(); + + // prepare local checker, having a root of changes trie CHT#0 + let local_cht_root = cht::compute_root::(4, 0, remote_roots.iter().cloned().map(|ct| Ok(Some(ct)))).unwrap(); + let mut local_storage = DummyStorage::new(); + local_storage.changes_tries_cht_roots.insert(0, local_cht_root); + let local_checker = TestChecker::new( + Arc::new(DummyBlockchain::new(local_storage)), + local_executor(), + Box::new(TaskExecutor::new()), + ); + + // check proof on local client + let config = ChangesTrieConfiguration::new(4, 2); + let request = RemoteChangesRequest::
{ + changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { + zero: (0, Default::default()), + end: None, + config: Some(config), + }], + first_block: (1, b1), + last_block: (4, b4), + max_block: (4, b4), + tries_roots: (3, b3, vec![remote_roots[2].clone(), remote_roots[3].clone()]), + storage_key: None, + key: dave.0, + retry_count: None, + }; + let local_result = local_checker.check_changes_proof_with_cht_size(&request, ChangesProof { + max_block: remote_proof.max_block, + proof: remote_proof.proof, + roots: remote_proof.roots, + roots_proof: remote_proof.roots_proof, + }, 4).unwrap(); + + assert_eq!(local_result, vec![(4, 0), (1, 1), (1, 0)]); +} + +#[test] +fn check_changes_proof_fails_if_proof_is_wrong() { + let (remote_client, local_roots, test_cases) = prepare_client_with_key_changes(); + let local_checker = TestChecker::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + local_executor(), + Box::new(TaskExecutor::new()), + ); + let local_checker = &local_checker as &dyn FetchChecker; + let max = remote_client.chain_info().best_number; + let max_hash = remote_client.chain_info().best_hash; + + let (begin, end, key, _) = test_cases[0].clone(); + let begin_hash = remote_client.block_hash(begin).unwrap().unwrap(); + let end_hash = remote_client.block_hash(end).unwrap().unwrap(); + + // 'fetch' changes proof from remote node + let key = StorageKey(key); + let remote_proof = remote_client.key_changes_proof( + begin_hash, end_hash, begin_hash, max_hash, None, &key).unwrap(); + + let local_roots_range = local_roots.clone()[(begin - 1) as usize..].to_vec(); + let config = ChangesTrieConfiguration::new(4, 2); + let request = RemoteChangesRequest::
{ + changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { + zero: (0, Default::default()), + end: None, + config: Some(config), + }], + first_block: (begin, begin_hash), + last_block: (end, end_hash), + max_block: (max, max_hash), + tries_roots: (begin, begin_hash, local_roots_range.clone()), + storage_key: None, + key: key.0, + retry_count: None, + }; + + // check proof on local client using max from the future + assert!(local_checker.check_changes_proof(&request, ChangesProof { + max_block: remote_proof.max_block + 1, + proof: remote_proof.proof.clone(), + roots: remote_proof.roots.clone(), + roots_proof: remote_proof.roots_proof.clone(), + }).is_err()); + + // check proof on local client using broken proof + assert!(local_checker.check_changes_proof(&request, ChangesProof { + max_block: remote_proof.max_block, + proof: local_roots_range.clone().into_iter().map(|v| v.as_ref().to_vec()).collect(), + roots: remote_proof.roots, + roots_proof: remote_proof.roots_proof, + }).is_err()); + + // extra roots proofs are provided + assert!(local_checker.check_changes_proof(&request, ChangesProof { + max_block: remote_proof.max_block, + proof: remote_proof.proof.clone(), + roots: vec![(begin - 1, Default::default())].into_iter().collect(), + roots_proof: StorageProof::empty(), + }).is_err()); + assert!(local_checker.check_changes_proof(&request, ChangesProof { + max_block: remote_proof.max_block, + proof: remote_proof.proof.clone(), + roots: vec![(end + 1, Default::default())].into_iter().collect(), + roots_proof: StorageProof::empty(), + }).is_err()); +} + +#[test] +fn check_changes_tries_proof_fails_if_proof_is_wrong() { + // we're testing this test case here: + // (1, 4, dave.clone(), vec![(4, 0), (1, 1), (1, 0)]), + let (remote_client, remote_roots, _) = prepare_client_with_key_changes(); + let local_cht_root = cht::compute_root::( + 4, 0, remote_roots.iter().cloned().map(|ct| Ok(Some(ct)))).unwrap(); + let dave = 
blake2_256(&runtime::system::balance_of_key(AccountKeyring::Dave.into())).to_vec(); + let dave = StorageKey(dave); + + // 'fetch' changes proof from remote node: + // we're fetching changes for range b1..b4 + // we do not know changes trie roots before b3 (i.e. we only know b3+b4) + // but we have changes trie CHT root for b1...b4 + let b1 = remote_client.block_hash_from_id(&BlockId::Number(1)).unwrap().unwrap(); + let b3 = remote_client.block_hash_from_id(&BlockId::Number(3)).unwrap().unwrap(); + let b4 = remote_client.block_hash_from_id(&BlockId::Number(4)).unwrap().unwrap(); + let remote_proof = remote_client.key_changes_proof_with_cht_size( + b1, b4, b3, b4, None, &dave, 4 + ).unwrap(); + + // fails when changes trie CHT is missing from the local db + let local_checker = TestChecker::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + local_executor(), + Box::new(TaskExecutor::new()), + ); + assert!(local_checker.check_changes_tries_proof(4, &remote_proof.roots, + remote_proof.roots_proof.clone()).is_err()); + + // fails when proof is broken + let mut local_storage = DummyStorage::new(); + local_storage.changes_tries_cht_roots.insert(0, local_cht_root); + let local_checker = TestChecker::new( + Arc::new(DummyBlockchain::new(local_storage)), + local_executor(), + Box::new(TaskExecutor::new()), + ); + let result = local_checker.check_changes_tries_proof( + 4, &remote_proof.roots, StorageProof::empty() + ); + assert!(result.is_err()); +} + +#[test] +fn check_body_proof_faulty() { + let header = header_with_computed_extrinsics_root( + vec![Extrinsic::IncludeData(vec![1, 2, 3, 4])] + ); + let block = Block::new(header.clone(), Vec::new()); + + let local_checker = TestChecker::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + local_executor(), + Box::new(TaskExecutor::new()), + ); + + let body_request = RemoteBodyRequest { + header: header.clone(), + retry_count: None, + }; + + assert!( + local_checker.check_body_proof(&body_request, 
block.extrinsics).is_err(), + "vec![1, 2, 3, 4] != vec![]" + ); +} + +#[test] +fn check_body_proof_of_same_data_should_succeed() { + let extrinsics = vec![Extrinsic::IncludeData(vec![1, 2, 3, 4, 5, 6, 7, 8, 255])]; + + let header = header_with_computed_extrinsics_root(extrinsics.clone()); + let block = Block::new(header.clone(), extrinsics); + + let local_checker = TestChecker::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + local_executor(), + Box::new(TaskExecutor::new()), + ); + + let body_request = RemoteBodyRequest { + header: header.clone(), + retry_count: None, + }; + + assert!(local_checker.check_body_proof(&body_request, block.extrinsics).is_ok()); +} diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs new file mode 100644 index 0000000000000..34b063a3e3484 --- /dev/null +++ b/client/service/test/src/client/mod.rs @@ -0,0 +1,1858 @@ +// This file is part of Substrate. + +// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use parity_scale_codec::{Encode, Decode, Joiner}; +use sc_executor::native_executor_instance; +use sp_state_machine::{StateMachine, OverlayedChanges, ExecutionStrategy, InMemoryBackend}; +use substrate_test_runtime_client::{ + prelude::*, + runtime::{ + self, genesismap::{GenesisConfig, insert_genesis_block}, + Hash, Transfer, Block, BlockNumber, Header, Digest, RuntimeApi, + }, + AccountKeyring, Sr25519Keyring, TestClientBuilder, ClientBlockImportExt, + BlockBuilderExt, DefaultTestClientBuilderExt, TestClientBuilderExt, ClientExt, +}; +use sc_client_api::{ + StorageProvider, BlockBackend, in_mem, BlockchainEvents, +}; +use sc_client_db::{Backend, DatabaseSettings, DatabaseSettingsSrc, PruningMode}; +use sc_block_builder::BlockBuilderProvider; +use sc_service::client::{self, Client, LocalCallExecutor, new_in_mem}; +use sp_runtime::traits::{ + BlakeTwo256, Block as BlockT, Header as HeaderT, +}; +use substrate_test_runtime::TestAPI; +use sp_state_machine::backend::Backend as _; +use sp_api::{ProvideRuntimeApi, OffchainOverlayedChanges}; +use sp_core::{H256, ChangesTrieConfiguration, blake2_256, testing::TaskExecutor}; +use std::collections::{HashMap, HashSet}; +use std::sync::Arc; +use sp_consensus::{ + BlockOrigin, SelectChain, BlockImport, Error as ConsensusError, BlockCheckParams, ImportResult, + BlockStatus, BlockImportParams, ForkChoiceStrategy, +}; +use sp_storage::StorageKey; +use sp_trie::{TrieConfiguration, trie_types::Layout}; +use sp_runtime::{generic::BlockId, DigestItem}; +use hex_literal::hex; + +mod light; +mod db; + +native_executor_instance!( + Executor, + substrate_test_runtime_client::runtime::api::dispatch, + substrate_test_runtime_client::runtime::native_version, +); + +fn executor() -> sc_executor::NativeExecutor { + sc_executor::NativeExecutor::new( + sc_executor::WasmExecutionMethod::Interpreted, + None, + 8, + ) +} + +pub fn prepare_client_with_key_changes() -> ( + client::Client< + substrate_test_runtime_client::Backend, + 
substrate_test_runtime_client::Executor, + Block, + RuntimeApi + >, + Vec, + Vec<(u64, u64, Vec, Vec<(u64, u32)>)>, +) { + // prepare block structure + let blocks_transfers = vec![ + vec![(AccountKeyring::Alice, AccountKeyring::Dave), (AccountKeyring::Bob, AccountKeyring::Dave)], + vec![(AccountKeyring::Charlie, AccountKeyring::Eve)], + vec![], + vec![(AccountKeyring::Alice, AccountKeyring::Dave)], + ]; + + // prepare client ang import blocks + let mut local_roots = Vec::new(); + let config = Some(ChangesTrieConfiguration::new(4, 2)); + let mut remote_client = TestClientBuilder::new().changes_trie_config(config).build(); + let mut nonces: HashMap<_, u64> = Default::default(); + for (i, block_transfers) in blocks_transfers.into_iter().enumerate() { + let mut builder = remote_client.new_block(Default::default()).unwrap(); + for (from, to) in block_transfers { + builder.push_transfer(Transfer { + from: from.into(), + to: to.into(), + amount: 1, + nonce: *nonces.entry(from).and_modify(|n| { *n = *n + 1 }).or_default(), + }).unwrap(); + } + let block = builder.build().unwrap().block; + remote_client.import(BlockOrigin::Own, block).unwrap(); + + let header = remote_client.header(&BlockId::Number(i as u64 + 1)).unwrap().unwrap(); + let trie_root = header.digest().log(DigestItem::as_changes_trie_root) + .map(|root| H256::from_slice(root.as_ref())) + .unwrap(); + local_roots.push(trie_root); + } + + // prepare test cases + let alice = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Alice.into())).to_vec(); + let bob = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Bob.into())).to_vec(); + let charlie = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Charlie.into())).to_vec(); + let dave = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Dave.into())).to_vec(); + let eve = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Eve.into())).to_vec(); + let ferdie = 
blake2_256(&runtime::system::balance_of_key(AccountKeyring::Ferdie.into())).to_vec(); + let test_cases = vec![ + (1, 4, alice.clone(), vec![(4, 0), (1, 0)]), + (1, 3, alice.clone(), vec![(1, 0)]), + (2, 4, alice.clone(), vec![(4, 0)]), + (2, 3, alice.clone(), vec![]), + (1, 4, bob.clone(), vec![(1, 1)]), + (1, 1, bob.clone(), vec![(1, 1)]), + (2, 4, bob.clone(), vec![]), + (1, 4, charlie.clone(), vec![(2, 0)]), + (1, 4, dave.clone(), vec![(4, 0), (1, 1), (1, 0)]), + (1, 1, dave.clone(), vec![(1, 1), (1, 0)]), + (3, 4, dave.clone(), vec![(4, 0)]), + (1, 4, eve.clone(), vec![(2, 0)]), + (1, 1, eve.clone(), vec![]), + (3, 4, eve.clone(), vec![]), + (1, 4, ferdie.clone(), vec![]), + ]; + + (remote_client, local_roots, test_cases) +} + +fn construct_block( + backend: &InMemoryBackend, + number: BlockNumber, + parent_hash: Hash, + state_root: Hash, + txs: Vec, +) -> (Vec, Hash) { + let transactions = txs.into_iter().map(|tx| tx.into_signed_tx()).collect::>(); + + let iter = transactions.iter().map(Encode::encode); + let extrinsics_root = Layout::::ordered_trie_root(iter).into(); + + let mut header = Header { + parent_hash, + number, + state_root, + extrinsics_root, + digest: Digest { logs: vec![] }, + }; + let hash = header.hash(); + let mut overlay = OverlayedChanges::default(); + let mut offchain_overlay = OffchainOverlayedChanges::default(); + let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&backend); + let runtime_code = backend_runtime_code.runtime_code().expect("Code is part of the backend"); + let task_executor = Box::new(TaskExecutor::new()); + + StateMachine::new( + backend, + sp_state_machine::disabled_changes_trie_state::<_, u64>(), + &mut overlay, + &mut offchain_overlay, + &executor(), + "Core_initialize_block", + &header.encode(), + Default::default(), + &runtime_code, + task_executor.clone() as Box<_>, + ).execute( + ExecutionStrategy::NativeElseWasm, + ).unwrap(); + + for tx in transactions.iter() { + StateMachine::new( + 
backend, + sp_state_machine::disabled_changes_trie_state::<_, u64>(), + &mut overlay, + &mut offchain_overlay, + &executor(), + "BlockBuilder_apply_extrinsic", + &tx.encode(), + Default::default(), + &runtime_code, + task_executor.clone() as Box<_>, + ).execute( + ExecutionStrategy::NativeElseWasm, + ).unwrap(); + } + + let ret_data = StateMachine::new( + backend, + sp_state_machine::disabled_changes_trie_state::<_, u64>(), + &mut overlay, + &mut offchain_overlay, + &executor(), + "BlockBuilder_finalize_block", + &[], + Default::default(), + &runtime_code, + task_executor.clone() as Box<_>, + ).execute( + ExecutionStrategy::NativeElseWasm, + ).unwrap(); + header = Header::decode(&mut &ret_data[..]).unwrap(); + + (vec![].and(&Block { header, extrinsics: transactions }), hash) +} + +fn block1(genesis_hash: Hash, backend: &InMemoryBackend) -> (Vec, Hash) { + construct_block( + backend, + 1, + genesis_hash, + hex!("25e5b37074063ab75c889326246640729b40d0c86932edc527bc80db0e04fe5c").into(), + vec![Transfer { + from: AccountKeyring::One.into(), + to: AccountKeyring::Two.into(), + amount: 69, + nonce: 0, + }], + ) +} + +#[test] +fn construct_genesis_should_work_with_native() { + let mut storage = GenesisConfig::new( + None, + vec![Sr25519Keyring::One.public().into(), Sr25519Keyring::Two.public().into()], + vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], + 1000, + None, + Default::default(), + ).genesis_map(); + let genesis_hash = insert_genesis_block(&mut storage); + + let backend = InMemoryBackend::from(storage); + let (b1data, _b1hash) = block1(genesis_hash, &backend); + let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&backend); + let runtime_code = backend_runtime_code.runtime_code().expect("Code is part of the backend"); + + let mut overlay = OverlayedChanges::default(); + let mut offchain_overlay = OffchainOverlayedChanges::default(); + + let _ = StateMachine::new( + &backend, + 
sp_state_machine::disabled_changes_trie_state::<_, u64>(), + &mut overlay, + &mut offchain_overlay, + &executor(), + "Core_execute_block", + &b1data, + Default::default(), + &runtime_code, + TaskExecutor::new(), + ).execute( + ExecutionStrategy::NativeElseWasm, + ).unwrap(); +} + +#[test] +fn construct_genesis_should_work_with_wasm() { + let mut storage = GenesisConfig::new( + None, + vec![Sr25519Keyring::One.public().into(), Sr25519Keyring::Two.public().into()], + vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], + 1000, + None, + Default::default(), + ).genesis_map(); + let genesis_hash = insert_genesis_block(&mut storage); + + let backend = InMemoryBackend::from(storage); + let (b1data, _b1hash) = block1(genesis_hash, &backend); + let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&backend); + let runtime_code = backend_runtime_code.runtime_code().expect("Code is part of the backend"); + + let mut overlay = OverlayedChanges::default(); + let mut offchain_overlay = OffchainOverlayedChanges::default(); + + let _ = StateMachine::new( + &backend, + sp_state_machine::disabled_changes_trie_state::<_, u64>(), + &mut overlay, + &mut offchain_overlay, + &executor(), + "Core_execute_block", + &b1data, + Default::default(), + &runtime_code, + TaskExecutor::new(), + ).execute( + ExecutionStrategy::AlwaysWasm, + ).unwrap(); +} + +#[test] +fn construct_genesis_with_bad_transaction_should_panic() { + let mut storage = GenesisConfig::new( + None, + vec![Sr25519Keyring::One.public().into(), Sr25519Keyring::Two.public().into()], + vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], + 68, + None, + Default::default(), + ).genesis_map(); + let genesis_hash = insert_genesis_block(&mut storage); + + let backend = InMemoryBackend::from(storage); + let (b1data, _b1hash) = block1(genesis_hash, &backend); + let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&backend); + let runtime_code = 
backend_runtime_code.runtime_code().expect("Code is part of the backend"); + + let mut overlay = OverlayedChanges::default(); + let mut offchain_overlay = OffchainOverlayedChanges::default(); + + let r = StateMachine::new( + &backend, + sp_state_machine::disabled_changes_trie_state::<_, u64>(), + &mut overlay, + &mut offchain_overlay, + &executor(), + "Core_execute_block", + &b1data, + Default::default(), + &runtime_code, + TaskExecutor::new(), + ).execute( + ExecutionStrategy::NativeElseWasm, + ); + assert!(r.is_err()); +} + + +#[test] +fn client_initializes_from_genesis_ok() { + let client = substrate_test_runtime_client::new(); + + assert_eq!( + client.runtime_api().balance_of( + &BlockId::Number(client.chain_info().best_number), + AccountKeyring::Alice.into(), + ).unwrap(), + 1000 + ); + assert_eq!( + client.runtime_api().balance_of( + &BlockId::Number(client.chain_info().best_number), + AccountKeyring::Ferdie.into(), + ).unwrap(), + 0 + ); +} + +#[test] +fn block_builder_works_with_no_transactions() { + let mut client = substrate_test_runtime_client::new(); + + let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + + client.import(BlockOrigin::Own, block).unwrap(); + + assert_eq!(client.chain_info().best_number, 1); +} + +#[test] +fn block_builder_works_with_transactions() { + let mut client = substrate_test_runtime_client::new(); + + let mut builder = client.new_block(Default::default()).unwrap(); + + builder.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 42, + nonce: 0, + }).unwrap(); + + let block = builder.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + + assert_eq!(client.chain_info().best_number, 1); + assert_ne!( + client.state_at(&BlockId::Number(1)).unwrap().pairs(), + client.state_at(&BlockId::Number(0)).unwrap().pairs() + ); + assert_eq!( + client.runtime_api().balance_of( + &BlockId::Number(client.chain_info().best_number), + 
AccountKeyring::Alice.into(), + ).unwrap(), + 958 + ); + assert_eq!( + client.runtime_api().balance_of( + &BlockId::Number(client.chain_info().best_number), + AccountKeyring::Ferdie.into(), + ).unwrap(), + 42 + ); +} + +#[test] +fn block_builder_does_not_include_invalid() { + let mut client = substrate_test_runtime_client::new(); + + let mut builder = client.new_block(Default::default()).unwrap(); + + builder.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 42, + nonce: 0, + }).unwrap(); + + assert!( + builder.push_transfer(Transfer { + from: AccountKeyring::Eve.into(), + to: AccountKeyring::Alice.into(), + amount: 42, + nonce: 0, + }).is_err() + ); + + let block = builder.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + + assert_eq!(client.chain_info().best_number, 1); + assert_ne!( + client.state_at(&BlockId::Number(1)).unwrap().pairs(), + client.state_at(&BlockId::Number(0)).unwrap().pairs() + ); + assert_eq!(client.body(&BlockId::Number(1)).unwrap().unwrap().len(), 1) +} + +#[test] +fn best_containing_with_genesis_block() { + // block tree: + // G + + let (client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); + + let genesis_hash = client.chain_info().genesis_hash; + + assert_eq!( + genesis_hash.clone(), + longest_chain_select.finality_target(genesis_hash.clone(), None).unwrap().unwrap() + ); +} + +#[test] +fn best_containing_with_hash_not_found() { + // block tree: + // G + + let (client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); + + let uninserted_block = client.new_block(Default::default()).unwrap().build().unwrap().block; + + assert_eq!( + None, + longest_chain_select.finality_target(uninserted_block.hash().clone(), None).unwrap() + ); +} + +#[test] +fn uncles_with_only_ancestors() { + // block tree: + // G -> A1 -> A2 + let mut client = substrate_test_runtime_client::new(); + + // G -> A1 + let a1 = 
client.new_block(Default::default()).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + // A1 -> A2 + let a2 = client.new_block(Default::default()).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + let v: Vec = Vec::new(); + assert_eq!(v, client.uncles(a2.hash(), 3).unwrap()); +} + +#[test] +fn uncles_with_multiple_forks() { + // block tree: + // G -> A1 -> A2 -> A3 -> A4 -> A5 + // A1 -> B2 -> B3 -> B4 + // B2 -> C3 + // A1 -> D2 + let mut client = substrate_test_runtime_client::new(); + + // G -> A1 + let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + // A1 -> A2 + let a2 = client.new_block_at( + &BlockId::Hash(a1.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + // A2 -> A3 + let a3 = client.new_block_at( + &BlockId::Hash(a2.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a3.clone()).unwrap(); + + // A3 -> A4 + let a4 = client.new_block_at( + &BlockId::Hash(a3.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a4.clone()).unwrap(); + + // A4 -> A5 + let a5 = client.new_block_at( + &BlockId::Hash(a4.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a5.clone()).unwrap(); + + // A1 -> B2 + let mut builder = client.new_block_at( + &BlockId::Hash(a1.hash()), + Default::default(), + false, + ).unwrap(); + // this push is required as otherwise B2 has the same hash as A2 and won't get imported + builder.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 41, + nonce: 0, + }).unwrap(); + let b2 = builder.build().unwrap().block; + client.import(BlockOrigin::Own, 
b2.clone()).unwrap(); + + // B2 -> B3 + let b3 = client.new_block_at( + &BlockId::Hash(b2.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, b3.clone()).unwrap(); + + // B3 -> B4 + let b4 = client.new_block_at( + &BlockId::Hash(b3.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, b4.clone()).unwrap(); + + // // B2 -> C3 + let mut builder = client.new_block_at( + &BlockId::Hash(b2.hash()), + Default::default(), + false, + ).unwrap(); + // this push is required as otherwise C3 has the same hash as B3 and won't get imported + builder.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 1, + }).unwrap(); + let c3 = builder.build().unwrap().block; + client.import(BlockOrigin::Own, c3.clone()).unwrap(); + + // A1 -> D2 + let mut builder = client.new_block_at( + &BlockId::Hash(a1.hash()), + Default::default(), + false, + ).unwrap(); + // this push is required as otherwise D2 has the same hash as B2 and won't get imported + builder.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }).unwrap(); + let d2 = builder.build().unwrap().block; + client.import(BlockOrigin::Own, d2.clone()).unwrap(); + + let genesis_hash = client.chain_info().genesis_hash; + + let uncles1 = client.uncles(a4.hash(), 10).unwrap(); + assert_eq!(vec![b2.hash(), d2.hash()], uncles1); + + let uncles2 = client.uncles(a4.hash(), 0).unwrap(); + assert_eq!(0, uncles2.len()); + + let uncles3 = client.uncles(a1.hash(), 10).unwrap(); + assert_eq!(0, uncles3.len()); + + let uncles4 = client.uncles(genesis_hash, 10).unwrap(); + assert_eq!(0, uncles4.len()); + + let uncles5 = client.uncles(d2.hash(), 10).unwrap(); + assert_eq!(vec![a2.hash(), b2.hash()], uncles5); + + let uncles6 = client.uncles(b3.hash(), 1).unwrap(); + assert_eq!(vec![c3.hash()], 
uncles6); +} + +#[test] +fn best_containing_on_longest_chain_with_single_chain_3_blocks() { + // block tree: + // G -> A1 -> A2 + + let (mut client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); + + // G -> A1 + let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + // A1 -> A2 + let a2 = client.new_block(Default::default()).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + let genesis_hash = client.chain_info().genesis_hash; + + assert_eq!(a2.hash(), longest_chain_select.finality_target(genesis_hash, None).unwrap().unwrap()); + assert_eq!(a2.hash(), longest_chain_select.finality_target(a1.hash(), None).unwrap().unwrap()); + assert_eq!(a2.hash(), longest_chain_select.finality_target(a2.hash(), None).unwrap().unwrap()); +} + +#[test] +fn best_containing_on_longest_chain_with_multiple_forks() { + // block tree: + // G -> A1 -> A2 -> A3 -> A4 -> A5 + // A1 -> B2 -> B3 -> B4 + // B2 -> C3 + // A1 -> D2 + let (mut client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); + + // G -> A1 + let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + // A1 -> A2 + let a2 = client.new_block_at( + &BlockId::Hash(a1.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + // A2 -> A3 + let a3 = client.new_block_at( + &BlockId::Hash(a2.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a3.clone()).unwrap(); + + // A3 -> A4 + let a4 = client.new_block_at( + &BlockId::Hash(a3.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a4.clone()).unwrap(); + + // A4 -> A5 + let a5 = client.new_block_at( + &BlockId::Hash(a4.hash()), + 
Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a5.clone()).unwrap(); + + // A1 -> B2 + let mut builder = client.new_block_at( + &BlockId::Hash(a1.hash()), + Default::default(), + false, + ).unwrap(); + // this push is required as otherwise B2 has the same hash as A2 and won't get imported + builder.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 41, + nonce: 0, + }).unwrap(); + let b2 = builder.build().unwrap().block; + client.import(BlockOrigin::Own, b2.clone()).unwrap(); + + // B2 -> B3 + let b3 = client.new_block_at( + &BlockId::Hash(b2.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, b3.clone()).unwrap(); + + // B3 -> B4 + let b4 = client.new_block_at( + &BlockId::Hash(b3.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, b4.clone()).unwrap(); + + // // B2 -> C3 + let mut builder = client.new_block_at( + &BlockId::Hash(b2.hash()), + Default::default(), + false, + ).unwrap(); + // this push is required as otherwise C3 has the same hash as B3 and won't get imported + builder.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 1, + }).unwrap(); + let c3 = builder.build().unwrap().block; + client.import(BlockOrigin::Own, c3.clone()).unwrap(); + + // A1 -> D2 + let mut builder = client.new_block_at( + &BlockId::Hash(a1.hash()), + Default::default(), + false, + ).unwrap(); + // this push is required as otherwise D2 has the same hash as B2 and won't get imported + builder.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }).unwrap(); + let d2 = builder.build().unwrap().block; + client.import(BlockOrigin::Own, d2.clone()).unwrap(); + + assert_eq!(client.chain_info().best_hash, a5.hash()); + + 
let genesis_hash = client.chain_info().genesis_hash; + let leaves = longest_chain_select.leaves().unwrap(); + + assert!(leaves.contains(&a5.hash())); + assert!(leaves.contains(&b4.hash())); + assert!(leaves.contains(&c3.hash())); + assert!(leaves.contains(&d2.hash())); + assert_eq!(leaves.len(), 4); + + // search without restriction + + assert_eq!(a5.hash(), longest_chain_select.finality_target( + genesis_hash, None).unwrap().unwrap()); + assert_eq!(a5.hash(), longest_chain_select.finality_target( + a1.hash(), None).unwrap().unwrap()); + assert_eq!(a5.hash(), longest_chain_select.finality_target( + a2.hash(), None).unwrap().unwrap()); + assert_eq!(a5.hash(), longest_chain_select.finality_target( + a3.hash(), None).unwrap().unwrap()); + assert_eq!(a5.hash(), longest_chain_select.finality_target( + a4.hash(), None).unwrap().unwrap()); + assert_eq!(a5.hash(), longest_chain_select.finality_target( + a5.hash(), None).unwrap().unwrap()); + + assert_eq!(b4.hash(), longest_chain_select.finality_target( + b2.hash(), None).unwrap().unwrap()); + assert_eq!(b4.hash(), longest_chain_select.finality_target( + b3.hash(), None).unwrap().unwrap()); + assert_eq!(b4.hash(), longest_chain_select.finality_target( + b4.hash(), None).unwrap().unwrap()); + + assert_eq!(c3.hash(), longest_chain_select.finality_target( + c3.hash(), None).unwrap().unwrap()); + + assert_eq!(d2.hash(), longest_chain_select.finality_target( + d2.hash(), None).unwrap().unwrap()); + + + // search only blocks with number <= 5. 
equivalent to without restriction for this scenario + + assert_eq!(a5.hash(), longest_chain_select.finality_target( + genesis_hash, Some(5)).unwrap().unwrap()); + assert_eq!(a5.hash(), longest_chain_select.finality_target( + a1.hash(), Some(5)).unwrap().unwrap()); + assert_eq!(a5.hash(), longest_chain_select.finality_target( + a2.hash(), Some(5)).unwrap().unwrap()); + assert_eq!(a5.hash(), longest_chain_select.finality_target( + a3.hash(), Some(5)).unwrap().unwrap()); + assert_eq!(a5.hash(), longest_chain_select.finality_target( + a4.hash(), Some(5)).unwrap().unwrap()); + assert_eq!(a5.hash(), longest_chain_select.finality_target( + a5.hash(), Some(5)).unwrap().unwrap()); + + assert_eq!(b4.hash(), longest_chain_select.finality_target( + b2.hash(), Some(5)).unwrap().unwrap()); + assert_eq!(b4.hash(), longest_chain_select.finality_target( + b3.hash(), Some(5)).unwrap().unwrap()); + assert_eq!(b4.hash(), longest_chain_select.finality_target( + b4.hash(), Some(5)).unwrap().unwrap()); + + assert_eq!(c3.hash(), longest_chain_select.finality_target( + c3.hash(), Some(5)).unwrap().unwrap()); + + assert_eq!(d2.hash(), longest_chain_select.finality_target( + d2.hash(), Some(5)).unwrap().unwrap()); + + + // search only blocks with number <= 4 + + assert_eq!(a4.hash(), longest_chain_select.finality_target( + genesis_hash, Some(4)).unwrap().unwrap()); + assert_eq!(a4.hash(), longest_chain_select.finality_target( + a1.hash(), Some(4)).unwrap().unwrap()); + assert_eq!(a4.hash(), longest_chain_select.finality_target( + a2.hash(), Some(4)).unwrap().unwrap()); + assert_eq!(a4.hash(), longest_chain_select.finality_target( + a3.hash(), Some(4)).unwrap().unwrap()); + assert_eq!(a4.hash(), longest_chain_select.finality_target( + a4.hash(), Some(4)).unwrap().unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + a5.hash(), Some(4)).unwrap()); + + assert_eq!(b4.hash(), longest_chain_select.finality_target( + b2.hash(), Some(4)).unwrap().unwrap()); + assert_eq!(b4.hash(), 
longest_chain_select.finality_target( + b3.hash(), Some(4)).unwrap().unwrap()); + assert_eq!(b4.hash(), longest_chain_select.finality_target( + b4.hash(), Some(4)).unwrap().unwrap()); + + assert_eq!(c3.hash(), longest_chain_select.finality_target( + c3.hash(), Some(4)).unwrap().unwrap()); + + assert_eq!(d2.hash(), longest_chain_select.finality_target( + d2.hash(), Some(4)).unwrap().unwrap()); + + + // search only blocks with number <= 3 + + assert_eq!(a3.hash(), longest_chain_select.finality_target( + genesis_hash, Some(3)).unwrap().unwrap()); + assert_eq!(a3.hash(), longest_chain_select.finality_target( + a1.hash(), Some(3)).unwrap().unwrap()); + assert_eq!(a3.hash(), longest_chain_select.finality_target( + a2.hash(), Some(3)).unwrap().unwrap()); + assert_eq!(a3.hash(), longest_chain_select.finality_target( + a3.hash(), Some(3)).unwrap().unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + a4.hash(), Some(3)).unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + a5.hash(), Some(3)).unwrap()); + + assert_eq!(b3.hash(), longest_chain_select.finality_target( + b2.hash(), Some(3)).unwrap().unwrap()); + assert_eq!(b3.hash(), longest_chain_select.finality_target( + b3.hash(), Some(3)).unwrap().unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + b4.hash(), Some(3)).unwrap()); + + assert_eq!(c3.hash(), longest_chain_select.finality_target( + c3.hash(), Some(3)).unwrap().unwrap()); + + assert_eq!(d2.hash(), longest_chain_select.finality_target( + d2.hash(), Some(3)).unwrap().unwrap()); + + + // search only blocks with number <= 2 + + assert_eq!(a2.hash(), longest_chain_select.finality_target( + genesis_hash, Some(2)).unwrap().unwrap()); + assert_eq!(a2.hash(), longest_chain_select.finality_target( + a1.hash(), Some(2)).unwrap().unwrap()); + assert_eq!(a2.hash(), longest_chain_select.finality_target( + a2.hash(), Some(2)).unwrap().unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + a3.hash(), 
Some(2)).unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + a4.hash(), Some(2)).unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + a5.hash(), Some(2)).unwrap()); + + assert_eq!(b2.hash(), longest_chain_select.finality_target( + b2.hash(), Some(2)).unwrap().unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + b3.hash(), Some(2)).unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + b4.hash(), Some(2)).unwrap()); + + assert_eq!(None, longest_chain_select.finality_target( + c3.hash(), Some(2)).unwrap()); + + assert_eq!(d2.hash(), longest_chain_select.finality_target( + d2.hash(), Some(2)).unwrap().unwrap()); + + + // search only blocks with number <= 1 + + assert_eq!(a1.hash(), longest_chain_select.finality_target( + genesis_hash, Some(1)).unwrap().unwrap()); + assert_eq!(a1.hash(), longest_chain_select.finality_target( + a1.hash(), Some(1)).unwrap().unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + a2.hash(), Some(1)).unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + a3.hash(), Some(1)).unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + a4.hash(), Some(1)).unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + a5.hash(), Some(1)).unwrap()); + + assert_eq!(None, longest_chain_select.finality_target( + b2.hash(), Some(1)).unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + b3.hash(), Some(1)).unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + b4.hash(), Some(1)).unwrap()); + + assert_eq!(None, longest_chain_select.finality_target( + c3.hash(), Some(1)).unwrap()); + + assert_eq!(None, longest_chain_select.finality_target( + d2.hash(), Some(1)).unwrap()); + + // search only blocks with number <= 0 + + assert_eq!(genesis_hash, longest_chain_select.finality_target( + genesis_hash, Some(0)).unwrap().unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + a1.hash(), 
Some(0)).unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + a2.hash(), Some(0)).unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + a3.hash(), Some(0)).unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + a4.hash(), Some(0)).unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + a5.hash(), Some(0)).unwrap()); + + assert_eq!(None, longest_chain_select.finality_target( + b2.hash(), Some(0)).unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + b3.hash(), Some(0)).unwrap()); + assert_eq!(None, longest_chain_select.finality_target( + b4.hash(), Some(0)).unwrap()); + + assert_eq!(None, longest_chain_select.finality_target( + c3.hash().clone(), Some(0)).unwrap()); + + assert_eq!(None, longest_chain_select.finality_target( + d2.hash().clone(), Some(0)).unwrap()); +} + +#[test] +fn best_containing_on_longest_chain_with_max_depth_higher_than_best() { + // block tree: + // G -> A1 -> A2 + + let (mut client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); + + // G -> A1 + let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + // A1 -> A2 + let a2 = client.new_block(Default::default()).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + let genesis_hash = client.chain_info().genesis_hash; + + assert_eq!(a2.hash(), longest_chain_select.finality_target(genesis_hash, Some(10)).unwrap().unwrap()); +} + +#[test] +fn key_changes_works() { + let (client, _, test_cases) = prepare_client_with_key_changes(); + + for (index, (begin, end, key, expected_result)) in test_cases.into_iter().enumerate() { + let end = client.block_hash(end).unwrap().unwrap(); + let actual_result = client.key_changes( + begin, + BlockId::Hash(end), + None, + &StorageKey(key), + ).unwrap(); + match actual_result == expected_result { + true => (), + false => 
panic!(format!("Failed test {}: actual = {:?}, expected = {:?}", + index, actual_result, expected_result)), + } + } +} + +#[test] +fn import_with_justification() { + let mut client = substrate_test_runtime_client::new(); + + // G -> A1 + let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + // A1 -> A2 + let a2 = client.new_block_at( + &BlockId::Hash(a1.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + // A2 -> A3 + let justification = vec![1, 2, 3]; + let a3 = client.new_block_at( + &BlockId::Hash(a2.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import_justified(BlockOrigin::Own, a3.clone(), justification.clone()).unwrap(); + + assert_eq!( + client.chain_info().finalized_hash, + a3.hash(), + ); + + assert_eq!( + client.justification(&BlockId::Hash(a3.hash())).unwrap(), + Some(justification), + ); + + assert_eq!( + client.justification(&BlockId::Hash(a1.hash())).unwrap(), + None, + ); + + assert_eq!( + client.justification(&BlockId::Hash(a2.hash())).unwrap(), + None, + ); +} + +#[test] +fn importing_diverged_finalized_block_should_trigger_reorg() { + let mut client = substrate_test_runtime_client::new(); + + // G -> A1 -> A2 + // \ + // -> B1 + let a1 = client.new_block_at( + &BlockId::Number(0), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + let a2 = client.new_block_at( + &BlockId::Hash(a1.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + let mut b1 = client.new_block_at( + &BlockId::Number(0), + Default::default(), + false, + ).unwrap(); + // needed to make sure B1 gets a different hash from A1 + b1.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: 
AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }).unwrap(); + // create but don't import B1 just yet + let b1 = b1.build().unwrap().block; + + // A2 is the current best since it's the longest chain + assert_eq!( + client.chain_info().best_hash, + a2.hash(), + ); + + // importing B1 as finalized should trigger a re-org and set it as new best + let justification = vec![1, 2, 3]; + client.import_justified(BlockOrigin::Own, b1.clone(), justification).unwrap(); + + assert_eq!( + client.chain_info().best_hash, + b1.hash(), + ); + + assert_eq!( + client.chain_info().finalized_hash, + b1.hash(), + ); +} + +#[test] +fn finalizing_diverged_block_should_trigger_reorg() { + let (mut client, select_chain) = TestClientBuilder::new().build_with_longest_chain(); + + // G -> A1 -> A2 + // \ + // -> B1 -> B2 + let a1 = client.new_block_at( + &BlockId::Number(0), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + let a2 = client.new_block_at( + &BlockId::Hash(a1.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + let mut b1 = client.new_block_at( + &BlockId::Number(0), + Default::default(), + false, + ).unwrap(); + // needed to make sure B1 gets a different hash from A1 + b1.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }).unwrap(); + let b1 = b1.build().unwrap().block; + client.import(BlockOrigin::Own, b1.clone()).unwrap(); + + let b2 = client.new_block_at( + &BlockId::Hash(b1.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, b2.clone()).unwrap(); + + // A2 is the current best since it's the longest chain + assert_eq!( + client.chain_info().best_hash, + a2.hash(), + ); + + // we finalize block B1 which is on a different branch from current best + // which should trigger a 
re-org. + ClientExt::finalize_block(&client, BlockId::Hash(b1.hash()), None).unwrap(); + + // B1 should now be the latest finalized + assert_eq!( + client.chain_info().finalized_hash, + b1.hash(), + ); + + // and B1 should be the new best block (`finalize_block` as no way of + // knowing about B2) + assert_eq!( + client.chain_info().best_hash, + b1.hash(), + ); + + // `SelectChain` should report B2 as best block though + assert_eq!( + select_chain.best_chain().unwrap().hash(), + b2.hash(), + ); + + // after we build B3 on top of B2 and import it + // it should be the new best block, + let b3 = client.new_block_at( + &BlockId::Hash(b2.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, b3.clone()).unwrap(); + + assert_eq!( + client.chain_info().best_hash, + b3.hash(), + ); +} + +#[test] +fn get_header_by_block_number_doesnt_panic() { + let client = substrate_test_runtime_client::new(); + + // backend uses u32 for block numbers, make sure we don't panic when + // trying to convert + let id = BlockId::::Number(72340207214430721); + client.header(&id).expect_err("invalid block number overflows u32"); +} + +#[test] +fn state_reverted_on_reorg() { + sp_tracing::try_init_simple(); + let mut client = substrate_test_runtime_client::new(); + + let current_balance = |client: &substrate_test_runtime_client::TestClient| + client.runtime_api().balance_of( + &BlockId::number(client.chain_info().best_number), AccountKeyring::Alice.into(), + ).unwrap(); + + // G -> A1 -> A2 + // \ + // -> B1 + let mut a1 = client.new_block_at( + &BlockId::Number(0), + Default::default(), + false, + ).unwrap(); + a1.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Bob.into(), + amount: 10, + nonce: 0, + }).unwrap(); + let a1 = a1.build().unwrap().block; + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + let mut b1 = client.new_block_at( + &BlockId::Number(0), + Default::default(), + false, + 
).unwrap(); + b1.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 50, + nonce: 0, + }).unwrap(); + let b1 = b1.build().unwrap().block; + // Reorg to B1 + client.import_as_best(BlockOrigin::Own, b1.clone()).unwrap(); + + assert_eq!(950, current_balance(&client)); + let mut a2 = client.new_block_at( + &BlockId::Hash(a1.hash()), + Default::default(), + false, + ).unwrap(); + a2.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Charlie.into(), + amount: 10, + nonce: 1, + }).unwrap(); + let a2 = a2.build().unwrap().block; + // Re-org to A2 + client.import_as_best(BlockOrigin::Own, a2).unwrap(); + assert_eq!(980, current_balance(&client)); +} + +#[test] +fn doesnt_import_blocks_that_revert_finality() { + sp_tracing::try_init_simple(); + let tmp = tempfile::tempdir().unwrap(); + + // we need to run with archive pruning to avoid pruning non-canonical + // states + let backend = Arc::new(Backend::new( + DatabaseSettings { + state_cache_size: 1 << 20, + state_cache_child_ratio: None, + pruning: PruningMode::ArchiveAll, + source: DatabaseSettingsSrc::RocksDb { + path: tmp.path().into(), + cache_size: 1024, + }, + }, + u64::max_value(), + ).unwrap()); + + let mut client = TestClientBuilder::with_backend(backend).build(); + + // -> C1 + // / + // G -> A1 -> A2 + // \ + // -> B1 -> B2 -> B3 + + let a1 = client.new_block_at( + &BlockId::Number(0), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + let a2 = client.new_block_at( + &BlockId::Hash(a1.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + let mut b1 = client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); + + // needed to make sure B1 gets a different hash from A1 + b1.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: 
AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }).unwrap(); + let b1 = b1.build().unwrap().block; + client.import(BlockOrigin::Own, b1.clone()).unwrap(); + + let b2 = client.new_block_at(&BlockId::Hash(b1.hash()), Default::default(), false) + .unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, b2.clone()).unwrap(); + + // prepare B3 before we finalize A2, because otherwise we won't be able to + // read changes trie configuration after A2 is finalized + let b3 = client.new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap().build().unwrap().block; + + // we will finalize A2 which should make it impossible to import a new + // B3 at the same height but that doesn't include it + ClientExt::finalize_block(&client, BlockId::Hash(a2.hash()), None).unwrap(); + + let import_err = client.import(BlockOrigin::Own, b3).err().unwrap(); + let expected_err = ConsensusError::ClientImport( + sp_blockchain::Error::NotInFinalizedChain.to_string() + ); + + assert_eq!( + import_err.to_string(), + expected_err.to_string(), + ); + + // adding a C1 block which is lower than the last finalized should also + // fail (with a cheaper check that doesn't require checking ancestry). 
+ let mut c1 = client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); + + // needed to make sure C1 gets a different hash from A1 and B1 + c1.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 2, + nonce: 0, + }).unwrap(); + let c1 = c1.build().unwrap().block; + + let import_err = client.import(BlockOrigin::Own, c1).err().unwrap(); + let expected_err = ConsensusError::ClientImport( + sp_blockchain::Error::NotInFinalizedChain.to_string() + ); + + assert_eq!( + import_err.to_string(), + expected_err.to_string(), + ); +} + + +#[test] +fn respects_block_rules() { + fn run_test( + record_only: bool, + known_bad: &mut HashSet, + fork_rules: &mut Vec<(u64, H256)>, + ) { + let mut client = if record_only { + TestClientBuilder::new().build() + } else { + TestClientBuilder::new() + .set_block_rules( + Some(fork_rules.clone()), + Some(known_bad.clone()), + ) + .build() + }; + + let block_ok = client.new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap().build().unwrap().block; + + let params = BlockCheckParams { + hash: block_ok.hash().clone(), + number: 0, + parent_hash: block_ok.header().parent_hash().clone(), + allow_missing_state: false, + import_existing: false, + }; + assert_eq!(client.check_block(params).unwrap(), ImportResult::imported(false)); + + // this is 0x0d6d6612a10485370d9e085aeea7ec427fb3f34d961c6a816cdbe5cde2278864 + let mut block_not_ok = client.new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap(); + block_not_ok.push_storage_change(vec![0], Some(vec![1])).unwrap(); + let block_not_ok = block_not_ok.build().unwrap().block; + + let params = BlockCheckParams { + hash: block_not_ok.hash().clone(), + number: 0, + parent_hash: block_not_ok.header().parent_hash().clone(), + allow_missing_state: false, + import_existing: false, + }; + if record_only { + known_bad.insert(block_not_ok.hash()); + } else { + 
assert_eq!(client.check_block(params).unwrap(), ImportResult::KnownBad); + } + + // Now going to the fork + client.import_as_final(BlockOrigin::Own, block_ok).unwrap(); + + // And check good fork + let mut block_ok = client.new_block_at(&BlockId::Number(1), Default::default(), false) + .unwrap(); + block_ok.push_storage_change(vec![0], Some(vec![2])).unwrap(); + let block_ok = block_ok.build().unwrap().block; + + let params = BlockCheckParams { + hash: block_ok.hash().clone(), + number: 1, + parent_hash: block_ok.header().parent_hash().clone(), + allow_missing_state: false, + import_existing: false, + }; + if record_only { + fork_rules.push((1, block_ok.hash().clone())); + } + assert_eq!(client.check_block(params).unwrap(), ImportResult::imported(false)); + + // And now try bad fork + let mut block_not_ok = client.new_block_at(&BlockId::Number(1), Default::default(), false) + .unwrap(); + block_not_ok.push_storage_change(vec![0], Some(vec![3])).unwrap(); + let block_not_ok = block_not_ok.build().unwrap().block; + + let params = BlockCheckParams { + hash: block_not_ok.hash().clone(), + number: 1, + parent_hash: block_not_ok.header().parent_hash().clone(), + allow_missing_state: false, + import_existing: false, + }; + + if !record_only { + assert_eq!(client.check_block(params).unwrap(), ImportResult::KnownBad); + } + } + + let mut known_bad = HashSet::new(); + let mut fork_rules = Vec::new(); + + // records what bad_blocks and fork_blocks hashes should be + run_test(true, &mut known_bad, &mut fork_rules); + + // enforces rules and actually makes assertions + run_test(false, &mut known_bad, &mut fork_rules); +} + +#[test] +fn returns_status_for_pruned_blocks() { + sp_tracing::try_init_simple(); + let tmp = tempfile::tempdir().unwrap(); + + // set to prune after 1 block + // states + let backend = Arc::new(Backend::new( + DatabaseSettings { + state_cache_size: 1 << 20, + state_cache_child_ratio: None, + pruning: PruningMode::keep_blocks(1), + source: 
DatabaseSettingsSrc::RocksDb { + path: tmp.path().into(), + cache_size: 1024, + }, + }, + u64::max_value(), + ).unwrap()); + + let mut client = TestClientBuilder::with_backend(backend).build(); + + let a1 = client.new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap().build().unwrap().block; + + let mut b1 = client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); + + // b1 is created, but not imported + b1.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }).unwrap(); + let b1 = b1.build().unwrap().block; + + let check_block_a1 = BlockCheckParams { + hash: a1.hash().clone(), + number: 0, + parent_hash: a1.header().parent_hash().clone(), + allow_missing_state: false, + import_existing: false, + }; + + assert_eq!(client.check_block(check_block_a1.clone()).unwrap(), ImportResult::imported(false)); + assert_eq!(client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), BlockStatus::Unknown); + + client.import_as_final(BlockOrigin::Own, a1.clone()).unwrap(); + + assert_eq!(client.check_block(check_block_a1.clone()).unwrap(), ImportResult::AlreadyInChain); + assert_eq!(client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), BlockStatus::InChainWithState); + + let a2 = client.new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap().build().unwrap().block; + client.import_as_final(BlockOrigin::Own, a2.clone()).unwrap(); + + let check_block_a2 = BlockCheckParams { + hash: a2.hash().clone(), + number: 1, + parent_hash: a1.header().parent_hash().clone(), + allow_missing_state: false, + import_existing: false, + }; + + assert_eq!(client.check_block(check_block_a1.clone()).unwrap(), ImportResult::AlreadyInChain); + assert_eq!(client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), BlockStatus::InChainPruned); + assert_eq!(client.check_block(check_block_a2.clone()).unwrap(), ImportResult::AlreadyInChain); + 
assert_eq!(client.block_status(&BlockId::hash(check_block_a2.hash)).unwrap(), BlockStatus::InChainWithState); + + let a3 = client.new_block_at(&BlockId::Hash(a2.hash()), Default::default(), false) + .unwrap().build().unwrap().block; + + client.import_as_final(BlockOrigin::Own, a3.clone()).unwrap(); + let check_block_a3 = BlockCheckParams { + hash: a3.hash().clone(), + number: 2, + parent_hash: a2.header().parent_hash().clone(), + allow_missing_state: false, + import_existing: false, + }; + + // a1 and a2 are both pruned at this point + assert_eq!(client.check_block(check_block_a1.clone()).unwrap(), ImportResult::AlreadyInChain); + assert_eq!(client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), BlockStatus::InChainPruned); + assert_eq!(client.check_block(check_block_a2.clone()).unwrap(), ImportResult::AlreadyInChain); + assert_eq!(client.block_status(&BlockId::hash(check_block_a2.hash)).unwrap(), BlockStatus::InChainPruned); + assert_eq!(client.check_block(check_block_a3.clone()).unwrap(), ImportResult::AlreadyInChain); + assert_eq!(client.block_status(&BlockId::hash(check_block_a3.hash)).unwrap(), BlockStatus::InChainWithState); + + let mut check_block_b1 = BlockCheckParams { + hash: b1.hash().clone(), + number: 0, + parent_hash: b1.header().parent_hash().clone(), + allow_missing_state: false, + import_existing: false, + }; + assert_eq!(client.check_block(check_block_b1.clone()).unwrap(), ImportResult::MissingState); + check_block_b1.allow_missing_state = true; + assert_eq!(client.check_block(check_block_b1.clone()).unwrap(), ImportResult::imported(false)); + check_block_b1.parent_hash = H256::random(); + assert_eq!(client.check_block(check_block_b1.clone()).unwrap(), ImportResult::UnknownParent); +} + +#[test] +fn imports_blocks_with_changes_tries_config_change() { + // create client with initial 4^2 configuration + let mut client = TestClientBuilder::with_default_backend() + .changes_trie_config(Some(ChangesTrieConfiguration { + digest_interval: 4, 
+ digest_levels: 2, + })).build(); + + // =================================================================== + // blocks 1,2,3,4,5,6,7,8,9,10 are empty + // block 11 changes the key + // block 12 is the L1 digest that covers this change + // blocks 13,14,15,16,17,18,19,20,21,22 are empty + // block 23 changes the configuration to 5^1 AND is skewed digest + // =================================================================== + // blocks 24,25 are changing the key + // block 26 is empty + // block 27 changes the key + // block 28 is the L1 digest (NOT SKEWED!!!) that covers changes AND changes configuration to 3^1 + // =================================================================== + // block 29 is empty + // block 30 changes the key + // block 31 is L1 digest that covers this change + // =================================================================== + (1..11).for_each(|number| { + let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (11..12).for_each(|number| { + let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); + block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); + let block = block.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (12..23).for_each(|number| { + let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (23..24).for_each(|number| { + let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); + block.push_changes_trie_configuration_update(Some(ChangesTrieConfiguration { + digest_interval: 5, + digest_levels: 1, + })).unwrap(); + let block = block.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + 
(24..26).for_each(|number| { + let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); + block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); + let block = block.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (26..27).for_each(|number| { + let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (27..28).for_each(|number| { + let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); + block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); + let block = block.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (28..29).for_each(|number| { + let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); + block.push_changes_trie_configuration_update(Some(ChangesTrieConfiguration { + digest_interval: 3, + digest_levels: 1, + })).unwrap(); + let block = block.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (29..30).for_each(|number| { + let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (30..31).for_each(|number| { + let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); + block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); + let block = block.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (31..32).for_each(|number| { + let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + + // now check 
that configuration cache works + assert_eq!( + client.key_changes(1, BlockId::Number(31), None, &StorageKey(vec![42])).unwrap(), + vec![(30, 0), (27, 0), (25, 0), (24, 0), (11, 0)] + ); +} + +#[test] +fn storage_keys_iter_prefix_and_start_key_works() { + let client = substrate_test_runtime_client::new(); + + let prefix = StorageKey(hex!("3a").to_vec()); + + let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), None) + .unwrap() + .map(|x| x.0) + .collect(); + assert_eq!(res, [hex!("3a636f6465").to_vec(), hex!("3a686561707061676573").to_vec()]); + + let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), Some(&StorageKey(hex!("3a636f6465").to_vec()))) + .unwrap() + .map(|x| x.0) + .collect(); + assert_eq!(res, [hex!("3a686561707061676573").to_vec()]); + + let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), Some(&StorageKey(hex!("3a686561707061676573").to_vec()))) + .unwrap() + .map(|x| x.0) + .collect(); + assert_eq!(res, Vec::>::new()); +} + +#[test] +fn storage_keys_iter_works() { + let client = substrate_test_runtime_client::new(); + + let prefix = StorageKey(hex!("").to_vec()); + + let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), None) + .unwrap() + .take(2) + .map(|x| x.0) + .collect(); + assert_eq!(res, [hex!("0befda6e1ca4ef40219d588a727f1271").to_vec(), hex!("3a636f6465").to_vec()]); + + let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), Some(&StorageKey(hex!("3a636f6465").to_vec()))) + .unwrap() + .take(3) + .map(|x| x.0) + .collect(); + assert_eq!(res, [ + hex!("3a686561707061676573").to_vec(), + hex!("6644b9b8bc315888ac8e41a7968dc2b4141a5403c58acdf70b7e8f7e07bf5081").to_vec(), + hex!("79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d").to_vec(), + ]); + + let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), 
Some(&StorageKey(hex!("79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d").to_vec()))) + .unwrap() + .take(1) + .map(|x| x.0) + .collect(); + assert_eq!(res, [hex!("cf722c0832b5231d35e29f319ff27389f5032bfc7bfc3ba5ed7839f2042fb99f").to_vec()]); +} + +#[test] +fn cleans_up_closed_notification_sinks_on_block_import() { + use substrate_test_runtime_client::GenesisInit; + + // NOTE: we need to build the client here instead of using the client + // provided by test_runtime_client otherwise we can't access the private + // `import_notification_sinks` and `finality_notification_sinks` fields. + let mut client = + new_in_mem::< + _, + substrate_test_runtime_client::runtime::Block, + _, + substrate_test_runtime_client::runtime::RuntimeApi + >( + substrate_test_runtime_client::new_native_executor(), + &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), + None, + None, + Box::new(TaskExecutor::new()), + Default::default(), + ) + .unwrap(); + + type TestClient = Client< + in_mem::Backend, + LocalCallExecutor, sc_executor::NativeExecutor>, + substrate_test_runtime_client::runtime::Block, + substrate_test_runtime_client::runtime::RuntimeApi, + >; + + let import_notif1 = client.import_notification_stream(); + let import_notif2 = client.import_notification_stream(); + let finality_notif1 = client.finality_notification_stream(); + let finality_notif2 = client.finality_notification_stream(); + + // for some reason I can't seem to use `ClientBlockImportExt` + let bake_and_import_block = |client: &mut TestClient, origin| { + let block = client + .new_block(Default::default()) + .unwrap() + .build() + .unwrap() + .block; + + let (header, extrinsics) = block.deconstruct(); + let mut import = BlockImportParams::new(origin, header); + import.body = Some(extrinsics); + import.fork_choice = Some(ForkChoiceStrategy::LongestChain); + client.import_block(import, Default::default()).unwrap(); + }; + + // after importing a block we should still have 4 
notification sinks + // (2 import + 2 finality) + bake_and_import_block(&mut client, BlockOrigin::Own); + assert_eq!(client.import_notification_sinks().lock().len(), 2); + assert_eq!(client.finality_notification_sinks().lock().len(), 2); + + // if we drop one import notification receiver and one finality + // notification receiver + drop(import_notif2); + drop(finality_notif2); + + // the sinks should be cleaned up after block import + bake_and_import_block(&mut client, BlockOrigin::Own); + assert_eq!(client.import_notification_sinks().lock().len(), 1); + assert_eq!(client.finality_notification_sinks().lock().len(), 1); + + // the same thing should happen if block import happens during initial + // sync + drop(import_notif1); + drop(finality_notif1); + + bake_and_import_block(&mut client, BlockOrigin::NetworkInitialSync); + assert_eq!(client.import_notification_sinks().lock().len(), 0); + assert_eq!(client.finality_notification_sinks().lock().len(), 0); +} + +/// Test that ensures that we always send an import notification for re-orgs. 
+#[test] +fn reorg_triggers_a_notification_even_for_sources_that_should_not_trigger_notifications() { + let mut client = TestClientBuilder::new().build(); + + let mut notification_stream = futures::executor::block_on_stream( + client.import_notification_stream() + ); + + let a1 = client.new_block_at( + &BlockId::Number(0), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::NetworkInitialSync, a1.clone()).unwrap(); + + let a2 = client.new_block_at( + &BlockId::Hash(a1.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + client.import(BlockOrigin::NetworkInitialSync, a2.clone()).unwrap(); + + let mut b1 = client.new_block_at( + &BlockId::Number(0), + Default::default(), + false, + ).unwrap(); + // needed to make sure B1 gets a different hash from A1 + b1.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }).unwrap(); + let b1 = b1.build().unwrap().block; + client.import(BlockOrigin::NetworkInitialSync, b1.clone()).unwrap(); + + let b2 = client.new_block_at( + &BlockId::Hash(b1.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + + // Should trigger a notification because we reorg + client.import_as_best(BlockOrigin::NetworkInitialSync, b2.clone()).unwrap(); + + // There should be one notification + let notification = notification_stream.next().unwrap(); + + // We should have a tree route of the re-org + let tree_route = notification.tree_route.unwrap(); + assert_eq!(tree_route.enacted()[0].hash, b1.hash()); +} \ No newline at end of file diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs new file mode 100644 index 0000000000000..cfe815f174fac --- /dev/null +++ b/client/service/test/src/lib.rs @@ -0,0 +1,647 @@ +// This file is part of Substrate. + +// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Service integration test utils. + +use std::iter; +use std::sync::Arc; +use std::net::Ipv4Addr; +use std::pin::Pin; +use std::time::Duration; +use log::{info, debug}; +use futures01::{Future, Stream, Poll}; +use futures::{FutureExt as _, TryFutureExt as _}; +use tempfile::TempDir; +use tokio::{runtime::Runtime, prelude::FutureExt}; +use tokio::timer::Interval; +use sc_service::{ + TaskManager, + GenericChainSpec, + ChainSpecExtension, + Configuration, + config::{BasePath, DatabaseConfig, KeystoreConfig}, + RuntimeGenesis, + Role, + Error, + TaskExecutor, + client::Client, +}; +use sp_blockchain::HeaderBackend; +use sc_network::{multiaddr, Multiaddr}; +use sc_network::config::{NetworkConfiguration, TransportConfig}; +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; +use sp_transaction_pool::TransactionPool; +use sc_client_api::{Backend, CallExecutor}; +use parking_lot::Mutex; + +#[cfg(test)] +mod client; + +/// Maximum duration of single wait call. 
+const MAX_WAIT_TIME: Duration = Duration::from_secs(60 * 3); + +struct TestNet { + runtime: Runtime, + authority_nodes: Vec<(usize, F, U, Multiaddr)>, + full_nodes: Vec<(usize, F, U, Multiaddr)>, + light_nodes: Vec<(usize, L, Multiaddr)>, + chain_spec: GenericChainSpec, + base_port: u16, + nodes: usize, +} + +pub trait TestNetNode: Clone + Future + Send + 'static { + type Block: BlockT; + type Backend: Backend; + type Executor: CallExecutor + Send + Sync; + type RuntimeApi: Send + Sync; + type TransactionPool: TransactionPool; + + fn client(&self) -> Arc>; + fn transaction_pool(&self) -> Arc; + fn network(&self) -> Arc::Hash>>; +} + +pub struct TestNetComponents { + task_manager: Arc>, + client: Arc>, + transaction_pool: Arc, + network: Arc::Hash>>, +} + +impl +TestNetComponents { + pub fn new( + task_manager: TaskManager, + client: Arc>, + network: Arc::Hash>>, + transaction_pool: Arc, + ) -> Self { + Self { + client, transaction_pool, network, + task_manager: Arc::new(Mutex::new(task_manager)), + } + } +} + + +impl Clone for +TestNetComponents { + fn clone(&self) -> Self { + Self { + task_manager: self.task_manager.clone(), + client: self.client.clone(), + transaction_pool: self.transaction_pool.clone(), + network: self.network.clone(), + } + } +} + +impl Future for + TestNetComponents +{ + type Item = (); + type Error = sc_service::Error; + + fn poll(&mut self) -> Poll { + futures::compat::Compat::new(&mut self.task_manager.lock().future()).poll() + } +} + +impl TestNetNode for +TestNetComponents + where + TBl: BlockT, + TBackend: sc_client_api::Backend + Send + Sync + 'static, + TExec: CallExecutor + Send + Sync + 'static, + TRtApi: Send + Sync + 'static, + TExPool: TransactionPool + Send + Sync + 'static, +{ + type Block = TBl; + type Backend = TBackend; + type Executor = TExec; + type RuntimeApi = TRtApi; + type TransactionPool = TExPool; + + fn client(&self) -> Arc> { + self.client.clone() + } + fn transaction_pool(&self) -> Arc { + 
self.transaction_pool.clone() + } + fn network(&self) -> Arc::Hash>> { + self.network.clone() + } +} + +impl TestNet +where F: Clone + Send + 'static, L: Clone + Send +'static, U: Clone + Send + 'static +{ + pub fn run_until_all_full( + &mut self, + full_predicate: FP, + light_predicate: LP, + ) + where + FP: Send + Fn(usize, &F) -> bool + 'static, + LP: Send + Fn(usize, &L) -> bool + 'static, + { + let full_nodes = self.full_nodes.clone(); + let light_nodes = self.light_nodes.clone(); + let interval = Interval::new_interval(Duration::from_millis(100)) + .map_err(|_| ()) + .for_each(move |_| { + let full_ready = full_nodes.iter().all(|&(ref id, ref service, _, _)| + full_predicate(*id, service) + ); + + if !full_ready { + return Ok(()); + } + + let light_ready = light_nodes.iter().all(|&(ref id, ref service, _)| + light_predicate(*id, service) + ); + + if !light_ready { + Ok(()) + } else { + Err(()) + } + }) + .timeout(MAX_WAIT_TIME); + + match self.runtime.block_on(interval) { + Ok(()) => unreachable!("interval always fails; qed"), + Err(ref err) if err.is_inner() => (), + Err(_) => panic!("Waited for too long"), + } + } +} + +fn node_config ( + index: usize, + spec: &GenericChainSpec, + role: Role, + task_executor: TaskExecutor, + key_seed: Option, + base_port: u16, + root: &TempDir, +) -> Configuration +{ + let root = root.path().join(format!("node-{}", index)); + + let mut network_config = NetworkConfiguration::new( + format!("Node {}", index), + "network/test/0.1", + Default::default(), + None, + ); + + network_config.allow_non_globals_in_dht = true; + + network_config.listen_addresses.push( + iter::once(multiaddr::Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1))) + .chain(iter::once(multiaddr::Protocol::Tcp(base_port + index as u16))) + .collect() + ); + + network_config.transport = TransportConfig::Normal { + enable_mdns: false, + allow_private_ipv4: true, + wasm_external_transport: None, + use_yamux_flow_control: true, + }; + + Configuration { + impl_name: 
String::from("network-test-impl"), + impl_version: String::from("0.1"), + role, + task_executor, + transaction_pool: Default::default(), + network: network_config, + keystore: KeystoreConfig::Path { + path: root.join("key"), + password: None + }, + database: DatabaseConfig::RocksDb { + path: root.join("db"), + cache_size: 128, + }, + state_cache_size: 16777216, + state_cache_child_ratio: None, + pruning: Default::default(), + chain_spec: Box::new((*spec).clone()), + wasm_method: sc_service::config::WasmExecutionMethod::Interpreted, + execution_strategies: Default::default(), + rpc_http: None, + rpc_ipc: None, + rpc_ws: None, + rpc_ws_max_connections: None, + rpc_cors: None, + rpc_methods: Default::default(), + prometheus_config: None, + telemetry_endpoints: None, + telemetry_external_transport: None, + default_heap_pages: None, + offchain_worker: Default::default(), + force_authoring: false, + disable_grandpa: false, + dev_key_seed: key_seed, + tracing_targets: None, + tracing_receiver: Default::default(), + max_runtime_instances: 8, + announce_block: true, + base_path: Some(BasePath::new(root)), + informant_output_format: Default::default(), + } +} + +impl TestNet where + F: TestNetNode, + L: TestNetNode, + E: ChainSpecExtension + Clone + 'static + Send, + G: RuntimeGenesis + 'static, +{ + fn new( + temp: &TempDir, + spec: GenericChainSpec, + full: impl Iterator Result<(F, U), Error>>, + light: impl Iterator Result>, + authorities: impl Iterator Result<(F, U), Error> + )>, + base_port: u16 + ) -> TestNet { + sp_tracing::try_init_simple(); + fdlimit::raise_fd_limit(); + let runtime = Runtime::new().expect("Error creating tokio runtime"); + let mut net = TestNet { + runtime, + authority_nodes: Default::default(), + full_nodes: Default::default(), + light_nodes: Default::default(), + chain_spec: spec, + base_port, + nodes: 0, + }; + net.insert_nodes(temp, full, light, authorities); + net + } + + fn insert_nodes( + &mut self, + temp: &TempDir, + full: impl Iterator 
Result<(F, U), Error>>, + light: impl Iterator Result>, + authorities: impl Iterator Result<(F, U), Error>)> + ) { + let executor = self.runtime.executor(); + let task_executor: TaskExecutor = { + let executor = executor.clone(); + (move |fut: Pin + Send>>, _| { + executor.spawn(fut.unit_error().compat()); + async {} + }).into() + }; + + for (key, authority) in authorities { + let node_config = node_config( + self.nodes, + &self.chain_spec, + Role::Authority { sentry_nodes: Vec::new() }, + task_executor.clone(), + Some(key), + self.base_port, + &temp, + ); + let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); + let (service, user_data) = authority(node_config).expect("Error creating test node service"); + + executor.spawn(service.clone().map_err(|_| ())); + let addr = addr.with(multiaddr::Protocol::P2p(service.network().local_peer_id().clone().into())); + self.authority_nodes.push((self.nodes, service, user_data, addr)); + self.nodes += 1; + } + + for full in full { + let node_config = node_config( + self.nodes, + &self.chain_spec, + Role::Full, + task_executor.clone(), + None, + self.base_port, + &temp, + ); + let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); + let (service, user_data) = full(node_config).expect("Error creating test node service"); + + executor.spawn(service.clone().map_err(|_| ())); + let addr = addr.with(multiaddr::Protocol::P2p(service.network().local_peer_id().clone().into())); + self.full_nodes.push((self.nodes, service, user_data, addr)); + self.nodes += 1; + } + + for light in light { + let node_config = node_config( + self.nodes, + &self.chain_spec, + Role::Light, + task_executor.clone(), + None, + self.base_port, + &temp, + ); + let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); + let service = light(node_config).expect("Error creating test node service"); + + executor.spawn(service.clone().map_err(|_| ())); + let addr = 
addr.with(multiaddr::Protocol::P2p(service.network().local_peer_id().clone().into())); + self.light_nodes.push((self.nodes, service, addr)); + self.nodes += 1; + } + } +} + +fn tempdir_with_prefix(prefix: &str) -> TempDir { + tempfile::Builder::new().prefix(prefix).tempdir().expect("Error creating test dir") +} + +pub fn connectivity( + spec: GenericChainSpec, + full_builder: Fb, + light_builder: Lb, +) where + E: ChainSpecExtension + Clone + 'static + Send, + G: RuntimeGenesis + 'static, + Fb: Fn(Configuration) -> Result, + F: TestNetNode, + Lb: Fn(Configuration) -> Result, + L: TestNetNode, +{ + const NUM_FULL_NODES: usize = 5; + const NUM_LIGHT_NODES: usize = 5; + + let expected_full_connections = NUM_FULL_NODES - 1 + NUM_LIGHT_NODES; + let expected_light_connections = NUM_FULL_NODES; + + { + let temp = tempdir_with_prefix("substrate-connectivity-test"); + let runtime = { + let mut network = TestNet::new( + &temp, + spec.clone(), + (0..NUM_FULL_NODES).map(|_| { |cfg| full_builder(cfg).map(|s| (s, ())) }), + (0..NUM_LIGHT_NODES).map(|_| { |cfg| light_builder(cfg) }), + // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise + // the type of the closure cannot be inferred. 
+ (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg).map(|s| (s, ())) })), + 30400, + ); + info!("Checking star topology"); + let first_address = network.full_nodes[0].3.clone(); + for (_, service, _, _) in network.full_nodes.iter().skip(1) { + service.network().add_reserved_peer(first_address.to_string()) + .expect("Error adding reserved peer"); + } + for (_, service, _) in network.light_nodes.iter() { + service.network().add_reserved_peer(first_address.to_string()) + .expect("Error adding reserved peer"); + } + + network.run_until_all_full( + move |_index, service| { + let connected = service.network().num_connected(); + debug!("Got {}/{} full connections...", connected, expected_full_connections); + connected == expected_full_connections + }, + move |_index, service| { + let connected = service.network().num_connected(); + debug!("Got {}/{} light connections...", connected, expected_light_connections); + connected == expected_light_connections + }, + ); + + network.runtime + }; + + runtime.shutdown_now().wait().expect("Error shutting down runtime"); + + temp.close().expect("Error removing temp dir"); + } + { + let temp = tempdir_with_prefix("substrate-connectivity-test"); + { + let mut network = TestNet::new( + &temp, + spec, + (0..NUM_FULL_NODES).map(|_| { |cfg| full_builder(cfg).map(|s| (s, ())) }), + (0..NUM_LIGHT_NODES).map(|_| { |cfg| light_builder(cfg) }), + // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise + // the type of the closure cannot be inferred. 
+ (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg).map(|s| (s, ())) })), + 30400, + ); + info!("Checking linked topology"); + let mut address = network.full_nodes[0].3.clone(); + let max_nodes = std::cmp::max(NUM_FULL_NODES, NUM_LIGHT_NODES); + for i in 0..max_nodes { + if i != 0 { + if let Some((_, service, _, node_id)) = network.full_nodes.get(i) { + service.network().add_reserved_peer(address.to_string()) + .expect("Error adding reserved peer"); + address = node_id.clone(); + } + } + + if let Some((_, service, node_id)) = network.light_nodes.get(i) { + service.network().add_reserved_peer(address.to_string()) + .expect("Error adding reserved peer"); + address = node_id.clone(); + } + } + + network.run_until_all_full( + move |_index, service| { + let connected = service.network().num_connected(); + debug!("Got {}/{} full connections...", connected, expected_full_connections); + connected == expected_full_connections + }, + move |_index, service| { + let connected = service.network().num_connected(); + debug!("Got {}/{} light connections...", connected, expected_light_connections); + connected == expected_light_connections + }, + ); + } + temp.close().expect("Error removing temp dir"); + } +} + +pub fn sync( + spec: GenericChainSpec, + full_builder: Fb, + light_builder: Lb, + mut make_block_and_import: B, + mut extrinsic_factory: ExF +) where + Fb: Fn(Configuration) -> Result<(F, U), Error>, + F: TestNetNode, + Lb: Fn(Configuration) -> Result, + L: TestNetNode, + B: FnMut(&F, &mut U), + ExF: FnMut(&F, &U) -> ::Extrinsic, + U: Clone + Send + 'static, + E: ChainSpecExtension + Clone + 'static + Send, + G: RuntimeGenesis + 'static, +{ + const NUM_FULL_NODES: usize = 10; + // FIXME: BABE light client support is currently not working. 
+ const NUM_LIGHT_NODES: usize = 10; + const NUM_BLOCKS: usize = 512; + let temp = tempdir_with_prefix("substrate-sync-test"); + let mut network = TestNet::new( + &temp, + spec, + (0..NUM_FULL_NODES).map(|_| { |cfg| full_builder(cfg) }), + (0..NUM_LIGHT_NODES).map(|_| { |cfg| light_builder(cfg) }), + // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise + // the type of the closure cannot be inferred. + (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg) })), + 30500, + ); + info!("Checking block sync"); + let first_address = { + let &mut (_, ref first_service, ref mut first_user_data, _) = &mut network.full_nodes[0]; + for i in 0 .. NUM_BLOCKS { + if i % 128 == 0 { + info!("Generating #{}", i + 1); + } + + make_block_and_import(&first_service, first_user_data); + } + network.full_nodes[0].1.network().update_chain(); + network.full_nodes[0].3.clone() + }; + + info!("Running sync"); + for (_, service, _, _) in network.full_nodes.iter().skip(1) { + service.network().add_reserved_peer(first_address.to_string()) + .expect("Error adding reserved peer"); + } + for (_, service, _) in network.light_nodes.iter() { + service.network().add_reserved_peer(first_address.to_string()) + .expect("Error adding reserved peer"); + } + network.run_until_all_full( + |_index, service| + service.client().info().best_number == (NUM_BLOCKS as u32).into(), + |_index, service| + service.client().info().best_number == (NUM_BLOCKS as u32).into(), + ); + + info!("Checking extrinsic propagation"); + let first_service = network.full_nodes[0].1.clone(); + let first_user_data = &network.full_nodes[0].2; + let best_block = BlockId::number(first_service.client().info().best_number); + let extrinsic = extrinsic_factory(&first_service, first_user_data); + let source = sp_transaction_pool::TransactionSource::External; + + futures::executor::block_on( + first_service.transaction_pool().submit_one(&best_block, source, extrinsic) + ).expect("failed to submit extrinsic"); + + 
network.run_until_all_full( + |_index, service| service.transaction_pool().ready().count() == 1, + |_index, _service| true, + ); +} + +pub fn consensus( + spec: GenericChainSpec, + full_builder: Fb, + light_builder: Lb, + authorities: impl IntoIterator +) where + Fb: Fn(Configuration) -> Result, + F: TestNetNode, + Lb: Fn(Configuration) -> Result, + L: TestNetNode, + E: ChainSpecExtension + Clone + 'static + Send, + G: RuntimeGenesis + 'static, +{ + const NUM_FULL_NODES: usize = 10; + const NUM_LIGHT_NODES: usize = 10; + const NUM_BLOCKS: usize = 10; // 10 * 2 sec block production time = ~20 seconds + let temp = tempdir_with_prefix("substrate-consensus-test"); + let mut network = TestNet::new( + &temp, + spec, + (0..NUM_FULL_NODES / 2).map(|_| { |cfg| full_builder(cfg).map(|s| (s, ())) }), + (0..NUM_LIGHT_NODES / 2).map(|_| { |cfg| light_builder(cfg) }), + authorities.into_iter().map(|key| (key, { |cfg| full_builder(cfg).map(|s| (s, ())) })), + 30600, + ); + + info!("Checking consensus"); + let first_address = network.authority_nodes[0].3.clone(); + for (_, service, _, _) in network.full_nodes.iter() { + service.network().add_reserved_peer(first_address.to_string()) + .expect("Error adding reserved peer"); + } + for (_, service, _) in network.light_nodes.iter() { + service.network().add_reserved_peer(first_address.to_string()) + .expect("Error adding reserved peer"); + } + for (_, service, _, _) in network.authority_nodes.iter().skip(1) { + service.network().add_reserved_peer(first_address.to_string()) + .expect("Error adding reserved peer"); + } + network.run_until_all_full( + |_index, service| + service.client().info().finalized_number >= (NUM_BLOCKS as u32 / 2).into(), + |_index, service| + service.client().info().best_number >= (NUM_BLOCKS as u32 / 2).into(), + ); + + info!("Adding more peers"); + network.insert_nodes( + &temp, + (0..NUM_FULL_NODES / 2).map(|_| { |cfg| full_builder(cfg).map(|s| (s, ())) }), + (0..NUM_LIGHT_NODES / 2).map(|_| { |cfg| 
light_builder(cfg) }), + // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise + // the type of the closure cannot be inferred. + (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg).map(|s| (s, ())) })), + ); + for (_, service, _, _) in network.full_nodes.iter() { + service.network().add_reserved_peer(first_address.to_string()) + .expect("Error adding reserved peer"); + } + for (_, service, _) in network.light_nodes.iter() { + service.network().add_reserved_peer(first_address.to_string()) + .expect("Error adding reserved peer"); + } + network.run_until_all_full( + |_index, service| + service.client().info().finalized_number >= (NUM_BLOCKS as u32).into(), + |_index, service| + service.client().info().best_number >= (NUM_BLOCKS as u32).into(), + ); +} diff --git a/devops/docker-compose.yml b/devops/docker-compose.yml new file mode 100644 index 0000000000000..6c025f962fa54 --- /dev/null +++ b/devops/docker-compose.yml @@ -0,0 +1,34 @@ +version: "3" +services: + node_alice: + image: mangata/node + container_name: alice + entrypoint: ./node --dev --alice --validator --unsafe-ws-external --rpc-cors=all -lruntime=debug --node-key=0000000000000000000000000000000000000000000000000000000000000001 + ports: + - "30333:30333" + - "9933:9933" + - "9944:9944" + networks: + testing_net: + ipv4_address: 172.28.1.1 + + node_bob: + image: mangata/node + container_name: bob + entrypoint: ./node --dev --bob --unsafe-ws-external --rpc-cors=all -lframe_executive=debug --bootnodes='/ip4/172.28.1.1/tcp/30333/p2p/12D3KooWEyoppNCUx8Yx66oV9fJnriXwCcXwDDUA2kj6vnc6iDEp' + ports: + - "30335:30333" + - "9935:9933" + - "9945:9944" + links: + - "node_alice:alice" + networks: + testing_net: + ipv4_address: 172.28.1.2 + +networks: + testing_net: + ipam: + driver: default + config: + - subnet: 172.28.0.0/16 diff --git a/devops/dockerfiles/node-and-runtime/Dockerfile b/devops/dockerfiles/node-and-runtime/Dockerfile new file mode 100644 index 0000000000000..183b5297b6435 
--- /dev/null +++ b/devops/dockerfiles/node-and-runtime/Dockerfile @@ -0,0 +1,23 @@ +FROM mangata/rust-builder AS builder +LABEL description="Compiles all workspace artifacts" +WORKDIR /mangata +COPY . /mangata + +RUN WASM_BUILD_TOOLCHAIN=nightly-2020-05-23 cargo build --release + +FROM debian:stretch +LABEL description="mangata node" +WORKDIR /mangata +COPY --from=builder /mangata/target/release/node-template /mangata/node +COPY --from=builder /mangata/target/release/wbuild/node-template-runtime/node_template_runtime.compact.wasm /mangata/runtime.compact.wasm + +RUN /mangata/node --version + +RUN b2sum -l 256 /mangata/runtime.compact.wasm +RUN b2sum -l 512 /mangata/runtime.compact.wasm + +EXPOSE 30333 9933 9944 + +VOLUME ["/data", "/keystore"] + +ENTRYPOINT ["/mangata/node"] diff --git a/devops/dockerfiles/rust-builder/Dockerfile b/devops/dockerfiles/rust-builder/Dockerfile new file mode 100644 index 0000000000000..8a2443a536b28 --- /dev/null +++ b/devops/dockerfiles/rust-builder/Dockerfile @@ -0,0 +1,8 @@ +FROM liuchong/rustup:1.46.0 AS builder +LABEL description="Rust and WASM build environment for mangata and substrate" + +WORKDIR /setup +COPY setup.sh /setup +ENV TERM=xterm + +RUN ./setup.sh diff --git a/node/Cargo.toml b/node/Cargo.toml index 64afc97621bfa..28f3449843788 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -72,9 +72,8 @@ sp-finality-grandpa = '2.0.0' sp-inherents = '2.0.0' sp-runtime = '2.0.0' sp-transaction-pool = '2.0.0' -substrate-frame-rpc-system = '2.0.0' -pallet-assets = { default-features = false, version = '2.0.0' } - +substrate-frame-rpc-system = { version = '2.0.0', path = '../utils/frame/rpc/system'} +pallet-assets = { default-features = false, version = '2.0.0', path = '../pallets/assets' } [features] default = [] diff --git a/pallets/executive/Cargo.toml b/pallets/executive/Cargo.toml new file mode 100644 index 0000000000000..6458a5aa8d9f7 --- /dev/null +++ b/pallets/executive/Cargo.toml @@ -0,0 +1,49 @@ +[package] +name = 
"frame-executive" +version = "2.0.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME executives engine" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } +frame-support = { version = "2.0.0", default-features = false } +frame-system = { version = "2.0.0", default-features = false } +serde = { version = "1.0.101", optional = true } +sp-runtime = { version = "2.0.0", default-features = false } +sp-tracing = { version = "2.0.0", default-features = false } +sp-std = { version = "2.0.0", default-features = false } +sp-io = { version = "2.0.0", default-features = false } +sp-core = { version = "2.0.0", default-features = false } + +[dev-dependencies] +hex-literal = "0.3.1" +sp-core = { version = "2.0.0" } +sp-io ={ version = "2.0.0" } +pallet-indices = { version = "2.0.0" } +pallet-balances = { version = "2.0.0" } +pallet-transaction-payment = { version = "2.0.0" } +sp-version = { version = "2.0.0" } + +[features] +default = ["std"] +with-tracing = [ + "sp-tracing/with-tracing" +] +std = [ + "codec/std", + "frame-support/std", + "frame-system/std", + "serde", + "sp-core/std", + "sp-runtime/std", + "sp-tracing/std", + "sp-std/std" +] diff --git a/pallets/executive/README.md b/pallets/executive/README.md new file mode 100644 index 0000000000000..24b354902e876 --- /dev/null +++ b/pallets/executive/README.md @@ -0,0 +1,61 @@ +# Executive Module + +The Executive module acts as the orchestration layer for the runtime. It dispatches incoming +extrinsic calls to the respective modules in the runtime. + +## Overview + +The executive module is not a typical pallet providing functionality around a specific feature. 
+It is a cross-cutting framework component for the FRAME. It works in conjunction with the +[FRAME System module](https://docs.rs/frame-system/latest/frame_system/) to perform these cross-cutting functions. + +The Executive module provides functions to: + +- Check transaction validity. +- Initialize a block. +- Apply extrinsics. +- Execute a block. +- Finalize a block. +- Start an off-chain worker. + +### Implementations + +The Executive module provides the following implementations: + +- `ExecuteBlock`: Trait that can be used to execute a block. +- `Executive`: Type that can be used to make the FRAME available from the runtime. + +## Usage + +The default Substrate node template declares the [`Executive`](https://docs.rs/frame-executive/latest/frame_executive/struct.Executive.html) type in its library. + +### Example + +`Executive` type declaration from the node template. + +```rust +# +/// Executive: handles dispatch to the various modules. +pub type Executive = executive::Executive; +``` + +### Custom `OnRuntimeUpgrade` logic + +You can add custom logic that should be called in your runtime on a runtime upgrade. This is +done by setting an optional generic parameter. The custom logic will be called before +the on runtime upgrade logic of all modules is called. + +```rust +# +struct CustomOnRuntimeUpgrade; +impl frame_support::traits::OnRuntimeUpgrade for CustomOnRuntimeUpgrade { + fn on_runtime_upgrade() -> frame_support::weights::Weight { + // Do whatever you want. + 0 + } +} + +pub type Executive = executive::Executive; +``` + +License: Apache-2.0 \ No newline at end of file diff --git a/pallets/executive/src/lib.rs b/pallets/executive/src/lib.rs new file mode 100644 index 0000000000000..79ce3d0c26fa3 --- /dev/null +++ b/pallets/executive/src/lib.rs @@ -0,0 +1,1081 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Executive Module +//! +//! The Executive module acts as the orchestration layer for the runtime. It dispatches incoming +//! extrinsic calls to the respective modules in the runtime. +//! +//! ## Overview +//! +//! The executive module is not a typical pallet providing functionality around a specific feature. +//! It is a cross-cutting framework component for the FRAME. It works in conjunction with the +//! [FRAME System module](../frame_system/index.html) to perform these cross-cutting functions. +//! +//! The Executive module provides functions to: +//! +//! - Check transaction validity. +//! - Initialize a block. +//! - Apply extrinsics. +//! - Execute a block. +//! - Finalize a block. +//! - Start an off-chain worker. +//! +//! ### Implementations +//! +//! The Executive module provides the following implementations: +//! +//! - `ExecuteBlock`: Trait that can be used to execute a block. +//! - `Executive`: Type that can be used to make the FRAME available from the runtime. +//! +//! ## Usage +//! +//! The default Substrate node template declares the [`Executive`](./struct.Executive.html) type in its library. +//! +//! ### Example +//! +//! `Executive` type declaration from the node template. +//! +//! ``` +//! # use sp_runtime::generic; +//! # use frame_executive as executive; +//! # pub struct UncheckedExtrinsic {}; +//! # pub struct Header {}; +//! 
# type Context = frame_system::ChainContext; +//! # pub type Block = generic::Block; +//! # pub type Balances = u64; +//! # pub type AllModules = u64; +//! # pub enum Runtime {}; +//! # use sp_runtime::transaction_validity::{ +//! # TransactionValidity, UnknownTransaction, TransactionSource, +//! # }; +//! # use sp_runtime::traits::ValidateUnsigned; +//! # impl ValidateUnsigned for Runtime { +//! # type Call = (); +//! # +//! # fn validate_unsigned(_source: TransactionSource, _call: &Self::Call) -> TransactionValidity { +//! # UnknownTransaction::NoUnsignedValidator.into() +//! # } +//! # } +//! /// Executive: handles dispatch to the various modules. +//! pub type Executive = executive::Executive; +//! ``` +//! +//! ### Custom `OnRuntimeUpgrade` logic +//! +//! You can add custom logic that should be called in your runtime on a runtime upgrade. This is +//! done by setting an optional generic parameter. The custom logic will be called before +//! the on runtime upgrade logic of all modules is called. +//! +//! ``` +//! # use sp_runtime::generic; +//! # use frame_executive as executive; +//! # pub struct UncheckedExtrinsic {}; +//! # pub struct Header {}; +//! # type Context = frame_system::ChainContext; +//! # pub type Block = generic::Block; +//! # pub type Balances = u64; +//! # pub type AllModules = u64; +//! # pub enum Runtime {}; +//! # use sp_runtime::transaction_validity::{ +//! # TransactionValidity, UnknownTransaction, TransactionSource, +//! # }; +//! # use sp_runtime::traits::ValidateUnsigned; +//! # impl ValidateUnsigned for Runtime { +//! # type Call = (); +//! # +//! # fn validate_unsigned(_source: TransactionSource, _call: &Self::Call) -> TransactionValidity { +//! # UnknownTransaction::NoUnsignedValidator.into() +//! # } +//! # } +//! struct CustomOnRuntimeUpgrade; +//! impl frame_support::traits::OnRuntimeUpgrade for CustomOnRuntimeUpgrade { +//! fn on_runtime_upgrade() -> frame_support::weights::Weight { +//! // Do whatever you want. +//! 0 +//! 
} +//! } +//! +//! pub type Executive = executive::Executive; +//! ``` + +#![cfg_attr(not(feature = "std"), no_std)] +use sp_runtime::print; +use sp_std::{prelude::*, marker::PhantomData}; +use frame_support::{ + storage::StorageValue, weights::{GetDispatchInfo, DispatchInfo, DispatchClass}, + traits::{OnInitialize, OnFinalize, OnRuntimeUpgrade, OffchainWorker}, + dispatch::PostDispatchInfo, +}; +use sp_runtime::{ + generic::Digest, ApplyExtrinsicResult, + traits::{ + self, Header, Zero, One, Checkable, Applyable, CheckEqual, ValidateUnsigned, NumberFor, + Block as BlockT, Dispatchable, Saturating, + }, + transaction_validity::{TransactionValidity, TransactionSource}, +}; +use codec::{Codec, Encode}; +use frame_system::{extrinsics_root, DigestOf}; + +/// Trait that can be used to execute a block. +pub trait ExecuteBlock { + /// Actually execute all transitions for `block`. + fn execute_block(block: Block); +} + +pub type CheckedOf = >::Checked; +pub type CallOf = as Applyable>::Call; +pub type OriginOf = as Dispatchable>::Origin; + +/// Main entry point for certain runtime actions as e.g. `execute_block`. +/// +/// Generic parameters: +/// - `System`: Something that implements `frame_system::Trait` +/// - `Block`: The block type of the runtime +/// - `Context`: The context that is used when checking an extrinsic. +/// - `UnsignedValidator`: The unsigned transaction validator of the runtime. +/// - `AllModules`: Tuple that contains all modules. Will be used to call e.g. `on_initialize`. +/// - `OnRuntimeUpgrade`: Custom logic that should be called after a runtime upgrade. Modules are +/// already called by `AllModules`. It will be called before all modules will +/// be called. 
+pub struct Executive( + PhantomData<(System, Block, Context, UnsignedValidator, AllModules, OnRuntimeUpgrade)> +); + +impl< + System: frame_system::Trait, + Block: traits::Block, + Context: Default, + UnsignedValidator, + AllModules: + OnRuntimeUpgrade + + OnInitialize + + OnFinalize + + OffchainWorker, + COnRuntimeUpgrade: OnRuntimeUpgrade, +> ExecuteBlock for + Executive +where + Block::Extrinsic: Checkable + Codec, + CheckedOf: + Applyable + + GetDispatchInfo, + CallOf: Dispatchable, + OriginOf: From>, + UnsignedValidator: ValidateUnsigned>, +{ + fn execute_block(block: Block) { + Executive::::execute_block(block); + } +} + +impl< + System: frame_system::Trait, + Block: traits::Block, + Context: Default, + UnsignedValidator, + AllModules: + OnRuntimeUpgrade + + OnInitialize + + OnFinalize + + OffchainWorker, + COnRuntimeUpgrade: OnRuntimeUpgrade, +> Executive +where + Block::Extrinsic: Checkable + Codec, + CheckedOf: + Applyable + + GetDispatchInfo, + CallOf: Dispatchable, + OriginOf: From>, + UnsignedValidator: ValidateUnsigned>, +{ + /// Start the execution of a particular block. + pub fn initialize_block(header: &System::Header) { + + print("Initialized block"); + sp_io::init_tracing(); + sp_tracing::enter_span!(sp_tracing::Level::TRACE, "init_block"); + let digests = Self::extract_pre_digest(&header); + Self::initialize_block_impl( + header.number(), + header.parent_hash(), + header.extrinsics_root(), + &digests + ); + } + + fn extract_pre_digest(header: &System::Header) -> DigestOf { + let mut digest = >::default(); + header.digest().logs() + .iter() + .for_each(|d| if d.as_pre_runtime().is_some() { + digest.push(d.clone()) + }); + digest + } + + fn initialize_block_impl( + block_number: &System::BlockNumber, + parent_hash: &System::Hash, + extrinsics_root: &System::Hash, + digest: &Digest, + ) { + if Self::runtime_upgraded() { + // System is not part of `AllModules`, so we need to call this manually. 
+ let mut weight = as OnRuntimeUpgrade>::on_runtime_upgrade(); + weight = weight.saturating_add(COnRuntimeUpgrade::on_runtime_upgrade()); + weight = weight.saturating_add(::on_runtime_upgrade()); + >::register_extra_weight_unchecked(weight, DispatchClass::Mandatory); + } + >::initialize( + block_number, + parent_hash, + extrinsics_root, + digest, + frame_system::InitKind::Full, + ); + as OnInitialize>::on_initialize(*block_number); + let weight = >::on_initialize(*block_number) + .saturating_add(>::get()); + >::register_extra_weight_unchecked(weight, DispatchClass::Mandatory); + + frame_system::Module::::note_finished_initialize(); + } + + /// Returns if the runtime was upgraded since the last time this function was called. + fn runtime_upgraded() -> bool { + let last = frame_system::LastRuntimeUpgrade::get(); + let current = >::get(); + + if last.map(|v| v.was_upgraded(¤t)).unwrap_or(true) { + frame_system::LastRuntimeUpgrade::put( + frame_system::LastRuntimeUpgradeInfo::from(current), + ); + true + } else { + false + } + } + + fn initial_checks(block: &Block) { + + print("Initial checks"); + sp_tracing::enter_span!(sp_tracing::Level::TRACE, "initial_checks"); + let header = block.header(); + + // Check that `parent_hash` is correct. + let n = header.number().clone(); + assert!( + n > System::BlockNumber::zero() + && >::block_hash(n - System::BlockNumber::one()) == *header.parent_hash(), + "Parent hash should be valid." + ); + + // Check that transaction trie root represents the transactions. + //FIXME return extrinsic root check + // let xts_root = extrinsics_root::(&block.extrinsics()); + // header.extrinsics_root().check_equal(&xts_root); + // assert!(header.extrinsics_root() == &xts_root, "Transaction trie root must be valid."); + } + + /// Actually execute all transitions for `block`. + pub fn execute_block(block: Block) { + print("Block is being executed"); + sp_io::init_tracing(); + sp_tracing::within_span! 
{ + sp_tracing::info_span!( "execute_block", ?block); + { + + print("inside block execution"); + Self::initialize_block(block.header()); + + // any initial checks + Self::initial_checks(&block); + + let signature_batching = sp_runtime::SignatureBatching::start(); + + frame_support::debug::RuntimeLogger::init(); + + // execute extrinsics + let (header, extrinsics) = block.deconstruct(); + + Self::execute_extrinsics_with_book_keeping(extrinsics.clone(), *header.number()); + + if !signature_batching.verify() { + panic!("Signature verification failed."); + } + + // any final checks + Self::final_checks(&header); + } }; + } + + /// Execute given extrinsics and take care of post-extrinsics book-keeping. + fn execute_extrinsics_with_book_keeping(extrinsics: Vec, block_number: NumberFor) { + print("extrinsic are executed with book keeping (dont expect to see this message)"); + extrinsics.into_iter().for_each(Self::apply_extrinsic_no_note); + + // post-extrinsics book-keeping + >::note_finished_extrinsics(); + as OnFinalize>::on_finalize(block_number); + >::on_finalize(block_number); + } + + /// Finalize the block - it is up the caller to ensure that all header fields are valid + /// except state-root. + pub fn finalize_block() -> System::Header { + print("block is being finalized"); + sp_io::init_tracing(); + sp_tracing::enter_span!( sp_tracing::Level::TRACE, "finalize_block" ); + >::note_finished_extrinsics(); + let block_number = >::block_number(); + as OnFinalize>::on_finalize(block_number); + >::on_finalize(block_number); + + // set up extrinsics + >::derive_extrinsics(); + >::finalize() + } + + /// Apply extrinsic outside of the block execution function. + /// + /// This doesn't attempt to validate anything regarding the block, but it builds a list of uxt + /// hashes. 
+ pub fn apply_extrinsic(uxt: Block::Extrinsic) -> ApplyExtrinsicResult { + print("extrinsic applied outside block execution"); + sp_io::init_tracing(); + let encoded = uxt.encode(); + let encoded_len = encoded.len(); + Self::apply_extrinsic_with_len(uxt, encoded_len, Some(encoded)) + } + + pub fn mock_apply_extrinsic(uxt: Block::Extrinsic) -> ApplyExtrinsicResult { + Ok(Ok(())) + } + + /// Apply an extrinsic inside the block execution function. + fn apply_extrinsic_no_note(uxt: Block::Extrinsic) { + print("extrinsic applied inside block execution"); + let l = uxt.encode().len(); + match Self::apply_extrinsic_with_len(uxt, l, None) { + Ok(_) => (), + Err(e) => { let err: &'static str = e.into(); panic!(err) }, + } + } + + /// Actually apply an extrinsic given its `encoded_len`; this doesn't note its hash. + fn apply_extrinsic_with_len( + uxt: Block::Extrinsic, + encoded_len: usize, + to_note: Option>, + ) -> ApplyExtrinsicResult { + print("extrinsic application from unknown origin"); + sp_tracing::enter_span!( + sp_tracing::info_span!("apply_extrinsic", + ext=?sp_core::hexdisplay::HexDisplay::from(&uxt.encode())) + ); + // Verify that the signature is good. + let xt = uxt.check(&Default::default())?; + + // We don't need to make sure to `note_extrinsic` only after we know it's going to be + // executed to prevent it from leaking in storage since at this point, it will either + // execute or panic (and revert storage changes). + if let Some(encoded) = to_note { + >::note_extrinsic(encoded); + } + + // AUDIT: Under no circumstances may this function panic from here onwards. 
+ + // Decode parameters and dispatch + let dispatch_info = xt.get_dispatch_info(); + let r = Applyable::apply::(xt, &dispatch_info, encoded_len)?; + + >::note_applied_extrinsic(&r, dispatch_info); + + Ok(r.map(|_| ()).map_err(|e| e.error)) + } + + fn final_checks(header: &System::Header) { + sp_tracing::enter_span!(sp_tracing::Level::TRACE, "final_checks"); + // remove temporaries + let new_header = >::finalize(); + + // check digest + assert_eq!( + header.digest().logs().len(), + new_header.digest().logs().len(), + "Number of digest items must match that calculated." + ); + let items_zip = header.digest().logs().iter().zip(new_header.digest().logs().iter()); + for (header_item, computed_item) in items_zip { + header_item.check_equal(&computed_item); + assert!(header_item == computed_item, "Digest item must match that calculated."); + } + + // check storage root. + let storage_root = new_header.state_root(); + header.state_root().check_equal(&storage_root); + //assert!(header.state_root() == storage_root, "Storage root must match that calculated."); + } + + /// Check a given signed transaction for validity. This doesn't execute any + /// side-effects; it merely checks whether the transaction would panic if it were included or not. + /// + /// Changes made to storage should be discarded. + pub fn validate_transaction( + source: TransactionSource, + uxt: Block::Extrinsic, + ) -> TransactionValidity { + sp_io::init_tracing(); + use sp_tracing::{enter_span, within_span}; + + enter_span!{ sp_tracing::Level::TRACE, "validate_transaction" }; + + let encoded_len = within_span!{ sp_tracing::Level::TRACE, "using_encoded"; + uxt.using_encoded(|d| d.len()) + }; + + let xt = within_span!{ sp_tracing::Level::TRACE, "check"; + uxt.check(&Default::default()) + }?; + + let dispatch_info = within_span!{ sp_tracing::Level::TRACE, "dispatch_info"; + xt.get_dispatch_info() + }; + + within_span! 
{ + sp_tracing::Level::TRACE, "validate"; + xt.validate::(source, &dispatch_info, encoded_len) + } + } + + /// Start an offchain worker and generate extrinsics. + pub fn offchain_worker(header: &System::Header) { + sp_io::init_tracing(); + // We need to keep events available for offchain workers, + // hence we initialize the block manually. + // OffchainWorker RuntimeApi should skip initialization. + let digests = Self::extract_pre_digest(header); + + >::initialize( + header.number(), + header.parent_hash(), + header.extrinsics_root(), + &digests, + frame_system::InitKind::Inspection, + ); + + // Initialize logger, so the log messages are visible + // also when running WASM. + frame_support::debug::RuntimeLogger::init(); + + >::offchain_worker( + // to maintain backward compatibility we call module offchain workers + // with parent block number. + header.number().saturating_sub(1.into()) + ) + } +} + + +#[cfg(test)] +mod tests { + use super::*; + use sp_core::H256; + use sp_runtime::{ + generic::Era, Perbill, DispatchError, testing::{Digest, Header, Block}, + traits::{Header as HeaderT, BlakeTwo256, IdentityLookup}, + transaction_validity::{InvalidTransaction, UnknownTransaction, TransactionValidityError}, + }; + use frame_support::{ + impl_outer_event, impl_outer_origin, parameter_types, impl_outer_dispatch, + weights::{Weight, RuntimeDbWeight, IdentityFee, WeightToFeePolynomial}, + traits::{Currency, LockIdentifier, LockableCurrency, WithdrawReasons, WithdrawReason}, + }; + use frame_system::{self as system, Call as SystemCall, ChainContext, LastRuntimeUpgradeInfo}; + use pallet_balances::Call as BalancesCall; + use hex_literal::hex; + const TEST_KEY: &[u8] = &*b":test:key:"; + + mod custom { + use frame_support::weights::{Weight, DispatchClass}; + + pub trait Trait: frame_system::Trait {} + + frame_support::decl_module! 
{ + pub struct Module for enum Call where origin: T::Origin { + #[weight = 100] + fn some_function(origin) { + // NOTE: does not make any different. + let _ = frame_system::ensure_signed(origin); + } + #[weight = (200, DispatchClass::Operational)] + fn some_root_operation(origin) { + let _ = frame_system::ensure_root(origin); + } + #[weight = 0] + fn some_unsigned_message(origin) { + let _ = frame_system::ensure_none(origin); + } + + // module hooks. + // one with block number arg and one without + fn on_initialize(n: T::BlockNumber) -> Weight { + println!("on_initialize({})", n); + 175 + } + + fn on_finalize() { + println!("on_finalize(?)"); + } + + fn on_runtime_upgrade() -> Weight { + sp_io::storage::set(super::TEST_KEY, "module".as_bytes()); + 0 + } + } + } + } + + type System = frame_system::Module; + type Balances = pallet_balances::Module; + type Custom = custom::Module; + + use pallet_balances as balances; + + impl_outer_origin! { + pub enum Origin for Runtime { } + } + + impl_outer_event!{ + pub enum MetaEvent for Runtime { + system, + balances, + } + } + impl_outer_dispatch! { + pub enum Call for Runtime where origin: Origin { + frame_system::System, + pallet_balances::Balances, + } + } + + #[derive(Clone, Eq, PartialEq)] + pub struct Runtime; + parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub const BlockExecutionWeight: Weight = 10; + pub const ExtrinsicBaseWeight: Weight = 5; + pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { + read: 10, + write: 100, + }; + } + impl frame_system::Trait for Runtime { + type BaseCallFilter = (); + type Origin = Origin; + type Index = u64; + type Call = Call; + type BlockNumber = u64; + type Hash = sp_core::H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = MetaEvent; + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = DbWeight; + type BlockExecutionWeight = BlockExecutionWeight; + type ExtrinsicBaseWeight = ExtrinsicBaseWeight; + type MaximumExtrinsicWeight = MaximumBlockWeight; + type AvailableBlockRatio = AvailableBlockRatio; + type MaximumBlockLength = MaximumBlockLength; + type Version = RuntimeVersion; + type PalletInfo = (); + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + } + + type Balance = u64; + parameter_types! { + pub const ExistentialDeposit: Balance = 1; + } + impl pallet_balances::Trait for Runtime { + type Balance = Balance; + type Event = MetaEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type MaxLocks = (); + type WeightInfo = (); + } + + parameter_types! 
{ + pub const TransactionByteFee: Balance = 0; + } + impl pallet_transaction_payment::Trait for Runtime { + type Currency = Balances; + type OnTransactionPayment = (); + type TransactionByteFee = TransactionByteFee; + type WeightToFee = IdentityFee; + type FeeMultiplierUpdate = (); + } + impl custom::Trait for Runtime {} + + impl ValidateUnsigned for Runtime { + type Call = Call; + + fn pre_dispatch(_call: &Self::Call) -> Result<(), TransactionValidityError> { + Ok(()) + } + + fn validate_unsigned( + _source: TransactionSource, + call: &Self::Call, + ) -> TransactionValidity { + match call { + Call::Balances(BalancesCall::set_balance(_, _, _)) => Ok(Default::default()), + _ => UnknownTransaction::NoUnsignedValidator.into(), + } + } + } + + pub struct RuntimeVersion; + impl frame_support::traits::Get for RuntimeVersion { + fn get() -> sp_version::RuntimeVersion { + RUNTIME_VERSION.with(|v| v.borrow().clone()) + } + } + + thread_local! { + pub static RUNTIME_VERSION: std::cell::RefCell = + Default::default(); + } + + type SignedExtra = ( + frame_system::CheckEra, + frame_system::CheckNonce, + frame_system::CheckWeight, + pallet_transaction_payment::ChargeTransactionPayment, + ); + type AllModules = (System, Balances, Custom); + type TestXt = sp_runtime::testing::TestXt; + + // Will contain `true` when the custom runtime logic was called. 
+ const CUSTOM_ON_RUNTIME_KEY: &[u8] = &*b":custom:on_runtime"; + + struct CustomOnRuntimeUpgrade; + impl OnRuntimeUpgrade for CustomOnRuntimeUpgrade { + fn on_runtime_upgrade() -> Weight { + sp_io::storage::set(TEST_KEY, "custom_upgrade".as_bytes()); + sp_io::storage::set(CUSTOM_ON_RUNTIME_KEY, &true.encode()); + 0 + } + } + + type Executive = super::Executive< + Runtime, + Block, + ChainContext, + Runtime, + AllModules, + CustomOnRuntimeUpgrade + >; + + fn extra(nonce: u64, fee: Balance) -> SignedExtra { + ( + frame_system::CheckEra::from(Era::Immortal), + frame_system::CheckNonce::from(nonce), + frame_system::CheckWeight::new(), + pallet_transaction_payment::ChargeTransactionPayment::from(fee) + ) + } + + fn sign_extra(who: u64, nonce: u64, fee: Balance) -> Option<(u64, SignedExtra)> { + Some((who, extra(nonce, fee))) + } + + #[test] + fn balance_transfer_dispatch_works() { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + pallet_balances::GenesisConfig:: { + balances: vec![(1, 211)], + }.assimilate_storage(&mut t).unwrap(); + let xt = TestXt::new(Call::Balances(BalancesCall::transfer(2, 69)), sign_extra(1, 0, 0)); + let weight = xt.get_dispatch_info().weight + ::ExtrinsicBaseWeight::get(); + let fee: Balance + = ::WeightToFee::calc(&weight); + let mut t = sp_io::TestExternalities::new(t); + t.execute_with(|| { + Executive::initialize_block(&Header::new( + 1, + H256::default(), + H256::default(), + [69u8; 32].into(), + Digest::default(), + )); + let r = Executive::apply_extrinsic(xt); + assert!(r.is_ok()); + assert_eq!(>::total_balance(&1), 142 - fee); + assert_eq!(>::total_balance(&2), 69); + }); + } + + fn new_test_ext(balance_factor: Balance) -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + pallet_balances::GenesisConfig:: { + balances: vec![(1, 111 * balance_factor)], + }.assimilate_storage(&mut t).unwrap(); + t.into() + } + + #[test] + fn block_import_works() { 
+ new_test_ext(1).execute_with(|| { + Executive::execute_block(Block { + header: Header { + parent_hash: [69u8; 32].into(), + number: 1, + state_root: hex!("465a1569d309039bdf84b0479d28064ea29e6584584dc7d788904bb14489c6f6").into(), + extrinsics_root: hex!("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314").into(), + digest: Digest { logs: vec![], }, + }, + extrinsics: vec![], + }); + }); + } + + #[test] + #[should_panic] + fn block_import_of_bad_state_root_fails() { + new_test_ext(1).execute_with(|| { + Executive::execute_block(Block { + header: Header { + parent_hash: [69u8; 32].into(), + number: 1, + state_root: [0u8; 32].into(), + extrinsics_root: hex!("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314").into(), + digest: Digest { logs: vec![], }, + }, + extrinsics: vec![], + }); + }); + } + + #[test] + #[should_panic] + fn block_import_of_bad_extrinsic_root_fails() { + new_test_ext(1).execute_with(|| { + Executive::execute_block(Block { + header: Header { + parent_hash: [69u8; 32].into(), + number: 1, + state_root: hex!("49cd58a254ccf6abc4a023d9a22dcfc421e385527a250faec69f8ad0d8ed3e48").into(), + extrinsics_root: [0u8; 32].into(), + digest: Digest { logs: vec![], }, + }, + extrinsics: vec![], + }); + }); + } + + #[test] + fn bad_extrinsic_not_inserted() { + let mut t = new_test_ext(1); + // bad nonce check! 
+ let xt = TestXt::new(Call::Balances(BalancesCall::transfer(33, 69)), sign_extra(1, 30, 0)); + t.execute_with(|| { + Executive::initialize_block(&Header::new( + 1, + H256::default(), + H256::default(), + [69u8; 32].into(), + Digest::default(), + )); + assert!(Executive::apply_extrinsic(xt).is_err()); + assert_eq!(>::extrinsic_index(), Some(0)); + }); + } + + #[test] + fn block_weight_limit_enforced() { + let mut t = new_test_ext(10000); + // given: TestXt uses the encoded len as fixed Len: + let xt = TestXt::new(Call::Balances(BalancesCall::transfer(33, 0)), sign_extra(1, 0, 0)); + let encoded = xt.encode(); + let encoded_len = encoded.len() as Weight; + // Block execution weight + on_initialize weight + let base_block_weight = 175 + ::BlockExecutionWeight::get(); + let limit = AvailableBlockRatio::get() * MaximumBlockWeight::get() - base_block_weight; + let num_to_exhaust_block = limit / (encoded_len + 5); + t.execute_with(|| { + Executive::initialize_block(&Header::new( + 1, + H256::default(), + H256::default(), + [69u8; 32].into(), + Digest::default(), + )); + // Base block execution weight + `on_initialize` weight from the custom module. 
+ assert_eq!(>::block_weight().total(), base_block_weight); + + for nonce in 0..=num_to_exhaust_block { + let xt = TestXt::new( + Call::Balances(BalancesCall::transfer(33, 0)), sign_extra(1, nonce.into(), 0), + ); + let res = Executive::apply_extrinsic(xt); + if nonce != num_to_exhaust_block { + assert!(res.is_ok()); + assert_eq!( + >::block_weight().total(), + //--------------------- on_initialize + block_execution + extrinsic_base weight + (encoded_len + 5) * (nonce + 1) + base_block_weight, + ); + assert_eq!(>::extrinsic_index(), Some(nonce as u32 + 1)); + } else { + assert_eq!(res, Err(InvalidTransaction::ExhaustsResources.into())); + } + } + }); + } + + #[test] + fn block_weight_and_size_is_stored_per_tx() { + let xt = TestXt::new(Call::Balances(BalancesCall::transfer(33, 0)), sign_extra(1, 0, 0)); + let x1 = TestXt::new(Call::Balances(BalancesCall::transfer(33, 0)), sign_extra(1, 1, 0)); + let x2 = TestXt::new(Call::Balances(BalancesCall::transfer(33, 0)), sign_extra(1, 2, 0)); + let len = xt.clone().encode().len() as u32; + let mut t = new_test_ext(1); + t.execute_with(|| { + // Block execution weight + on_initialize weight from custom module + let base_block_weight = 175 + ::BlockExecutionWeight::get(); + + Executive::initialize_block(&Header::new( + 1, + H256::default(), + H256::default(), + [69u8; 32].into(), + Digest::default(), + )); + + assert_eq!(>::block_weight().total(), base_block_weight); + assert_eq!(>::all_extrinsics_len(), 0); + + assert!(Executive::apply_extrinsic(xt.clone()).unwrap().is_ok()); + assert!(Executive::apply_extrinsic(x1.clone()).unwrap().is_ok()); + assert!(Executive::apply_extrinsic(x2.clone()).unwrap().is_ok()); + + // default weight for `TestXt` == encoded length. 
+ let extrinsic_weight = len as Weight + ::ExtrinsicBaseWeight::get(); + assert_eq!( + >::block_weight().total(), + base_block_weight + 3 * extrinsic_weight, + ); + assert_eq!(>::all_extrinsics_len(), 3 * len); + + let _ = >::finalize(); + // All extrinsics length cleaned on `System::finalize` + assert_eq!(>::all_extrinsics_len(), 0); + + // New Block + Executive::initialize_block(&Header::new( + 2, + H256::default(), + H256::default(), + [69u8; 32].into(), + Digest::default(), + )); + + // Block weight cleaned up on `System::initialize` + assert_eq!(>::block_weight().total(), base_block_weight); + }); + } + + #[test] + fn validate_unsigned() { + let xt = TestXt::new(Call::Balances(BalancesCall::set_balance(33, 69, 69)), None); + let mut t = new_test_ext(1); + + t.execute_with(|| { + assert_eq!( + Executive::validate_transaction(TransactionSource::InBlock, xt.clone()), + Ok(Default::default()), + ); + assert_eq!(Executive::apply_extrinsic(xt), Ok(Err(DispatchError::BadOrigin))); + }); + } + + #[test] + fn can_pay_for_tx_fee_on_full_lock() { + let id: LockIdentifier = *b"0 "; + let execute_with_lock = |lock: WithdrawReasons| { + let mut t = new_test_ext(1); + t.execute_with(|| { + as LockableCurrency>::set_lock( + id, + &1, + 110, + lock, + ); + let xt = TestXt::new( + Call::System(SystemCall::remark(vec![1u8])), + sign_extra(1, 0, 0), + ); + let weight = xt.get_dispatch_info().weight + + ::ExtrinsicBaseWeight::get(); + let fee: Balance = + ::WeightToFee::calc(&weight); + Executive::initialize_block(&Header::new( + 1, + H256::default(), + H256::default(), + [69u8; 32].into(), + Digest::default(), + )); + + if lock == WithdrawReasons::except(WithdrawReason::TransactionPayment) { + assert!(Executive::apply_extrinsic(xt).unwrap().is_ok()); + // tx fee has been deducted. 
+ assert_eq!(>::total_balance(&1), 111 - fee); + } else { + assert_eq!( + Executive::apply_extrinsic(xt), + Err(InvalidTransaction::Payment.into()), + ); + assert_eq!(>::total_balance(&1), 111); + } + }); + }; + + execute_with_lock(WithdrawReasons::all()); + execute_with_lock(WithdrawReasons::except(WithdrawReason::TransactionPayment)); + } + + #[test] + fn block_hooks_weight_is_stored() { + new_test_ext(1).execute_with(|| { + + Executive::initialize_block(&Header::new_from_number(1)); + // NOTE: might need updates over time if new weights are introduced. + // For now it only accounts for the base block execution weight and + // the `on_initialize` weight defined in the custom test module. + assert_eq!(>::block_weight().total(), 175 + 10); + }) + } + + #[test] + fn runtime_upgraded_should_work() { + new_test_ext(1).execute_with(|| { + RUNTIME_VERSION.with(|v| *v.borrow_mut() = Default::default()); + // It should be added at genesis + assert!(frame_system::LastRuntimeUpgrade::exists()); + assert!(!Executive::runtime_upgraded()); + + RUNTIME_VERSION.with(|v| *v.borrow_mut() = sp_version::RuntimeVersion { + spec_version: 1, + ..Default::default() + }); + assert!(Executive::runtime_upgraded()); + assert_eq!( + Some(LastRuntimeUpgradeInfo { spec_version: 1.into(), spec_name: "".into() }), + frame_system::LastRuntimeUpgrade::get(), + ); + + RUNTIME_VERSION.with(|v| *v.borrow_mut() = sp_version::RuntimeVersion { + spec_version: 1, + spec_name: "test".into(), + ..Default::default() + }); + assert!(Executive::runtime_upgraded()); + assert_eq!( + Some(LastRuntimeUpgradeInfo { spec_version: 1.into(), spec_name: "test".into() }), + frame_system::LastRuntimeUpgrade::get(), + ); + + RUNTIME_VERSION.with(|v| *v.borrow_mut() = sp_version::RuntimeVersion { + spec_version: 1, + spec_name: "test".into(), + impl_version: 2, + ..Default::default() + }); + assert!(!Executive::runtime_upgraded()); + + frame_system::LastRuntimeUpgrade::take(); + assert!(Executive::runtime_upgraded()); + 
assert_eq!( + Some(LastRuntimeUpgradeInfo { spec_version: 1.into(), spec_name: "test".into() }), + frame_system::LastRuntimeUpgrade::get(), + ); + }) + } + + #[test] + fn last_runtime_upgrade_was_upgraded_works() { + let test_data = vec![ + (0, "", 1, "", true), + (1, "", 1, "", false), + (1, "", 1, "test", true), + (1, "", 0, "", false), + (1, "", 0, "test", true), + ]; + + for (spec_version, spec_name, c_spec_version, c_spec_name, result) in test_data { + let current = sp_version::RuntimeVersion { + spec_version: c_spec_version, + spec_name: c_spec_name.into(), + ..Default::default() + }; + + let last = LastRuntimeUpgradeInfo { + spec_version: spec_version.into(), + spec_name: spec_name.into(), + }; + + assert_eq!(result, last.was_upgraded(¤t)); + } + } + + #[test] + fn custom_runtime_upgrade_is_called_before_modules() { + new_test_ext(1).execute_with(|| { + // Make sure `on_runtime_upgrade` is called. + RUNTIME_VERSION.with(|v| *v.borrow_mut() = sp_version::RuntimeVersion { + spec_version: 1, + ..Default::default() + }); + + Executive::initialize_block(&Header::new( + 1, + H256::default(), + H256::default(), + [69u8; 32].into(), + Digest::default(), + )); + + assert_eq!(&sp_io::storage::get(TEST_KEY).unwrap()[..], *b"module"); + assert_eq!(sp_io::storage::get(CUSTOM_ON_RUNTIME_KEY).unwrap(), true.encode()); + }); + } +} diff --git a/pallets/timestamp/Cargo.toml b/pallets/timestamp/Cargo.toml new file mode 100644 index 0000000000000..5e15bbe185ca7 --- /dev/null +++ b/pallets/timestamp/Cargo.toml @@ -0,0 +1,47 @@ +[package] +name = "pallet-timestamp" +version = "2.0.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME Timestamp Module" +documentation = "https://docs.rs/pallet-timestamp" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + + +[dependencies] +serde = { version = 
"1.0.101", optional = true } +codec = { package = "parity-scale-codec", version = "1.3.1", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0", default-features = false } +sp-io = { version = "2.0.0", default-features = false, optional = true } +sp-runtime = { version = "2.0.0", default-features = false } +sp-inherents = { version = "2.0.0", default-features = false } +frame-benchmarking = { version = "2.0.0", default-features = false, optional = true } +frame-support = { version = "2.0.0", default-features = false } +frame-system = { version = "2.0.0", default-features = false } +sp-timestamp = { version = "2.0.0", default-features = false } +impl-trait-for-tuples = "0.1.3" + +[dev-dependencies] +sp-io ={ version = "2.0.0" } +sp-core = { version = "2.0.0" } + +[features] +default = ["std"] +std = [ + "sp-inherents/std", + "codec/std", + "sp-std/std", + "sp-runtime/std", + "frame-benchmarking/std", + "frame-support/std", + "serde", + "frame-system/std", + "sp-timestamp/std" +] +runtime-benchmarks = ["frame-benchmarking", "sp-io"] diff --git a/pallets/timestamp/README.md b/pallets/timestamp/README.md new file mode 100644 index 0000000000000..5610caca4da51 --- /dev/null +++ b/pallets/timestamp/README.md @@ -0,0 +1,74 @@ +# Timestamp Module + +The Timestamp module provides functionality to get and set the on-chain time. + +- [`timestamp::Trait`](https://docs.rs/pallet-timestamppallet-timestamp/latest/pallet_timestamp/trait.Trait.html) +- [`Call`](https://docs.rs/pallet-timestamppallet-timestamp/latest/pallet_timestamp/enum.Call.html) +- [`Module`](https://docs.rs/pallet-timestamppallet-timestamp/latest/pallet_timestamp/struct.Module.html) + +## Overview + +The Timestamp module allows the validators to set and validate a timestamp with each block. + +It uses inherents for timestamp data, which is provided by the block author and validated/verified +by other validators. The timestamp can be set only once per block and must be set each block. 
+There could be a constraint on how much time must pass before setting the new timestamp. + +**NOTE:** The Timestamp module is the recommended way to query the on-chain time instead of using +an approach based on block numbers. The block number based time measurement can cause issues +because of cumulative calculation errors and hence should be avoided. + +## Interface + +### Dispatchable Functions + +* `set` - Sets the current time. + +### Public functions + +* `get` - Gets the current time for the current block. If this function is called prior to +setting the timestamp, it will return the timestamp of the previous block. + +### Trait Getters + +* `MinimumPeriod` - Gets the minimum (and advised) period between blocks for the chain. + +## Usage + +The following example shows how to use the Timestamp module in your custom module to query the current timestamp. + +### Prerequisites + +Import the Timestamp module into your custom module and derive the module configuration +trait from the timestamp trait. + +### Get current timestamp + +```rust +use frame_support::{decl_module, dispatch}; +use frame_system::ensure_signed; + +pub trait Trait: timestamp::Trait {} + +decl_module! { + pub struct Module for enum Call where origin: T::Origin { + #[weight = 0] + pub fn get_time(origin) -> dispatch::DispatchResult { + let _sender = ensure_signed(origin)?; + let _now = >::get(); + Ok(()) + } + } +} +``` + +### Example from the FRAME + +The [Session module](https://github.com/paritytech/substrate/blob/master/frame/session/src/lib.rs) uses +the Timestamp module for session management. 
+ +## Related Modules + +* [Session](https://docs.rs/pallet-session/latest/pallet_session/) + +License: Apache-2.0 \ No newline at end of file diff --git a/pallets/timestamp/src/benchmarking.rs b/pallets/timestamp/src/benchmarking.rs new file mode 100644 index 0000000000000..1cd0f15ca01b9 --- /dev/null +++ b/pallets/timestamp/src/benchmarking.rs @@ -0,0 +1,75 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Timestamp pallet benchmarking. + +#![cfg(feature = "runtime-benchmarks")] + +use super::*; +use sp_std::prelude::*; +use frame_system::RawOrigin; +use frame_support::{ensure, traits::OnFinalize}; +use frame_benchmarking::{benchmarks, TrackedStorageKey}; + +use crate::Module as Timestamp; + +const MAX_TIME: u32 = 100; + +benchmarks! { + _ { } + + set { + let t in 1 .. MAX_TIME; + // Ignore write to `DidUpdate` since it transient. + let did_update_key = crate::DidUpdate::hashed_key().to_vec(); + frame_benchmarking::benchmarking::add_to_whitelist(TrackedStorageKey { + key: did_update_key, + has_been_read: false, + has_been_written: true, + }); + }: _(RawOrigin::None, t.into()) + verify { + ensure!(Timestamp::::now() == t.into(), "Time was not set."); + } + + on_finalize { + let t in 1 .. 
MAX_TIME; + Timestamp::::set(RawOrigin::None.into(), t.into())?; + ensure!(DidUpdate::exists(), "Time was not set."); + // Ignore read/write to `DidUpdate` since it is transient. + let did_update_key = crate::DidUpdate::hashed_key().to_vec(); + frame_benchmarking::benchmarking::add_to_whitelist(did_update_key.into()); + }: { Timestamp::::on_finalize(t.into()); } + verify { + ensure!(!DidUpdate::exists(), "Time was not removed."); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::tests::{new_test_ext, Test}; + use frame_support::assert_ok; + + #[test] + fn test_benchmarks() { + new_test_ext().execute_with(|| { + assert_ok!(test_benchmark_set::()); + assert_ok!(test_benchmark_on_finalize::()); + }); + } +} diff --git a/pallets/timestamp/src/default_weights.rs b/pallets/timestamp/src/default_weights.rs new file mode 100644 index 0000000000000..726b3444e2532 --- /dev/null +++ b/pallets/timestamp/src/default_weights.rs @@ -0,0 +1,35 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5 + +#![allow(unused_parens)] + +use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; + +impl crate::WeightInfo for () { + // WARNING! 
Some components were not used: ["t"] + fn set() -> Weight { + (9133000 as Weight) + .saturating_add(DbWeight::get().reads(2 as Weight)) + .saturating_add(DbWeight::get().writes(1 as Weight)) + } + // WARNING! Some components were not used: ["t"] + fn on_finalize() -> Weight { + (5915000 as Weight) + } +} diff --git a/pallets/timestamp/src/lib.rs b/pallets/timestamp/src/lib.rs new file mode 100644 index 0000000000000..f58882ae51896 --- /dev/null +++ b/pallets/timestamp/src/lib.rs @@ -0,0 +1,387 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Timestamp Module +//! +//! The Timestamp module provides functionality to get and set the on-chain time. +//! +//! - [`timestamp::Trait`](./trait.Trait.html) +//! - [`Call`](./enum.Call.html) +//! - [`Module`](./struct.Module.html) +//! +//! ## Overview +//! +//! The Timestamp module allows the validators to set and validate a timestamp with each block. +//! +//! It uses inherents for timestamp data, which is provided by the block author and validated/verified +//! by other validators. The timestamp can be set only once per block and must be set each block. +//! There could be a constraint on how much time must pass before setting the new timestamp. +//! +//! **NOTE:** The Timestamp module is the recommended way to query the on-chain time instead of using +//! 
an approach based on block numbers. The block number based time measurement can cause issues +//! because of cumulative calculation errors and hence should be avoided. +//! +//! ## Interface +//! +//! ### Dispatchable Functions +//! +//! * `set` - Sets the current time. +//! +//! ### Public functions +//! +//! * `get` - Gets the current time for the current block. If this function is called prior to +//! setting the timestamp, it will return the timestamp of the previous block. +//! +//! ### Trait Getters +//! +//! * `MinimumPeriod` - Gets the minimum (and advised) period between blocks for the chain. +//! +//! ## Usage +//! +//! The following example shows how to use the Timestamp module in your custom module to query the current timestamp. +//! +//! ### Prerequisites +//! +//! Import the Timestamp module into your custom module and derive the module configuration +//! trait from the timestamp trait. +//! +//! ### Get current timestamp +//! +//! ``` +//! use frame_support::{decl_module, dispatch}; +//! # use pallet_timestamp as timestamp; +//! use frame_system::ensure_signed; +//! +//! pub trait Trait: timestamp::Trait {} +//! +//! decl_module! { +//! pub struct Module for enum Call where origin: T::Origin { +//! #[weight = 0] +//! pub fn get_time(origin) -> dispatch::DispatchResult { +//! let _sender = ensure_signed(origin)?; +//! let _now = >::get(); +//! Ok(()) +//! } +//! } +//! } +//! # fn main() {} +//! ``` +//! +//! ### Example from the FRAME +//! +//! The [Session module](https://github.com/paritytech/substrate/blob/master/frame/session/src/lib.rs) uses +//! the Timestamp module for session management. +//! +//! ## Related Modules +//! +//! 
* [Session](../pallet_session/index.html) + +#![cfg_attr(not(feature = "std"), no_std)] + +mod benchmarking; +mod default_weights; + +use sp_std::{result, cmp}; +use sp_inherents::{ProvideInherent, InherentData, InherentIdentifier}; +#[cfg(feature = "std")] +use frame_support::debug; +use frame_support::{ + Parameter, decl_storage, decl_module, + traits::{Time, UnixTime, Get}, + weights::{DispatchClass, Weight}, +}; +use sp_runtime::{ + RuntimeString, + traits::{ + AtLeast32Bit, Zero, SaturatedConversion, Scale + } +}; +use frame_system::ensure_none; +use sp_timestamp::{ + InherentError, INHERENT_IDENTIFIER, InherentType, + OnTimestampSet, +}; + +pub trait WeightInfo { + fn set() -> Weight; + fn on_finalize() -> Weight; +} + +/// The module configuration trait +pub trait Trait: frame_system::Trait { + /// Type used for expressing timestamp. + type Moment: Parameter + Default + AtLeast32Bit + + Scale + Copy; + + /// Something which can be notified when the timestamp is set. Set this to `()` if not needed. + type OnTimestampSet: OnTimestampSet; + + /// The minimum period between blocks. Beware that this is different to the *expected* period + /// that the block production apparatus provides. Your chosen consensus system will generally + /// work with this to determine a sensible block time. e.g. For Aura, it will be double this + /// period on default settings. + type MinimumPeriod: Get; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; +} + +decl_module! { + pub struct Module for enum Call where origin: T::Origin { + /// The minimum period between blocks. Beware that this is different to the *expected* period + /// that the block production apparatus provides. Your chosen consensus system will generally + /// work with this to determine a sensible block time. e.g. For Aura, it will be double this + /// period on default settings. + const MinimumPeriod: T::Moment = T::MinimumPeriod::get(); + + /// Set the current time. 
+ /// + /// This call should be invoked exactly once per block. It will panic at the finalization + /// phase, if this call hasn't been invoked by that time. + /// + /// The timestamp should be greater than the previous one by the amount specified by + /// `MinimumPeriod`. + /// + /// The dispatch origin for this call must be `Inherent`. + /// + /// # + /// - `O(T)` where `T` complexity of `on_timestamp_set` + /// - 1 storage read and 1 storage mutation (codec `O(1)`). (because of `DidUpdate::take` in `on_finalize`) + /// - 1 event handler `on_timestamp_set` `O(T)`. + /// # + #[weight = ( + T::WeightInfo::set(), + DispatchClass::Mandatory + )] + fn set(origin, #[compact] now: T::Moment) { + ensure_none(origin)?; + assert!(!::DidUpdate::exists(), "Timestamp must be updated only once in the block"); + let prev = Self::now(); + //FIXME bring back the check + // assert!( + // prev.is_zero() || now >= prev + T::MinimumPeriod::get(), + // "Timestamp must increment by at least between sequential blocks" + // ); + ::Now::put(now); + ::DidUpdate::put(true); + + >::on_timestamp_set(now); + } + + /// dummy `on_initialize` to return the weight used in `on_finalize`. + fn on_initialize() -> Weight { + // weight of `on_finalize` + T::WeightInfo::on_finalize() + } + + /// # + /// - `O(1)` + /// - 1 storage deletion (codec `O(1)`). + /// # + fn on_finalize() { + assert!(::DidUpdate::take(), "Timestamp must be updated once in the block"); + } + } +} + +decl_storage! { + trait Store for Module as Timestamp { + /// Current time for the current block. + pub Now get(fn now) build(|_| 0.into()): T::Moment; + + /// Did the timestamp get updated in this block? + DidUpdate: bool; + } +} + +impl Module { + /// Get the current time for the current block. + /// + /// NOTE: if this function is called prior to setting the timestamp, + /// it will return the timestamp of the previous block. + pub fn get() -> T::Moment { + Self::now() + } + + /// Set the timestamp to something in particular. 
Only used for tests. + #[cfg(feature = "std")] + pub fn set_timestamp(now: T::Moment) { + ::Now::put(now); + } +} + +fn extract_inherent_data(data: &InherentData) -> Result { + data.get_data::(&INHERENT_IDENTIFIER) + .map_err(|_| RuntimeString::from("Invalid timestamp inherent data encoding."))? + .ok_or_else(|| "Timestamp inherent data is not provided.".into()) +} + +impl ProvideInherent for Module { + type Call = Call; + type Error = InherentError; + const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; + + fn create_inherent(data: &InherentData) -> Option { + let data: T::Moment = extract_inherent_data(data) + .expect("Gets and decodes timestamp inherent data") + .saturated_into(); + + let next_time = cmp::max(data, Self::now() + T::MinimumPeriod::get()); + Some(Call::set(next_time.into())) + } + + fn check_inherent(call: &Self::Call, data: &InherentData) -> result::Result<(), Self::Error> { + const MAX_TIMESTAMP_DRIFT_MILLIS: u64 = 30 * 1000; + + let t: u64 = match call { + Call::set(ref t) => t.clone().saturated_into::(), + _ => return Ok(()), + }; + + let data = extract_inherent_data(data).map_err(|e| InherentError::Other(e))?; + + let minimum = (Self::now() + T::MinimumPeriod::get()).saturated_into::(); + if t > data + MAX_TIMESTAMP_DRIFT_MILLIS { + Err(InherentError::Other("Timestamp too far in future to accept".into())) + } else if t < minimum { + Err(InherentError::ValidAtTimestamp(minimum)) + } else { + Ok(()) + } + } +} + +impl Time for Module { + type Moment = T::Moment; + + /// Before the first set of now with inherent the value returned is zero. + fn now() -> Self::Moment { + Self::now() + } +} + +/// Before the timestamp inherent is applied, it returns the time of previous block. +/// +/// On genesis the time returned is not valid. +impl UnixTime for Module { + fn now() -> core::time::Duration { + // now is duration since unix epoch in millisecond as documented in + // `sp_timestamp::InherentDataProvider`. 
+ let now = Self::now(); + sp_std::if_std! { + if now == T::Moment::zero() { + debug::error!( + "`pallet_timestamp::UnixTime::now` is called at genesis, invalid value returned: 0" + ); + } + } + core::time::Duration::from_millis(now.saturated_into::()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use frame_support::{impl_outer_origin, assert_ok, parameter_types, weights::Weight}; + use sp_io::TestExternalities; + use sp_core::H256; + use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header}; + + pub fn new_test_ext() -> TestExternalities { + let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + TestExternalities::new(t) + } + + impl_outer_origin! { + pub enum Origin for Test where system = frame_system {} + } + + #[derive(Clone, Eq, PartialEq)] + pub struct Test; + parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); + } + impl frame_system::Trait for Test { + type BaseCallFilter = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = (); + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type BlockExecutionWeight = (); + type ExtrinsicBaseWeight = (); + type MaximumExtrinsicWeight = MaximumBlockWeight; + type AvailableBlockRatio = AvailableBlockRatio; + type MaximumBlockLength = MaximumBlockLength; + type Version = (); + type PalletInfo = (); + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + } + parameter_types! 
{ + pub const MinimumPeriod: u64 = 5; + } + impl Trait for Test { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = MinimumPeriod; + type WeightInfo = (); + } + type Timestamp = Module; + + #[test] + fn timestamp_works() { + new_test_ext().execute_with(|| { + Timestamp::set_timestamp(42); + assert_ok!(Timestamp::set(Origin::none(), 69)); + assert_eq!(Timestamp::now(), 69); + }); + } + + #[test] + #[should_panic(expected = "Timestamp must be updated only once in the block")] + fn double_timestamp_should_fail() { + new_test_ext().execute_with(|| { + Timestamp::set_timestamp(42); + assert_ok!(Timestamp::set(Origin::none(), 69)); + let _ = Timestamp::set(Origin::none(), 70); + }); + } + + #[test] + #[should_panic(expected = "Timestamp must increment by at least between sequential blocks")] + fn block_period_minimum_enforced() { + new_test_ext().execute_with(|| { + Timestamp::set_timestamp(42); + let _ = Timestamp::set(Origin::none(), 46); + }); + } +} diff --git a/pallets/xyk/src/lib.rs b/pallets/xyk/src/lib.rs index b0abdfbd5a703..26726d054605d 100644 --- a/pallets/xyk/src/lib.rs +++ b/pallets/xyk/src/lib.rs @@ -5,6 +5,7 @@ // TODO documentation! 
use sp_runtime::traits::{BlakeTwo256, Hash, One, SaturatedConversion, Zero}; +use sp_runtime::print; use codec::{Decode, Encode}; use frame_support::{ decl_error, decl_event, decl_module, decl_storage, dispatch::DispatchResult, ensure, @@ -453,6 +454,8 @@ impl Module { origin: T::Origin, amount: T::Balance ) -> DispatchResult { + + print("creating liquidity asset"); let vault: T::AccountId = >::get(); let sender = ensure_signed(origin.clone())?; diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index 4a1f66360e5ef..5dadda2171c8e 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -28,10 +28,11 @@ serde = { features = ['derive'], optional = true, version = '1.0.101' } pallet-template = { path = '../pallets/template', default-features = false, version = '2.0.0' } pallet-xyk = { path = '../pallets/xyk', default-features = false, version = '0.1.0' } pallet-assets = { path = '../pallets/assets', default-features = false, version = '2.0.0' } +frame-executive = { path = '../pallets/executive', default-features = false, version = '2.0.0' } +pallet-timestamp = { path = '../pallets/timestamp', default-features = false, version = '2.0.0' } # Substrate dependencies frame-benchmarking = { default-features = false, optional = true, version = '2.0.0' } -frame-executive = { default-features = false, version = '2.0.0' } frame-support = { default-features = false, version = '2.0.0' } frame-system = { default-features = false, version = '2.0.0' } frame-system-benchmarking = { default-features = false, optional = true, version = '2.0.0' } @@ -46,7 +47,6 @@ pallet-balances = { default-features = false, version = '2.0.0' } pallet-grandpa = { default-features = false, version = '2.0.0' } pallet-randomness-collective-flip = { default-features = false, version = '2.0.0' } pallet-sudo = { default-features = false, version = '2.0.0' } -pallet-timestamp = { default-features = false, version = '2.0.0' } pallet-transaction-payment = { default-features = false, version = '2.0.0' } 
pallet-transaction-payment-rpc-runtime-api = { default-features = false, version = '2.0.0' } sp-api = { default-features = false, version = '2.0.0' } diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index c2bfa0f7693b9..e6b4af5e384ae 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -6,6 +6,8 @@ #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); + +use sp_runtime::print; use sp_std::prelude::*; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; use sp_runtime::{ @@ -504,6 +506,7 @@ impl_runtime_apis! { impl sp_block_builder::BlockBuilder for Runtime { fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { + print("extrinsic application called using rpc"); Executive::apply_extrinsic(extrinsic) } diff --git a/scripts/build-mangata-node-docker-image.sh b/scripts/build-mangata-node-docker-image.sh new file mode 100644 index 0000000000000..e63aefd2744d7 --- /dev/null +++ b/scripts/build-mangata-node-docker-image.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +docker build --tag mangata/node --file ./devops/dockerfiles/node-and-runtime/Dockerfile . \ No newline at end of file diff --git a/scripts/build-rust-builder-docker-image.sh b/scripts/build-rust-builder-docker-image.sh new file mode 100644 index 0000000000000..48f2020157867 --- /dev/null +++ b/scripts/build-rust-builder-docker-image.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +docker build --tag mangata/rust-builder --file ./devops/dockerfiles/rust-builder/Dockerfile . 
\ No newline at end of file diff --git a/setup.sh b/setup.sh new file mode 100644 index 0000000000000..c3d9f1857d828 --- /dev/null +++ b/setup.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +set -e + +# If OS is supported will install: +# - build tools and any other dependencies required for rust and substrate +# - rustup - rust insaller +# - rust compiler and toolchains +# - skips installing substrate and subkey +curl https://getsubstrate.io -sSf | bash -s -- --fast + +source ~/.cargo/env + +rustup component add rustfmt clippy + +# Current version of substrate requires an older version of nightly toolchain +# to successfully compile the WASM runtime. We force install because rustfmt package +# is not available for this nightly version. +rustup install nightly-2020-05-23 --force +rustup target add wasm32-unknown-unknown --toolchain nightly-2020-05-23 + +# Ensure the stable toolchain is still the default +rustup default stable + +# TODO: Install additional tools... + +# - b2sum +# - nodejs +# - npm +# - yarn +# .... ? 
\ No newline at end of file diff --git a/utils/frame/rpc/system/Cargo.toml b/utils/frame/rpc/system/Cargo.toml new file mode 100644 index 0000000000000..80a7b205229b3 --- /dev/null +++ b/utils/frame/rpc/system/Cargo.toml @@ -0,0 +1,36 @@ +[package] +name = "substrate-frame-rpc-system" +version = "2.0.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME's system exposed over Substrate RPC" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +sc-client-api = "2.0.0" +codec = { package = "parity-scale-codec", version = "1.3.1" } +futures = { version = "0.3.4", features = ["compat"] } +jsonrpc-core = "15.0.0" +jsonrpc-core-client = "15.0.0" +jsonrpc-derive = "15.0.0" +log = "0.4.8" +serde = { version = "1.0.101", features = ["derive"] } +sp-runtime = "2.0.0" +sp-api = "2.0.0" +frame-system-rpc-runtime-api = "2.0.0" +sp-core = "2.0.0" +sp-blockchain = "2.0.0" +sp-transaction-pool = "2.0.0" +sp-block-builder = "2.0.0" +sc-rpc-api = "0.8.0" + +[dev-dependencies] +#substrate-test-runtime-client = "2.0.0" +sp-tracing = "2.0.0" +sc-transaction-pool = "2.0.0" diff --git a/utils/frame/rpc/system/README.md b/utils/frame/rpc/system/README.md new file mode 100644 index 0000000000000..38986983d93c5 --- /dev/null +++ b/utils/frame/rpc/system/README.md @@ -0,0 +1,3 @@ +System FRAME specific RPC methods. + +License: Apache-2.0 \ No newline at end of file diff --git a/utils/frame/rpc/system/src/lib.rs b/utils/frame/rpc/system/src/lib.rs new file mode 100644 index 0000000000000..e0642aa6ae809 --- /dev/null +++ b/utils/frame/rpc/system/src/lib.rs @@ -0,0 +1,423 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! System FRAME specific RPC methods. + +use std::sync::Arc; + +use sp_runtime::print; +use codec::{self, Codec, Decode, Encode}; +use sc_client_api::light::{future_header, RemoteBlockchain, Fetcher, RemoteCallRequest}; +use jsonrpc_core::{ + Error as RpcError, ErrorCode, + futures::future::{self as rpc_future,result, Future}, +}; +use jsonrpc_derive::rpc; +use futures::future::{ready, TryFutureExt}; +use sp_blockchain::{ + HeaderBackend, + Error as ClientError +}; +use sp_runtime::{ + generic::BlockId, + traits, +}; +use sp_core::{hexdisplay::HexDisplay, Bytes}; +use sp_transaction_pool::{TransactionPool, InPoolTransaction}; +use sp_block_builder::BlockBuilder; +use sc_rpc_api::DenyUnsafe; + +pub use frame_system_rpc_runtime_api::AccountNonceApi; +pub use self::gen_client::Client as SystemClient; + +/// Future that resolves to account nonce. +pub type FutureResult = Box + Send>; + +/// System RPC methods. +#[rpc] +pub trait SystemApi { + /// Returns the next valid index (aka nonce) for given account. + /// + /// This method takes into consideration all pending transactions + /// currently in the pool and if no transactions are found in the pool + /// it fallbacks to query the index from the runtime (aka. state nonce). 
+ #[rpc(name = "system_accountNextIndex", alias("account_nextIndex"))] + fn nonce(&self, account: AccountId) -> FutureResult; + + /// Dry run an extrinsic at a given block. Return SCALE encoded ApplyExtrinsicResult. + #[rpc(name = "system_dryRun", alias("system_dryRunAt"))] + fn dry_run(&self, extrinsic: Bytes, at: Option) -> FutureResult; +} + +/// Error type of this RPC api. +pub enum Error { + /// The transaction was not decodable. + DecodeError, + /// The call to runtime failed. + RuntimeError, +} + +impl From for i64 { + fn from(e: Error) -> i64 { + match e { + Error::RuntimeError => 1, + Error::DecodeError => 2, + } + } +} + +/// An implementation of System-specific RPC methods on full client. +pub struct FullSystem { + client: Arc, + pool: Arc

, + deny_unsafe: DenyUnsafe, + _marker: std::marker::PhantomData, +} + +impl FullSystem { + /// Create new `FullSystem` given client and transaction pool. + pub fn new(client: Arc, pool: Arc

, deny_unsafe: DenyUnsafe,) -> Self { + FullSystem { + client, + pool, + deny_unsafe, + _marker: Default::default(), + } + } +} + +impl SystemApi<::Hash, AccountId, Index> + for FullSystem +where + C: sp_api::ProvideRuntimeApi, + C: HeaderBackend, + C: Send + Sync + 'static, + C::Api: AccountNonceApi, + C::Api: BlockBuilder, + P: TransactionPool + 'static, + Block: traits::Block, + AccountId: Clone + std::fmt::Display + Codec, + Index: Clone + std::fmt::Display + Codec + Send + traits::AtLeast32Bit + 'static, +{ + fn nonce(&self, account: AccountId) -> FutureResult { + let get_nonce = || { + let api = self.client.runtime_api(); + let best = self.client.info().best_hash; + let at = BlockId::hash(best); + + let nonce = api.account_nonce(&at, account.clone()).map_err(|e| RpcError { + code: ErrorCode::ServerError(Error::RuntimeError.into()), + message: "Unable to query nonce.".into(), + data: Some(format!("{:?}", e).into()), + })?; + + Ok(adjust_nonce(&*self.pool, account, nonce)) + }; + + Box::new(result(get_nonce())) + } + + fn dry_run(&self, extrinsic: Bytes, at: Option<::Hash>) -> FutureResult { + if let Err(err) = self.deny_unsafe.check_if_safe() { + return Box::new(rpc_future::err(err.into())); + } + + let dry_run = || { + let api = self.client.runtime_api(); + let at = BlockId::::hash(at.unwrap_or_else(|| + // If the block hash is not supplied assume the best block. 
+ self.client.info().best_hash + )); + + let uxt: ::Extrinsic = Decode::decode(&mut &*extrinsic).map_err(|e| RpcError { + code: ErrorCode::ServerError(Error::DecodeError.into()), + message: "Unable to dry run extrinsic.".into(), + data: Some(format!("{:?}", e).into()), + })?; + + print("dry running transactions"); + let result = "test"; + // let result = api.apply_extrinsic(&at, uxt) + // .map_err(|e| RpcError { + // code: ErrorCode::ServerError(Error::RuntimeError.into()), + // message: "Unable to dry run extrinsic.".into(), + // data: Some(format!("{:?}", e).into()), + // })?; + + Ok(Encode::encode(&result).into()) + }; + + + Box::new(result(dry_run())) + } +} + +/// An implementation of System-specific RPC methods on light client. +pub struct LightSystem { + client: Arc, + remote_blockchain: Arc>, + fetcher: Arc, + pool: Arc

, +} + +impl LightSystem { + /// Create new `LightSystem`. + pub fn new( + client: Arc, + remote_blockchain: Arc>, + fetcher: Arc, + pool: Arc

, + ) -> Self { + LightSystem { + client, + remote_blockchain, + fetcher, + pool, + } + } +} + +impl SystemApi<::Hash, AccountId, Index> + for LightSystem +where + P: TransactionPool + 'static, + C: HeaderBackend, + C: Send + Sync + 'static, + F: Fetcher + 'static, + Block: traits::Block, + AccountId: Clone + std::fmt::Display + Codec + Send + 'static, + Index: Clone + std::fmt::Display + Codec + Send + traits::AtLeast32Bit + 'static, +{ + fn nonce(&self, account: AccountId) -> FutureResult { + let best_hash = self.client.info().best_hash; + let best_id = BlockId::hash(best_hash); + let future_best_header = future_header(&*self.remote_blockchain, &*self.fetcher, best_id); + let fetcher = self.fetcher.clone(); + let call_data = account.encode(); + let future_best_header = future_best_header + .and_then(move |maybe_best_header| ready( + match maybe_best_header { + Some(best_header) => Ok(best_header), + None => Err(ClientError::UnknownBlock(format!("{}", best_hash))), + } + )); + let future_nonce = future_best_header.and_then(move |best_header| + fetcher.remote_call(RemoteCallRequest { + block: best_hash, + header: best_header, + method: "AccountNonceApi_account_nonce".into(), + call_data, + retry_count: None, + }) + ).compat(); + let future_nonce = future_nonce.and_then(|nonce| Decode::decode(&mut &nonce[..]) + .map_err(|e| ClientError::CallResultDecode("Cannot decode account nonce", e))); + let future_nonce = future_nonce.map_err(|e| RpcError { + code: ErrorCode::ServerError(Error::RuntimeError.into()), + message: "Unable to query nonce.".into(), + data: Some(format!("{:?}", e).into()), + }); + + let pool = self.pool.clone(); + let future_nonce = future_nonce.map(move |nonce| adjust_nonce(&*pool, account, nonce)); + + Box::new(future_nonce) + } + + fn dry_run(&self, _extrinsic: Bytes, _at: Option<::Hash>) -> FutureResult { + Box::new(result(Err(RpcError { + code: ErrorCode::MethodNotFound, + message: "Unable to dry run extrinsic.".into(), + data: None, + }))) + } 
+} + +/// Adjust account nonce from state, so that tx with the nonce will be +/// placed after all ready txpool transactions. +fn adjust_nonce( + pool: &P, + account: AccountId, + nonce: Index, +) -> Index where + P: TransactionPool, + AccountId: Clone + std::fmt::Display + Encode, + Index: Clone + std::fmt::Display + Encode + traits::AtLeast32Bit + 'static, +{ + log::debug!(target: "rpc", "State nonce for {}: {}", account, nonce); + // Now we need to query the transaction pool + // and find transactions originating from the same sender. + // + // Since extrinsics are opaque to us, we look for them using + // `provides` tag. And increment the nonce if we find a transaction + // that matches the current one. + let mut current_nonce = nonce.clone(); + let mut current_tag = (account.clone(), nonce).encode(); + for tx in pool.ready() { + log::debug!( + target: "rpc", + "Current nonce to {}, checking {} vs {:?}", + current_nonce, + HexDisplay::from(¤t_tag), + tx.provides().iter().map(|x| format!("{}", HexDisplay::from(x))).collect::>(), + ); + // since transactions in `ready()` need to be ordered by nonce + // it's fine to continue with current iterator. 
+ if tx.provides().get(0) == Some(¤t_tag) { + current_nonce += traits::One::one(); + current_tag = (account.clone(), current_nonce.clone()).encode(); + } + } + + current_nonce +} + +#[cfg(test)] +mod tests { + use super::*; + + use futures::executor::block_on; + use substrate_test_runtime_client::{runtime::Transfer, AccountKeyring}; + use sc_transaction_pool::BasicPool; + use sp_runtime::{ApplyExtrinsicResult, transaction_validity::{TransactionValidityError, InvalidTransaction}}; + + #[test] + fn should_return_next_nonce_for_some_account() { + sp_tracing::try_init_simple(); + + // given + let client = Arc::new(substrate_test_runtime_client::new()); + let spawner = sp_core::testing::TaskExecutor::new(); + let pool = BasicPool::new_full( + Default::default(), + None, + spawner, + client.clone(), + ); + + let source = sp_runtime::transaction_validity::TransactionSource::External; + let new_transaction = |nonce: u64| { + let t = Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Bob.into(), + amount: 5, + nonce, + }; + t.into_signed_tx() + }; + // Populate the pool + let ext0 = new_transaction(0); + block_on(pool.submit_one(&BlockId::number(0), source, ext0)).unwrap(); + let ext1 = new_transaction(1); + block_on(pool.submit_one(&BlockId::number(0), source, ext1)).unwrap(); + + let accounts = FullSystem::new(client, pool, DenyUnsafe::Yes); + + // when + let nonce = accounts.nonce(AccountKeyring::Alice.into()); + + // then + assert_eq!(nonce.wait().unwrap(), 2); + } + + #[test] + fn dry_run_should_deny_unsafe() { + sp_tracing::try_init_simple(); + + // given + let client = Arc::new(substrate_test_runtime_client::new()); + let spawner = sp_core::testing::TaskExecutor::new(); + let pool = BasicPool::new_full( + Default::default(), + None, + spawner, + client.clone(), + ); + + let accounts = FullSystem::new(client, pool, DenyUnsafe::Yes); + + // when + let res = accounts.dry_run(vec![].into(), None); + + // then + assert_eq!(res.wait(), 
Err(RpcError::method_not_found())); + } + + #[test] + fn dry_run_should_work() { + sp_tracing::try_init_simple(); + + // given + let client = Arc::new(substrate_test_runtime_client::new()); + let spawner = sp_core::testing::TaskExecutor::new(); + let pool = BasicPool::new_full( + Default::default(), + None, + spawner, + client.clone(), + ); + + let accounts = FullSystem::new(client, pool, DenyUnsafe::No); + + let tx = Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Bob.into(), + amount: 5, + nonce: 0, + }.into_signed_tx(); + + // when + let res = accounts.dry_run(tx.encode().into(), None); + + // then + let bytes = res.wait().unwrap().0; + let apply_res: ApplyExtrinsicResult = Decode::decode(&mut bytes.as_slice()).unwrap(); + assert_eq!(apply_res, Ok(Ok(()))); + } + + #[test] + fn dry_run_should_indicate_error() { + sp_tracing::try_init_simple(); + + // given + let client = Arc::new(substrate_test_runtime_client::new()); + let spawner = sp_core::testing::TaskExecutor::new(); + let pool = BasicPool::new_full( + Default::default(), + None, + spawner, + client.clone(), + ); + + let accounts = FullSystem::new(client, pool, DenyUnsafe::No); + + let tx = Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Bob.into(), + amount: 5, + nonce: 100, + }.into_signed_tx(); + + // when + let res = accounts.dry_run(tx.encode().into(), None); + + // then + let bytes = res.wait().unwrap().0; + let apply_res: ApplyExtrinsicResult = Decode::decode(&mut bytes.as_slice()).unwrap(); + assert_eq!(apply_res, Err(TransactionValidityError::Invalid(InvalidTransaction::Stale))); + } +}