diff --git a/.gitignore b/.gitignore index 6c79bfaf62ed1..aadaa13912c19 100644 --- a/.gitignore +++ b/.gitignore @@ -2,14 +2,11 @@ **/*.rs.bk *.swp .wasm-binaries -polkadot/runtime/wasm/target/ -core/executor/wasm/target/ -core/test-runtime/wasm/target/ pwasm-alloc/target/ pwasm-libc/target/ pwasm-alloc/Cargo.lock pwasm-libc/Cargo.lock -node/runtime/wasm/target/ +bin/node/runtime/wasm/target/ **/._* **/.criterion/ .vscode diff --git a/.maintain/common.sh b/.maintain/common.sh deleted file mode 100644 index 8aff9acc578ec..0000000000000 --- a/.maintain/common.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env bash - -ROOT=`dirname "$0"` - -# A list of directories which contain wasm projects. -SRCS=( - "core/executor/wasm" - "node/runtime/wasm" - "node-template/runtime/wasm" - "core/test-runtime/wasm" -) - -# Make pushd/popd silent. - -pushd () { - command pushd "$@" > /dev/null -} - -popd () { - command popd "$@" > /dev/null -} diff --git a/.maintain/update.sh b/.maintain/update.sh deleted file mode 100755 index a264fab43df30..0000000000000 --- a/.maintain/update.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env bash - -# This script assumes that all pre-requisites are installed. - -set -e - -PROJECT_ROOT=`git rev-parse --show-toplevel` -source `dirname "$0"`/common.sh - -export CARGO_INCREMENTAL=0 - -# Save current directory. -pushd . - -cd $ROOT - -for SRC in "${SRCS[@]}" -do - echo "*** Updating and building wasm binaries in $SRC" - cd "$PROJECT_ROOT/$SRC" - - cargo update - ./build.sh "$@" - - cd - >> /dev/null -done - -# Restore initial directory. -popd diff --git a/Cargo.lock b/Cargo.lock index 6ae7d4deabf19..506c4e966b82d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -915,6 +915,19 @@ name = "data-encoding" version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "derive_more" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "derive_more" version = "0.99.2" @@ -3009,6 +3022,7 @@ dependencies = [ "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", "sp-finality-tracker 2.0.0", "sp-timestamp 2.0.0", + "sp-transaction-pool-api 2.0.0", "sr-io 2.0.0", "sr-primitives 2.0.0", "structopt 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3093,7 +3107,7 @@ dependencies = [ "node-runtime 2.0.0", "pallet-contracts-rpc 2.0.0", "pallet-transaction-payment-rpc 2.0.0", - "sc-transaction-pool 2.0.0", + "sp-transaction-pool-api 2.0.0", "sr-primitives 2.0.0", "substrate-client 2.0.0", "substrate-frame-rpc-system 2.0.0", @@ -3152,6 +3166,7 @@ dependencies = [ "rustc-hex 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", "safe-mix 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-transaction-pool-runtime-api 2.0.0", "sr-api 2.0.0", "sr-io 2.0.0", "sr-primitives 2.0.0", @@ -3166,7 +3181,6 @@ dependencies 
= [ "substrate-offchain-primitives 2.0.0", "substrate-primitives 2.0.0", "substrate-session 2.0.0", - "substrate-transaction-pool-runtime-api 2.0.0", "substrate-wasm-builder-runner 1.0.4", ] @@ -3182,6 +3196,7 @@ dependencies = [ "parity-scale-codec 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "sc-transaction-pool 2.0.0", + "sp-transaction-pool-api 2.0.0", "sr-io 2.0.0", "sr-primitives 2.0.0", "substrate-basic-authorship 2.0.0", @@ -3220,6 +3235,7 @@ dependencies = [ "parity-scale-codec 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "safe-mix 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-transaction-pool-runtime-api 2.0.0", "sr-api 2.0.0", "sr-io 2.0.0", "sr-primitives 2.0.0", @@ -3231,7 +3247,6 @@ dependencies = [ "substrate-offchain-primitives 2.0.0", "substrate-primitives 2.0.0", "substrate-session 2.0.0", - "substrate-transaction-pool-runtime-api 2.0.0", "substrate-wasm-builder-runner 1.0.4", ] @@ -4842,9 +4857,9 @@ dependencies = [ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "parity-scale-codec 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sc-transaction-graph 2.0.0", "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-transaction-pool-api 2.0.0", "sr-version 2.0.0", "substrate-primitives 2.0.0", "substrate-rpc-primitives 2.0.0", @@ -4863,6 +4878,7 @@ dependencies = [ "parity-scale-codec 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-transaction-pool-api 2.0.0", "sr-primitives 2.0.0", "substrate-primitives 2.0.0", "substrate-test-runtime 2.0.0", @@ -4878,12 +4894,15 @@ dependencies = [ "parity-scale-codec 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "sc-transaction-graph 2.0.0", + "sp-blockchain 2.0.0", + "sp-transaction-pool-api 2.0.0", + "sp-transaction-pool-runtime-api 2.0.0", "sr-api 2.0.0", "sr-primitives 2.0.0", + "substrate-client-api 2.0.0", "substrate-keyring 2.0.0", "substrate-primitives 2.0.0", "substrate-test-runtime-client 2.0.0", - "substrate-transaction-pool-runtime-api 2.0.0", ] [[package]] @@ -5225,6 +5244,28 @@ dependencies = [ "substrate-inherents 2.0.0", ] +[[package]] +name = "sp-transaction-pool-api" +version = "2.0.0" +dependencies = [ + "derive_more 0.15.0 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", + "sr-primitives 2.0.0", + "substrate-primitives 2.0.0", +] + +[[package]] +name = "sp-transaction-pool-runtime-api" +version = "2.0.0" +dependencies = [ + "sr-api 2.0.0", + "sr-primitives 2.0.0", + "substrate-primitives 
2.0.0", +] + [[package]] name = "spin" version = "0.5.2" @@ -5524,6 +5565,7 @@ dependencies = [ "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "sc-transaction-pool 2.0.0", "sp-blockchain 2.0.0", + "sp-transaction-pool-api 2.0.0", "sr-primitives 2.0.0", "substrate-block-builder 2.0.0", "substrate-client 2.0.0", @@ -5688,6 +5730,7 @@ dependencies = [ "parity-scale-codec 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "sp-blockchain 2.0.0", + "sp-transaction-pool-api 2.0.0", "sr-api 2.0.0", "sr-primitives 2.0.0", "sr-std 2.0.0", @@ -6067,11 +6110,12 @@ dependencies = [ "jsonrpc-derive 14.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "parity-scale-codec 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", - "sc-transaction-graph 2.0.0", "sc-transaction-pool 2.0.0", "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", "sp-blockchain 2.0.0", + "sp-transaction-pool-api 2.0.0", "sr-primitives 2.0.0", + "substrate-client 2.0.0", "substrate-primitives 2.0.0", "substrate-test-runtime-client 2.0.0", ] @@ -6182,6 +6226,7 @@ dependencies = [ "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "sc-transaction-pool 2.0.0", + "sp-transaction-pool-api 2.0.0", "sr-api 2.0.0", "sr-primitives 2.0.0", "substrate-client-api 2.0.0", @@ -6305,6 +6350,7 @@ dependencies = [ "sc-transaction-pool 2.0.0", "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", "sp-blockchain 2.0.0", + "sp-transaction-pool-api 2.0.0", "sr-api 2.0.0", "sr-io 2.0.0", "sr-primitives 2.0.0", @@ -6430,6 +6476,8 @@ dependencies = [ "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)", "slog 2.5.2 (registry+https://github.com/rust-lang/crates.io-index)", "sp-blockchain 2.0.0", + "sp-transaction-pool-api 2.0.0", + "sp-transaction-pool-runtime-api 2.0.0", "sr-api 2.0.0", "sr-io 2.0.0", "sr-primitives 2.0.0", @@ -6453,7 +6501,6 @@ dependencies = [ "substrate-telemetry 2.0.0", "substrate-test-runtime-client 2.0.0", "substrate-tracing 2.0.0", - "substrate-transaction-pool-runtime-api 2.0.0", "sysinfo 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", "target_info 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", @@ -6471,6 +6518,7 @@ dependencies = [ "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-transaction-pool-api 2.0.0", "sr-primitives 2.0.0", "substrate-client 2.0.0", "substrate-consensus-common 2.0.0", @@ -6586,6 +6634,7 @@ dependencies = [ "pallet-timestamp 2.0.0", "parity-scale-codec 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-transaction-pool-runtime-api 2.0.0", "sr-api 2.0.0", "sr-io 2.0.0", "sr-primitives 2.0.0", @@ -6605,7 +6654,6 @@ dependencies = [ "substrate-session 2.0.0", "substrate-state-machine 2.0.0", "substrate-test-runtime-client 2.0.0", - 
"substrate-transaction-pool-runtime-api 2.0.0", "substrate-trie 2.0.0", "substrate-wasm-builder-runner 1.0.4", "trie-db 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -6615,6 +6663,7 @@ dependencies = [ name = "substrate-test-runtime-client" version = "2.0.0" dependencies = [ + "futures 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "parity-scale-codec 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "sp-blockchain 2.0.0", "sr-primitives 2.0.0", @@ -6642,15 +6691,6 @@ dependencies = [ "tracing-core 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "substrate-transaction-pool-runtime-api" -version = "2.0.0" -dependencies = [ - "sr-api 2.0.0", - "sr-primitives 2.0.0", - "substrate-primitives 2.0.0", -] - [[package]] name = "substrate-trie" version = "2.0.0" @@ -8044,6 +8084,7 @@ dependencies = [ "checksum cuckoofilter 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "8dd43f7cfaffe0a386636a10baea2ee05cc50df3b77bea4a456c9572a939bf1f" "checksum curve25519-dalek 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8b7dcd30ba50cdf88b55b033456138b7c0ac4afdc436d82e1b79f370f24cc66d" "checksum data-encoding 2.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f4f47ca1860a761136924ddd2422ba77b2ea54fe8cc75b9040804a0d9d32ad97" +"checksum derive_more 0.15.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a141330240c921ec6d074a3e188a7c7ef95668bb95e7d44fa0e5778ec2a7afe" "checksum derive_more 0.99.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2159be042979966de68315bce7034bb000c775f22e3e834e1c52ff78f041cae8" "checksum difference 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "524cbf6897b527295dff137cec09ecf3a05f4fddffd7dfcd1585403449e74198" "checksum digest 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" diff --git a/Cargo.toml b/Cargo.toml index 84fbb2430b782..9b7ad19f8321b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -126,6 +126,7 @@ members = [ "primitives/sr-version", "primitives/state-machine", "primitives/timestamp", + "primitives/transaction-pool", "primitives/transaction-pool/runtime-api", "primitives/trie", "primitives/wasm-interface", diff --git a/bin/node-template/Cargo.toml b/bin/node-template/Cargo.toml index 5e0ee24547b81..9fc248fe80b74 100644 --- a/bin/node-template/Cargo.toml +++ b/bin/node-template/Cargo.toml @@ -24,7 +24,8 @@ primitives = { package = "substrate-primitives", path = "../../primitives/core" substrate-executor = { path = "../../client/executor" } substrate-service = { path = "../../client/service" } inherents = { package = "substrate-inherents", path = "../../primitives/inherents" } -transaction-pool = { package = "sc-transaction-pool", path = "../../client/transaction-pool" } +txpool = { package = "sc-transaction-pool", path = "../../client/transaction-pool" } +txpool-api = { package = "sp-transaction-pool-api", path = "../../primitives/transaction-pool" } network = { package = "substrate-network", path = "../../client/network" } aura = { package = "substrate-consensus-aura", path = "../../client/consensus/aura" } aura-primitives = { package = "substrate-consensus-aura-primitives", path = "../../primitives/consensus/aura" } diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml index 
f71c238d4c524..46767e70989a1 100644 --- a/bin/node-template/runtime/Cargo.toml +++ b/bin/node-template/runtime/Cargo.toml @@ -29,7 +29,7 @@ support = { package = "frame-support", path = "../../../frame/support", default- system = { package = "frame-system", path = "../../../frame/system", default-features = false } timestamp = { package = "pallet-timestamp", path = "../../../frame/timestamp", default-features = false } transaction-payment = { package = "pallet-transaction-payment", path = "../../../frame/transaction-payment", default-features = false } -tx-pool-api = { package = "substrate-transaction-pool-runtime-api", path = "../../../primitives/transaction-pool/runtime-api", default-features = false } +txpool-runtime-api = { package = "sp-transaction-pool-runtime-api", path = "../../../primitives/transaction-pool/runtime-api", default-features = false } version = { package = "sr-version", path = "../../../primitives/sr-version", default-features = false } [build-dependencies] @@ -62,6 +62,6 @@ std = [ "system/std", "timestamp/std", "transaction-payment/std", - "tx-pool-api/std", + "txpool-runtime-api/std", "version/std", ] diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index 525452256b7f9..36edeecc6eaea 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -328,7 +328,7 @@ impl_runtime_apis! { } } - impl tx_pool_api::TaggedTransactionQueue for Runtime { + impl txpool_runtime_api::TaggedTransactionQueue for Runtime { fn validate_transaction(tx: ::Extrinsic) -> TransactionValidity { Executive::validate_transaction(tx) } diff --git a/bin/node-template/src/service.rs b/bin/node-template/src/service.rs index ad517d956b4c7..5a04459ef3f4c 100644 --- a/bin/node-template/src/service.rs +++ b/bin/node-template/src/service.rs @@ -5,7 +5,6 @@ use std::time::Duration; use substrate_client::LongestChain; use runtime::{self, GenesisConfig, opaque::Block, RuntimeApi}; use substrate_service::{error::{Error as ServiceError}, AbstractService, Configuration, ServiceBuilder}; -use transaction_pool::{self, txpool::{Pool as TransactionPool}}; use inherents::InherentDataProviders; use network::{construct_simple_protocol}; use substrate_executor::native_executor_instance; @@ -41,9 +40,13 @@ macro_rules! new_full_start { .with_select_chain(|_config, backend| { Ok(substrate_client::LongestChain::new(backend.clone())) })? - .with_transaction_pool(|config, client| - Ok(transaction_pool::txpool::Pool::new(config, transaction_pool::FullChainApi::new(client))) - )? + .with_transaction_pool(|config, client, _fetcher| { + let pool_api = txpool::FullChainApi::new(client.clone()); + let pool = txpool::BasicPool::new(config, pool_api); + let maintainer = txpool::FullBasicPoolMaintainer::new(pool.pool().clone(), client); + let maintainable_pool = txpool_api::MaintainableTransactionPool::new(pool, maintainer); + Ok(maintainable_pool) + })? 
.with_import_queue(|_config, client, mut select_chain, transaction_pool| { let select_chain = select_chain.take() .ok_or_else(|| substrate_service::Error::SelectChainRequired)?; @@ -191,9 +194,15 @@ pub fn new_light(config: Configuration RpcExtension { - node_rpc::create(client, pool) + .with_rpc_extensions(|client, pool, _backend, fetcher, _remote_blockchain| -> Result { + Ok(node_rpc::create(client, pool, node_rpc::LightDeps::none(fetcher))) })?; (builder, import_setup, inherent_data_providers) @@ -267,6 +270,17 @@ type ConcreteClient = >; #[allow(dead_code)] type ConcreteBackend = Backend; +#[allow(dead_code)] +type ConcreteTransactionPool = txpool_api::MaintainableTransactionPool< + txpool::BasicPool< + txpool::FullChainApi, + ConcreteBlock + >, + txpool::FullBasicPoolMaintainer< + ConcreteClient, + txpool::FullChainApi + > +>; /// A specialized configuration object for setting up the node.. pub type NodeConfiguration = Configuration; @@ -280,7 +294,7 @@ pub fn new_full(config: NodeConfiguration) LongestChain, NetworkStatus, NetworkService::Hash>, - TransactionPool>, + ConcreteTransactionPool, OffchainWorkers< ConcreteClient, >::OffchainStorage, @@ -303,9 +317,15 @@ pub fn new_light(config: NodeConfiguration) .with_select_chain(|_config, backend| { Ok(LongestChain::new(backend.clone())) })? - .with_transaction_pool(|config, client| - Ok(TransactionPool::new(config, transaction_pool::FullChainApi::new(client))) - )? + .with_transaction_pool(|config, client, fetcher| { + let fetcher = fetcher + .ok_or_else(|| "Trying to start light transaction pool without active fetcher")?; + let pool_api = txpool::LightChainApi::new(client.clone(), fetcher.clone()); + let pool = txpool::BasicPool::new(config, pool_api); + let maintainer = txpool::LightBasicPoolMaintainer::with_defaults(pool.pool().clone(), client, fetcher); + let maintainable_pool = txpool_api::MaintainableTransactionPool::new(pool, maintainer); + Ok(maintainable_pool) + })? .with_import_queue_and_fprb(|_config, client, backend, fetcher, _select_chain, _tx_pool| { let fetch_checker = fetcher .map(|fetcher| fetcher.checker().clone()) @@ -344,8 +364,14 @@ pub fn new_light(config: NodeConfiguration) .with_finality_proof_provider(|client, backend| Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, client)) as _) )? - .with_rpc_extensions(|client, pool, _backend| -> RpcExtension { - node_rpc::create(client, pool) + .with_rpc_extensions(|client, pool, _backend, fetcher, remote_blockchain| -> Result { + let fetcher = fetcher + .ok_or_else(|| "Trying to start node RPC without active fetcher")?; + let remote_blockchain = remote_blockchain + .ok_or_else(|| "Trying to start node RPC without active remote blockchain")?; + + let light_deps = node_rpc::LightDeps { remote_blockchain, fetcher }; + Ok(node_rpc::create(client, pool, Some(light_deps))) })? 
.build()?; diff --git a/bin/node/rpc/Cargo.toml b/bin/node/rpc/Cargo.toml index d267118c125a7..fb571e2706f35 100644 --- a/bin/node/rpc/Cargo.toml +++ b/bin/node/rpc/Cargo.toml @@ -13,4 +13,4 @@ sr-primitives = { path = "../../../primitives/sr-primitives" } pallet-contracts-rpc = { path = "../../../frame/contracts/rpc/" } pallet-transaction-payment-rpc = { path = "../../../frame/transaction-payment/rpc/" } substrate-frame-rpc-system = { path = "../../../utils/frame/rpc/system" } -transaction_pool = { package = "sc-transaction-pool", path = "../../../client/transaction-pool" } +txpool-api = { package = "sp-transaction-pool-api", path = "../../../primitives/transaction-pool" } diff --git a/bin/node/rpc/src/lib.rs b/bin/node/rpc/src/lib.rs index cb7aee283b278..06b0e33606483 100644 --- a/bin/node/rpc/src/lib.rs +++ b/bin/node/rpc/src/lib.rs @@ -34,32 +34,68 @@ use std::sync::Arc; use node_primitives::{Block, AccountId, Index, Balance}; use node_runtime::UncheckedExtrinsic; use sr_primitives::traits::ProvideRuntimeApi; -use transaction_pool::txpool::{ChainApi, Pool}; +use txpool_api::TransactionPool; + +/// Light client extra dependencies. +pub struct LightDeps { + /// Remote access to the blockchain (async). + pub remote_blockchain: Arc>, + /// Fetcher instance. + pub fetcher: Arc, +} + +impl LightDeps { + /// Create empty `LightDeps` with given `F` type. + /// + /// This is a convenience method to be used in the service builder, + /// to make sure the type of the `LightDeps` is matching. + pub fn none(_: Option>) -> Option { + None + } +} /// Instantiate all RPC extensions. -pub fn create(client: Arc, pool: Arc>) -> jsonrpc_core::IoHandler where +/// +/// If you provide `LightDeps`, the system is configured for light client. +pub fn create( + client: Arc, + pool: Arc
<P>
, + light_deps: Option>, +) -> jsonrpc_core::IoHandler where C: ProvideRuntimeApi, C: client::blockchain::HeaderBackend, C: Send + Sync + 'static, C::Api: substrate_frame_rpc_system::AccountNonceApi, C::Api: pallet_contracts_rpc::ContractsRuntimeApi, C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, - P: ChainApi + Sync + Send + 'static, + F: client::light::fetcher::Fetcher + 'static, + P: TransactionPool + 'static, M: jsonrpc_core::Metadata + Default, { - use substrate_frame_rpc_system::{System, SystemApi}; + use substrate_frame_rpc_system::{FullSystem, LightSystem, SystemApi}; use pallet_contracts_rpc::{Contracts, ContractsApi}; use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApi}; let mut io = jsonrpc_core::IoHandler::default(); - io.extend_with( - SystemApi::to_delegate(System::new(client.clone(), pool)) - ); - io.extend_with( - ContractsApi::to_delegate(Contracts::new(client.clone())) - ); - io.extend_with( - TransactionPaymentApi::to_delegate(TransactionPayment::new(client)) - ); + + if let Some(LightDeps { remote_blockchain, fetcher }) = light_deps { + io.extend_with( + SystemApi::::to_delegate(LightSystem::new(client, remote_blockchain, fetcher, pool)) + ); + } else { + io.extend_with( + SystemApi::to_delegate(FullSystem::new(client.clone(), pool)) + ); + + // Making synchronous calls in light client freezes the browser currently, + // more context: https://github.com/paritytech/substrate/pull/3480 + // These RPCs should use an asynchronous caller instead. + io.extend_with( + ContractsApi::to_delegate(Contracts::new(client.clone())) + ); + io.extend_with( + TransactionPaymentApi::to_delegate(TransactionPayment::new(client)) + ); + } io } diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index f5c4d5855bbbe..c24e4eb4d00fd 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -27,7 +27,7 @@ sr-primitives = { path = "../../../primitives/sr-primitives", default-features = sr-staking-primitives = { path = "../../../primitives/sr-staking-primitives", default-features = false } substrate-keyring = { path = "../../../primitives/keyring", optional = true } substrate-session = { path = "../../../primitives/session", default-features = false } -tx-pool-api = { package = "substrate-transaction-pool-runtime-api", path = "../../../primitives/transaction-pool/runtime-api", default-features = false } +txpool-runtime-api = { package = "sp-transaction-pool-runtime-api", path = "../../../primitives/transaction-pool/runtime-api", default-features = false } version = { package = "sr-version", path = "../../../primitives/sr-version", default-features = false } # frame dependencies @@ -116,7 +116,7 @@ std = [ "transaction-payment-rpc-runtime-api/std", "transaction-payment/std", "treasury/std", - "tx-pool-api/std", + "txpool-runtime-api/std", "utility/std", "version/std", ] diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index d55f37da12ac9..c1ae0510205b0 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -609,7 +609,7 @@ impl_runtime_apis! 
{ } } - impl tx_pool_api::TaggedTransactionQueue for Runtime { + impl txpool_runtime_api::TaggedTransactionQueue for Runtime { fn validate_transaction(tx: ::Extrinsic) -> TransactionValidity { Executive::validate_transaction(tx) } diff --git a/client/api/Cargo.toml b/client/api/Cargo.toml index ff83aa412bf01..b48990d763c2a 100644 --- a/client/api/Cargo.toml +++ b/client/api/Cargo.toml @@ -29,6 +29,7 @@ sr-primitives = { path = "../../primitives/sr-primitives", default-features = fa state-machine = { package = "substrate-state-machine", path = "../../primitives/state-machine" } substrate-telemetry = { path = "../telemetry" } trie = { package = "substrate-trie", path = "../../primitives/trie" } +txpool-api = { package = "sp-transaction-pool-api", path = "../../primitives/transaction-pool" } [dev-dependencies] env_logger = "0.7.0" diff --git a/client/api/src/execution_extensions.rs b/client/api/src/execution_extensions.rs index 83d70998e16aa..94ce6c573e5e3 100644 --- a/client/api/src/execution_extensions.rs +++ b/client/api/src/execution_extensions.rs @@ -30,7 +30,6 @@ use primitives::{ use sr_primitives::{ generic::BlockId, traits, - offchain::{TransactionPool}, }; use state_machine::{ExecutionStrategy, ExecutionManager, DefaultHandler}; use externalities::Extensions; @@ -71,7 +70,7 @@ impl Default for ExecutionStrategies { pub struct ExecutionExtensions { strategies: ExecutionStrategies, keystore: Option, - transaction_pool: RwLock>>>, + transaction_pool: RwLock>>>, } impl Default for ExecutionExtensions { @@ -105,7 +104,7 @@ impl ExecutionExtensions { /// extension to be a `Weak` reference. /// That's also the reason why it's being registered lazily instead of /// during initialisation. - pub fn register_transaction_pool(&self, pool: Weak>) { + pub fn register_transaction_pool(&self, pool: Weak>) { *self.transaction_pool.write() = Some(pool); } @@ -166,7 +165,7 @@ impl ExecutionExtensions { /// A wrapper type to pass `BlockId` to the actual transaction pool. struct TransactionPoolAdapter { at: BlockId, - pool: Arc>, + pool: Arc>, } impl offchain::TransactionPool for TransactionPoolAdapter { diff --git a/client/api/src/light.rs b/client/api/src/light.rs index d0206dbc40fbc..56a18dcc1f069 100644 --- a/client/api/src/light.rs +++ b/client/api/src/light.rs @@ -139,15 +139,30 @@ pub struct RemoteBodyRequest { /// is correct (see FetchedDataChecker) and return already checked data. pub trait Fetcher: Send + Sync { /// Remote header future. - type RemoteHeaderResult: Future> + Send + 'static; + type RemoteHeaderResult: Future> + Unpin + Send + 'static; /// Remote storage read future. - type RemoteReadResult: Future, Option>>, ClientError>> + Send + 'static; + type RemoteReadResult: Future, Option>>, + ClientError, + >> + Unpin + Send + 'static; /// Remote call result future. - type RemoteCallResult: Future, ClientError>> + Send + 'static; + type RemoteCallResult: Future, + ClientError, + >> + Unpin + Send + 'static; /// Remote changes result future. - type RemoteChangesResult: Future, u32)>, ClientError>> + Send + 'static; + type RemoteChangesResult: Future, u32)>, + ClientError, + >> + Unpin + Send + 'static; /// Remote block body result future. - type RemoteBodyResult: Future, ClientError>> + Send + 'static; + type RemoteBodyResult: Future, + ClientError, + >> + Unpin + Send + 'static; /// Fetch remote header. 
fn remote_header(&self, request: RemoteHeaderRequest) -> Self::RemoteHeaderResult; diff --git a/client/basic-authorship/Cargo.toml b/client/basic-authorship/Cargo.toml index 852a9bbe78742..c284f4b2ee7eb 100644 --- a/client/basic-authorship/Cargo.toml +++ b/client/basic-authorship/Cargo.toml @@ -16,10 +16,11 @@ client-api = { package = "substrate-client-api", path = "../api" } consensus_common = { package = "substrate-consensus-common", path = "../../primitives/consensus/common" } inherents = { package = "substrate-inherents", path = "../../primitives/inherents" } substrate-telemetry = { path = "../telemetry" } -transaction_pool = { package = "sc-transaction-pool", path = "../../client/transaction-pool" } +txpool-api = { package = "sp-transaction-pool-api", path = "../../primitives/transaction-pool" } block-builder = { package = "substrate-block-builder", path = "../block-builder" } tokio-executor = { version = "0.2.0-alpha.6", features = ["blocking"] } [dev-dependencies] +txpool = { package = "sc-transaction-pool", path = "../../client/transaction-pool" } test-client = { package = "substrate-test-runtime-client", path = "../../test/utils/runtime/client" } parking_lot = "0.9" diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index 46494ef4bbf10..abffe8ee4007d 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -33,21 +33,21 @@ use sr_primitives::{ }, generic::BlockId, }; -use transaction_pool::txpool::{self, Pool as TransactionPool}; +use txpool_api::{TransactionPool, InPoolTransaction}; use substrate_telemetry::{telemetry, CONSENSUS_INFO}; use block_builder::BlockBuilderApi; /// Proposer factory. -pub struct ProposerFactory where A: txpool::ChainApi { +pub struct ProposerFactory where A: TransactionPool { /// The client instance. pub client: Arc, /// The transaction pool. - pub transaction_pool: Arc>, + pub transaction_pool: Arc, } impl ProposerFactory, A> where - A: txpool::ChainApi + 'static, + A: TransactionPool + 'static, B: client_api::backend::Backend + Send + Sync + 'static, E: CallExecutor + Send + Sync + Clone + 'static, Block: BlockT, @@ -85,7 +85,7 @@ where impl consensus_common::Environment for ProposerFactory, A> where - A: txpool::ChainApi + 'static, + A: TransactionPool + 'static, B: client_api::backend::Backend + Send + Sync + 'static, E: CallExecutor + Send + Sync + Clone + 'static, Block: BlockT, @@ -106,24 +106,24 @@ where } /// The proposer logic. -pub struct Proposer { +pub struct Proposer { inner: Arc>, } /// Proposer inner, to wrap parameters under Arc. 
-struct ProposerInner { +struct ProposerInner { client: Arc, parent_hash: ::Hash, parent_id: BlockId, parent_number: <::Header as HeaderT>::Number, - transaction_pool: Arc>, + transaction_pool: Arc, now: Box time::Instant + Send + Sync>, } impl consensus_common::Proposer for Proposer, A> where - A: txpool::ChainApi + 'static, + A: TransactionPool + 'static, B: client_api::backend::Backend + Send + Sync + 'static, E: CallExecutor + Send + Sync + Clone + 'static, Block: BlockT, @@ -151,7 +151,7 @@ where } impl ProposerInner, A> where - A: txpool::ChainApi + 'static, + A: TransactionPool + 'static, B: client_api::backend::Backend + Send + Sync + 'static, E: CallExecutor + Send + Sync + Clone + 'static, Block: BlockT, @@ -192,22 +192,24 @@ impl ProposerInner, let pending_iterator = self.transaction_pool.ready(); debug!("Attempting to push transactions from the pool."); - for pending in pending_iterator { + for pending_tx in pending_iterator { if (self.now)() > deadline { debug!("Consensus deadline reached when pushing block transactions, proceeding with proposing."); break; } - trace!("[{:?}] Pushing to the block.", pending.hash); - match block_builder::BlockBuilder::push(&mut block_builder, pending.data.clone()) { + let pending_tx_data = pending_tx.data().clone(); + let pending_tx_hash = pending_tx.hash().clone(); + trace!("[{:?}] Pushing to the block.", pending_tx_hash); + match block_builder::BlockBuilder::push(&mut block_builder, pending_tx_data) { Ok(()) => { - debug!("[{:?}] Pushed to the block.", pending.hash); + debug!("[{:?}] Pushed to the block.", pending_tx_hash); } Err(sp_blockchain::Error::ApplyExtrinsicFailed(sp_blockchain::ApplyExtrinsicFailed::Validity(e))) if e.exhausted_resources() => { if is_first { - debug!("[{:?}] Invalid transaction: FullBlock on empty block", pending.hash); - unqueue_invalid.push(pending.hash.clone()); + debug!("[{:?}] Invalid transaction: FullBlock on empty block", pending_tx_hash); + unqueue_invalid.push(pending_tx_hash); } else if skipped < MAX_SKIPPED_TRANSACTIONS { skipped += 1; debug!( @@ -220,8 +222,8 @@ impl ProposerInner, } } Err(e) => { - debug!("[{:?}] Invalid transaction: {}", pending.hash, e); - unqueue_invalid.push(pending.hash.clone()); + debug!("[{:?}] Invalid transaction: {}", pending_tx_hash, e); + unqueue_invalid.push(pending_tx_hash); } } @@ -266,6 +268,7 @@ mod tests { use parking_lot::Mutex; use consensus_common::Proposer; use test_client::{self, runtime::{Extrinsic, Transfer}, AccountKeyring}; + use txpool::{BasicPool, FullChainApi}; fn extrinsic(nonce: u64) -> Extrinsic { Transfer { @@ -280,11 +283,10 @@ mod tests { fn should_cease_building_block_when_deadline_is_reached() { // given let client = Arc::new(test_client::new()); - let chain_api = transaction_pool::FullChainApi::new(client.clone()); - let txpool = Arc::new(TransactionPool::new(Default::default(), chain_api)); + let txpool = Arc::new(BasicPool::new(Default::default(), FullChainApi::new(client.clone()))); futures::executor::block_on( - txpool.submit_at(&BlockId::number(0), vec![extrinsic(0), extrinsic(1)], false) + txpool.submit_at(&BlockId::number(0), vec![extrinsic(0), extrinsic(1)]) ).unwrap(); let mut proposer_factory = ProposerFactory { diff --git a/client/basic-authorship/src/lib.rs b/client/basic-authorship/src/lib.rs index 7961e4fe9e9df..de4a8af93b61c 100644 --- a/client/basic-authorship/src/lib.rs +++ b/client/basic-authorship/src/lib.rs @@ -24,10 +24,9 @@ //! # use sr_primitives::generic::BlockId; //! # use std::{sync::Arc, time::Duration}; //! 
# use test_client::{self, runtime::{Extrinsic, Transfer}, AccountKeyring}; -//! # use transaction_pool::txpool::{self, Pool as TransactionPool}; +//! # use txpool::{BasicPool, FullChainApi}; //! # let client = Arc::new(test_client::new()); -//! # let chain_api = transaction_pool::FullChainApi::new(client.clone()); -//! # let txpool = Arc::new(TransactionPool::new(Default::default(), chain_api)); +//! # let txpool = Arc::new(BasicPool::new(Default::default(), FullChainApi::new(client.clone()))); //! // The first step is to create a `ProposerFactory`. //! let mut proposer_factory = ProposerFactory { //! client: client.clone(), diff --git a/client/offchain/Cargo.toml b/client/offchain/Cargo.toml index afec2af4fd55a..d7e0ba1e98ba4 100644 --- a/client/offchain/Cargo.toml +++ b/client/offchain/Cargo.toml @@ -35,7 +35,8 @@ client-db = { package = "substrate-client-db", path = "../db/", default-features env_logger = "0.7.0" test-client = { package = "substrate-test-runtime-client", path = "../../test/utils/runtime/client" } tokio = "0.1.22" -transaction_pool = { package = "sc-transaction-pool", path = "../../client/transaction-pool" } +txpool = { package = "sc-transaction-pool", path = "../../client/transaction-pool" } +txpool-api = { package = "sp-transaction-pool-api", path = "../../primitives/transaction-pool" } [features] default = [] diff --git a/client/offchain/src/lib.rs b/client/offchain/src/lib.rs index a1db2456bd9e7..bf7965d308412 100644 --- a/client/offchain/src/lib.rs +++ b/client/offchain/src/lib.rs @@ -145,9 +145,11 @@ impl OffchainWorkers< #[cfg(test)] mod tests { use super::*; - use network::{Multiaddr, PeerId}; use std::sync::Arc; - use transaction_pool::txpool::Pool; + use network::{Multiaddr, PeerId}; + use test_client::runtime::Block; + use txpool::{BasicPool, FullChainApi}; + use txpool_api::{TransactionPool, InPoolTransaction}; struct MockNetworkStateInfo(); @@ -161,15 +163,26 @@ mod tests { } } + struct TestPool(BasicPool, Block>); + + impl txpool_api::OffchainSubmitTransaction for TestPool { + fn submit_at( + &self, + at: &BlockId, + extrinsic: ::Extrinsic, + ) -> Result<(), ()> { + futures::executor::block_on(self.0.submit_one(&at, extrinsic)) + .map(|_| ()) + .map_err(|_| ()) + } + } + #[test] fn should_call_into_runtime_and_produce_extrinsic() { // given let _ = env_logger::try_init(); let client = Arc::new(test_client::new()); - let pool = Arc::new(Pool::new( - Default::default(), - transaction_pool::FullChainApi::new(client.clone()) - )); + let pool = Arc::new(TestPool(BasicPool::new(Default::default(), FullChainApi::new(client.clone())))); client.execution_extensions() .register_transaction_pool(Arc::downgrade(&pool.clone()) as _); let db = client_db::offchain::LocalStorage::new_test(); @@ -180,7 +193,7 @@ mod tests { futures::executor::block_on(offchain.on_block_imported(&0u64, network_state, false)); // then - assert_eq!(pool.status().ready, 1); - assert_eq!(pool.ready().next().unwrap().is_propagateable(), false); + assert_eq!(pool.0.status().ready, 1); + assert_eq!(pool.0.ready().next().unwrap().is_propagateable(), false); } } diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml index a42ada7205fdd..afc79ed026595 100644 --- a/client/rpc/Cargo.toml +++ b/client/rpc/Cargo.toml @@ -23,7 +23,7 @@ rpc-primitives = { package = "substrate-rpc-primitives", path = "../../primitive state_machine = { package = "substrate-state-machine", path = "../../primitives/state-machine" } substrate-executor = { path = "../executor" } substrate-keystore = { path = "../keystore" } 
-transaction_pool = { package = "sc-transaction-pool", path = "../../client/transaction-pool" } +txpool-api = { package = "sp-transaction-pool-api", path = "../../primitives/transaction-pool" } sp-blockchain = { path = "../../primitives/blockchain" } hash-db = { version = "0.15.2", default-features = false } parking_lot = { version = "0.9.0" } @@ -36,3 +36,4 @@ rustc-hex = "2.0.1" sr-io = { path = "../../primitives/sr-io" } test-client = { package = "substrate-test-runtime-client", path = "../../test/utils/runtime/client" } tokio = "0.1.22" +txpool = { package = "sc-transaction-pool", path = "../transaction-pool" } diff --git a/client/rpc/api/Cargo.toml b/client/rpc/api/Cargo.toml index 9d88aae9a0bba..acc4aa1c008b1 100644 --- a/client/rpc/api/Cargo.toml +++ b/client/rpc/api/Cargo.toml @@ -18,5 +18,5 @@ primitives = { package = "substrate-primitives", path = "../../../primitives/cor runtime_version = { package = "sr-version", path = "../../../primitives/sr-version" } serde = { version = "1.0.101", features = ["derive"] } serde_json = "1.0.41" -txpool = { package = "sc-transaction-graph", path = "../../../client/transaction-pool/graph" } +txpool-api = { package = "sp-transaction-pool-api", path = "../../../primitives/transaction-pool" } rpc-primitives = { package = "substrate-rpc-primitives", path = "../../../primitives/rpc" } diff --git a/client/rpc/api/src/author/error.rs b/client/rpc/api/src/author/error.rs index d1c943ef4465b..eb98fbda51cbd 100644 --- a/client/rpc/api/src/author/error.rs +++ b/client/rpc/api/src/author/error.rs @@ -34,7 +34,7 @@ pub enum Error { Client(Box), /// Transaction pool error, #[display(fmt="Transaction pool error: {}", _0)] - Pool(txpool::error::Error), + Pool(txpool_api::error::Error), /// Verification error #[display(fmt="Extrinsic verification error: {}", _0)] #[from(ignore)] @@ -93,7 +93,7 @@ const UNSUPPORTED_KEY_TYPE: i64 = POOL_INVALID_TX + 7; impl From for rpc::Error { fn from(e: Error) -> Self { - use txpool::error::{Error as PoolError}; + use txpool_api::error::{Error as PoolError}; match e { Error::BadFormat(e) => rpc::Error { diff --git a/client/rpc/api/src/author/mod.rs b/client/rpc/api/src/author/mod.rs index 4ea96cb3c6122..927bb530eab0f 100644 --- a/client/rpc/api/src/author/mod.rs +++ b/client/rpc/api/src/author/mod.rs @@ -21,11 +21,9 @@ pub mod hash; use jsonrpc_derive::rpc; use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; -use primitives::{ - Bytes -}; +use primitives::Bytes; +use txpool_api::TransactionStatus; use self::error::{FutureResult, Result}; -use txpool::watcher::Status; pub use self::gen_client::Client as AuthorClient; @@ -69,7 +67,7 @@ pub trait AuthorApi { )] fn watch_extrinsic(&self, metadata: Self::Metadata, - subscriber: Subscriber>, + subscriber: Subscriber>, bytes: Bytes ); diff --git a/client/rpc/src/author/mod.rs b/client/rpc/src/author/mod.rs index 920331d9aebf4..9380bc46f5bd5 100644 --- a/client/rpc/src/author/mod.rs +++ b/client/rpc/src/author/mod.rs @@ -20,7 +20,6 @@ mod tests; use std::{sync::Arc, convert::TryInto}; -use futures::future::{FutureExt, TryFutureExt}; use log::warn; use client::Client; @@ -30,21 +29,17 @@ use rpc::futures::{ Sink, Future, future::result, }; -use futures::{StreamExt as _, compat::Compat, future::ready}; +use futures::{StreamExt as _, compat::Compat}; +use futures::future::{ready, FutureExt, TryFutureExt}; use api::Subscriptions; use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; use codec::{Encode, Decode}; use primitives::{Bytes, Blake2Hasher, H256, 
traits::BareCryptoStorePtr}; +use sr_api::ConstructRuntimeApi; use sr_primitives::{generic, traits::{self, ProvideRuntimeApi}}; -use transaction_pool::{ - txpool::{ - ChainApi as PoolChainApi, - BlockHash, - ExHash, - IntoPoolError, - Pool, - watcher::Status, - }, +use txpool_api::{ + TransactionPool, InPoolTransaction, TransactionStatus, + BlockHash, TxHash, TransactionFor, IntoPoolError, }; use session::SessionKeys; @@ -53,22 +48,22 @@ pub use api::author::*; use self::error::{Error, FutureResult, Result}; /// Authoring API -pub struct Author where P: PoolChainApi + Sync + Send + 'static { +pub struct Author { /// Substrate client - client: Arc::Block, RA>>, + client: Arc>, /// Transactions pool - pool: Arc>, + pool: Arc
<P>, /// Subscriptions manager subscriptions: Subscriptions, /// The key store. keystore: BareCryptoStorePtr, } -impl Author where P: PoolChainApi + Sync + Send + 'static { /// Create new instance of Authoring API. pub fn new( - client: Arc::Block, RA>>, - pool: Arc>, + client: Arc>, + pool: Arc
<P>, subscriptions: Subscriptions, keystore: BareCryptoStorePtr, ) -> Self { @@ -81,16 +76,15 @@ impl Author where P: PoolChainApi + Sync + Send + 'sta } } -impl AuthorApi, BlockHash
<P>> for Author where - B: client_api::backend::Backend<
<P as PoolChainApi>::Block, Blake2Hasher> + Send + Sync + 'static, - E: client_api::CallExecutor<
<P as PoolChainApi>::Block, Blake2Hasher> + Send + Sync + 'static, - P: PoolChainApi + Sync + Send + 'static, - P::Block: traits::Block, - P::Error: 'static, - RA: Send + Sync + 'static, - Client: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: - SessionKeys, + Block: traits::Block, + B: client_api::backend::Backend + Send + Sync + 'static, + E: client_api::CallExecutor + Clone + Send + Sync + 'static, + P: TransactionPool + Sync + Send + 'static, + RA: ConstructRuntimeApi> + Send + Sync + 'static, + Client: ProvideRuntimeApi, + as ProvideRuntimeApi>::Api: + SessionKeys, { type Metadata = crate::metadata::Metadata; @@ -115,7 +109,7 @@ impl AuthorApi, BlockHash
<P>> for Author whe ).map(Into::into).map_err(|e| Error::Client(Box::new(e))) } - fn submit_extrinsic(&self, ext: Bytes) -> FutureResult> { + fn submit_extrinsic(&self, ext: Bytes) -> FutureResult> { let xt = match Decode::decode(&mut &ext[..]) { Ok(xt) => xt, Err(err) => return Box::new(result(Err(err.into()))), @@ -131,13 +125,13 @@ impl AuthorApi, BlockHash
<P>> for Author whe } fn pending_extrinsics(&self) -> Result> { - Ok(self.pool.ready().map(|tx| tx.data.encode().into()).collect()) + Ok(self.pool.ready().map(|tx| tx.data().encode().into()).collect()) } fn remove_extrinsic( &self, - bytes_or_hash: Vec>>, - ) -> Result>> { + bytes_or_hash: Vec>>, + ) -> Result>> { let hashes = bytes_or_hash.into_iter() .map(|x| match x { hash::ExtrinsicOrHash::Hash(h) => Ok(h), @@ -149,21 +143,22 @@ impl AuthorApi, BlockHash
<P>> for Author whe .collect::>>()?; Ok( - self.pool.remove_invalid(&hashes) + self.pool + .remove_invalid(&hashes) .into_iter() - .map(|tx| tx.hash.clone()) + .map(|tx| tx.hash().clone()) .collect() ) } fn watch_extrinsic(&self, _metadata: Self::Metadata, - subscriber: Subscriber, BlockHash
<P>>>, + subscriber: Subscriber, BlockHash
<P>>>, xt: Bytes, ) { let submit = || -> Result<_> { let best_block_hash = self.client.info().chain.best_hash; - let dxt = <
<P as PoolChainApi>::Block as traits::Block>::Extrinsic::decode(&mut &xt[..]) + let dxt = TransactionFor::
<P>::decode(&mut &xt[..]) .map_err(error::Error::from)?; Ok( self.pool @@ -179,7 +174,7 @@ impl AuthorApi, BlockHash
<P>
> for Author whe let future = ready(submit()) .and_then(|res| res) // convert the watcher into a `Stream` - .map(|res| res.map(|watcher| watcher.into_stream().map(|v| Ok::<_, ()>(Ok(v))))) + .map(|res| res.map(|stream| stream.map(|v| Ok::<_, ()>(Ok(v))))) // now handle the import result, // start a new subscrition .map(move |result| match result { diff --git a/client/rpc/src/author/tests.rs b/client/rpc/src/author/tests.rs index 5ae044ff49ede..14aa03a643ca6 100644 --- a/client/rpc/src/author/tests.rs +++ b/client/rpc/src/author/tests.rs @@ -25,13 +25,10 @@ use primitives::{ }; use rpc::futures::Stream as _; use test_client::{ - self, AccountKeyring, runtime::{Extrinsic, Transfer, SessionKeys, RuntimeApi, Block}, DefaultTestClientBuilderExt, - TestClientBuilderExt, Backend, Client, Executor -}; -use transaction_pool::{ - txpool::Pool, - FullChainApi, + self, AccountKeyring, runtime::{Extrinsic, Transfer, SessionKeys, RuntimeApi, Block}, + DefaultTestClientBuilderExt, TestClientBuilderExt, Backend, Client, Executor, }; +use txpool::{BasicPool, FullChainApi}; use tokio::runtime; fn uxt(sender: AccountKeyring, nonce: u64) -> Extrinsic { @@ -44,18 +41,23 @@ fn uxt(sender: AccountKeyring, nonce: u64) -> Extrinsic { tx.into_signed_tx() } +type FullTransactionPool = BasicPool< + FullChainApi, Block>, + Block, +>; + struct TestSetup { pub runtime: runtime::Runtime, pub client: Arc>, pub keystore: BareCryptoStorePtr, - pub pool: Arc, Block>>>, + pub pool: Arc, } impl Default for TestSetup { fn default() -> Self { let keystore = KeyStore::new(); let client = Arc::new(test_client::TestClientBuilder::new().set_keystore(keystore.clone()).build()); - let pool = Arc::new(Pool::new(Default::default(), FullChainApi::new(client.clone()))); + let pool = Arc::new(BasicPool::new(Default::default(), FullChainApi::new(client.clone()))); TestSetup { runtime: runtime::Runtime::new().expect("Failed to create runtime in test setup"), client, @@ -66,7 +68,7 @@ impl Default for TestSetup { } impl TestSetup { - fn author(&self) -> Author, Block>, RuntimeApi> { + fn author(&self) -> Author { Author { client: self.client.clone(), pool: self.pool.clone(), diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 2c19c57a31018..bf9491add3949 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -41,11 +41,12 @@ chain-spec = { package = "substrate-chain-spec", path = "../chain-spec" } client-api = { package = "substrate-client-api", path = "../api" } client = { package = "substrate-client", path = "../" } sr-api = { path = "../../primitives/sr-api" } -tx-pool-api = { package = "substrate-transaction-pool-runtime-api", path = "../../primitives/transaction-pool/runtime-api" } +txpool-runtime-api = { package = "sp-transaction-pool-runtime-api", path = "../../primitives/transaction-pool/runtime-api" } client_db = { package = "substrate-client-db", path = "../db" } codec = { package = "parity-scale-codec", version = "1.0.0" } substrate-executor = { path = "../executor" } -transaction_pool = { package = "sc-transaction-pool", path = "../../client/transaction-pool" } +txpool = { package = "sc-transaction-pool", path = "../transaction-pool" } +txpool-api = { package = "sp-transaction-pool-api", path = "../../primitives/transaction-pool" } rpc-servers = { package = "substrate-rpc-servers", path = "../rpc-servers" } rpc = { package = "substrate-rpc", path = "../rpc" } tel = { package = "substrate-telemetry", path = "../telemetry" } diff --git a/client/service/src/builder.rs 
b/client/service/src/builder.rs index ef3403e6d8ee9..1666257ab9fd6 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use crate::{Service, NetworkStatus, NetworkState, error::{self, Error}, DEFAULT_PROTOCOL_ID}; +use crate::{Service, NetworkStatus, NetworkState, error::Error, DEFAULT_PROTOCOL_ID}; use crate::{SpawnTaskHandle, start_rpc_servers, build_network_future, TransactionPoolAdapter}; use crate::status_sinks; use crate::config::{Configuration, DatabaseConfig}; @@ -30,7 +30,6 @@ use consensus_common::import_queue::ImportQueue; use futures::{prelude::*, sync::mpsc}; use futures03::{ compat::{Compat, Future01CompatExt}, - future::ready, FutureExt as _, TryFutureExt as _, StreamExt as _, TryStreamExt as _, future::{select, Either} @@ -42,9 +41,11 @@ use network::{config::BoxFinalityProofRequestBuilder, specialization::NetworkSpe use parking_lot::{Mutex, RwLock}; use primitives::{Blake2Hasher, H256, Hasher}; use rpc; +use sr_api::ConstructRuntimeApi; use sr_primitives::generic::BlockId; use sr_primitives::traits::{ - Block as BlockT, Extrinsic, ProvideRuntimeApi, NumberFor, One, Zero, Header, SaturatedConversion + Block as BlockT, ProvideRuntimeApi, NumberFor, One, + Zero, Header, SaturatedConversion, }; use substrate_executor::{NativeExecutor, NativeExecutionDispatch}; use std::{ @@ -53,7 +54,7 @@ use std::{ }; use sysinfo::{get_current_pid, ProcessExt, System, SystemExt}; use tel::{telemetry, SUBSTRATE_INFO}; -use transaction_pool::txpool::{self, ChainApi, Pool as TransactionPool}; +use txpool_api::{TransactionPool, TransactionPoolMaintainer}; use sp_blockchain; use grafana_data_source::{self, record_metrics}; @@ -295,7 +296,7 @@ where TGen: RuntimeGenesis, TCSExt: Extension { client, backend, keystore, - fetcher: Some(fetcher), + fetcher: Some(fetcher.clone()), select_chain: None, import_queue: (), finality_proof_request_builder: None, @@ -559,10 +560,19 @@ impl( self, - transaction_pool_builder: impl FnOnce(transaction_pool::txpool::Options, Arc) -> Result + transaction_pool_builder: impl FnOnce( + txpool::txpool::Options, + Arc, + Option, + ) -> Result ) -> Result, Error> { - let transaction_pool = transaction_pool_builder(self.config.transaction_pool.clone(), self.client.clone())?; + TNetP, UExPool, TRpc, Backend>, Error> + where TSc: Clone, TFchr: Clone { + let transaction_pool = transaction_pool_builder( + self.config.transaction_pool.clone(), + self.client.clone(), + self.fetcher.clone(), + )?; Ok(ServiceBuilder { config: self.config, @@ -586,10 +596,23 @@ impl( self, - rpc_ext_builder: impl FnOnce(Arc, Arc, Arc) -> URpc + rpc_ext_builder: impl FnOnce( + Arc, + Arc, + Arc, + Option, + Option>>, + ) -> Result, ) -> Result, Error> { - let rpc_extensions = rpc_ext_builder(self.client.clone(), self.transaction_pool.clone(), self.backend.clone()); + TNetP, TExPool, URpc, Backend>, Error> + where TSc: Clone, TFchr: Clone { + let rpc_extensions = rpc_ext_builder( + self.client.clone(), + self.transaction_pool.clone(), + self.backend.clone(), + self.fetcher.clone(), + self.remote_backend.clone(), + )?; Ok(ServiceBuilder { config: self.config, @@ -742,7 +765,7 @@ where } } -impl +impl ServiceBuilder< TBl, TRtApi, @@ -756,7 +779,7 @@ ServiceBuilder< BoxFinalityProofRequestBuilder, Arc>, TNetP, - TransactionPool, + TExPool, TRpc, TBackend, > where @@ -764,11 +787,11 @@ ServiceBuilder< as ProvideRuntimeApi>::Api: sr_api::Metadata + 
offchain::OffchainWorkerApi + - tx_pool_api::TaggedTransactionQueue + + txpool_runtime_api::TaggedTransactionQueue + session::SessionKeys + sr_api::ApiExt, TBl: BlockT::Out>, - TRtApi: 'static + Send + Sync, + TRtApi: ConstructRuntimeApi> + 'static + Send + Sync, TCfg: Default, TGen: RuntimeGenesis, TCSExt: Extension, @@ -777,7 +800,9 @@ ServiceBuilder< TSc: Clone, TImpQu: 'static + ImportQueue, TNetP: NetworkSpecialization, - TExPoolApi: 'static + ChainApi::Hash>, + TExPool: 'static + + TransactionPool::Hash> + + TransactionPoolMaintainer::Hash>, TRpc: rpc::RpcExtension + Clone, { /// Builds the service. @@ -787,7 +812,7 @@ ServiceBuilder< TSc, NetworkStatus, NetworkService::Hash>, - TransactionPool, + TExPool, offchain::OffchainWorkers< Client, TBackend::OffchainStorage, @@ -900,7 +925,6 @@ ServiceBuilder< { // block notifications let txpool = Arc::downgrade(&transaction_pool); - let wclient = Arc::downgrade(&client); let offchain = offchain_workers.as_ref().map(Arc::downgrade); let to_spawn_tx_ = to_spawn_tx.clone(); let network_state_info: Arc = network.clone(); @@ -912,14 +936,12 @@ ServiceBuilder< let number = *notification.header.number(); let txpool = txpool.upgrade(); - if let (Some(txpool), Some(client)) = (txpool.as_ref(), wclient.upgrade()) { - let future = maintain_transaction_pool( + if let Some(txpool) = txpool.as_ref() { + let future = txpool.maintain( &BlockId::hash(notification.hash), - &client, - &*txpool, ¬ification.retracted, - ).map_err(|e| warn!("Pool error processing new block: {:?}", e))?; - let _ = to_spawn_tx_.unbounded_send(future); + ).map(|_| Ok(())).compat(); + let _ = to_spawn_tx_.unbounded_send(Box::new(future)); } let offchain = offchain.as_ref().and_then(|o| o.upgrade()); @@ -1202,157 +1224,3 @@ ServiceBuilder< }) } } - -pub(crate) fn maintain_transaction_pool( - id: &BlockId, - client: &Arc>, - transaction_pool: &TransactionPool, - retracted: &[Block::Hash], -) -> error::Result + Send>> where - Block: BlockT::Out>, - Backend: 'static + client_api::backend::Backend, - Client: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: - tx_pool_api::TaggedTransactionQueue, - Executor: 'static + client::CallExecutor, - PoolApi: 'static + txpool::ChainApi, - Api: 'static, -{ - // Put transactions from retracted blocks back into the pool. - let client_copy = client.clone(); - let retracted_transactions = retracted.to_vec().into_iter() - .filter_map(move |hash| client_copy.block(&BlockId::hash(hash)).ok().unwrap_or(None)) - .flat_map(|block| block.block.deconstruct().1.into_iter()) - .filter(|tx| tx.is_signed().unwrap_or(false)); - let resubmit_future = transaction_pool - .submit_at(id, retracted_transactions, true) - .then(|resubmit_result| ready(match resubmit_result { - Ok(_) => Ok(()), - Err(e) => { - warn!("Error re-submitting transactions: {:?}", e); - Ok(()) - } - })) - .compat(); - - // Avoid calling into runtime if there is nothing to prune from the pool anyway. 
- if transaction_pool.status().is_empty() { - return Ok(Box::new(resubmit_future)) - } - - let block = client.block(id)?; - Ok(match block { - Some(block) => { - let parent_id = BlockId::hash(*block.block.header().parent_hash()); - let prune_future = transaction_pool - .prune(id, &parent_id, block.block.extrinsics()) - .boxed() - .compat() - .map_err(|e| { format!("{:?}", e); }); - - Box::new(resubmit_future.and_then(|_| prune_future)) - }, - None => Box::new(resubmit_future), - }) -} - -#[cfg(test)] -mod tests { - use super::*; - use futures03::executor::block_on; - use consensus_common::{BlockOrigin, SelectChain}; - use substrate_test_runtime_client::{prelude::*, runtime::Transfer}; - - #[test] - fn should_remove_transactions_from_the_pool() { - let (client, longest_chain) = TestClientBuilder::new().build_with_longest_chain(); - let client = Arc::new(client); - let pool = TransactionPool::new(Default::default(), ::transaction_pool::FullChainApi::new(client.clone())); - let transaction = Transfer { - amount: 5, - nonce: 0, - from: AccountKeyring::Alice.into(), - to: Default::default(), - }.into_signed_tx(); - let best = longest_chain.best_chain().unwrap(); - - // store the transaction in the pool - block_on(pool.submit_one(&BlockId::hash(best.hash()), transaction.clone())).unwrap(); - - // import the block - let mut builder = client.new_block(Default::default()).unwrap(); - builder.push(transaction.clone()).unwrap(); - let block = builder.bake().unwrap(); - let id = BlockId::hash(block.header().hash()); - client.import(BlockOrigin::Own, block).unwrap(); - - // fire notification - this should clean up the queue - assert_eq!(pool.status().ready, 1); - maintain_transaction_pool( - &id, - &client, - &pool, - &[] - ).unwrap().wait().unwrap(); - - // then - assert_eq!(pool.status().ready, 0); - assert_eq!(pool.status().future, 0); - } - - #[test] - fn should_add_reverted_transactions_to_the_pool() { - let (client, longest_chain) = TestClientBuilder::new().build_with_longest_chain(); - let client = Arc::new(client); - let pool = TransactionPool::new(Default::default(), ::transaction_pool::FullChainApi::new(client.clone())); - let transaction = Transfer { - amount: 5, - nonce: 0, - from: AccountKeyring::Alice.into(), - to: Default::default(), - }.into_signed_tx(); - let best = longest_chain.best_chain().unwrap(); - - // store the transaction in the pool - block_on(pool.submit_one(&BlockId::hash(best.hash()), transaction.clone())).unwrap(); - - // import the block - let mut builder = client.new_block(Default::default()).unwrap(); - builder.push(transaction.clone()).unwrap(); - let block = builder.bake().unwrap(); - let block1_hash = block.header().hash(); - let id = BlockId::hash(block1_hash.clone()); - client.import(BlockOrigin::Own, block).unwrap(); - - // fire notification - this should clean up the queue - assert_eq!(pool.status().ready, 1); - maintain_transaction_pool( - &id, - &client, - &pool, - &[] - ).unwrap().wait().unwrap(); - - // then - assert_eq!(pool.status().ready, 0); - assert_eq!(pool.status().future, 0); - - // import second block - let builder = client.new_block_at(&BlockId::hash(best.hash()), Default::default()).unwrap(); - let block = builder.bake().unwrap(); - let id = BlockId::hash(block.header().hash()); - client.import(BlockOrigin::Own, block).unwrap(); - - // fire notification - this should add the transaction back to the pool. 
- maintain_transaction_pool( - &id, - &client, - &pool, - &[block1_hash] - ).unwrap().wait().unwrap(); - - // then - assert_eq!(pool.status().ready, 1); - assert_eq!(pool.status().future, 0); - } -} diff --git a/client/service/src/config.rs b/client/service/src/config.rs index a17c0ee877de2..6ef2aa46ac270 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -22,7 +22,7 @@ pub use network::config::{ExtTransport, NetworkConfiguration, Roles}; pub use substrate_executor::WasmExecutionMethod; use std::{path::PathBuf, net::SocketAddr, sync::Arc}; -use transaction_pool; +pub use txpool::txpool::Options as TransactionPoolOptions; use chain_spec::{ChainSpec, RuntimeGenesis, Extension, NoExtension}; use primitives::crypto::Protected; use target_info::Target; @@ -40,7 +40,7 @@ pub struct Configuration { /// Node roles. pub roles: Roles, /// Extrinsic pool configuration. - pub transaction_pool: transaction_pool::txpool::Options, + pub transaction_pool: TransactionPoolOptions, /// Network configuration. pub network: NetworkConfiguration, /// Path to the base configuration directory. diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 6ddc6ab797d59..702f72f8322ea 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -53,12 +53,13 @@ use sr_primitives::generic::BlockId; use sr_primitives::traits::{NumberFor, Block as BlockT}; pub use self::error::Error; -pub use self::builder::{ServiceBuilder, ServiceBuilderExport, ServiceBuilderImport, ServiceBuilderRevert}; +pub use self::builder::{ + ServiceBuilder, ServiceBuilderExport, ServiceBuilderImport, ServiceBuilderRevert, +}; pub use config::{Configuration, Roles, PruningMode}; pub use chain_spec::{ChainSpec, Properties, RuntimeGenesis, Extension as ChainSpecExtension}; -pub use transaction_pool::txpool::{ - self, Pool as TransactionPool, Options as TransactionPoolOptions, ChainApi, IntoPoolError -}; +pub use txpool_api::{TransactionPool, TransactionPoolMaintainer, InPoolTransaction, IntoPoolError}; +pub use txpool::txpool::Options as TransactionPoolOptions; pub use client::FinalityNotifications; pub use rpc::Metadata as RpcMetadata; #[doc(hidden)] @@ -144,8 +145,9 @@ pub trait AbstractService: 'static + Future + type RuntimeApi: Send + Sync; /// Chain selection algorithm. type SelectChain: consensus_common::SelectChain; - /// API of the transaction pool. - type TransactionPoolApi: ChainApi; + /// Transaction pool. + type TransactionPool: TransactionPool + + TransactionPoolMaintainer; /// Network specialization. type NetworkSpecialization: NetworkSpecialization; @@ -193,22 +195,23 @@ pub trait AbstractService: 'static + Future + fn network_status(&self, interval: Duration) -> mpsc::UnboundedReceiver<(NetworkStatus, NetworkState)>; /// Get shared transaction pool instance. - fn transaction_pool(&self) -> Arc>; + fn transaction_pool(&self) -> Arc; /// Get a handle to a future that will resolve on exit. 
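The `AbstractService` hunk above replaces the old `TransactionPoolApi: ChainApi` associated type with a single `TransactionPool` type that must implement both `TransactionPool` (submission, ready iteration, status) and `TransactionPoolMaintainer` (reacting to imported blocks). Below is a minimal self-contained sketch of that split using simplified stand-in traits, not the real `sp-transaction-pool-api` definitions; method names follow the hunks above, everything else is illustrative, and the maintainer's associated type is renamed `BlockHash` here only to keep the combined bound unambiguous in this toy version.

/// Simplified stand-in for the submission half of the new pool API (illustrative only).
trait TransactionPool {
    type Transaction;
    type Hash;
    /// Submit a single transaction and return its hash on success.
    fn submit_one(&self, xt: Self::Transaction) -> Result<Self::Hash, String>;
}

/// Simplified stand-in for the maintenance half (illustrative only).
trait TransactionPoolMaintainer {
    type BlockHash;
    /// React to a newly imported block: prune included extrinsics and
    /// resubmit the ones from retracted blocks.
    fn maintain(&self, imported: &Self::BlockHash, retracted: &[Self::BlockHash]);
}

/// Service-side code stays generic over one pool type with the combined bound,
/// mirroring the new `TExPool: TransactionPool + TransactionPoolMaintainer`.
fn on_block_imported<P>(pool: &P, imported: &P::BlockHash, retracted: &[P::BlockHash])
where
    P: TransactionPool + TransactionPoolMaintainer,
{
    pool.maintain(imported, retracted);
}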
fn on_exit(&self) -> ::exit_future::Exit; } -impl AbstractService for +impl AbstractService for Service, TSc, NetworkStatus, - NetworkService, TransactionPool, TOc> + NetworkService, TExPool, TOc> where TBl: BlockT, TBackend: 'static + client_api::backend::Backend, TExec: 'static + client::CallExecutor + Send + Sync + Clone, TRtApi: 'static + Send + Sync, TSc: consensus_common::SelectChain + 'static + Clone + Send, - TExPoolApi: 'static + ChainApi, + TExPool: 'static + TransactionPool + + TransactionPoolMaintainer, TOc: 'static + Send + Sync, TNetSpec: NetworkSpecialization, { @@ -217,7 +220,7 @@ where type CallExecutor = TExec; type RuntimeApi = TRtApi; type SelectChain = TSc; - type TransactionPoolApi = TExPoolApi; + type TransactionPool = TExPool; type NetworkSpecialization = TNetSpec; fn telemetry_on_connect_stream(&self) -> mpsc::UnboundedReceiver<()> { @@ -282,7 +285,7 @@ where stream } - fn transaction_pool(&self) -> Arc> { + fn transaction_pool(&self) -> Arc { self.transaction_pool.clone() } @@ -589,35 +592,35 @@ pub struct TransactionPoolAdapter { /// Get transactions for propagation. /// /// Function extracted to simplify the test and prevent creating `ServiceFactory`. -fn transactions_to_propagate(pool: &TransactionPool) +fn transactions_to_propagate(pool: &Pool) -> Vec<(H, B::Extrinsic)> where - PoolApi: ChainApi, + Pool: TransactionPool, B: BlockT, H: std::hash::Hash + Eq + sr_primitives::traits::Member + sr_primitives::traits::MaybeSerialize, - E: txpool::error::IntoPoolError + From, + E: IntoPoolError + From, { pool.ready() .filter(|t| t.is_propagateable()) .map(|t| { - let hash = t.hash.clone(); - let ex: B::Extrinsic = t.data.clone(); + let hash = t.hash().clone(); + let ex: B::Extrinsic = t.data().clone(); (hash, ex) }) .collect() } -impl network::TransactionPool for - TransactionPoolAdapter> +impl network::TransactionPool for + TransactionPoolAdapter where C: network::ClientHandle + Send + Sync, - PoolApi: 'static + ChainApi, + Pool: 'static + TransactionPool, B: BlockT, H: std::hash::Hash + Eq + sr_primitives::traits::Member + sr_primitives::traits::MaybeSerialize, - E: txpool::error::IntoPoolError + From, + E: 'static + IntoPoolError + From, { fn transactions(&self) -> Vec<(H, ::Extrinsic)> { - transactions_to_propagate(&self.pool) + transactions_to_propagate(&*self.pool) } fn hash_of(&self, transaction: &B::Extrinsic) -> H { @@ -647,7 +650,7 @@ where match import_result { Ok(_) => report_handle.report_peer(who, reputation_change_good), Err(e) => match e.into_pool_error() { - Ok(txpool::error::Error::AlreadyImported(_)) => (), + Ok(txpool_api::error::Error::AlreadyImported(_)) => (), Ok(e) => { report_handle.report_peer(who, reputation_change_bad); debug!("Error adding transaction to the pool: {:?}", e) @@ -679,16 +682,14 @@ mod tests { use consensus_common::SelectChain; use sr_primitives::traits::BlindCheckable; use substrate_test_runtime_client::{prelude::*, runtime::{Extrinsic, Transfer}}; + use txpool::{BasicPool, FullChainApi}; #[test] fn should_not_propagate_transactions_that_are_marked_as_such() { // given let (client, longest_chain) = TestClientBuilder::new().build_with_longest_chain(); let client = Arc::new(client); - let pool = Arc::new(TransactionPool::new( - Default::default(), - transaction_pool::FullChainApi::new(client.clone()) - )); + let pool = Arc::new(BasicPool::new(Default::default(), FullChainApi::new(client.clone()))); let best = longest_chain.best_chain().unwrap(); let transaction = Transfer { amount: 5, @@ -701,7 +702,7 @@ mod tests { 
assert_eq!(pool.status().ready, 2); // when - let transactions = transactions_to_propagate(&pool); + let transactions = transactions_to_propagate(&*pool); // then assert_eq!(transactions.len(), 1); diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml index 365fd87bfec86..7a85966a39cbd 100644 --- a/client/service/test/Cargo.toml +++ b/client/service/test/Cargo.toml @@ -18,3 +18,4 @@ consensus = { package = "substrate-consensus-common", path = "../../../primitive client = { package = "substrate-client", path = "../../" } sr-primitives = { path = "../../../primitives/sr-primitives" } primitives = { package = "substrate-primitives", path = "../../../primitives/core" } +txpool-api = { package = "sp-transaction-pool-api", path = "../../../primitives/transaction-pool" } diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 594e5bf7ed400..3b1e3593dc8fc 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -36,6 +36,7 @@ use service::{ use network::{multiaddr, Multiaddr}; use network::config::{NetworkConfiguration, TransportConfig, NodeKeyConfig, Secret, NonReservedPeerMode}; use sr_primitives::{generic::BlockId, traits::Block as BlockT}; +use txpool_api::TransactionPool; /// Maximum duration of single wait call. const MAX_WAIT_TIME: Duration = Duration::from_secs(60 * 3); diff --git a/client/transaction-pool/Cargo.toml b/client/transaction-pool/Cargo.toml index 241c7beb23850..3f1c80c2de4af 100644 --- a/client/transaction-pool/Cargo.toml +++ b/client/transaction-pool/Cargo.toml @@ -7,14 +7,17 @@ edition = "2018" [dependencies] codec = { package = "parity-scale-codec", version = "1.0.0" } derive_more = "0.99.2" -futures = { version = "0.3.1", features = ["thread-pool"] } +futures = { version = "0.3.1", features = ["compat", "compat"] } log = "0.4.8" parking_lot = "0.9.0" primitives = { package = "substrate-primitives", path = "../../primitives/core" } sr-api = { path = "../../primitives/sr-api" } sr-primitives = { path = "../../primitives/sr-primitives" } -tx-runtime-api = { package = "substrate-transaction-pool-runtime-api", path = "../../primitives/transaction-pool/runtime-api" } txpool = { package = "sc-transaction-graph", path = "./graph" } +txpool-api = { package = "sp-transaction-pool-api", path = "../../primitives/transaction-pool" } +txpool-runtime-api = { package = "sp-transaction-pool-runtime-api", path = "../../primitives/transaction-pool/runtime-api" } +client-api = { package = "substrate-client-api", path = "../api" } +sp-blockchain = { path = "../../primitives/blockchain" } [dev-dependencies] keyring = { package = "substrate-keyring", path = "../../primitives/keyring" } diff --git a/client/transaction-pool/graph/Cargo.toml b/client/transaction-pool/graph/Cargo.toml index 685a2f6ff9b4b..c9dd98d8ba8f4 100644 --- a/client/transaction-pool/graph/Cargo.toml +++ b/client/transaction-pool/graph/Cargo.toml @@ -12,6 +12,7 @@ parking_lot = "0.9.0" serde = { version = "1.0.101", features = ["derive"] } primitives = { package = "substrate-primitives", path = "../../../primitives/core" } sr-primitives = { path = "../../../primitives/sr-primitives" } +txpool-api = { package = "sp-transaction-pool-api", path = "../../../primitives/transaction-pool" } [dev-dependencies] assert_matches = "1.3.0" diff --git a/client/transaction-pool/graph/benches/basics.rs b/client/transaction-pool/graph/benches/basics.rs index 1ddd27df9641d..920facaa2cfd7 100644 --- a/client/transaction-pool/graph/benches/basics.rs +++ 
b/client/transaction-pool/graph/benches/basics.rs @@ -48,8 +48,8 @@ fn to_tag(nonce: u64, from: AccountId) -> Tag { impl ChainApi for TestApi { type Block = Block; type Hash = H256; - type Error = error::Error; - type ValidationFuture = futures::future::Ready>; + type Error = txpool_api::error::Error; + type ValidationFuture = futures::future::Ready>; fn validate_transaction( &self, diff --git a/client/transaction-pool/graph/src/base_pool.rs b/client/transaction-pool/graph/src/base_pool.rs index cb37aee07f4b8..a683741aa4076 100644 --- a/client/transaction-pool/graph/src/base_pool.rs +++ b/client/transaction-pool/graph/src/base_pool.rs @@ -34,8 +34,8 @@ use sr_primitives::transaction_validity::{ TransactionLongevity as Longevity, TransactionPriority as Priority, }; +use txpool_api::{error, PoolStatus, InPoolTransaction}; -use crate::error; use crate::future::{FutureTransactions, WaitingTransaction}; use crate::ready::ReadyTransactions; @@ -104,13 +104,65 @@ pub struct Transaction { pub propagate: bool, } -impl Transaction { - /// Returns `true` if the transaction should be propagated to other peers. - pub fn is_propagateable(&self) -> bool { +impl AsRef for Transaction { + fn as_ref(&self) -> &Extrinsic { + &self.data + } +} + +impl InPoolTransaction for Transaction { + type Transaction = Extrinsic; + type Hash = Hash; + + fn data(&self) -> &Extrinsic { + &self.data + } + + fn hash(&self) -> &Hash { + &self.hash + } + + fn priority(&self) -> &Priority { + &self.priority + } + + fn longevity(&self) ->&Longevity { + &self.valid_till + } + + fn requires(&self) -> &[Tag] { + &self.requires + } + + fn provides(&self) -> &[Tag] { + &self.provides + } + + fn is_propagateable(&self) -> bool { self.propagate } } +impl Transaction { + /// Explicit transaction clone. + /// + /// Transaction should be cloned only if absolutely necessary && we want + /// every reason to be commented. That's why we `Transaction` is not `Clone`, + /// but there's explicit `duplicate` method. + pub fn duplicate(&self) -> Self { + Transaction { + data: self.data.clone(), + bytes: self.bytes.clone(), + hash: self.hash.clone(), + priority: self.priority.clone(), + valid_till: self.valid_till.clone(), + requires: self.requires.clone(), + provides: self.provides.clone(), + propagate: self.propagate, + } + } +} + impl fmt::Debug for Transaction where Hash: fmt::Debug, Extrinsic: fmt::Debug, @@ -159,6 +211,7 @@ const RECENTLY_PRUNED_TAGS: usize = 2; /// required tags. #[derive(Debug)] pub struct BasePool { + reject_future_transactions: bool, future: FutureTransactions, ready: ReadyTransactions, /// Store recently pruned tags (for last two invocations). @@ -169,18 +222,37 @@ pub struct BasePool { recently_pruned_index: usize, } -impl Default for BasePool { +impl Default for BasePool { fn default() -> Self { + Self::new(false) + } +} + +impl BasePool { + /// Create new pool given reject_future_transactions flag. + pub fn new(reject_future_transactions: bool) -> Self { BasePool { + reject_future_transactions, future: Default::default(), ready: Default::default(), recently_pruned: Default::default(), recently_pruned_index: 0, } } -} -impl BasePool { + /// Temporary enables future transactions, runs closure and then restores + /// `reject_future_transactions` flag back to previous value. + /// + /// The closure accepts the mutable reference to the pool and original value + /// of the `reject_future_transactions` flag. 
+ pub(crate) fn with_futures_enabled(&mut self, closure: impl FnOnce(&mut Self, bool) -> T) -> T { + let previous = self.reject_future_transactions; + self.reject_future_transactions = false; + let return_value = closure(self, previous); + self.reject_future_transactions = previous; + return_value + } + /// Imports transaction to the pool. /// /// The pool consists of two parts: Future and Ready. @@ -206,6 +278,10 @@ impl BasePool BasePool Vec>> { + self.future.clear() + } + /// Prunes transactions that provide given list of tags. /// /// This will cause all transactions that provide these tags to be removed from the pool, @@ -385,7 +466,7 @@ impl BasePool BasePool Status { - Status { + pub fn status(&self) -> PoolStatus { + PoolStatus { ready: self.ready.len(), ready_bytes: self.ready.bytes(), future: self.future.len(), @@ -423,26 +504,6 @@ impl BasePool bool { - self.ready == 0 && self.future == 0 - } -} - /// Queue limits #[derive(Debug, Clone)] pub struct Limit { @@ -972,4 +1033,85 @@ requires: [03,02], provides: [04], data: [4]}".to_owned() propagate: false, }.is_propagateable(), false); } + + #[test] + fn should_reject_future_transactions() { + // given + let mut pool = pool(); + + // when + pool.reject_future_transactions = true; + + // then + let err = pool.import(Transaction { + data: vec![5u8], + bytes: 1, + hash: 5, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![0]], + provides: vec![], + propagate: true, + }); + + if let Err(error::Error::RejectedFutureTransaction) = err { + } else { + assert!(false, "Invalid error kind: {:?}", err); + } + } + + #[test] + fn should_clear_future_queue() { + // given + let mut pool = pool(); + + // when + pool.import(Transaction { + data: vec![5u8], + bytes: 1, + hash: 5, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![0]], + provides: vec![], + propagate: true, + }).unwrap(); + + // then + assert_eq!(pool.future.len(), 1); + + // and then when + assert_eq!(pool.clear_future().len(), 1); + + // then + assert_eq!(pool.future.len(), 0); + } + + #[test] + fn should_accept_future_transactions_when_explcitly_asked_to() { + // given + let mut pool = pool(); + pool.reject_future_transactions = true; + + // when + let flag_value = pool.with_futures_enabled(|pool, flag| { + pool.import(Transaction { + data: vec![5u8], + bytes: 1, + hash: 5, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![0]], + provides: vec![], + propagate: true, + }).unwrap(); + + flag + }); + + // then + assert_eq!(flag_value, true); + assert_eq!(pool.reject_future_transactions, true); + assert_eq!(pool.future.len(), 1); + } } diff --git a/client/transaction-pool/graph/src/future.rs b/client/transaction-pool/graph/src/future.rs index c7b13c912df18..8b01e9d654144 100644 --- a/client/transaction-pool/graph/src/future.rs +++ b/client/transaction-pool/graph/src/future.rs @@ -227,6 +227,12 @@ impl FutureTransactions { self.waiting.values().map(|waiting| &*waiting.transaction) } + /// Removes and returns all future transactions. + pub fn clear(&mut self) -> Vec>> { + self.wanted_tags.clear(); + self.waiting.drain().map(|(_, tx)| tx.transaction).collect() + } + /// Returns number of transactions in the Future queue. 
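`with_futures_enabled` above is a plain save-run-restore helper: remember the current `reject_future_transactions` value, clear it while the closure runs, then put the old value back, handing the closure both the pool and the original flag so callers such as `resubmit` can decide whether to clear the future queue afterwards. A self-contained sketch of the same pattern on a toy type (illustrative only):

struct ToyPool {
    reject_future_transactions: bool,
}

impl ToyPool {
    /// Temporarily allow future transactions while `closure` runs, then restore
    /// the previous flag; the closure also receives the original value.
    fn with_futures_enabled<T>(&mut self, closure: impl FnOnce(&mut Self, bool) -> T) -> T {
        let previous = self.reject_future_transactions;
        self.reject_future_transactions = false;
        let result = closure(self, previous);
        self.reject_future_transactions = previous;
        result
    }
}

fn main() {
    let mut pool = ToyPool { reject_future_transactions: true };
    let was_rejecting = pool.with_futures_enabled(|pool, previous| {
        // Inside the closure future transactions are accepted again.
        assert!(!pool.reject_future_transactions);
        previous
    });
    assert!(was_rejecting);
    // The original flag value is restored afterwards.
    assert!(pool.reject_future_transactions);
}

Note the flag is restored only on the normal return path; a drop guard would also cover panics if that ever mattered for a caller.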
pub fn len(&self) -> usize { self.waiting.len() diff --git a/client/transaction-pool/graph/src/lib.rs b/client/transaction-pool/graph/src/lib.rs index 715e60874be95..db92bef2728ec 100644 --- a/client/transaction-pool/graph/src/lib.rs +++ b/client/transaction-pool/graph/src/lib.rs @@ -32,11 +32,9 @@ mod rotator; mod validated_pool; pub mod base_pool; -pub mod error; pub mod watcher; -pub use self::error::IntoPoolError; -pub use self::base_pool::{Transaction, Status}; +pub use self::base_pool::Transaction; pub use self::pool::{ Pool, Options, ChainApi, EventStream, ExtrinsicFor, diff --git a/client/transaction-pool/graph/src/listener.rs b/client/transaction-pool/graph/src/listener.rs index a96c31544fc75..f9b71555e264f 100644 --- a/client/transaction-pool/graph/src/listener.rs +++ b/client/transaction-pool/graph/src/listener.rs @@ -92,7 +92,7 @@ impl Listene /// Transaction was removed as invalid. pub fn invalid(&mut self, tx: &H) { - warn!(target: "transaction-pool", "Extrinsic invalid: {:?}", tx); + warn!(target: "txpool", "Extrinsic invalid: {:?}", tx); self.fire(tx, |watcher| watcher.invalid()); } diff --git a/client/transaction-pool/graph/src/pool.rs b/client/transaction-pool/graph/src/pool.rs index d6ef61d17efd5..d40942c5e9365 100644 --- a/client/transaction-pool/graph/src/pool.rs +++ b/client/transaction-pool/graph/src/pool.rs @@ -21,7 +21,6 @@ use std::{ }; use crate::base_pool as base; -use crate::error; use crate::watcher::Watcher; use serde::Serialize; @@ -35,6 +34,8 @@ use sr_primitives::{ traits::{self, SaturatedConversion}, transaction_validity::{TransactionValidity, TransactionTag as Tag, TransactionValidityError}, }; +use txpool_api::{error, PoolStatus}; + use crate::validated_pool::{ValidatedPool, ValidatedTransaction}; /// Modification notification event stream type; @@ -92,6 +93,8 @@ pub struct Options { pub ready: base::Limit, /// Future queue limits. pub future: base::Limit, + /// Reject future transactions. + pub reject_future_transactions: bool, } impl Default for Options { @@ -105,6 +108,7 @@ impl Default for Options { count: 128, total_bytes: 1 * 1024 * 1024, }, + reject_future_transactions: false, } } } @@ -131,7 +135,9 @@ impl Pool { let validated_pool = self.validated_pool.clone(); self.verify(at, xts, force) .map(move |validated_transactions| validated_transactions - .map(|validated_transactions| validated_pool.submit(validated_transactions))) + .map(|validated_transactions| validated_pool.submit(validated_transactions + .into_iter() + .map(|(_, tx)| tx)))) } /// Imports one unverified extrinsic to the pool @@ -161,10 +167,40 @@ impl Pool { let validated_pool = self.validated_pool.clone(); Either::Right( self.verify_one(at, block_number, xt, false) - .map(move |validated_transactions| validated_pool.submit_and_watch(validated_transactions)) + .map(move |validated_transactions| validated_pool.submit_and_watch(validated_transactions.1)) ) } + /// Revalidate all ready transactions. + /// + /// Returns future that performs validation of all ready transactions and + /// then resubmits all transactions back to the pool. + pub fn revalidate_ready(&self, at: &BlockId) -> impl Future> { + let validated_pool = self.validated_pool.clone(); + let ready = self.validated_pool.ready().map(|tx| tx.data.clone()); + self.verify(at, ready, false) + .map(move |revalidated_transactions| revalidated_transactions.map( + move |revalidated_transactions| validated_pool.resubmit(revalidated_transactions) + )) + } + + /// Prunes known ready transactions. 
+ /// + /// Used to clear the pool from transactions that were part of recently imported block. + /// The main difference from the `prune` is that we do not revalidate any transactions + /// and ignore unknown passed hashes. + pub fn prune_known(&self, at: &BlockId, hashes: &[ExHash]) -> Result<(), B::Error> { + // Get details of all extrinsics that are already in the pool + let in_pool_tags = self.validated_pool.extrinsics_tags(hashes) + .into_iter().filter_map(|x| x).flat_map(|x| x); + + // Prune all transactions that provide given tags + let prune_status = self.validated_pool.prune_tags(in_pool_tags)?; + let pruned_transactions = hashes.into_iter().cloned() + .chain(prune_status.pruned.iter().map(|tx| tx.hash.clone())); + self.validated_pool.fire_pruned(at, pruned_transactions) + } + /// Prunes ready transactions. /// /// Used to clear the pool from transactions that were part of recently imported block. @@ -184,7 +220,8 @@ impl Pool { extrinsics.len() ); // Get details of all extrinsics that are already in the pool - let (in_pool_hashes, in_pool_tags) = self.validated_pool.extrinsics_tags(extrinsics); + let in_pool_hashes = extrinsics.iter().map(|extrinsic| self.hash_of(extrinsic)).collect::>(); + let in_pool_tags = self.validated_pool.extrinsics_tags(&in_pool_hashes); // Zip the ones from the pool with the full list (we get pairs `(Extrinsic, Option>)`) let all = extrinsics.iter().zip(in_pool_tags.into_iter()); @@ -274,7 +311,7 @@ impl Pool { &at, known_imported_hashes, pruned_hashes, - reverified_transactions, + reverified_transactions.into_iter().map(|(_, xt)| xt).collect(), )) ))) } @@ -303,7 +340,7 @@ impl Pool { } /// Returns pool status. - pub fn status(&self) -> base::Status { + pub fn status(&self) -> PoolStatus { self.validated_pool.status() } @@ -325,7 +362,7 @@ impl Pool { at: &BlockId, xts: impl IntoIterator>, force: bool, - ) -> impl Future>, B::Error>> { + ) -> impl Future, ValidatedTransactionFor>, B::Error>> { // we need a block number to compute tx validity let block_number = match self.resolve_block_number(at) { Ok(block_number) => block_number, @@ -338,7 +375,7 @@ impl Pool { ); // make single validation future that waits all until all extrinsics are validated - Either::Right(join_all(validation_futures).then(|x| ready(Ok(x)))) + Either::Right(join_all(validation_futures).then(|x| ready(Ok(x.into_iter().collect())))) } /// Returns future that validates single transaction at given block. 
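`prune_known` above is the no-revalidation counterpart to `prune`: given the hashes of extrinsics that just made it into a block, it reads the tags the pool already stored for them, prunes everything providing those tags, and fires `pruned` notifications, without a single runtime call. The sketch below shows the intended call pattern, mirroring the light-client maintainer added later in this patch; the generic parameters are reconstructed from the flattened hunks, so treat the exact signatures as approximate rather than authoritative (`txpool` is the in-crate rename of `sc-transaction-graph` from the Cargo.toml hunk above).

use sr_primitives::generic::BlockId;

/// Prune the extrinsics of a freshly imported block from the pool by hash only,
/// roughly what `LightBasicPoolMaintainer::prune` does further down.
fn prune_included<A: txpool::ChainApi>(
    pool: &txpool::Pool<A>,
    at: &BlockId<A::Block>,
    block_extrinsics: &[txpool::ExtrinsicFor<A>],
) -> Result<(), A::Error> {
    // Hash with the pool's own hasher so the hashes match the in-pool entries.
    let hashes: Vec<_> = block_extrinsics.iter().map(|xt| pool.hash_of(xt)).collect();
    // Unknown hashes are simply ignored; known ones are pruned via their stored tags.
    pool.prune_known(at, &hashes)
}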
@@ -348,14 +385,17 @@ impl Pool { block_number: NumberFor, xt: ExtrinsicFor, force: bool, - ) -> impl Future> { + ) -> impl Future, ValidatedTransactionFor)> { let (hash, bytes) = self.validated_pool.api().hash_and_length(&xt); if !force && self.validated_pool.is_banned(&hash) { - return Either::Left(ready(ValidatedTransaction::Invalid(hash, error::Error::TemporarilyBanned.into()))) + return Either::Left(ready(( + hash.clone(), + ValidatedTransaction::Invalid(hash, error::Error::TemporarilyBanned.into()), + ))) } Either::Right(self.validated_pool.api().validate_transaction(block_id, xt.clone()) - .then(move |validation_result| ready(match validation_result { + .then(move |validation_result| ready((hash.clone(), match validation_result { Ok(validity) => match validity { Ok(validity) => if validity.provides.is_empty() { ValidatedTransaction::Invalid(hash, error::Error::NoTagsProvided.into()) @@ -379,7 +419,7 @@ impl Pool { ValidatedTransaction::Unknown(hash, error::Error::UnknownTransaction(e).into()), }, Err(e) => ValidatedTransaction::Invalid(hash, e), - }))) + })))) } } @@ -391,50 +431,30 @@ impl Clone for Pool { } } -impl sr_primitives::offchain::TransactionPool for Pool { - fn submit_at( - &self, - at: &BlockId, - extrinsic: ::Extrinsic, - ) -> Result<(), ()> { - log::debug!( - target: "txpool", - "(offchain call) Submitting a transaction to the pool: {:?}", - extrinsic - ); - - let result = futures::executor::block_on(self.submit_one(&at, extrinsic)); - - result.map(|_| ()) - .map_err(|e| log::warn!( - target: "txpool", - "(offchain call) Error submitting a transaction to the pool: {:?}", - e - )) - } -} - #[cfg(test)] mod tests { use std::{ - collections::HashMap, + collections::{HashMap, HashSet}, time::Instant, }; use parking_lot::Mutex; use futures::executor::block_on; use super::*; + use txpool_api::TransactionStatus; use sr_primitives::transaction_validity::{ValidTransaction, InvalidTransaction}; use codec::Encode; use test_runtime::{Block, Extrinsic, Transfer, H256, AccountId}; use assert_matches::assert_matches; use crate::base_pool::Limit; - use crate::watcher; const INVALID_NONCE: u64 = 254; #[derive(Clone, Debug, Default)] struct TestApi { delay: Arc>>>, + invalidate: Arc>>, + clear_requirements: Arc>>, + add_requirements: Arc>>, } impl ChainApi for TestApi { @@ -449,6 +469,7 @@ mod tests { at: &BlockId, uxt: ExtrinsicFor, ) -> Self::ValidationFuture { + let hash = self.hash_and_length(&uxt).0; let block_number = self.block_id_to_number(at).unwrap().unwrap(); let nonce = uxt.transfer().nonce; @@ -462,16 +483,30 @@ mod tests { } } + if self.invalidate.lock().contains(&hash) { + return futures::future::ready(Ok(InvalidTransaction::Custom(0).into())); + } + futures::future::ready(if nonce < block_number { Ok(InvalidTransaction::Stale.into()) } else { - Ok(Ok(ValidTransaction { + let mut transaction = ValidTransaction { priority: 4, requires: if nonce > block_number { vec![vec![nonce as u8 - 1]] } else { vec![] }, provides: if nonce == INVALID_NONCE { vec![] } else { vec![vec![nonce as u8]] }, longevity: 3, propagate: true, - })) + }; + + if self.clear_requirements.lock().contains(&hash) { + transaction.requires.clear(); + } + + if self.add_requirements.lock().contains(&hash) { + transaction.requires.push(vec![128]); + } + + Ok(Ok(transaction)) }) } @@ -651,6 +686,7 @@ mod tests { let pool = Pool::new(Options { ready: limit.clone(), future: limit.clone(), + ..Default::default() }, TestApi::default()); let hash1 = block_on(pool.submit_one(&BlockId::Number(0), uxt(Transfer { @@ 
-685,6 +721,7 @@ mod tests { let pool = Pool::new(Options { ready: limit.clone(), future: limit.clone(), + ..Default::default() }, TestApi::default()); // when @@ -742,8 +779,8 @@ mod tests { // then let mut stream = futures::executor::block_on_stream(watcher.into_stream()); - assert_eq!(stream.next(), Some(watcher::Status::Ready)); - assert_eq!(stream.next(), Some(watcher::Status::Finalized(H256::from_low_u64_be(2).into()))); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(H256::from_low_u64_be(2).into()))); assert_eq!(stream.next(), None); } @@ -767,8 +804,8 @@ mod tests { // then let mut stream = futures::executor::block_on_stream(watcher.into_stream()); - assert_eq!(stream.next(), Some(watcher::Status::Ready)); - assert_eq!(stream.next(), Some(watcher::Status::Finalized(H256::from_low_u64_be(2).into()))); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(H256::from_low_u64_be(2).into()))); assert_eq!(stream.next(), None); } @@ -796,8 +833,8 @@ mod tests { // then let mut stream = futures::executor::block_on_stream(watcher.into_stream()); - assert_eq!(stream.next(), Some(watcher::Status::Future)); - assert_eq!(stream.next(), Some(watcher::Status::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::Future)); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); } #[test] @@ -819,8 +856,8 @@ mod tests { // then let mut stream = futures::executor::block_on_stream(watcher.into_stream()); - assert_eq!(stream.next(), Some(watcher::Status::Ready)); - assert_eq!(stream.next(), Some(watcher::Status::Invalid)); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::Invalid)); assert_eq!(stream.next(), None); } @@ -846,8 +883,8 @@ mod tests { // then let mut stream = futures::executor::block_on_stream(watcher.into_stream()); - assert_eq!(stream.next(), Some(watcher::Status::Ready)); - assert_eq!(stream.next(), Some(watcher::Status::Broadcast(peers))); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::Broadcast(peers))); } #[test] @@ -860,6 +897,7 @@ mod tests { let pool = Pool::new(Options { ready: limit.clone(), future: limit.clone(), + ..Default::default() }, TestApi::default()); let xt = uxt(Transfer { @@ -883,8 +921,8 @@ mod tests { // then let mut stream = futures::executor::block_on_stream(watcher.into_stream()); - assert_eq!(stream.next(), Some(watcher::Status::Ready)); - assert_eq!(stream.next(), Some(watcher::Status::Dropped)); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::Dropped)); } #[test] @@ -941,5 +979,81 @@ mod tests { assert_eq!(pool.status().future, 0); } } + + #[test] + fn should_revalidate_ready_transactions() { + fn transfer(nonce: u64) -> Extrinsic { + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce, + }) + } + + // given + let pool = pool(); + let tx0 = transfer(0); + let hash0 = pool.validated_pool.api().hash_and_length(&tx0).0; + let watcher0 = block_on(pool.submit_and_watch(&BlockId::Number(0), tx0)).unwrap(); + let tx1 = transfer(1); + let hash1 = pool.validated_pool.api().hash_and_length(&tx1).0; + let watcher1 = block_on(pool.submit_and_watch(&BlockId::Number(0), tx1)).unwrap(); + let tx2 = transfer(2); + let hash2 = 
pool.validated_pool.api().hash_and_length(&tx2).0; + let watcher2 = block_on(pool.submit_and_watch(&BlockId::Number(0), tx2)).unwrap(); + let tx3 = transfer(3); + let hash3 = pool.validated_pool.api().hash_and_length(&tx3).0; + let watcher3 = block_on(pool.submit_and_watch(&BlockId::Number(0), tx3)).unwrap(); + let tx4 = transfer(4); + let hash4 = pool.validated_pool.api().hash_and_length(&tx4).0; + let watcher4 = block_on(pool.submit_and_watch(&BlockId::Number(0), tx4)).unwrap(); + assert_eq!(pool.status().ready, 5); + + // when + pool.validated_pool.api().invalidate.lock().insert(hash3); + pool.validated_pool.api().clear_requirements.lock().insert(hash1); + pool.validated_pool.api().add_requirements.lock().insert(hash0); + block_on(pool.revalidate_ready(&BlockId::Number(0))).unwrap(); + + // then + // hash0 now has unsatisfied requirements => it is moved to the future queue + // hash1 is now independent of hash0 => it is in ready queue + // hash2 still depends on hash1 => it is in ready queue + // hash3 is now invalid => it is removed from the pool + // hash4 now depends on invalidated hash3 => it is moved to the future queue + // + // events for hash3 are: Ready, Invalid + // events for hash4 are: Ready, Invalid + assert_eq!(pool.status().ready, 2); + assert_eq!( + futures::executor::block_on_stream(watcher3.into_stream()).collect::>(), + vec![TransactionStatus::Ready, TransactionStatus::Invalid], + ); + + // when + pool.validated_pool.remove_invalid(&[hash0, hash1, hash2, hash4]); + + // then + // events for hash0 are: Ready, Future, Invalid + // events for hash1 are: Ready, Invalid + // events for hash2 are: Ready, Invalid + assert_eq!( + futures::executor::block_on_stream(watcher0.into_stream()).collect::>(), + vec![TransactionStatus::Ready, TransactionStatus::Future, TransactionStatus::Invalid], + ); + assert_eq!( + futures::executor::block_on_stream(watcher1.into_stream()).collect::>(), + vec![TransactionStatus::Ready, TransactionStatus::Invalid], + ); + assert_eq!( + futures::executor::block_on_stream(watcher2.into_stream()).collect::>(), + vec![TransactionStatus::Ready, TransactionStatus::Invalid], + ); + assert_eq!( + futures::executor::block_on_stream(watcher4.into_stream()).collect::>(), + vec![TransactionStatus::Ready, TransactionStatus::Future, TransactionStatus::Invalid], + ); + } } diff --git a/client/transaction-pool/graph/src/ready.rs b/client/transaction-pool/graph/src/ready.rs index 3698bf447eeaa..3684572bd0233 100644 --- a/client/transaction-pool/graph/src/ready.rs +++ b/client/transaction-pool/graph/src/ready.rs @@ -28,8 +28,8 @@ use sr_primitives::traits::Member; use sr_primitives::transaction_validity::{ TransactionTag as Tag, }; +use txpool_api::error; -use crate::error; use crate::future::WaitingTransaction; use crate::base_pool::Transaction; @@ -433,6 +433,7 @@ impl ReadyTransactions { } } +/// Iterator of ready transactions ordered by priority. 
pub struct BestIterator { all: Arc>>>, awaiting: HashMap)>, diff --git a/client/transaction-pool/graph/src/validated_pool.rs b/client/transaction-pool/graph/src/validated_pool.rs index 7317d41f42e97..2aca2adb72f3a 100644 --- a/client/transaction-pool/graph/src/validated_pool.rs +++ b/client/transaction-pool/graph/src/validated_pool.rs @@ -18,16 +18,16 @@ use std::{ collections::{HashSet, HashMap}, fmt, hash, + sync::Arc, time, }; use crate::base_pool as base; -use crate::error; use crate::listener::Listener; use crate::rotator::PoolRotator; use crate::watcher::Watcher; use serde::Serialize; -use log::debug; +use log::{debug, warn}; use futures::channel::mpsc; use parking_lot::{Mutex, RwLock}; @@ -36,6 +36,7 @@ use sr_primitives::{ traits::{self, SaturatedConversion}, transaction_validity::TransactionTag as Tag, }; +use txpool_api::{error, PoolStatus}; use crate::base_pool::PruneStatus; use crate::pool::{EventStream, Options, ChainApi, BlockHash, ExHash, ExtrinsicFor, TransactionFor}; @@ -76,11 +77,12 @@ pub(crate) struct ValidatedPool { impl ValidatedPool { /// Create a new transaction pool. pub fn new(options: Options, api: B) -> Self { + let base_pool = base::BasePool::new(options.reject_future_transactions); ValidatedPool { api, options, listener: Default::default(), - pool: Default::default(), + pool: RwLock::new(base_pool), import_notification_sinks: Default::default(), rotator: Default::default(), } @@ -189,18 +191,134 @@ impl ValidatedPool { } } + /// Resubmits revalidated transactions back to the pool. + /// + /// Removes and then submits passed transactions and all dependent transactions. + /// Transactions that are missing from the pool are not submitted. + pub fn resubmit(&self, mut updated_transactions: HashMap, ValidatedTransactionFor>) { + #[derive(Debug, Clone, Copy, PartialEq)] + enum Status { Future, Ready, Failed, Dropped }; + + let (mut initial_statuses, final_statuses) = { + let mut pool = self.pool.write(); + + // remove all passed transactions from the ready/future queues + // (this may remove additional transactions as well) + // + // for every transaction that has an entry in the `updated_transactions`, + // we store updated validation result in txs_to_resubmit + // for every transaction that has no entry in the `updated_transactions`, + // we store last validation result (i.e. 
the pool entry) in txs_to_resubmit + let mut initial_statuses = HashMap::new(); + let mut txs_to_resubmit = Vec::with_capacity(updated_transactions.len()); + while !updated_transactions.is_empty() { + let hash = updated_transactions.keys().next().cloned().expect("transactions is not empty; qed"); + + // note we are not considering tx with hash invalid here - we just want + // to remove it along with dependent transactions and `remove_invalid()` + // does exactly what we need + let removed = pool.remove_invalid(&[hash.clone()]); + for removed_tx in removed { + let removed_hash = removed_tx.hash.clone(); + let updated_transaction = updated_transactions.remove(&removed_hash); + let tx_to_resubmit = if let Some(updated_tx) = updated_transaction { + updated_tx + } else { + // in most cases we'll end up in successful `try_unwrap`, but if not + // we still need to reinsert transaction back to the pool => duplicate call + let transaction = match Arc::try_unwrap(removed_tx) { + Ok(transaction) => transaction, + Err(transaction) => transaction.duplicate(), + }; + ValidatedTransaction::Valid(transaction) + }; + + initial_statuses.insert(removed_hash.clone(), Status::Ready); + txs_to_resubmit.push((removed_hash, tx_to_resubmit)); + } + } + + // if we're rejecting future transactions, then insertion order matters here: + // if tx1 depends on tx2, then if tx1 is inserted before tx2, then it goes + // to the future queue and gets rejected immediately + // => let's temporary stop rejection and clear future queue before return + pool.with_futures_enabled(|pool, reject_future_transactions| { + // now resubmit all removed transactions back to the pool + let mut final_statuses = HashMap::new(); + for (hash, tx_to_resubmit) in txs_to_resubmit { + match tx_to_resubmit { + ValidatedTransaction::Valid(tx) => match pool.import(tx) { + Ok(imported) => match imported { + base::Imported::Ready { promoted, failed, removed, .. } => { + final_statuses.insert(hash, Status::Ready); + for hash in promoted { + final_statuses.insert(hash, Status::Ready); + } + for hash in failed { + final_statuses.insert(hash, Status::Failed); + } + for tx in removed { + final_statuses.insert(tx.hash.clone(), Status::Dropped); + } + }, + base::Imported::Future { .. 
} => { + final_statuses.insert(hash, Status::Future); + }, + }, + Err(err) => { + // we do not want to fail if single transaction import has failed + // nor we do want to propagate this error, because it could tx unknown to caller + // => let's just notify listeners (and issue debug message) + warn!( + target: "txpool", + "[{:?}] Removing invalid transaction from update: {}", + hash, + err, + ); + final_statuses.insert(hash, Status::Failed); + }, + }, + ValidatedTransaction::Invalid(_, _) | ValidatedTransaction::Unknown(_, _) => { + final_statuses.insert(hash, Status::Failed); + }, + } + } + + // if the pool is configured to reject future transactions, let's clear the future + // queue, updating final statuses as required + if reject_future_transactions { + for future_tx in pool.clear_future() { + final_statuses.insert(future_tx.hash.clone(), Status::Dropped); + } + } + + (initial_statuses, final_statuses) + }) + }; + + // and now let's notify listeners about status changes + let mut listener = self.listener.write(); + for (hash, final_status) in final_statuses { + let initial_status = initial_statuses.remove(&hash); + if initial_status.is_none() || Some(final_status) != initial_status { + match final_status { + Status::Future => listener.future(&hash), + Status::Ready => listener.ready(&hash, None), + Status::Failed => listener.invalid(&hash), + Status::Dropped => listener.dropped(&hash, None), + } + } + } + } + /// For each extrinsic, returns tags that it provides (if known), or None (if it is unknown). - pub fn extrinsics_tags(&self, extrinsics: &[ExtrinsicFor]) -> (Vec>, Vec>>) { - let hashes = extrinsics.iter().map(|extrinsic| self.api.hash_and_length(extrinsic).0).collect::>(); - let in_pool = self.pool.read().by_hash(&hashes); - ( - hashes, - in_pool.into_iter() - .map(|existing_in_pool| existing_in_pool - .map(|transaction| transaction.provides.iter().cloned() - .collect())) - .collect(), - ) + pub fn extrinsics_tags(&self, hashes: &[ExHash]) -> Vec>> { + self.pool.read().by_hash(&hashes) + .into_iter() + .map(|existing_in_pool| existing_in_pool + .map(|transaction| transaction.provides.iter().cloned() + .collect())) + .collect() } /// Prunes ready transactions that provide given list of tags. @@ -249,20 +367,29 @@ impl ValidatedPool { // Fire `pruned` notifications for collected hashes and make sure to include // `known_imported_hashes` since they were just imported as part of the block. let hashes = hashes.chain(known_imported_hashes.into_iter()); - { - let header_hash = self.api.block_id_to_hash(at)? - .ok_or_else(|| error::Error::InvalidBlockId(format!("{:?}", at)).into())?; - let mut listener = self.listener.write(); - for h in hashes { - listener.pruned(header_hash, &h); - } - } + self.fire_pruned(at, hashes)?; + // perform regular cleanup of old transactions in the pool // and update temporary bans. self.clear_stale(at)?; Ok(()) } + /// Fire notifications for pruned transactions. + pub fn fire_pruned( + &self, + at: &BlockId, + hashes: impl Iterator>, + ) -> Result<(), B::Error> { + let header_hash = self.api.block_id_to_hash(at)? + .ok_or_else(|| error::Error::InvalidBlockId(format!("{:?}", at)).into())?; + let mut listener = self.listener.write(); + for h in hashes { + listener.pruned(header_hash, &h); + } + Ok(()) + } + /// Removes stale transactions from the pool. /// /// Stale transactions are transaction beyond their longevity period. @@ -270,8 +397,8 @@ impl ValidatedPool { /// See `prune_tags` if you want this. 
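The listener notification at the end of `resubmit` above is a status diff: every transaction pulled out of the queues gets an initial status, reinsertion produces a final status, and an event is fired only for hashes whose status is new or has changed. A self-contained sketch of that diffing step with toy types (illustrative only):

use std::collections::HashMap;

#[derive(Debug, Clone, Copy, PartialEq)]
enum Status { Future, Ready, Failed, Dropped }

/// Return the (hash, status) pairs that should be reported to listeners:
/// hashes that are new, or whose status changed between the two snapshots.
fn status_changes(
    mut initial: HashMap<u64, Status>,
    finals: HashMap<u64, Status>,
) -> Vec<(u64, Status)> {
    finals
        .into_iter()
        .filter(|(hash, final_status)| {
            let initial_status = initial.remove(hash);
            initial_status.is_none() || Some(*final_status) != initial_status
        })
        .collect()
}

fn main() {
    let initial = [(1, Status::Ready), (2, Status::Ready)].into_iter().collect();
    let finals = [(1, Status::Ready), (2, Status::Future), (3, Status::Failed)]
        .into_iter()
        .collect();
    let mut changes = status_changes(initial, finals);
    changes.sort_by_key(|(hash, _)| *hash);
    // 1 stayed Ready (no event), 2 moved to Future, 3 is new and Failed.
    assert_eq!(changes, vec![(2, Status::Future), (3, Status::Failed)]);
}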
pub fn clear_stale(&self, at: &BlockId) -> Result<(), B::Error> { let block_number = self.api.block_id_to_number(at)? - .ok_or_else(|| error::Error::InvalidBlockId(format!("{:?}", at)).into())? - .saturated_into::(); + .ok_or_else(|| error::Error::InvalidBlockId(format!("{:?}", at)).into())? + .saturated_into::(); let now = time::Instant::now(); let to_remove = { self.ready() @@ -346,7 +473,7 @@ impl ValidatedPool { } /// Returns pool status. - pub fn status(&self) -> base::Status { + pub fn status(&self) -> PoolStatus { self.pool.read().status() } } diff --git a/client/transaction-pool/graph/src/watcher.rs b/client/transaction-pool/graph/src/watcher.rs index 11d6b9f40742a..fa93386c8cad9 100644 --- a/client/transaction-pool/graph/src/watcher.rs +++ b/client/transaction-pool/graph/src/watcher.rs @@ -20,34 +20,14 @@ use futures::{ Stream, channel::mpsc, }; -use serde::{Serialize, Deserialize}; - -/// Possible extrinsic status events -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub enum Status { - /// Extrinsic is part of the future queue. - Future, - /// Extrinsic is part of the ready queue. - Ready, - /// Extrinsic has been finalized in block with given hash. - Finalized(H2), - /// Some state change (perhaps another extrinsic was included) rendered this extrinsic invalid. - Usurped(H), - /// The extrinsic has been broadcast to the given peers. - Broadcast(Vec), - /// Extrinsic has been dropped from the pool because of the limit. - Dropped, - /// Extrinsic was detected as invalid. - Invalid, -} +use txpool_api::TransactionStatus; /// Extrinsic watcher. /// /// Represents a stream of status updates for particular extrinsic. #[derive(Debug)] pub struct Watcher { - receiver: mpsc::UnboundedReceiver>, + receiver: mpsc::UnboundedReceiver>, hash: H, } @@ -60,7 +40,7 @@ impl Watcher { /// Pipe the notifications to given sink. /// /// Make sure to drive the future to completion. - pub fn into_stream(self) -> impl Stream> { + pub fn into_stream(self) -> impl Stream> { self.receiver } } @@ -68,7 +48,7 @@ impl Watcher { /// Sender part of the watcher. Exposed only for testing purposes. #[derive(Debug)] pub struct Sender { - receivers: Vec>>, + receivers: Vec>>, finalized: bool, } @@ -94,49 +74,48 @@ impl Sender { /// Transaction became ready. pub fn ready(&mut self) { - self.send(Status::Ready) + self.send(TransactionStatus::Ready) } /// Transaction was moved to future. pub fn future(&mut self) { - self.send(Status::Future) + self.send(TransactionStatus::Future) } /// Some state change (perhaps another extrinsic was included) rendered this extrinsic invalid. pub fn usurped(&mut self, hash: H) { - self.send(Status::Usurped(hash)) + self.send(TransactionStatus::Usurped(hash)) } /// Extrinsic has been finalized in block with given hash. pub fn finalized(&mut self, hash: H2) { - self.send(Status::Finalized(hash)); + self.send(TransactionStatus::Finalized(hash)); self.finalized = true; } /// Extrinsic has been marked as invalid by the block builder. pub fn invalid(&mut self) { - self.send(Status::Invalid); + self.send(TransactionStatus::Invalid); // we mark as finalized as there are no more notifications self.finalized = true; } /// Transaction has been dropped from the pool because of the limit. pub fn dropped(&mut self) { - self.send(Status::Dropped); + self.send(TransactionStatus::Dropped); } /// The extrinsic has been broadcast to the given peers. 
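With `watcher::Status` gone, every watcher stream now yields the shared `TransactionStatus` type from `txpool-api`, so RPC subscriptions and tests consume a single vocabulary of events. A self-contained sketch of the sender/stream pairing the watcher is built on, using a local stand-in enum instead of the real `TransactionStatus` (illustrative only; uses the `futures` 0.3 channel API as in the hunks above):

use futures::{channel::mpsc, executor::block_on_stream};

/// Local stand-in for txpool-api's TransactionStatus (illustrative only).
#[derive(Debug, Clone, PartialEq)]
enum Status<H> {
    Ready,
    Finalized(H),
    Dropped,
}

fn main() {
    // The watcher side is just the receiving end of an unbounded channel.
    let (sender, receiver) = mpsc::unbounded::<Status<u64>>();

    // The pool side pushes status transitions as they happen.
    sender.unbounded_send(Status::Ready).unwrap();
    sender.unbounded_send(Status::Finalized(2)).unwrap();
    drop(sender); // no more events: the stream terminates

    // Consumers (tests, RPC subscriptions) just drain the stream.
    let events: Vec<_> = block_on_stream(receiver).collect();
    assert_eq!(events, vec![Status::Ready, Status::Finalized(2)]);
}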
pub fn broadcast(&mut self, peers: Vec) { - self.send(Status::Broadcast(peers)) + self.send(TransactionStatus::Broadcast(peers)) } - /// Returns true if the are no more listeners for this extrinsic or it was finalized. pub fn is_done(&self) -> bool { self.finalized || self.receivers.is_empty() } - fn send(&mut self, status: Status) { + fn send(&mut self, status: TransactionStatus) { self.receivers.retain(|sender| sender.unbounded_send(status.clone()).is_ok()) } } diff --git a/client/transaction-pool/src/api.rs b/client/transaction-pool/src/api.rs index 28681fb11ba8a..87b494201d5e3 100644 --- a/client/transaction-pool/src/api.rs +++ b/client/transaction-pool/src/api.rs @@ -17,20 +17,20 @@ //! Chain api required for the transaction pool. use std::{marker::PhantomData, pin::Pin, sync::Arc}; +use codec::{Decode, Encode}; +use futures::{channel::oneshot, executor::{ThreadPool, ThreadPoolBuilder}, future::{Future, FutureExt, ready}}; -use codec::Encode; - -use futures::{channel::oneshot, executor::{ThreadPool, ThreadPoolBuilder}, future::Future}; - +use client_api::{ + blockchain::HeaderBackend, + light::{Fetcher, RemoteCallRequest} +}; use primitives::{H256, Blake2Hasher, Hasher}; - -use sr_primitives::{generic::BlockId, traits, transaction_validity::TransactionValidity}; - -use tx_runtime_api::TaggedTransactionQueue; +use sr_primitives::{generic::BlockId, traits::{self, Block as BlockT}, transaction_validity::TransactionValidity}; +use txpool_runtime_api::TaggedTransactionQueue; use crate::error::{self, Error}; -/// The transaction pool logic +/// The transaction pool logic for full client. pub struct FullChainApi { client: Arc, pool: ThreadPool, @@ -38,7 +38,7 @@ pub struct FullChainApi { } impl FullChainApi where - Block: traits::Block, + Block: BlockT, T: traits::ProvideRuntimeApi + traits::BlockIdTo { /// Create new transaction pool logic. pub fn new(client: Arc) -> Self { @@ -55,7 +55,7 @@ impl FullChainApi where } impl txpool::ChainApi for FullChainApi where - Block: traits::Block, + Block: BlockT, T: traits::ProvideRuntimeApi + traits::BlockIdTo + 'static + Send + Sync, T::Api: TaggedTransactionQueue, sr_api::ApiErrorFor: Send, @@ -110,3 +110,84 @@ impl txpool::ChainApi for FullChainApi where }) } } + +/// The transaction pool logic for light client. +pub struct LightChainApi { + client: Arc, + fetcher: Arc, + _phantom: PhantomData, +} + +impl LightChainApi where + Block: BlockT, + T: HeaderBackend, + F: Fetcher, +{ + /// Create new transaction pool logic. 
+ pub fn new(client: Arc, fetcher: Arc) -> Self { + LightChainApi { + client, + fetcher, + _phantom: Default::default(), + } + } +} + +impl txpool::ChainApi for LightChainApi where + Block: BlockT, + T: HeaderBackend + 'static, + F: Fetcher + 'static, +{ + type Block = Block; + type Hash = H256; + type Error = error::Error; + type ValidationFuture = Box> + Send + Unpin>; + + fn validate_transaction( + &self, + at: &BlockId, + uxt: txpool::ExtrinsicFor, + ) -> Self::ValidationFuture { + let header_hash = self.client.expect_block_hash_from_id(at); + let header_and_hash = header_hash + .and_then(|header_hash| self.client.expect_header(BlockId::Hash(header_hash)) + .map(|header| (header_hash, header))); + let (block, header) = match header_and_hash { + Ok((header_hash, header)) => (header_hash, header), + Err(err) => return Box::new(ready(Err(err.into()))), + }; + let remote_validation_request = self.fetcher.remote_call(RemoteCallRequest { + block, + header, + method: "TaggedTransactionQueue_validate_transaction".into(), + call_data: uxt.encode(), + retry_count: None, + }); + let remote_validation_request = remote_validation_request.then(move |result| { + let result: error::Result = result + .map_err(Into::into) + .and_then(|result| Decode::decode(&mut &result[..]) + .map_err(|e| Error::RuntimeApi( + format!("Error decoding tx validation result: {:?}", e) + )) + ); + ready(result) + }); + + Box::new(remote_validation_request) + } + + fn block_id_to_number(&self, at: &BlockId) -> error::Result>> { + Ok(self.client.block_number_from_id(at)?) + } + + fn block_id_to_hash(&self, at: &BlockId) -> error::Result>> { + Ok(self.client.block_hash_from_id(at)?) + } + + fn hash_and_length(&self, ex: &txpool::ExtrinsicFor) -> (Self::Hash, usize) { + ex.using_encoded(|x| { + (Blake2Hasher::hash(x), x.len()) + }) + } +} diff --git a/client/transaction-pool/src/error.rs b/client/transaction-pool/src/error.rs index 83667d4cbccb3..d769944ad6a59 100644 --- a/client/transaction-pool/src/error.rs +++ b/client/transaction-pool/src/error.rs @@ -23,7 +23,9 @@ pub type Result = std::result::Result; #[derive(Debug, derive_more::Display, derive_more::From)] pub enum Error { /// Pool error. - Pool(txpool::error::Error), + Pool(txpool_api::error::Error), + /// Blockchain error. + Blockchain(sp_blockchain::Error), /// Error while converting a `BlockId`. #[from(ignore)] BlockIdConversion(String), @@ -36,14 +38,15 @@ impl std::error::Error for Error { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self { Error::Pool(ref err) => Some(err), + Error::Blockchain(ref err) => Some(err), Error::BlockIdConversion(_) => None, Error::RuntimeApi(_) => None, } } } -impl txpool::IntoPoolError for Error { - fn into_pool_error(self) -> std::result::Result { +impl txpool_api::IntoPoolError for Error { + fn into_pool_error(self) -> std::result::Result { match self { Error::Pool(e) => Ok(e), e => Err(e), diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs index d7703de65285d..1499ab26d4bfc 100644 --- a/client/transaction-pool/src/lib.rs +++ b/client/transaction-pool/src/lib.rs @@ -14,15 +14,119 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -//! Substrate transaction pool. +//! Substrate transaction pool implementation. 
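The light-client path above validates transactions without executing the runtime locally: it SCALE-encodes the extrinsic as the call data of a `TaggedTransactionQueue_validate_transaction` remote call and SCALE-decodes the fetcher's reply back into a `TransactionValidity`. A minimal sketch of that encode/decode contract with a stand-in reply type (illustrative only; assumes `parity-scale-codec` with the `derive` feature, renamed to `codec` as in the Cargo.toml hunks above):

use codec::{Decode, Encode};

/// Stand-in for the runtime's reply type (the real one is TransactionValidity).
#[derive(Debug, PartialEq, Encode, Decode)]
enum ValidityReply {
    Valid { priority: u64 },
    Invalid,
}

fn main() {
    // What the remote runtime would return, as raw SCALE bytes.
    let reply_bytes = ValidityReply::Valid { priority: 4 }.encode();

    // What LightChainApi::validate_transaction does with the fetcher's response:
    // decode the bytes, mapping decode failures to a RuntimeApi-style error.
    let decoded = ValidityReply::decode(&mut &reply_bytes[..])
        .map_err(|e| format!("Error decoding tx validation result: {:?}", e));

    assert_eq!(decoded, Ok(ValidityReply::Valid { priority: 4 }));
}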
#![warn(missing_docs)] #![warn(unused_extern_crates)] mod api; +mod maintainer; + pub mod error; #[cfg(test)] mod tests; -pub use api::FullChainApi; pub use txpool; +pub use crate::api::{FullChainApi, LightChainApi}; +pub use crate::maintainer::{FullBasicPoolMaintainer, LightBasicPoolMaintainer}; + +use std::{collections::HashMap, sync::Arc}; +use futures::{Future, FutureExt}; + +use sr_primitives::{ + generic::BlockId, + traits::Block as BlockT, +}; +use txpool_api::{ + TransactionPool, PoolStatus, ImportNotificationStream, + TxHash, TransactionFor, TransactionStatusStreamFor, +}; + +/// Basic implementation of transaction pool that can be customized by providing PoolApi. +pub struct BasicPool + where + Block: BlockT, + PoolApi: txpool::ChainApi, +{ + pool: Arc>, +} + +impl BasicPool + where + Block: BlockT, + PoolApi: txpool::ChainApi, +{ + /// Create new basic transaction pool with provided api. + pub fn new(options: txpool::Options, pool_api: PoolApi) -> Self { + BasicPool { + pool: Arc::new(txpool::Pool::new(options, pool_api)), + } + } + + /// Gets shared reference to the underlying pool. + pub fn pool(&self) -> &Arc> { + &self.pool + } +} + +impl TransactionPool for BasicPool + where + Block: BlockT, + PoolApi: 'static + txpool::ChainApi, +{ + type Block = PoolApi::Block; + type Hash = txpool::ExHash; + type InPoolTransaction = txpool::base_pool::Transaction, TransactionFor>; + type Error = error::Error; + + fn submit_at( + &self, + at: &BlockId, + xts: impl IntoIterator> + 'static, + ) -> Box, Self::Error>>, Self::Error>> + Send + Unpin> { + Box::new(self.pool.submit_at(at, xts, false)) + } + + fn submit_one( + &self, + at: &BlockId, + xt: TransactionFor, + ) -> Box, Self::Error>> + Send + Unpin> { + Box::new(self.pool.submit_one(at, xt)) + } + + fn submit_and_watch( + &self, + at: &BlockId, + xt: TransactionFor, + ) -> Box>, Self::Error>> + Send + Unpin> { + Box::new( + self.pool.submit_and_watch(at, xt) + .map(|result| result.map(|watcher| Box::new(watcher.into_stream()) as _)) + ) + } + + fn remove_invalid(&self, hashes: &[TxHash]) -> Vec> { + self.pool.remove_invalid(hashes) + } + + fn status(&self) -> PoolStatus { + self.pool.status() + } + + fn ready(&self) -> Box>> { + Box::new(self.pool.ready()) + } + + fn import_notification_stream(&self) -> ImportNotificationStream { + self.pool.import_notification_stream() + } + + fn hash_of(&self, xt: &TransactionFor) -> TxHash { + self.pool.hash_of(xt) + } + + fn on_broadcasted(&self, propagations: HashMap, Vec>) { + self.pool.on_broadcasted(propagations) + } +} diff --git a/client/transaction-pool/src/maintainer.rs b/client/transaction-pool/src/maintainer.rs new file mode 100644 index 0000000000000..a390dde88b765 --- /dev/null +++ b/client/transaction-pool/src/maintainer.rs @@ -0,0 +1,587 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . 
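The `txpool-api` trait methods return boxed futures (`Box<dyn Future + Send + Unpin>`) rather than `impl Trait`, so implementors like `BasicPool` above simply box whatever concrete future the inner graph pool produces. A self-contained sketch of that delegate-and-box shape with toy types (illustrative only; `futures` 0.3):

use std::future::Future;
use futures::future::ready;

/// Toy trait in the same shape as the txpool-api methods: async operations are
/// exposed as boxed futures so implementations can hide their concrete future types.
trait ToyPool {
    fn submit_one(&self, xt: u64) -> Box<dyn Future<Output = Result<u64, String>> + Send + Unpin>;
}

/// Stand-in for the inner graph pool: returns some concrete (unnameable) future.
struct Inner;
impl Inner {
    fn submit_one(&self, xt: u64) -> impl Future<Output = Result<u64, String>> {
        ready(Ok(xt))
    }
}

/// The wrapper delegates and boxes, the way `BasicPool` wraps `txpool::Pool` above.
struct Wrapper {
    inner: Inner,
}

impl ToyPool for Wrapper {
    fn submit_one(&self, xt: u64) -> Box<dyn Future<Output = Result<u64, String>> + Send + Unpin> {
        Box::new(self.inner.submit_one(xt))
    }
}

fn main() {
    let pool = Wrapper { inner: Inner };
    let hash = futures::executor::block_on(pool.submit_one(7)).unwrap();
    assert_eq!(hash, 7);
}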
+ +use std::{ + marker::{PhantomData, Unpin}, + sync::Arc, + time::Instant, +}; +use futures::{ + Future, FutureExt, + future::{Either, join, ready}, +}; +use log::warn; +use parking_lot::Mutex; + +use client_api::{ + client::BlockBody, + light::{Fetcher, RemoteBodyRequest}, +}; +use primitives::{Blake2Hasher, H256}; +use sr_primitives::{ + generic::BlockId, + traits::{Block as BlockT, Extrinsic, Header, NumberFor, ProvideRuntimeApi, SimpleArithmetic}, +}; +use sp_blockchain::HeaderBackend; +use txpool_api::TransactionPoolMaintainer; +use txpool_runtime_api::TaggedTransactionQueue; + +use txpool::{self, ChainApi}; + +/// Basic transaction pool maintainer for full clients. +pub struct FullBasicPoolMaintainer { + pool: Arc>, + client: Arc, +} + +impl FullBasicPoolMaintainer { + /// Create new basic full pool maintainer. + pub fn new( + pool: Arc>, + client: Arc, + ) -> Self { + FullBasicPoolMaintainer { pool, client } + } +} + +impl TransactionPoolMaintainer +for + FullBasicPoolMaintainer +where + Block: BlockT::Out>, + Client: ProvideRuntimeApi + HeaderBackend + BlockBody + 'static, + Client::Api: TaggedTransactionQueue, + PoolApi: ChainApi + 'static, +{ + type Block = Block; + type Hash = Block::Hash; + + fn maintain( + &self, + id: &BlockId, + retracted: &[Block::Hash], + ) -> Box + Send + Unpin> { + // Put transactions from retracted blocks back into the pool. + let client_copy = self.client.clone(); + let retracted_transactions = retracted.to_vec().into_iter() + .filter_map(move |hash| client_copy.block_body(&BlockId::hash(hash)).ok().unwrap_or(None)) + .flat_map(|block| block.into_iter()) + .filter(|tx| tx.is_signed().unwrap_or(false)); + let resubmit_future = self.pool + .submit_at(id, retracted_transactions, true) + .then(|resubmit_result| ready(match resubmit_result { + Ok(_) => (), + Err(e) => { + warn!("Error re-submitting transactions: {:?}", e); + () + } + })); + + // Avoid calling into runtime if there is nothing to prune from the pool anyway. + if self.pool.status().is_empty() { + return Box::new(resubmit_future) + } + + let block = (self.client.header(*id), self.client.block_body(id)); + match block { + (Ok(Some(header)), Ok(Some(extrinsics))) => { + let parent_id = BlockId::hash(*header.parent_hash()); + let prune_future = self.pool + .prune(id, &parent_id, &extrinsics) + .then(|prune_result| ready(match prune_result { + Ok(_) => (), + Err(e) => { + warn!("Error pruning transactions: {:?}", e); + () + } + })); + + Box::new(resubmit_future.then(|_| prune_future)) + }, + (Ok(_), Ok(_)) => Box::new(resubmit_future), + err => { + warn!("Error reading block: {:?}", err); + Box::new(resubmit_future) + }, + } + } +} + +/// Basic transaction pool maintainer for light clients. +pub struct LightBasicPoolMaintainer { + pool: Arc>, + client: Arc, + fetcher: Arc, + revalidate_time_period: Option, + revalidate_block_period: Option>, + revalidation_status: Arc>>>, + _phantom: PhantomData, +} + +impl LightBasicPoolMaintainer + where + Block: BlockT::Out>, + Client: ProvideRuntimeApi + HeaderBackend + BlockBody + 'static, + Client::Api: TaggedTransactionQueue, + PoolApi: ChainApi + 'static, + F: Fetcher + 'static, +{ + /// Create light pool maintainer with default constants. + /// + /// Default constants are: revalidate every 60 seconds or every 20 blocks + /// (whatever happens first). 
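Both maintainer steps in `FullBasicPoolMaintainer::maintain` above convert fallible futures into infallible `()` futures: a failed resubmission or pruning is logged and swallowed, because `maintain` is fired from the block-import notification stream and has nobody to return an error to. A self-contained sketch of that shape (illustrative only; `futures` 0.3, with `eprintln!` standing in for `log::warn!`):

use std::future::Future;
use futures::future::{ready, FutureExt};

/// Turn a fallible future into an infallible `()` future that logs and swallows
/// errors, as the resubmit and prune steps of `maintain` do above.
fn log_and_forget<F, T, E>(fut: F, what: &'static str) -> impl Future<Output = ()>
where
    F: Future<Output = Result<T, E>>,
    E: std::fmt::Debug,
{
    fut.map(move |result| {
        if let Err(e) = result {
            // The real maintainer uses log::warn! here.
            eprintln!("Error {}: {:?}", what, e);
        }
    })
}

fn main() {
    let resubmit = log_and_forget(ready(Err::<(), _>("pool full")), "re-submitting transactions");
    let prune = log_and_forget(ready(Ok::<_, &str>(())), "pruning transactions");

    // The maintainer chains the steps into one `()` future that the service
    // spawns from its block-import notification handler and forgets.
    futures::executor::block_on(async {
        resubmit.await;
        prune.await;
    });
}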
+ pub fn with_defaults( + pool: Arc>, + client: Arc, + fetcher: Arc, + ) -> Self { + Self::new( + pool, + client, + fetcher, + Some(std::time::Duration::from_secs(60)), + Some(20.into()), + ) + } + + /// Create light pool maintainer with passed constants. + pub fn new( + pool: Arc>, + client: Arc, + fetcher: Arc, + revalidate_time_period: Option, + revalidate_block_period: Option>, + ) -> Self { + Self { + pool, + client, + fetcher, + revalidate_time_period, + revalidate_block_period, + revalidation_status: Arc::new(Mutex::new(TxPoolRevalidationStatus::NotScheduled)), + _phantom: Default::default(), + } + } + + /// Returns future that prunes block transactions from the pool. + fn prune( + &self, + id: &BlockId, + header: &Block::Header, + ) -> impl std::future::Future { + // fetch transactions (possible future optimization: proofs of inclusion) that + // have been included into new block and prune these from the pool + let id = id.clone(); + let pool = self.pool.clone(); + self.fetcher.remote_body(RemoteBodyRequest { + header: header.clone(), + retry_count: None, + }) + .then(move |transactions| ready( + transactions + .map_err(|e| format!("{}", e)) + .and_then(|transactions| { + let hashes = transactions + .into_iter() + .map(|tx| pool.hash_of(&tx)) + .collect::>(); + pool.prune_known(&id, &hashes) + .map_err(|e| format!("{}", e)) + }) + )) + .then(|r| { + if let Err(e) = r { + warn!("Error pruning known transactions: {}", e) + } + ready(()) + }) + } + + /// Returns future that performs in-pool transations revalidation, if required. + fn revalidate( + &self, + id: &BlockId, + header: &Block::Header, + ) -> impl std::future::Future { + // to determine whether ready transaction is still valid, we perform periodic revalidaton + // of ready transactions + let is_revalidation_required = self.revalidation_status.lock().is_required( + *header.number(), + self.revalidate_time_period, + self.revalidate_block_period, + ); + match is_revalidation_required { + true => { + let revalidation_status = self.revalidation_status.clone(); + Either::Left(self.pool + .revalidate_ready(id) + .map(|r| r.map_err(|e| warn!("Error revalidating known transactions: {}", e))) + .map(move |_| revalidation_status.lock().clear())) + }, + false => Either::Right(ready(())), + } + } +} + +impl TransactionPoolMaintainer +for + LightBasicPoolMaintainer +where + Block: BlockT::Out>, + Client: ProvideRuntimeApi + HeaderBackend + BlockBody + 'static, + Client::Api: TaggedTransactionQueue, + PoolApi: ChainApi + 'static, + F: Fetcher + 'static, +{ + type Block = Block; + type Hash = Block::Hash; + + fn maintain( + &self, + id: &BlockId, + _retracted: &[Block::Hash], + ) -> Box + Send + Unpin> { + // Do nothing if transaction pool is empty. + if self.pool.status().is_empty() { + self.revalidation_status.lock().clear(); + return Box::new(ready(())); + } + let header = self.client.header(*id) + .and_then(|h| h.ok_or(sp_blockchain::Error::UnknownBlock(format!("{}", id)))); + let header = match header { + Ok(header) => header, + Err(err) => { + println!("Failed to maintain light tx pool: {:?}", err); + return Box::new(ready(())); + } + }; + + // else prune block transactions from the pool + let prune_future = self.prune(id, &header); + + // and then (optionally) revalidate in-pool transactions + let revalidate_future = self.revalidate(id, &header); + + let maintain_future = join( + prune_future, + revalidate_future, + ).map(|_| ()); + + Box::new(maintain_future) + } +} + +/// The status of transactions revalidation at light tx pool. 
+#[cfg_attr(test, derive(Debug))] +enum TxPoolRevalidationStatus { + /// The revalidation has never been completed. + NotScheduled, + /// The revalidation is scheduled. + Scheduled(Option, Option), + /// The revalidation is in progress. + InProgress, +} + +impl TxPoolRevalidationStatus { + /// Called when revalidation is completed. + pub fn clear(&mut self) { + *self = TxPoolRevalidationStatus::NotScheduled; + } + + /// Returns true if revalidation is required. + pub fn is_required( + &mut self, + block: N, + revalidate_time_period: Option, + revalidate_block_period: Option, + ) -> bool { + match *self { + TxPoolRevalidationStatus::NotScheduled => { + *self = TxPoolRevalidationStatus::Scheduled( + revalidate_time_period.map(|period| Instant::now() + period), + revalidate_block_period.map(|period| block + period), + ); + false + }, + TxPoolRevalidationStatus::Scheduled(revalidate_at_time, revalidate_at_block) => { + let is_required = revalidate_at_time.map(|at| Instant::now() >= at).unwrap_or(false) + || revalidate_at_block.map(|at| block >= at).unwrap_or(false); + if is_required { + *self = TxPoolRevalidationStatus::InProgress; + } + is_required + }, + TxPoolRevalidationStatus::InProgress => false, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use futures::executor::block_on; + use codec::Encode; + use test_client::{prelude::*, runtime::{Block, Transfer}, consensus::{BlockOrigin, SelectChain}}; + use txpool_api::PoolStatus; + use crate::api::{FullChainApi, LightChainApi}; + + #[test] + fn should_remove_transactions_from_the_full_pool() { + let (client, longest_chain) = TestClientBuilder::new().build_with_longest_chain(); + let client = Arc::new(client); + let pool = txpool::Pool::new(Default::default(), FullChainApi::new(client.clone())); + let pool = Arc::new(pool); + let transaction = Transfer { + amount: 5, + nonce: 0, + from: AccountKeyring::Alice.into(), + to: Default::default(), + }.into_signed_tx(); + let best = longest_chain.best_chain().unwrap(); + + // store the transaction in the pool + block_on(pool.submit_one(&BlockId::hash(best.hash()), transaction.clone())).unwrap(); + + // import the block + let mut builder = client.new_block(Default::default()).unwrap(); + builder.push(transaction.clone()).unwrap(); + let block = builder.bake().unwrap(); + let id = BlockId::hash(block.header().hash()); + client.import(BlockOrigin::Own, block).unwrap(); + + // fire notification - this should clean up the queue + assert_eq!(pool.status().ready, 1); + block_on(FullBasicPoolMaintainer::new(pool.clone(), client).maintain(&id, &[])); + + // then + assert_eq!(pool.status().ready, 0); + assert_eq!(pool.status().future, 0); + } + + #[test] + fn should_remove_transactions_from_the_light_pool() { + let transaction = Transfer { + amount: 5, + nonce: 0, + from: AccountKeyring::Alice.into(), + to: Default::default(), + }.into_signed_tx(); + let fetcher_transaction = transaction.clone(); + let fetcher = Arc::new(test_client::new_light_fetcher() + .with_remote_body(Some(Box::new(move |_| Ok(vec![fetcher_transaction.clone()])))) + .with_remote_call(Some(Box::new(move |_| { + let validity: sr_primitives::transaction_validity::TransactionValidity = + Ok(sr_primitives::transaction_validity::ValidTransaction { + priority: 0, + requires: Vec::new(), + provides: vec![vec![42]], + longevity: 0, + propagate: true, + }); + Ok(validity.encode()) + })))); + + let (client, longest_chain) = TestClientBuilder::new().build_with_longest_chain(); + let client = Arc::new(client); + let pool = 
txpool::Pool::new(Default::default(), LightChainApi::new( + client.clone(), + fetcher.clone(), + )); + let pool = Arc::new(pool); + let best = longest_chain.best_chain().unwrap(); + + // store the transaction in the pool + block_on(pool.submit_one(&BlockId::hash(best.hash()), transaction.clone())).unwrap(); + + // fire notification - this should clean up the queue + assert_eq!(pool.status().ready, 1); + block_on(LightBasicPoolMaintainer::with_defaults(pool.clone(), client.clone(), fetcher).maintain( + &BlockId::Number(0), + &[], + )); + + // then + assert_eq!(pool.status().ready, 0); + assert_eq!(pool.status().future, 0); + } + + #[test] + fn should_schedule_transactions_revalidation_at_light_pool() { + // when revalidation is not scheduled, it became scheduled + let mut status = TxPoolRevalidationStatus::NotScheduled; + assert!(!status.is_required(10u32, None, None)); + match status { + TxPoolRevalidationStatus::Scheduled(_, _) => (), + _ => panic!("Unexpected status: {:?}", status), + } + + // revalidation required at time + let mut status = TxPoolRevalidationStatus::Scheduled(Some(Instant::now()), None); + assert!(status.is_required(10u32, None, None)); + match status { + TxPoolRevalidationStatus::InProgress => (), + _ => panic!("Unexpected status: {:?}", status), + } + + // revalidation required at block + let mut status = TxPoolRevalidationStatus::Scheduled(None, Some(10)); + assert!(status.is_required(10u32, None, None)); + match status { + TxPoolRevalidationStatus::InProgress => (), + _ => panic!("Unexpected status: {:?}", status), + } + } + + #[test] + fn should_revalidate_transactions_at_light_pool() { + use std::sync::atomic; + use sr_primitives::transaction_validity::*; + + let build_fetcher = || { + let validated = Arc::new(atomic::AtomicBool::new(false)); + Arc::new(test_client::new_light_fetcher() + .with_remote_body(Some(Box::new(move |_| Ok(vec![])))) + .with_remote_call(Some(Box::new(move |_| { + let is_inserted = validated.swap(true, atomic::Ordering::SeqCst); + let validity: TransactionValidity = if is_inserted { + Err(TransactionValidityError::Invalid( + InvalidTransaction::Custom(0) + )) + } else { + Ok(ValidTransaction { + priority: 0, + requires: Vec::new(), + provides: vec![vec![42]], + longevity: 0, + propagate: true, + }) + }; + Ok(validity.encode()) + })))) + }; + + fn with_fetcher_maintain + 'static>( + fetcher: Arc, + revalidate_time_period: Option, + revalidate_block_period: Option, + prepare_maintainer: impl Fn(&Mutex>), + ) -> PoolStatus { + let (client, longest_chain) = TestClientBuilder::new().build_with_longest_chain(); + let client = Arc::new(client); + + // now let's prepare pool + let pool = txpool::Pool::new(Default::default(), LightChainApi::new( + client.clone(), + fetcher.clone(), + )); + let pool = Arc::new(pool); + let best = longest_chain.best_chain().unwrap(); + + // let's prepare maintainer + let maintainer = LightBasicPoolMaintainer::new( + pool.clone(), + client, + fetcher, + revalidate_time_period, + revalidate_block_period, + ); + prepare_maintainer(&*maintainer.revalidation_status); + + // store the transaction in the pool + block_on(pool.submit_one( + &BlockId::hash(best.hash()), + Transfer { + amount: 5, + nonce: 0, + from: AccountKeyring::Alice.into(), + to: Default::default(), + }.into_signed_tx(), + )).unwrap(); + + // and run maintain procedures + block_on(maintainer.maintain(&BlockId::Number(0), &[])); + + pool.status() + } + + // when revalidation is never required - nothing happens + let fetcher = build_fetcher(); + //let 
maintainer = DefaultLightTransactionPoolMaintainer::new(client.clone(), fetcher.clone(), None, None); + let status = with_fetcher_maintain(fetcher, None, None, |_revalidation_status| {}); + assert_eq!(status.ready, 1); + + // when revalidation is scheduled by time - it is performed + let fetcher = build_fetcher(); + let status = with_fetcher_maintain(fetcher, None, None, |revalidation_status| + *revalidation_status.lock() = TxPoolRevalidationStatus::Scheduled(Some(Instant::now()), None) + ); + assert_eq!(status.ready, 0); + + // when revalidation is scheduled by block number - it is performed + let fetcher = build_fetcher(); + let status = with_fetcher_maintain(fetcher, None, None, |revalidation_status| + *revalidation_status.lock() = TxPoolRevalidationStatus::Scheduled(None, Some(0)) + ); + assert_eq!(status.ready, 0); + } + + #[test] + fn should_add_reverted_transactions_to_the_pool() { + let (client, longest_chain) = TestClientBuilder::new().build_with_longest_chain(); + let client = Arc::new(client); + let pool = txpool::Pool::new(Default::default(), FullChainApi::new(client.clone())); + let pool = Arc::new(pool); + let transaction = Transfer { + amount: 5, + nonce: 0, + from: AccountKeyring::Alice.into(), + to: Default::default(), + }.into_signed_tx(); + let best = longest_chain.best_chain().unwrap(); + + // store the transaction in the pool + block_on(pool.submit_one(&BlockId::hash(best.hash()), transaction.clone())).unwrap(); + + // import the block + let mut builder = client.new_block(Default::default()).unwrap(); + builder.push(transaction.clone()).unwrap(); + let block = builder.bake().unwrap(); + let block1_hash = block.header().hash(); + let id = BlockId::hash(block1_hash.clone()); + client.import(BlockOrigin::Own, block).unwrap(); + + // fire notification - this should clean up the queue + assert_eq!(pool.status().ready, 1); + block_on(FullBasicPoolMaintainer::new(pool.clone(), client.clone()).maintain(&id, &[])); + + // then + assert_eq!(pool.status().ready, 0); + assert_eq!(pool.status().future, 0); + + // import second block + let builder = client.new_block_at(&BlockId::hash(best.hash()), Default::default()).unwrap(); + let block = builder.bake().unwrap(); + let id = BlockId::hash(block.header().hash()); + client.import(BlockOrigin::Own, block).unwrap(); + + // fire notification - this should add the transaction back to the pool. 
+ block_on(FullBasicPoolMaintainer::new(pool.clone(), client).maintain(&id, &[block1_hash])); + + // then + assert_eq!(pool.status().ready, 1); + assert_eq!(pool.status().future, 0); + } +} diff --git a/docs/CODEOWNERS b/docs/CODEOWNERS index 5ad126c3f6503..29f510a55661d 100644 --- a/docs/CODEOWNERS +++ b/docs/CODEOWNERS @@ -28,6 +28,7 @@ # Transaction pool /client/transaction-pool/ @tomusdrw +/primitives/transaction-pool/ @tomusdrw # Offchain /client/offchain/ @tomusdrw diff --git a/docs/README.adoc b/docs/README.adoc index 76e640db9b439..d8c582296cabb 100644 --- a/docs/README.adoc +++ b/docs/README.adoc @@ -439,7 +439,7 @@ substrate-executor, substrate-finality-grandpa, substrate-keyring, substrate-key substrate-network-libp2p, substrate-primitives, substrate-rpc, substrate-rpc-servers, substrate-serializer, substrate-service, substrate-service-test, substrate-state-db, substrate-state-machine, substrate-telemetry, substrate-test-client, -substrate-test-runtime, substrate-transaction-graph, substrate-transaction-pool, +substrate-test-runtime, substrate-transaction-graph, sp-transaction-pool, substrate-trie * Substrate Runtime [source, shell] diff --git a/primitives/sr-primitives/src/offchain/mod.rs b/primitives/sr-primitives/src/offchain/mod.rs index 51dc19bc12d60..07c25570eef8c 100644 --- a/primitives/sr-primitives/src/offchain/mod.rs +++ b/primitives/sr-primitives/src/offchain/mod.rs @@ -16,27 +16,4 @@ //! A collection of higher lever helpers for offchain calls. -use crate::{ - traits, - generic::BlockId, -}; - pub mod http; - -/// An abstraction for transaction pool. -/// -/// This trait is used by offchain calls to be able to submit transactions. -/// The main use case is for offchain workers, to feed back the results of computations, -/// but since the transaction pool access is a separate `ExternalitiesExtension` it can -/// be also used in context of other offchain calls. For one may generate and submit -/// a transaction for some misbehavior reports (say equivocation). -pub trait TransactionPool: Send + Sync { - /// Submit transaction. - /// - /// The transaction will end up in the pool and be propagated to others. 
- fn submit_at( - &self, - at: &BlockId, - extrinsic: Block::Extrinsic, - ) -> Result<(), ()>; -} diff --git a/primitives/transaction-pool/Cargo.toml b/primitives/transaction-pool/Cargo.toml new file mode 100644 index 0000000000000..b2ea581249f99 --- /dev/null +++ b/primitives/transaction-pool/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "sp-transaction-pool-api" +version = "2.0.0" +authors = ["Parity Technologies "] +edition = "2018" + +[dependencies] +derive_more = "0.15.0" +futures = "0.3.1" +log = "0.4.8" +serde = { version = "1.0.101", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.0.0" } +primitives = { package = "substrate-primitives", path = "../core" } +sr-primitives = { path = "../sr-primitives" } diff --git a/primitives/transaction-pool/runtime-api/Cargo.toml b/primitives/transaction-pool/runtime-api/Cargo.toml index e002208502592..2a1014989d91a 100644 --- a/primitives/transaction-pool/runtime-api/Cargo.toml +++ b/primitives/transaction-pool/runtime-api/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "substrate-transaction-pool-runtime-api" +name = "sp-transaction-pool-runtime-api" version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" diff --git a/primitives/transaction-pool/src/error.rs b/primitives/transaction-pool/src/error.rs new file mode 100644 index 0000000000000..19270f349a460 --- /dev/null +++ b/primitives/transaction-pool/src/error.rs @@ -0,0 +1,82 @@ +// Copyright 2018-2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Transaction pool errors. + +use sr_primitives::transaction_validity::{ + TransactionPriority as Priority, InvalidTransaction, UnknownTransaction, +}; + +/// Transaction pool result. +pub type Result = std::result::Result; + +/// Transaction pool error type. +#[derive(Debug, derive_more::Display, derive_more::From)] +pub enum Error { + /// Transaction is not verifiable yet, but might be in the future. + #[display(fmt="Unknown transaction validity: {:?}", _0)] + UnknownTransaction(UnknownTransaction), + /// Transaction is invalid. + #[display(fmt="Invalid transaction validity: {:?}", _0)] + InvalidTransaction(InvalidTransaction), + /// The transaction validity returned no "provides" tag. + /// + /// Such transactions are not accepted to the pool, since we use those tags + /// to define identity of transactions (occupance of the same "slot"). + #[display(fmt="The transaction does not provide any tags, so the pool can't identify it.")] + NoTagsProvided, + /// The transaction is temporarily banned. + #[display(fmt="Temporarily Banned")] + TemporarilyBanned, + /// The transaction is already in the pool. + #[display(fmt="[{:?}] Already imported", _0)] + AlreadyImported(Box), + /// The transaction cannot be imported cause it's a replacement and has too low priority. + #[display(fmt="Too low priority ({} > {})", old, new)] + TooLowPriority { + /// Transaction already in the pool. 
+ old: Priority, + /// Transaction entering the pool. + new: Priority + }, + /// Deps cycle etected and we couldn't import transaction. + #[display(fmt="Cycle Detected")] + CycleDetected, + /// Transaction was dropped immediately after it got inserted. + #[display(fmt="Transaction couldn't enter the pool because of the limit.")] + ImmediatelyDropped, + /// Invalid block id. + InvalidBlockId(String), + /// The pool is not accepting future transactions. + #[display(fmt="The pool is not accepting future transactions")] + RejectedFutureTransaction, +} + +impl std::error::Error for Error {} + +/// Transaction pool error conversion. +pub trait IntoPoolError: ::std::error::Error + Send + Sized { + /// Try to extract original `Error` + /// + /// This implementation is optional and used only to + /// provide more descriptive error messages for end users + /// of RPC API. + fn into_pool_error(self) -> ::std::result::Result { Err(self) } +} + +impl IntoPoolError for Error { + fn into_pool_error(self) -> ::std::result::Result { Ok(self) } +} diff --git a/primitives/transaction-pool/src/lib.rs b/primitives/transaction-pool/src/lib.rs new file mode 100644 index 0000000000000..30671d4b1a16d --- /dev/null +++ b/primitives/transaction-pool/src/lib.rs @@ -0,0 +1,328 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Transaction pool types. + +#![warn(missing_docs)] + +pub mod error; + +pub use error::IntoPoolError; +pub use sr_primitives::transaction_validity::{ + TransactionLongevity, TransactionPriority, TransactionTag, +}; + +use std::{ + collections::HashMap, + hash::Hash, + sync::Arc, +}; +use futures::{ + Future, Stream, + channel::mpsc, +}; +use serde::{Deserialize, Serialize}; +use sr_primitives::{ + generic::BlockId, + traits::{Block as BlockT, Member}, +}; + +/// Transaction pool status. +#[derive(Debug)] +pub struct PoolStatus { + /// Number of transactions in the ready queue. + pub ready: usize, + /// Sum of bytes of ready transaction encodings. + pub ready_bytes: usize, + /// Number of transactions in the future queue. + pub future: usize, + /// Sum of bytes of ready transaction encodings. + pub future_bytes: usize, +} + +impl PoolStatus { + /// Returns true if the are no transactions in the pool. + pub fn is_empty(&self) -> bool { + self.ready == 0 && self.future == 0 + } +} + +/// Possible transaction status events. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub enum TransactionStatus { + /// Transaction is part of the future queue. + Future, + /// Transaction is part of the ready queue. + Ready, + /// Transaction has been finalized in block with given hash. + Finalized(BlockHash), + /// Some state change (perhaps another transaction was included) rendered this transaction invalid. + Usurped(Hash), + /// The transaction has been broadcast to the given peers. 
+ Broadcast(Vec), + /// Transaction has been dropped from the pool because of the limit. + Dropped, + /// Transaction was detected as invalid. + Invalid, +} + +/// The stream of transaction events. +pub type TransactionStatusStream = dyn Stream> + Send + Unpin; + +/// The import notification event stream. +pub type ImportNotificationStream = mpsc::UnboundedReceiver<()>; + +/// Transaction hash type for a pool. +pub type TxHash

<P> = <P as TransactionPool>::Hash;
+/// Block hash type for a pool.
+pub type BlockHash<P> = <<P as TransactionPool>::Block as BlockT>::Hash;
+/// Transaction type for a pool.
+pub type TransactionFor<P> = <<P as TransactionPool>::Block as BlockT>::Extrinsic;
+/// Type of transactions event stream for a pool.
+pub type TransactionStatusStreamFor<P> = TransactionStatusStream<TxHash<P>, BlockHash<P>
>; + +/// In-pool transaction interface. +/// +/// The pool is container of transactions that are implementing this trait. +/// See `sr_primitives::ValidTransaction` for details about every field. +pub trait InPoolTransaction { + /// Transaction type. + type Transaction; + /// Transaction hash type. + type Hash; + + /// Get the reference to the transaction data. + fn data(&self) -> &Self::Transaction; + /// Get hash of the transaction. + fn hash(&self) -> &Self::Hash; + /// Get priority of the transaction. + fn priority(&self) -> &TransactionPriority; + /// Get longevity of the transaction. + fn longevity(&self) ->&TransactionLongevity; + /// Get transaction dependencies. + fn requires(&self) -> &[TransactionTag]; + /// Get tags that transaction provides. + fn provides(&self) -> &[TransactionTag]; + /// Return a flag indicating if the transaction should be propagated to other peers. + fn is_propagateable(&self) -> bool; +} + +/// Transaction pool interface. +pub trait TransactionPool: Send + Sync { + /// Block type. + type Block: BlockT; + /// Transaction hash type. + type Hash: Hash + Eq + Member + Serialize; + /// In-pool transaction type. + type InPoolTransaction: InPoolTransaction< + Transaction = TransactionFor, + Hash = TxHash + >; + /// Error type. + type Error: From + IntoPoolError; + + /// Returns a future that imports a bunch of unverified transactions to the pool. + fn submit_at( + &self, + at: &BlockId, + xts: impl IntoIterator> + 'static, + ) -> Box, Self::Error>>, + Self::Error + >> + Send + Unpin>; + + /// Returns a future that imports one unverified transaction to the pool. + fn submit_one( + &self, + at: &BlockId, + xt: TransactionFor, + ) -> Box, + Self::Error + >> + Send + Unpin>; + + /// Returns a future that import a single transaction and starts to watch their progress in the pool. + fn submit_and_watch( + &self, + at: &BlockId, + xt: TransactionFor, + ) -> Box>, Self::Error>> + Send + Unpin>; + + /// Remove transactions identified by given hashes (and dependent transactions) from the pool. + fn remove_invalid(&self, hashes: &[TxHash]) -> Vec>; + + /// Returns pool status. + fn status(&self) -> PoolStatus; + + /// Get an iterator for ready transactions ordered by priority + fn ready(&self) -> Box>>; + + /// Return an event stream of transactions imported to the pool. + fn import_notification_stream(&self) -> ImportNotificationStream; + + /// Returns transaction hash + fn hash_of(&self, xt: &TransactionFor) -> TxHash; + + /// Notify the pool about transactions broadcast. + fn on_broadcasted(&self, propagations: HashMap, Vec>); +} + +/// An abstraction for transaction pool. +/// +/// This trait is used by offchain calls to be able to submit transactions. +/// The main use case is for offchain workers, to feed back the results of computations, +/// but since the transaction pool access is a separate `ExternalitiesExtension` it can +/// be also used in context of other offchain calls. For one may generate and submit +/// a transaction for some misbehavior reports (say equivocation). +pub trait OffchainSubmitTransaction: Send + Sync { + /// Submit transaction. + /// + /// The transaction will end up in the pool and be propagated to others. 
+ fn submit_at( + &self, + at: &BlockId, + extrinsic: Block::Extrinsic, + ) -> Result<(), ()>; +} + +impl OffchainSubmitTransaction for TPool { + fn submit_at( + &self, + at: &BlockId, + extrinsic: ::Extrinsic, + ) -> Result<(), ()> { + log::debug!( + target: "txpool", + "(offchain call) Submitting a transaction to the pool: {:?}", + extrinsic + ); + + let result = futures::executor::block_on(self.submit_one(&at, extrinsic)); + + result.map(|_| ()) + .map_err(|e| log::warn!( + target: "txpool", + "(offchain call) Error submitting a transaction to the pool: {:?}", + e + )) + } +} + +/// Transaction pool maintainer interface. +pub trait TransactionPoolMaintainer: Send + Sync { + /// Block type. + type Block: BlockT; + /// Transaction Hash type. + type Hash: Hash + Eq + Member + Serialize; + + /// Returns a future that performs maintenance procedures on the pool when + /// with given hash is imported. + fn maintain( + &self, + id: &BlockId, + retracted: &[Self::Hash], + ) -> Box + Send + Unpin>; +} + +/// Maintainable pool implementation. +pub struct MaintainableTransactionPool { + pool: Pool, + maintainer: Maintainer, +} + +impl MaintainableTransactionPool { + /// Create new maintainable pool using underlying pool and maintainer. + pub fn new(pool: Pool, maintainer: Maintainer) -> Self { + MaintainableTransactionPool { pool, maintainer } + } +} + +impl TransactionPool for MaintainableTransactionPool + where + Pool: TransactionPool, + Maintainer: Send + Sync, +{ + type Block = Pool::Block; + type Hash = Pool::Hash; + type InPoolTransaction = Pool::InPoolTransaction; + type Error = Pool::Error; + + fn submit_at( + &self, + at: &BlockId, + xts: impl IntoIterator> + 'static, + ) -> Box, Self::Error>>, Self::Error>> + Send + Unpin> { + self.pool.submit_at(at, xts) + } + + fn submit_one( + &self, + at: &BlockId, + xt: TransactionFor, + ) -> Box, Self::Error>> + Send + Unpin> { + self.pool.submit_one(at, xt) + } + + fn submit_and_watch( + &self, + at: &BlockId, + xt: TransactionFor, + ) -> Box>, Self::Error>> + Send + Unpin> { + self.pool.submit_and_watch(at, xt) + } + + fn remove_invalid(&self, hashes: &[TxHash]) -> Vec> { + self.pool.remove_invalid(hashes) + } + + fn status(&self) -> PoolStatus { + self.pool.status() + } + + fn ready(&self) -> Box>> { + self.pool.ready() + } + + fn import_notification_stream(&self) -> ImportNotificationStream { + self.pool.import_notification_stream() + } + + fn hash_of(&self, xt: &TransactionFor) -> TxHash { + self.pool.hash_of(xt) + } + + fn on_broadcasted(&self, propagations: HashMap, Vec>) { + self.pool.on_broadcasted(propagations) + } +} + +impl TransactionPoolMaintainer for MaintainableTransactionPool + where + Pool: Send + Sync, + Maintainer: TransactionPoolMaintainer +{ + type Block = Maintainer::Block; + type Hash = Maintainer::Hash; + + fn maintain( + &self, + id: &BlockId, + retracted: &[Self::Hash], + ) -> Box + Send + Unpin> { + self.maintainer.maintain(id, retracted) + } +} diff --git a/test/utils/primitives/src/lib.rs b/test/utils/primitives/src/lib.rs index d30b9eabf6fa8..eebdbb165ff5d 100644 --- a/test/utils/primitives/src/lib.rs +++ b/test/utils/primitives/src/lib.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -//! The Substrate test primitives to share +//! 
The Substrate test primitives to share #![cfg_attr(not(feature = "std"), no_std)] diff --git a/test/utils/runtime/Cargo.toml b/test/utils/runtime/Cargo.toml index f89524d2846cd..f51bceb095fc9 100644 --- a/test/utils/runtime/Cargo.toml +++ b/test/utils/runtime/Cargo.toml @@ -34,7 +34,7 @@ frame-system-rpc-runtime-api = { path = "../../../frame/system/rpc/runtime-api", pallet-timestamp = { path = "../../../frame/timestamp", default-features = false } substrate-client = { path = "../../../client", optional = true } substrate-trie = { path = "../../../primitives/trie", default-features = false } -transaction-pool-api = { package = "substrate-transaction-pool-runtime-api", path = "../../../primitives/transaction-pool/runtime-api", default-features = false } +txpool-runtime-api = { package = "sp-transaction-pool-runtime-api", path = "../../../primitives/transaction-pool/runtime-api", default-features = false } trie-db = { version = "0.16.0", default-features = false } [dev-dependencies] @@ -78,6 +78,6 @@ std = [ "pallet-timestamp/std", "substrate-client", "substrate-trie/std", - "transaction-pool-api/std", + "txpool-runtime-api/std", "trie-db/std", ] diff --git a/test/utils/runtime/client/Cargo.toml b/test/utils/runtime/client/Cargo.toml index 83e5edb246ce3..22758584c79e7 100644 --- a/test/utils/runtime/client/Cargo.toml +++ b/test/utils/runtime/client/Cargo.toml @@ -14,3 +14,4 @@ sp-blockchain = { path = "../../../../primitives/blockchain" } codec = { package = "parity-scale-codec", version = "1.0.0" } client-api = { package = "substrate-client-api", path = "../../../../client/api" } client = { package = "substrate-client", path = "../../../../client/" } +futures = "0.3.1" diff --git a/test/utils/runtime/client/src/lib.rs b/test/utils/runtime/client/src/lib.rs index d56bcfa770b5d..57be949bcaf98 100644 --- a/test/utils/runtime/client/src/lib.rs +++ b/test/utils/runtime/client/src/lib.rs @@ -30,7 +30,15 @@ pub use runtime; use primitives::sr25519; use runtime::genesismap::{GenesisConfig, additional_storage_with_genesis}; -use sr_primitives::traits::{Block as BlockT, Header as HeaderT, Hash as HashT}; +use sr_primitives::traits::{Block as BlockT, Header as HeaderT, Hash as HashT, NumberFor}; +use client::{ + light::fetcher::{ + Fetcher, + RemoteHeaderRequest, RemoteReadRequest, RemoteReadChildRequest, + RemoteCallRequest, RemoteChangesRequest, RemoteBodyRequest, + }, +}; + /// A prelude to import in tests. pub mod prelude { @@ -247,6 +255,81 @@ impl TestClientBuilderExt for TestClientBuilder< } } +/// Type of optional fetch callback. +type MaybeFetcherCallback = Option Result + Send + Sync>>; + +/// Type of fetcher future result. +type FetcherFutureResult = futures::future::Ready>; + +/// Implementation of light client fetcher used in tests. +#[derive(Default)] +pub struct LightFetcher { + call: MaybeFetcherCallback, Vec>, + body: MaybeFetcherCallback, Vec>, +} + +impl LightFetcher { + /// Sets remote call callback. + pub fn with_remote_call( + self, + call: MaybeFetcherCallback, Vec>, + ) -> Self { + LightFetcher { + call, + body: self.body, + } + } + + /// Sets remote body callback. 
+ pub fn with_remote_body( + self, + body: MaybeFetcherCallback, Vec>, + ) -> Self { + LightFetcher { + call: self.call, + body, + } + } +} + +impl Fetcher for LightFetcher { + type RemoteHeaderResult = FetcherFutureResult; + type RemoteReadResult = FetcherFutureResult, Option>>>; + type RemoteCallResult = FetcherFutureResult>; + type RemoteChangesResult = FetcherFutureResult, u32)>>; + type RemoteBodyResult = FetcherFutureResult>; + + fn remote_header(&self, _: RemoteHeaderRequest) -> Self::RemoteHeaderResult { + unimplemented!() + } + + fn remote_read(&self, _: RemoteReadRequest) -> Self::RemoteReadResult { + unimplemented!() + } + + fn remote_read_child(&self, _: RemoteReadChildRequest) -> Self::RemoteReadResult { + unimplemented!() + } + + fn remote_call(&self, req: RemoteCallRequest) -> Self::RemoteCallResult { + match self.call { + Some(ref call) => futures::future::ready(call(req)), + None => unimplemented!(), + } + } + + fn remote_changes(&self, _: RemoteChangesRequest) -> Self::RemoteChangesResult { + unimplemented!() + } + + fn remote_body(&self, req: RemoteBodyRequest) -> Self::RemoteBodyResult { + match self.body { + Some(ref body) => futures::future::ready(body(req)), + None => unimplemented!(), + } + } +} + /// Creates new client instance used for tests. pub fn new() -> Client { TestClientBuilder::new().build() @@ -275,3 +358,8 @@ pub fn new_light() -> ( backend, ) } + +/// Creates new light client fetcher used for tests. +pub fn new_light_fetcher() -> LightFetcher { + LightFetcher::default() +} diff --git a/test/utils/runtime/src/lib.rs b/test/utils/runtime/src/lib.rs index 58caae2098d07..7dd98c56c0cf9 100644 --- a/test/utils/runtime/src/lib.rs +++ b/test/utils/runtime/src/lib.rs @@ -477,7 +477,7 @@ cfg_if! { } } - impl transaction_pool_api::TaggedTransactionQueue for Runtime { + impl txpool_runtime_api::TaggedTransactionQueue for Runtime { fn validate_transaction(utx: ::Extrinsic) -> TransactionValidity { if let Extrinsic::IncludeData(data) = utx { return Ok(ValidTransaction { @@ -662,7 +662,7 @@ cfg_if! 
{ } } - impl transaction_pool_api::TaggedTransactionQueue for Runtime { + impl txpool_runtime_api::TaggedTransactionQueue for Runtime { fn validate_transaction(utx: ::Extrinsic) -> TransactionValidity { if let Extrinsic::IncludeData(data) = utx { return Ok(ValidTransaction{ diff --git a/utils/frame/rpc/system/Cargo.toml b/utils/frame/rpc/system/Cargo.toml index a4b6d216930a7..9b5b32919c1e1 100644 --- a/utils/frame/rpc/system/Cargo.toml +++ b/utils/frame/rpc/system/Cargo.toml @@ -5,7 +5,9 @@ authors = ["Parity Technologies "] edition = "2018" [dependencies] +client = { package = "substrate-client", path = "../../../../client/" } codec = { package = "parity-scale-codec", version = "1.0.0" } +futures = "0.3.1" jsonrpc-core = "14.0.3" jsonrpc-core-client = "14.0.3" jsonrpc-derive = "14.0.3" @@ -15,10 +17,9 @@ sr-primitives = { path = "../../../../primitives/sr-primitives" } frame-system-rpc-runtime-api = { path = "../../../../frame/system/rpc/runtime-api" } substrate-primitives = { path = "../../../../primitives/core" } sp-blockchain = { path = "../../../../primitives/blockchain" } -sc-transaction-graph = { path = "../../../../client/transaction-pool/graph" } +txpool-api = { package = "sp-transaction-pool-api", path = "../../../../primitives/transaction-pool" } [dev-dependencies] test-client = { package = "substrate-test-runtime-client", path = "../../../../test/utils/runtime/client" } -sc-transaction-pool = { path = "../../../../client/transaction-pool" } env_logger = "0.7.0" -futures = "0.3.1" +txpool = { package = "sc-transaction-pool", path = "../../../../client/transaction-pool" } diff --git a/utils/frame/rpc/system/src/lib.rs b/utils/frame/rpc/system/src/lib.rs index 63b91c3cd2ef0..ebda962031921 100644 --- a/utils/frame/rpc/system/src/lib.rs +++ b/utils/frame/rpc/system/src/lib.rs @@ -18,20 +18,34 @@ use std::sync::Arc; -use codec::{self, Codec, Encode}; -use sp_blockchain::HeaderBackend; -use jsonrpc_core::{Result, Error, ErrorCode}; +use codec::{self, Codec, Decode, Encode}; +use client::{ + light::blockchain::{future_header, RemoteBlockchain}, + light::fetcher::{Fetcher, RemoteCallRequest}, +}; +use jsonrpc_core::{ + Error, ErrorCode, + futures::future::{result, Future}, +}; use jsonrpc_derive::rpc; +use futures::future::{ready, TryFutureExt}; +use sp_blockchain::{ + HeaderBackend, + Error as ClientError +}; use sr_primitives::{ generic::BlockId, traits, }; use substrate_primitives::hexdisplay::HexDisplay; -use sc_transaction_graph::{self, ChainApi, Pool}; +use txpool_api::{TransactionPool, InPoolTransaction}; pub use frame_system_rpc_runtime_api::AccountNonceApi; pub use self::gen_client::Client as SystemClient; +/// Future that resolves to account nonce. +pub type FutureResult = Box + Send>; + /// System RPC methods. #[rpc] pub trait SystemApi { @@ -41,22 +55,22 @@ pub trait SystemApi { /// currently in the pool and if no transactions are found in the pool /// it fallbacks to query the index from the runtime (aka. state nonce). #[rpc(name = "system_accountNextIndex", alias("account_nextIndex"))] - fn nonce(&self, account: AccountId) -> Result; + fn nonce(&self, account: AccountId) -> FutureResult; } const RUNTIME_ERROR: i64 = 1; -/// An implementation of System-specific RPC methods. -pub struct System { +/// An implementation of System-specific RPC methods on full client. +pub struct FullSystem { client: Arc, - pool: Arc>, + pool: Arc

, _marker: std::marker::PhantomData, } -impl System { - /// Create new `System` given client and transaction pool. - pub fn new(client: Arc, pool: Arc>) -> Self { - System { +impl FullSystem { + /// Create new `FullSystem` given client and transaction pool. + pub fn new(client: Arc, pool: Arc

) -> Self { + FullSystem { client, pool, _marker: Default::default(), @@ -64,74 +78,164 @@ impl System { } } -impl SystemApi for System +impl SystemApi for FullSystem where C: traits::ProvideRuntimeApi, C: HeaderBackend, C: Send + Sync + 'static, C::Api: AccountNonceApi, - P: ChainApi + Sync + Send + 'static, + P: TransactionPool + 'static, Block: traits::Block, AccountId: Clone + std::fmt::Display + Codec, - Index: Clone + std::fmt::Display + Codec + traits::SimpleArithmetic, + Index: Clone + std::fmt::Display + Codec + Send + traits::SimpleArithmetic + 'static, { - fn nonce(&self, account: AccountId) -> Result { - let api = self.client.runtime_api(); - let best = self.client.info().best_hash; - let at = BlockId::hash(best); + fn nonce(&self, account: AccountId) -> FutureResult { + let get_nonce = || { + let api = self.client.runtime_api(); + let best = self.client.info().best_hash; + let at = BlockId::hash(best); + + let nonce = api.account_nonce(&at, account.clone()).map_err(|e| Error { + code: ErrorCode::ServerError(RUNTIME_ERROR), + message: "Unable to query nonce.".into(), + data: Some(format!("{:?}", e).into()), + })?; + + Ok(adjust_nonce(&*self.pool, account, nonce)) + }; + + Box::new(result(get_nonce())) + } +} + +/// An implementation of System-specific RPC methods on light client. +pub struct LightSystem { + client: Arc, + remote_blockchain: Arc>, + fetcher: Arc, + pool: Arc

, +} + +impl LightSystem { + /// Create new `LightSystem`. + pub fn new( + client: Arc, + remote_blockchain: Arc>, + fetcher: Arc, + pool: Arc

, + ) -> Self { + LightSystem { + client, + remote_blockchain, + fetcher, + pool, + } + } +} - let nonce = api.account_nonce(&at, account.clone()).map_err(|e| Error { +impl SystemApi for LightSystem +where + P: TransactionPool + 'static, + C: HeaderBackend, + C: Send + Sync + 'static, + F: Fetcher + 'static, + Block: traits::Block, + AccountId: Clone + std::fmt::Display + Codec + Send + 'static, + Index: Clone + std::fmt::Display + Codec + Send + traits::SimpleArithmetic + 'static, +{ + fn nonce(&self, account: AccountId) -> FutureResult { + let best_hash = self.client.info().best_hash; + let best_id = BlockId::hash(best_hash); + let future_best_header = future_header(&*self.remote_blockchain, &*self.fetcher, best_id); + let fetcher = self.fetcher.clone(); + let call_data = account.encode(); + let future_best_header = future_best_header + .and_then(move |maybe_best_header| ready( + match maybe_best_header { + Some(best_header) => Ok(best_header), + None => Err(ClientError::UnknownBlock(format!("{}", best_hash))), + } + )); + let future_nonce = future_best_header.and_then(move |best_header| + fetcher.remote_call(RemoteCallRequest { + block: best_hash, + header: best_header, + method: "AccountNonceApi_account_nonce".into(), + call_data, + retry_count: None, + }) + ).compat(); + let future_nonce = future_nonce.and_then(|nonce| Decode::decode(&mut &nonce[..]) + .map_err(|e| ClientError::CallResultDecode("Cannot decode account nonce", e))); + let future_nonce = future_nonce.map_err(|e| Error { code: ErrorCode::ServerError(RUNTIME_ERROR), message: "Unable to query nonce.".into(), data: Some(format!("{:?}", e).into()), - })?; - - log::debug!(target: "rpc", "State nonce for {}: {}", account, nonce); - // Now we need to query the transaction pool - // and find transactions originating from the same sender. - // - // Since extrinsics are opaque to us, we look for them using - // `provides` tag. And increment the nonce if we find a transaction - // that matches the current one. - let mut current_nonce = nonce.clone(); - let mut current_tag = (account.clone(), nonce.clone()).encode(); - for tx in self.pool.ready() { - log::debug!( - target: "rpc", - "Current nonce to {}, checking {} vs {:?}", - current_nonce, - HexDisplay::from(¤t_tag), - tx.provides.iter().map(|x| format!("{}", HexDisplay::from(x))).collect::>(), - ); - // since transactions in `ready()` need to be ordered by nonce - // it's fine to continue with current iterator. - if tx.provides.get(0) == Some(¤t_tag) { - current_nonce += traits::One::one(); - current_tag = (account.clone(), current_nonce.clone()).encode(); - } - } + }); + + let pool = self.pool.clone(); + let future_nonce = future_nonce.map(move |nonce| adjust_nonce(&*pool, account, nonce)); + + Box::new(future_nonce) + } +} - Ok(current_nonce) +/// Adjust account nonce from state, so that tx with the nonce will be +/// placed after all ready txpool transactions. +fn adjust_nonce( + pool: &P, + account: AccountId, + nonce: Index, +) -> Index where + P: TransactionPool, + AccountId: Clone + std::fmt::Display + Encode, + Index: Clone + std::fmt::Display + Encode + traits::SimpleArithmetic + 'static, +{ + log::debug!(target: "rpc", "State nonce for {}: {}", account, nonce); + // Now we need to query the transaction pool + // and find transactions originating from the same sender. + // + // Since extrinsics are opaque to us, we look for them using + // `provides` tag. And increment the nonce if we find a transaction + // that matches the current one. 
+ let mut current_nonce = nonce.clone(); + let mut current_tag = (account.clone(), nonce.clone()).encode(); + for tx in pool.ready() { + log::debug!( + target: "rpc", + "Current nonce to {}, checking {} vs {:?}", + current_nonce, + HexDisplay::from(¤t_tag), + tx.provides().iter().map(|x| format!("{}", HexDisplay::from(x))).collect::>(), + ); + // since transactions in `ready()` need to be ordered by nonce + // it's fine to continue with current iterator. + if tx.provides().get(0) == Some(¤t_tag) { + current_nonce += traits::One::one(); + current_tag = (account.clone(), current_nonce.clone()).encode(); + } } + + current_nonce } #[cfg(test)] mod tests { use super::*; - use sc_transaction_pool; use futures::executor::block_on; use test_client::{ runtime::Transfer, AccountKeyring, }; + use txpool::{BasicPool, FullChainApi}; #[test] fn should_return_next_nonce_for_some_account() { // given let _ = env_logger::try_init(); let client = Arc::new(test_client::new()); - let pool = Arc::new(Pool::new(Default::default(), sc_transaction_pool::FullChainApi::new(client.clone()))); + let pool = Arc::new(BasicPool::new(Default::default(), FullChainApi::new(client.clone()))); let new_transaction = |nonce: u64| { let t = Transfer { @@ -148,12 +252,12 @@ mod tests { let ext1 = new_transaction(1); block_on(pool.submit_one(&BlockId::number(0), ext1)).unwrap(); - let accounts = System::new(client, pool); + let accounts = FullSystem::new(client, pool); // when let nonce = accounts.nonce(AccountKeyring::Alice.into()); // then - assert_eq!(nonce.unwrap(), 2); + assert_eq!(nonce.wait().unwrap(), 2); } }
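For completeness, the pieces introduced in this change are meant to compose along the following lines; this is a hedged sketch of the full-client wiring only (the test client stands in for a real client handle, and the `txpool`/`txpool_api` crate aliases are assumed):

use std::sync::Arc;
use txpool::{BasicPool, FullBasicPoolMaintainer, FullChainApi};
use txpool_api::MaintainableTransactionPool;

// Illustrative only: combine pool, maintainer and the maintainable wrapper.
fn compose_maintainable_pool() {
	let client = Arc::new(test_client::new());

	// `BasicPool` implements `TransactionPool`, the maintainer implements
	// `TransactionPoolMaintainer`; the wrapper forwards each call to the right part.
	let basic = BasicPool::new(Default::default(), FullChainApi::new(client.clone()));
	let maintainer = FullBasicPoolMaintainer::new(basic.pool().clone(), client);
	let _pool = MaintainableTransactionPool::new(basic, maintainer);
}

Keeping the maintainer separate from the pool is what lets the full-client and light-client maintenance strategies (`FullBasicPoolMaintainer`, `LightBasicPoolMaintainer`) be swapped without touching the pool implementation itself.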